python_code (stringlengths 0–1.8M) | repo_name (stringclasses 7 values) | file_path (stringlengths 5–99)
---|---|---|
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/dma-buf.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include "lsdc_drv.h"
#include "lsdc_gem.h"
#include "lsdc_ttm.h"
static int lsdc_gem_prime_pin(struct drm_gem_object *obj)
{
struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
int ret;
ret = lsdc_bo_reserve(lbo);
if (unlikely(ret))
return ret;
ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_GTT, NULL);
if (likely(ret == 0))
lbo->sharing_count++;
lsdc_bo_unreserve(lbo);
return ret;
}
static void lsdc_gem_prime_unpin(struct drm_gem_object *obj)
{
struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
int ret;
ret = lsdc_bo_reserve(lbo);
if (unlikely(ret))
return;
lsdc_bo_unpin(lbo);
if (lbo->sharing_count)
lbo->sharing_count--;
lsdc_bo_unreserve(lbo);
}
static struct sg_table *lsdc_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct ttm_buffer_object *tbo = to_ttm_bo(obj);
struct ttm_tt *tt = tbo->ttm;
if (!tt) {
drm_err(obj->dev, "sharing a buffer without backing memory\n");
return ERR_PTR(-ENOMEM);
}
return drm_prime_pages_to_sg(obj->dev, tt->pages, tt->num_pages);
}
static void lsdc_gem_object_free(struct drm_gem_object *obj)
{
struct ttm_buffer_object *tbo = to_ttm_bo(obj);
if (tbo)
ttm_bo_put(tbo);
}
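/*
 * vmap/vunmap are reference counted: the first vmap pins the BO and maps
 * it through TTM, later callers only bump vmap_count and reuse the cached
 * mapping; the mapping is torn down and the BO unpinned once the count
 * drops back to zero.
 */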
static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
struct ttm_buffer_object *tbo = to_ttm_bo(obj);
struct lsdc_bo *lbo = to_lsdc_bo(tbo);
int ret;
if (lbo->vmap_count > 0) {
++lbo->vmap_count;
goto out;
}
ret = lsdc_bo_pin(lbo, 0, NULL);
if (unlikely(ret)) {
drm_err(obj->dev, "pin %p for vmap failed\n", lbo);
return ret;
}
ret = ttm_bo_vmap(tbo, &lbo->map);
if (ret) {
drm_err(obj->dev, "ttm bo vmap failed\n");
lsdc_bo_unpin(lbo);
return ret;
}
lbo->vmap_count = 1;
out:
*map = lbo->map;
return 0;
}
static void lsdc_gem_object_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
struct ttm_buffer_object *tbo = to_ttm_bo(obj);
struct lsdc_bo *lbo = to_lsdc_bo(tbo);
if (unlikely(!lbo->vmap_count)) {
drm_warn(obj->dev, "%p is not mapped\n", lbo);
return;
}
--lbo->vmap_count;
if (lbo->vmap_count == 0) {
ttm_bo_vunmap(tbo, &lbo->map);
lsdc_bo_unpin(lbo);
}
}
static int lsdc_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct ttm_buffer_object *tbo = to_ttm_bo(obj);
int ret;
ret = ttm_bo_mmap_obj(vma, tbo);
if (unlikely(ret)) {
drm_warn(obj->dev, "mmap %p failed\n", tbo);
return ret;
}
drm_gem_object_put(obj);
return 0;
}
static const struct drm_gem_object_funcs lsdc_gem_object_funcs = {
.free = lsdc_gem_object_free,
.export = drm_gem_prime_export,
.pin = lsdc_gem_prime_pin,
.unpin = lsdc_gem_prime_unpin,
.get_sg_table = lsdc_gem_prime_get_sg_table,
.vmap = lsdc_gem_object_vmap,
.vunmap = lsdc_gem_object_vunmap,
.mmap = lsdc_gem_object_mmap,
};
struct drm_gem_object *lsdc_gem_object_create(struct drm_device *ddev,
u32 domain,
size_t size,
bool kernel,
struct sg_table *sg,
struct dma_resv *resv)
{
struct lsdc_device *ldev = to_lsdc(ddev);
struct drm_gem_object *gobj;
struct lsdc_bo *lbo;
int ret;
lbo = lsdc_bo_create(ddev, domain, size, kernel, sg, resv);
if (IS_ERR(lbo)) {
ret = PTR_ERR(lbo);
return ERR_PTR(ret);
}
if (!sg) {
/* VRAM is filled with random data */
lsdc_bo_clear(lbo);
}
gobj = &lbo->tbo.base;
gobj->funcs = &lsdc_gem_object_funcs;
/* tracking the BOs we created */
mutex_lock(&ldev->gem.mutex);
list_add_tail(&lbo->list, &ldev->gem.objects);
mutex_unlock(&ldev->gem.mutex);
return gobj;
}
struct drm_gem_object *
lsdc_prime_import_sg_table(struct drm_device *ddev,
struct dma_buf_attachment *attach,
struct sg_table *sg)
{
struct dma_resv *resv = attach->dmabuf->resv;
u64 size = attach->dmabuf->size;
struct drm_gem_object *gobj;
struct lsdc_bo *lbo;
dma_resv_lock(resv, NULL);
gobj = lsdc_gem_object_create(ddev, LSDC_GEM_DOMAIN_GTT, size, false,
sg, resv);
dma_resv_unlock(resv);
if (IS_ERR(gobj)) {
drm_err(ddev, "Failed to import sg table\n");
return gobj;
}
lbo = gem_to_lsdc_bo(gobj);
lbo->sharing_count = 1;
return gobj;
}
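/*
 * Dumb buffers are placed in VRAM. The pitch is aligned to the hardware
 * requirement from the device descriptor and the total size is rounded
 * up to a page; a single BO may not take more than half of the VRAM.
 */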
int lsdc_dumb_create(struct drm_file *file, struct drm_device *ddev,
struct drm_mode_create_dumb *args)
{
struct lsdc_device *ldev = to_lsdc(ddev);
const struct lsdc_desc *descp = ldev->descp;
u32 domain = LSDC_GEM_DOMAIN_VRAM;
struct drm_gem_object *gobj;
size_t size;
u32 pitch;
u32 handle;
int ret;
if (!args->width || !args->height)
return -EINVAL;
if (args->bpp != 32 && args->bpp != 16)
return -EINVAL;
pitch = args->width * args->bpp / 8;
pitch = ALIGN(pitch, descp->pitch_align);
size = pitch * args->height;
size = ALIGN(size, PAGE_SIZE);
/* The maximum single BO size allowed is half of the available VRAM size */
if (size > ldev->vram_size / 2) {
drm_err(ddev, "Requesting(%zuMiB) failed\n", size >> 20);
return -ENOMEM;
}
gobj = lsdc_gem_object_create(ddev, domain, size, false, NULL, NULL);
if (IS_ERR(gobj)) {
drm_err(ddev, "Failed to create gem object\n");
return PTR_ERR(gobj);
}
ret = drm_gem_handle_create(file, gobj, &handle);
/* drop reference from allocate, handle holds it now */
drm_gem_object_put(gobj);
if (ret)
return ret;
args->pitch = pitch;
args->size = size;
args->handle = handle;
return 0;
}
int lsdc_dumb_map_offset(struct drm_file *filp, struct drm_device *ddev,
u32 handle, uint64_t *offset)
{
struct drm_gem_object *gobj;
gobj = drm_gem_object_lookup(filp, handle);
if (!gobj)
return -ENOENT;
*offset = drm_vma_node_offset_addr(&gobj->vma_node);
drm_gem_object_put(gobj);
return 0;
}
void lsdc_gem_init(struct drm_device *ddev)
{
struct lsdc_device *ldev = to_lsdc(ddev);
mutex_init(&ldev->gem.mutex);
INIT_LIST_HEAD(&ldev->gem.objects);
}
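/*
 * debugfs: dump every buffer object this driver created, along with the
 * total pinned VRAM and GTT sizes.
 */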
int lsdc_show_buffer_object(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *ddev = node->minor->dev;
struct lsdc_device *ldev = to_lsdc(ddev);
struct lsdc_bo *lbo;
unsigned int i;
mutex_lock(&ldev->gem.mutex);
i = 0;
list_for_each_entry(lbo, &ldev->gem.objects, list) {
struct ttm_buffer_object *tbo = &lbo->tbo;
struct ttm_resource *resource = tbo->resource;
seq_printf(m, "bo[%04u][%p]: size: %8zuKiB %s offset: %8llx\n",
i, lbo, lsdc_bo_size(lbo) >> 10,
lsdc_mem_type_to_str(resource->mem_type),
lsdc_bo_gpu_offset(lbo));
i++;
}
mutex_unlock(&ldev->gem.mutex);
seq_printf(m, "Pinned BO size: VRAM: %zuKiB, GTT: %zu KiB\n",
ldev->vram_pinned_size >> 10, ldev->gtt_pinned_size >> 10);
return 0;
}
| linux-master | drivers/gpu/drm/loongson/lsdc_gem.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "loongson_module.h"
#include "lsdc_drv.h"
#include "lsdc_gem.h"
#include "lsdc_ttm.h"
#define DRIVER_AUTHOR "Sui Jingfeng <[email protected]>"
#define DRIVER_NAME "loongson"
#define DRIVER_DESC "drm driver for loongson graphics"
#define DRIVER_DATE "20220701"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
DEFINE_DRM_GEM_FOPS(lsdc_gem_fops);
static const struct drm_driver lsdc_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_RENDER | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &lsdc_gem_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
.debugfs_init = lsdc_debugfs_init,
.dumb_create = lsdc_dumb_create,
.dumb_map_offset = lsdc_dumb_map_offset,
.gem_prime_import_sg_table = lsdc_prime_import_sg_table,
};
static const struct drm_mode_config_funcs lsdc_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
/* Display related */
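/*
 * Initialize each display pipe in stages: first the i2c/DDC channels,
 * then the outputs (encoder + connector), and finally the primary plane,
 * cursor plane and CRTC for every pipe.
 */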
static int lsdc_modeset_init(struct lsdc_device *ldev,
unsigned int num_crtc,
const struct lsdc_kms_funcs *funcs,
bool has_vblank)
{
struct drm_device *ddev = &ldev->base;
struct lsdc_display_pipe *dispipe;
unsigned int i;
int ret;
for (i = 0; i < num_crtc; i++) {
dispipe = &ldev->dispipe[i];
/* We need an index before crtc is initialized */
dispipe->index = i;
ret = funcs->create_i2c(ddev, dispipe, i);
if (ret)
return ret;
}
for (i = 0; i < num_crtc; i++) {
struct i2c_adapter *ddc = NULL;
dispipe = &ldev->dispipe[i];
if (dispipe->li2c)
ddc = &dispipe->li2c->adapter;
ret = funcs->output_init(ddev, dispipe, ddc, i);
if (ret)
return ret;
ldev->num_output++;
}
for (i = 0; i < num_crtc; i++) {
dispipe = &ldev->dispipe[i];
ret = funcs->primary_plane_init(ddev, &dispipe->primary.base, i);
if (ret)
return ret;
ret = funcs->cursor_plane_init(ddev, &dispipe->cursor.base, i);
if (ret)
return ret;
ret = funcs->crtc_init(ddev, &dispipe->crtc.base,
&dispipe->primary.base,
&dispipe->cursor.base,
i, has_vblank);
if (ret)
return ret;
}
drm_info(ddev, "Total %u outputs\n", ldev->num_output);
return 0;
}
static const struct drm_mode_config_helper_funcs lsdc_mode_config_helper_funcs = {
.atomic_commit_tail = drm_atomic_helper_commit_tail,
};
static int lsdc_mode_config_init(struct drm_device *ddev,
const struct lsdc_desc *descp)
{
int ret;
ret = drmm_mode_config_init(ddev);
if (ret)
return ret;
ddev->mode_config.funcs = &lsdc_mode_config_funcs;
ddev->mode_config.min_width = 1;
ddev->mode_config.min_height = 1;
ddev->mode_config.max_width = descp->max_width * LSDC_NUM_CRTC;
ddev->mode_config.max_height = descp->max_height * LSDC_NUM_CRTC;
ddev->mode_config.preferred_depth = 24;
ddev->mode_config.prefer_shadow = 1;
ddev->mode_config.cursor_width = descp->hw_cursor_w;
ddev->mode_config.cursor_height = descp->hw_cursor_h;
ddev->mode_config.helper_private = &lsdc_mode_config_helper_funcs;
if (descp->has_vblank_counter)
ddev->max_vblank_count = 0xffffffff;
return ret;
}
/*
* The GPU and the display controller in the LS7A1000/LS7A2000/LS2K2000 are
* separate PCIe devices. They are two devices, not one. BAR 2 of the GPU
* device contains the base address and size of the VRAM; both the GPU and
* the DC can access the on-board VRAM.
*/
static int lsdc_get_dedicated_vram(struct lsdc_device *ldev,
struct pci_dev *pdev_dc,
const struct lsdc_desc *descp)
{
struct drm_device *ddev = &ldev->base;
struct pci_dev *pdev_gpu;
resource_size_t base, size;
/*
* The GPU has 00:06.0 as its BDF, while the DC has 00:06.1.
* This is true for the LS7A1000, LS7A2000 and LS2K2000.
*/
pdev_gpu = pci_get_domain_bus_and_slot(pci_domain_nr(pdev_dc->bus),
pdev_dc->bus->number,
PCI_DEVFN(6, 0));
if (!pdev_gpu) {
drm_err(ddev, "No GPU device, then no VRAM\n");
return -ENODEV;
}
base = pci_resource_start(pdev_gpu, 2);
size = pci_resource_len(pdev_gpu, 2);
ldev->vram_base = base;
ldev->vram_size = size;
ldev->gpu = pdev_gpu;
drm_info(ddev, "Dedicated vram start: 0x%llx, size: %uMiB\n",
(u64)base, (u32)(size >> 20));
return 0;
}
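/*
 * Create and initialize the lsdc device: set up the GFX PLL, discover the
 * VRAM from the GPU function, take over the firmware framebuffer, bring up
 * TTM and GEM, map the DC registers and initialize the KMS state.
 */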
static struct lsdc_device *
lsdc_create_device(struct pci_dev *pdev,
const struct lsdc_desc *descp,
const struct drm_driver *driver)
{
struct lsdc_device *ldev;
struct drm_device *ddev;
int ret;
ldev = devm_drm_dev_alloc(&pdev->dev, driver, struct lsdc_device, base);
if (IS_ERR(ldev))
return ldev;
ldev->dc = pdev;
ldev->descp = descp;
ddev = &ldev->base;
loongson_gfxpll_create(ddev, &ldev->gfxpll);
ret = lsdc_get_dedicated_vram(ldev, pdev, descp);
if (ret) {
drm_err(ddev, "Init VRAM failed: %d\n", ret);
return ERR_PTR(ret);
}
ret = drm_aperture_remove_conflicting_framebuffers(ldev->vram_base,
ldev->vram_size,
driver);
if (ret) {
drm_err(ddev, "Remove firmware framebuffers failed: %d\n", ret);
return ERR_PTR(ret);
}
ret = lsdc_ttm_init(ldev);
if (ret) {
drm_err(ddev, "Memory manager init failed: %d\n", ret);
return ERR_PTR(ret);
}
lsdc_gem_init(ddev);
/* Bar 0 of the DC device contains the MMIO register's base address */
ldev->reg_base = pcim_iomap(pdev, 0, 0);
if (!ldev->reg_base)
return ERR_PTR(-ENODEV);
spin_lock_init(&ldev->reglock);
ret = lsdc_mode_config_init(ddev, descp);
if (ret)
return ERR_PTR(ret);
ret = lsdc_modeset_init(ldev, descp->num_of_crtc, descp->funcs,
loongson_vblank);
if (ret)
return ERR_PTR(ret);
drm_mode_config_reset(ddev);
return ldev;
}
/* Allow multiple GPU driver instances to co-exist in the system */
static unsigned int lsdc_vga_set_decode(struct pci_dev *pdev, bool state)
{
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
static int lsdc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct lsdc_desc *descp;
struct drm_device *ddev;
struct lsdc_device *ldev;
int ret;
descp = lsdc_device_probe(pdev, ent->driver_data);
if (IS_ERR_OR_NULL(descp))
return -ENODEV;
pci_set_master(pdev);
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (ret)
return ret;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
dev_info(&pdev->dev, "Found %s, revision: %u",
to_loongson_gfx(descp)->model, pdev->revision);
ldev = lsdc_create_device(pdev, descp, &lsdc_drm_driver);
if (IS_ERR(ldev))
return PTR_ERR(ldev);
ddev = &ldev->base;
pci_set_drvdata(pdev, ddev);
vga_client_register(pdev, lsdc_vga_set_decode);
drm_kms_helper_poll_init(ddev);
if (loongson_vblank) {
ret = drm_vblank_init(ddev, descp->num_of_crtc);
if (ret)
return ret;
ret = devm_request_irq(&pdev->dev, pdev->irq,
descp->funcs->irq_handler,
IRQF_SHARED,
dev_name(&pdev->dev), ddev);
if (ret) {
drm_err(ddev, "Failed to register interrupt: %d\n", ret);
return ret;
}
drm_info(ddev, "registered irq: %u\n", pdev->irq);
}
ret = drm_dev_register(ddev, 0);
if (ret)
return ret;
drm_fbdev_generic_setup(ddev, 32);
return 0;
}
static void lsdc_pci_remove(struct pci_dev *pdev)
{
struct drm_device *ddev = pci_get_drvdata(pdev);
drm_dev_unregister(ddev);
drm_atomic_helper_shutdown(ddev);
}
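/*
 * Freeze for suspend/hibernate: fully unpin every BO that is still pinned
 * in VRAM, evict VRAM so its contents are preserved in system memory, then
 * suspend the mode setting state.
 */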
static int lsdc_drm_freeze(struct drm_device *ddev)
{
struct lsdc_device *ldev = to_lsdc(ddev);
struct lsdc_bo *lbo;
int ret;
/* Unpin all of the buffers pinned in VRAM */
mutex_lock(&ldev->gem.mutex);
list_for_each_entry(lbo, &ldev->gem.objects, list) {
struct ttm_buffer_object *tbo = &lbo->tbo;
struct ttm_resource *resource = tbo->resource;
unsigned int pin_count = tbo->pin_count;
drm_dbg(ddev, "bo[%p], size: %zuKiB, type: %s, pin count: %u\n",
lbo, lsdc_bo_size(lbo) >> 10,
lsdc_mem_type_to_str(resource->mem_type), pin_count);
if (!pin_count)
continue;
if (resource->mem_type == TTM_PL_VRAM) {
ret = lsdc_bo_reserve(lbo);
if (unlikely(ret)) {
drm_err(ddev, "bo reserve failed: %d\n", ret);
continue;
}
do {
lsdc_bo_unpin(lbo);
--pin_count;
} while (pin_count);
lsdc_bo_unreserve(lbo);
}
}
mutex_unlock(&ldev->gem.mutex);
lsdc_bo_evict_vram(ddev);
ret = drm_mode_config_helper_suspend(ddev);
if (unlikely(ret)) {
drm_err(ddev, "Freeze error: %d", ret);
return ret;
}
return 0;
}
static int lsdc_drm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
return drm_mode_config_helper_resume(ddev);
}
static int lsdc_pm_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
return lsdc_drm_freeze(ddev);
}
static int lsdc_pm_thaw(struct device *dev)
{
return lsdc_drm_resume(dev);
}
static int lsdc_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int error;
error = lsdc_pm_freeze(dev);
if (error)
return error;
pci_save_state(pdev);
/* Shut down the device */
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
static int lsdc_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
if (pcim_enable_device(pdev))
return -EIO;
return lsdc_pm_thaw(dev);
}
static const struct dev_pm_ops lsdc_pm_ops = {
.suspend = lsdc_pm_suspend,
.resume = lsdc_pm_resume,
.freeze = lsdc_pm_freeze,
.thaw = lsdc_pm_thaw,
.poweroff = lsdc_pm_freeze,
.restore = lsdc_pm_resume,
};
static const struct pci_device_id lsdc_pciid_list[] = {
{PCI_VDEVICE(LOONGSON, 0x7a06), CHIP_LS7A1000},
{PCI_VDEVICE(LOONGSON, 0x7a36), CHIP_LS7A2000},
{ }
};
struct pci_driver lsdc_pci_driver = {
.name = DRIVER_NAME,
.id_table = lsdc_pciid_list,
.probe = lsdc_pci_probe,
.remove = lsdc_pci_remove,
.driver.pm = &lsdc_pm_ops,
};
MODULE_DEVICE_TABLE(pci, lsdc_pciid_list);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/loongson/lsdc_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2021 Microsoft
*/
#include <linux/hyperv.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "hyperv_drm.h"
static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb,
const struct iosys_map *vmap,
struct drm_rect *rect)
{
struct hyperv_drm_device *hv = to_hv(fb->dev);
struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(hv->vram);
int idx;
if (!drm_dev_enter(&hv->dev, &idx))
return -ENODEV;
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, rect));
drm_fb_memcpy(&dst, fb->pitches, vmap, fb, rect);
drm_dev_exit(idx);
return 0;
}
static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb,
const struct iosys_map *map)
{
struct drm_rect fullscreen = {
.x1 = 0,
.x2 = fb->width,
.y1 = 0,
.y2 = fb->height,
};
return hyperv_blit_to_vram_rect(fb, map, &fullscreen);
}
static int hyperv_connector_get_modes(struct drm_connector *connector)
{
struct hyperv_drm_device *hv = to_hv(connector->dev);
int count;
count = drm_add_modes_noedid(connector,
connector->dev->mode_config.max_width,
connector->dev->mode_config.max_height);
drm_set_preferred_mode(connector, hv->preferred_width,
hv->preferred_height);
return count;
}
static const struct drm_connector_helper_funcs hyperv_connector_helper_funcs = {
.get_modes = hyperv_connector_get_modes,
};
static const struct drm_connector_funcs hyperv_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static inline int hyperv_conn_init(struct hyperv_drm_device *hv)
{
drm_connector_helper_add(&hv->connector, &hyperv_connector_helper_funcs);
return drm_connector_init(&hv->dev, &hv->connector,
&hyperv_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
}
static int hyperv_check_size(struct hyperv_drm_device *hv, int w, int h,
struct drm_framebuffer *fb)
{
u32 pitch = w * (hv->screen_depth / 8);
if (fb)
pitch = fb->pitches[0];
if (pitch * h > hv->fb_size)
return -EINVAL;
return 0;
}
static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
hyperv_hide_hw_ptr(hv->hdev);
hyperv_update_situation(hv->hdev, 1, hv->screen_depth,
crtc_state->mode.hdisplay,
crtc_state->mode.vdisplay,
plane_state->fb->pitches[0]);
hyperv_blit_to_vram_fullscreen(plane_state->fb, &shadow_plane_state->data[0]);
}
static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state,
struct drm_crtc_state *crtc_state)
{
struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
struct drm_framebuffer *fb = plane_state->fb;
if (fb->format->format != DRM_FORMAT_XRGB8888)
return -EINVAL;
if (fb->pitches[0] * fb->height > hv->fb_size) {
drm_err(&hv->dev, "fb size requested by %s for %dX%d (pitch %d) greater than %ld\n",
current->comm, fb->width, fb->height, fb->pitches[0], hv->fb_size);
return -EINVAL;
}
return 0;
}
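/*
 * On each atomic update, merge the damage clips into a single rectangle,
 * blit that region from the shadow plane into VRAM and tell the host
 * which area is dirty.
 */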
static void hyperv_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
struct drm_plane_state *state = pipe->plane.state;
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect)) {
hyperv_blit_to_vram_rect(state->fb, &shadow_plane_state->data[0], &rect);
hyperv_update_dirt(hv->hdev, &rect);
}
}
static const struct drm_simple_display_pipe_funcs hyperv_pipe_funcs = {
.enable = hyperv_pipe_enable,
.check = hyperv_pipe_check,
.update = hyperv_pipe_update,
DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};
static const uint32_t hyperv_formats[] = {
DRM_FORMAT_XRGB8888,
};
static const uint64_t hyperv_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static inline int hyperv_pipe_init(struct hyperv_drm_device *hv)
{
int ret;
ret = drm_simple_display_pipe_init(&hv->dev,
&hv->pipe,
&hyperv_pipe_funcs,
hyperv_formats,
ARRAY_SIZE(hyperv_formats),
hyperv_modifiers,
&hv->connector);
if (ret)
return ret;
drm_plane_enable_fb_damage_clips(&hv->pipe.plane);
return 0;
}
static enum drm_mode_status
hyperv_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
struct hyperv_drm_device *hv = to_hv(dev);
if (hyperv_check_size(hv, mode->hdisplay, mode->vdisplay, NULL))
return MODE_BAD;
return MODE_OK;
}
static const struct drm_mode_config_funcs hyperv_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.mode_valid = hyperv_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
int hyperv_mode_config_init(struct hyperv_drm_device *hv)
{
struct drm_device *dev = &hv->dev;
int ret;
ret = drmm_mode_config_init(dev);
if (ret) {
drm_err(dev, "Failed to initialized mode setting.\n");
return ret;
}
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.max_width = hv->screen_width_max;
dev->mode_config.max_height = hv->screen_height_max;
dev->mode_config.preferred_depth = hv->screen_depth;
dev->mode_config.prefer_shadow = 0;
dev->mode_config.funcs = &hyperv_mode_config_funcs;
ret = hyperv_conn_init(hv);
if (ret) {
drm_err(dev, "Failed to initialized connector.\n");
return ret;
}
ret = hyperv_pipe_init(hv);
if (ret) {
drm_err(dev, "Failed to initialized pipe.\n");
return ret;
}
drm_mode_config_reset(dev);
return 0;
}
| linux-master | drivers/gpu/drm/hyperv/hyperv_drm_modeset.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2021 Microsoft
*
* Portions of this code is derived from hyperv_fb.c
*/
#include <linux/hyperv.h>
#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "hyperv_drm.h"
#define VMBUS_RING_BUFSIZE (256 * 1024)
#define VMBUS_VSP_TIMEOUT (10 * HZ)
#define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
#define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
#define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)
/* Support for VERSION_WIN7 is removed. #define is retained for reference. */
#define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
#define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
#define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)
#define SYNTHVID_DEPTH_WIN8 32
#define SYNTHVID_WIDTH_WIN8 1600
#define SYNTHVID_HEIGHT_WIN8 1200
#define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
enum pipe_msg_type {
PIPE_MSG_INVALID,
PIPE_MSG_DATA,
PIPE_MSG_MAX
};
enum synthvid_msg_type {
SYNTHVID_ERROR = 0,
SYNTHVID_VERSION_REQUEST = 1,
SYNTHVID_VERSION_RESPONSE = 2,
SYNTHVID_VRAM_LOCATION = 3,
SYNTHVID_VRAM_LOCATION_ACK = 4,
SYNTHVID_SITUATION_UPDATE = 5,
SYNTHVID_SITUATION_UPDATE_ACK = 6,
SYNTHVID_POINTER_POSITION = 7,
SYNTHVID_POINTER_SHAPE = 8,
SYNTHVID_FEATURE_CHANGE = 9,
SYNTHVID_DIRT = 10,
SYNTHVID_RESOLUTION_REQUEST = 13,
SYNTHVID_RESOLUTION_RESPONSE = 14,
SYNTHVID_MAX = 15
};
struct pipe_msg_hdr {
u32 type;
u32 size; /* size of message after this field */
} __packed;
struct hvd_screen_info {
u16 width;
u16 height;
} __packed;
struct synthvid_msg_hdr {
u32 type;
u32 size; /* size of this header + payload after this field */
} __packed;
struct synthvid_version_req {
u32 version;
} __packed;
struct synthvid_version_resp {
u32 version;
u8 is_accepted;
u8 max_video_outputs;
} __packed;
struct synthvid_vram_location {
u64 user_ctx;
u8 is_vram_gpa_specified;
u64 vram_gpa;
} __packed;
struct synthvid_vram_location_ack {
u64 user_ctx;
} __packed;
struct video_output_situation {
u8 active;
u32 vram_offset;
u8 depth_bits;
u32 width_pixels;
u32 height_pixels;
u32 pitch_bytes;
} __packed;
struct synthvid_situation_update {
u64 user_ctx;
u8 video_output_count;
struct video_output_situation video_output[1];
} __packed;
struct synthvid_situation_update_ack {
u64 user_ctx;
} __packed;
struct synthvid_pointer_position {
u8 is_visible;
u8 video_output;
s32 image_x;
s32 image_y;
} __packed;
#define SYNTHVID_CURSOR_MAX_X 96
#define SYNTHVID_CURSOR_MAX_Y 96
#define SYNTHVID_CURSOR_ARGB_PIXEL_SIZE 4
#define SYNTHVID_CURSOR_MAX_SIZE (SYNTHVID_CURSOR_MAX_X * \
SYNTHVID_CURSOR_MAX_Y * SYNTHVID_CURSOR_ARGB_PIXEL_SIZE)
#define SYNTHVID_CURSOR_COMPLETE (-1)
struct synthvid_pointer_shape {
u8 part_idx;
u8 is_argb;
u32 width; /* SYNTHVID_CURSOR_MAX_X at most */
u32 height; /* SYNTHVID_CURSOR_MAX_Y at most */
u32 hot_x; /* hotspot relative to upper-left of pointer image */
u32 hot_y;
u8 data[4];
} __packed;
struct synthvid_feature_change {
u8 is_dirt_needed;
u8 is_ptr_pos_needed;
u8 is_ptr_shape_needed;
u8 is_situ_needed;
} __packed;
struct rect {
s32 x1, y1; /* top left corner */
s32 x2, y2; /* bottom right corner, exclusive */
} __packed;
struct synthvid_dirt {
u8 video_output;
u8 dirt_count;
struct rect rect[1];
} __packed;
#define SYNTHVID_EDID_BLOCK_SIZE 128
#define SYNTHVID_MAX_RESOLUTION_COUNT 64
struct synthvid_supported_resolution_req {
u8 maximum_resolution_count;
} __packed;
struct synthvid_supported_resolution_resp {
u8 edid_block[SYNTHVID_EDID_BLOCK_SIZE];
u8 resolution_count;
u8 default_resolution_index;
u8 is_standard;
struct hvd_screen_info supported_resolution[SYNTHVID_MAX_RESOLUTION_COUNT];
} __packed;
struct synthvid_msg {
struct pipe_msg_hdr pipe_hdr;
struct synthvid_msg_hdr vid_hdr;
union {
struct synthvid_version_req ver_req;
struct synthvid_version_resp ver_resp;
struct synthvid_vram_location vram;
struct synthvid_vram_location_ack vram_ack;
struct synthvid_situation_update situ;
struct synthvid_situation_update_ack situ_ack;
struct synthvid_pointer_position ptr_pos;
struct synthvid_pointer_shape ptr_shape;
struct synthvid_feature_change feature_chg;
struct synthvid_dirt dirt;
struct synthvid_supported_resolution_req resolution_req;
struct synthvid_supported_resolution_resp resolution_resp;
};
} __packed;
static inline bool hyperv_version_ge(u32 ver1, u32 ver2)
{
if (SYNTHVID_VER_GET_MAJOR(ver1) > SYNTHVID_VER_GET_MAJOR(ver2) ||
(SYNTHVID_VER_GET_MAJOR(ver1) == SYNTHVID_VER_GET_MAJOR(ver2) &&
SYNTHVID_VER_GET_MINOR(ver1) >= SYNTHVID_VER_GET_MINOR(ver2)))
return true;
return false;
}
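/*
 * Every message to the host is wrapped in a PIPE_MSG_DATA pipe header and
 * sent over the VMBus channel with a monotonically increasing request id.
 */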
static inline int hyperv_sendpacket(struct hv_device *hdev, struct synthvid_msg *msg)
{
static atomic64_t request_id = ATOMIC64_INIT(0);
struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
int ret;
msg->pipe_hdr.type = PIPE_MSG_DATA;
msg->pipe_hdr.size = msg->vid_hdr.size;
ret = vmbus_sendpacket(hdev->channel, msg,
msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
atomic64_inc_return(&request_id),
VM_PKT_DATA_INBAND, 0);
if (ret)
drm_err_ratelimited(&hv->dev, "Unable to send packet via vmbus; error %d\n", ret);
return ret;
}
static int hyperv_negotiate_version(struct hv_device *hdev, u32 ver)
{
struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
struct drm_device *dev = &hv->dev;
unsigned long t;
memset(msg, 0, sizeof(struct synthvid_msg));
msg->vid_hdr.type = SYNTHVID_VERSION_REQUEST;
msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_version_req);
msg->ver_req.version = ver;
hyperv_sendpacket(hdev, msg);
t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
if (!t) {
drm_err(dev, "Time out on waiting version response\n");
return -ETIMEDOUT;
}
if (!msg->ver_resp.is_accepted) {
drm_err(dev, "Version request not accepted\n");
return -ENODEV;
}
hv->synthvid_version = ver;
drm_info(dev, "Synthvid Version major %d, minor %d\n",
SYNTHVID_VER_GET_MAJOR(ver), SYNTHVID_VER_GET_MINOR(ver));
return 0;
}
int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp)
{
struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
struct drm_device *dev = &hv->dev;
unsigned long t;
memset(msg, 0, sizeof(struct synthvid_msg));
msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION;
msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_vram_location);
msg->vram.user_ctx = vram_pp;
msg->vram.vram_gpa = vram_pp;
msg->vram.is_vram_gpa_specified = 1;
hyperv_sendpacket(hdev, msg);
t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
if (!t) {
drm_err(dev, "Time out on waiting vram location ack\n");
return -ETIMEDOUT;
}
if (msg->vram_ack.user_ctx != vram_pp) {
drm_err(dev, "Unable to set VRAM location\n");
return -ENODEV;
}
return 0;
}
int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
u32 w, u32 h, u32 pitch)
{
struct synthvid_msg msg;
memset(&msg, 0, sizeof(struct synthvid_msg));
msg.vid_hdr.type = SYNTHVID_SITUATION_UPDATE;
msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_situation_update);
msg.situ.user_ctx = 0;
msg.situ.video_output_count = 1;
msg.situ.video_output[0].active = active;
/* vram_offset should always be 0 */
msg.situ.video_output[0].vram_offset = 0;
msg.situ.video_output[0].depth_bits = bpp;
msg.situ.video_output[0].width_pixels = w;
msg.situ.video_output[0].height_pixels = h;
msg.situ.video_output[0].pitch_bytes = pitch;
hyperv_sendpacket(hdev, &msg);
return 0;
}
/*
* Hyper-V supports a hardware cursor feature. It's not used by Linux VM,
* but the Hyper-V host still draws a point as an extra mouse pointer,
* which is unwanted, especially when Xorg is running.
*
* The hyperv_fb driver uses synthvid_send_ptr() to hide the unwanted
* pointer, by setting msg.ptr_pos.is_visible = 1 and setting the
* msg.ptr_shape.data. Note: setting msg.ptr_pos.is_visible to 0 doesn't
* work in tests.
*
* Copy synthvid_send_ptr() to hyperv_drm and rename it to
* hyperv_hide_hw_ptr(). Note: hyperv_hide_hw_ptr() is also called in the
* handler of the SYNTHVID_FEATURE_CHANGE event, otherwise the host still
* draws an extra unwanted mouse pointer after the VM Connection window is
* closed and reopened.
*/
int hyperv_hide_hw_ptr(struct hv_device *hdev)
{
struct synthvid_msg msg;
memset(&msg, 0, sizeof(struct synthvid_msg));
msg.vid_hdr.type = SYNTHVID_POINTER_POSITION;
msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_pointer_position);
msg.ptr_pos.is_visible = 1;
msg.ptr_pos.video_output = 0;
msg.ptr_pos.image_x = 0;
msg.ptr_pos.image_y = 0;
hyperv_sendpacket(hdev, &msg);
memset(&msg, 0, sizeof(struct synthvid_msg));
msg.vid_hdr.type = SYNTHVID_POINTER_SHAPE;
msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_pointer_shape);
msg.ptr_shape.part_idx = SYNTHVID_CURSOR_COMPLETE;
msg.ptr_shape.is_argb = 1;
msg.ptr_shape.width = 1;
msg.ptr_shape.height = 1;
msg.ptr_shape.hot_x = 0;
msg.ptr_shape.hot_y = 0;
msg.ptr_shape.data[0] = 0;
msg.ptr_shape.data[1] = 1;
msg.ptr_shape.data[2] = 1;
msg.ptr_shape.data[3] = 1;
hyperv_sendpacket(hdev, &msg);
return 0;
}
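/*
 * Report a dirty rectangle to the host. This is only needed (and only
 * sent) when the host has asked for dirt notifications via a
 * SYNTHVID_FEATURE_CHANGE message.
 */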
int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect)
{
struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
struct synthvid_msg msg;
if (!hv->dirt_needed)
return 0;
memset(&msg, 0, sizeof(struct synthvid_msg));
msg.vid_hdr.type = SYNTHVID_DIRT;
msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_dirt);
msg.dirt.video_output = 0;
msg.dirt.dirt_count = 1;
msg.dirt.rect[0].x1 = rect->x1;
msg.dirt.rect[0].y1 = rect->y1;
msg.dirt.rect[0].x2 = rect->x2;
msg.dirt.rect[0].y2 = rect->y2;
hyperv_sendpacket(hdev, &msg);
return 0;
}
static int hyperv_get_supported_resolution(struct hv_device *hdev)
{
struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
struct drm_device *dev = &hv->dev;
unsigned long t;
u8 index;
int i;
memset(msg, 0, sizeof(struct synthvid_msg));
msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_supported_resolution_req);
msg->resolution_req.maximum_resolution_count =
SYNTHVID_MAX_RESOLUTION_COUNT;
hyperv_sendpacket(hdev, msg);
t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
if (!t) {
drm_err(dev, "Time out on waiting resolution response\n");
return -ETIMEDOUT;
}
if (msg->resolution_resp.resolution_count == 0) {
drm_err(dev, "No supported resolutions\n");
return -ENODEV;
}
index = msg->resolution_resp.default_resolution_index;
if (index >= msg->resolution_resp.resolution_count) {
drm_err(dev, "Invalid resolution index: %d\n", index);
return -ENODEV;
}
for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
hv->screen_width_max = max_t(u32, hv->screen_width_max,
msg->resolution_resp.supported_resolution[i].width);
hv->screen_height_max = max_t(u32, hv->screen_height_max,
msg->resolution_resp.supported_resolution[i].height);
}
hv->preferred_width =
msg->resolution_resp.supported_resolution[index].width;
hv->preferred_height =
msg->resolution_resp.supported_resolution[index].height;
return 0;
}
static void hyperv_receive_sub(struct hv_device *hdev)
{
struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
struct synthvid_msg *msg;
if (!hv)
return;
msg = (struct synthvid_msg *)hv->recv_buf;
/* Complete the wait event */
if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE ||
msg->vid_hdr.type == SYNTHVID_RESOLUTION_RESPONSE ||
msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) {
memcpy(hv->init_buf, msg, VMBUS_MAX_PACKET_SIZE);
complete(&hv->wait);
return;
}
if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) {
hv->dirt_needed = msg->feature_chg.is_dirt_needed;
if (hv->dirt_needed)
hyperv_hide_hw_ptr(hv->hdev);
}
}
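/*
 * VMBus channel callback: drain every pending packet from the ring buffer
 * and dispatch the PIPE_MSG_DATA ones.
 */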
static void hyperv_receive(void *ctx)
{
struct hv_device *hdev = ctx;
struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
struct synthvid_msg *recv_buf;
u32 bytes_recvd;
u64 req_id;
int ret;
if (!hv)
return;
recv_buf = (struct synthvid_msg *)hv->recv_buf;
do {
ret = vmbus_recvpacket(hdev->channel, recv_buf,
VMBUS_MAX_PACKET_SIZE,
&bytes_recvd, &req_id);
if (bytes_recvd > 0 &&
recv_buf->pipe_hdr.type == PIPE_MSG_DATA)
hyperv_receive_sub(hdev);
} while (bytes_recvd > 0 && ret == 0);
}
int hyperv_connect_vsp(struct hv_device *hdev)
{
struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
struct drm_device *dev = &hv->dev;
int ret;
ret = vmbus_open(hdev->channel, VMBUS_RING_BUFSIZE, VMBUS_RING_BUFSIZE,
NULL, 0, hyperv_receive, hdev);
if (ret) {
drm_err(dev, "Unable to open vmbus channel\n");
return ret;
}
/* Negotiate the protocol version with host */
switch (vmbus_proto_version) {
case VERSION_WIN10:
case VERSION_WIN10_V5:
ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN10);
if (!ret)
break;
fallthrough;
case VERSION_WIN8:
case VERSION_WIN8_1:
ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN8);
break;
default:
ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN10);
break;
}
if (ret) {
drm_err(dev, "Synthetic video device version not accepted %d\n", ret);
goto error;
}
hv->screen_depth = SYNTHVID_DEPTH_WIN8;
if (hyperv_version_ge(hv->synthvid_version, SYNTHVID_VERSION_WIN10)) {
ret = hyperv_get_supported_resolution(hdev);
if (ret)
drm_err(dev, "Failed to get supported resolution from host, use default\n");
} else {
hv->screen_width_max = SYNTHVID_WIDTH_WIN8;
hv->screen_height_max = SYNTHVID_HEIGHT_WIN8;
}
hv->mmio_megabytes = hdev->channel->offermsg.offer.mmio_megabytes;
return 0;
error:
vmbus_close(hdev->channel);
return ret;
}
| linux-master | drivers/gpu/drm/hyperv/hyperv_drm_proto.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2021 Microsoft
*/
#include <linux/efi.h>
#include <linux/hyperv.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/screen_info.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "hyperv_drm.h"
#define DRIVER_NAME "hyperv_drm"
#define DRIVER_DESC "DRM driver for Hyper-V synthetic video device"
#define DRIVER_DATE "2020"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
DEFINE_DRM_GEM_FOPS(hv_fops);
static struct drm_driver hyperv_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.fops = &hv_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
};
static int hyperv_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
return 0;
}
static void hyperv_pci_remove(struct pci_dev *pdev)
{
}
static const struct pci_device_id hyperv_pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_MICROSOFT,
.device = PCI_DEVICE_ID_HYPERV_VIDEO,
},
{ /* end of list */ }
};
/*
* PCI stub to support gen1 VM.
*/
static struct pci_driver hyperv_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = hyperv_pci_tbl,
.probe = hyperv_pci_probe,
.remove = hyperv_pci_remove,
};
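/*
 * Claim the framebuffer: kick out the firmware framebuffer driver,
 * allocate an MMIO region sized by the VMBus offer and map it as the
 * VRAM the device scans out from.
 */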
static int hyperv_setup_vram(struct hyperv_drm_device *hv,
struct hv_device *hdev)
{
struct drm_device *dev = &hv->dev;
int ret;
drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base,
screen_info.lfb_size,
&hyperv_driver);
hv->fb_size = (unsigned long)hv->mmio_megabytes * 1024 * 1024;
ret = vmbus_allocate_mmio(&hv->mem, hdev, 0, -1, hv->fb_size, 0x100000,
true);
if (ret) {
drm_err(dev, "Failed to allocate mmio\n");
return -ENOMEM;
}
/*
* Map the VRAM cacheable for performance. This is also required for VM
* Connect to display properly on ARM64 Linux VMs, as the host also maps
* the VRAM cacheable.
*/
hv->vram = ioremap_cache(hv->mem->start, hv->fb_size);
if (!hv->vram) {
drm_err(dev, "Failed to map vram\n");
ret = -ENOMEM;
goto error;
}
hv->fb_base = hv->mem->start;
return 0;
error:
vmbus_free_mmio(hv->mem->start, hv->fb_size);
return ret;
}
static int hyperv_vmbus_probe(struct hv_device *hdev,
const struct hv_vmbus_device_id *dev_id)
{
struct hyperv_drm_device *hv;
struct drm_device *dev;
int ret;
hv = devm_drm_dev_alloc(&hdev->device, &hyperv_driver,
struct hyperv_drm_device, dev);
if (IS_ERR(hv))
return PTR_ERR(hv);
dev = &hv->dev;
init_completion(&hv->wait);
hv_set_drvdata(hdev, hv);
hv->hdev = hdev;
ret = hyperv_connect_vsp(hdev);
if (ret) {
drm_err(dev, "Failed to connect to vmbus.\n");
goto err_hv_set_drv_data;
}
ret = hyperv_setup_vram(hv, hdev);
if (ret)
goto err_vmbus_close;
/*
* This should be done only once, during init and resume. Failing to
* update the vram location is not fatal; the device will then only
* update the dirty area up to the preferred resolution.
*/
ret = hyperv_update_vram_location(hdev, hv->fb_base);
if (ret)
drm_warn(dev, "Failed to update vram location.\n");
ret = hyperv_mode_config_init(hv);
if (ret)
goto err_free_mmio;
ret = drm_dev_register(dev, 0);
if (ret) {
drm_err(dev, "Failed to register drm driver.\n");
goto err_free_mmio;
}
drm_fbdev_generic_setup(dev, 0);
return 0;
err_free_mmio:
vmbus_free_mmio(hv->mem->start, hv->fb_size);
err_vmbus_close:
vmbus_close(hdev->channel);
err_hv_set_drv_data:
hv_set_drvdata(hdev, NULL);
return ret;
}
static void hyperv_vmbus_remove(struct hv_device *hdev)
{
struct drm_device *dev = hv_get_drvdata(hdev);
struct hyperv_drm_device *hv = to_hv(dev);
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
}
static int hyperv_vmbus_suspend(struct hv_device *hdev)
{
struct drm_device *dev = hv_get_drvdata(hdev);
int ret;
ret = drm_mode_config_helper_suspend(dev);
if (ret)
return ret;
vmbus_close(hdev->channel);
return 0;
}
static int hyperv_vmbus_resume(struct hv_device *hdev)
{
struct drm_device *dev = hv_get_drvdata(hdev);
struct hyperv_drm_device *hv = to_hv(dev);
int ret;
ret = hyperv_connect_vsp(hdev);
if (ret)
return ret;
ret = hyperv_update_vram_location(hdev, hv->fb_base);
if (ret)
return ret;
return drm_mode_config_helper_resume(dev);
}
static const struct hv_vmbus_device_id hyperv_vmbus_tbl[] = {
/* Synthetic Video Device GUID */
{HV_SYNTHVID_GUID},
{}
};
static struct hv_driver hyperv_hv_driver = {
.name = KBUILD_MODNAME,
.id_table = hyperv_vmbus_tbl,
.probe = hyperv_vmbus_probe,
.remove = hyperv_vmbus_remove,
.suspend = hyperv_vmbus_suspend,
.resume = hyperv_vmbus_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
static int __init hyperv_init(void)
{
int ret;
if (drm_firmware_drivers_only())
return -ENODEV;
ret = pci_register_driver(&hyperv_pci_driver);
if (ret != 0)
return ret;
return vmbus_driver_register(&hyperv_hv_driver);
}
static void __exit hyperv_exit(void)
{
vmbus_driver_unregister(&hyperv_hv_driver);
pci_unregister_driver(&hyperv_pci_driver);
}
module_init(hyperv_init);
module_exit(hyperv_exit);
MODULE_DEVICE_TABLE(pci, hyperv_pci_tbl);
MODULE_DEVICE_TABLE(vmbus, hyperv_vmbus_tbl);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Deepak Rawat <[email protected]>");
MODULE_DESCRIPTION("DRM driver for Hyper-V synthetic video device");
| linux-master | drivers/gpu/drm/hyperv/hyperv_drm_drv.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/iosys-map.h>
#include <drm/drm_drv.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include "qxl_drv.h"
#include "qxl_object.h"
static bool qxl_head_enabled(struct qxl_head *head)
{
return head->width && head->height;
}
static int qxl_alloc_client_monitors_config(struct qxl_device *qdev,
unsigned int count)
{
if (qdev->client_monitors_config &&
count > qdev->client_monitors_config->count) {
kfree(qdev->client_monitors_config);
qdev->client_monitors_config = NULL;
}
if (!qdev->client_monitors_config) {
qdev->client_monitors_config = kzalloc(
struct_size(qdev->client_monitors_config,
heads, count), GFP_KERNEL);
if (!qdev->client_monitors_config)
return -ENOMEM;
}
qdev->client_monitors_config->count = count;
return 0;
}
enum {
MONITORS_CONFIG_MODIFIED,
MONITORS_CONFIG_UNCHANGED,
MONITORS_CONFIG_BAD_CRC,
MONITORS_CONFIG_ERROR,
};
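/*
 * Copy the client monitors config out of the device ROM. The config is
 * protected by a CRC; a mismatch means the host was still updating it and
 * the read should be retried. Returns one of the MONITORS_CONFIG_* codes
 * above so the caller knows whether anything actually changed.
 */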
static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
{
int i;
int num_monitors;
uint32_t crc;
int status = MONITORS_CONFIG_UNCHANGED;
num_monitors = qdev->rom->client_monitors_config.count;
crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
sizeof(qdev->rom->client_monitors_config));
if (crc != qdev->rom->client_monitors_config_crc)
return MONITORS_CONFIG_BAD_CRC;
if (!num_monitors) {
DRM_DEBUG_KMS("no client monitors configured\n");
return status;
}
if (num_monitors > qxl_num_crtc) {
DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n",
qxl_num_crtc, num_monitors);
num_monitors = qxl_num_crtc;
} else {
num_monitors = qdev->rom->client_monitors_config.count;
}
if (qdev->client_monitors_config
&& (num_monitors != qdev->client_monitors_config->count)) {
status = MONITORS_CONFIG_MODIFIED;
}
if (qxl_alloc_client_monitors_config(qdev, num_monitors)) {
status = MONITORS_CONFIG_ERROR;
return status;
}
/* we copy max from the client but it isn't used */
qdev->client_monitors_config->max_allowed = qxl_num_crtc;
for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) {
struct qxl_urect *c_rect =
&qdev->rom->client_monitors_config.heads[i];
struct qxl_head *client_head =
&qdev->client_monitors_config->heads[i];
if (client_head->x != c_rect->left) {
client_head->x = c_rect->left;
status = MONITORS_CONFIG_MODIFIED;
}
if (client_head->y != c_rect->top) {
client_head->y = c_rect->top;
status = MONITORS_CONFIG_MODIFIED;
}
if (client_head->width != c_rect->right - c_rect->left) {
client_head->width = c_rect->right - c_rect->left;
status = MONITORS_CONFIG_MODIFIED;
}
if (client_head->height != c_rect->bottom - c_rect->top) {
client_head->height = c_rect->bottom - c_rect->top;
status = MONITORS_CONFIG_MODIFIED;
}
if (client_head->surface_id != 0) {
client_head->surface_id = 0;
status = MONITORS_CONFIG_MODIFIED;
}
if (client_head->id != i) {
client_head->id = i;
status = MONITORS_CONFIG_MODIFIED;
}
if (client_head->flags != 0) {
client_head->flags = 0;
status = MONITORS_CONFIG_MODIFIED;
}
DRM_DEBUG_KMS("read %dx%d+%d+%d\n", client_head->width, client_head->height,
client_head->x, client_head->y);
}
return status;
}
static void qxl_update_offset_props(struct qxl_device *qdev)
{
struct drm_device *dev = &qdev->ddev;
struct drm_connector *connector;
struct qxl_output *output;
struct qxl_head *head;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
output = drm_connector_to_qxl_output(connector);
head = &qdev->client_monitors_config->heads[output->index];
drm_object_property_set_value(&connector->base,
dev->mode_config.suggested_x_property, head->x);
drm_object_property_set_value(&connector->base,
dev->mode_config.suggested_y_property, head->y);
}
}
void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
{
struct drm_device *dev = &qdev->ddev;
struct drm_modeset_acquire_ctx ctx;
int status, retries, ret;
for (retries = 0; retries < 10; retries++) {
status = qxl_display_copy_rom_client_monitors_config(qdev);
if (status != MONITORS_CONFIG_BAD_CRC)
break;
udelay(5);
}
if (status == MONITORS_CONFIG_ERROR) {
DRM_DEBUG_KMS("ignoring client monitors config: error");
return;
}
if (status == MONITORS_CONFIG_BAD_CRC) {
DRM_DEBUG_KMS("ignoring client monitors config: bad crc");
return;
}
if (status == MONITORS_CONFIG_UNCHANGED) {
DRM_DEBUG_KMS("ignoring client monitors config: unchanged");
return;
}
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
qxl_update_offset_props(qdev);
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
if (!drm_helper_hpd_irq_event(dev)) {
/* notify that the monitor configuration changed, so clients
can adjust to the new (arbitrary) resolution */
drm_kms_helper_hotplug_event(dev);
}
}
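/*
 * A mode (or framebuffer) is only accepted if a 32bpp surface of that
 * size fits in the VRAM; the stride and size computations are
 * overflow-checked.
 */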
static int qxl_check_mode(struct qxl_device *qdev,
unsigned int width,
unsigned int height)
{
unsigned int stride;
unsigned int size;
if (check_mul_overflow(width, 4u, &stride))
return -EINVAL;
if (check_mul_overflow(stride, height, &size))
return -EINVAL;
if (size > qdev->vram_size)
return -ENOMEM;
return 0;
}
static int qxl_check_framebuffer(struct qxl_device *qdev,
struct qxl_bo *bo)
{
return qxl_check_mode(qdev, bo->surf.width, bo->surf.height);
}
static int qxl_add_mode(struct drm_connector *connector,
unsigned int width,
unsigned int height,
bool preferred)
{
struct drm_device *dev = connector->dev;
struct qxl_device *qdev = to_qxl(dev);
struct drm_display_mode *mode = NULL;
int rc;
rc = qxl_check_mode(qdev, width, height);
if (rc != 0)
return 0;
mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
if (!mode)
return 0;
if (preferred)
mode->type |= DRM_MODE_TYPE_PREFERRED;
mode->hdisplay = width;
mode->vdisplay = height;
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
return 1;
}
static int qxl_add_monitors_config_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct qxl_device *qdev = to_qxl(dev);
struct qxl_output *output = drm_connector_to_qxl_output(connector);
int h = output->index;
struct qxl_head *head;
if (!qdev->monitors_config)
return 0;
if (h >= qxl_num_crtc)
return 0;
if (!qdev->client_monitors_config)
return 0;
if (h >= qdev->client_monitors_config->count)
return 0;
head = &qdev->client_monitors_config->heads[h];
DRM_DEBUG_KMS("head %d is %dx%d\n", h, head->width, head->height);
return qxl_add_mode(connector, head->width, head->height, true);
}
static struct mode_size {
int w;
int h;
} extra_modes[] = {
{ 720, 480},
{1152, 768},
{1280, 854},
};
static int qxl_add_extra_modes(struct drm_connector *connector)
{
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(extra_modes); i++)
ret += qxl_add_mode(connector,
extra_modes[i].w,
extra_modes[i].h,
false);
return ret;
}
static void qxl_send_monitors_config(struct qxl_device *qdev)
{
int i;
BUG_ON(!qdev->ram_header->monitors_config);
if (qdev->monitors_config->count == 0)
return;
for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
struct qxl_head *head = &qdev->monitors_config->heads[i];
if (head->y > 8192 || head->x > 8192 ||
head->width > 8192 || head->height > 8192) {
DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
i, head->width, head->height,
head->x, head->y);
return;
}
}
qxl_io_monitors_config(qdev);
}
static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
const char *reason)
{
struct drm_device *dev = crtc->dev;
struct qxl_device *qdev = to_qxl(dev);
struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
struct qxl_head head;
int oldcount, i = qcrtc->index;
if (!qdev->primary_bo) {
DRM_DEBUG_KMS("no primary surface, skip (%s)\n", reason);
return;
}
if (!qdev->monitors_config || qxl_num_crtc <= i)
return;
head.id = i;
head.flags = 0;
head.surface_id = 0;
oldcount = qdev->monitors_config->count;
if (crtc->state->active) {
struct drm_display_mode *mode = &crtc->mode;
head.width = mode->hdisplay;
head.height = mode->vdisplay;
head.x = crtc->x;
head.y = crtc->y;
if (qdev->monitors_config->count < i + 1)
qdev->monitors_config->count = i + 1;
if (qdev->primary_bo == qdev->dumb_shadow_bo)
head.x += qdev->dumb_heads[i].x;
} else if (i > 0) {
head.width = 0;
head.height = 0;
head.x = 0;
head.y = 0;
if (qdev->monitors_config->count == i + 1)
qdev->monitors_config->count = i;
} else {
DRM_DEBUG_KMS("inactive head 0, skip (%s)\n", reason);
return;
}
if (head.width == qdev->monitors_config->heads[i].width &&
head.height == qdev->monitors_config->heads[i].height &&
head.x == qdev->monitors_config->heads[i].x &&
head.y == qdev->monitors_config->heads[i].y &&
oldcount == qdev->monitors_config->count)
return;
DRM_DEBUG_KMS("head %d, %dx%d, at +%d+%d, %s (%s)\n",
i, head.width, head.height, head.x, head.y,
crtc->state->active ? "on" : "off", reason);
if (oldcount != qdev->monitors_config->count)
DRM_DEBUG_KMS("active heads %d -> %d (%d total)\n",
oldcount, qdev->monitors_config->count,
qxl_num_crtc);
qdev->monitors_config->heads[i] = head;
qdev->monitors_config->max_allowed = qxl_num_crtc;
qxl_send_monitors_config(qdev);
}
static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
qxl_crtc_update_monitors_config(crtc, "flush");
}
static void qxl_crtc_destroy(struct drm_crtc *crtc)
{
struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
qxl_bo_unref(&qxl_crtc->cursor_bo);
drm_crtc_cleanup(crtc);
kfree(qxl_crtc);
}
static const struct drm_crtc_funcs qxl_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = qxl_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int flags, unsigned int color,
struct drm_clip_rect *clips,
unsigned int num_clips)
{
/* TODO: vmwgfx where this was cribbed from had locking. Why? */
struct qxl_device *qdev = to_qxl(fb->dev);
struct drm_clip_rect norect;
struct qxl_bo *qobj;
struct drm_modeset_acquire_ctx ctx;
bool is_primary;
int inc = 1, ret;
DRM_MODESET_LOCK_ALL_BEGIN(fb->dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
qobj = gem_to_qxl_bo(fb->obj[0]);
/* if we aren't primary surface ignore this */
is_primary = qobj->shadow ? qobj->shadow->is_primary : qobj->is_primary;
if (!is_primary)
goto out_lock_end;
if (!num_clips) {
num_clips = 1;
clips = &norect;
norect.x1 = norect.y1 = 0;
norect.x2 = fb->width;
norect.y2 = fb->height;
} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
num_clips /= 2;
inc = 2; /* skip source rects */
}
qxl_draw_dirty_fb(qdev, fb, qobj, flags, color,
clips, num_clips, inc, 0);
out_lock_end:
DRM_MODESET_LOCK_ALL_END(fb->dev, ctx, ret);
return 0;
}
static const struct drm_framebuffer_funcs qxl_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.dirty = qxl_framebuffer_surface_dirty,
.create_handle = drm_gem_fb_create_handle,
};
static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
qxl_crtc_update_monitors_config(crtc, "enable");
}
static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
qxl_crtc_update_monitors_config(crtc, "disable");
}
static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
.atomic_flush = qxl_crtc_atomic_flush,
.atomic_enable = qxl_crtc_atomic_enable,
.atomic_disable = qxl_crtc_atomic_disable,
};
static int qxl_primary_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_bo *bo;
if (!new_plane_state->crtc || !new_plane_state->fb)
return 0;
bo = gem_to_qxl_bo(new_plane_state->fb->obj[0]);
return qxl_check_framebuffer(qdev, bo);
}
static int qxl_primary_apply_cursor(struct qxl_device *qdev,
struct drm_plane_state *plane_state)
{
struct drm_framebuffer *fb = plane_state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
struct qxl_cursor_cmd *cmd;
struct qxl_release *release;
int ret = 0;
if (!qcrtc->cursor_bo)
return 0;
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
QXL_RELEASE_CURSOR_CMD,
&release, NULL);
if (ret)
return ret;
ret = qxl_release_list_add(release, qcrtc->cursor_bo);
if (ret)
goto out_free_release;
ret = qxl_release_reserve_list(release, false);
if (ret)
goto out_free_release;
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
cmd->u.set.position.x = plane_state->crtc_x + fb->hot_x;
cmd->u.set.position.y = plane_state->crtc_y + fb->hot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
cmd->u.set.visible = 1;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_release_fence_buffer_objects(release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
return ret;
out_free_release:
qxl_release_free(qdev, release);
return ret;
}
static int qxl_primary_move_cursor(struct qxl_device *qdev,
struct drm_plane_state *plane_state)
{
struct drm_framebuffer *fb = plane_state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
struct qxl_cursor_cmd *cmd;
struct qxl_release *release;
int ret = 0;
if (!qcrtc->cursor_bo)
return 0;
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
QXL_RELEASE_CURSOR_CMD,
&release, NULL);
if (ret)
return ret;
ret = qxl_release_reserve_list(release, true);
if (ret) {
qxl_release_free(qdev, release);
return ret;
}
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
cmd->u.position.x = plane_state->crtc_x + fb->hot_x;
cmd->u.position.y = plane_state->crtc_y + fb->hot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_release_fence_buffer_objects(release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
return ret;
}
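/*
 * Build the cursor BO the device expects: a qxl_cursor header (a 64x64
 * ALPHA cursor with the given hotspot) immediately followed by the ARGB
 * pixel data copied from the userspace BO.
 */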
static struct qxl_bo *qxl_create_cursor(struct qxl_device *qdev,
struct qxl_bo *user_bo,
int hot_x, int hot_y)
{
static const u32 size = 64 * 64 * 4;
struct qxl_bo *cursor_bo;
struct iosys_map cursor_map;
struct iosys_map user_map;
struct qxl_cursor cursor;
int ret;
if (!user_bo)
return NULL;
ret = qxl_bo_create(qdev, sizeof(struct qxl_cursor) + size,
false, true, QXL_GEM_DOMAIN_VRAM, 1,
NULL, &cursor_bo);
if (ret)
goto err;
ret = qxl_bo_vmap(cursor_bo, &cursor_map);
if (ret)
goto err_unref;
ret = qxl_bo_vmap(user_bo, &user_map);
if (ret)
goto err_unmap;
cursor.header.unique = 0;
cursor.header.type = SPICE_CURSOR_TYPE_ALPHA;
cursor.header.width = 64;
cursor.header.height = 64;
cursor.header.hot_spot_x = hot_x;
cursor.header.hot_spot_y = hot_y;
cursor.data_size = size;
cursor.chunk.next_chunk = 0;
cursor.chunk.prev_chunk = 0;
cursor.chunk.data_size = size;
if (cursor_map.is_iomem) {
memcpy_toio(cursor_map.vaddr_iomem,
&cursor, sizeof(cursor));
memcpy_toio(cursor_map.vaddr_iomem + sizeof(cursor),
user_map.vaddr, size);
} else {
memcpy(cursor_map.vaddr,
&cursor, sizeof(cursor));
memcpy(cursor_map.vaddr + sizeof(cursor),
user_map.vaddr, size);
}
qxl_bo_vunmap(user_bo);
qxl_bo_vunmap(cursor_bo);
return cursor_bo;
err_unmap:
qxl_bo_vunmap(cursor_bo);
err_unref:
qxl_bo_unpin(cursor_bo);
qxl_bo_unref(&cursor_bo);
err:
return NULL;
}
static void qxl_free_cursor(struct qxl_bo *cursor_bo)
{
if (!cursor_bo)
return;
qxl_bo_unpin(cursor_bo);
qxl_bo_unref(&cursor_bo);
}
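/*
 * Primary plane update: (re)create the device primary surface when the
 * framebuffer bo (or its shadow) is not the current primary, re-apply
 * the cursor, and flush the whole framebuffer as one dirty rectangle.
 */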
static void qxl_primary_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_bo *bo = gem_to_qxl_bo(new_state->fb->obj[0]);
struct qxl_bo *primary;
struct drm_clip_rect norect = {
.x1 = 0,
.y1 = 0,
.x2 = new_state->fb->width,
.y2 = new_state->fb->height
};
uint32_t dumb_shadow_offset = 0;
primary = bo->shadow ? bo->shadow : bo;
if (!primary->is_primary) {
if (qdev->primary_bo)
qxl_io_destroy_primary(qdev);
qxl_io_create_primary(qdev, primary);
qxl_primary_apply_cursor(qdev, plane->state);
}
if (bo->is_dumb)
dumb_shadow_offset =
qdev->dumb_heads[new_state->crtc->index].x;
qxl_draw_dirty_fb(qdev, new_state->fb, bo, 0, 0, &norect, 1, 1,
dumb_shadow_offset);
}
static void qxl_primary_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct qxl_device *qdev = to_qxl(plane->dev);
if (old_state->fb) {
struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]);
if (bo->shadow)
bo = bo->shadow;
if (bo->is_primary)
qxl_io_destroy_primary(qdev);
}
}
static void qxl_cursor_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct qxl_device *qdev = to_qxl(plane->dev);
struct drm_framebuffer *fb = new_state->fb;
if (fb != old_state->fb) {
qxl_primary_apply_cursor(qdev, new_state);
} else {
qxl_primary_move_cursor(qdev, new_state);
}
}
static void qxl_cursor_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_crtc *qcrtc;
struct qxl_release *release;
struct qxl_cursor_cmd *cmd;
int ret;
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
QXL_RELEASE_CURSOR_CMD,
&release, NULL);
if (ret)
return;
ret = qxl_release_reserve_list(release, true);
if (ret) {
qxl_release_free(qdev, release);
return;
}
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_HIDE;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_release_fence_buffer_objects(release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qcrtc = to_qxl_crtc(old_state->crtc);
qxl_free_cursor(qcrtc->cursor_bo);
qcrtc->cursor_bo = NULL;
}
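/*
 * Record the size of the dumb framebuffer scanned out on a given head;
 * these per-head sizes feed the shared shadow surface layout below.
 */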
static void qxl_update_dumb_head(struct qxl_device *qdev,
int index, struct qxl_bo *bo)
{
uint32_t width, height;
if (index >= qdev->monitors_config->max_allowed)
return;
if (bo && bo->is_dumb) {
width = bo->surf.width;
height = bo->surf.height;
} else {
width = 0;
height = 0;
}
if (qdev->dumb_heads[index].width == width &&
qdev->dumb_heads[index].height == height)
return;
DRM_DEBUG("#%d: %dx%d -> %dx%d\n", index,
qdev->dumb_heads[index].width,
qdev->dumb_heads[index].height,
width, height);
qdev->dumb_heads[index].width = width;
qdev->dumb_heads[index].height = height;
}
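/*
 * Lay out all heads side by side in one shared shadow surface: widths
 * are summed (each head remembers its x offset), the height is the
 * maximum over all heads, with a 64x64 minimum.
 */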
static void qxl_calc_dumb_shadow(struct qxl_device *qdev,
struct qxl_surface *surf)
{
struct qxl_head *head;
int i;
memset(surf, 0, sizeof(*surf));
for (i = 0; i < qdev->monitors_config->max_allowed; i++) {
head = qdev->dumb_heads + i;
head->x = surf->width;
surf->width += head->width;
if (surf->height < head->height)
surf->height = head->height;
}
if (surf->width < 64)
surf->width = 64;
if (surf->height < 64)
surf->height = 64;
surf->format = SPICE_SURFACE_FMT_32_xRGB;
surf->stride = surf->width * 4;
if (!qdev->dumb_shadow_bo ||
qdev->dumb_shadow_bo->surf.width != surf->width ||
qdev->dumb_shadow_bo->surf.height != surf->height)
DRM_DEBUG("%dx%d\n", surf->width, surf->height);
}
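/*
 * (Re)allocate the shared dumb shadow bo whenever the combined size
 * changes and make the user bo's ->shadow point at it, taking a GEM
 * reference and pinning it.
 */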
static void qxl_prepare_shadow(struct qxl_device *qdev, struct qxl_bo *user_bo,
int crtc_index)
{
struct qxl_surface surf;
qxl_update_dumb_head(qdev, crtc_index,
user_bo);
qxl_calc_dumb_shadow(qdev, &surf);
if (!qdev->dumb_shadow_bo ||
qdev->dumb_shadow_bo->surf.width != surf.width ||
qdev->dumb_shadow_bo->surf.height != surf.height) {
if (qdev->dumb_shadow_bo) {
qxl_bo_unpin(qdev->dumb_shadow_bo);
drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
qdev->dumb_shadow_bo = NULL;
}
qxl_bo_create(qdev, surf.height * surf.stride,
true, true, QXL_GEM_DOMAIN_SURFACE, 0,
&surf, &qdev->dumb_shadow_bo);
}
if (user_bo->shadow != qdev->dumb_shadow_bo) {
if (user_bo->shadow) {
qxl_bo_unpin(user_bo->shadow);
drm_gem_object_put(&user_bo->shadow->tbo.base);
user_bo->shadow = NULL;
}
drm_gem_object_get(&qdev->dumb_shadow_bo->tbo.base);
user_bo->shadow = qdev->dumb_shadow_bo;
qxl_bo_pin(user_bo->shadow);
}
}
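/*
 * prepare_fb: set up the shadow bo for dumb primary framebuffers,
 * rebuild the per-CRTC cursor bo when the cursor framebuffer changed,
 * then pin the user bo and call the generic GEM prepare_fb helper.
 */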
static int qxl_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct qxl_device *qdev = to_qxl(plane->dev);
struct drm_gem_object *obj;
struct qxl_bo *user_bo;
int ret;
if (!new_state->fb)
return 0;
obj = new_state->fb->obj[0];
user_bo = gem_to_qxl_bo(obj);
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
user_bo->is_dumb) {
qxl_prepare_shadow(qdev, user_bo, new_state->crtc->index);
}
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
plane->state->fb != new_state->fb) {
struct qxl_crtc *qcrtc = to_qxl_crtc(new_state->crtc);
struct qxl_bo *old_cursor_bo = qcrtc->cursor_bo;
qcrtc->cursor_bo = qxl_create_cursor(qdev, user_bo,
new_state->fb->hot_x,
new_state->fb->hot_y);
qxl_free_cursor(old_cursor_bo);
}
ret = qxl_bo_pin(user_bo);
if (ret)
return ret;
return drm_gem_plane_helper_prepare_fb(plane, new_state);
}
static void qxl_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_gem_object *obj;
struct qxl_bo *user_bo;
if (!old_state->fb) {
/*
* we never executed prepare_fb, so there's nothing to
* unpin.
*/
return;
}
obj = old_state->fb->obj[0];
user_bo = gem_to_qxl_bo(obj);
qxl_bo_unpin(user_bo);
if (old_state->fb != plane->state->fb && user_bo->shadow) {
qxl_bo_unpin(user_bo->shadow);
drm_gem_object_put(&user_bo->shadow->tbo.base);
user_bo->shadow = NULL;
}
}
static const uint32_t qxl_cursor_plane_formats[] = {
DRM_FORMAT_ARGB8888,
};
static const struct drm_plane_helper_funcs qxl_cursor_helper_funcs = {
.atomic_update = qxl_cursor_atomic_update,
.atomic_disable = qxl_cursor_atomic_disable,
.prepare_fb = qxl_plane_prepare_fb,
.cleanup_fb = qxl_plane_cleanup_fb,
};
static const struct drm_plane_funcs qxl_cursor_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_helper_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static const uint32_t qxl_primary_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
static const struct drm_plane_helper_funcs primary_helper_funcs = {
.atomic_check = qxl_primary_atomic_check,
.atomic_update = qxl_primary_atomic_update,
.atomic_disable = qxl_primary_atomic_disable,
.prepare_fb = qxl_plane_prepare_fb,
.cleanup_fb = qxl_plane_cleanup_fb,
};
static const struct drm_plane_funcs qxl_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_helper_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static struct drm_plane *qxl_create_plane(struct qxl_device *qdev,
unsigned int possible_crtcs,
enum drm_plane_type type)
{
const struct drm_plane_helper_funcs *helper_funcs = NULL;
struct drm_plane *plane;
const struct drm_plane_funcs *funcs;
const uint32_t *formats;
int num_formats;
int err;
if (type == DRM_PLANE_TYPE_PRIMARY) {
funcs = &qxl_primary_plane_funcs;
formats = qxl_primary_plane_formats;
num_formats = ARRAY_SIZE(qxl_primary_plane_formats);
helper_funcs = &primary_helper_funcs;
} else if (type == DRM_PLANE_TYPE_CURSOR) {
funcs = &qxl_cursor_plane_funcs;
formats = qxl_cursor_plane_formats;
helper_funcs = &qxl_cursor_helper_funcs;
num_formats = ARRAY_SIZE(qxl_cursor_plane_formats);
} else {
return ERR_PTR(-EINVAL);
}
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
err = drm_universal_plane_init(&qdev->ddev, plane, possible_crtcs,
funcs, formats, num_formats,
NULL, type, NULL);
if (err)
goto free_plane;
drm_plane_helper_add(plane, helper_funcs);
return plane;
free_plane:
kfree(plane);
return ERR_PTR(-EINVAL);
}
static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
{
struct qxl_crtc *qxl_crtc;
struct drm_plane *primary, *cursor;
struct qxl_device *qdev = to_qxl(dev);
int r;
qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
if (!qxl_crtc)
return -ENOMEM;
primary = qxl_create_plane(qdev, 1 << crtc_id, DRM_PLANE_TYPE_PRIMARY);
if (IS_ERR(primary)) {
r = -ENOMEM;
goto free_mem;
}
cursor = qxl_create_plane(qdev, 1 << crtc_id, DRM_PLANE_TYPE_CURSOR);
if (IS_ERR(cursor)) {
r = -ENOMEM;
goto clean_primary;
}
r = drm_crtc_init_with_planes(dev, &qxl_crtc->base, primary, cursor,
&qxl_crtc_funcs, NULL);
if (r)
goto clean_cursor;
qxl_crtc->index = crtc_id;
drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
return 0;
clean_cursor:
drm_plane_cleanup(cursor);
kfree(cursor);
clean_primary:
drm_plane_cleanup(primary);
kfree(primary);
free_mem:
kfree(qxl_crtc);
return r;
}
static int qxl_conn_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct qxl_device *qdev = to_qxl(dev);
struct qxl_output *output = drm_connector_to_qxl_output(connector);
unsigned int pwidth = 1024;
unsigned int pheight = 768;
int ret = 0;
if (qdev->client_monitors_config) {
struct qxl_head *head;
head = &qdev->client_monitors_config->heads[output->index];
if (head->width)
pwidth = head->width;
if (head->height)
pheight = head->height;
}
ret += drm_add_modes_noedid(connector, 8192, 8192);
ret += qxl_add_extra_modes(connector);
ret += qxl_add_monitors_config_modes(connector);
drm_set_preferred_mode(connector, pwidth, pheight);
return ret;
}
static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *ddev = connector->dev;
struct qxl_device *qdev = to_qxl(ddev);
if (qxl_check_mode(qdev, mode->hdisplay, mode->vdisplay) != 0)
return MODE_BAD;
return MODE_OK;
}
static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
{
struct qxl_output *qxl_output =
drm_connector_to_qxl_output(connector);
DRM_DEBUG("\n");
return &qxl_output->enc;
}
static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
.get_modes = qxl_conn_get_modes,
.mode_valid = qxl_conn_mode_valid,
.best_encoder = qxl_best_encoder,
};
static enum drm_connector_status qxl_conn_detect(
struct drm_connector *connector,
bool force)
{
struct qxl_output *output =
drm_connector_to_qxl_output(connector);
struct drm_device *ddev = connector->dev;
struct qxl_device *qdev = to_qxl(ddev);
bool connected = false;
/* The first monitor is always connected */
if (!qdev->client_monitors_config) {
if (output->index == 0)
connected = true;
} else
connected = qdev->client_monitors_config->count > output->index &&
qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
DRM_DEBUG("#%d connected: %d\n", output->index, connected);
return connected ? connector_status_connected
: connector_status_disconnected;
}
static void qxl_conn_destroy(struct drm_connector *connector)
{
struct qxl_output *qxl_output =
drm_connector_to_qxl_output(connector);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(qxl_output);
}
static const struct drm_connector_funcs qxl_connector_funcs = {
.detect = qxl_conn_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = qxl_conn_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
{
if (qdev->hotplug_mode_update_property)
return 0;
qdev->hotplug_mode_update_property =
drm_property_create_range(&qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
"hotplug_mode_update", 0, 1);
return 0;
}
static int qdev_output_init(struct drm_device *dev, int num_output)
{
struct qxl_device *qdev = to_qxl(dev);
struct qxl_output *qxl_output;
struct drm_connector *connector;
struct drm_encoder *encoder;
int ret;
qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
if (!qxl_output)
return -ENOMEM;
qxl_output->index = num_output;
connector = &qxl_output->base;
encoder = &qxl_output->enc;
drm_connector_init(dev, &qxl_output->base,
&qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
ret = drm_simple_encoder_init(dev, &qxl_output->enc,
DRM_MODE_ENCODER_VIRTUAL);
if (ret) {
drm_err(dev, "drm_simple_encoder_init() failed, error %d\n",
ret);
goto err_drm_connector_cleanup;
}
/* we get HPD via client monitors config */
connector->polled = DRM_CONNECTOR_POLL_HPD;
encoder->possible_crtcs = 1 << num_output;
drm_connector_attach_encoder(&qxl_output->base,
&qxl_output->enc);
drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
drm_object_attach_property(&connector->base,
qdev->hotplug_mode_update_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
return 0;
err_drm_connector_cleanup:
drm_connector_cleanup(&qxl_output->base);
kfree(qxl_output);
return ret;
}
static struct drm_framebuffer *
qxl_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd,
&qxl_fb_funcs);
}
static const struct drm_mode_config_funcs qxl_mode_funcs = {
.fb_create = qxl_user_framebuffer_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
int qxl_create_monitors_object(struct qxl_device *qdev)
{
int ret;
struct drm_gem_object *gobj;
struct iosys_map map;
int monitors_config_size = sizeof(struct qxl_monitors_config) +
qxl_num_crtc * sizeof(struct qxl_head);
ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
QXL_GEM_DOMAIN_VRAM,
false, false, NULL, &gobj);
if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret);
return -ENOMEM;
}
qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
ret = qxl_bo_vmap(qdev->monitors_config_bo, &map);
if (ret)
return ret;
qdev->monitors_config = qdev->monitors_config_bo->kptr;
qdev->ram_header->monitors_config =
qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
memset(qdev->monitors_config, 0, monitors_config_size);
qdev->dumb_heads = kcalloc(qxl_num_crtc, sizeof(qdev->dumb_heads[0]),
GFP_KERNEL);
if (!qdev->dumb_heads) {
qxl_destroy_monitors_object(qdev);
return -ENOMEM;
}
return 0;
}
int qxl_destroy_monitors_object(struct qxl_device *qdev)
{
int ret;
if (!qdev->monitors_config_bo)
return 0;
qdev->monitors_config = NULL;
qdev->ram_header->monitors_config = 0;
ret = qxl_bo_vunmap(qdev->monitors_config_bo);
if (ret)
return ret;
qxl_bo_unref(&qdev->monitors_config_bo);
return 0;
}
int qxl_modeset_init(struct qxl_device *qdev)
{
int i;
int ret;
ret = drmm_mode_config_init(&qdev->ddev);
if (ret)
return ret;
ret = qxl_create_monitors_object(qdev);
if (ret)
return ret;
qdev->ddev.mode_config.funcs = (void *)&qxl_mode_funcs;
/* modes will be validated against the framebuffer size */
qdev->ddev.mode_config.min_width = 0;
qdev->ddev.mode_config.min_height = 0;
qdev->ddev.mode_config.max_width = 8192;
qdev->ddev.mode_config.max_height = 8192;
drm_mode_create_suggested_offset_properties(&qdev->ddev);
qxl_mode_create_hotplug_mode_update_property(qdev);
for (i = 0 ; i < qxl_num_crtc; ++i) {
qdev_crtc_init(&qdev->ddev, i);
qdev_output_init(&qdev->ddev, i);
}
qxl_display_read_client_monitors_config(qdev);
drm_mode_config_reset(&qdev->ddev);
return 0;
}
void qxl_modeset_fini(struct qxl_device *qdev)
{
if (qdev->dumb_shadow_bo) {
qxl_bo_unpin(qdev->dumb_shadow_bo);
drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
qdev->dumb_shadow_bo = NULL;
}
qxl_destroy_monitors_object(qdev);
}
| linux-master | drivers/gpu/drm/qxl/qxl_display.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include <linux/pci.h>
#include <drm/drm_drv.h>
#include "qxl_drv.h"
static irqreturn_t qxl_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
struct qxl_device *qdev = to_qxl(dev);
uint32_t pending;
pending = xchg(&qdev->ram_header->int_pending, 0);
if (!pending)
return IRQ_NONE;
atomic_inc(&qdev->irq_received);
if (pending & QXL_INTERRUPT_DISPLAY) {
atomic_inc(&qdev->irq_received_display);
wake_up_all(&qdev->display_event);
qxl_queue_garbage_collect(qdev, false);
}
if (pending & QXL_INTERRUPT_CURSOR) {
atomic_inc(&qdev->irq_received_cursor);
wake_up_all(&qdev->cursor_event);
}
if (pending & QXL_INTERRUPT_IO_CMD) {
atomic_inc(&qdev->irq_received_io_cmd);
wake_up_all(&qdev->io_cmd_event);
}
if (pending & QXL_INTERRUPT_ERROR) {
/* TODO: log it, reset device (only way to exit this condition)
* (do it a certain number of times, afterwards admit defeat,
* to avoid endless loops).
*/
qdev->irq_received_error++;
DRM_WARN("driver is in bug mode\n");
}
if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) {
schedule_work(&qdev->client_monitors_config_work);
}
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ);
return IRQ_HANDLED;
}
static void qxl_client_monitors_config_work_func(struct work_struct *work)
{
struct qxl_device *qdev = container_of(work, struct qxl_device,
client_monitors_config_work);
qxl_display_read_client_monitors_config(qdev);
}
int qxl_irq_init(struct qxl_device *qdev)
{
struct drm_device *ddev = &qdev->ddev;
struct pci_dev *pdev = to_pci_dev(ddev->dev);
int ret;
init_waitqueue_head(&qdev->display_event);
init_waitqueue_head(&qdev->cursor_event);
init_waitqueue_head(&qdev->io_cmd_event);
init_waitqueue_head(&qdev->release_event);
INIT_WORK(&qdev->client_monitors_config_work,
qxl_client_monitors_config_work_func);
atomic_set(&qdev->irq_received, 0);
atomic_set(&qdev->irq_received_display, 0);
atomic_set(&qdev->irq_received_cursor, 0);
atomic_set(&qdev->irq_received_io_cmd, 0);
qdev->irq_received_error = 0;
ret = request_irq(pdev->irq, qxl_irq_handler, IRQF_SHARED, ddev->driver->name, ddev);
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
if (unlikely(ret != 0)) {
DRM_ERROR("Failed installing irq: %d\n", ret);
return 1;
}
return 0;
}
| linux-master | drivers/gpu/drm/qxl/qxl_irq.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include <linux/delay.h>
#include <drm/drm.h>
#include <drm/drm_file.h>
#include <drm/drm_debugfs.h>
#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_tt.h>
#include "qxl_drv.h"
#include "qxl_object.h"
static struct qxl_device *qxl_get_qdev(struct ttm_device *bdev)
{
struct qxl_mman *mman;
struct qxl_device *qdev;
mman = container_of(bdev, struct qxl_mman, bdev);
qdev = container_of(mman, struct qxl_device, mman);
return qdev;
}
static void qxl_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
struct qxl_bo *qbo;
static const struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
.flags = 0
};
if (!qxl_ttm_bo_is_qxl_bo(bo)) {
placement->placement = &placements;
placement->busy_placement = &placements;
placement->num_placement = 1;
placement->num_busy_placement = 1;
return;
}
qbo = to_qxl_bo(bo);
qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
*placement = qbo->placement;
}
int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct qxl_device *qdev = qxl_get_qdev(bdev);
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
/* system memory */
return 0;
case TTM_PL_VRAM:
mem->bus.is_iomem = true;
mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base;
mem->bus.caching = ttm_write_combined;
break;
case TTM_PL_PRIV:
mem->bus.is_iomem = true;
mem->bus.offset = (mem->start << PAGE_SHIFT) +
qdev->surfaceram_base;
mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
}
return 0;
}
/*
* TTM backend functions.
*/
static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
ttm_tt_fini(ttm);
kfree(ttm);
}
static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
struct ttm_tt *ttm;
ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
if (ttm == NULL)
return NULL;
if (ttm_tt_init(ttm, bo, page_flags, ttm_cached, 0)) {
kfree(ttm);
return NULL;
}
return ttm;
}
static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
struct qxl_bo *qbo;
struct qxl_device *qdev;
if (!qxl_ttm_bo_is_qxl_bo(bo) || !bo->resource)
return;
qbo = to_qxl_bo(bo);
qdev = to_qxl(qbo->tbo.base.dev);
if (bo->resource->mem_type == TTM_PL_PRIV && qbo->surface_id)
qxl_surface_evict(qdev, qbo, new_mem ? true : false);
}
static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
struct ttm_resource *old_mem = bo->resource;
int ret;
if (!old_mem) {
if (new_mem->mem_type != TTM_PL_SYSTEM) {
hop->mem_type = TTM_PL_SYSTEM;
hop->flags = TTM_PL_FLAG_TEMPORARY;
return -EMULTIHOP;
}
ttm_bo_move_null(bo, new_mem);
return 0;
}
qxl_bo_move_notify(bo, new_mem);
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
return ret;
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
qxl_bo_move_notify(bo, NULL);
}
static struct ttm_device_funcs qxl_bo_driver = {
.ttm_tt_create = &qxl_ttm_tt_create,
.ttm_tt_destroy = &qxl_ttm_backend_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.delete_mem_notify = &qxl_bo_delete_mem_notify,
};
static int qxl_ttm_init_mem_type(struct qxl_device *qdev,
unsigned int type,
uint64_t size)
{
return ttm_range_man_init(&qdev->mman.bdev, type, false, size);
}
int qxl_ttm_init(struct qxl_device *qdev)
{
int r;
int num_io_pages; /* != rom->num_io_pages, we include surface0 */
/* No other users of the address space, so set it to 0 */
r = ttm_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
qdev->ddev.anon_inode->i_mapping,
qdev->ddev.vma_offset_manager,
false, false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
}
/* NOTE: this includes the framebuffer (aka surface 0) */
num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
r = qxl_ttm_init_mem_type(qdev, TTM_PL_VRAM, num_io_pages);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
r = qxl_ttm_init_mem_type(qdev, TTM_PL_PRIV,
qdev->surfaceram_size / PAGE_SIZE);
if (r) {
DRM_ERROR("Failed initializing Surfaces heap.\n");
return r;
}
DRM_INFO("qxl: %uM of VRAM memory size\n",
(unsigned int)qdev->vram_size / (1024 * 1024));
DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
((unsigned int)num_io_pages * PAGE_SIZE) / (1024 * 1024));
DRM_INFO("qxl: %uM of Surface memory size\n",
(unsigned int)qdev->surfaceram_size / (1024 * 1024));
return 0;
}
void qxl_ttm_fini(struct qxl_device *qdev)
{
ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_VRAM);
ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_PRIV);
ttm_device_fini(&qdev->mman.bdev);
DRM_INFO("qxl: ttm finalized\n");
}
void qxl_ttm_debugfs_init(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
ttm_resource_manager_create_debugfs(ttm_manager_type(&qdev->mman.bdev,
TTM_PL_VRAM),
qdev->ddev.primary->debugfs_root, "qxl_mem_mm");
ttm_resource_manager_create_debugfs(ttm_manager_type(&qdev->mman.bdev,
TTM_PL_PRIV),
qdev->ddev.primary->debugfs_root, "qxl_surf_mm");
#endif
}
| linux-master | drivers/gpu/drm/qxl/qxl_ttm.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
/* QXL cmd/ring handling */
#include <linux/delay.h>
#include <drm/drm_util.h>
#include "qxl_drv.h"
#include "qxl_object.h"
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
struct ring {
struct qxl_ring_header header;
uint8_t elements[];
};
struct qxl_ring {
struct ring *ring;
int element_size;
int n_elements;
int prod_notify;
wait_queue_head_t *push_event;
spinlock_t lock;
};
void qxl_ring_free(struct qxl_ring *ring)
{
kfree(ring);
}
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
int element_size,
int n_elements,
int prod_notify,
wait_queue_head_t *push_event)
{
struct qxl_ring *ring;
ring = kmalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
return NULL;
ring->ring = (struct ring *)header;
ring->element_size = element_size;
ring->n_elements = n_elements;
ring->prod_notify = prod_notify;
ring->push_event = push_event;
spin_lock_init(&ring->lock);
return ring;
}
static int qxl_check_header(struct qxl_ring *ring)
{
int ret;
struct qxl_ring_header *header = &(ring->ring->header);
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
ret = header->prod - header->cons < header->num_items;
if (ret == 0)
header->notify_on_cons = header->cons + 1;
spin_unlock_irqrestore(&ring->lock, flags);
return ret;
}
int qxl_check_idle(struct qxl_ring *ring)
{
int ret;
struct qxl_ring_header *header = &(ring->ring->header);
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
ret = header->prod == header->cons;
spin_unlock_irqrestore(&ring->lock, flags);
return ret;
}
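/*
 * Producer side of a qxl ring. If the ring is full, request a
 * notification on the next consume and wait for it (busy-wait when
 * sleeping is not allowed); after copying the element, kick the device
 * via prod_notify only when it asked to be notified at this position.
 */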
int qxl_ring_push(struct qxl_ring *ring,
const void *new_elt, bool interruptible)
{
struct qxl_ring_header *header = &(ring->ring->header);
uint8_t *elt;
int idx, ret;
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
if (header->prod - header->cons == header->num_items) {
header->notify_on_cons = header->cons + 1;
mb();
spin_unlock_irqrestore(&ring->lock, flags);
if (!drm_can_sleep()) {
while (!qxl_check_header(ring))
udelay(1);
} else {
if (interruptible) {
ret = wait_event_interruptible(*ring->push_event,
qxl_check_header(ring));
if (ret)
return ret;
} else {
wait_event(*ring->push_event,
qxl_check_header(ring));
}
}
spin_lock_irqsave(&ring->lock, flags);
}
idx = header->prod & (ring->n_elements - 1);
elt = ring->ring->elements + idx * ring->element_size;
memcpy((void *)elt, new_elt, ring->element_size);
header->prod++;
mb();
if (header->prod == header->notify_on_prod)
outb(0, ring->prod_notify);
spin_unlock_irqrestore(&ring->lock, flags);
return 0;
}
static bool qxl_ring_pop(struct qxl_ring *ring,
void *element)
{
volatile struct qxl_ring_header *header = &(ring->ring->header);
volatile uint8_t *ring_elt;
int idx;
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
if (header->cons == header->prod) {
header->notify_on_prod = header->cons + 1;
spin_unlock_irqrestore(&ring->lock, flags);
return false;
}
idx = header->cons & (ring->n_elements - 1);
ring_elt = ring->ring->elements + idx * ring->element_size;
memcpy(element, (void *)ring_elt, ring->element_size);
header->cons++;
spin_unlock_irqrestore(&ring->lock, flags);
return true;
}
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
cmd.type = type;
cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
cmd.type = type;
cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}
bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
if (!qxl_check_idle(qdev->release_ring)) {
schedule_work(&qdev->gc_work);
if (flush)
flush_work(&qdev->gc_work);
return true;
}
return false;
}
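/*
 * Drain the release ring: each id popped from the ring may chain to
 * further releases through info->next, and every release in the chain
 * is freed. Returns the number of releases processed.
 */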
int qxl_garbage_collect(struct qxl_device *qdev)
{
struct qxl_release *release;
uint64_t id, next_id;
int i = 0;
union qxl_release_info *info;
while (qxl_ring_pop(qdev->release_ring, &id)) {
DRM_DEBUG_DRIVER("popped %lld\n", id);
while (id) {
release = qxl_release_from_id_locked(qdev, id);
if (release == NULL)
break;
info = qxl_release_map(qdev, release);
next_id = info->next;
qxl_release_unmap(qdev, release, info);
DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,
next_id);
switch (release->type) {
case QXL_RELEASE_DRAWABLE:
case QXL_RELEASE_SURFACE_CMD:
case QXL_RELEASE_CURSOR_CMD:
break;
default:
DRM_ERROR("unexpected release type\n");
break;
}
id = next_id;
qxl_release_free(qdev, release);
++i;
}
}
wake_up_all(&qdev->release_event);
DRM_DEBUG_DRIVER("%d\n", i);
return i;
}
int qxl_alloc_bo_reserved(struct qxl_device *qdev,
struct qxl_release *release,
unsigned long size,
struct qxl_bo **_bo)
{
struct qxl_bo *bo;
int ret;
ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (ret) {
DRM_ERROR("failed to allocate VRAM BO\n");
return ret;
}
ret = qxl_release_list_add(release, bo);
if (ret)
goto out_unref;
*_bo = bo;
return 0;
out_unref:
qxl_bo_unref(&bo);
return ret;
}
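/*
 * Issue an asynchronous io-port command. Accesses are serialized by
 * async_io_mutex; the outb() is followed by a wait (up to 5 seconds)
 * for the QXL_INTERRUPT_IO_CMD counter to advance, signalling
 * completion.
 */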
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
int irq_num;
long addr = qdev->io_base + port;
int ret;
mutex_lock(&qdev->async_io_mutex);
irq_num = atomic_read(&qdev->irq_received_io_cmd);
if (qdev->last_sent_io_cmd > irq_num) {
if (intr)
ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
else
ret = wait_event_timeout(qdev->io_cmd_event,
atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
/* 0 means timeout: just bail, the "hw" has gone away */
if (ret <= 0)
goto out;
irq_num = atomic_read(&qdev->irq_received_io_cmd);
}
outb(val, addr);
qdev->last_sent_io_cmd = irq_num + 1;
if (intr)
ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
else
ret = wait_event_timeout(qdev->io_cmd_event,
atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
if (ret > 0)
ret = 0;
mutex_unlock(&qdev->async_io_mutex);
return ret;
}
static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
int ret;
restart:
ret = wait_for_io_cmd_user(qdev, val, port, false);
if (ret == -ERESTARTSYS)
goto restart;
}
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
const struct qxl_rect *area)
{
int surface_id;
uint32_t surface_width, surface_height;
int ret;
if (!surf->hw_surf_alloc)
DRM_ERROR("got io update area with no hw surface\n");
if (surf->is_primary)
surface_id = 0;
else
surface_id = surf->surface_id;
surface_width = surf->surf.width;
surface_height = surf->surf.height;
if (area->left < 0 || area->top < 0 ||
area->right > surface_width || area->bottom > surface_height)
return -EINVAL;
mutex_lock(&qdev->update_area_mutex);
qdev->ram_header->update_area = *area;
qdev->ram_header->update_surface = surface_id;
ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
mutex_unlock(&qdev->update_area_mutex);
return ret;
}
void qxl_io_notify_oom(struct qxl_device *qdev)
{
outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}
void qxl_io_flush_release(struct qxl_device *qdev)
{
outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}
void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}
void qxl_io_destroy_primary(struct qxl_device *qdev)
{
wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
qdev->primary_bo->is_primary = false;
drm_gem_object_put(&qdev->primary_bo->tbo.base);
qdev->primary_bo = NULL;
}
void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
{
struct qxl_surface_create *create;
if (WARN_ON(qdev->primary_bo))
return;
DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
create = &qdev->ram_header->create_surface;
create->format = bo->surf.format;
create->width = bo->surf.width;
create->height = bo->surf.height;
create->stride = bo->surf.stride;
create->mem = qxl_bo_physical_address(qdev, bo, 0);
DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);
create->flags = QXL_SURF_FLAG_KEEP_DATA;
create->type = QXL_SURF_TYPE_PRIMARY;
wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
qdev->primary_bo = bo;
qdev->primary_bo->is_primary = true;
drm_gem_object_get(&qdev->primary_bo->tbo.base);
}
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id);
wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}
void qxl_io_reset(struct qxl_device *qdev)
{
outb(0, qdev->io_base + QXL_IO_RESET);
}
void qxl_io_monitors_config(struct qxl_device *qdev)
{
wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}
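/*
 * Allocate a surface id from the idr. When the id would exceed the
 * number of surfaces the device supports, reap a couple of existing
 * surfaces and retry.
 */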
int qxl_surface_id_alloc(struct qxl_device *qdev,
struct qxl_bo *surf)
{
uint32_t handle;
int idr_ret;
int count = 0;
again:
idr_preload(GFP_ATOMIC);
spin_lock(&qdev->surf_id_idr_lock);
idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
spin_unlock(&qdev->surf_id_idr_lock);
idr_preload_end();
if (idr_ret < 0)
return idr_ret;
handle = idr_ret;
if (handle >= qdev->rom->n_surfaces) {
count++;
spin_lock(&qdev->surf_id_idr_lock);
idr_remove(&qdev->surf_id_idr, handle);
spin_unlock(&qdev->surf_id_idr_lock);
qxl_reap_surface_id(qdev, 2);
goto again;
}
surf->surface_id = handle;
spin_lock(&qdev->surf_id_idr_lock);
qdev->last_alloced_surf_id = handle;
spin_unlock(&qdev->surf_id_idr_lock);
return 0;
}
void qxl_surface_id_dealloc(struct qxl_device *qdev,
uint32_t surface_id)
{
spin_lock(&qdev->surf_id_idr_lock);
idr_remove(&qdev->surf_id_idr, surface_id);
spin_unlock(&qdev->surf_id_idr_lock);
}
int qxl_hw_surface_alloc(struct qxl_device *qdev,
struct qxl_bo *surf)
{
struct qxl_surface_cmd *cmd;
struct qxl_release *release;
int ret;
if (surf->hw_surf_alloc)
return 0;
ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
NULL,
&release);
if (ret)
return ret;
ret = qxl_release_reserve_list(release, true);
if (ret) {
qxl_release_free(qdev, release);
return ret;
}
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
cmd->u.surface_create.format = surf->surf.format;
cmd->u.surface_create.width = surf->surf.width;
cmd->u.surface_create.height = surf->surf.height;
cmd->u.surface_create.stride = surf->surf.stride;
cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
cmd->surface_id = surf->surface_id;
qxl_release_unmap(qdev, release, &cmd->release_info);
surf->surf_create = release;
/*
 * No need to add a release to the fence for this surface bo, since it
 * is only released when we ask to destroy the surface and it would
 * never signal otherwise.
 */
qxl_release_fence_buffer_objects(release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
surf->hw_surf_alloc = true;
spin_lock(&qdev->surf_id_idr_lock);
idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
spin_unlock(&qdev->surf_id_idr_lock);
return 0;
}
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
struct qxl_bo *surf)
{
struct qxl_surface_cmd *cmd;
struct qxl_release *release;
int ret;
int id;
if (!surf->hw_surf_alloc)
return 0;
ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
surf->surf_create,
&release);
if (ret)
return ret;
surf->surf_create = NULL;
/* remove the surface from the idr, but not the surface id yet */
spin_lock(&qdev->surf_id_idr_lock);
idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
spin_unlock(&qdev->surf_id_idr_lock);
surf->hw_surf_alloc = false;
id = surf->surface_id;
surf->surface_id = 0;
release->surface_release_id = id;
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_DESTROY;
cmd->surface_id = id;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_release_fence_buffer_objects(release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
return 0;
}
static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
struct qxl_rect rect;
int ret;
/*
 * If we are evicting, we need to make sure the surface is up to date.
 */
rect.left = 0;
rect.right = surf->surf.width;
rect.top = 0;
rect.bottom = surf->surf.height;
retry:
ret = qxl_io_update_area(qdev, surf, &rect);
if (ret == -ERESTARTSYS)
goto retry;
return ret;
}
static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
/* no need to update area if we are just freeing the surface normally */
if (do_update_area)
qxl_update_surface(qdev, surf);
/* nuke the surface id at the hw */
qxl_hw_surface_dealloc(qdev, surf);
}
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
mutex_lock(&qdev->surf_evict_mutex);
qxl_surface_evict_locked(qdev, surf, do_update_area);
mutex_unlock(&qdev->surf_evict_mutex);
}
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
long ret;
ret = qxl_bo_reserve(surf);
if (ret)
return ret;
if (stall)
mutex_unlock(&qdev->surf_evict_mutex);
if (stall) {
ret = dma_resv_wait_timeout(surf->tbo.base.resv,
DMA_RESV_USAGE_BOOKKEEP, true,
15 * HZ);
if (ret > 0)
ret = 0;
else if (ret == 0)
ret = -EBUSY;
} else {
ret = dma_resv_test_signaled(surf->tbo.base.resv,
DMA_RESV_USAGE_BOOKKEEP);
ret = ret ? -EBUSY : 0;
}
if (stall)
mutex_lock(&qdev->surf_evict_mutex);
if (ret) {
qxl_bo_unreserve(surf);
return ret;
}
qxl_surface_evict_locked(qdev, surf, true);
qxl_bo_unreserve(surf);
return 0;
}
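/*
 * Walk the surface idr starting just after the most recently allocated
 * id and evict up to max_to_reap surfaces. A first pass avoids stalling
 * on fences; if nothing could be reaped, a second pass waits for them.
 * If anything was reaped, kick the release-ring garbage collector.
 */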
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
int num_reaped = 0;
int i, ret;
bool stall = false;
int start = 0;
mutex_lock(&qdev->surf_evict_mutex);
again:
spin_lock(&qdev->surf_id_idr_lock);
start = qdev->last_alloced_surf_id + 1;
spin_unlock(&qdev->surf_id_idr_lock);
for (i = start; i < start + qdev->rom->n_surfaces; i++) {
void *objptr;
int surfid = i % qdev->rom->n_surfaces;
/*
 * This avoids the case where the object is in the idr but has been
 * evicted half way - it makes the idr lookup atomic with the eviction.
 */
spin_lock(&qdev->surf_id_idr_lock);
objptr = idr_find(&qdev->surf_id_idr, surfid);
spin_unlock(&qdev->surf_id_idr_lock);
if (!objptr)
continue;
ret = qxl_reap_surf(qdev, objptr, stall);
if (ret == 0)
num_reaped++;
if (num_reaped >= max_to_reap)
break;
}
if (num_reaped == 0 && stall == false) {
stall = true;
goto again;
}
mutex_unlock(&qdev->surf_evict_mutex);
if (num_reaped) {
usleep_range(500, 1000);
qxl_queue_garbage_collect(qdev, true);
}
return 0;
}
| linux-master | drivers/gpu/drm/qxl/qxl_cmd.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include "qxl_drv.h"
#include "qxl_object.h"
/* dumb ioctls implementation */
int qxl_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct qxl_device *qdev = to_qxl(dev);
struct qxl_bo *qobj;
struct drm_gem_object *gobj;
uint32_t handle;
int r;
struct qxl_surface surf;
uint32_t pitch, format;
pitch = args->width * ((args->bpp + 1) / 8);
args->size = pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
switch (args->bpp) {
case 16:
format = SPICE_SURFACE_FMT_16_565;
break;
case 32:
format = SPICE_SURFACE_FMT_32_xRGB;
break;
default:
return -EINVAL;
}
surf.width = args->width;
surf.height = args->height;
surf.stride = pitch;
surf.format = format;
surf.data = 0;
r = qxl_gem_object_create_with_handle(qdev, file_priv,
QXL_GEM_DOMAIN_CPU,
args->size, &surf, &gobj,
&handle);
if (r)
return r;
qobj = gem_to_qxl_bo(gobj);
qobj->is_dumb = true;
drm_gem_object_put(gobj);
args->pitch = pitch;
args->handle = handle;
return 0;
}
| linux-master | drivers/gpu/drm/qxl/qxl_dumb.c |
/*
* Copyright (C) 2009 Red Hat <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*
* Authors:
* Alon Levy <[email protected]>
*/
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include "qxl_drv.h"
#include "qxl_object.h"
#if defined(CONFIG_DEBUG_FS)
static int
qxl_debugfs_irq_received(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct qxl_device *qdev = to_qxl(node->minor->dev);
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
seq_printf(m, "%d\n", qdev->irq_received_error);
return 0;
}
static int
qxl_debugfs_buffers_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct qxl_device *qdev = to_qxl(node->minor->dev);
struct qxl_bo *bo;
list_for_each_entry(bo, &qdev->gem.objects, list) {
struct dma_resv_iter cursor;
struct dma_fence *fence;
int rel = 0;
dma_resv_iter_begin(&cursor, bo->tbo.base.resv,
DMA_RESV_USAGE_BOOKKEEP);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (dma_resv_iter_is_restarted(&cursor))
rel = 0;
++rel;
}
seq_printf(m, "size %ld, pc %d, num releases %d\n",
(unsigned long)bo->tbo.base.size,
bo->tbo.pin_count, rel);
}
return 0;
}
static struct drm_info_list qxl_debugfs_list[] = {
{ "irq_received", qxl_debugfs_irq_received, 0, NULL },
{ "qxl_buffers", qxl_debugfs_buffers_info, 0, NULL },
};
#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
#endif
void
qxl_debugfs_init(struct drm_minor *minor)
{
#if defined(CONFIG_DEBUG_FS)
struct qxl_device *dev = to_qxl(minor->dev);
drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
qxl_ttm_debugfs_init(dev);
#endif
}
void qxl_debugfs_add_files(struct qxl_device *qdev,
struct drm_info_list *files,
unsigned int nfiles)
{
unsigned int i;
for (i = 0; i < qdev->debugfs_count; i++) {
if (qdev->debugfs[i].files == files) {
/* Already registered */
return;
}
}
i = qdev->debugfs_count + 1;
if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
DRM_ERROR("Reached maximum number of debugfs components.\n");
DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
return;
}
qdev->debugfs[qdev->debugfs_count].files = files;
qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
qdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
qdev->ddev.primary->debugfs_root,
qdev->ddev.primary);
#endif
}
| linux-master | drivers/gpu/drm/qxl/qxl_debugfs.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include <linux/gfp.h>
#include <linux/slab.h>
#include "qxl_drv.h"
#include "qxl_object.h"
static int
qxl_allocate_chunk(struct qxl_device *qdev,
struct qxl_release *release,
struct qxl_drm_image *image,
unsigned int chunk_size)
{
struct qxl_drm_chunk *chunk;
int ret;
chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
if (!chunk)
return -ENOMEM;
ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
if (ret) {
kfree(chunk);
return ret;
}
list_add_tail(&chunk->head, &image->chunk_list);
return 0;
}
int
qxl_image_alloc_objects(struct qxl_device *qdev,
struct qxl_release *release,
struct qxl_drm_image **image_ptr,
int height, int stride)
{
struct qxl_drm_image *image;
int ret;
image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
if (!image)
return -ENOMEM;
INIT_LIST_HEAD(&image->chunk_list);
ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
if (ret) {
kfree(image);
return ret;
}
ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
if (ret) {
qxl_bo_unref(&image->bo);
kfree(image);
return ret;
}
*image_ptr = image;
return 0;
}
void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
{
struct qxl_drm_chunk *chunk, *tmp;
list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
qxl_bo_unref(&chunk->bo);
kfree(chunk);
}
qxl_bo_unref(&dimage->bo);
kfree(dimage);
}
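/*
 * Fill the previously allocated image and chunk bos: the bitmap data is
 * copied into the chunk bo page by page through atomic kmaps, then the
 * qxl_image descriptor is written to point at the chunk.
 */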
static int
qxl_image_init_helper(struct qxl_device *qdev,
struct qxl_release *release,
struct qxl_drm_image *dimage,
const uint8_t *data,
int width, int height,
int depth, unsigned int hash,
int stride)
{
struct qxl_drm_chunk *drv_chunk;
struct qxl_image *image;
struct qxl_data_chunk *chunk;
int i;
int chunk_stride;
int linesize = width * depth / 8;
struct qxl_bo *chunk_bo, *image_bo;
void *ptr;
/* Chunk */
/* FIXME: Check integer overflow */
/* TODO: variable number of chunks */
drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);
chunk_bo = drv_chunk->bo;
/* TODO: should use linesize, but it renders wrong
 * (check the bitmaps are sent correctly first)
 */
chunk_stride = stride;
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
chunk = ptr;
chunk->data_size = height * chunk_stride;
chunk->prev_chunk = 0;
chunk->next_chunk = 0;
qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
{
void *k_data, *i_data;
int remain;
int page;
int size;
if (stride == linesize && chunk_stride == stride) {
remain = linesize * height;
page = 0;
i_data = (void *)data;
while (remain > 0) {
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);
if (page == 0) {
chunk = ptr;
k_data = chunk->data;
size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
} else {
k_data = ptr;
size = PAGE_SIZE;
}
size = min(size, remain);
memcpy(k_data, i_data, size);
qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
i_data += size;
remain -= size;
page++;
}
} else {
unsigned int page_base, page_offset, out_offset;
for (i = 0 ; i < height ; ++i) {
i_data = (void *)data + i * stride;
remain = linesize;
out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;
while (remain > 0) {
page_base = out_offset & PAGE_MASK;
page_offset = offset_in_page(out_offset);
size = min((int)(PAGE_SIZE - page_offset), remain);
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
k_data = ptr + page_offset;
memcpy(k_data, i_data, size);
qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
remain -= size;
i_data += size;
out_offset += size;
}
}
}
}
qxl_bo_vunmap_locked(chunk_bo);
image_bo = dimage->bo;
ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
image = ptr;
image->descriptor.id = 0;
image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;
image->descriptor.flags = 0;
image->descriptor.width = width;
image->descriptor.height = height;
switch (depth) {
case 1:
/* TODO: BE? check by arch? */
image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
break;
case 24:
image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
break;
case 32:
image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
break;
default:
DRM_ERROR("unsupported image bit depth\n");
qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
return -EINVAL;
}
image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
image->u.bitmap.x = width;
image->u.bitmap.y = height;
image->u.bitmap.stride = chunk_stride;
image->u.bitmap.palette = 0;
image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
return 0;
}
int qxl_image_init(struct qxl_device *qdev,
struct qxl_release *release,
struct qxl_drm_image *dimage,
const uint8_t *data,
int x, int y, int width, int height,
int depth, int stride)
{
data += y * stride + x * (depth / 8);
return qxl_image_init_helper(qdev, release, dimage, data,
width, height, depth, 0, stride);
}
| linux-master | drivers/gpu/drm/qxl/qxl_image.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include <linux/io-mapping.h>
#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "qxl_drv.h"
#include "qxl_object.h"
static bool qxl_check_device(struct qxl_device *qdev)
{
struct qxl_rom *rom = qdev->rom;
if (rom->magic != 0x4f525851) {
DRM_ERROR("bad rom signature %x\n", rom->magic);
return false;
}
DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
rom->log_level);
DRM_INFO("%d io pages at offset 0x%x\n",
rom->num_io_pages, rom->pages_offset);
DRM_INFO("%d byte draw area at offset 0x%x\n",
rom->surface0_area_size, rom->draw_area_offset);
qdev->vram_size = rom->surface0_area_size;
DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
return true;
}
static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot)
{
qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size;
qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index);
}
static void setup_slot(struct qxl_device *qdev,
struct qxl_memslot *slot,
unsigned int slot_index,
const char *slot_name,
unsigned long start_phys_addr,
unsigned long size)
{
uint64_t high_bits;
slot->index = slot_index;
slot->name = slot_name;
slot->start_phys_addr = start_phys_addr;
slot->size = size;
setup_hw_slot(qdev, slot);
slot->generation = qdev->rom->slot_generation;
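/*
 * The slot id and generation are packed into the top bits of a 64-bit
 * value; qxl_bo_physical_address() ORs high_bits into every address
 * handed to the device, letting the host resolve the memslot for that
 * address.
 */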
high_bits = (qdev->rom->slots_start + slot->index)
<< qdev->rom->slot_gen_bits;
high_bits |= slot->generation;
high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits));
slot->high_bits = high_bits;
DRM_INFO("slot %d (%s): base 0x%08lx, size 0x%08lx\n",
slot->index, slot->name,
(unsigned long)slot->start_phys_addr,
(unsigned long)slot->size);
}
void qxl_reinit_memslots(struct qxl_device *qdev)
{
setup_hw_slot(qdev, &qdev->main_slot);
setup_hw_slot(qdev, &qdev->surfaces_slot);
}
static void qxl_gc_work(struct work_struct *work)
{
struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
qxl_garbage_collect(qdev);
}
int qxl_device_init(struct qxl_device *qdev,
struct pci_dev *pdev)
{
int r, sb;
pci_set_drvdata(pdev, &qdev->ddev);
mutex_init(&qdev->gem.mutex);
mutex_init(&qdev->update_area_mutex);
mutex_init(&qdev->release_mutex);
mutex_init(&qdev->surf_evict_mutex);
qxl_gem_init(qdev);
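/*
 * BAR layout: BAR 0 is VRAM (draw area + ram header), BAR 2 holds the
 * ROM, BAR 3 the I/O ports; surfaces use the 64-bit BAR 4 when present,
 * otherwise BAR 1.
 */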
qdev->rom_base = pci_resource_start(pdev, 2);
qdev->rom_size = pci_resource_len(pdev, 2);
qdev->vram_base = pci_resource_start(pdev, 0);
qdev->io_base = pci_resource_start(pdev, 3);
qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
if (!qdev->vram_mapping) {
pr_err("Unable to create vram_mapping");
return -ENOMEM;
}
if (pci_resource_len(pdev, 4) > 0) {
/* 64bit surface bar present */
sb = 4;
qdev->surfaceram_base = pci_resource_start(pdev, sb);
qdev->surfaceram_size = pci_resource_len(pdev, sb);
qdev->surface_mapping =
io_mapping_create_wc(qdev->surfaceram_base,
qdev->surfaceram_size);
}
if (qdev->surface_mapping == NULL) {
/* 64bit surface bar not present (or mapping failed) */
sb = 1;
qdev->surfaceram_base = pci_resource_start(pdev, sb);
qdev->surfaceram_size = pci_resource_len(pdev, sb);
qdev->surface_mapping =
io_mapping_create_wc(qdev->surfaceram_base,
qdev->surfaceram_size);
if (!qdev->surface_mapping) {
pr_err("Unable to create surface_mapping");
r = -ENOMEM;
goto vram_mapping_free;
}
}
DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
(unsigned long long)qdev->vram_base,
(unsigned long long)pci_resource_end(pdev, 0),
(int)pci_resource_len(pdev, 0) / 1024 / 1024,
(int)pci_resource_len(pdev, 0) / 1024,
(unsigned long long)qdev->surfaceram_base,
(unsigned long long)pci_resource_end(pdev, sb),
(int)qdev->surfaceram_size / 1024 / 1024,
(int)qdev->surfaceram_size / 1024,
(sb == 4) ? "64bit" : "32bit");
qdev->rom = ioremap_wc(qdev->rom_base, qdev->rom_size);
if (!qdev->rom) {
pr_err("Unable to ioremap ROM\n");
r = -ENOMEM;
goto surface_mapping_free;
}
if (!qxl_check_device(qdev)) {
r = -ENODEV;
goto rom_unmap;
}
r = qxl_bo_init(qdev);
if (r) {
DRM_ERROR("bo init failed %d\n", r);
goto rom_unmap;
}
qdev->ram_header = ioremap_wc(qdev->vram_base +
qdev->rom->ram_header_offset,
sizeof(*qdev->ram_header));
if (!qdev->ram_header) {
DRM_ERROR("Unable to ioremap RAM header\n");
r = -ENOMEM;
goto bo_fini;
}
qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
sizeof(struct qxl_command),
QXL_COMMAND_RING_SIZE,
qdev->io_base + QXL_IO_NOTIFY_CMD,
&qdev->display_event);
if (!qdev->command_ring) {
DRM_ERROR("Unable to create command ring\n");
r = -ENOMEM;
goto ram_header_unmap;
}
qdev->cursor_ring = qxl_ring_create(
&(qdev->ram_header->cursor_ring_hdr),
sizeof(struct qxl_command),
QXL_CURSOR_RING_SIZE,
qdev->io_base + QXL_IO_NOTIFY_CURSOR,
&qdev->cursor_event);
if (!qdev->cursor_ring) {
DRM_ERROR("Unable to create cursor ring\n");
r = -ENOMEM;
goto command_ring_free;
}
qdev->release_ring = qxl_ring_create(
&(qdev->ram_header->release_ring_hdr),
sizeof(uint64_t),
QXL_RELEASE_RING_SIZE, 0,
NULL);
if (!qdev->release_ring) {
DRM_ERROR("Unable to create release ring\n");
r = -ENOMEM;
goto cursor_ring_free;
}
idr_init_base(&qdev->release_idr, 1);
spin_lock_init(&qdev->release_idr_lock);
spin_lock_init(&qdev->release_lock);
idr_init_base(&qdev->surf_id_idr, 1);
spin_lock_init(&qdev->surf_id_idr_lock);
mutex_init(&qdev->async_io_mutex);
/* reset the device into a known state - no memslots, no primary
* created, no surfaces. */
qxl_io_reset(qdev);
/* must initialize irq before first async io - slot creation */
r = qxl_irq_init(qdev);
if (r) {
DRM_ERROR("Unable to init qxl irq\n");
goto release_ring_free;
}
/*
* Note that virtual is surface0. We rely on the single ioremap done
* before.
*/
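/*
 * Slot 0 ("main") covers VRAM below the ram header (surface0 and
 * command memory); slot 1 ("surfaces") covers the dedicated surface BAR.
 */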
setup_slot(qdev, &qdev->main_slot, 0, "main",
(unsigned long)qdev->vram_base,
(unsigned long)qdev->rom->ram_header_offset);
setup_slot(qdev, &qdev->surfaces_slot, 1, "surfaces",
(unsigned long)qdev->surfaceram_base,
(unsigned long)qdev->surfaceram_size);
INIT_WORK(&qdev->gc_work, qxl_gc_work);
return 0;
release_ring_free:
qxl_ring_free(qdev->release_ring);
cursor_ring_free:
qxl_ring_free(qdev->cursor_ring);
command_ring_free:
qxl_ring_free(qdev->command_ring);
ram_header_unmap:
iounmap(qdev->ram_header);
bo_fini:
qxl_bo_fini(qdev);
rom_unmap:
iounmap(qdev->rom);
surface_mapping_free:
io_mapping_free(qdev->surface_mapping);
vram_mapping_free:
io_mapping_free(qdev->vram_mapping);
return r;
}
void qxl_device_fini(struct qxl_device *qdev)
{
int cur_idx;
/* check if qxl_device_init() was successful (gc_work is initialized last) */
if (!qdev->gc_work.func)
return;
for (cur_idx = 0; cur_idx < 3; cur_idx++) {
if (!qdev->current_release_bo[cur_idx])
continue;
qxl_bo_unpin(qdev->current_release_bo[cur_idx]);
qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
qdev->current_release_bo_offset[cur_idx] = 0;
qdev->current_release_bo[cur_idx] = NULL;
}
/*
* Ask host to release resources (+fill release ring),
* then wait for the release actually happening.
*/
qxl_io_notify_oom(qdev);
wait_event_timeout(qdev->release_event,
atomic_read(&qdev->release_count) == 0,
HZ);
flush_work(&qdev->gc_work);
qxl_surf_evict(qdev);
qxl_vram_evict(qdev);
qxl_gem_fini(qdev);
qxl_bo_fini(qdev);
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
io_mapping_free(qdev->surface_mapping);
io_mapping_free(qdev->vram_mapping);
iounmap(qdev->ram_header);
iounmap(qdev->rom);
qdev->rom = NULL;
}
| linux-master | drivers/gpu/drm/qxl/qxl_kms.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include <linux/pci.h>
#include <linux/uaccess.h>
#include "qxl_drv.h"
#include "qxl_object.h"
/*
* TODO: we allocate a new gem (in qxl_bo) for each request.
* This is wasteful since bo's are page aligned.
*/
int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc *qxl_alloc = data;
int ret;
uint32_t handle;
u32 domain = QXL_GEM_DOMAIN_VRAM;
if (qxl_alloc->size == 0) {
DRM_ERROR("invalid size %d\n", qxl_alloc->size);
return -EINVAL;
}
ret = qxl_gem_object_create_with_handle(qdev, file_priv,
domain,
qxl_alloc->size,
NULL,
NULL, &handle);
if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n",
__func__, ret);
return -ENOMEM;
}
qxl_alloc->handle = handle;
return 0;
}
int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_map *qxl_map = data;
return drm_gem_ttm_dumb_map_offset(file_priv, &qdev->ddev, qxl_map->handle,
&qxl_map->offset);
}
struct qxl_reloc_info {
int type;
struct qxl_bo *dst_bo;
uint32_t dst_offset;
struct qxl_bo *src_bo;
int src_offset;
};
/*
* dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
* are on vram).
* *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
*/
static void
apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
void *reloc_page;
reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
info->src_bo,
info->src_offset);
qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
static void
apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
uint32_t id = 0;
void *reloc_page;
if (info->src_bo && !info->src_bo->is_primary)
id = info->src_bo->surface_id;
reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
*(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
/* return holding the reference to this object */
static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
struct qxl_release *release, struct qxl_bo **qbo_p)
{
struct drm_gem_object *gobj;
struct qxl_bo *qobj;
int ret;
gobj = drm_gem_object_lookup(file_priv, handle);
if (!gobj)
return -EINVAL;
qobj = gem_to_qxl_bo(gobj);
ret = qxl_release_list_add(release, qobj);
drm_gem_object_put(gobj);
if (ret)
return ret;
*qbo_p = qobj;
return 0;
}
/*
* Usage of execbuffer:
* Relocations need to take into account the full QXLDrawable size.
* However, the command as passed from user space must *not* contain the initial
* QXLReleaseInfo struct (first XXX bytes)
*/
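/*
 * Flow: copy the command body from user space into a freshly reserved
 * release bo, resolve all relocations against the referenced bo's,
 * fence the reservation list and push the command onto the ring.
 */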
static int qxl_process_single_command(struct qxl_device *qdev,
struct drm_qxl_command *cmd,
struct drm_file *file_priv)
{
struct qxl_reloc_info *reloc_info;
int release_type;
struct qxl_release *release;
struct qxl_bo *cmd_bo;
void *fb_cmd;
int i, ret, num_relocs;
int unwritten;
switch (cmd->type) {
case QXL_CMD_DRAW:
release_type = QXL_RELEASE_DRAWABLE;
break;
case QXL_CMD_SURFACE:
case QXL_CMD_CURSOR:
default:
DRM_DEBUG("Only draw commands in execbuffers\n");
return -EINVAL;
}
if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
return -EINVAL;
if (!access_ok(u64_to_user_ptr(cmd->command),
cmd->command_size))
return -EFAULT;
reloc_info = kmalloc_array(cmd->relocs_num,
sizeof(struct qxl_reloc_info), GFP_KERNEL);
if (!reloc_info)
return -ENOMEM;
ret = qxl_alloc_release_reserved(qdev,
sizeof(union qxl_release_info) +
cmd->command_size,
release_type,
&release,
&cmd_bo);
if (ret)
goto out_free_reloc;
/* TODO copy slow path code from i915 */
fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
unwritten = __copy_from_user_inatomic_nocache
(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
u64_to_user_ptr(cmd->command), cmd->command_size);
{
struct qxl_drawable *draw = fb_cmd;
draw->mm_time = qdev->rom->mm_clock;
}
qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
if (unwritten) {
DRM_ERROR("got unwritten %d\n", unwritten);
ret = -EFAULT;
goto out_free_release;
}
/* fill out reloc info structs */
num_relocs = 0;
for (i = 0; i < cmd->relocs_num; ++i) {
struct drm_qxl_reloc reloc;
struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);
if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
ret = -EFAULT;
goto out_free_bos;
}
/* add the bos to the list of bos to validate -
need to validate first then process relocs? */
if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
ret = -EINVAL;
goto out_free_bos;
}
reloc_info[i].type = reloc.reloc_type;
if (reloc.dst_handle) {
ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
&reloc_info[i].dst_bo);
if (ret)
goto out_free_bos;
reloc_info[i].dst_offset = reloc.dst_offset;
} else {
reloc_info[i].dst_bo = cmd_bo;
reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
}
num_relocs++;
/* reserve and validate the reloc dst bo */
if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
&reloc_info[i].src_bo);
if (ret)
goto out_free_bos;
reloc_info[i].src_offset = reloc.src_offset;
} else {
reloc_info[i].src_bo = NULL;
reloc_info[i].src_offset = 0;
}
}
/* validate all buffers */
ret = qxl_release_reserve_list(release, false);
if (ret)
goto out_free_bos;
for (i = 0; i < cmd->relocs_num; ++i) {
if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
apply_reloc(qdev, &reloc_info[i]);
else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
apply_surf_reloc(qdev, &reloc_info[i]);
}
qxl_release_fence_buffer_objects(release);
ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
out_free_bos:
out_free_release:
if (ret)
qxl_release_free(qdev, release);
out_free_reloc:
kfree(reloc_info);
return ret;
}
int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_execbuffer *execbuffer = data;
struct drm_qxl_command user_cmd;
int cmd_num;
int ret;
for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
struct drm_qxl_command __user *commands =
u64_to_user_ptr(execbuffer->commands);
if (copy_from_user(&user_cmd, commands + cmd_num,
sizeof(user_cmd)))
return -EFAULT;
ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
if (ret)
return ret;
}
return 0;
}
int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_update_area *update_area = data;
struct qxl_rect area = {.left = update_area->left,
.top = update_area->top,
.right = update_area->right,
.bottom = update_area->bottom};
int ret;
struct drm_gem_object *gobj = NULL;
struct qxl_bo *qobj = NULL;
struct ttm_operation_ctx ctx = { true, false };
if (update_area->left >= update_area->right ||
update_area->top >= update_area->bottom)
return -EINVAL;
gobj = drm_gem_object_lookup(file, update_area->handle);
if (gobj == NULL)
return -ENOENT;
qobj = gem_to_qxl_bo(gobj);
ret = qxl_bo_reserve(qobj);
if (ret)
goto out;
if (!qobj->tbo.pin_count) {
qxl_ttm_placement_from_domain(qobj, qobj->type);
ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
if (unlikely(ret))
goto out;
}
ret = qxl_bo_check_id(qdev, qobj);
if (ret)
goto out2;
if (!qobj->surface_id)
DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
ret = qxl_io_update_area(qdev, qobj, &area);
out2:
qxl_bo_unreserve(qobj);
out:
drm_gem_object_put(gobj);
return ret;
}
int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_getparam *param = data;
switch (param->param) {
case QXL_PARAM_NUM_SURFACES:
param->value = qdev->rom->n_surfaces;
break;
case QXL_PARAM_MAX_RELOCS:
param->value = QXL_MAX_RES;
break;
default:
return -EINVAL;
}
return 0;
}
int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qxl_device *qdev = to_qxl(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_qxl_clientcap *param = data;
int byte, idx;
byte = param->index / 8;
idx = param->index % 8;
if (pdev->revision < 4)
return -ENOSYS;
if (byte >= 58)
return -ENOSYS;
if (qdev->rom->client_capabilities[byte] & (1 << idx))
return 0;
return -ENOSYS;
}
int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc_surf *param = data;
int handle;
int ret;
int size, actual_stride;
struct qxl_surface surf;
/* work out the size and allocate a bo with a handle */
actual_stride = param->stride < 0 ? -param->stride : param->stride;
size = actual_stride * param->height + actual_stride;
surf.format = param->format;
surf.width = param->width;
surf.height = param->height;
surf.stride = param->stride;
surf.data = 0;
ret = qxl_gem_object_create_with_handle(qdev, file,
QXL_GEM_DOMAIN_SURFACE,
size,
&surf,
NULL, &handle);
if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n",
__func__, ret);
return -ENOMEM;
} else
param->handle = handle;
return ret;
}
| linux-master | drivers/gpu/drm/qxl/qxl_ioctl.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include <linux/iosys-map.h>
#include <linux/io-mapping.h>
#include "qxl_drv.h"
#include "qxl_object.h"
static int __qxl_bo_pin(struct qxl_bo *bo);
static void __qxl_bo_unpin(struct qxl_bo *bo);
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct qxl_bo *bo;
struct qxl_device *qdev;
bo = to_qxl_bo(tbo);
qdev = to_qxl(bo->tbo.base.dev);
qxl_surface_evict(qdev, bo, false);
WARN_ON_ONCE(bo->map_count > 0);
mutex_lock(&qdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
drm_gem_object_release(&bo->tbo.base);
kfree(bo);
}
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &qxl_ttm_bo_destroy)
return true;
return false;
}
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
u32 c = 0;
u32 pflag = 0;
unsigned int i;
if (qbo->tbo.base.size <= PAGE_SIZE)
pflag |= TTM_PL_FLAG_TOPDOWN;
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM) {
qbo->placements[c].mem_type = TTM_PL_VRAM;
qbo->placements[c++].flags = pflag;
}
if (domain == QXL_GEM_DOMAIN_SURFACE) {
qbo->placements[c].mem_type = TTM_PL_PRIV;
qbo->placements[c++].flags = pflag;
qbo->placements[c].mem_type = TTM_PL_VRAM;
qbo->placements[c++].flags = pflag;
}
if (domain == QXL_GEM_DOMAIN_CPU) {
qbo->placements[c].mem_type = TTM_PL_SYSTEM;
qbo->placements[c++].flags = pflag;
}
if (!c) {
qbo->placements[c].mem_type = TTM_PL_SYSTEM;
qbo->placements[c++].flags = 0;
}
qbo->placement.num_placement = c;
qbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
qbo->placements[i].fpfn = 0;
qbo->placements[i].lpfn = 0;
}
}
static const struct drm_gem_object_funcs qxl_object_funcs = {
.free = qxl_gem_object_free,
.open = qxl_gem_object_open,
.close = qxl_gem_object_close,
.pin = qxl_gem_prime_pin,
.unpin = qxl_gem_prime_unpin,
.get_sg_table = qxl_gem_prime_get_sg_table,
.vmap = qxl_gem_prime_vmap,
.vunmap = qxl_gem_prime_vunmap,
.mmap = drm_gem_ttm_mmap,
.print_info = drm_gem_ttm_print_info,
};
int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
bool kernel, bool pinned, u32 domain, u32 priority,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr)
{
struct ttm_operation_ctx ctx = { !kernel, false };
struct qxl_bo *bo;
enum ttm_bo_type type;
int r;
if (kernel)
type = ttm_bo_type_kernel;
else
type = ttm_bo_type_device;
*bo_ptr = NULL;
bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
size = roundup(size, PAGE_SIZE);
r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
if (unlikely(r)) {
kfree(bo);
return r;
}
bo->tbo.base.funcs = &qxl_object_funcs;
bo->type = domain;
bo->surface_id = 0;
INIT_LIST_HEAD(&bo->list);
if (surf)
bo->surf = *surf;
qxl_ttm_placement_from_domain(bo, domain);
bo->tbo.priority = priority;
r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type,
&bo->placement, 0, &ctx, NULL, NULL,
&qxl_ttm_bo_destroy);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(qdev->ddev.dev,
"object_init failed for (%lu, 0x%08X)\n",
size, domain);
return r;
}
if (pinned)
ttm_bo_pin(&bo->tbo);
ttm_bo_unreserve(&bo->tbo);
*bo_ptr = bo;
return 0;
}
int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
{
int r;
dma_resv_assert_held(bo->tbo.base.resv);
if (bo->kptr) {
bo->map_count++;
goto out;
}
r = __qxl_bo_pin(bo);
if (r)
return r;
r = ttm_bo_vmap(&bo->tbo, &bo->map);
if (r) {
__qxl_bo_unpin(bo);
return r;
}
bo->map_count = 1;
/* TODO: Remove kptr in favor of map everywhere. */
if (bo->map.is_iomem)
bo->kptr = (void *)bo->map.vaddr_iomem;
else
bo->kptr = bo->map.vaddr;
out:
*map = bo->map;
return 0;
}
int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map)
{
int r;
r = qxl_bo_reserve(bo);
if (r)
return r;
r = qxl_bo_vmap_locked(bo, map);
qxl_bo_unreserve(bo);
return r;
}
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, int page_offset)
{
unsigned long offset;
void *rptr;
int ret;
struct io_mapping *map;
struct iosys_map bo_map;
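/*
 * VRAM and surface bo's can map just the requested page through the
 * write-combined io_mapping (atomic and cheap); everything else falls
 * back to vmapping the whole bo.
 */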
if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
map = qdev->vram_mapping;
else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
map = qdev->surface_mapping;
else
goto fallback;
offset = bo->tbo.resource->start << PAGE_SHIFT;
return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
if (bo->kptr) {
rptr = bo->kptr + (page_offset * PAGE_SIZE);
return rptr;
}
ret = qxl_bo_vmap_locked(bo, &bo_map);
if (ret)
return NULL;
rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */
rptr += page_offset * PAGE_SIZE;
return rptr;
}
void qxl_bo_vunmap_locked(struct qxl_bo *bo)
{
dma_resv_assert_held(bo->tbo.base.resv);
if (bo->kptr == NULL)
return;
bo->map_count--;
if (bo->map_count > 0)
return;
bo->kptr = NULL;
ttm_bo_vunmap(&bo->tbo, &bo->map);
__qxl_bo_unpin(bo);
}
int qxl_bo_vunmap(struct qxl_bo *bo)
{
int r;
r = qxl_bo_reserve(bo);
if (r)
return r;
qxl_bo_vunmap_locked(bo);
qxl_bo_unreserve(bo);
return 0;
}
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
(bo->tbo.resource->mem_type != TTM_PL_PRIV))
goto fallback;
io_mapping_unmap_atomic(pmap);
return;
fallback:
qxl_bo_vunmap_locked(bo);
}
void qxl_bo_unref(struct qxl_bo **bo)
{
if ((*bo) == NULL)
return;
drm_gem_object_put(&(*bo)->tbo.base);
*bo = NULL;
}
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
drm_gem_object_get(&bo->tbo.base);
return bo;
}
static int __qxl_bo_pin(struct qxl_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
struct drm_device *ddev = bo->tbo.base.dev;
int r;
if (bo->tbo.pin_count) {
ttm_bo_pin(&bo->tbo);
return 0;
}
qxl_ttm_placement_from_domain(bo, bo->type);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (likely(r == 0))
ttm_bo_pin(&bo->tbo);
if (unlikely(r != 0))
dev_err(ddev->dev, "%p pin failed\n", bo);
return r;
}
static void __qxl_bo_unpin(struct qxl_bo *bo)
{
ttm_bo_unpin(&bo->tbo);
}
/*
 * Reserve the BO before pinning the object. If the BO is already
 * reserved, use the internal version, __qxl_bo_pin(), directly.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
int r;
r = qxl_bo_reserve(bo);
if (r)
return r;
r = __qxl_bo_pin(bo);
qxl_bo_unreserve(bo);
return r;
}
/*
 * Reserve the BO before unpinning the object. If the BO is already
 * reserved, use the internal version, __qxl_bo_unpin(), directly.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
int r;
r = qxl_bo_reserve(bo);
if (r)
return r;
__qxl_bo_unpin(bo);
qxl_bo_unreserve(bo);
return 0;
}
void qxl_bo_force_delete(struct qxl_device *qdev)
{
struct qxl_bo *bo, *n;
if (list_empty(&qdev->gem.objects))
return;
dev_err(qdev->ddev.dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
*((unsigned long *)&bo->tbo.base.refcount));
mutex_lock(&qdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
/* this should unref the ttm bo */
drm_gem_object_put(&bo->tbo.base);
}
}
int qxl_bo_init(struct qxl_device *qdev)
{
return qxl_ttm_init(qdev);
}
void qxl_bo_fini(struct qxl_device *qdev)
{
qxl_ttm_fini(qdev);
}
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
int ret;
if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
/* allocate a surface id for this surface now */
ret = qxl_surface_id_alloc(qdev, bo);
if (ret)
return ret;
ret = qxl_hw_surface_alloc(qdev, bo);
if (ret)
return ret;
}
return 0;
}
int qxl_surf_evict(struct qxl_device *qdev)
{
struct ttm_resource_manager *man;
man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}
int qxl_vram_evict(struct qxl_device *qdev)
{
struct ttm_resource_manager *man;
man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}
| linux-master | drivers/gpu/drm/qxl/qxl_object.c |
/*
* Copyright 2011 Red Hat, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/iosys-map.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include "qxl_drv.h"
#include "qxl_object.h"
static int alloc_clips(struct qxl_device *qdev,
struct qxl_release *release,
unsigned int num_clips,
struct qxl_bo **clips_bo)
{
int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
return qxl_alloc_bo_reserved(qdev, release, size, clips_bo);
}
/* returns a pointer to the already allocated qxl_rect array inside
* the qxl_clip_rects. This is *not* the same as the memory allocated
 * on the device; it is offset into qxl_clip_rects.chunk.data */
static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
unsigned int num_clips,
struct qxl_bo *clips_bo)
{
struct iosys_map map;
struct qxl_clip_rects *dev_clips;
int ret;
ret = qxl_bo_vmap_locked(clips_bo, &map);
if (ret)
return NULL;
dev_clips = map.vaddr; /* TODO: Use mapping abstraction properly */
dev_clips->num_rects = num_clips;
dev_clips->chunk.next_chunk = 0;
dev_clips->chunk.prev_chunk = 0;
dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips;
return (struct qxl_rect *)dev_clips->chunk.data;
}
static int
alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
{
return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
QXL_RELEASE_DRAWABLE, release, NULL);
}
static void
free_drawable(struct qxl_device *qdev, struct qxl_release *release)
{
qxl_release_free(qdev, release);
}
/* release needs to be reserved at this point */
static int
make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
const struct qxl_rect *rect,
struct qxl_release *release)
{
struct qxl_drawable *drawable;
int i;
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
if (!drawable)
return -ENOMEM;
drawable->type = type;
drawable->surface_id = surface; /* Only primary for now */
drawable->effect = QXL_EFFECT_OPAQUE;
drawable->self_bitmap = 0;
drawable->self_bitmap_area.top = 0;
drawable->self_bitmap_area.left = 0;
drawable->self_bitmap_area.bottom = 0;
drawable->self_bitmap_area.right = 0;
/* FIXME: add clipping */
drawable->clip.type = SPICE_CLIP_TYPE_NONE;
/*
* surfaces_dest[i] should apparently be filled out with the
* surfaces that we depend on, and surface_rects should be
* filled with the rectangles of those surfaces that we
* are going to use.
*/
for (i = 0; i < 3; ++i)
drawable->surfaces_dest[i] = -1;
if (rect)
drawable->bbox = *rect;
drawable->mm_time = qdev->rom->mm_clock;
qxl_release_unmap(qdev, release, &drawable->release_info);
return 0;
}
/* push a draw command using the given clipping rectangles as
* the sources from the shadow framebuffer.
*
* Right now implementing with a single draw and a clip list. Clip
 * lists are known to be a problem performance-wise; this can be solved
* by treating them differently in the server.
*/
void qxl_draw_dirty_fb(struct qxl_device *qdev,
struct drm_framebuffer *fb,
struct qxl_bo *bo,
unsigned int flags, unsigned int color,
struct drm_clip_rect *clips,
unsigned int num_clips, int inc,
uint32_t dumb_shadow_offset)
{
/*
* TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
* send a fill command instead, much cheaper.
*
* See include/drm/drm_mode.h
*/
struct drm_clip_rect *clips_ptr;
int i;
int left, right, top, bottom;
int width, height;
struct qxl_drawable *drawable;
struct qxl_rect drawable_rect;
struct qxl_rect *rects;
int stride = fb->pitches[0];
/* depth is not actually interesting, we don't mask with it */
int depth = fb->format->cpp[0] * 8;
struct iosys_map surface_map;
uint8_t *surface_base;
struct qxl_release *release;
struct qxl_bo *clips_bo;
struct qxl_drm_image *dimage;
int ret;
ret = alloc_drawable(qdev, &release);
if (ret)
return;
clips->x1 += dumb_shadow_offset;
clips->x2 += dumb_shadow_offset;
left = clips->x1;
right = clips->x2;
top = clips->y1;
bottom = clips->y2;
/* skip the first clip rect */
for (i = 1, clips_ptr = clips + inc;
i < num_clips; i++, clips_ptr += inc) {
left = min_t(int, left, (int)clips_ptr->x1);
right = max_t(int, right, (int)clips_ptr->x2);
top = min_t(int, top, (int)clips_ptr->y1);
bottom = max_t(int, bottom, (int)clips_ptr->y2);
}
width = right - left;
height = bottom - top;
ret = alloc_clips(qdev, release, num_clips, &clips_bo);
if (ret)
goto out_free_drawable;
ret = qxl_image_alloc_objects(qdev, release,
&dimage,
height, stride);
if (ret)
goto out_free_clips;
/* do a reservation run over all the objects we just allocated */
ret = qxl_release_reserve_list(release, true);
if (ret)
goto out_free_image;
drawable_rect.left = left;
drawable_rect.right = right;
drawable_rect.top = top;
drawable_rect.bottom = bottom;
ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
release);
if (ret)
goto out_release_backoff;
ret = qxl_bo_vmap_locked(bo, &surface_map);
if (ret)
goto out_release_backoff;
surface_base = surface_map.vaddr; /* TODO: Use mapping abstraction properly */
ret = qxl_image_init(qdev, release, dimage, surface_base,
left - dumb_shadow_offset,
top, width, height, depth, stride);
qxl_bo_vunmap_locked(bo);
if (ret)
goto out_release_backoff;
rects = drawable_set_clipping(qdev, num_clips, clips_bo);
if (!rects) {
ret = -EINVAL;
goto out_release_backoff;
}
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
drawable->clip.data = qxl_bo_physical_address(qdev,
clips_bo, 0);
drawable->u.copy.src_area.top = 0;
drawable->u.copy.src_area.bottom = height;
drawable->u.copy.src_area.left = 0;
drawable->u.copy.src_area.right = width;
drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
drawable->u.copy.scale_mode = 0;
drawable->u.copy.mask.flags = 0;
drawable->u.copy.mask.pos.x = 0;
drawable->u.copy.mask.pos.y = 0;
drawable->u.copy.mask.bitmap = 0;
drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0);
qxl_release_unmap(qdev, release, &drawable->release_info);
clips_ptr = clips;
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
rects[i].left = clips_ptr->x1;
rects[i].right = clips_ptr->x2;
rects[i].top = clips_ptr->y1;
rects[i].bottom = clips_ptr->y2;
}
qxl_bo_vunmap_locked(clips_bo);
qxl_release_fence_buffer_objects(release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
out_release_backoff:
if (ret)
qxl_release_backoff_reserve_list(release);
out_free_image:
qxl_image_free_objects(qdev, dimage);
out_free_clips:
qxl_bo_unref(&clips_bo);
out_free_drawable:
/* only free drawable on error */
if (ret)
free_drawable(qdev, release);
}
| linux-master | drivers/gpu/drm/qxl/qxl_draw.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include <drm/drm.h>
#include "qxl_drv.h"
#include "qxl_object.h"
void qxl_gem_object_free(struct drm_gem_object *gobj)
{
struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
struct qxl_device *qdev;
struct ttm_buffer_object *tbo;
qdev = to_qxl(gobj->dev);
qxl_surface_evict(qdev, qobj, false);
tbo = &qobj->tbo;
ttm_bo_put(tbo);
}
int qxl_gem_object_create(struct qxl_device *qdev, int size,
int alignment, int initial_domain,
bool discardable, bool kernel,
struct qxl_surface *surf,
struct drm_gem_object **obj)
{
struct qxl_bo *qbo;
int r;
*obj = NULL;
/* At least align on page size */
if (alignment < PAGE_SIZE)
alignment = PAGE_SIZE;
r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR(
"Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
return r;
}
*obj = &qbo->tbo.base;
mutex_lock(&qdev->gem.mutex);
list_add_tail(&qbo->list, &qdev->gem.objects);
mutex_unlock(&qdev->gem.mutex);
return 0;
}
/*
 * If the caller passed a valid gobj pointer, it is responsible for
 * calling drm_gem_object_put() when it no longer needs to access the
 * object.
*
* If gobj is NULL, it is handled internally.
*/
int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
struct drm_file *file_priv,
u32 domain,
size_t size,
struct qxl_surface *surf,
struct drm_gem_object **gobj,
uint32_t *handle)
{
int r;
struct drm_gem_object *local_gobj;
BUG_ON(!handle);
r = qxl_gem_object_create(qdev, size, 0,
domain,
false, false, surf,
&local_gobj);
if (r)
return -ENOMEM;
r = drm_gem_handle_create(file_priv, local_gobj, handle);
if (r)
return r;
if (gobj)
*gobj = local_gobj;
else
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(local_gobj);
return 0;
}
int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
return 0;
}
void qxl_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
}
void qxl_gem_init(struct qxl_device *qdev)
{
INIT_LIST_HEAD(&qdev->gem.objects);
}
void qxl_gem_fini(struct qxl_device *qdev)
{
qxl_bo_force_delete(qdev);
}
| linux-master | drivers/gpu/drm/qxl/qxl_gem.c |
/*
* Copyright 2014 Canonical
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Andreas Pokorny
*/
#include "qxl_drv.h"
#include "qxl_object.h"
/* Empty implementations, as there should not be any other driver for a
 * virtual device that might share buffers with qxl */
int qxl_gem_prime_pin(struct drm_gem_object *obj)
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
return qxl_bo_pin(bo);
}
void qxl_gem_prime_unpin(struct drm_gem_object *obj)
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
qxl_bo_unpin(bo);
}
struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
return ERR_PTR(-ENOSYS);
}
struct drm_gem_object *qxl_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *table)
{
return ERR_PTR(-ENOSYS);
}
int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
int ret;
ret = qxl_bo_vmap_locked(bo, map);
if (ret < 0)
return ret;
return 0;
}
void qxl_gem_prime_vunmap(struct drm_gem_object *obj,
struct iosys_map *map)
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
qxl_bo_vunmap_locked(bo);
}
| linux-master | drivers/gpu/drm/qxl/qxl_prime.c |
/* qxl_drv.c -- QXL driver -*- linux-c -*-
*
* Copyright 2011 Red Hat, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Dave Airlie <[email protected]>
* Alon Levy <[email protected]>
*/
#include "qxl_drv.h"
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <drm/drm.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include "qxl_object.h"
static const struct pci_device_id pciidlist[] = {
{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
0xffff00, 0 },
{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8,
0xffff00, 0 },
{ 0, 0, 0 },
};
MODULE_DEVICE_TABLE(pci, pciidlist);
static int qxl_modeset = -1;
int qxl_num_crtc = 4;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, qxl_modeset, int, 0400);
MODULE_PARM_DESC(num_heads, "Number of virtual crtcs to expose (default 4)");
module_param_named(num_heads, qxl_num_crtc, int, 0400);
static struct drm_driver qxl_driver;
static struct pci_driver qxl_pci_driver;
static bool is_vga(struct pci_dev *pdev)
{
return pdev->class == PCI_CLASS_DISPLAY_VGA << 8;
}
static int
qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct qxl_device *qdev;
int ret;
if (pdev->revision < 4) {
DRM_ERROR("qxl too old, doesn't support client_monitors_config,"
" use xf86-video-qxl in user mode");
return -EINVAL; /* TODO: ENODEV ? */
}
qdev = devm_drm_dev_alloc(&pdev->dev, &qxl_driver,
struct qxl_device, ddev);
if (IS_ERR(qdev)) {
pr_err("Unable to init drm dev");
return -ENOMEM;
}
ret = pci_enable_device(pdev);
if (ret)
return ret;
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &qxl_driver);
if (ret)
goto disable_pci;
if (is_vga(pdev) && pdev->revision < 5) {
ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);
if (ret) {
DRM_ERROR("can't get legacy vga ioports\n");
goto disable_pci;
}
}
ret = qxl_device_init(qdev, pdev);
if (ret)
goto put_vga;
ret = qxl_modeset_init(qdev);
if (ret)
goto unload;
drm_kms_helper_poll_init(&qdev->ddev);
/* Complete initialization. */
ret = drm_dev_register(&qdev->ddev, ent->driver_data);
if (ret)
goto modeset_cleanup;
drm_fbdev_generic_setup(&qdev->ddev, 32);
return 0;
modeset_cleanup:
qxl_modeset_fini(qdev);
unload:
qxl_device_fini(qdev);
put_vga:
if (is_vga(pdev) && pdev->revision < 5)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
disable_pci:
pci_disable_device(pdev);
return ret;
}
static void qxl_drm_release(struct drm_device *dev)
{
struct qxl_device *qdev = to_qxl(dev);
/*
* TODO: qxl_device_fini() call should be in qxl_pci_remove(),
* reordering qxl_modeset_fini() + qxl_device_fini() calls is
* non-trivial though.
*/
qxl_modeset_fini(qdev);
qxl_device_fini(qdev);
}
static void
qxl_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
drm_atomic_helper_shutdown(dev);
if (is_vga(pdev) && pdev->revision < 5)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
}
DEFINE_DRM_GEM_FOPS(qxl_fops);
static int qxl_drm_freeze(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct qxl_device *qdev = to_qxl(dev);
int ret;
ret = drm_mode_config_helper_suspend(dev);
if (ret)
return ret;
qxl_destroy_monitors_object(qdev);
qxl_surf_evict(qdev);
qxl_vram_evict(qdev);
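/* drain the command ring, then garbage-collect until the release ring is idle */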
while (!qxl_check_idle(qdev->command_ring));
while (!qxl_check_idle(qdev->release_ring))
qxl_queue_garbage_collect(qdev, 1);
pci_save_state(pdev);
return 0;
}
static int qxl_drm_resume(struct drm_device *dev, bool thaw)
{
struct qxl_device *qdev = to_qxl(dev);
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
if (!thaw) {
qxl_reinit_memslots(qdev);
}
qxl_create_monitors_object(qdev);
return drm_mode_config_helper_resume(dev);
}
static int qxl_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int error;
error = qxl_drm_freeze(drm_dev);
if (error)
return error;
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
static int qxl_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct qxl_device *qdev = to_qxl(drm_dev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
if (pci_enable_device(pdev)) {
return -EIO;
}
qxl_io_reset(qdev);
return qxl_drm_resume(drm_dev, false);
}
static int qxl_pm_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
return qxl_drm_resume(drm_dev, true);
}
static int qxl_pm_freeze(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
return qxl_drm_freeze(drm_dev);
}
static int qxl_pm_restore(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct qxl_device *qdev = to_qxl(drm_dev);
qxl_io_reset(qdev);
return qxl_drm_resume(drm_dev, false);
}
static const struct dev_pm_ops qxl_pm_ops = {
.suspend = qxl_pm_suspend,
.resume = qxl_pm_resume,
.freeze = qxl_pm_freeze,
.thaw = qxl_pm_thaw,
.poweroff = qxl_pm_freeze,
.restore = qxl_pm_restore,
};
static struct pci_driver qxl_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = qxl_pci_probe,
.remove = qxl_pci_remove,
.driver.pm = &qxl_pm_ops,
};
static const struct drm_ioctl_desc qxl_ioctls[] = {
DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl, DRM_AUTH),
};
static struct drm_driver qxl_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.dumb_create = qxl_mode_dumb_create,
.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
#endif
.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
.fops = &qxl_fops,
.ioctls = qxl_ioctls,
.num_ioctls = ARRAY_SIZE(qxl_ioctls),
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = 0,
.minor = 1,
.patchlevel = 0,
.release = qxl_drm_release,
};
drm_module_pci_driver_if_modeset(qxl_pci_driver, qxl_modeset);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
| linux-master | drivers/gpu/drm/qxl/qxl_drv.c |
/*
* Copyright 2011 Red Hat, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/delay.h>
#include <trace/events/dma_fence.h>
#include "qxl_drv.h"
#include "qxl_object.h"
/*
* drawable cmd cache - allocate a bunch of VRAM pages, suballocate
* into 256 byte chunks for now - gives 16 cmds per page.
*
* use an ida to index into the chunks?
*/
/* manage releasables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
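/*
 * Index 0 is used for drawable releases, 1 for surface commands and
 * 2 for cursor commands; see the cur_idx selection in
 * qxl_alloc_release_reserved().
 */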
static const char *qxl_get_driver_name(struct dma_fence *fence)
{
return "qxl";
}
static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
return "release";
}
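/*
 * Nudge the host with a notify-oom each time the wait condition is
 * re-evaluated so it flushes the release ring, then sleep on
 * release_event until the fence signals or the timeout expires.
 */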
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct qxl_device *qdev;
unsigned long cur, end = jiffies + timeout;
qdev = container_of(fence->lock, struct qxl_device, release_lock);
if (!wait_event_timeout(qdev->release_event,
(dma_fence_is_signaled(fence) ||
(qxl_io_notify_oom(qdev), 0)),
timeout))
return 0;
cur = jiffies;
if (time_after(cur, end))
return 0;
return end - cur;
}
static const struct dma_fence_ops qxl_fence_ops = {
.get_driver_name = qxl_get_driver_name,
.get_timeline_name = qxl_get_timeline_name,
.wait = qxl_fence_wait,
};
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
struct qxl_release **ret)
{
struct qxl_release *release;
int handle;
size_t size = sizeof(*release);
release = kmalloc(size, GFP_KERNEL);
if (!release) {
DRM_ERROR("Out of memory\n");
return -ENOMEM;
}
release->base.ops = NULL;
release->type = type;
release->release_offset = 0;
release->surface_release_id = 0;
INIT_LIST_HEAD(&release->bos);
idr_preload(GFP_KERNEL);
spin_lock(&qdev->release_idr_lock);
handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
release->base.seqno = ++qdev->release_seqno;
spin_unlock(&qdev->release_idr_lock);
idr_preload_end();
if (handle < 0) {
kfree(release);
*ret = NULL;
return handle;
}
*ret = release;
DRM_DEBUG_DRIVER("allocated release %d\n", handle);
release->id = handle;
return handle;
}
static void
qxl_release_free_list(struct qxl_release *release)
{
while (!list_empty(&release->bos)) {
struct qxl_bo_list *entry;
struct qxl_bo *bo;
entry = container_of(release->bos.next,
struct qxl_bo_list, tv.head);
bo = to_qxl_bo(entry->tv.bo);
qxl_bo_unref(&bo);
list_del(&entry->tv.head);
kfree(entry);
}
release->release_bo = NULL;
}
void
qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release)
{
DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);
if (release->surface_release_id)
qxl_surface_id_dealloc(qdev, release->surface_release_id);
spin_lock(&qdev->release_idr_lock);
idr_remove(&qdev->release_idr, release->id);
spin_unlock(&qdev->release_idr_lock);
if (release->base.ops) {
WARN_ON(list_empty(&release->bos));
qxl_release_free_list(release);
dma_fence_signal(&release->base);
dma_fence_put(&release->base);
} else {
qxl_release_free_list(release);
kfree(release);
}
atomic_dec(&qdev->release_count);
}
static int qxl_release_bo_alloc(struct qxl_device *qdev,
struct qxl_bo **bo,
u32 priority)
{
/* pin release bo's; they are too messy to evict */
return qxl_bo_create(qdev, PAGE_SIZE, false, true,
QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
}
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
struct qxl_bo_list *entry;
list_for_each_entry(entry, &release->bos, tv.head) {
if (entry->tv.bo == &bo->tbo)
return 0;
}
entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
if (!entry)
return -ENOMEM;
qxl_bo_ref(bo);
entry->tv.bo = &bo->tbo;
entry->tv.num_shared = 0;
list_add_tail(&entry->tv.head, &release->bos);
return 0;
}
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
struct ttm_operation_ctx ctx = { true, false };
int ret;
if (!bo->tbo.pin_count) {
qxl_ttm_placement_from_domain(bo, bo->type);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
return ret;
}
ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
if (ret)
return ret;
/* allocate a surface for reserved + validated buffers */
ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
if (ret)
return ret;
return 0;
}
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
int ret;
struct qxl_bo_list *entry;
/* if there is only one object on the release, it is the release itself;
   since these objects are pinned, there is no need to reserve */
if (list_is_singular(&release->bos))
return 0;
ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
!no_intr, NULL);
if (ret)
return ret;
list_for_each_entry(entry, &release->bos, tv.head) {
struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
ret = qxl_release_validate_bo(bo);
if (ret) {
ttm_eu_backoff_reservation(&release->ticket, &release->bos);
return ret;
}
}
return 0;
}
void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
/* if there is only one object on the release, it is the release itself;
   since these objects are pinned, there is no need to reserve */
if (list_is_singular(&release->bos))
return;
ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
enum qxl_surface_cmd_type surface_cmd_type,
struct qxl_release *create_rel,
struct qxl_release **release)
{
if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
int idr_ret;
struct qxl_bo *bo;
union qxl_release_info *info;
/* stash the release after the create command */
idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
if (idr_ret < 0)
return idr_ret;
bo = create_rel->release_bo;
(*release)->release_bo = bo;
(*release)->release_offset = create_rel->release_offset + 64;
qxl_release_list_add(*release, bo);
info = qxl_release_map(qdev, *release);
info->id = idr_ret;
qxl_release_unmap(qdev, *release, info);
return 0;
}
return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
QXL_RELEASE_SURFACE_CMD, release, NULL);
}
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
int type, struct qxl_release **release,
struct qxl_bo **rbo)
{
struct qxl_bo *bo, *free_bo = NULL;
int idr_ret;
int ret = 0;
union qxl_release_info *info;
int cur_idx;
u32 priority;
if (type == QXL_RELEASE_DRAWABLE) {
cur_idx = 0;
priority = 0;
} else if (type == QXL_RELEASE_SURFACE_CMD) {
cur_idx = 1;
priority = 1;
} else if (type == QXL_RELEASE_CURSOR_CMD) {
cur_idx = 2;
priority = 1;
} else {
DRM_ERROR("got illegal type: %d\n", type);
return -EINVAL;
}
idr_ret = qxl_release_alloc(qdev, type, release);
if (idr_ret < 0) {
if (rbo)
*rbo = NULL;
return idr_ret;
}
atomic_inc(&qdev->release_count);
mutex_lock(&qdev->release_mutex);
if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
free_bo = qdev->current_release_bo[cur_idx];
qdev->current_release_bo_offset[cur_idx] = 0;
qdev->current_release_bo[cur_idx] = NULL;
}
if (!qdev->current_release_bo[cur_idx]) {
ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
if (ret) {
mutex_unlock(&qdev->release_mutex);
if (free_bo) {
qxl_bo_unpin(free_bo);
qxl_bo_unref(&free_bo);
}
qxl_release_free(qdev, *release);
return ret;
}
}
bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
(*release)->release_bo = bo;
(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
qdev->current_release_bo_offset[cur_idx]++;
if (rbo)
*rbo = bo;
mutex_unlock(&qdev->release_mutex);
if (free_bo) {
qxl_bo_unpin(free_bo);
qxl_bo_unref(&free_bo);
}
ret = qxl_release_list_add(*release, bo);
qxl_bo_unref(&bo);
if (ret) {
qxl_release_free(qdev, *release);
return ret;
}
info = qxl_release_map(qdev, *release);
info->id = idr_ret;
qxl_release_unmap(qdev, *release, info);
return ret;
}
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
uint64_t id)
{
struct qxl_release *release;
spin_lock(&qdev->release_idr_lock);
release = idr_find(&qdev->release_idr, id);
spin_unlock(&qdev->release_idr_lock);
if (!release) {
DRM_ERROR("failed to find id in release_idr\n");
return NULL;
}
return release;
}
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
struct qxl_release *release)
{
void *ptr;
union qxl_release_info *info;
struct qxl_bo *bo = release->release_bo;
ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
if (!ptr)
return NULL;
info = ptr + (release->release_offset & ~PAGE_MASK);
return info;
}
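/*
* Worked example of the mapping arithmetic above (illustrative only): with a
* 4 KiB PAGE_SIZE and release_offset == 0x1234, the page at offset 0x1000 is
* kmapped and info points at ptr + 0x234 inside that page; qxl_release_unmap()
* below reverses the same offset to recover the page pointer.
*/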
void qxl_release_unmap(struct qxl_device *qdev,
struct qxl_release *release,
union qxl_release_info *info)
{
struct qxl_bo *bo = release->release_bo;
void *ptr;
ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
struct ttm_buffer_object *bo;
struct ttm_device *bdev;
struct ttm_validate_buffer *entry;
struct qxl_device *qdev;
/* if there is only one object on the release, it is the release itself;
since these objects are pinned, there is no need to reserve */
if (list_is_singular(&release->bos) || list_empty(&release->bos))
return;
bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
bdev = bo->bdev;
qdev = container_of(bdev, struct qxl_device, mman.bdev);
/*
* Since we never really allocated a context and we don't want to conflict,
* set the highest bits. This will break if we really allow exporting of dma-bufs.
*/
dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
dma_resv_add_fence(bo->base.resv, &release->base,
DMA_RESV_USAGE_READ);
ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
ww_acquire_fini(&release->ticket);
}
| linux-master | drivers/gpu/drm/qxl/qxl_release.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2017
*
* Authors: Philippe Cornu <[email protected]>
* Yannick Fertre <[email protected]>
* Fabien Dessenne <[email protected]>
* Mickael Reulier <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include <video/videomode.h>
#include "ltdc.h"
#define NB_CRTC 1
#define CRTC_MASK GENMASK(NB_CRTC - 1, 0)
#define MAX_IRQ 4
#define HWVER_10200 0x010200
#define HWVER_10300 0x010300
#define HWVER_20101 0x020101
#define HWVER_40100 0x040100
/*
* The address of some registers depends on the HW version: such registers have
* an extra offset specified with layer_ofs.
*/
#define LAY_OFS_0 0x80
#define LAY_OFS_1 0x100
#define LAY_OFS (ldev->caps.layer_ofs)
/* Global register offsets */
#define LTDC_IDR 0x0000 /* IDentification */
#define LTDC_LCR 0x0004 /* Layer Count */
#define LTDC_SSCR 0x0008 /* Synchronization Size Configuration */
#define LTDC_BPCR 0x000C /* Back Porch Configuration */
#define LTDC_AWCR 0x0010 /* Active Width Configuration */
#define LTDC_TWCR 0x0014 /* Total Width Configuration */
#define LTDC_GCR 0x0018 /* Global Control */
#define LTDC_GC1R 0x001C /* Global Configuration 1 */
#define LTDC_GC2R 0x0020 /* Global Configuration 2 */
#define LTDC_SRCR 0x0024 /* Shadow Reload Configuration */
#define LTDC_GACR 0x0028 /* GAmma Correction */
#define LTDC_BCCR 0x002C /* Background Color Configuration */
#define LTDC_IER 0x0034 /* Interrupt Enable */
#define LTDC_ISR 0x0038 /* Interrupt Status */
#define LTDC_ICR 0x003C /* Interrupt Clear */
#define LTDC_LIPCR 0x0040 /* Line Interrupt Position Conf. */
#define LTDC_CPSR 0x0044 /* Current Position Status */
#define LTDC_CDSR 0x0048 /* Current Display Status */
#define LTDC_EDCR 0x0060 /* External Display Control */
#define LTDC_CCRCR 0x007C /* Computed CRC value */
#define LTDC_FUT 0x0090 /* Fifo underrun Threshold */
/* Layer register offsets */
#define LTDC_L1C0R (ldev->caps.layer_regs[0]) /* L1 configuration 0 */
#define LTDC_L1C1R (ldev->caps.layer_regs[1]) /* L1 configuration 1 */
#define LTDC_L1RCR (ldev->caps.layer_regs[2]) /* L1 reload control */
#define LTDC_L1CR (ldev->caps.layer_regs[3]) /* L1 control register */
#define LTDC_L1WHPCR (ldev->caps.layer_regs[4]) /* L1 window horizontal position configuration */
#define LTDC_L1WVPCR (ldev->caps.layer_regs[5]) /* L1 window vertical position configuration */
#define LTDC_L1CKCR (ldev->caps.layer_regs[6]) /* L1 color keying configuration */
#define LTDC_L1PFCR (ldev->caps.layer_regs[7]) /* L1 pixel format configuration */
#define LTDC_L1CACR (ldev->caps.layer_regs[8]) /* L1 constant alpha configuration */
#define LTDC_L1DCCR (ldev->caps.layer_regs[9]) /* L1 default color configuration */
#define LTDC_L1BFCR (ldev->caps.layer_regs[10]) /* L1 blending factors configuration */
#define LTDC_L1BLCR (ldev->caps.layer_regs[11]) /* L1 burst length configuration */
#define LTDC_L1PCR (ldev->caps.layer_regs[12]) /* L1 planar configuration */
#define LTDC_L1CFBAR (ldev->caps.layer_regs[13]) /* L1 color frame buffer address */
#define LTDC_L1CFBLR (ldev->caps.layer_regs[14]) /* L1 color frame buffer length */
#define LTDC_L1CFBLNR (ldev->caps.layer_regs[15]) /* L1 color frame buffer line number */
#define LTDC_L1AFBA0R (ldev->caps.layer_regs[16]) /* L1 auxiliary frame buffer address 0 */
#define LTDC_L1AFBA1R (ldev->caps.layer_regs[17]) /* L1 auxiliary frame buffer address 1 */
#define LTDC_L1AFBLR (ldev->caps.layer_regs[18]) /* L1 auxiliary frame buffer length */
#define LTDC_L1AFBLNR (ldev->caps.layer_regs[19]) /* L1 auxiliary frame buffer line number */
#define LTDC_L1CLUTWR (ldev->caps.layer_regs[20]) /* L1 CLUT write */
#define LTDC_L1CYR0R (ldev->caps.layer_regs[21]) /* L1 Conversion YCbCr RGB 0 */
#define LTDC_L1CYR1R (ldev->caps.layer_regs[22]) /* L1 Conversion YCbCr RGB 1 */
#define LTDC_L1FPF0R (ldev->caps.layer_regs[23]) /* L1 Flexible Pixel Format 0 */
#define LTDC_L1FPF1R (ldev->caps.layer_regs[24]) /* L1 Flexible Pixel Format 1 */
/* Bit definitions */
#define SSCR_VSH GENMASK(10, 0) /* Vertical Synchronization Height */
#define SSCR_HSW GENMASK(27, 16) /* Horizontal Synchronization Width */
#define BPCR_AVBP GENMASK(10, 0) /* Accumulated Vertical Back Porch */
#define BPCR_AHBP GENMASK(27, 16) /* Accumulated Horizontal Back Porch */
#define AWCR_AAH GENMASK(10, 0) /* Accumulated Active Height */
#define AWCR_AAW GENMASK(27, 16) /* Accumulated Active Width */
#define TWCR_TOTALH GENMASK(10, 0) /* TOTAL Height */
#define TWCR_TOTALW GENMASK(27, 16) /* TOTAL Width */
#define GCR_LTDCEN BIT(0) /* LTDC ENable */
#define GCR_DEN BIT(16) /* Dither ENable */
#define GCR_CRCEN BIT(19) /* CRC ENable */
#define GCR_PCPOL BIT(28) /* Pixel Clock POLarity-Inverted */
#define GCR_DEPOL BIT(29) /* Data Enable POLarity-High */
#define GCR_VSPOL BIT(30) /* Vertical Synchro POLarity-High */
#define GCR_HSPOL BIT(31) /* Horizontal Synchro POLarity-High */
#define GC1R_WBCH GENMASK(3, 0) /* Width of Blue CHannel output */
#define GC1R_WGCH GENMASK(7, 4) /* Width of Green Channel output */
#define GC1R_WRCH GENMASK(11, 8) /* Width of Red Channel output */
#define GC1R_PBEN BIT(12) /* Precise Blending ENable */
#define GC1R_DT GENMASK(15, 14) /* Dithering Technique */
#define GC1R_GCT GENMASK(19, 17) /* Gamma Correction Technique */
#define GC1R_SHREN BIT(21) /* SHadow Registers ENabled */
#define GC1R_BCP BIT(22) /* Background Colour Programmable */
#define GC1R_BBEN BIT(23) /* Background Blending ENabled */
#define GC1R_LNIP BIT(24) /* Line Number IRQ Position */
#define GC1R_TP BIT(25) /* Timing Programmable */
#define GC1R_IPP BIT(26) /* IRQ Polarity Programmable */
#define GC1R_SPP BIT(27) /* Sync Polarity Programmable */
#define GC1R_DWP BIT(28) /* Dither Width Programmable */
#define GC1R_STREN BIT(29) /* STatus Registers ENabled */
#define GC1R_BMEN BIT(31) /* Blind Mode ENabled */
#define GC2R_EDCA BIT(0) /* External Display Control Ability */
#define GC2R_STSAEN BIT(1) /* Slave Timing Sync Ability ENabled */
#define GC2R_DVAEN BIT(2) /* Dual-View Ability ENabled */
#define GC2R_DPAEN BIT(3) /* Dual-Port Ability ENabled */
#define GC2R_BW GENMASK(6, 4) /* Bus Width (log2 of nb of bytes) */
#define GC2R_EDCEN BIT(7) /* External Display Control ENabled */
#define SRCR_IMR BIT(0) /* IMmediate Reload */
#define SRCR_VBR BIT(1) /* Vertical Blanking Reload */
#define BCCR_BCBLACK 0x00 /* Background Color BLACK */
#define BCCR_BCBLUE GENMASK(7, 0) /* Background Color BLUE */
#define BCCR_BCGREEN GENMASK(15, 8) /* Background Color GREEN */
#define BCCR_BCRED GENMASK(23, 16) /* Background Color RED */
#define BCCR_BCWHITE GENMASK(23, 0) /* Background Color WHITE */
#define IER_LIE BIT(0) /* Line Interrupt Enable */
#define IER_FUWIE BIT(1) /* Fifo Underrun Warning Interrupt Enable */
#define IER_TERRIE BIT(2) /* Transfer ERRor Interrupt Enable */
#define IER_RRIE BIT(3) /* Register Reload Interrupt Enable */
#define IER_FUEIE BIT(6) /* Fifo Underrun Error Interrupt Enable */
#define IER_CRCIE BIT(7) /* CRC Error Interrupt Enable */
#define CPSR_CYPOS GENMASK(15, 0) /* Current Y position */
#define ISR_LIF BIT(0) /* Line Interrupt Flag */
#define ISR_FUWIF BIT(1) /* Fifo Underrun Warning Interrupt Flag */
#define ISR_TERRIF BIT(2) /* Transfer ERRor Interrupt Flag */
#define ISR_RRIF BIT(3) /* Register Reload Interrupt Flag */
#define ISR_FUEIF BIT(6) /* Fifo Underrun Error Interrupt Flag */
#define ISR_CRCIF BIT(7) /* CRC Error Interrupt Flag */
#define EDCR_OCYEN BIT(25) /* Output Conversion to YCbCr 422: ENable */
#define EDCR_OCYSEL BIT(26) /* Output Conversion to YCbCr 422: SELection of the CCIR */
#define EDCR_OCYCO BIT(27) /* Output Conversion to YCbCr 422: Chrominance Order */
#define LXCR_LEN BIT(0) /* Layer ENable */
#define LXCR_COLKEN BIT(1) /* Color Keying Enable */
#define LXCR_CLUTEN BIT(4) /* Color Look-Up Table ENable */
#define LXCR_HMEN BIT(8) /* Horizontal Mirroring ENable */
#define LXWHPCR_WHSTPOS GENMASK(11, 0) /* Window Horizontal StarT POSition */
#define LXWHPCR_WHSPPOS GENMASK(27, 16) /* Window Horizontal StoP POSition */
#define LXWVPCR_WVSTPOS GENMASK(10, 0) /* Window Vertical StarT POSition */
#define LXWVPCR_WVSPPOS GENMASK(26, 16) /* Window Vertical StoP POSition */
#define LXPFCR_PF GENMASK(2, 0) /* Pixel Format */
#define PF_FLEXIBLE 0x7 /* Flexible Pixel Format selected */
#define LXCACR_CONSTA GENMASK(7, 0) /* CONSTant Alpha */
#define LXBFCR_BF2 GENMASK(2, 0) /* Blending Factor 2 */
#define LXBFCR_BF1 GENMASK(10, 8) /* Blending Factor 1 */
#define LXBFCR_BOR GENMASK(18, 16) /* Blending ORder */
#define LXCFBLR_CFBLL GENMASK(12, 0) /* Color Frame Buffer Line Length */
#define LXCFBLR_CFBP GENMASK(31, 16) /* Color Frame Buffer Pitch in bytes */
#define LXCFBLNR_CFBLN GENMASK(10, 0) /* Color Frame Buffer Line Number */
#define LXCR_C1R_YIA BIT(0) /* Ycbcr 422 Interleaved Ability */
#define LXCR_C1R_YSPA BIT(1) /* Ycbcr 420 Semi-Planar Ability */
#define LXCR_C1R_YFPA BIT(2) /* Ycbcr 420 Full-Planar Ability */
#define LXCR_C1R_SCA BIT(31) /* SCaling Ability */
#define LxPCR_YREN BIT(9) /* Y Rescale Enable for the color dynamic range */
#define LxPCR_OF BIT(8) /* Odd pixel First */
#define LxPCR_CBF BIT(7) /* CB component First */
#define LxPCR_YF BIT(6) /* Y component First */
#define LxPCR_YCM GENMASK(5, 4) /* Ycbcr Conversion Mode */
#define YCM_I 0x0 /* Interleaved 422 */
#define YCM_SP 0x1 /* Semi-Planar 420 */
#define YCM_FP 0x2 /* Full-Planar 420 */
#define LxPCR_YCEN BIT(3) /* YCbCr-to-RGB Conversion Enable */
#define LXRCR_IMR BIT(0) /* IMmediate Reload */
#define LXRCR_VBR BIT(1) /* Vertical Blanking Reload */
#define LXRCR_GRMSK BIT(2) /* Global (centralized) Reload MaSKed */
#define CLUT_SIZE 256
#define CONSTA_MAX 0xFF /* CONSTant Alpha MAX= 1.0 */
#define BF1_PAXCA 0x600 /* Pixel Alpha x Constant Alpha */
#define BF1_CA 0x400 /* Constant Alpha */
#define BF2_1PAXCA 0x007 /* 1 - (Pixel Alpha x Constant Alpha) */
#define BF2_1CA 0x005 /* 1 - Constant Alpha */
#define NB_PF 8 /* Max nb of HW pixel format */
#define FUT_DFT 128 /* Default value of fifo underrun threshold */
/*
* Skip the first CRC value, and also the second one in case CRC was enabled
* during the threaded irq, to be sure the reported CRC value is relevant for
* the frame.
*/
#define CRC_SKIP_FRAMES 2
enum ltdc_pix_fmt {
PF_NONE,
/* RGB formats */
PF_ARGB8888, /* ARGB [32 bits] */
PF_RGBA8888, /* RGBA [32 bits] */
PF_ABGR8888, /* ABGR [32 bits] */
PF_BGRA8888, /* BGRA [32 bits] */
PF_RGB888, /* RGB [24 bits] */
PF_BGR888, /* BGR [24 bits] */
PF_RGB565, /* RGB [16 bits] */
PF_BGR565, /* BGR [16 bits] */
PF_ARGB1555, /* ARGB A:1 bit RGB:15 bits [16 bits] */
PF_ARGB4444, /* ARGB A:4 bits R/G/B: 4 bits each [16 bits] */
/* Indexed formats */
PF_L8, /* Indexed 8 bits [8 bits] */
PF_AL44, /* Alpha:4 bits + indexed 4 bits [8 bits] */
PF_AL88 /* Alpha:8 bits + indexed 8 bits [16 bits] */
};
/* The index gives the encoding of the pixel format for an HW version */
static const enum ltdc_pix_fmt ltdc_pix_fmt_a0[NB_PF] = {
PF_ARGB8888, /* 0x00 */
PF_RGB888, /* 0x01 */
PF_RGB565, /* 0x02 */
PF_ARGB1555, /* 0x03 */
PF_ARGB4444, /* 0x04 */
PF_L8, /* 0x05 */
PF_AL44, /* 0x06 */
PF_AL88 /* 0x07 */
};
static const enum ltdc_pix_fmt ltdc_pix_fmt_a1[NB_PF] = {
PF_ARGB8888, /* 0x00 */
PF_RGB888, /* 0x01 */
PF_RGB565, /* 0x02 */
PF_RGBA8888, /* 0x03 */
PF_AL44, /* 0x04 */
PF_L8, /* 0x05 */
PF_ARGB1555, /* 0x06 */
PF_ARGB4444 /* 0x07 */
};
static const enum ltdc_pix_fmt ltdc_pix_fmt_a2[NB_PF] = {
PF_ARGB8888, /* 0x00 */
PF_ABGR8888, /* 0x01 */
PF_RGBA8888, /* 0x02 */
PF_BGRA8888, /* 0x03 */
PF_RGB565, /* 0x04 */
PF_BGR565, /* 0x05 */
PF_RGB888, /* 0x06 */
PF_NONE /* 0x07 */
};
static const u32 ltdc_drm_fmt_a0[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGB565,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_C8
};
static const u32 ltdc_drm_fmt_a1[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_C8
};
static const u32 ltdc_drm_fmt_a2[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_C8
};
static const u32 ltdc_drm_fmt_ycbcr_cp[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY
};
static const u32 ltdc_drm_fmt_ycbcr_sp[] = {
DRM_FORMAT_NV12,
DRM_FORMAT_NV21
};
static const u32 ltdc_drm_fmt_ycbcr_fp[] = {
DRM_FORMAT_YUV420,
DRM_FORMAT_YVU420
};
/* Layer register offsets */
static const u32 ltdc_layer_regs_a0[] = {
0x80, /* L1 configuration 0 */
0x00, /* not available */
0x00, /* not available */
0x84, /* L1 control register */
0x88, /* L1 window horizontal position configuration */
0x8c, /* L1 window vertical position configuration */
0x90, /* L1 color keying configuration */
0x94, /* L1 pixel format configuration */
0x98, /* L1 constant alpha configuration */
0x9c, /* L1 default color configuration */
0xa0, /* L1 blending factors configuration */
0x00, /* not available */
0x00, /* not available */
0xac, /* L1 color frame buffer address */
0xb0, /* L1 color frame buffer length */
0xb4, /* L1 color frame buffer line number */
0x00, /* not available */
0x00, /* not available */
0x00, /* not available */
0x00, /* not available */
0xc4, /* L1 CLUT write */
0x00, /* not available */
0x00, /* not available */
0x00, /* not available */
0x00 /* not available */
};
static const u32 ltdc_layer_regs_a1[] = {
0x80, /* L1 configuration 0 */
0x84, /* L1 configuration 1 */
0x00, /* L1 reload control */
0x88, /* L1 control register */
0x8c, /* L1 window horizontal position configuration */
0x90, /* L1 window vertical position configuration */
0x94, /* L1 color keying configuration */
0x98, /* L1 pixel format configuration */
0x9c, /* L1 constant alpha configuration */
0xa0, /* L1 default color configuration */
0xa4, /* L1 blending factors configuration */
0xa8, /* L1 burst length configuration */
0x00, /* not available */
0xac, /* L1 color frame buffer address */
0xb0, /* L1 color frame buffer length */
0xb4, /* L1 color frame buffer line number */
0xb8, /* L1 auxiliary frame buffer address 0 */
0xbc, /* L1 auxiliary frame buffer address 1 */
0xc0, /* L1 auxiliary frame buffer length */
0xc4, /* L1 auxiliary frame buffer line number */
0xc8, /* L1 CLUT write */
0x00, /* not available */
0x00, /* not available */
0x00, /* not available */
0x00 /* not available */
};
static const u32 ltdc_layer_regs_a2[] = {
0x100, /* L1 configuration 0 */
0x104, /* L1 configuration 1 */
0x108, /* L1 reload control */
0x10c, /* L1 control register */
0x110, /* L1 window horizontal position configuration */
0x114, /* L1 window vertical position configuration */
0x118, /* L1 color keying configuration */
0x11c, /* L1 pixel format configuration */
0x120, /* L1 constant alpha configuration */
0x124, /* L1 default color configuration */
0x128, /* L1 blending factors configuration */
0x12c, /* L1 burst length configuration */
0x130, /* L1 planar configuration */
0x134, /* L1 color frame buffer address */
0x138, /* L1 color frame buffer length */
0x13c, /* L1 color frame buffer line number */
0x140, /* L1 auxiliary frame buffer address 0 */
0x144, /* L1 auxiliary frame buffer address 1 */
0x148, /* L1 auxiliary frame buffer length */
0x14c, /* L1 auxiliary frame buffer line number */
0x150, /* L1 CLUT write */
0x16c, /* L1 Conversion YCbCr RGB 0 */
0x170, /* L1 Conversion YCbCr RGB 1 */
0x174, /* L1 Flexible Pixel Format 0 */
0x178 /* L1 Flexible Pixel Format 1 */
};
static const u64 ltdc_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static const struct regmap_config stm32_ltdc_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = sizeof(u32),
.max_register = 0x400,
.use_relaxed_mmio = true,
.cache_type = REGCACHE_NONE,
};
static const u32 ltdc_ycbcr2rgb_coeffs[DRM_COLOR_ENCODING_MAX][DRM_COLOR_RANGE_MAX][2] = {
[DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
0x02040199, /* (b_cb = 516 / r_cr = 409) */
0x006400D0 /* (g_cb = 100 / g_cr = 208) */
},
[DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_FULL_RANGE] = {
0x01C60167, /* (b_cb = 454 / r_cr = 359) */
0x005800B7 /* (g_cb = 88 / g_cr = 183) */
},
[DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
0x021D01CB, /* (b_cb = 541 / r_cr = 459) */
0x00370089 /* (g_cb = 55 / g_cr = 137) */
},
[DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_FULL_RANGE] = {
0x01DB0193, /* (b_cb = 475 / r_cr = 403) */
0x00300078 /* (g_cb = 48 / g_cr = 120) */
}
/* BT2020 not supported */
};
static inline struct ltdc_device *crtc_to_ltdc(struct drm_crtc *crtc)
{
return (struct ltdc_device *)crtc->dev->dev_private;
}
static inline struct ltdc_device *plane_to_ltdc(struct drm_plane *plane)
{
return (struct ltdc_device *)plane->dev->dev_private;
}
static inline struct ltdc_device *encoder_to_ltdc(struct drm_encoder *enc)
{
return (struct ltdc_device *)enc->dev->dev_private;
}
static inline enum ltdc_pix_fmt to_ltdc_pixelformat(u32 drm_fmt)
{
enum ltdc_pix_fmt pf;
switch (drm_fmt) {
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
pf = PF_ARGB8888;
break;
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XBGR8888:
pf = PF_ABGR8888;
break;
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_RGBX8888:
pf = PF_RGBA8888;
break;
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_BGRX8888:
pf = PF_BGRA8888;
break;
case DRM_FORMAT_RGB888:
pf = PF_RGB888;
break;
case DRM_FORMAT_BGR888:
pf = PF_BGR888;
break;
case DRM_FORMAT_RGB565:
pf = PF_RGB565;
break;
case DRM_FORMAT_BGR565:
pf = PF_BGR565;
break;
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
pf = PF_ARGB1555;
break;
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_XRGB4444:
pf = PF_ARGB4444;
break;
case DRM_FORMAT_C8:
pf = PF_L8;
break;
default:
pf = PF_NONE;
break;
/* Note: there is no DRM_FORMAT code for AL44 and AL88 */
}
return pf;
}
static inline u32 ltdc_set_flexible_pixel_format(struct drm_plane *plane, enum ltdc_pix_fmt pix_fmt)
{
struct ltdc_device *ldev = plane_to_ltdc(plane);
u32 lofs = plane->index * LAY_OFS, ret = PF_FLEXIBLE;
int psize, alen, apos, rlen, rpos, glen, gpos, blen, bpos;
switch (pix_fmt) {
case PF_BGR888:
psize = 3;
alen = 0; apos = 0; rlen = 8; rpos = 0;
glen = 8; gpos = 8; blen = 8; bpos = 16;
break;
case PF_ARGB1555:
psize = 2;
alen = 1; apos = 15; rlen = 5; rpos = 10;
glen = 5; gpos = 5; blen = 5; bpos = 0;
break;
case PF_ARGB4444:
psize = 2;
alen = 4; apos = 12; rlen = 4; rpos = 8;
glen = 4; gpos = 4; blen = 4; bpos = 0;
break;
case PF_L8:
psize = 1;
alen = 0; apos = 0; rlen = 8; rpos = 0;
glen = 8; gpos = 0; blen = 8; bpos = 0;
break;
case PF_AL44:
psize = 1;
alen = 4; apos = 4; rlen = 4; rpos = 0;
glen = 4; gpos = 0; blen = 4; bpos = 0;
break;
case PF_AL88:
psize = 2;
alen = 8; apos = 8; rlen = 8; rpos = 0;
glen = 8; gpos = 0; blen = 8; bpos = 0;
break;
default:
ret = NB_PF; /* error case, trace msg is handled by the caller */
break;
}
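/*
* Example of the register packing done below (illustrative only): for
* PF_ARGB4444 (psize = 2, alen = 4, apos = 12, rlen = 4, rpos = 8, glen = 4,
* gpos = 4, blen = 4, bpos = 0) the writes are
* LTDC_L1FPF0R = (4 << 14) + (8 << 9) + (4 << 5) + 12 = 0x1108C and
* LTDC_L1FPF1R = (2 << 18) + (4 << 14) + (0 << 9) + (4 << 5) + 4 = 0x90084.
*/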
if (ret == PF_FLEXIBLE) {
regmap_write(ldev->regmap, LTDC_L1FPF0R + lofs,
(rlen << 14) + (rpos << 9) + (alen << 5) + apos);
regmap_write(ldev->regmap, LTDC_L1FPF1R + lofs,
(psize << 18) + (blen << 14) + (bpos << 9) + (glen << 5) + gpos);
}
return ret;
}
/*
* All non-alpha color formats derived from native alpha color formats are
* characterized by an 'X' in their FourCC format code (e.g. 'XR24', the
* non-alpha variant of 'AR24').
*/
static inline u32 is_xrgb(u32 drm)
{
return ((drm & 0xFF) == 'X' || ((drm >> 8) & 0xFF) == 'X');
}
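/*
* Illustrative example: DRM_FORMAT_XRGB8888 has the FourCC 'XR24', so its
* lowest byte is 'X' and is_xrgb() returns true; DRM_FORMAT_RGBX8888 ('RX24')
* matches through the second byte instead.
*/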
static inline void ltdc_set_ycbcr_config(struct drm_plane *plane, u32 drm_pix_fmt)
{
struct ltdc_device *ldev = plane_to_ltdc(plane);
struct drm_plane_state *state = plane->state;
u32 lofs = plane->index * LAY_OFS;
u32 val;
switch (drm_pix_fmt) {
case DRM_FORMAT_YUYV:
val = (YCM_I << 4) | LxPCR_YF | LxPCR_CBF;
break;
case DRM_FORMAT_YVYU:
val = (YCM_I << 4) | LxPCR_YF;
break;
case DRM_FORMAT_UYVY:
val = (YCM_I << 4) | LxPCR_CBF;
break;
case DRM_FORMAT_VYUY:
val = (YCM_I << 4);
break;
case DRM_FORMAT_NV12:
val = (YCM_SP << 4) | LxPCR_CBF;
break;
case DRM_FORMAT_NV21:
val = (YCM_SP << 4);
break;
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
val = (YCM_FP << 4);
break;
default:
/* RGB or not a YCbCr supported format */
DRM_ERROR("Unsupported pixel format: %u\n", drm_pix_fmt);
return;
}
/* Enable limited range */
if (state->color_range == DRM_COLOR_YCBCR_LIMITED_RANGE)
val |= LxPCR_YREN;
/* enable ycbcr conversion */
val |= LxPCR_YCEN;
regmap_write(ldev->regmap, LTDC_L1PCR + lofs, val);
}
static inline void ltdc_set_ycbcr_coeffs(struct drm_plane *plane)
{
struct ltdc_device *ldev = plane_to_ltdc(plane);
struct drm_plane_state *state = plane->state;
enum drm_color_encoding enc = state->color_encoding;
enum drm_color_range ran = state->color_range;
u32 lofs = plane->index * LAY_OFS;
if (enc != DRM_COLOR_YCBCR_BT601 && enc != DRM_COLOR_YCBCR_BT709) {
DRM_ERROR("color encoding %d not supported, use bt601 by default\n", enc);
/* set by default color encoding to DRM_COLOR_YCBCR_BT601 */
enc = DRM_COLOR_YCBCR_BT601;
}
if (ran != DRM_COLOR_YCBCR_LIMITED_RANGE && ran != DRM_COLOR_YCBCR_FULL_RANGE) {
DRM_ERROR("color range %d not supported, use limited range by default\n", ran);
/* set by default color range to DRM_COLOR_YCBCR_LIMITED_RANGE */
ran = DRM_COLOR_YCBCR_LIMITED_RANGE;
}
DRM_DEBUG_DRIVER("Color encoding=%d, range=%d\n", enc, ran);
regmap_write(ldev->regmap, LTDC_L1CYR0R + lofs,
ltdc_ycbcr2rgb_coeffs[enc][ran][0]);
regmap_write(ldev->regmap, LTDC_L1CYR1R + lofs,
ltdc_ycbcr2rgb_coeffs[enc][ran][1]);
}
static inline void ltdc_irq_crc_handle(struct ltdc_device *ldev,
struct drm_crtc *crtc)
{
u32 crc;
int ret;
if (ldev->crc_skip_count < CRC_SKIP_FRAMES) {
ldev->crc_skip_count++;
return;
}
/* Get the CRC of the frame */
ret = regmap_read(ldev->regmap, LTDC_CCRCR, &crc);
if (ret)
return;
/* Report to DRM the CRC (hw dependent feature) */
drm_crtc_add_crc_entry(crtc, true, drm_crtc_accurate_vblank_count(crtc), &crc);
}
static irqreturn_t ltdc_irq_thread(int irq, void *arg)
{
struct drm_device *ddev = arg;
struct ltdc_device *ldev = ddev->dev_private;
struct drm_crtc *crtc = drm_crtc_from_index(ddev, 0);
/* Line IRQ : trigger the vblank event */
if (ldev->irq_status & ISR_LIF) {
drm_crtc_handle_vblank(crtc);
/* Report the frame CRC only when CRC capture is active */
if (ldev->crc_active)
ltdc_irq_crc_handle(ldev, crtc);
}
mutex_lock(&ldev->err_lock);
if (ldev->irq_status & ISR_TERRIF)
ldev->transfer_err++;
if (ldev->irq_status & ISR_FUEIF)
ldev->fifo_err++;
if (ldev->irq_status & ISR_FUWIF)
ldev->fifo_warn++;
mutex_unlock(&ldev->err_lock);
return IRQ_HANDLED;
}
static irqreturn_t ltdc_irq(int irq, void *arg)
{
struct drm_device *ddev = arg;
struct ltdc_device *ldev = ddev->dev_private;
/*
* Read & Clear the interrupt status
* In order to write / read registers in this critical section
* very quickly, the regmap functions are not used.
*/
ldev->irq_status = readl_relaxed(ldev->regs + LTDC_ISR);
writel_relaxed(ldev->irq_status, ldev->regs + LTDC_ICR);
return IRQ_WAKE_THREAD;
}
/*
* DRM_CRTC
*/
static void ltdc_crtc_update_clut(struct drm_crtc *crtc)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_color_lut *lut;
u32 val;
int i;
if (!crtc->state->color_mgmt_changed || !crtc->state->gamma_lut)
return;
lut = (struct drm_color_lut *)crtc->state->gamma_lut->data;
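/*
* The loop below packs each 16-bit DRM gamma entry into an 8-bit-per-channel
* CLUT word: bits 31..24 hold the CLUT index, 23..16 the red MSB, 15..8 the
* green MSB and 7..0 the blue MSB. Illustrative example: index 5 with
* red = green = blue = 0xFFFF gives 0x05FFFFFF.
*/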
for (i = 0; i < CLUT_SIZE; i++, lut++) {
val = ((lut->red << 8) & 0xff0000) | (lut->green & 0xff00) |
(lut->blue >> 8) | (i << 24);
regmap_write(ldev->regmap, LTDC_L1CLUTWR, val);
}
}
static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
DRM_DEBUG_DRIVER("\n");
pm_runtime_get_sync(ddev->dev);
/* Sets the background color value */
regmap_write(ldev->regmap, LTDC_BCCR, BCCR_BCBLACK);
/* Enable IRQ */
regmap_set_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_RRIE | IER_TERRIE);
/* Commit shadow registers = update planes at next vblank */
if (!ldev->caps.plane_reg_shadow)
regmap_set_bits(ldev->regmap, LTDC_SRCR, SRCR_VBR);
drm_crtc_vblank_on(crtc);
}
static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
int layer_index = 0;
DRM_DEBUG_DRIVER("\n");
drm_crtc_vblank_off(crtc);
/* Disable all layers */
for (layer_index = 0; layer_index < ldev->caps.nb_layers; layer_index++)
regmap_write_bits(ldev->regmap, LTDC_L1CR + layer_index * LAY_OFS,
LXCR_CLUTEN | LXCR_LEN, 0);
/* disable IRQ */
regmap_clear_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_RRIE | IER_TERRIE);
/* immediately commit disable of layers before switching off LTDC */
if (!ldev->caps.plane_reg_shadow)
regmap_set_bits(ldev->regmap, LTDC_SRCR, SRCR_IMR);
pm_runtime_put_sync(ddev->dev);
/* clear interrupt error counters */
mutex_lock(&ldev->err_lock);
ldev->transfer_err = 0;
ldev->fifo_err = 0;
ldev->fifo_warn = 0;
mutex_unlock(&ldev->err_lock);
}
#define CLK_TOLERANCE_HZ 50
static enum drm_mode_status
ltdc_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
int target = mode->clock * 1000;
int target_min = target - CLK_TOLERANCE_HZ;
int target_max = target + CLK_TOLERANCE_HZ;
int result;
result = clk_round_rate(ldev->pixel_clk, target);
DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);
/* Filter modes according to the max frequency supported by the pads */
if (result > ldev->caps.pad_max_freq_hz)
return MODE_CLOCK_HIGH;
/*
* Accept all "preferred" modes:
* - this is important for panels because panel clock tolerances are
* bigger than hdmi ones and there is no reason not to accept them
* (the fps may vary a little but it is not a problem).
* - the hdmi preferred mode will be accepted too, but userland will
* be able to use other hdmi "valid" modes if necessary.
*/
if (mode->type & DRM_MODE_TYPE_PREFERRED)
return MODE_OK;
/*
* Filter modes according to the clock value, particularly useful for
* hdmi modes that require precise pixel clocks.
*/
if (result < target_min || result > target_max)
return MODE_CLOCK_RANGE;
return MODE_OK;
}
static bool ltdc_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
int rate = mode->clock * 1000;
if (clk_set_rate(ldev->pixel_clk, rate) < 0) {
DRM_ERROR("Cannot set rate (%dHz) for pixel clk\n", rate);
return false;
}
adjusted_mode->clock = clk_get_rate(ldev->pixel_clk) / 1000;
DRM_DEBUG_DRIVER("requested clock %dkHz, adjusted clock %dkHz\n",
mode->clock, adjusted_mode->clock);
return true;
}
static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
struct drm_connector_list_iter iter;
struct drm_connector *connector = NULL;
struct drm_encoder *encoder = NULL, *en_iter;
struct drm_bridge *bridge = NULL, *br_iter;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
u32 total_width, total_height;
u32 bus_formats = MEDIA_BUS_FMT_RGB888_1X24;
u32 bus_flags = 0;
u32 val;
int ret;
/* get encoder from crtc */
drm_for_each_encoder(en_iter, ddev)
if (en_iter->crtc == crtc) {
encoder = en_iter;
break;
}
if (encoder) {
/* get bridge from encoder */
list_for_each_entry(br_iter, &encoder->bridge_chain, chain_node)
if (br_iter->encoder == encoder) {
bridge = br_iter;
break;
}
/* Get the connector from encoder */
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter)
if (connector->encoder == encoder)
break;
drm_connector_list_iter_end(&iter);
}
if (bridge && bridge->timings) {
bus_flags = bridge->timings->input_bus_flags;
} else if (connector) {
bus_flags = connector->display_info.bus_flags;
if (connector->display_info.num_bus_formats)
bus_formats = connector->display_info.bus_formats[0];
}
if (!pm_runtime_active(ddev->dev)) {
ret = pm_runtime_get_sync(ddev->dev);
if (ret) {
DRM_ERROR("Failed to set mode, cannot get sync\n");
return;
}
}
DRM_DEBUG_DRIVER("CRTC:%d mode:%s\n", crtc->base.id, mode->name);
DRM_DEBUG_DRIVER("Video mode: %dx%d", mode->hdisplay, mode->vdisplay);
DRM_DEBUG_DRIVER(" hfp %d hbp %d hsl %d vfp %d vbp %d vsl %d\n",
mode->hsync_start - mode->hdisplay,
mode->htotal - mode->hsync_end,
mode->hsync_end - mode->hsync_start,
mode->vsync_start - mode->vdisplay,
mode->vtotal - mode->vsync_end,
mode->vsync_end - mode->vsync_start);
/* Convert video timings to ltdc timings */
hsync = mode->hsync_end - mode->hsync_start - 1;
vsync = mode->vsync_end - mode->vsync_start - 1;
accum_hbp = mode->htotal - mode->hsync_start - 1;
accum_vbp = mode->vtotal - mode->vsync_start - 1;
accum_act_w = accum_hbp + mode->hdisplay;
accum_act_h = accum_vbp + mode->vdisplay;
total_width = mode->htotal - 1;
total_height = mode->vtotal - 1;
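/*
* Worked example with hypothetical timings (not a real panel): for an
* 800x480 mode with hsync_start = 810, hsync_end = 820 and htotal = 850,
* hsync = 820 - 810 - 1 = 9, accum_hbp = 850 - 810 - 1 = 39,
* accum_act_w = 39 + 800 = 839 and total_width = 849.
*/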
/* Configures the HS, VS, DE and PC polarities. Default Active Low */
val = 0;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
val |= GCR_HSPOL;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= GCR_VSPOL;
if (bus_flags & DRM_BUS_FLAG_DE_LOW)
val |= GCR_DEPOL;
if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
val |= GCR_PCPOL;
regmap_update_bits(ldev->regmap, LTDC_GCR,
GCR_HSPOL | GCR_VSPOL | GCR_DEPOL | GCR_PCPOL, val);
/* Set Synchronization size */
val = (hsync << 16) | vsync;
regmap_update_bits(ldev->regmap, LTDC_SSCR, SSCR_VSH | SSCR_HSW, val);
/* Set Accumulated Back porch */
val = (accum_hbp << 16) | accum_vbp;
regmap_update_bits(ldev->regmap, LTDC_BPCR, BPCR_AVBP | BPCR_AHBP, val);
/* Set Accumulated Active Width */
val = (accum_act_w << 16) | accum_act_h;
regmap_update_bits(ldev->regmap, LTDC_AWCR, AWCR_AAW | AWCR_AAH, val);
/* Set total width & height */
val = (total_width << 16) | total_height;
regmap_update_bits(ldev->regmap, LTDC_TWCR, TWCR_TOTALH | TWCR_TOTALW, val);
regmap_write(ldev->regmap, LTDC_LIPCR, (accum_act_h + 1));
/* Configure the output format (hw version dependent) */
if (ldev->caps.ycbcr_output) {
/* Input video dynamic_range & colorimetry */
int vic = drm_match_cea_mode(mode);
u32 val;
if (vic == 6 || vic == 7 || vic == 21 || vic == 22 ||
vic == 2 || vic == 3 || vic == 17 || vic == 18)
/* ITU-R BT.601 */
val = 0;
else
/* ITU-R BT.709 */
val = EDCR_OCYSEL;
switch (bus_formats) {
case MEDIA_BUS_FMT_YUYV8_1X16:
/* enable ycbcr output converter */
regmap_write(ldev->regmap, LTDC_EDCR, EDCR_OCYEN | val);
break;
case MEDIA_BUS_FMT_YVYU8_1X16:
/* enable ycbcr output converter & invert chrominance order */
regmap_write(ldev->regmap, LTDC_EDCR, EDCR_OCYEN | EDCR_OCYCO | val);
break;
default:
/* disable ycbcr output converter */
regmap_write(ldev->regmap, LTDC_EDCR, 0);
break;
}
}
}
static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
struct drm_pending_vblank_event *event = crtc->state->event;
DRM_DEBUG_ATOMIC("\n");
ltdc_crtc_update_clut(crtc);
/* Commit shadow registers = update planes at next vblank */
if (!ldev->caps.plane_reg_shadow)
regmap_set_bits(ldev->regmap, LTDC_SRCR, SRCR_VBR);
if (event) {
crtc->state->event = NULL;
spin_lock_irq(&ddev->event_lock);
if (drm_crtc_vblank_get(crtc) == 0)
drm_crtc_arm_vblank_event(crtc, event);
else
drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&ddev->event_lock);
}
}
static bool ltdc_crtc_get_scanout_position(struct drm_crtc *crtc,
bool in_vblank_irq,
int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
struct drm_device *ddev = crtc->dev;
struct ltdc_device *ldev = ddev->dev_private;
int line, vactive_start, vactive_end, vtotal;
if (stime)
*stime = ktime_get();
/* The active area starts after vsync + back porch and ends
* at vsync + back porch + display size.
* The total height also includes the front porch.
* We have 3 possible cases to handle:
* - line < vactive_start: vpos = line - vactive_start and will be
* negative
* - vactive_start < line < vactive_end: vpos = line - vactive_start
* and will be positive
* - line > vactive_end: vpos = line - vtotal - vactive_start
* and will be negative
*
* The computation for the first two cases is identical, so we can
* simplify the code and only test if line > vactive_end
*/
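/*
* Worked example with hypothetical register values: vactive_start = 39,
* vactive_end = 519 (480 active lines) and vtotal = 524. Then line 10 gives
* vpos = 10 - 39 = -29, line 300 gives vpos = 261, and line 522 gives
* vpos = 522 - 524 - 39 = -41.
*/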
if (pm_runtime_active(ddev->dev)) {
regmap_read(ldev->regmap, LTDC_CPSR, &line);
line &= CPSR_CYPOS;
regmap_read(ldev->regmap, LTDC_BPCR, &vactive_start);
vactive_start &= BPCR_AVBP;
regmap_read(ldev->regmap, LTDC_AWCR, &vactive_end);
vactive_end &= AWCR_AAH;
regmap_read(ldev->regmap, LTDC_TWCR, &vtotal);
vtotal &= TWCR_TOTALH;
if (line > vactive_end)
*vpos = line - vtotal - vactive_start;
else
*vpos = line - vactive_start;
} else {
*vpos = 0;
}
*hpos = 0;
if (etime)
*etime = ktime_get();
return true;
}
static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
.mode_valid = ltdc_crtc_mode_valid,
.mode_fixup = ltdc_crtc_mode_fixup,
.mode_set_nofb = ltdc_crtc_mode_set_nofb,
.atomic_flush = ltdc_crtc_atomic_flush,
.atomic_enable = ltdc_crtc_atomic_enable,
.atomic_disable = ltdc_crtc_atomic_disable,
.get_scanout_position = ltdc_crtc_get_scanout_position,
};
static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_crtc_state *state = crtc->state;
DRM_DEBUG_DRIVER("\n");
if (state->enable)
regmap_set_bits(ldev->regmap, LTDC_IER, IER_LIE);
else
return -EPERM;
return 0;
}
static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
DRM_DEBUG_DRIVER("\n");
regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE);
}
static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
{
struct ltdc_device *ldev;
int ret;
DRM_DEBUG_DRIVER("\n");
if (!crtc)
return -ENODEV;
ldev = crtc_to_ltdc(crtc);
if (source && strcmp(source, "auto") == 0) {
ldev->crc_active = true;
ret = regmap_set_bits(ldev->regmap, LTDC_GCR, GCR_CRCEN);
} else if (!source) {
ldev->crc_active = false;
ret = regmap_clear_bits(ldev->regmap, LTDC_GCR, GCR_CRCEN);
} else {
ret = -EINVAL;
}
ldev->crc_skip_count = 0;
return ret;
}
static int ltdc_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *source, size_t *values_cnt)
{
DRM_DEBUG_DRIVER("\n");
if (!crtc)
return -ENODEV;
if (source && strcmp(source, "auto") != 0) {
DRM_DEBUG_DRIVER("Unknown CRC source %s for %s\n",
source, crtc->name);
return -EINVAL;
}
*values_cnt = 1;
return 0;
}
static void ltdc_crtc_atomic_print_state(struct drm_printer *p,
const struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
drm_printf(p, "\ttransfer_error=%d\n", ldev->transfer_err);
drm_printf(p, "\tfifo_underrun_error=%d\n", ldev->fifo_err);
drm_printf(p, "\tfifo_underrun_warning=%d\n", ldev->fifo_warn);
drm_printf(p, "\tfifo_underrun_threshold=%d\n", ldev->fifo_threshold);
}
static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = ltdc_crtc_enable_vblank,
.disable_vblank = ltdc_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
.atomic_print_state = ltdc_crtc_atomic_print_state,
};
static const struct drm_crtc_funcs ltdc_crtc_with_crc_support_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = ltdc_crtc_enable_vblank,
.disable_vblank = ltdc_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
.set_crc_source = ltdc_crtc_set_crc_source,
.verify_crc_source = ltdc_crtc_verify_crc_source,
.atomic_print_state = ltdc_crtc_atomic_print_state,
};
/*
* DRM_PLANE
*/
static int ltdc_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = new_plane_state->fb;
u32 src_w, src_h;
DRM_DEBUG_DRIVER("\n");
if (!fb)
return 0;
/* convert the src_* fields from 16.16 fixed-point format */
src_w = new_plane_state->src_w >> 16;
src_h = new_plane_state->src_h >> 16;
/* Reject scaling */
if (src_w != new_plane_state->crtc_w || src_h != new_plane_state->crtc_h) {
DRM_DEBUG_DRIVER("Scaling is not supported");
return -EINVAL;
}
return 0;
}
static void ltdc_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ltdc_device *ldev = plane_to_ltdc(plane);
struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = newstate->fb;
u32 lofs = plane->index * LAY_OFS;
u32 x0 = newstate->crtc_x;
u32 x1 = newstate->crtc_x + newstate->crtc_w - 1;
u32 y0 = newstate->crtc_y;
u32 y1 = newstate->crtc_y + newstate->crtc_h - 1;
u32 src_x, src_y, src_w, src_h;
u32 val, pitch_in_bytes, line_length, line_number, ahbp, avbp, bpcr;
u32 paddr, paddr1, paddr2;
enum ltdc_pix_fmt pf;
if (!newstate->crtc || !fb) {
DRM_DEBUG_DRIVER("fb or crtc NULL");
return;
}
/* convert the src_* fields from 16.16 fixed-point format */
src_x = newstate->src_x >> 16;
src_y = newstate->src_y >> 16;
src_w = newstate->src_w >> 16;
src_h = newstate->src_h >> 16;
DRM_DEBUG_DRIVER("plane:%d fb:%d (%dx%d)@(%d,%d) -> (%dx%d)@(%d,%d)\n",
plane->base.id, fb->base.id,
src_w, src_h, src_x, src_y,
newstate->crtc_w, newstate->crtc_h,
newstate->crtc_x, newstate->crtc_y);
regmap_read(ldev->regmap, LTDC_BPCR, &bpcr);
ahbp = (bpcr & BPCR_AHBP) >> 16;
avbp = bpcr & BPCR_AVBP;
/* Configures the horizontal start and stop position */
val = ((x1 + 1 + ahbp) << 16) + (x0 + 1 + ahbp);
regmap_write_bits(ldev->regmap, LTDC_L1WHPCR + lofs,
LXWHPCR_WHSTPOS | LXWHPCR_WHSPPOS, val);
/* Configures the vertical start and stop position */
val = ((y1 + 1 + avbp) << 16) + (y0 + 1 + avbp);
regmap_write_bits(ldev->regmap, LTDC_L1WVPCR + lofs,
LXWVPCR_WVSTPOS | LXWVPCR_WVSPPOS, val);
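/*
* Illustrative example: with ahbp = 39, crtc_x = 0 and crtc_w = 800
* (x0 = 0, x1 = 799), the horizontal window register holds start = 40 and
* stop = 839, i.e. val = (839 << 16) | 40; the vertical window follows the
* same scheme with avbp.
*/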
/* Specifies the pixel format */
pf = to_ltdc_pixelformat(fb->format->format);
for (val = 0; val < NB_PF; val++)
if (ldev->caps.pix_fmt_hw[val] == pf)
break;
/* Use the flexible color format feature if necessary and available */
if (ldev->caps.pix_fmt_flex && val == NB_PF)
val = ltdc_set_flexible_pixel_format(plane, pf);
if (val == NB_PF) {
DRM_ERROR("Pixel format %.4s not supported\n",
(char *)&fb->format->format);
val = 0; /* set by default ARGB 32 bits */
}
regmap_write_bits(ldev->regmap, LTDC_L1PFCR + lofs, LXPFCR_PF, val);
/* Specifies the constant alpha value */
val = newstate->alpha >> 8;
regmap_write_bits(ldev->regmap, LTDC_L1CACR + lofs, LXCACR_CONSTA, val);
/* Specifies the blending factors */
val = BF1_PAXCA | BF2_1PAXCA;
if (!fb->format->has_alpha)
val = BF1_CA | BF2_1CA;
/* Manage hw-specific capabilities */
if (ldev->caps.non_alpha_only_l1 &&
plane->type != DRM_PLANE_TYPE_PRIMARY)
val = BF1_PAXCA | BF2_1PAXCA;
if (ldev->caps.dynamic_zorder) {
val |= (newstate->normalized_zpos << 16);
regmap_write_bits(ldev->regmap, LTDC_L1BFCR + lofs,
LXBFCR_BF2 | LXBFCR_BF1 | LXBFCR_BOR, val);
} else {
regmap_write_bits(ldev->regmap, LTDC_L1BFCR + lofs,
LXBFCR_BF2 | LXBFCR_BF1, val);
}
/* Sets the FB address */
paddr = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 0);
if (newstate->rotation & DRM_MODE_REFLECT_X)
paddr += (fb->format->cpp[0] * (x1 - x0 + 1)) - 1;
if (newstate->rotation & DRM_MODE_REFLECT_Y)
paddr += (fb->pitches[0] * (y1 - y0));
DRM_DEBUG_DRIVER("fb: phys 0x%08x", paddr);
regmap_write(ldev->regmap, LTDC_L1CFBAR + lofs, paddr);
/* Configures the color frame buffer pitch in bytes & line length */
line_length = fb->format->cpp[0] *
(x1 - x0 + 1) + (ldev->caps.bus_width >> 3) - 1;
if (newstate->rotation & DRM_MODE_REFLECT_Y)
/* Compute negative value (signed on 16 bits) for the pitch */
pitch_in_bytes = 0x10000 - fb->pitches[0];
else
pitch_in_bytes = fb->pitches[0];
val = (pitch_in_bytes << 16) | line_length;
regmap_write_bits(ldev->regmap, LTDC_L1CFBLR + lofs, LXCFBLR_CFBLL | LXCFBLR_CFBP, val);
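/*
* Illustrative example with hypothetical values: an XRGB8888 framebuffer
* 800 pixels wide on a 64-bit bus gives line_length = 4 * 800 + 8 - 1 = 3207;
* with DRM_MODE_REFLECT_Y and a 3200-byte pitch, the 16-bit two's-complement
* pitch field becomes 0x10000 - 3200 = 0xF380.
*/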
/* Configures the frame buffer line number */
line_number = y1 - y0 + 1;
regmap_write_bits(ldev->regmap, LTDC_L1CFBLNR + lofs, LXCFBLNR_CFBLN, line_number);
if (ldev->caps.ycbcr_input) {
if (fb->format->is_yuv) {
switch (fb->format->format) {
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
/* Configure the auxiliary frame buffer address 0 */
paddr1 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 1);
if (newstate->rotation & DRM_MODE_REFLECT_X)
paddr1 += ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) - 1;
if (newstate->rotation & DRM_MODE_REFLECT_Y)
paddr1 += (fb->pitches[1] * (y1 - y0 - 1)) >> 1;
regmap_write(ldev->regmap, LTDC_L1AFBA0R + lofs, paddr1);
break;
case DRM_FORMAT_YUV420:
/* Configure the auxiliary frame buffer address 0 & 1 */
paddr1 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 1);
paddr2 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 2);
if (newstate->rotation & DRM_MODE_REFLECT_X) {
paddr1 += ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) - 1;
paddr2 += ((fb->format->cpp[2] * (x1 - x0 + 1)) >> 1) - 1;
}
if (newstate->rotation & DRM_MODE_REFLECT_Y) {
paddr1 += (fb->pitches[1] * (y1 - y0 - 1)) >> 1;
paddr2 += (fb->pitches[2] * (y1 - y0 - 1)) >> 1;
}
regmap_write(ldev->regmap, LTDC_L1AFBA0R + lofs, paddr1);
regmap_write(ldev->regmap, LTDC_L1AFBA1R + lofs, paddr2);
break;
case DRM_FORMAT_YVU420:
/* Configure the auxiliary frame buffer address 0 & 1 */
paddr1 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 2);
paddr2 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 1);
if (newstate->rotation & DRM_MODE_REFLECT_X) {
paddr1 += ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) - 1;
paddr2 += ((fb->format->cpp[2] * (x1 - x0 + 1)) >> 1) - 1;
}
if (newstate->rotation & DRM_MODE_REFLECT_Y) {
paddr1 += (fb->pitches[1] * (y1 - y0 - 1)) >> 1;
paddr2 += (fb->pitches[2] * (y1 - y0 - 1)) >> 1;
}
regmap_write(ldev->regmap, LTDC_L1AFBA0R + lofs, paddr1);
regmap_write(ldev->regmap, LTDC_L1AFBA1R + lofs, paddr2);
break;
}
/*
* Set the length and the number of lines of the auxiliary
* buffers if the framebuffer contains more than one plane.
*/
if (fb->format->num_planes > 1) {
if (newstate->rotation & DRM_MODE_REFLECT_Y)
/*
* Compute negative value (signed on 16 bits)
* for the pitch
*/
pitch_in_bytes = 0x10000 - fb->pitches[1];
else
pitch_in_bytes = fb->pitches[1];
line_length = ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) +
(ldev->caps.bus_width >> 3) - 1;
/* Configure the auxiliary buffer length */
val = (pitch_in_bytes << 16) | line_length;
regmap_write(ldev->regmap, LTDC_L1AFBLR + lofs, val);
/* Configure the auxiliary frame buffer line number */
val = line_number >> 1;
regmap_write(ldev->regmap, LTDC_L1AFBLNR + lofs, val);
}
/* Configure the YCbCr conversion coefficients */
ltdc_set_ycbcr_coeffs(plane);
/* Configure YCbCr format and enable/disable conversion */
ltdc_set_ycbcr_config(plane, fb->format->format);
} else {
/* disable ycbcr conversion */
regmap_write(ldev->regmap, LTDC_L1PCR + lofs, 0);
}
}
/* Enable layer and CLUT if needed */
val = fb->format->format == DRM_FORMAT_C8 ? LXCR_CLUTEN : 0;
val |= LXCR_LEN;
/* Enable horizontal mirroring if requested */
if (newstate->rotation & DRM_MODE_REFLECT_X)
val |= LXCR_HMEN;
regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN | LXCR_CLUTEN | LXCR_HMEN, val);
/* Commit shadow registers = update plane at next vblank */
if (ldev->caps.plane_reg_shadow)
regmap_write_bits(ldev->regmap, LTDC_L1RCR + lofs,
LXRCR_IMR | LXRCR_VBR | LXRCR_GRMSK, LXRCR_VBR);
ldev->plane_fpsi[plane->index].counter++;
mutex_lock(&ldev->err_lock);
if (ldev->transfer_err) {
DRM_WARN("ltdc transfer error: %d\n", ldev->transfer_err);
ldev->transfer_err = 0;
}
if (ldev->caps.fifo_threshold) {
if (ldev->fifo_err) {
DRM_WARN("ltdc fifo underrun: please verify display mode\n");
ldev->fifo_err = 0;
}
} else {
if (ldev->fifo_warn >= ldev->fifo_threshold) {
DRM_WARN("ltdc fifo underrun: please verify display mode\n");
ldev->fifo_warn = 0;
}
}
mutex_unlock(&ldev->err_lock);
}
static void ltdc_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
plane);
struct ltdc_device *ldev = plane_to_ltdc(plane);
u32 lofs = plane->index * LAY_OFS;
/* Disable layer */
regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN | LXCR_CLUTEN | LXCR_HMEN, 0);
/* Commit shadow registers = update plane at next vblank */
if (ldev->caps.plane_reg_shadow)
regmap_write_bits(ldev->regmap, LTDC_L1RCR + lofs,
LXRCR_IMR | LXRCR_VBR | LXRCR_GRMSK, LXRCR_VBR);
DRM_DEBUG_DRIVER("CRTC:%d plane:%d\n",
oldstate->crtc->base.id, plane->base.id);
}
static void ltdc_plane_atomic_print_state(struct drm_printer *p,
const struct drm_plane_state *state)
{
struct drm_plane *plane = state->plane;
struct ltdc_device *ldev = plane_to_ltdc(plane);
struct fps_info *fpsi = &ldev->plane_fpsi[plane->index];
int ms_since_last;
ktime_t now;
now = ktime_get();
ms_since_last = ktime_to_ms(ktime_sub(now, fpsi->last_timestamp));
drm_printf(p, "\tuser_updates=%dfps\n",
DIV_ROUND_CLOSEST(fpsi->counter * 1000, ms_since_last));
fpsi->last_timestamp = now;
fpsi->counter = 0;
}
static const struct drm_plane_funcs ltdc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_print_state = ltdc_plane_atomic_print_state,
};
static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
.atomic_check = ltdc_plane_atomic_check,
.atomic_update = ltdc_plane_atomic_update,
.atomic_disable = ltdc_plane_atomic_disable,
};
static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
enum drm_plane_type type,
int index)
{
unsigned long possible_crtcs = CRTC_MASK;
struct ltdc_device *ldev = ddev->dev_private;
struct device *dev = ddev->dev;
struct drm_plane *plane;
unsigned int i, nb_fmt = 0;
u32 *formats;
u32 drm_fmt;
const u64 *modifiers = ltdc_format_modifiers;
u32 lofs = index * LAY_OFS;
u32 val;
int ret;
/* Allocate the biggest size according to supported color formats */
formats = devm_kzalloc(dev, (ldev->caps.pix_fmt_nb +
ARRAY_SIZE(ltdc_drm_fmt_ycbcr_cp) +
ARRAY_SIZE(ltdc_drm_fmt_ycbcr_sp) +
ARRAY_SIZE(ltdc_drm_fmt_ycbcr_fp)) *
sizeof(*formats), GFP_KERNEL);
for (i = 0; i < ldev->caps.pix_fmt_nb; i++) {
drm_fmt = ldev->caps.pix_fmt_drm[i];
/* Manage hw-specific capabilities */
if (ldev->caps.non_alpha_only_l1)
/* XR24 & RX24 like formats supported only on primary layer */
if (type != DRM_PLANE_TYPE_PRIMARY && is_xrgb(drm_fmt))
continue;
formats[nb_fmt++] = drm_fmt;
}
/* Add YCbCr supported pixel formats */
if (ldev->caps.ycbcr_input) {
regmap_read(ldev->regmap, LTDC_L1C1R + lofs, &val);
if (val & LXCR_C1R_YIA) {
memcpy(&formats[nb_fmt], ltdc_drm_fmt_ycbcr_cp,
ARRAY_SIZE(ltdc_drm_fmt_ycbcr_cp) * sizeof(*formats));
nb_fmt += ARRAY_SIZE(ltdc_drm_fmt_ycbcr_cp);
}
if (val & LXCR_C1R_YSPA) {
memcpy(&formats[nb_fmt], ltdc_drm_fmt_ycbcr_sp,
ARRAY_SIZE(ltdc_drm_fmt_ycbcr_sp) * sizeof(*formats));
nb_fmt += ARRAY_SIZE(ltdc_drm_fmt_ycbcr_sp);
}
if (val & LXCR_C1R_YFPA) {
memcpy(&formats[nb_fmt], ltdc_drm_fmt_ycbcr_fp,
ARRAY_SIZE(ltdc_drm_fmt_ycbcr_fp) * sizeof(*formats));
nb_fmt += ARRAY_SIZE(ltdc_drm_fmt_ycbcr_fp);
}
}
plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL);
if (!plane)
return NULL;
ret = drm_universal_plane_init(ddev, plane, possible_crtcs,
<dc_plane_funcs, formats, nb_fmt,
modifiers, type, NULL);
if (ret < 0)
return NULL;
if (ldev->caps.ycbcr_input) {
if (val & (LXCR_C1R_YIA | LXCR_C1R_YSPA | LXCR_C1R_YFPA))
drm_plane_create_color_properties(plane,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE),
DRM_COLOR_YCBCR_BT601,
DRM_COLOR_YCBCR_LIMITED_RANGE);
}
drm_plane_helper_add(plane, <dc_plane_helper_funcs);
drm_plane_create_alpha_property(plane);
DRM_DEBUG_DRIVER("plane:%d created\n", plane->base.id);
return plane;
}
static void ltdc_plane_destroy_all(struct drm_device *ddev)
{
struct drm_plane *plane, *plane_temp;
list_for_each_entry_safe(plane, plane_temp,
&ddev->mode_config.plane_list, head)
drm_plane_cleanup(plane);
}
static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
{
struct ltdc_device *ldev = ddev->dev_private;
struct drm_plane *primary, *overlay;
int supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
unsigned int i;
int ret;
primary = ltdc_plane_create(ddev, DRM_PLANE_TYPE_PRIMARY, 0);
if (!primary) {
DRM_ERROR("Can not create primary plane\n");
return -EINVAL;
}
if (ldev->caps.dynamic_zorder)
drm_plane_create_zpos_property(primary, 0, 0, ldev->caps.nb_layers - 1);
else
drm_plane_create_zpos_immutable_property(primary, 0);
if (ldev->caps.plane_rotation)
drm_plane_create_rotation_property(primary, DRM_MODE_ROTATE_0,
supported_rotations);
/* Init CRTC according to its hardware features */
if (ldev->caps.crc)
ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
<dc_crtc_with_crc_support_funcs, NULL);
else
ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
<dc_crtc_funcs, NULL);
if (ret) {
DRM_ERROR("Can not initialize CRTC\n");
goto cleanup;
}
drm_crtc_helper_add(crtc, <dc_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(crtc, CLUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, CLUT_SIZE);
DRM_DEBUG_DRIVER("CRTC:%d created\n", crtc->base.id);
/* Add planes. Note: the first layer is used by the primary plane */
for (i = 1; i < ldev->caps.nb_layers; i++) {
overlay = ltdc_plane_create(ddev, DRM_PLANE_TYPE_OVERLAY, i);
if (!overlay) {
ret = -ENOMEM;
DRM_ERROR("Can not create overlay plane %d\n", i);
goto cleanup;
}
if (ldev->caps.dynamic_zorder)
drm_plane_create_zpos_property(overlay, i, 0, ldev->caps.nb_layers - 1);
else
drm_plane_create_zpos_immutable_property(overlay, i);
if (ldev->caps.plane_rotation)
drm_plane_create_rotation_property(overlay, DRM_MODE_ROTATE_0,
supported_rotations);
}
return 0;
cleanup:
ltdc_plane_destroy_all(ddev);
return ret;
}
static void ltdc_encoder_disable(struct drm_encoder *encoder)
{
struct drm_device *ddev = encoder->dev;
struct ltdc_device *ldev = ddev->dev_private;
DRM_DEBUG_DRIVER("\n");
/* Disable LTDC */
regmap_clear_bits(ldev->regmap, LTDC_GCR, GCR_LTDCEN);
/* Set the pinctrl to its sleep state, whatever the encoder type */
pinctrl_pm_select_sleep_state(ddev->dev);
}
static void ltdc_encoder_enable(struct drm_encoder *encoder)
{
struct drm_device *ddev = encoder->dev;
struct ltdc_device *ldev = ddev->dev_private;
DRM_DEBUG_DRIVER("\n");
/* set fifo underrun threshold register */
if (ldev->caps.fifo_threshold)
regmap_write(ldev->regmap, LTDC_FUT, ldev->fifo_threshold);
/* Enable LTDC */
regmap_set_bits(ldev->regmap, LTDC_GCR, GCR_LTDCEN);
}
static void ltdc_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *ddev = encoder->dev;
DRM_DEBUG_DRIVER("\n");
/*
* Set the pinctrl to its default state only for the DPI encoder type.
* Other types, like DSI, do not need pinctrl because of their
* internal bridge (the signals do not come out of the chipset).
*/
if (encoder->encoder_type == DRM_MODE_ENCODER_DPI)
pinctrl_pm_select_default_state(ddev->dev);
}
static const struct drm_encoder_helper_funcs ltdc_encoder_helper_funcs = {
.disable = ltdc_encoder_disable,
.enable = ltdc_encoder_enable,
.mode_set = ltdc_encoder_mode_set,
};
static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
{
struct drm_encoder *encoder;
int ret;
encoder = devm_kzalloc(ddev->dev, sizeof(*encoder), GFP_KERNEL);
if (!encoder)
return -ENOMEM;
encoder->possible_crtcs = CRTC_MASK;
encoder->possible_clones = 0; /* No cloning support */
drm_simple_encoder_init(ddev, encoder, DRM_MODE_ENCODER_DPI);
drm_encoder_helper_add(encoder, <dc_encoder_helper_funcs);
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
if (ret != -EPROBE_DEFER)
drm_encoder_cleanup(encoder);
return ret;
}
DRM_DEBUG_DRIVER("Bridge encoder:%d created\n", encoder->base.id);
return 0;
}
static int ltdc_get_caps(struct drm_device *ddev)
{
struct ltdc_device *ldev = ddev->dev_private;
u32 bus_width_log2, lcr, gc2r;
/*
* at least 1 layer must be managed & the number of layers
* must not exceed LTDC_MAX_LAYER
*/
regmap_read(ldev->regmap, LTDC_LCR, &lcr);
ldev->caps.nb_layers = clamp((int)lcr, 1, LTDC_MAX_LAYER);
/* set data bus width */
regmap_read(ldev->regmap, LTDC_GC2R, &gc2r);
bus_width_log2 = (gc2r & GC2R_BW) >> 4;
ldev->caps.bus_width = 8 << bus_width_log2;
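/*
* Illustrative example: GC2R_BW encodes log2 of the bus width in bytes, so a
* field value of 3 means 8 bytes, i.e. bus_width = 8 << 3 = 64 bits.
*/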
regmap_read(ldev->regmap, LTDC_IDR, &ldev->caps.hw_version);
switch (ldev->caps.hw_version) {
case HWVER_10200:
case HWVER_10300:
ldev->caps.layer_ofs = LAY_OFS_0;
ldev->caps.layer_regs = ltdc_layer_regs_a0;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a0;
ldev->caps.pix_fmt_drm = ltdc_drm_fmt_a0;
ldev->caps.pix_fmt_nb = ARRAY_SIZE(ltdc_drm_fmt_a0);
ldev->caps.pix_fmt_flex = false;
/*
* Older HW versions support non-alpha color formats derived
* from native alpha color formats only on the primary layer.
* For instance, the native RG16 format without alpha works fine
* on the 2nd layer, but XR24 (the color format derived from AR24)
* does not work on the 2nd layer.
*/
ldev->caps.non_alpha_only_l1 = true;
ldev->caps.pad_max_freq_hz = 90000000;
if (ldev->caps.hw_version == HWVER_10200)
ldev->caps.pad_max_freq_hz = 65000000;
ldev->caps.nb_irq = 2;
ldev->caps.ycbcr_input = false;
ldev->caps.ycbcr_output = false;
ldev->caps.plane_reg_shadow = false;
ldev->caps.crc = false;
ldev->caps.dynamic_zorder = false;
ldev->caps.plane_rotation = false;
ldev->caps.fifo_threshold = false;
break;
case HWVER_20101:
ldev->caps.layer_ofs = LAY_OFS_0;
ldev->caps.layer_regs = ltdc_layer_regs_a1;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1;
ldev->caps.pix_fmt_drm = ltdc_drm_fmt_a1;
ldev->caps.pix_fmt_nb = ARRAY_SIZE(ltdc_drm_fmt_a1);
ldev->caps.pix_fmt_flex = false;
ldev->caps.non_alpha_only_l1 = false;
ldev->caps.pad_max_freq_hz = 150000000;
ldev->caps.nb_irq = 4;
ldev->caps.ycbcr_input = false;
ldev->caps.ycbcr_output = false;
ldev->caps.plane_reg_shadow = false;
ldev->caps.crc = false;
ldev->caps.dynamic_zorder = false;
ldev->caps.plane_rotation = false;
ldev->caps.fifo_threshold = false;
break;
case HWVER_40100:
ldev->caps.layer_ofs = LAY_OFS_1;
ldev->caps.layer_regs = ltdc_layer_regs_a2;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a2;
ldev->caps.pix_fmt_drm = ltdc_drm_fmt_a2;
ldev->caps.pix_fmt_nb = ARRAY_SIZE(ltdc_drm_fmt_a2);
ldev->caps.pix_fmt_flex = true;
ldev->caps.non_alpha_only_l1 = false;
ldev->caps.pad_max_freq_hz = 90000000;
ldev->caps.nb_irq = 2;
ldev->caps.ycbcr_input = true;
ldev->caps.ycbcr_output = true;
ldev->caps.plane_reg_shadow = true;
ldev->caps.crc = true;
ldev->caps.dynamic_zorder = true;
ldev->caps.plane_rotation = true;
ldev->caps.fifo_threshold = true;
break;
default:
return -ENODEV;
}
return 0;
}
void ltdc_suspend(struct drm_device *ddev)
{
struct ltdc_device *ldev = ddev->dev_private;
DRM_DEBUG_DRIVER("\n");
clk_disable_unprepare(ldev->pixel_clk);
}
int ltdc_resume(struct drm_device *ddev)
{
struct ltdc_device *ldev = ddev->dev_private;
int ret;
DRM_DEBUG_DRIVER("\n");
ret = clk_prepare_enable(ldev->pixel_clk);
if (ret) {
DRM_ERROR("failed to enable pixel clock (%d)\n", ret);
return ret;
}
return 0;
}
int ltdc_load(struct drm_device *ddev)
{
struct platform_device *pdev = to_platform_device(ddev->dev);
struct ltdc_device *ldev = ddev->dev_private;
struct device *dev = ddev->dev;
struct device_node *np = dev->of_node;
struct drm_bridge *bridge;
struct drm_panel *panel;
struct drm_crtc *crtc;
struct reset_control *rstc;
struct resource *res;
int irq, i, nb_endpoints;
int ret = -ENODEV;
DRM_DEBUG_DRIVER("\n");
/* Get number of endpoints */
nb_endpoints = of_graph_get_endpoint_count(np);
if (!nb_endpoints)
return -ENODEV;
ldev->pixel_clk = devm_clk_get(dev, "lcd");
if (IS_ERR(ldev->pixel_clk)) {
if (PTR_ERR(ldev->pixel_clk) != -EPROBE_DEFER)
DRM_ERROR("Unable to get lcd clock\n");
return PTR_ERR(ldev->pixel_clk);
}
if (clk_prepare_enable(ldev->pixel_clk)) {
DRM_ERROR("Unable to prepare pixel clock\n");
return -ENODEV;
}
/* Get endpoints if any */
for (i = 0; i < nb_endpoints; i++) {
ret = drm_of_find_panel_or_bridge(np, 0, i, &panel, &bridge);
/*
 * If an endpoint returns -ENODEV, continue probing the remaining
 * endpoints; any other error (e.g. -EPROBE_DEFER) stops the probe.
 */
if (ret == -ENODEV)
continue;
else if (ret)
goto err;
if (panel) {
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge)) {
DRM_ERROR("panel-bridge endpoint %d\n", i);
ret = PTR_ERR(bridge);
goto err;
}
}
if (bridge) {
ret = ltdc_encoder_init(ddev, bridge);
if (ret) {
if (ret != -EPROBE_DEFER)
DRM_ERROR("init encoder endpoint %d\n", i);
goto err;
}
}
}
rstc = devm_reset_control_get_exclusive(dev, NULL);
mutex_init(&ldev->err_lock);
if (!IS_ERR(rstc)) {
reset_control_assert(rstc);
usleep_range(10, 20);
reset_control_deassert(rstc);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ldev->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(ldev->regs)) {
DRM_ERROR("Unable to get ltdc registers\n");
ret = PTR_ERR(ldev->regs);
goto err;
}
ldev->regmap = devm_regmap_init_mmio(&pdev->dev, ldev->regs, &stm32_ltdc_regmap_cfg);
if (IS_ERR(ldev->regmap)) {
DRM_ERROR("Unable to regmap ltdc registers\n");
ret = PTR_ERR(ldev->regmap);
goto err;
}
ret = ltdc_get_caps(ddev);
if (ret) {
DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
ldev->caps.hw_version);
goto err;
}
/* Disable interrupts */
if (ldev->caps.fifo_threshold)
regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE | IER_RRIE | IER_FUWIE |
IER_TERRIE);
else
regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE | IER_RRIE | IER_FUWIE |
IER_TERRIE | IER_FUEIE);
DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
/* initialize default value for fifo underrun threshold & clear interrupt error counters */
ldev->transfer_err = 0;
ldev->fifo_err = 0;
ldev->fifo_warn = 0;
ldev->fifo_threshold = FUT_DFT;
for (i = 0; i < ldev->caps.nb_irq; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = irq;
goto err;
}
ret = devm_request_threaded_irq(dev, irq, ltdc_irq,
ltdc_irq_thread, IRQF_ONESHOT,
dev_name(dev), ddev);
if (ret) {
DRM_ERROR("Failed to register LTDC interrupt\n");
goto err;
}
}
crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
if (!crtc) {
DRM_ERROR("Failed to allocate crtc\n");
ret = -ENOMEM;
goto err;
}
ret = ltdc_crtc_init(ddev, crtc);
if (ret) {
DRM_ERROR("Failed to init crtc\n");
goto err;
}
ret = drm_vblank_init(ddev, NB_CRTC);
if (ret) {
DRM_ERROR("Failed calling drm_vblank_init()\n");
goto err;
}
clk_disable_unprepare(ldev->pixel_clk);
pinctrl_pm_select_sleep_state(ddev->dev);
pm_runtime_enable(ddev->dev);
return 0;
err:
for (i = 0; i < nb_endpoints; i++)
drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
clk_disable_unprepare(ldev->pixel_clk);
return ret;
}
void ltdc_unload(struct drm_device *ddev)
{
struct device *dev = ddev->dev;
int nb_endpoints, i;
DRM_DEBUG_DRIVER("\n");
nb_endpoints = of_graph_get_endpoint_count(dev->of_node);
for (i = 0; i < nb_endpoints; i++)
drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
pm_runtime_disable(ddev->dev);
}
MODULE_AUTHOR("Philippe Cornu <[email protected]>");
MODULE_AUTHOR("Yannick Fertre <[email protected]>");
MODULE_AUTHOR("Fabien Dessenne <[email protected]>");
MODULE_AUTHOR("Mickael Reulier <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics ST DRM LTDC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/stm/ltdc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2017
*
* Authors: Philippe Cornu <[email protected]>
* Yannick Fertre <[email protected]>
*/
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/bridge/dw_mipi_dsi.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_print.h>
#define HWVER_130 0x31333000 /* IP version 1.30 */
#define HWVER_131 0x31333100 /* IP version 1.31 */
/* DSI digital registers & bit definitions */
#define DSI_VERSION 0x00
#define VERSION GENMASK(31, 8)
/* DSI wrapper registers & bit definitions */
/* Note: registers are named as in the Reference Manual */
#define DSI_WCFGR 0x0400 /* Wrapper ConFiGuration Reg */
#define WCFGR_DSIM BIT(0) /* DSI Mode */
#define WCFGR_COLMUX GENMASK(3, 1) /* COLor MUltipleXing */
#define DSI_WCR 0x0404 /* Wrapper Control Reg */
#define WCR_DSIEN BIT(3) /* DSI ENable */
#define DSI_WISR 0x040C /* Wrapper Interrupt and Status Reg */
#define WISR_PLLLS BIT(8) /* PLL Lock Status */
#define WISR_RRS BIT(12) /* Regulator Ready Status */
#define DSI_WPCR0 0x0418 /* Wrapper Phy Conf Reg 0 */
#define WPCR0_UIX4 GENMASK(5, 0) /* Unit Interval X 4 */
#define WPCR0_TDDL BIT(16) /* Turn Disable Data Lanes */
#define DSI_WRPCR 0x0430 /* Wrapper Regulator & Pll Ctrl Reg */
#define WRPCR_PLLEN BIT(0) /* PLL ENable */
#define WRPCR_NDIV GENMASK(8, 2) /* pll loop DIVision Factor */
#define WRPCR_IDF GENMASK(14, 11) /* pll Input Division Factor */
#define WRPCR_ODF GENMASK(17, 16) /* pll Output Division Factor */
#define WRPCR_REGEN BIT(24) /* REGulator ENable */
#define WRPCR_BGREN BIT(28) /* BandGap Reference ENable */
#define IDF_MIN 1
#define IDF_MAX 7
#define NDIV_MIN 10
#define NDIV_MAX 125
#define ODF_MIN 1
#define ODF_MAX 8
/* dsi color format coding according to the datasheet */
enum dsi_color {
DSI_RGB565_CONF1,
DSI_RGB565_CONF2,
DSI_RGB565_CONF3,
DSI_RGB666_CONF1,
DSI_RGB666_CONF2,
DSI_RGB888,
};
#define LANE_MIN_KBPS 31250
#define LANE_MAX_KBPS 500000
/* Sleep & timeout for regulator on/off, pll lock/unlock & fifo empty */
#define SLEEP_US 1000
#define TIMEOUT_US 200000
struct dw_mipi_dsi_stm {
void __iomem *base;
struct clk *pllref_clk;
struct dw_mipi_dsi *dsi;
u32 hw_version;
int lane_min_kbps;
int lane_max_kbps;
struct regulator *vdd_supply;
};
static inline void dsi_write(struct dw_mipi_dsi_stm *dsi, u32 reg, u32 val)
{
writel(val, dsi->base + reg);
}
static inline u32 dsi_read(struct dw_mipi_dsi_stm *dsi, u32 reg)
{
return readl(dsi->base + reg);
}
static inline void dsi_set(struct dw_mipi_dsi_stm *dsi, u32 reg, u32 mask)
{
dsi_write(dsi, reg, dsi_read(dsi, reg) | mask);
}
static inline void dsi_clear(struct dw_mipi_dsi_stm *dsi, u32 reg, u32 mask)
{
dsi_write(dsi, reg, dsi_read(dsi, reg) & ~mask);
}
static inline void dsi_update_bits(struct dw_mipi_dsi_stm *dsi, u32 reg,
u32 mask, u32 val)
{
dsi_write(dsi, reg, (dsi_read(dsi, reg) & ~mask) | val);
}
static enum dsi_color dsi_color_from_mipi(enum mipi_dsi_pixel_format fmt)
{
switch (fmt) {
case MIPI_DSI_FMT_RGB888:
return DSI_RGB888;
case MIPI_DSI_FMT_RGB666:
return DSI_RGB666_CONF2;
case MIPI_DSI_FMT_RGB666_PACKED:
return DSI_RGB666_CONF1;
case MIPI_DSI_FMT_RGB565:
return DSI_RGB565_CONF1;
default:
DRM_DEBUG_DRIVER("MIPI color invalid, so we use rgb888\n");
}
return DSI_RGB888;
}
static int dsi_pll_get_clkout_khz(int clkin_khz, int idf, int ndiv, int odf)
{
int divisor = idf * odf;
/* prevent division by 0 */
if (!divisor)
return 0;
return DIV_ROUND_CLOSEST(clkin_khz * ndiv, divisor);
}
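/*
 * Illustrative numbers (derived from the formula above, not from the
 * datasheet): the PLL output is clkin_khz * ndiv / (idf * odf), so a
 * 25000 kHz reference with ndiv = 100, idf = 5 and odf = 1 yields
 * 25000 * 100 / 5 = 500000 kHz.
 */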
static int dsi_pll_get_params(struct dw_mipi_dsi_stm *dsi,
int clkin_khz, int clkout_khz,
int *idf, int *ndiv, int *odf)
{
int i, o, n, n_min, n_max;
int fvco_min, fvco_max, delta, best_delta; /* all in khz */
/* Early checks preventing division by 0 & odd results */
if (clkin_khz <= 0 || clkout_khz <= 0)
return -EINVAL;
fvco_min = dsi->lane_min_kbps * 2 * ODF_MAX;
fvco_max = dsi->lane_max_kbps * 2 * ODF_MIN;
best_delta = 1000000; /* large starting value (1000000 kHz) */
for (i = IDF_MIN; i <= IDF_MAX; i++) {
/* Compute ndiv range according to Fvco */
n_min = ((fvco_min * i) / (2 * clkin_khz)) + 1;
n_max = (fvco_max * i) / (2 * clkin_khz);
/* No need to continue idf loop if we reach ndiv max */
if (n_min >= NDIV_MAX)
break;
/* Clamp ndiv to valid values */
if (n_min < NDIV_MIN)
n_min = NDIV_MIN;
if (n_max > NDIV_MAX)
n_max = NDIV_MAX;
for (o = ODF_MIN; o <= ODF_MAX; o *= 2) {
n = DIV_ROUND_CLOSEST(i * o * clkout_khz, clkin_khz);
/* Check ndiv according to vco range */
if (n < n_min || n > n_max)
continue;
/* Check if new delta is better & saves parameters */
delta = dsi_pll_get_clkout_khz(clkin_khz, i, n, o) -
clkout_khz;
if (delta < 0)
delta = -delta;
if (delta < best_delta) {
*idf = i;
*ndiv = n;
*odf = o;
best_delta = delta;
}
/* fast return in case of "perfect result" */
if (!delta)
return 0;
}
}
return 0;
}
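/*
 * Worked example of the search above (illustrative, assuming the 1.30 IP
 * limits of 31250..500000 kbps per lane): for clkin = 25000 kHz and a
 * requested clkout = 500000 kHz, the Fvco window is 500000..1000000 kHz,
 * so idf = 1 restricts ndiv to [11, 20]; odf = 1 then gives ndiv = 20 and
 * clkout = 25000 * 20 / (1 * 1) = 500000 kHz, a perfect match (delta = 0)
 * that makes the loop return early.
 */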
static int dw_mipi_dsi_phy_init(void *priv_data)
{
struct dw_mipi_dsi_stm *dsi = priv_data;
u32 val;
int ret;
/* Enable the regulator */
dsi_set(dsi, DSI_WRPCR, WRPCR_REGEN | WRPCR_BGREN);
ret = readl_poll_timeout(dsi->base + DSI_WISR, val, val & WISR_RRS,
SLEEP_US, TIMEOUT_US);
if (ret)
DRM_DEBUG_DRIVER("!TIMEOUT! waiting REGU, let's continue\n");
/* Enable the DSI PLL & wait for its lock */
dsi_set(dsi, DSI_WRPCR, WRPCR_PLLEN);
ret = readl_poll_timeout(dsi->base + DSI_WISR, val, val & WISR_PLLLS,
SLEEP_US, TIMEOUT_US);
if (ret)
DRM_DEBUG_DRIVER("!TIMEOUT! waiting PLL, let's continue\n");
return 0;
}
static void dw_mipi_dsi_phy_power_on(void *priv_data)
{
struct dw_mipi_dsi_stm *dsi = priv_data;
DRM_DEBUG_DRIVER("\n");
/* Enable the DSI wrapper */
dsi_set(dsi, DSI_WCR, WCR_DSIEN);
}
static void dw_mipi_dsi_phy_power_off(void *priv_data)
{
struct dw_mipi_dsi_stm *dsi = priv_data;
DRM_DEBUG_DRIVER("\n");
/* Disable the DSI wrapper */
dsi_clear(dsi, DSI_WCR, WCR_DSIEN);
}
static int
dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
unsigned long mode_flags, u32 lanes, u32 format,
unsigned int *lane_mbps)
{
struct dw_mipi_dsi_stm *dsi = priv_data;
unsigned int idf, ndiv, odf, pll_in_khz, pll_out_khz;
int ret, bpp;
u32 val;
pll_in_khz = (unsigned int)(clk_get_rate(dsi->pllref_clk) / 1000);
/* Compute requested pll out */
bpp = mipi_dsi_pixel_format_to_bpp(format);
pll_out_khz = mode->clock * bpp / lanes;
/* Add 20% to pll out to be higher than pixel bw (burst mode only) */
if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
pll_out_khz = (pll_out_khz * 12) / 10;
if (pll_out_khz > dsi->lane_max_kbps) {
pll_out_khz = dsi->lane_max_kbps;
DRM_WARN("Warning max phy mbps is used\n");
}
if (pll_out_khz < dsi->lane_min_kbps) {
pll_out_khz = dsi->lane_min_kbps;
DRM_WARN("Warning min phy mbps is used\n");
}
/* Compute best pll parameters */
idf = 0;
ndiv = 0;
odf = 0;
ret = dsi_pll_get_params(dsi, pll_in_khz, pll_out_khz,
&idf, &ndiv, &odf);
if (ret)
DRM_WARN("Warning dsi_pll_get_params(): bad params\n");
/* Get the adjusted pll out value */
pll_out_khz = dsi_pll_get_clkout_khz(pll_in_khz, idf, ndiv, odf);
/* Set the PLL division factors */
dsi_update_bits(dsi, DSI_WRPCR, WRPCR_NDIV | WRPCR_IDF | WRPCR_ODF,
(ndiv << 2) | (idf << 11) | ((ffs(odf) - 1) << 16));
/* Compute uix4 & set the bit period in high-speed mode */
val = 4000000 / pll_out_khz;
dsi_update_bits(dsi, DSI_WPCR0, WPCR0_UIX4, val);
/* Select video mode by resetting DSIM bit */
dsi_clear(dsi, DSI_WCFGR, WCFGR_DSIM);
/* Select the color coding */
dsi_update_bits(dsi, DSI_WCFGR, WCFGR_COLMUX,
dsi_color_from_mipi(format) << 1);
*lane_mbps = pll_out_khz / 1000;
DRM_DEBUG_DRIVER("pll_in %ukHz pll_out %ukHz lane_mbps %uMHz\n",
pll_in_khz, pll_out_khz, *lane_mbps);
return 0;
}
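/*
 * Worked example (illustrative, not a datasheet value): a 25175 kHz pixel
 * clock in RGB888 (24 bpp) over 2 lanes requests
 * pll_out_khz = 25175 * 24 / 2 = 302100 kHz, which fits the 1.30 IP lane
 * range of 31250..500000 kbps. With that rate, WPCR0_UIX4 becomes
 * 4000000 / 302100 = 13 and *lane_mbps = 302 (modulo the small adjustment
 * from the PLL parameters actually found by dsi_pll_get_params()).
 */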
#define DSI_PHY_DELAY(fp, vp, mbps) DIV_ROUND_UP((fp) * (mbps) + 1000 * (vp), 8000)
static int
dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
struct dw_mipi_dsi_dphy_timing *timing)
{
/*
* From STM32MP157 datasheet, valid for STM32F469, STM32F7x9, STM32H747
* phy_clkhs2lp_time = (272+136*UI)/(8*UI)
* phy_clklp2hs_time = (512+40*UI)/(8*UI)
* phy_hs2lp_time = (192+64*UI)/(8*UI)
* phy_lp2hs_time = (256+32*UI)/(8*UI)
*/
timing->clk_hs2lp = DSI_PHY_DELAY(272, 136, lane_mbps);
timing->clk_lp2hs = DSI_PHY_DELAY(512, 40, lane_mbps);
timing->data_hs2lp = DSI_PHY_DELAY(192, 64, lane_mbps);
timing->data_lp2hs = DSI_PHY_DELAY(256, 32, lane_mbps);
return 0;
}
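/*
 * Worked example of DSI_PHY_DELAY() (illustrative): at lane_mbps = 500,
 * clk_hs2lp = DIV_ROUND_UP(272 * 500 + 1000 * 136, 8000) = 34,
 * clk_lp2hs = DIV_ROUND_UP(512 * 500 + 1000 * 40, 8000) = 37,
 * data_hs2lp = data_lp2hs = DIV_ROUND_UP(160000, 8000) = 20.
 */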
#define CLK_TOLERANCE_HZ 50
static enum drm_mode_status
dw_mipi_dsi_stm_mode_valid(void *priv_data,
const struct drm_display_mode *mode,
unsigned long mode_flags, u32 lanes, u32 format)
{
struct dw_mipi_dsi_stm *dsi = priv_data;
unsigned int idf, ndiv, odf, pll_in_khz, pll_out_khz;
int ret, bpp;
bpp = mipi_dsi_pixel_format_to_bpp(format);
if (bpp < 0)
return MODE_BAD;
/* Compute requested pll out */
pll_out_khz = mode->clock * bpp / lanes;
if (pll_out_khz > dsi->lane_max_kbps)
return MODE_CLOCK_HIGH;
if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
/* Add 20% to pll out to be higher than pixel bw */
pll_out_khz = (pll_out_khz * 12) / 10;
} else {
if (pll_out_khz < dsi->lane_min_kbps)
return MODE_CLOCK_LOW;
}
/* Compute best pll parameters */
idf = 0;
ndiv = 0;
odf = 0;
pll_in_khz = clk_get_rate(dsi->pllref_clk) / 1000;
ret = dsi_pll_get_params(dsi, pll_in_khz, pll_out_khz, &idf, &ndiv, &odf);
if (ret) {
DRM_WARN("Warning dsi_pll_get_params(): bad params\n");
return MODE_ERROR;
}
if (!(mode_flags & MIPI_DSI_MODE_VIDEO_BURST)) {
unsigned int px_clock_hz, target_px_clock_hz, lane_mbps;
int dsi_short_packet_size_px, hfp, hsync, hbp, delay_to_lp;
struct dw_mipi_dsi_dphy_timing dphy_timing;
/* Get the adjusted pll out value */
pll_out_khz = dsi_pll_get_clkout_khz(pll_in_khz, idf, ndiv, odf);
px_clock_hz = DIV_ROUND_CLOSEST_ULL(1000ULL * pll_out_khz * lanes, bpp);
target_px_clock_hz = mode->clock * 1000;
/*
* Filter modes according to the clock value, particularly useful for
* hdmi modes that require precise pixel clocks.
*/
if (px_clock_hz < target_px_clock_hz - CLK_TOLERANCE_HZ ||
px_clock_hz > target_px_clock_hz + CLK_TOLERANCE_HZ)
return MODE_CLOCK_RANGE;
/* sync packets are coded as DSI short packets (4 bytes) */
dsi_short_packet_size_px = DIV_ROUND_UP(4 * BITS_PER_BYTE, bpp);
hfp = mode->hsync_start - mode->hdisplay;
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
/* hsync must be longer than a 4-byte HSS packet */
if (hsync < dsi_short_packet_size_px)
return MODE_HSYNC_NARROW;
if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
/* HBP must be longer than a 4-byte HSE packet */
if (hbp < dsi_short_packet_size_px)
return MODE_HSYNC_NARROW;
hbp -= dsi_short_packet_size_px;
} else {
/* With sync events, HBP extends into the hsync */
hbp += hsync - dsi_short_packet_size_px;
}
lane_mbps = pll_out_khz / 1000;
ret = dw_mipi_dsi_phy_get_timing(priv_data, lane_mbps, &dphy_timing);
if (ret)
return MODE_ERROR;
/*
 * In non-burst mode the DSI has to enter LP during the HFP
 * (horizontal front porch) or HBP (horizontal back porch) to
 * resynchronize with the LTDC pixel clock.
 */
delay_to_lp = DIV_ROUND_UP((dphy_timing.data_hs2lp + dphy_timing.data_lp2hs) *
lanes * BITS_PER_BYTE, bpp);
if (hfp < delay_to_lp && hbp < delay_to_lp)
return MODE_HSYNC;
}
return MODE_OK;
}
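/*
 * Illustrative check of the LP-entry constraint above: with the 500 Mbps
 * timings (data_hs2lp + data_lp2hs = 40), 2 lanes and 24 bpp,
 * delay_to_lp = DIV_ROUND_UP(40 * 2 * 8, 24) = 27 pixels, so a mode is
 * rejected with MODE_HSYNC only if both its HFP and (adjusted) HBP are
 * shorter than 27 pixels.
 */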
static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_stm_phy_ops = {
.init = dw_mipi_dsi_phy_init,
.power_on = dw_mipi_dsi_phy_power_on,
.power_off = dw_mipi_dsi_phy_power_off,
.get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
.get_timing = dw_mipi_dsi_phy_get_timing,
};
static struct dw_mipi_dsi_plat_data dw_mipi_dsi_stm_plat_data = {
.max_data_lanes = 2,
.mode_valid = dw_mipi_dsi_stm_mode_valid,
.phy_ops = &dw_mipi_dsi_stm_phy_ops,
};
static const struct of_device_id dw_mipi_dsi_stm_dt_ids[] = {
{ .compatible = "st,stm32-dsi", .data = &dw_mipi_dsi_stm_plat_data, },
{ },
};
MODULE_DEVICE_TABLE(of, dw_mipi_dsi_stm_dt_ids);
static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_mipi_dsi_stm *dsi;
struct clk *pclk;
int ret;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
dsi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->base)) {
ret = PTR_ERR(dsi->base);
DRM_ERROR("Unable to get dsi registers %d\n", ret);
return ret;
}
dsi->vdd_supply = devm_regulator_get(dev, "phy-dsi");
if (IS_ERR(dsi->vdd_supply)) {
ret = PTR_ERR(dsi->vdd_supply);
dev_err_probe(dev, ret, "Failed to request regulator\n");
return ret;
}
ret = regulator_enable(dsi->vdd_supply);
if (ret) {
DRM_ERROR("Failed to enable regulator: %d\n", ret);
return ret;
}
dsi->pllref_clk = devm_clk_get(dev, "ref");
if (IS_ERR(dsi->pllref_clk)) {
ret = PTR_ERR(dsi->pllref_clk);
dev_err_probe(dev, ret, "Unable to get pll reference clock\n");
goto err_clk_get;
}
ret = clk_prepare_enable(dsi->pllref_clk);
if (ret) {
DRM_ERROR("Failed to enable pllref_clk: %d\n", ret);
goto err_clk_get;
}
pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(pclk)) {
ret = PTR_ERR(pclk);
DRM_ERROR("Unable to get peripheral clock: %d\n", ret);
goto err_dsi_probe;
}
ret = clk_prepare_enable(pclk);
if (ret) {
DRM_ERROR("%s: Failed to enable peripheral clk\n", __func__);
goto err_dsi_probe;
}
dsi->hw_version = dsi_read(dsi, DSI_VERSION) & VERSION;
clk_disable_unprepare(pclk);
if (dsi->hw_version != HWVER_130 && dsi->hw_version != HWVER_131) {
ret = -ENODEV;
DRM_ERROR("bad dsi hardware version\n");
goto err_dsi_probe;
}
/* set lane capabilities according to hw version */
dsi->lane_min_kbps = LANE_MIN_KBPS;
dsi->lane_max_kbps = LANE_MAX_KBPS;
if (dsi->hw_version == HWVER_131) {
dsi->lane_min_kbps *= 2;
dsi->lane_max_kbps *= 2;
}
dw_mipi_dsi_stm_plat_data.base = dsi->base;
dw_mipi_dsi_stm_plat_data.priv_data = dsi;
platform_set_drvdata(pdev, dsi);
dsi->dsi = dw_mipi_dsi_probe(pdev, &dw_mipi_dsi_stm_plat_data);
if (IS_ERR(dsi->dsi)) {
ret = PTR_ERR(dsi->dsi);
dev_err_probe(dev, ret, "Failed to initialize mipi dsi host\n");
goto err_dsi_probe;
}
return 0;
err_dsi_probe:
clk_disable_unprepare(dsi->pllref_clk);
err_clk_get:
regulator_disable(dsi->vdd_supply);
return ret;
}
static void dw_mipi_dsi_stm_remove(struct platform_device *pdev)
{
struct dw_mipi_dsi_stm *dsi = platform_get_drvdata(pdev);
dw_mipi_dsi_remove(dsi->dsi);
clk_disable_unprepare(dsi->pllref_clk);
regulator_disable(dsi->vdd_supply);
}
static int __maybe_unused dw_mipi_dsi_stm_suspend(struct device *dev)
{
struct dw_mipi_dsi_stm *dsi = dw_mipi_dsi_stm_plat_data.priv_data;
DRM_DEBUG_DRIVER("\n");
clk_disable_unprepare(dsi->pllref_clk);
regulator_disable(dsi->vdd_supply);
return 0;
}
static int __maybe_unused dw_mipi_dsi_stm_resume(struct device *dev)
{
struct dw_mipi_dsi_stm *dsi = dw_mipi_dsi_stm_plat_data.priv_data;
int ret;
DRM_DEBUG_DRIVER("\n");
ret = regulator_enable(dsi->vdd_supply);
if (ret) {
DRM_ERROR("Failed to enable regulator: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(dsi->pllref_clk);
if (ret) {
regulator_disable(dsi->vdd_supply);
DRM_ERROR("Failed to enable pllref_clk: %d\n", ret);
return ret;
}
return 0;
}
static const struct dev_pm_ops dw_mipi_dsi_stm_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dw_mipi_dsi_stm_suspend,
dw_mipi_dsi_stm_resume)
};
static struct platform_driver dw_mipi_dsi_stm_driver = {
.probe = dw_mipi_dsi_stm_probe,
.remove_new = dw_mipi_dsi_stm_remove,
.driver = {
.of_match_table = dw_mipi_dsi_stm_dt_ids,
.name = "stm32-display-dsi",
.pm = &dw_mipi_dsi_stm_pm_ops,
},
};
module_platform_driver(dw_mipi_dsi_stm_driver);
MODULE_AUTHOR("Philippe Cornu <[email protected]>");
MODULE_AUTHOR("Yannick Fertre <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics DW MIPI DSI host controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/stm/dw_mipi_dsi-stm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2017
*
* Authors: Philippe Cornu <[email protected]>
* Yannick Fertre <[email protected]>
* Fabien Dessenne <[email protected]>
* Mickael Reulier <[email protected]>
*/
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "ltdc.h"
#define STM_MAX_FB_WIDTH 2048
#define STM_MAX_FB_HEIGHT 2048 /* same as width to handle orientation */
static const struct drm_mode_config_funcs drv_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int stm_gem_dma_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
/*
 * In order to optimize data transfers, the pitch is aligned to
 * 128 bytes and the height to a multiple of 4 lines.
 */
args->pitch = roundup(min_pitch, 128);
args->height = roundup(args->height, 4);
return drm_gem_dma_dumb_create_internal(file, dev, args);
}
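/*
 * Worked example (illustrative): an 800x480 RGB565 dumb buffer (bpp = 16)
 * gives min_pitch = DIV_ROUND_UP(800 * 16, 8) = 1600 bytes, rounded up to
 * pitch = 1664 bytes (the next multiple of 128); the height of 480 is
 * already a multiple of 4 and stays unchanged.
 */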
DEFINE_DRM_GEM_DMA_FOPS(drv_driver_fops);
static const struct drm_driver drv_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "stm",
.desc = "STMicroelectronics SoC DRM",
.date = "20170330",
.major = 1,
.minor = 0,
.patchlevel = 0,
.fops = &drv_driver_fops,
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(stm_gem_dma_dumb_create),
};
static int drv_load(struct drm_device *ddev)
{
struct platform_device *pdev = to_platform_device(ddev->dev);
struct ltdc_device *ldev;
int ret;
DRM_DEBUG("%s\n", __func__);
ldev = devm_kzalloc(ddev->dev, sizeof(*ldev), GFP_KERNEL);
if (!ldev)
return -ENOMEM;
ddev->dev_private = (void *)ldev;
ret = drmm_mode_config_init(ddev);
if (ret)
return ret;
/*
 * Set the maximum width and height as default values.
 * These values are used to check the framebuffer size limitation
 * in drm_mode_addfb().
 */
ddev->mode_config.min_width = 0;
ddev->mode_config.min_height = 0;
ddev->mode_config.max_width = STM_MAX_FB_WIDTH;
ddev->mode_config.max_height = STM_MAX_FB_HEIGHT;
ddev->mode_config.funcs = &drv_mode_config_funcs;
ddev->mode_config.normalize_zpos = true;
ret = ltdc_load(ddev);
if (ret)
return ret;
drm_mode_config_reset(ddev);
drm_kms_helper_poll_init(ddev);
platform_set_drvdata(pdev, ddev);
return 0;
}
static void drv_unload(struct drm_device *ddev)
{
DRM_DEBUG("%s\n", __func__);
drm_kms_helper_poll_fini(ddev);
ltdc_unload(ddev);
}
static __maybe_unused int drv_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct ltdc_device *ldev = ddev->dev_private;
struct drm_atomic_state *state;
WARN_ON(ldev->suspend_state);
state = drm_atomic_helper_suspend(ddev);
if (IS_ERR(state))
return PTR_ERR(state);
ldev->suspend_state = state;
pm_runtime_force_suspend(dev);
return 0;
}
static __maybe_unused int drv_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct ltdc_device *ldev = ddev->dev_private;
int ret;
if (WARN_ON(!ldev->suspend_state))
return -ENOENT;
pm_runtime_force_resume(dev);
ret = drm_atomic_helper_resume(ddev, ldev->suspend_state);
if (ret)
pm_runtime_force_suspend(dev);
ldev->suspend_state = NULL;
return ret;
}
static __maybe_unused int drv_runtime_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
DRM_DEBUG_DRIVER("\n");
ltdc_suspend(ddev);
return 0;
}
static __maybe_unused int drv_runtime_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
DRM_DEBUG_DRIVER("\n");
return ltdc_resume(ddev);
}
static const struct dev_pm_ops drv_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(drv_suspend, drv_resume)
SET_RUNTIME_PM_OPS(drv_runtime_suspend,
drv_runtime_resume, NULL)
};
static int stm_drm_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct drm_device *ddev;
int ret;
DRM_DEBUG("%s\n", __func__);
ret = drm_aperture_remove_framebuffers(&drv_driver);
if (ret)
return ret;
dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
ddev = drm_dev_alloc(&drv_driver, dev);
if (IS_ERR(ddev))
return PTR_ERR(ddev);
ret = drv_load(ddev);
if (ret)
goto err_put;
ret = drm_dev_register(ddev, 0);
if (ret)
goto err_put;
drm_fbdev_dma_setup(ddev, 16);
return 0;
err_put:
drm_dev_put(ddev);
return ret;
}
static void stm_drm_platform_remove(struct platform_device *pdev)
{
struct drm_device *ddev = platform_get_drvdata(pdev);
DRM_DEBUG("%s\n", __func__);
drm_dev_unregister(ddev);
drv_unload(ddev);
drm_dev_put(ddev);
}
static const struct of_device_id drv_dt_ids[] = {
{ .compatible = "st,stm32-ltdc"},
{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, drv_dt_ids);
static struct platform_driver stm_drm_platform_driver = {
.probe = stm_drm_platform_probe,
.remove_new = stm_drm_platform_remove,
.driver = {
.name = "stm32-display",
.of_match_table = drv_dt_ids,
.pm = &drv_pm_ops,
},
};
drm_module_platform_driver(stm_drm_platform_driver);
MODULE_AUTHOR("Philippe Cornu <[email protected]>");
MODULE_AUTHOR("Yannick Fertre <[email protected]>");
MODULE_AUTHOR("Fabien Dessenne <[email protected]>");
MODULE_AUTHOR("Mickael Reulier <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics ST DRM LTDC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/stm/drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 MediaTek Inc.
* Author: Jie Qiu <[email protected]>
*/
#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <sound/hdmi-codec.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "mtk_cec.h"
#include "mtk_hdmi.h"
#include "mtk_hdmi_regs.h"
#define NCTS_BYTES 7
enum mtk_hdmi_clk_id {
MTK_HDMI_CLK_HDMI_PIXEL,
MTK_HDMI_CLK_HDMI_PLL,
MTK_HDMI_CLK_AUD_BCLK,
MTK_HDMI_CLK_AUD_SPDIF,
MTK_HDMI_CLK_COUNT
};
enum hdmi_aud_input_type {
HDMI_AUD_INPUT_I2S = 0,
HDMI_AUD_INPUT_SPDIF,
};
enum hdmi_aud_i2s_fmt {
HDMI_I2S_MODE_RJT_24BIT = 0,
HDMI_I2S_MODE_RJT_16BIT,
HDMI_I2S_MODE_LJT_24BIT,
HDMI_I2S_MODE_LJT_16BIT,
HDMI_I2S_MODE_I2S_24BIT,
HDMI_I2S_MODE_I2S_16BIT
};
enum hdmi_aud_mclk {
HDMI_AUD_MCLK_128FS,
HDMI_AUD_MCLK_192FS,
HDMI_AUD_MCLK_256FS,
HDMI_AUD_MCLK_384FS,
HDMI_AUD_MCLK_512FS,
HDMI_AUD_MCLK_768FS,
HDMI_AUD_MCLK_1152FS,
};
enum hdmi_aud_channel_type {
HDMI_AUD_CHAN_TYPE_1_0 = 0,
HDMI_AUD_CHAN_TYPE_1_1,
HDMI_AUD_CHAN_TYPE_2_0,
HDMI_AUD_CHAN_TYPE_2_1,
HDMI_AUD_CHAN_TYPE_3_0,
HDMI_AUD_CHAN_TYPE_3_1,
HDMI_AUD_CHAN_TYPE_4_0,
HDMI_AUD_CHAN_TYPE_4_1,
HDMI_AUD_CHAN_TYPE_5_0,
HDMI_AUD_CHAN_TYPE_5_1,
HDMI_AUD_CHAN_TYPE_6_0,
HDMI_AUD_CHAN_TYPE_6_1,
HDMI_AUD_CHAN_TYPE_7_0,
HDMI_AUD_CHAN_TYPE_7_1,
HDMI_AUD_CHAN_TYPE_3_0_LRS,
HDMI_AUD_CHAN_TYPE_3_1_LRS,
HDMI_AUD_CHAN_TYPE_4_0_CLRS,
HDMI_AUD_CHAN_TYPE_4_1_CLRS,
HDMI_AUD_CHAN_TYPE_6_1_CS,
HDMI_AUD_CHAN_TYPE_6_1_CH,
HDMI_AUD_CHAN_TYPE_6_1_OH,
HDMI_AUD_CHAN_TYPE_6_1_CHR,
HDMI_AUD_CHAN_TYPE_7_1_LH_RH,
HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR,
HDMI_AUD_CHAN_TYPE_7_1_LC_RC,
HDMI_AUD_CHAN_TYPE_7_1_LW_RW,
HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD,
HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS,
HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS,
HDMI_AUD_CHAN_TYPE_7_1_CS_CH,
HDMI_AUD_CHAN_TYPE_7_1_CS_OH,
HDMI_AUD_CHAN_TYPE_7_1_CS_CHR,
HDMI_AUD_CHAN_TYPE_7_1_CH_OH,
HDMI_AUD_CHAN_TYPE_7_1_CH_CHR,
HDMI_AUD_CHAN_TYPE_7_1_OH_CHR,
HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR,
HDMI_AUD_CHAN_TYPE_6_0_CS,
HDMI_AUD_CHAN_TYPE_6_0_CH,
HDMI_AUD_CHAN_TYPE_6_0_OH,
HDMI_AUD_CHAN_TYPE_6_0_CHR,
HDMI_AUD_CHAN_TYPE_7_0_LH_RH,
HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR,
HDMI_AUD_CHAN_TYPE_7_0_LC_RC,
HDMI_AUD_CHAN_TYPE_7_0_LW_RW,
HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD,
HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS,
HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS,
HDMI_AUD_CHAN_TYPE_7_0_CS_CH,
HDMI_AUD_CHAN_TYPE_7_0_CS_OH,
HDMI_AUD_CHAN_TYPE_7_0_CS_CHR,
HDMI_AUD_CHAN_TYPE_7_0_CH_OH,
HDMI_AUD_CHAN_TYPE_7_0_CH_CHR,
HDMI_AUD_CHAN_TYPE_7_0_OH_CHR,
HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR,
HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS,
HDMI_AUD_CHAN_TYPE_UNKNOWN = 0xFF
};
enum hdmi_aud_channel_swap_type {
HDMI_AUD_SWAP_LR,
HDMI_AUD_SWAP_LFE_CC,
HDMI_AUD_SWAP_LSRS,
HDMI_AUD_SWAP_RLS_RRS,
HDMI_AUD_SWAP_LR_STATUS,
};
struct hdmi_audio_param {
enum hdmi_audio_coding_type aud_codec;
enum hdmi_audio_sample_size aud_sampe_size;
enum hdmi_aud_input_type aud_input_type;
enum hdmi_aud_i2s_fmt aud_i2s_fmt;
enum hdmi_aud_mclk aud_mclk;
enum hdmi_aud_channel_type aud_input_chan_type;
struct hdmi_codec_params codec_params;
};
struct mtk_hdmi_conf {
bool tz_disabled;
bool cea_modes_only;
unsigned long max_mode_clock;
};
struct mtk_hdmi {
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
struct drm_connector *curr_conn;/* current connector (only valid when 'enabled') */
struct device *dev;
const struct mtk_hdmi_conf *conf;
struct phy *phy;
struct device *cec_dev;
struct i2c_adapter *ddc_adpt;
struct clk *clk[MTK_HDMI_CLK_COUNT];
struct drm_display_mode mode;
bool dvi_mode;
u32 min_clock;
u32 max_clock;
u32 max_hdisplay;
u32 max_vdisplay;
u32 ibias;
u32 ibias_up;
struct regmap *sys_regmap;
unsigned int sys_offset;
void __iomem *regs;
enum hdmi_colorspace csp;
struct hdmi_audio_param aud_param;
bool audio_enable;
bool powered;
bool enabled;
hdmi_codec_plugged_cb plugged_cb;
struct device *codec_dev;
struct mutex update_plugged_status_lock;
};
static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
{
return container_of(b, struct mtk_hdmi, bridge);
}
static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
{
return readl(hdmi->regs + offset);
}
static void mtk_hdmi_write(struct mtk_hdmi *hdmi, u32 offset, u32 val)
{
writel(val, hdmi->regs + offset);
}
static void mtk_hdmi_clear_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
{
void __iomem *reg = hdmi->regs + offset;
u32 tmp;
tmp = readl(reg);
tmp &= ~bits;
writel(tmp, reg);
}
static void mtk_hdmi_set_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
{
void __iomem *reg = hdmi->regs + offset;
u32 tmp;
tmp = readl(reg);
tmp |= bits;
writel(tmp, reg);
}
static void mtk_hdmi_mask(struct mtk_hdmi *hdmi, u32 offset, u32 val, u32 mask)
{
void __iomem *reg = hdmi->regs + offset;
u32 tmp;
tmp = readl(reg);
tmp = (tmp & ~mask) | (val & mask);
writel(tmp, reg);
}
static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
{
mtk_hdmi_mask(hdmi, VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH,
VIDEO_SOURCE_SEL);
}
static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
{
struct arm_smccc_res res;
/*
* MT8173 HDMI hardware has an output control bit to enable/disable HDMI
* output. This bit can only be controlled in ARM supervisor mode.
* The ARM trusted firmware provides an API for the HDMI driver to set
* this control bit to enable HDMI output in supervisor mode.
*/
if (hdmi->conf && hdmi->conf->tz_disabled)
regmap_update_bits(hdmi->sys_regmap,
hdmi->sys_offset + HDMI_SYS_CFG20,
0x80008005, enable ? 0x80000005 : 0x8000);
else
arm_smccc_smc(MTK_SIP_SET_AUTHORIZED_SECURE_REG, 0x14000904,
0x80000000, 0, 0, 0, 0, 0, &res);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
HDMI_PCLK_FREE_RUN, enable ? HDMI_PCLK_FREE_RUN : 0);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
HDMI_ON | ANLG_ON, enable ? (HDMI_ON | ANLG_ON) : 0);
}
static void mtk_hdmi_hw_1p4_version_enable(struct mtk_hdmi *hdmi, bool enable)
{
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
HDMI2P0_EN, enable ? 0 : HDMI2P0_EN);
}
static void mtk_hdmi_hw_aud_mute(struct mtk_hdmi *hdmi)
{
mtk_hdmi_set_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
}
static void mtk_hdmi_hw_aud_unmute(struct mtk_hdmi *hdmi)
{
mtk_hdmi_clear_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
}
static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
{
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
HDMI_RST, HDMI_RST);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
HDMI_RST, 0);
mtk_hdmi_clear_bits(hdmi, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
ANLG_ON, ANLG_ON);
}
static void mtk_hdmi_hw_enable_notice(struct mtk_hdmi *hdmi, bool enable_notice)
{
mtk_hdmi_mask(hdmi, GRL_CFG2, enable_notice ? CFG2_NOTICE_EN : 0,
CFG2_NOTICE_EN);
}
static void mtk_hdmi_hw_write_int_mask(struct mtk_hdmi *hdmi, u32 int_mask)
{
mtk_hdmi_write(hdmi, GRL_INT_MASK, int_mask);
}
static void mtk_hdmi_hw_enable_dvi_mode(struct mtk_hdmi *hdmi, bool enable)
{
mtk_hdmi_mask(hdmi, GRL_CFG1, enable ? CFG1_DVI : 0, CFG1_DVI);
}
static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
u8 len)
{
u32 ctrl_reg = GRL_CTRL;
int i;
u8 *frame_data;
enum hdmi_infoframe_type frame_type;
u8 frame_ver;
u8 frame_len;
u8 checksum;
int ctrl_frame_en = 0;
frame_type = *buffer++;
frame_ver = *buffer++;
frame_len = *buffer++;
checksum = *buffer++;
frame_data = buffer;
dev_dbg(hdmi->dev,
"frame_type:0x%x,frame_ver:0x%x,frame_len:0x%x,checksum:0x%x\n",
frame_type, frame_ver, frame_len, checksum);
switch (frame_type) {
case HDMI_INFOFRAME_TYPE_AVI:
ctrl_frame_en = CTRL_AVI_EN;
ctrl_reg = GRL_CTRL;
break;
case HDMI_INFOFRAME_TYPE_SPD:
ctrl_frame_en = CTRL_SPD_EN;
ctrl_reg = GRL_CTRL;
break;
case HDMI_INFOFRAME_TYPE_AUDIO:
ctrl_frame_en = CTRL_AUDIO_EN;
ctrl_reg = GRL_CTRL;
break;
case HDMI_INFOFRAME_TYPE_VENDOR:
ctrl_frame_en = VS_EN;
ctrl_reg = GRL_ACP_ISRC_CTRL;
break;
default:
dev_err(hdmi->dev, "Unknown infoframe type %d\n", frame_type);
return;
}
mtk_hdmi_clear_bits(hdmi, ctrl_reg, ctrl_frame_en);
mtk_hdmi_write(hdmi, GRL_INFOFRM_TYPE, frame_type);
mtk_hdmi_write(hdmi, GRL_INFOFRM_VER, frame_ver);
mtk_hdmi_write(hdmi, GRL_INFOFRM_LNG, frame_len);
mtk_hdmi_write(hdmi, GRL_IFM_PORT, checksum);
for (i = 0; i < frame_len; i++)
mtk_hdmi_write(hdmi, GRL_IFM_PORT, frame_data[i]);
mtk_hdmi_set_bits(hdmi, ctrl_reg, ctrl_frame_en);
}
static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
{
mtk_hdmi_mask(hdmi, GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF,
AUDIO_PACKET_OFF);
}
static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
{
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
HDMI_OUT_FIFO_EN | MHL_MODE_ON, 0);
usleep_range(2000, 4000);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
HDMI_OUT_FIFO_EN | MHL_MODE_ON, HDMI_OUT_FIFO_EN);
}
static void mtk_hdmi_hw_set_deep_color_mode(struct mtk_hdmi *hdmi)
{
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
DEEP_COLOR_MODE_MASK | DEEP_COLOR_EN,
COLOR_8BIT_MODE);
}
static void mtk_hdmi_hw_send_av_mute(struct mtk_hdmi *hdmi)
{
mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
usleep_range(2000, 4000);
mtk_hdmi_set_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
}
static void mtk_hdmi_hw_send_av_unmute(struct mtk_hdmi *hdmi)
{
mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_EN,
CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
usleep_range(2000, 4000);
mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_SET,
CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
}
static void mtk_hdmi_hw_ncts_enable(struct mtk_hdmi *hdmi, bool on)
{
mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, on ? 0 : CTS_CTRL_SOFT,
CTS_CTRL_SOFT);
}
static void mtk_hdmi_hw_ncts_auto_write_enable(struct mtk_hdmi *hdmi,
bool enable)
{
mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, enable ? NCTS_WRI_ANYTIME : 0,
NCTS_WRI_ANYTIME);
}
static void mtk_hdmi_hw_msic_setting(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CFG4_MHL_MODE);
if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
mode->clock == 74250 &&
mode->vdisplay == 1080)
mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
else
mtk_hdmi_set_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
}
static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
enum hdmi_aud_channel_swap_type swap)
{
u8 swap_bit;
switch (swap) {
case HDMI_AUD_SWAP_LR:
swap_bit = LR_SWAP;
break;
case HDMI_AUD_SWAP_LFE_CC:
swap_bit = LFE_CC_SWAP;
break;
case HDMI_AUD_SWAP_LSRS:
swap_bit = LSRS_SWAP;
break;
case HDMI_AUD_SWAP_RLS_RRS:
swap_bit = RLS_RRS_SWAP;
break;
case HDMI_AUD_SWAP_LR_STATUS:
swap_bit = LR_STATUS_SWAP;
break;
default:
swap_bit = LFE_CC_SWAP;
break;
}
mtk_hdmi_mask(hdmi, GRL_CH_SWAP, swap_bit, 0xff);
}
static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
enum hdmi_audio_sample_size bit_num)
{
u32 val;
switch (bit_num) {
case HDMI_AUDIO_SAMPLE_SIZE_16:
val = AOUT_16BIT;
break;
case HDMI_AUDIO_SAMPLE_SIZE_20:
val = AOUT_20BIT;
break;
case HDMI_AUDIO_SAMPLE_SIZE_24:
case HDMI_AUDIO_SAMPLE_SIZE_STREAM:
val = AOUT_24BIT;
break;
}
mtk_hdmi_mask(hdmi, GRL_AOUT_CFG, val, AOUT_BNUM_SEL_MASK);
}
static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
enum hdmi_aud_i2s_fmt i2s_fmt)
{
u32 val;
val = mtk_hdmi_read(hdmi, GRL_CFG0);
val &= ~(CFG0_W_LENGTH_MASK | CFG0_I2S_MODE_MASK);
switch (i2s_fmt) {
case HDMI_I2S_MODE_RJT_24BIT:
val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_24BIT;
break;
case HDMI_I2S_MODE_RJT_16BIT:
val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_16BIT;
break;
case HDMI_I2S_MODE_LJT_24BIT:
default:
val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_24BIT;
break;
case HDMI_I2S_MODE_LJT_16BIT:
val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_16BIT;
break;
case HDMI_I2S_MODE_I2S_24BIT:
val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_24BIT;
break;
case HDMI_I2S_MODE_I2S_16BIT:
val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_16BIT;
break;
}
mtk_hdmi_write(hdmi, GRL_CFG0, val);
}
static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
{
const u8 mask = HIGH_BIT_RATE | DST_NORMAL_DOUBLE | SACD_DST | DSD_SEL;
u8 val;
/* Disable high bitrate, set DST packet normal/double */
mtk_hdmi_clear_bits(hdmi, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
if (dst)
val = DST_NORMAL_DOUBLE | SACD_DST;
else
val = 0;
mtk_hdmi_mask(hdmi, GRL_AUDIO_CFG, val, mask);
}
static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
enum hdmi_aud_channel_type channel_type,
u8 channel_count)
{
unsigned int ch_switch;
u8 i2s_uv;
ch_switch = CH_SWITCH(7, 7) | CH_SWITCH(6, 6) |
CH_SWITCH(5, 5) | CH_SWITCH(4, 4) |
CH_SWITCH(3, 3) | CH_SWITCH(1, 2) |
CH_SWITCH(2, 1) | CH_SWITCH(0, 0);
if (channel_count == 2) {
i2s_uv = I2S_UV_CH_EN(0);
} else if (channel_count == 3 || channel_count == 4) {
if (channel_count == 4 &&
(channel_type == HDMI_AUD_CHAN_TYPE_3_0_LRS ||
channel_type == HDMI_AUD_CHAN_TYPE_4_0))
i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(0);
else
i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2);
} else if (channel_count == 6 || channel_count == 5) {
if (channel_count == 6 &&
channel_type != HDMI_AUD_CHAN_TYPE_5_1 &&
channel_type != HDMI_AUD_CHAN_TYPE_4_1_CLRS) {
i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) |
I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0);
} else {
i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(1) |
I2S_UV_CH_EN(0);
}
} else if (channel_count == 8 || channel_count == 7) {
i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) |
I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0);
} else {
i2s_uv = I2S_UV_CH_EN(0);
}
mtk_hdmi_write(hdmi, GRL_CH_SW0, ch_switch & 0xff);
mtk_hdmi_write(hdmi, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
mtk_hdmi_write(hdmi, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
mtk_hdmi_write(hdmi, GRL_I2S_UV, i2s_uv);
}
static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
enum hdmi_aud_input_type input_type)
{
u32 val;
val = mtk_hdmi_read(hdmi, GRL_CFG1);
if (input_type == HDMI_AUD_INPUT_I2S &&
(val & CFG1_SPDIF) == CFG1_SPDIF) {
val &= ~CFG1_SPDIF;
} else if (input_type == HDMI_AUD_INPUT_SPDIF &&
(val & CFG1_SPDIF) == 0) {
val |= CFG1_SPDIF;
}
mtk_hdmi_write(hdmi, GRL_CFG1, val);
}
static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
u8 *channel_status)
{
int i;
for (i = 0; i < 5; i++) {
mtk_hdmi_write(hdmi, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, channel_status[i]);
mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, channel_status[i]);
}
for (; i < 24; i++) {
mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, 0);
mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, 0);
}
}
static void mtk_hdmi_hw_aud_src_reenable(struct mtk_hdmi *hdmi)
{
u32 val;
val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
if (val & MIX_CTRL_SRC_EN) {
val &= ~MIX_CTRL_SRC_EN;
mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
usleep_range(255, 512);
val |= MIX_CTRL_SRC_EN;
mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
}
}
static void mtk_hdmi_hw_aud_src_disable(struct mtk_hdmi *hdmi)
{
u32 val;
val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
val &= ~MIX_CTRL_SRC_EN;
mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
mtk_hdmi_write(hdmi, GRL_SHIFT_L1, 0x00);
}
static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
enum hdmi_aud_mclk mclk)
{
u32 val;
val = mtk_hdmi_read(hdmi, GRL_CFG5);
val &= CFG5_CD_RATIO_MASK;
switch (mclk) {
case HDMI_AUD_MCLK_128FS:
val |= CFG5_FS128;
break;
case HDMI_AUD_MCLK_256FS:
val |= CFG5_FS256;
break;
case HDMI_AUD_MCLK_384FS:
val |= CFG5_FS384;
break;
case HDMI_AUD_MCLK_512FS:
val |= CFG5_FS512;
break;
case HDMI_AUD_MCLK_768FS:
val |= CFG5_FS768;
break;
default:
val |= CFG5_FS256;
break;
}
mtk_hdmi_write(hdmi, GRL_CFG5, val);
}
struct hdmi_acr_n {
unsigned int clock;
unsigned int n[3];
};
/* Recommended N values from HDMI specification, tables 7-1 to 7-3 */
static const struct hdmi_acr_n hdmi_rec_n_table[] = {
/* Clock, N: 32kHz 44.1kHz 48kHz */
{ 25175, { 4576, 7007, 6864 } },
{ 74176, { 11648, 17836, 11648 } },
{ 148352, { 11648, 8918, 5824 } },
{ 296703, { 5824, 4459, 5824 } },
{ 297000, { 3072, 4704, 5120 } },
{ 0, { 4096, 6272, 6144 } }, /* all other TMDS clocks */
};
/**
* hdmi_recommended_n() - Return N value recommended by HDMI specification
* @freq: audio sample rate in Hz
* @clock: rounded TMDS clock in kHz
*/
static unsigned int hdmi_recommended_n(unsigned int freq, unsigned int clock)
{
const struct hdmi_acr_n *recommended;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hdmi_rec_n_table) - 1; i++) {
if (clock == hdmi_rec_n_table[i].clock)
break;
}
recommended = hdmi_rec_n_table + i;
switch (freq) {
case 32000:
return recommended->n[0];
case 44100:
return recommended->n[1];
case 48000:
return recommended->n[2];
case 88200:
return recommended->n[1] * 2;
case 96000:
return recommended->n[2] * 2;
case 176400:
return recommended->n[1] * 4;
case 192000:
return recommended->n[2] * 4;
default:
return (128 * freq) / 1000;
}
}
static unsigned int hdmi_mode_clock_to_hz(unsigned int clock)
{
switch (clock) {
case 25175:
return 25174825; /* 25.2/1.001 MHz */
case 74176:
return 74175824; /* 74.25/1.001 MHz */
case 148352:
return 148351648; /* 148.5/1.001 MHz */
case 296703:
return 296703297; /* 297/1.001 MHz */
default:
return clock * 1000;
}
}
static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate,
unsigned int tmds_clock, unsigned int n)
{
return DIV_ROUND_CLOSEST_ULL((u64)hdmi_mode_clock_to_hz(tmds_clock) * n,
128 * audio_sample_rate);
}
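/*
 * Worked example (illustrative): for 48 kHz audio over a 148500 kHz TMDS
 * clock (not a 1.001-divided rate, so the default table row applies),
 * hdmi_recommended_n() returns N = 6144 and
 * hdmi_expected_cts() = round(148500000 * 6144 / (128 * 48000)) = 148500.
 * For the 148352 kHz (148.5/1.001 MHz) clock the table gives N = 5824 and
 * the CTS works out to 140625.
 */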
static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
unsigned int cts)
{
unsigned char val[NCTS_BYTES];
int i;
mtk_hdmi_write(hdmi, GRL_NCTS, 0);
mtk_hdmi_write(hdmi, GRL_NCTS, 0);
mtk_hdmi_write(hdmi, GRL_NCTS, 0);
memset(val, 0, sizeof(val));
val[0] = (cts >> 24) & 0xff;
val[1] = (cts >> 16) & 0xff;
val[2] = (cts >> 8) & 0xff;
val[3] = cts & 0xff;
val[4] = (n >> 16) & 0xff;
val[5] = (n >> 8) & 0xff;
val[6] = n & 0xff;
for (i = 0; i < NCTS_BYTES; i++)
mtk_hdmi_write(hdmi, GRL_NCTS, val[i]);
}
static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
unsigned int sample_rate,
unsigned int clock)
{
unsigned int n, cts;
n = hdmi_recommended_n(sample_rate, clock);
cts = hdmi_expected_cts(sample_rate, clock, n);
dev_dbg(hdmi->dev, "%s: sample_rate=%u, clock=%d, cts=%u, n=%u\n",
__func__, sample_rate, clock, n, cts);
mtk_hdmi_mask(hdmi, DUMMY_304, AUDIO_I2S_NCTS_SEL_64,
AUDIO_I2S_NCTS_SEL);
do_hdmi_hw_aud_set_ncts(hdmi, n, cts);
}
static u8 mtk_hdmi_aud_get_chnl_count(enum hdmi_aud_channel_type channel_type)
{
switch (channel_type) {
case HDMI_AUD_CHAN_TYPE_1_0:
case HDMI_AUD_CHAN_TYPE_1_1:
case HDMI_AUD_CHAN_TYPE_2_0:
return 2;
case HDMI_AUD_CHAN_TYPE_2_1:
case HDMI_AUD_CHAN_TYPE_3_0:
return 3;
case HDMI_AUD_CHAN_TYPE_3_1:
case HDMI_AUD_CHAN_TYPE_4_0:
case HDMI_AUD_CHAN_TYPE_3_0_LRS:
return 4;
case HDMI_AUD_CHAN_TYPE_4_1:
case HDMI_AUD_CHAN_TYPE_5_0:
case HDMI_AUD_CHAN_TYPE_3_1_LRS:
case HDMI_AUD_CHAN_TYPE_4_0_CLRS:
return 5;
case HDMI_AUD_CHAN_TYPE_5_1:
case HDMI_AUD_CHAN_TYPE_6_0:
case HDMI_AUD_CHAN_TYPE_4_1_CLRS:
case HDMI_AUD_CHAN_TYPE_6_0_CS:
case HDMI_AUD_CHAN_TYPE_6_0_CH:
case HDMI_AUD_CHAN_TYPE_6_0_OH:
case HDMI_AUD_CHAN_TYPE_6_0_CHR:
return 6;
case HDMI_AUD_CHAN_TYPE_6_1:
case HDMI_AUD_CHAN_TYPE_6_1_CS:
case HDMI_AUD_CHAN_TYPE_6_1_CH:
case HDMI_AUD_CHAN_TYPE_6_1_OH:
case HDMI_AUD_CHAN_TYPE_6_1_CHR:
case HDMI_AUD_CHAN_TYPE_7_0:
case HDMI_AUD_CHAN_TYPE_7_0_LH_RH:
case HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR:
case HDMI_AUD_CHAN_TYPE_7_0_LC_RC:
case HDMI_AUD_CHAN_TYPE_7_0_LW_RW:
case HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD:
case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS:
case HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS:
case HDMI_AUD_CHAN_TYPE_7_0_CS_CH:
case HDMI_AUD_CHAN_TYPE_7_0_CS_OH:
case HDMI_AUD_CHAN_TYPE_7_0_CS_CHR:
case HDMI_AUD_CHAN_TYPE_7_0_CH_OH:
case HDMI_AUD_CHAN_TYPE_7_0_CH_CHR:
case HDMI_AUD_CHAN_TYPE_7_0_OH_CHR:
case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR:
case HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS:
return 7;
case HDMI_AUD_CHAN_TYPE_7_1:
case HDMI_AUD_CHAN_TYPE_7_1_LH_RH:
case HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR:
case HDMI_AUD_CHAN_TYPE_7_1_LC_RC:
case HDMI_AUD_CHAN_TYPE_7_1_LW_RW:
case HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD:
case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS:
case HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS:
case HDMI_AUD_CHAN_TYPE_7_1_CS_CH:
case HDMI_AUD_CHAN_TYPE_7_1_CS_OH:
case HDMI_AUD_CHAN_TYPE_7_1_CS_CHR:
case HDMI_AUD_CHAN_TYPE_7_1_CH_OH:
case HDMI_AUD_CHAN_TYPE_7_1_CH_CHR:
case HDMI_AUD_CHAN_TYPE_7_1_OH_CHR:
case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR:
return 8;
default:
return 2;
}
}
static int mtk_hdmi_video_change_vpll(struct mtk_hdmi *hdmi, u32 clock)
{
unsigned long rate;
int ret;
/* The DPI driver should already have set TVDPLL to the correct rate */
ret = clk_set_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL], clock);
if (ret) {
dev_err(hdmi->dev, "Failed to set PLL to %u Hz: %d\n", clock,
ret);
return ret;
}
rate = clk_get_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
if (DIV_ROUND_CLOSEST(rate, 1000) != DIV_ROUND_CLOSEST(clock, 1000))
dev_warn(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock,
rate);
else
dev_dbg(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock, rate);
mtk_hdmi_hw_config_sys(hdmi);
mtk_hdmi_hw_set_deep_color_mode(hdmi);
return 0;
}
static void mtk_hdmi_video_set_display_mode(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
mtk_hdmi_hw_reset(hdmi);
mtk_hdmi_hw_enable_notice(hdmi, true);
mtk_hdmi_hw_write_int_mask(hdmi, 0xff);
mtk_hdmi_hw_enable_dvi_mode(hdmi, hdmi->dvi_mode);
mtk_hdmi_hw_ncts_auto_write_enable(hdmi, true);
mtk_hdmi_hw_msic_setting(hdmi, mode);
}
static void mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi)
{
enum hdmi_aud_channel_type chan_type;
u8 chan_count;
bool dst;
mtk_hdmi_hw_aud_set_channel_swap(hdmi, HDMI_AUD_SWAP_LFE_CC);
mtk_hdmi_set_bits(hdmi, GRL_MIX_CTRL, MIX_CTRL_FLAT);
if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF &&
hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST) {
mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24);
} else if (hdmi->aud_param.aud_i2s_fmt == HDMI_I2S_MODE_LJT_24BIT) {
hdmi->aud_param.aud_i2s_fmt = HDMI_I2S_MODE_LJT_16BIT;
}
mtk_hdmi_hw_aud_set_i2s_fmt(hdmi, hdmi->aud_param.aud_i2s_fmt);
mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24);
dst = ((hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF) &&
(hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST));
mtk_hdmi_hw_audio_config(hdmi, dst);
if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF)
chan_type = HDMI_AUD_CHAN_TYPE_2_0;
else
chan_type = hdmi->aud_param.aud_input_chan_type;
chan_count = mtk_hdmi_aud_get_chnl_count(chan_type);
mtk_hdmi_hw_aud_set_i2s_chan_num(hdmi, chan_type, chan_count);
mtk_hdmi_hw_aud_set_input_type(hdmi, hdmi->aud_param.aud_input_type);
}
static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi,
struct drm_display_mode *display_mode)
{
unsigned int sample_rate = hdmi->aud_param.codec_params.sample_rate;
mtk_hdmi_hw_ncts_enable(hdmi, false);
mtk_hdmi_hw_aud_src_disable(hdmi);
mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_ACLK_INV);
if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_I2S) {
switch (sample_rate) {
case 32000:
case 44100:
case 48000:
case 88200:
case 96000:
break;
default:
return -EINVAL;
}
mtk_hdmi_hw_aud_set_mclk(hdmi, hdmi->aud_param.aud_mclk);
} else {
switch (sample_rate) {
case 32000:
case 44100:
case 48000:
break;
default:
return -EINVAL;
}
mtk_hdmi_hw_aud_set_mclk(hdmi, HDMI_AUD_MCLK_128FS);
}
mtk_hdmi_hw_aud_set_ncts(hdmi, sample_rate, display_mode->clock);
mtk_hdmi_hw_aud_src_reenable(hdmi);
return 0;
}
static int mtk_hdmi_aud_output_config(struct mtk_hdmi *hdmi,
struct drm_display_mode *display_mode)
{
mtk_hdmi_hw_aud_mute(hdmi);
mtk_hdmi_hw_send_aud_packet(hdmi, false);
mtk_hdmi_aud_set_input(hdmi);
mtk_hdmi_aud_set_src(hdmi, display_mode);
mtk_hdmi_hw_aud_set_channel_status(hdmi,
hdmi->aud_param.codec_params.iec.status);
usleep_range(50, 100);
mtk_hdmi_hw_ncts_enable(hdmi, true);
mtk_hdmi_hw_send_aud_packet(hdmi, true);
mtk_hdmi_hw_aud_unmute(hdmi);
return 0;
}
static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
struct hdmi_avi_infoframe frame;
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
ssize_t err;
err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
hdmi->curr_conn, mode);
if (err < 0) {
dev_err(hdmi->dev,
"Failed to get AVI infoframe from mode: %zd\n", err);
return err;
}
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
dev_err(hdmi->dev, "Failed to pack AVI infoframe: %zd\n", err);
return err;
}
mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
return 0;
}
static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
const char *vendor,
const char *product)
{
struct hdmi_spd_infoframe frame;
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE];
ssize_t err;
err = hdmi_spd_infoframe_init(&frame, vendor, product);
if (err < 0) {
dev_err(hdmi->dev, "Failed to initialize SPD infoframe: %zd\n",
err);
return err;
}
err = hdmi_spd_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
dev_err(hdmi->dev, "Failed to pack SDP infoframe: %zd\n", err);
return err;
}
mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
return 0;
}
static int mtk_hdmi_setup_audio_infoframe(struct mtk_hdmi *hdmi)
{
struct hdmi_audio_infoframe frame;
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
ssize_t err;
err = hdmi_audio_infoframe_init(&frame);
if (err < 0) {
dev_err(hdmi->dev, "Failed to setup audio infoframe: %zd\n",
err);
return err;
}
frame.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
frame.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
frame.channels = mtk_hdmi_aud_get_chnl_count(
hdmi->aud_param.aud_input_chan_type);
err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
dev_err(hdmi->dev, "Failed to pack audio infoframe: %zd\n",
err);
return err;
}
mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
return 0;
}
static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
struct hdmi_vendor_infoframe frame;
u8 buffer[10];
ssize_t err;
err = drm_hdmi_vendor_infoframe_from_display_mode(&frame,
hdmi->curr_conn, mode);
if (err) {
dev_err(hdmi->dev,
"Failed to get vendor infoframe from mode: %zd\n", err);
return err;
}
err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
err);
return err;
}
mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
return 0;
}
static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
{
struct hdmi_audio_param *aud_param = &hdmi->aud_param;
hdmi->csp = HDMI_COLORSPACE_RGB;
aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
aud_param->aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
return 0;
}
static void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
{
mtk_hdmi_hw_send_aud_packet(hdmi, true);
hdmi->audio_enable = true;
}
static void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
{
mtk_hdmi_hw_send_aud_packet(hdmi, false);
hdmi->audio_enable = false;
}
static int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
struct hdmi_audio_param *param)
{
if (!hdmi->audio_enable) {
dev_err(hdmi->dev, "hdmi audio is in disable state!\n");
return -EINVAL;
}
dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
param->aud_codec, param->aud_input_type,
param->aud_input_chan_type, param->codec_params.sample_rate);
memcpy(&hdmi->aud_param, param, sizeof(*param));
return mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
}
static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
int ret;
mtk_hdmi_hw_vid_black(hdmi, true);
mtk_hdmi_hw_aud_mute(hdmi);
mtk_hdmi_hw_send_av_mute(hdmi);
phy_power_off(hdmi->phy);
ret = mtk_hdmi_video_change_vpll(hdmi,
mode->clock * 1000);
if (ret) {
dev_err(hdmi->dev, "Failed to set vpll: %d\n", ret);
return ret;
}
mtk_hdmi_video_set_display_mode(hdmi, mode);
phy_power_on(hdmi->phy);
mtk_hdmi_aud_output_config(hdmi, mode);
mtk_hdmi_hw_vid_black(hdmi, false);
mtk_hdmi_hw_aud_unmute(hdmi);
mtk_hdmi_hw_send_av_unmute(hdmi);
return 0;
}
static const char * const mtk_hdmi_clk_names[MTK_HDMI_CLK_COUNT] = {
[MTK_HDMI_CLK_HDMI_PIXEL] = "pixel",
[MTK_HDMI_CLK_HDMI_PLL] = "pll",
[MTK_HDMI_CLK_AUD_BCLK] = "bclk",
[MTK_HDMI_CLK_AUD_SPDIF] = "spdif",
};
static int mtk_hdmi_get_all_clk(struct mtk_hdmi *hdmi,
struct device_node *np)
{
int i;
for (i = 0; i < ARRAY_SIZE(mtk_hdmi_clk_names); i++) {
hdmi->clk[i] = of_clk_get_by_name(np,
mtk_hdmi_clk_names[i]);
if (IS_ERR(hdmi->clk[i]))
return PTR_ERR(hdmi->clk[i]);
}
return 0;
}
static int mtk_hdmi_clk_enable_audio(struct mtk_hdmi *hdmi)
{
int ret;
ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
if (ret)
return ret;
ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
if (ret)
goto err;
return 0;
err:
clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
return ret;
}
static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi)
{
clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
}
static enum drm_connector_status
mtk_hdmi_update_plugged_status(struct mtk_hdmi *hdmi)
{
bool connected;
mutex_lock(&hdmi->update_plugged_status_lock);
connected = mtk_cec_hpd_high(hdmi->cec_dev);
if (hdmi->plugged_cb && hdmi->codec_dev)
hdmi->plugged_cb(hdmi->codec_dev, connected);
mutex_unlock(&hdmi->update_plugged_status_lock);
return connected ?
connector_status_connected : connector_status_disconnected;
}
static enum drm_connector_status mtk_hdmi_detect(struct mtk_hdmi *hdmi)
{
return mtk_hdmi_update_plugged_status(hdmi);
}
static enum drm_mode_status
mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
struct drm_bridge *next_bridge;
dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
!!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000);
next_bridge = drm_bridge_get_next_bridge(&hdmi->bridge);
if (next_bridge) {
struct drm_display_mode adjusted_mode;
drm_mode_init(&adjusted_mode, mode);
if (!drm_bridge_chain_mode_fixup(next_bridge, mode,
&adjusted_mode))
return MODE_BAD;
}
if (hdmi->conf) {
if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode))
return MODE_BAD;
if (hdmi->conf->max_mode_clock &&
mode->clock > hdmi->conf->max_mode_clock)
return MODE_CLOCK_HIGH;
}
if (mode->clock < 27000)
return MODE_CLOCK_LOW;
if (mode->clock > 297000)
return MODE_CLOCK_HIGH;
return drm_mode_validate_size(mode, 0x1fff, 0x1fff);
}
static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev) {
static enum drm_connector_status status;
status = mtk_hdmi_detect(hdmi);
drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev);
drm_bridge_hpd_notify(&hdmi->bridge, status);
}
}
/*
* Bridge callbacks
*/
static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridge)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
return mtk_hdmi_detect(hdmi);
}
static struct edid *mtk_hdmi_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
struct edid *edid;
if (!hdmi->ddc_adpt)
return NULL;
edid = drm_get_edid(connector, hdmi->ddc_adpt);
if (!edid)
return NULL;
hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
return edid;
}
static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
int ret;
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
DRM_ERROR("%s: The flag DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied\n",
__func__);
return -EINVAL;
}
if (hdmi->next_bridge) {
ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
bridge, flags);
if (ret)
return ret;
}
mtk_cec_set_hpd_event(hdmi->cec_dev, mtk_hdmi_hpd_event, hdmi->dev);
return 0;
}
static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void mtk_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
if (!hdmi->enabled)
return;
phy_power_off(hdmi->phy);
clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
hdmi->curr_conn = NULL;
hdmi->enabled = false;
}
static void mtk_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
if (!hdmi->powered)
return;
mtk_hdmi_hw_1p4_version_enable(hdmi, true);
mtk_hdmi_hw_make_reg_writable(hdmi, false);
hdmi->powered = false;
}
static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
dev_dbg(hdmi->dev, "cur info: name:%s, hdisplay:%d\n",
adjusted_mode->name, adjusted_mode->hdisplay);
dev_dbg(hdmi->dev, "hsync_start:%d,hsync_end:%d, htotal:%d",
adjusted_mode->hsync_start, adjusted_mode->hsync_end,
adjusted_mode->htotal);
dev_dbg(hdmi->dev, "hskew:%d, vdisplay:%d\n",
adjusted_mode->hskew, adjusted_mode->vdisplay);
dev_dbg(hdmi->dev, "vsync_start:%d, vsync_end:%d, vtotal:%d",
adjusted_mode->vsync_start, adjusted_mode->vsync_end,
adjusted_mode->vtotal);
dev_dbg(hdmi->dev, "vscan:%d, flag:%d\n",
adjusted_mode->vscan, adjusted_mode->flags);
drm_mode_copy(&hdmi->mode, adjusted_mode);
}
static void mtk_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
mtk_hdmi_hw_make_reg_writable(hdmi, true);
mtk_hdmi_hw_1p4_version_enable(hdmi, true);
hdmi->powered = true;
}
static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
mtk_hdmi_setup_audio_infoframe(hdmi);
mtk_hdmi_setup_avi_infoframe(hdmi, mode);
mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
if (mode->flags & DRM_MODE_FLAG_3D_MASK)
mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
}
static void mtk_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
struct drm_atomic_state *state = old_state->base.state;
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
/* Retrieve the connector through the atomic state. */
hdmi->curr_conn = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
mtk_hdmi_output_set_display_mode(hdmi, &hdmi->mode);
clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
phy_power_on(hdmi->phy);
mtk_hdmi_send_infoframe(hdmi, &hdmi->mode);
hdmi->enabled = true;
}
static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
.mode_valid = mtk_hdmi_bridge_mode_valid,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.attach = mtk_hdmi_bridge_attach,
.mode_fixup = mtk_hdmi_bridge_mode_fixup,
.atomic_disable = mtk_hdmi_bridge_atomic_disable,
.atomic_post_disable = mtk_hdmi_bridge_atomic_post_disable,
.mode_set = mtk_hdmi_bridge_mode_set,
.atomic_pre_enable = mtk_hdmi_bridge_atomic_pre_enable,
.atomic_enable = mtk_hdmi_bridge_atomic_enable,
.detect = mtk_hdmi_bridge_detect,
.get_edid = mtk_hdmi_bridge_get_edid,
};
static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *cec_np, *remote, *i2c_np;
struct platform_device *cec_pdev;
struct regmap *regmap;
struct resource *mem;
int ret;
ret = mtk_hdmi_get_all_clk(hdmi, np);
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get clocks: %d\n", ret);
return ret;
}
/* The CEC module handles HDMI hotplug detection */
cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec");
if (!cec_np) {
dev_err(dev, "Failed to find CEC node\n");
return -EINVAL;
}
cec_pdev = of_find_device_by_node(cec_np);
if (!cec_pdev) {
dev_err(hdmi->dev, "Waiting for CEC device %pOF\n",
cec_np);
of_node_put(cec_np);
return -EPROBE_DEFER;
}
of_node_put(cec_np);
hdmi->cec_dev = &cec_pdev->dev;
/*
* The mediatek,syscon-hdmi property contains a phandle link to the
* MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG
* registers it contains.
*/
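/*
* An illustrative device tree fragment for this property (the node
* name, phandle and offset below are examples only; the real values
* come from the SoC dtsi, e.g. mt8173.dtsi):
*
*   hdmi0: hdmi@14025000 {
*           mediatek,syscon-hdmi = <&mmsys 0x900>;
*   };
*/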
regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,syscon-hdmi");
ret = of_property_read_u32_index(np, "mediatek,syscon-hdmi", 1,
&hdmi->sys_offset);
if (IS_ERR(regmap))
ret = PTR_ERR(regmap);
if (ret) {
dev_err(dev,
"Failed to get system configuration registers: %d\n",
ret);
goto put_device;
}
hdmi->sys_regmap = regmap;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdmi->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(hdmi->regs)) {
ret = PTR_ERR(hdmi->regs);
goto put_device;
}
remote = of_graph_get_remote_node(np, 1, 0);
if (!remote) {
ret = -EINVAL;
goto put_device;
}
if (!of_device_is_compatible(remote, "hdmi-connector")) {
hdmi->next_bridge = of_drm_find_bridge(remote);
if (!hdmi->next_bridge) {
dev_err(dev, "Waiting for external bridge\n");
of_node_put(remote);
ret = -EPROBE_DEFER;
goto put_device;
}
}
i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0);
if (!i2c_np) {
dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n",
remote);
of_node_put(remote);
ret = -EINVAL;
goto put_device;
}
of_node_put(remote);
hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
of_node_put(i2c_np);
if (!hdmi->ddc_adpt) {
dev_err(dev, "Failed to get ddc i2c adapter by node\n");
ret = -EINVAL;
goto put_device;
}
return 0;
put_device:
put_device(hdmi->cec_dev);
return ret;
}
/*
* HDMI audio codec callbacks
*/
static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
struct hdmi_audio_param hdmi_params;
unsigned int chan = params->cea.channels;
dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
params->sample_rate, params->sample_width, chan);
if (!hdmi->bridge.encoder)
return -ENODEV;
switch (chan) {
case 2:
hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
break;
case 4:
hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
break;
case 6:
hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
break;
case 8:
hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
break;
default:
dev_err(hdmi->dev, "channel[%d] not supported!\n", chan);
return -EINVAL;
}
switch (params->sample_rate) {
case 32000:
case 44100:
case 48000:
case 88200:
case 96000:
case 176400:
case 192000:
break;
default:
dev_err(hdmi->dev, "rate[%d] not supported!\n",
params->sample_rate);
return -EINVAL;
}
switch (daifmt->fmt) {
case HDMI_I2S:
hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
break;
case HDMI_SPDIF:
hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
break;
default:
dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__,
daifmt->fmt);
return -EINVAL;
}
memcpy(&hdmi_params.codec_params, params,
sizeof(hdmi_params.codec_params));
mtk_hdmi_audio_set_param(hdmi, &hdmi_params);
return 0;
}
static int mtk_hdmi_audio_startup(struct device *dev, void *data)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
mtk_hdmi_audio_enable(hdmi);
return 0;
}
static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
mtk_hdmi_audio_disable(hdmi);
}
static int
mtk_hdmi_audio_mute(struct device *dev, void *data,
bool enable, int direction)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
if (enable)
mtk_hdmi_hw_aud_mute(hdmi);
else
mtk_hdmi_hw_aud_unmute(hdmi);
return 0;
}
static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
if (hdmi->enabled)
memcpy(buf, hdmi->curr_conn->eld, min(sizeof(hdmi->curr_conn->eld), len));
else
memset(buf, 0, len);
return 0;
}
static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data,
hdmi_codec_plugged_cb fn,
struct device *codec_dev)
{
struct mtk_hdmi *hdmi = data;
mutex_lock(&hdmi->update_plugged_status_lock);
hdmi->plugged_cb = fn;
hdmi->codec_dev = codec_dev;
mutex_unlock(&hdmi->update_plugged_status_lock);
mtk_hdmi_update_plugged_status(hdmi);
return 0;
}
static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
.hw_params = mtk_hdmi_audio_hw_params,
.audio_startup = mtk_hdmi_audio_startup,
.audio_shutdown = mtk_hdmi_audio_shutdown,
.mute_stream = mtk_hdmi_audio_mute,
.get_eld = mtk_hdmi_audio_get_eld,
.hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb,
.no_capture_mute = 1,
};
static int mtk_hdmi_register_audio_driver(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
struct hdmi_codec_pdata codec_data = {
.ops = &mtk_hdmi_audio_codec_ops,
.max_i2s_channels = 2,
.i2s = 1,
.data = hdmi,
};
struct platform_device *pdev;
pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO, &codec_data,
sizeof(codec_data));
if (IS_ERR(pdev))
return PTR_ERR(pdev);
DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
return 0;
}
static int mtk_drm_hdmi_probe(struct platform_device *pdev)
{
struct mtk_hdmi *hdmi;
struct device *dev = &pdev->dev;
int ret;
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
hdmi->dev = dev;
hdmi->conf = of_device_get_match_data(dev);
ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev);
if (ret)
return ret;
hdmi->phy = devm_phy_get(dev, "hdmi");
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
dev_err(dev, "Failed to get HDMI PHY: %d\n", ret);
return ret;
}
mutex_init(&hdmi->update_plugged_status_lock);
platform_set_drvdata(pdev, hdmi);
ret = mtk_hdmi_output_init(hdmi);
if (ret) {
dev_err(dev, "Failed to initialize hdmi output\n");
return ret;
}
ret = mtk_hdmi_register_audio_driver(dev);
if (ret) {
dev_err(dev, "Failed to register audio driver: %d\n", ret);
return ret;
}
hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
hdmi->bridge.of_node = pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&hdmi->bridge);
ret = mtk_hdmi_clk_enable_audio(hdmi);
if (ret) {
dev_err(dev, "Failed to enable audio clocks: %d\n", ret);
goto err_bridge_remove;
}
return 0;
err_bridge_remove:
drm_bridge_remove(&hdmi->bridge);
return ret;
}
static void mtk_drm_hdmi_remove(struct platform_device *pdev)
{
struct mtk_hdmi *hdmi = platform_get_drvdata(pdev);
drm_bridge_remove(&hdmi->bridge);
mtk_hdmi_clk_disable_audio(hdmi);
}
#ifdef CONFIG_PM_SLEEP
static int mtk_hdmi_suspend(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
mtk_hdmi_clk_disable_audio(hdmi);
return 0;
}
static int mtk_hdmi_resume(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
int ret = 0;
ret = mtk_hdmi_clk_enable_audio(hdmi);
if (ret) {
dev_err(dev, "hdmi resume failed!\n");
return ret;
}
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops,
mtk_hdmi_suspend, mtk_hdmi_resume);
static const struct mtk_hdmi_conf mtk_hdmi_conf_mt2701 = {
.tz_disabled = true,
};
static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8167 = {
.max_mode_clock = 148500,
.cea_modes_only = true,
};
static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
{ .compatible = "mediatek,mt2701-hdmi",
.data = &mtk_hdmi_conf_mt2701,
},
{ .compatible = "mediatek,mt8167-hdmi",
.data = &mtk_hdmi_conf_mt8167,
},
{ .compatible = "mediatek,mt8173-hdmi",
},
{}
};
MODULE_DEVICE_TABLE(of, mtk_drm_hdmi_of_ids);
static struct platform_driver mtk_hdmi_driver = {
.probe = mtk_drm_hdmi_probe,
.remove_new = mtk_drm_hdmi_remove,
.driver = {
.name = "mediatek-drm-hdmi",
.of_match_table = mtk_drm_hdmi_of_ids,
.pm = &mtk_hdmi_pm_ops,
},
};
static struct platform_driver * const mtk_hdmi_drivers[] = {
&mtk_hdmi_ddc_driver,
&mtk_cec_driver,
&mtk_hdmi_driver,
};
static int __init mtk_hdmitx_init(void)
{
return platform_register_drivers(mtk_hdmi_drivers,
ARRAY_SIZE(mtk_hdmi_drivers));
}
static void __exit mtk_hdmitx_exit(void)
{
platform_unregister_drivers(mtk_hdmi_drivers,
ARRAY_SIZE(mtk_hdmi_drivers));
}
module_init(mtk_hdmitx_init);
module_exit(mtk_hdmitx_exit);
MODULE_AUTHOR("Jie Qiu <[email protected]>");
MODULE_DESCRIPTION("MediaTek HDMI Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/mediatek/mtk_hdmi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 MediaTek Inc.
* Author: YT SHEN <[email protected]>
*/
#include <linux/component.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
#define DRIVER_NAME "mediatek"
#define DRIVER_DESC "Mediatek SoC DRM"
#define DRIVER_DATE "20150513"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static struct drm_framebuffer *
mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *cmd)
{
const struct drm_format_info *info = drm_get_format_info(dev, cmd);
if (info->num_planes != 1)
return ERR_PTR(-EINVAL);
return drm_gem_fb_create(dev, file, cmd);
}
static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
.fb_create = mtk_drm_mode_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static const unsigned int mt2701_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_BLS,
DDP_COMPONENT_DSI0,
};
static const unsigned int mt2701_mtk_ddp_ext[] = {
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_DPI0,
};
static const unsigned int mt7623_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_BLS,
DDP_COMPONENT_DPI0,
};
static const unsigned int mt7623_mtk_ddp_ext[] = {
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_DSI0,
};
static const unsigned int mt2712_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_AAL0,
DDP_COMPONENT_OD0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_DPI0,
DDP_COMPONENT_PWM0,
};
static const unsigned int mt2712_mtk_ddp_ext[] = {
DDP_COMPONENT_OVL1,
DDP_COMPONENT_COLOR1,
DDP_COMPONENT_AAL1,
DDP_COMPONENT_OD1,
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_DPI1,
DDP_COMPONENT_PWM1,
};
static const unsigned int mt2712_mtk_ddp_third[] = {
DDP_COMPONENT_RDMA2,
DDP_COMPONENT_DSI3,
DDP_COMPONENT_PWM2,
};
static unsigned int mt8167_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_CCORR,
DDP_COMPONENT_AAL0,
DDP_COMPONENT_GAMMA,
DDP_COMPONENT_DITHER0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_DSI0,
};
static const unsigned int mt8173_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_AAL0,
DDP_COMPONENT_OD0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_UFOE,
DDP_COMPONENT_DSI0,
DDP_COMPONENT_PWM0,
};
static const unsigned int mt8173_mtk_ddp_ext[] = {
DDP_COMPONENT_OVL1,
DDP_COMPONENT_COLOR1,
DDP_COMPONENT_GAMMA,
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_DPI0,
};
static const unsigned int mt8183_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_OVL_2L0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_CCORR,
DDP_COMPONENT_AAL0,
DDP_COMPONENT_GAMMA,
DDP_COMPONENT_DITHER0,
DDP_COMPONENT_DSI0,
};
static const unsigned int mt8183_mtk_ddp_ext[] = {
DDP_COMPONENT_OVL_2L1,
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_DPI0,
};
static const unsigned int mt8186_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_CCORR,
DDP_COMPONENT_AAL0,
DDP_COMPONENT_GAMMA,
DDP_COMPONENT_POSTMASK0,
DDP_COMPONENT_DITHER0,
DDP_COMPONENT_DSI0,
};
static const unsigned int mt8186_mtk_ddp_ext[] = {
DDP_COMPONENT_OVL_2L0,
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_DPI0,
};
static const unsigned int mt8188_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_CCORR,
DDP_COMPONENT_AAL0,
DDP_COMPONENT_GAMMA,
DDP_COMPONENT_POSTMASK0,
DDP_COMPONENT_DITHER0,
DDP_COMPONENT_DP_INTF0,
};
static const unsigned int mt8192_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_OVL_2L0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_CCORR,
DDP_COMPONENT_AAL0,
DDP_COMPONENT_GAMMA,
DDP_COMPONENT_POSTMASK0,
DDP_COMPONENT_DITHER0,
DDP_COMPONENT_DSI0,
};
static const unsigned int mt8192_mtk_ddp_ext[] = {
DDP_COMPONENT_OVL_2L2,
DDP_COMPONENT_RDMA4,
DDP_COMPONENT_DPI0,
};
static const unsigned int mt8195_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_CCORR,
DDP_COMPONENT_AAL0,
DDP_COMPONENT_GAMMA,
DDP_COMPONENT_DITHER0,
DDP_COMPONENT_DSC0,
DDP_COMPONENT_MERGE0,
DDP_COMPONENT_DP_INTF0,
};
static const unsigned int mt8195_mtk_ddp_ext[] = {
DDP_COMPONENT_DRM_OVL_ADAPTOR,
DDP_COMPONENT_MERGE5,
DDP_COMPONENT_DP_INTF1,
};
static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.main_path = mt2701_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt2701_mtk_ddp_main),
.ext_path = mt2701_mtk_ddp_ext,
.ext_len = ARRAY_SIZE(mt2701_mtk_ddp_ext),
.shadow_register = true,
.mmsys_dev_num = 1,
};
static const struct mtk_mmsys_driver_data mt7623_mmsys_driver_data = {
.main_path = mt7623_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt7623_mtk_ddp_main),
.ext_path = mt7623_mtk_ddp_ext,
.ext_len = ARRAY_SIZE(mt7623_mtk_ddp_ext),
.shadow_register = true,
.mmsys_dev_num = 1,
};
static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
.main_path = mt2712_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt2712_mtk_ddp_main),
.ext_path = mt2712_mtk_ddp_ext,
.ext_len = ARRAY_SIZE(mt2712_mtk_ddp_ext),
.third_path = mt2712_mtk_ddp_third,
.third_len = ARRAY_SIZE(mt2712_mtk_ddp_third),
.mmsys_dev_num = 1,
};
static const struct mtk_mmsys_driver_data mt8167_mmsys_driver_data = {
.main_path = mt8167_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8167_mtk_ddp_main),
.mmsys_dev_num = 1,
};
static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.main_path = mt8173_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8173_mtk_ddp_main),
.ext_path = mt8173_mtk_ddp_ext,
.ext_len = ARRAY_SIZE(mt8173_mtk_ddp_ext),
.mmsys_dev_num = 1,
};
static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
.main_path = mt8183_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8183_mtk_ddp_main),
.ext_path = mt8183_mtk_ddp_ext,
.ext_len = ARRAY_SIZE(mt8183_mtk_ddp_ext),
.mmsys_dev_num = 1,
};
static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = {
.main_path = mt8186_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8186_mtk_ddp_main),
.ext_path = mt8186_mtk_ddp_ext,
.ext_len = ARRAY_SIZE(mt8186_mtk_ddp_ext),
.mmsys_dev_num = 1,
};
static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = {
.main_path = mt8188_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8188_mtk_ddp_main),
};
static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
.main_path = mt8192_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8192_mtk_ddp_main),
.ext_path = mt8192_mtk_ddp_ext,
.ext_len = ARRAY_SIZE(mt8192_mtk_ddp_ext),
.mmsys_dev_num = 1,
};
static const struct mtk_mmsys_driver_data mt8195_vdosys0_driver_data = {
.main_path = mt8195_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8195_mtk_ddp_main),
.mmsys_dev_num = 2,
};
static const struct mtk_mmsys_driver_data mt8195_vdosys1_driver_data = {
.ext_path = mt8195_mtk_ddp_ext,
.ext_len = ARRAY_SIZE(mt8195_mtk_ddp_ext),
.mmsys_id = 1,
.mmsys_dev_num = 2,
};
static const struct of_device_id mtk_drm_of_ids[] = {
{ .compatible = "mediatek,mt2701-mmsys",
.data = &mt2701_mmsys_driver_data},
{ .compatible = "mediatek,mt7623-mmsys",
.data = &mt7623_mmsys_driver_data},
{ .compatible = "mediatek,mt2712-mmsys",
.data = &mt2712_mmsys_driver_data},
{ .compatible = "mediatek,mt8167-mmsys",
.data = &mt8167_mmsys_driver_data},
{ .compatible = "mediatek,mt8173-mmsys",
.data = &mt8173_mmsys_driver_data},
{ .compatible = "mediatek,mt8183-mmsys",
.data = &mt8183_mmsys_driver_data},
{ .compatible = "mediatek,mt8186-mmsys",
.data = &mt8186_mmsys_driver_data},
{ .compatible = "mediatek,mt8188-vdosys0",
.data = &mt8188_vdosys0_driver_data},
{ .compatible = "mediatek,mt8192-mmsys",
.data = &mt8192_mmsys_driver_data},
{ .compatible = "mediatek,mt8195-mmsys",
.data = &mt8195_vdosys0_driver_data},
{ .compatible = "mediatek,mt8195-vdosys0",
.data = &mt8195_vdosys0_driver_data},
{ .compatible = "mediatek,mt8195-vdosys1",
.data = &mt8195_vdosys1_driver_data},
{ }
};
MODULE_DEVICE_TABLE(of, mtk_drm_of_ids);
static int mtk_drm_match(struct device *dev, void *data)
{
if (!strncmp(dev_name(dev), "mediatek-drm", sizeof("mediatek-drm") - 1))
return true;
return false;
}
static bool mtk_drm_get_all_drm_priv(struct device *dev)
{
struct mtk_drm_private *drm_priv = dev_get_drvdata(dev);
struct mtk_drm_private *all_drm_priv[MAX_CRTC];
struct device_node *phandle = dev->parent->of_node;
const struct of_device_id *of_id;
struct device_node *node;
struct device *drm_dev;
unsigned int cnt = 0;
int i, j;
for_each_child_of_node(phandle->parent, node) {
struct platform_device *pdev;
of_id = of_match_node(mtk_drm_of_ids, node);
if (!of_id)
continue;
pdev = of_find_device_by_node(node);
if (!pdev)
continue;
drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match);
if (!drm_dev || !dev_get_drvdata(drm_dev))
continue;
all_drm_priv[cnt] = dev_get_drvdata(drm_dev);
if (all_drm_priv[cnt] && all_drm_priv[cnt]->mtk_drm_bound)
cnt++;
if (cnt == MAX_CRTC)
break;
}
if (drm_priv->data->mmsys_dev_num == cnt) {
for (i = 0; i < cnt; i++)
for (j = 0; j < cnt; j++)
all_drm_priv[j]->all_drm_private[i] = all_drm_priv[i];
return true;
}
return false;
}
static bool mtk_drm_find_mmsys_comp(struct mtk_drm_private *private, int comp_id)
{
const struct mtk_mmsys_driver_data *drv_data = private->data;
int i;
if (drv_data->main_path)
for (i = 0; i < drv_data->main_len; i++)
if (drv_data->main_path[i] == comp_id)
return true;
if (drv_data->ext_path)
for (i = 0; i < drv_data->ext_len; i++)
if (drv_data->ext_path[i] == comp_id)
return true;
if (drv_data->third_path)
for (i = 0; i < drv_data->third_len; i++)
if (drv_data->third_path[i] == comp_id)
return true;
return false;
}
static int mtk_drm_kms_init(struct drm_device *drm)
{
struct mtk_drm_private *private = drm->dev_private;
struct mtk_drm_private *priv_n;
struct device *dma_dev = NULL;
int ret, i, j;
if (drm_firmware_drivers_only())
return -ENODEV;
ret = drmm_mode_config_init(drm);
if (ret)
goto put_mutex_dev;
drm->mode_config.min_width = 64;
drm->mode_config.min_height = 64;
/*
* set max width and height as default value(4096x4096).
* this value would be used to check framebuffer size limitation
* at drm_mode_addfb().
*/
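/*
* For example, a framebuffer wider or taller than 4096 pixels will be
* rejected by drm_mode_addfb() with -EINVAL based on these limits.
*/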
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.funcs = &mtk_drm_mode_config_funcs;
drm->mode_config.helper_private = &mtk_drm_mode_config_helpers;
for (i = 0; i < private->data->mmsys_dev_num; i++) {
drm->dev_private = private->all_drm_private[i];
ret = component_bind_all(private->all_drm_private[i]->dev, drm);
if (ret)
goto put_mutex_dev;
}
/*
* Ensure internal panels are at the top of the connector list before
* crtc creation.
*/
drm_helper_move_panel_connectors_to_head(drm);
/*
* 1. We currently support two fixed data streams, each optional,
* and each statically assigned to a crtc:
* OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0 ...
* 2. For multi mmsys architecture, crtc path data are located in
* different drm private data structures. Loop through crtc index to
* create crtc from the main path and then ext_path and finally the
* third path.
*/
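/*
* For example (per the driver data above): on mt8195, vdosys0 provides
* only a main path and vdosys1 only an ext path, so the i == 0
* iteration creates the vdosys0 crtc and the i == 1 iteration creates
* the vdosys1 crtc.
*/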
for (i = 0; i < MAX_CRTC; i++) {
for (j = 0; j < private->data->mmsys_dev_num; j++) {
priv_n = private->all_drm_private[j];
if (i == 0 && priv_n->data->main_len) {
ret = mtk_drm_crtc_create(drm, priv_n->data->main_path,
priv_n->data->main_len, j);
if (ret)
goto err_component_unbind;
continue;
} else if (i == 1 && priv_n->data->ext_len) {
ret = mtk_drm_crtc_create(drm, priv_n->data->ext_path,
priv_n->data->ext_len, j);
if (ret)
goto err_component_unbind;
continue;
} else if (i == 2 && priv_n->data->third_len) {
ret = mtk_drm_crtc_create(drm, priv_n->data->third_path,
priv_n->data->third_len, j);
if (ret)
goto err_component_unbind;
continue;
}
}
}
/* Use OVL device for all DMA memory allocations */
dma_dev = mtk_drm_crtc_dma_dev_get(drm_crtc_from_index(drm, 0));
if (!dma_dev) {
ret = -ENODEV;
dev_err(drm->dev, "Need at least one OVL device\n");
goto err_component_unbind;
}
for (i = 0; i < private->data->mmsys_dev_num; i++)
private->all_drm_private[i]->dma_dev = dma_dev;
/*
* Configure the DMA segment size to make sure we get contiguous IOVA
* when importing PRIME buffers.
*/
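/*
* Without this the DMA core assumes a small default segment size
* (typically 64 KiB when no dma_parms are set), which could split an
* imported scatterlist into multiple, non-contiguous IOVA ranges.
*/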
ret = dma_set_max_seg_size(dma_dev, UINT_MAX);
if (ret) {
dev_err(dma_dev, "Failed to set DMA segment size\n");
goto err_component_unbind;
}
ret = drm_vblank_init(drm, MAX_CRTC);
if (ret < 0)
goto err_component_unbind;
drm_kms_helper_poll_init(drm);
drm_mode_config_reset(drm);
return 0;
err_component_unbind:
for (i = 0; i < private->data->mmsys_dev_num; i++)
component_unbind_all(private->all_drm_private[i]->dev, drm);
put_mutex_dev:
for (i = 0; i < private->data->mmsys_dev_num; i++)
put_device(private->all_drm_private[i]->mutex_dev);
return ret;
}
static void mtk_drm_kms_deinit(struct drm_device *drm)
{
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
component_unbind_all(drm->dev, drm);
}
DEFINE_DRM_GEM_FOPS(mtk_drm_fops);
/*
* We need to override this because the device used to import the memory is
* not dev->dev, as drm_gem_prime_import() expects.
*/
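/*
* private->dma_dev is the OVL device chosen in mtk_drm_kms_init(), so
* imported buffers end up mapped through the IOMMU domain that the
* display hardware actually uses.
*/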
static struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct mtk_drm_private *private = dev->dev_private;
return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
}
static const struct drm_driver mtk_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.dumb_create = mtk_drm_gem_dumb_create,
.gem_prime_import = mtk_drm_gem_prime_import,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
.fops = &mtk_drm_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
static int compare_dev(struct device *dev, void *data)
{
return dev == (struct device *)data;
}
static int mtk_drm_bind(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct platform_device *pdev;
struct drm_device *drm;
int ret, i;
if (!iommu_present(&platform_bus_type))
return -EPROBE_DEFER;
pdev = of_find_device_by_node(private->mutex_node);
if (!pdev) {
dev_err(dev, "Waiting for disp-mutex device %pOF\n",
private->mutex_node);
of_node_put(private->mutex_node);
return -EPROBE_DEFER;
}
private->mutex_dev = &pdev->dev;
private->mtk_drm_bound = true;
private->dev = dev;
if (!mtk_drm_get_all_drm_priv(dev))
return 0;
drm = drm_dev_alloc(&mtk_drm_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
private->drm_master = true;
drm->dev_private = private;
for (i = 0; i < private->data->mmsys_dev_num; i++)
private->all_drm_private[i]->drm = drm;
ret = mtk_drm_kms_init(drm);
if (ret < 0)
goto err_free;
ret = drm_dev_register(drm, 0);
if (ret < 0)
goto err_deinit;
drm_fbdev_generic_setup(drm, 32);
return 0;
err_deinit:
mtk_drm_kms_deinit(drm);
err_free:
private->drm = NULL;
drm_dev_put(drm);
return ret;
}
static void mtk_drm_unbind(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
/* for multi mmsys dev, unregister drm dev in mmsys master */
if (private->drm_master) {
drm_dev_unregister(private->drm);
mtk_drm_kms_deinit(private->drm);
drm_dev_put(private->drm);
}
private->mtk_drm_bound = false;
private->drm_master = false;
private->drm = NULL;
}
static const struct component_master_ops mtk_drm_ops = {
.bind = mtk_drm_bind,
.unbind = mtk_drm_unbind,
};
static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
{ .compatible = "mediatek,mt8167-disp-aal",
.data = (void *)MTK_DISP_AAL},
{ .compatible = "mediatek,mt8173-disp-aal",
.data = (void *)MTK_DISP_AAL},
{ .compatible = "mediatek,mt8183-disp-aal",
.data = (void *)MTK_DISP_AAL},
{ .compatible = "mediatek,mt8192-disp-aal",
.data = (void *)MTK_DISP_AAL},
{ .compatible = "mediatek,mt8167-disp-ccorr",
.data = (void *)MTK_DISP_CCORR },
{ .compatible = "mediatek,mt8183-disp-ccorr",
.data = (void *)MTK_DISP_CCORR },
{ .compatible = "mediatek,mt8192-disp-ccorr",
.data = (void *)MTK_DISP_CCORR },
{ .compatible = "mediatek,mt2701-disp-color",
.data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8167-disp-color",
.data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8173-disp-color",
.data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8167-disp-dither",
.data = (void *)MTK_DISP_DITHER },
{ .compatible = "mediatek,mt8183-disp-dither",
.data = (void *)MTK_DISP_DITHER },
{ .compatible = "mediatek,mt8195-disp-dsc",
.data = (void *)MTK_DISP_DSC },
{ .compatible = "mediatek,mt8167-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8173-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8183-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8195-disp-merge",
.data = (void *)MTK_DISP_MERGE },
{ .compatible = "mediatek,mt2701-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt2712-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8167-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8173-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8183-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8186-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8188-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8192-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8195-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8173-disp-od",
.data = (void *)MTK_DISP_OD },
{ .compatible = "mediatek,mt2701-disp-ovl",
.data = (void *)MTK_DISP_OVL },
{ .compatible = "mediatek,mt8167-disp-ovl",
.data = (void *)MTK_DISP_OVL },
{ .compatible = "mediatek,mt8173-disp-ovl",
.data = (void *)MTK_DISP_OVL },
{ .compatible = "mediatek,mt8183-disp-ovl",
.data = (void *)MTK_DISP_OVL },
{ .compatible = "mediatek,mt8192-disp-ovl",
.data = (void *)MTK_DISP_OVL },
{ .compatible = "mediatek,mt8183-disp-ovl-2l",
.data = (void *)MTK_DISP_OVL_2L },
{ .compatible = "mediatek,mt8192-disp-ovl-2l",
.data = (void *)MTK_DISP_OVL_2L },
{ .compatible = "mediatek,mt8192-disp-postmask",
.data = (void *)MTK_DISP_POSTMASK },
{ .compatible = "mediatek,mt2701-disp-pwm",
.data = (void *)MTK_DISP_BLS },
{ .compatible = "mediatek,mt8167-disp-pwm",
.data = (void *)MTK_DISP_PWM },
{ .compatible = "mediatek,mt8173-disp-pwm",
.data = (void *)MTK_DISP_PWM },
{ .compatible = "mediatek,mt2701-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8167-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8183-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8195-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-ufoe",
.data = (void *)MTK_DISP_UFOE },
{ .compatible = "mediatek,mt8173-disp-wdma",
.data = (void *)MTK_DISP_WDMA },
{ .compatible = "mediatek,mt2701-dpi",
.data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt8167-dsi",
.data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dpi",
.data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt8183-dpi",
.data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt8186-dpi",
.data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt8188-dp-intf",
.data = (void *)MTK_DP_INTF },
{ .compatible = "mediatek,mt8192-dpi",
.data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt8195-dp-intf",
.data = (void *)MTK_DP_INTF },
{ .compatible = "mediatek,mt2701-dsi",
.data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dsi",
.data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8183-dsi",
.data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8186-dsi",
.data = (void *)MTK_DSI },
{ }
};
static int mtk_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *phandle = dev->parent->of_node;
const struct of_device_id *of_id;
struct mtk_drm_private *private;
struct device_node *node;
struct component_match *match = NULL;
struct platform_device *ovl_adaptor;
int ret;
int i;
private = devm_kzalloc(dev, sizeof(*private), GFP_KERNEL);
if (!private)
return -ENOMEM;
private->mmsys_dev = dev->parent;
if (!private->mmsys_dev) {
dev_err(dev, "Failed to get MMSYS device\n");
return -ENODEV;
}
of_id = of_match_node(mtk_drm_of_ids, phandle);
if (!of_id)
return -ENODEV;
private->data = of_id->data;
private->all_drm_private = devm_kmalloc_array(dev, private->data->mmsys_dev_num,
sizeof(*private->all_drm_private),
GFP_KERNEL);
if (!private->all_drm_private)
return -ENOMEM;
/* Bringup ovl_adaptor */
if (mtk_drm_find_mmsys_comp(private, DDP_COMPONENT_DRM_OVL_ADAPTOR)) {
ovl_adaptor = platform_device_register_data(dev, "mediatek-disp-ovl-adaptor",
PLATFORM_DEVID_AUTO,
(void *)private->mmsys_dev,
sizeof(*private->mmsys_dev));
private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR].dev = &ovl_adaptor->dev;
mtk_ddp_comp_init(NULL, &private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR],
DDP_COMPONENT_DRM_OVL_ADAPTOR);
component_match_add(dev, &match, compare_dev, &ovl_adaptor->dev);
}
/* Iterate over sibling DISP function blocks */
for_each_child_of_node(phandle->parent, node) {
const struct of_device_id *of_id;
enum mtk_ddp_comp_type comp_type;
int comp_id;
of_id = of_match_node(mtk_ddp_comp_dt_ids, node);
if (!of_id)
continue;
if (!of_device_is_available(node)) {
dev_dbg(dev, "Skipping disabled component %pOF\n",
node);
continue;
}
comp_type = (enum mtk_ddp_comp_type)(uintptr_t)of_id->data;
if (comp_type == MTK_DISP_MUTEX) {
int id;
id = of_alias_get_id(node, "mutex");
if (id < 0 || id == private->data->mmsys_id) {
private->mutex_node = of_node_get(node);
dev_dbg(dev, "get mutex for mmsys %d", private->data->mmsys_id);
}
continue;
}
comp_id = mtk_ddp_comp_get_id(node, comp_type);
if (comp_id < 0) {
dev_warn(dev, "Skipping unknown component %pOF\n",
node);
continue;
}
if (!mtk_drm_find_mmsys_comp(private, comp_id))
continue;
private->comp_node[comp_id] = of_node_get(node);
/*
* Currently only the AAL, CCORR, COLOR, GAMMA, MERGE, OVL, RDMA, DSI, and DPI
* blocks have separate component platform drivers and initialize their own
* DDP component structure. The others are initialized here.
*/
if (comp_type == MTK_DISP_AAL ||
comp_type == MTK_DISP_CCORR ||
comp_type == MTK_DISP_COLOR ||
comp_type == MTK_DISP_GAMMA ||
comp_type == MTK_DISP_MERGE ||
comp_type == MTK_DISP_OVL ||
comp_type == MTK_DISP_OVL_2L ||
comp_type == MTK_DISP_OVL_ADAPTOR ||
comp_type == MTK_DISP_RDMA ||
comp_type == MTK_DP_INTF ||
comp_type == MTK_DPI ||
comp_type == MTK_DSI) {
dev_info(dev, "Adding component match for %pOF\n",
node);
drm_of_component_match_add(dev, &match, component_compare_of,
node);
}
ret = mtk_ddp_comp_init(node, &private->ddp_comp[comp_id], comp_id);
if (ret) {
of_node_put(node);
goto err_node;
}
}
if (!private->mutex_node) {
dev_err(dev, "Failed to find disp-mutex node\n");
ret = -ENODEV;
goto err_node;
}
pm_runtime_enable(dev);
platform_set_drvdata(pdev, private);
ret = component_master_add_with_match(dev, &mtk_drm_ops, match);
if (ret)
goto err_pm;
return 0;
err_pm:
pm_runtime_disable(dev);
err_node:
of_node_put(private->mutex_node);
for (i = 0; i < DDP_COMPONENT_DRM_ID_MAX; i++)
of_node_put(private->comp_node[i]);
return ret;
}
static void mtk_drm_remove(struct platform_device *pdev)
{
struct mtk_drm_private *private = platform_get_drvdata(pdev);
int i;
component_master_del(&pdev->dev, &mtk_drm_ops);
pm_runtime_disable(&pdev->dev);
of_node_put(private->mutex_node);
for (i = 0; i < DDP_COMPONENT_DRM_ID_MAX; i++)
of_node_put(private->comp_node[i]);
}
static int mtk_drm_sys_prepare(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
if (private->drm_master)
return drm_mode_config_helper_suspend(drm);
else
return 0;
}
static void mtk_drm_sys_complete(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
int ret = 0;
if (private->drm_master)
ret = drm_mode_config_helper_resume(drm);
if (ret)
dev_err(dev, "Failed to resume\n");
}
static const struct dev_pm_ops mtk_drm_pm_ops = {
.prepare = mtk_drm_sys_prepare,
.complete = mtk_drm_sys_complete,
};
static struct platform_driver mtk_drm_platform_driver = {
.probe = mtk_drm_probe,
.remove_new = mtk_drm_remove,
.driver = {
.name = "mediatek-drm",
.pm = &mtk_drm_pm_ops,
},
};
static struct platform_driver * const mtk_drm_drivers[] = {
&mtk_disp_aal_driver,
&mtk_disp_ccorr_driver,
&mtk_disp_color_driver,
&mtk_disp_gamma_driver,
&mtk_disp_merge_driver,
&mtk_disp_ovl_adaptor_driver,
&mtk_disp_ovl_driver,
&mtk_disp_rdma_driver,
&mtk_dpi_driver,
&mtk_drm_platform_driver,
&mtk_dsi_driver,
&mtk_ethdr_driver,
&mtk_mdp_rdma_driver,
};
static int __init mtk_drm_init(void)
{
return platform_register_drivers(mtk_drm_drivers,
ARRAY_SIZE(mtk_drm_drivers));
}
static void __exit mtk_drm_exit(void)
{
platform_unregister_drivers(mtk_drm_drivers,
ARRAY_SIZE(mtk_drm_drivers));
}
module_init(mtk_drm_init);
module_exit(mtk_drm_exit);
MODULE_AUTHOR("YT SHEN <[email protected]>");
MODULE_DESCRIPTION("Mediatek SoC DRM driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/mediatek/mtk_drm_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021 MediaTek Inc.
*/
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_ethdr.h"
#define MIX_INTEN 0x4
#define MIX_FME_CPL_INTEN BIT(1)
#define MIX_INTSTA 0x8
#define MIX_EN 0xc
#define MIX_RST 0x14
#define MIX_ROI_SIZE 0x18
#define MIX_DATAPATH_CON 0x1c
#define OUTPUT_NO_RND BIT(3)
#define SOURCE_RGB_SEL BIT(7)
#define BACKGROUND_RELAY (4 << 9)
#define MIX_ROI_BGCLR 0x20
#define BGCLR_BLACK 0xff000000
#define MIX_SRC_CON 0x24
#define MIX_SRC_L0_EN BIT(0)
#define MIX_L_SRC_CON(n) (0x28 + 0x18 * (n))
#define NON_PREMULTI_SOURCE (2 << 12)
#define MIX_L_SRC_SIZE(n) (0x30 + 0x18 * (n))
#define MIX_L_SRC_OFFSET(n) (0x34 + 0x18 * (n))
#define MIX_FUNC_DCM0 0x120
#define MIX_FUNC_DCM1 0x124
#define MIX_FUNC_DCM_ENABLE 0xffffffff
#define HDR_VDO_FE_0804_HDR_DM_FE 0x804
#define HDR_VDO_FE_0804_BYPASS_ALL 0xfd
#define HDR_GFX_FE_0204_GFX_HDR_FE 0x204
#define HDR_GFX_FE_0204_BYPASS_ALL 0xfd
#define HDR_VDO_BE_0204_VDO_DM_BE 0x204
#define HDR_VDO_BE_0204_BYPASS_ALL 0x7e
#define MIXER_INX_MODE_BYPASS 0
#define MIXER_INX_MODE_EVEN_EXTEND 1
#define DEFAULT_9BIT_ALPHA 0x100
#define MIXER_ALPHA_AEN BIT(8)
#define MIXER_ALPHA 0xff
#define ETHDR_CLK_NUM 13
enum mtk_ethdr_comp_id {
ETHDR_MIXER,
ETHDR_VDO_FE0,
ETHDR_VDO_FE1,
ETHDR_GFX_FE0,
ETHDR_GFX_FE1,
ETHDR_VDO_BE,
ETHDR_ADL_DS,
ETHDR_ID_MAX
};
struct mtk_ethdr_comp {
struct device *dev;
void __iomem *regs;
struct cmdq_client_reg cmdq_base;
};
struct mtk_ethdr {
struct mtk_ethdr_comp ethdr_comp[ETHDR_ID_MAX];
struct clk_bulk_data ethdr_clk[ETHDR_CLK_NUM];
struct device *mmsys_dev;
void (*vblank_cb)(void *data);
void *vblank_cb_data;
int irq;
struct reset_control *reset_ctl;
};
static const char * const ethdr_clk_str[] = {
"ethdr_top",
"mixer",
"vdo_fe0",
"vdo_fe1",
"gfx_fe0",
"gfx_fe1",
"vdo_be",
"adl_ds",
"vdo_fe0_async",
"vdo_fe1_async",
"gfx_fe0_async",
"gfx_fe1_async",
"vdo_be_async",
};
void mtk_ethdr_register_vblank_cb(struct device *dev,
void (*vblank_cb)(void *),
void *vblank_cb_data)
{
struct mtk_ethdr *priv = dev_get_drvdata(dev);
priv->vblank_cb = vblank_cb;
priv->vblank_cb_data = vblank_cb_data;
}
void mtk_ethdr_unregister_vblank_cb(struct device *dev)
{
struct mtk_ethdr *priv = dev_get_drvdata(dev);
priv->vblank_cb = NULL;
priv->vblank_cb_data = NULL;
}
void mtk_ethdr_enable_vblank(struct device *dev)
{
struct mtk_ethdr *priv = dev_get_drvdata(dev);
writel(MIX_FME_CPL_INTEN, priv->ethdr_comp[ETHDR_MIXER].regs + MIX_INTEN);
}
void mtk_ethdr_disable_vblank(struct device *dev)
{
struct mtk_ethdr *priv = dev_get_drvdata(dev);
writel(0x0, priv->ethdr_comp[ETHDR_MIXER].regs + MIX_INTEN);
}
static irqreturn_t mtk_ethdr_irq_handler(int irq, void *dev_id)
{
struct mtk_ethdr *priv = dev_id;
writel(0x0, priv->ethdr_comp[ETHDR_MIXER].regs + MIX_INTSTA);
if (!priv->vblank_cb)
return IRQ_NONE;
priv->vblank_cb(priv->vblank_cb_data);
return IRQ_HANDLED;
}
void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt)
{
struct mtk_ethdr *priv = dev_get_drvdata(dev);
struct mtk_ethdr_comp *mixer = &priv->ethdr_comp[ETHDR_MIXER];
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int offset = (pending->x & 1) << 31 | pending->y << 16 | pending->x;
unsigned int align_width = ALIGN_DOWN(pending->width, 2);
unsigned int alpha_con = 0;
dev_dbg(dev, "%s+ idx:%d", __func__, idx);
if (idx >= 4)
return;
if (!pending->enable) {
mtk_ddp_write(cmdq_pkt, 0, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_SIZE(idx));
return;
}
if (state->base.fb && state->base.fb->format->has_alpha)
alpha_con = MIXER_ALPHA_AEN | MIXER_ALPHA;
mtk_mmsys_mixer_in_config(priv->mmsys_dev, idx + 1, alpha_con ? false : true,
DEFAULT_9BIT_ALPHA,
pending->x & 1 ? MIXER_INX_MODE_EVEN_EXTEND :
MIXER_INX_MODE_BYPASS, align_width / 2 - 1, cmdq_pkt);
mtk_ddp_write(cmdq_pkt, pending->height << 16 | align_width, &mixer->cmdq_base,
mixer->regs, MIX_L_SRC_SIZE(idx));
mtk_ddp_write(cmdq_pkt, offset, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_OFFSET(idx));
mtk_ddp_write_mask(cmdq_pkt, alpha_con, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_CON(idx),
0x1ff);
mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &mixer->cmdq_base, mixer->regs, MIX_SRC_CON,
BIT(idx));
}
void mtk_ethdr_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_ethdr *priv = dev_get_drvdata(dev);
struct mtk_ethdr_comp *vdo_fe0 = &priv->ethdr_comp[ETHDR_VDO_FE0];
struct mtk_ethdr_comp *vdo_fe1 = &priv->ethdr_comp[ETHDR_VDO_FE1];
struct mtk_ethdr_comp *gfx_fe0 = &priv->ethdr_comp[ETHDR_GFX_FE0];
struct mtk_ethdr_comp *gfx_fe1 = &priv->ethdr_comp[ETHDR_GFX_FE1];
struct mtk_ethdr_comp *vdo_be = &priv->ethdr_comp[ETHDR_VDO_BE];
struct mtk_ethdr_comp *mixer = &priv->ethdr_comp[ETHDR_MIXER];
dev_dbg(dev, "%s-w:%d, h:%d\n", __func__, w, h);
mtk_ddp_write(cmdq_pkt, HDR_VDO_FE_0804_BYPASS_ALL, &vdo_fe0->cmdq_base,
vdo_fe0->regs, HDR_VDO_FE_0804_HDR_DM_FE);
mtk_ddp_write(cmdq_pkt, HDR_VDO_FE_0804_BYPASS_ALL, &vdo_fe1->cmdq_base,
vdo_fe1->regs, HDR_VDO_FE_0804_HDR_DM_FE);
mtk_ddp_write(cmdq_pkt, HDR_GFX_FE_0204_BYPASS_ALL, &gfx_fe0->cmdq_base,
gfx_fe0->regs, HDR_GFX_FE_0204_GFX_HDR_FE);
mtk_ddp_write(cmdq_pkt, HDR_GFX_FE_0204_BYPASS_ALL, &gfx_fe1->cmdq_base,
gfx_fe1->regs, HDR_GFX_FE_0204_GFX_HDR_FE);
mtk_ddp_write(cmdq_pkt, HDR_VDO_BE_0204_BYPASS_ALL, &vdo_be->cmdq_base,
vdo_be->regs, HDR_VDO_BE_0204_VDO_DM_BE);
mtk_ddp_write(cmdq_pkt, MIX_FUNC_DCM_ENABLE, &mixer->cmdq_base, mixer->regs, MIX_FUNC_DCM0);
mtk_ddp_write(cmdq_pkt, MIX_FUNC_DCM_ENABLE, &mixer->cmdq_base, mixer->regs, MIX_FUNC_DCM1);
mtk_ddp_write(cmdq_pkt, h << 16 | w, &mixer->cmdq_base, mixer->regs, MIX_ROI_SIZE);
mtk_ddp_write(cmdq_pkt, BGCLR_BLACK, &mixer->cmdq_base, mixer->regs, MIX_ROI_BGCLR);
mtk_ddp_write(cmdq_pkt, NON_PREMULTI_SOURCE, &mixer->cmdq_base, mixer->regs,
MIX_L_SRC_CON(0));
mtk_ddp_write(cmdq_pkt, NON_PREMULTI_SOURCE, &mixer->cmdq_base, mixer->regs,
MIX_L_SRC_CON(1));
mtk_ddp_write(cmdq_pkt, NON_PREMULTI_SOURCE, &mixer->cmdq_base, mixer->regs,
MIX_L_SRC_CON(2));
mtk_ddp_write(cmdq_pkt, NON_PREMULTI_SOURCE, &mixer->cmdq_base, mixer->regs,
MIX_L_SRC_CON(3));
mtk_ddp_write(cmdq_pkt, 0x0, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_SIZE(0));
mtk_ddp_write(cmdq_pkt, OUTPUT_NO_RND | SOURCE_RGB_SEL | BACKGROUND_RELAY,
&mixer->cmdq_base, mixer->regs, MIX_DATAPATH_CON);
mtk_ddp_write_mask(cmdq_pkt, MIX_SRC_L0_EN, &mixer->cmdq_base, mixer->regs,
MIX_SRC_CON, MIX_SRC_L0_EN);
mtk_mmsys_hdr_config(priv->mmsys_dev, w / 2, h, cmdq_pkt);
mtk_mmsys_mixer_in_channel_swap(priv->mmsys_dev, 4, 0, cmdq_pkt);
}
void mtk_ethdr_start(struct device *dev)
{
struct mtk_ethdr *priv = dev_get_drvdata(dev);
struct mtk_ethdr_comp *mixer = &priv->ethdr_comp[ETHDR_MIXER];
writel(1, mixer->regs + MIX_EN);
}
void mtk_ethdr_stop(struct device *dev)
{
struct mtk_ethdr *priv = dev_get_drvdata(dev);
struct mtk_ethdr_comp *mixer = &priv->ethdr_comp[ETHDR_MIXER];
writel(0, mixer->regs + MIX_EN);
writel(1, mixer->regs + MIX_RST);
reset_control_reset(priv->reset_ctl);
writel(0, mixer->regs + MIX_RST);
}
int mtk_ethdr_clk_enable(struct device *dev)
{
int ret;
struct mtk_ethdr *priv = dev_get_drvdata(dev);
ret = clk_bulk_prepare_enable(ETHDR_CLK_NUM, priv->ethdr_clk);
if (ret)
dev_err(dev,
"ethdr_clk prepare enable failed\n");
return ret;
}
void mtk_ethdr_clk_disable(struct device *dev)
{
struct mtk_ethdr *priv = dev_get_drvdata(dev);
clk_bulk_disable_unprepare(ETHDR_CLK_NUM, priv->ethdr_clk);
}
static int mtk_ethdr_bind(struct device *dev, struct device *master,
void *data)
{
struct mtk_ethdr *priv = dev_get_drvdata(dev);
priv->mmsys_dev = data;
return 0;
}
static void mtk_ethdr_unbind(struct device *dev, struct device *master, void *data)
{
}
static const struct component_ops mtk_ethdr_component_ops = {
.bind = mtk_ethdr_bind,
.unbind = mtk_ethdr_unbind,
};
static int mtk_ethdr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_ethdr *priv;
int ret;
int i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
for (i = 0; i < ETHDR_ID_MAX; i++) {
priv->ethdr_comp[i].dev = dev;
priv->ethdr_comp[i].regs = of_iomap(dev->of_node, i);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
ret = cmdq_dev_get_client_reg(dev,
&priv->ethdr_comp[i].cmdq_base, i);
if (ret)
dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif
dev_dbg(dev, "[DRM]regs:0x%p, node:%d\n", priv->ethdr_comp[i].regs, i);
}
for (i = 0; i < ETHDR_CLK_NUM; i++)
priv->ethdr_clk[i].id = ethdr_clk_str[i];
ret = devm_clk_bulk_get_optional(dev, ETHDR_CLK_NUM, priv->ethdr_clk);
if (ret)
return ret;
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq < 0)
priv->irq = 0;
if (priv->irq) {
ret = devm_request_irq(dev, priv->irq, mtk_ethdr_irq_handler,
IRQF_TRIGGER_NONE, dev_name(dev), priv);
if (ret < 0) {
dev_err(dev, "Failed to request irq %d: %d\n", priv->irq, ret);
return ret;
}
}
priv->reset_ctl = devm_reset_control_array_get_optional_exclusive(dev);
if (IS_ERR(priv->reset_ctl)) {
dev_err_probe(dev, PTR_ERR(priv->reset_ctl), "cannot get ethdr reset control\n");
return PTR_ERR(priv->reset_ctl);
}
platform_set_drvdata(pdev, priv);
ret = component_add(dev, &mtk_ethdr_component_ops);
if (ret)
dev_notice(dev, "Failed to add component: %d\n", ret);
return ret;
}
static int mtk_ethdr_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_ethdr_component_ops);
return 0;
}
static const struct of_device_id mtk_ethdr_driver_dt_match[] = {
{ .compatible = "mediatek,mt8195-disp-ethdr"},
{},
};
MODULE_DEVICE_TABLE(of, mtk_ethdr_driver_dt_match);
struct platform_driver mtk_ethdr_driver = {
.probe = mtk_ethdr_probe,
.remove = mtk_ethdr_remove,
.driver = {
.name = "mediatek-disp-ethdr",
.owner = THIS_MODULE,
.of_match_table = mtk_ethdr_driver_dt_match,
},
};
| linux-master | drivers/gpu/drm/mediatek/mtk_ethdr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021 MediaTek Inc.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_CCORR_EN 0x0000
#define CCORR_EN BIT(0)
#define DISP_CCORR_CFG 0x0020
#define CCORR_RELAY_MODE BIT(0)
#define CCORR_ENGINE_EN BIT(1)
#define CCORR_GAMMA_OFF BIT(2)
#define CCORR_WGAMUT_SRC_CLIP BIT(3)
#define DISP_CCORR_SIZE 0x0030
#define DISP_CCORR_COEF_0 0x0080
#define DISP_CCORR_COEF_1 0x0084
#define DISP_CCORR_COEF_2 0x0088
#define DISP_CCORR_COEF_3 0x008C
#define DISP_CCORR_COEF_4 0x0090
struct mtk_disp_ccorr_data {
u32 matrix_bits;
};
struct mtk_disp_ccorr {
struct clk *clk;
void __iomem *regs;
struct cmdq_client_reg cmdq_reg;
const struct mtk_disp_ccorr_data *data;
};
int mtk_ccorr_clk_enable(struct device *dev)
{
struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
return clk_prepare_enable(ccorr->clk);
}
void mtk_ccorr_clk_disable(struct device *dev)
{
struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
clk_disable_unprepare(ccorr->clk);
}
void mtk_ccorr_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
mtk_ddp_write(cmdq_pkt, w << 16 | h, &ccorr->cmdq_reg, ccorr->regs,
DISP_CCORR_SIZE);
mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, &ccorr->cmdq_reg, ccorr->regs,
DISP_CCORR_CFG);
}
void mtk_ccorr_start(struct device *dev)
{
struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
writel(CCORR_EN, ccorr->regs + DISP_CCORR_EN);
}
void mtk_ccorr_stop(struct device *dev)
{
struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
writel_relaxed(0x0, ccorr->regs + DISP_CCORR_EN);
}
/* Converts a DRM S31.32 value to the HW S1.n format. */
static u16 mtk_ctm_s31_32_to_s1_n(u64 in, u32 n)
{
u16 r;
/* Sign bit. */
r = in & BIT_ULL(63) ? BIT(n + 1) : 0;
if ((in & GENMASK_ULL(62, 33)) > 0) {
/* The identity value 0x100000000 maps to 0x400 (mt8183) or */
/* 0x800 (mt8192); anything larger is clamped to the maximum */
/* representable value, GENMASK(n, 0). */
r |= GENMASK(n, 0);
} else {
/* take the n+1 most significant bits. */
r |= (in >> (32 - n)) & GENMASK(n, 0);
}
return r;
}
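/*
* Worked example (assuming matrix_bits == 10, the mt8183 case): the
* S31.32 identity coefficient 0x1_0000_0000 converts to 0x400, 0.5
* (0x0_8000_0000) converts to 0x200, and a negative coefficient
* additionally sets the sign bit BIT(11).
*/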
void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state)
{
struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
struct drm_property_blob *blob = state->ctm;
struct drm_color_ctm *ctm;
const u64 *input;
uint16_t coeffs[9] = { 0 };
int i;
struct cmdq_pkt *cmdq_pkt = NULL;
u32 matrix_bits = ccorr->data->matrix_bits;
if (!blob)
return;
ctm = (struct drm_color_ctm *)blob->data;
input = ctm->matrix;
for (i = 0; i < ARRAY_SIZE(coeffs); i++)
coeffs[i] = mtk_ctm_s31_32_to_s1_n(input[i], matrix_bits);
mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1],
&ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_0);
mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3],
&ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_1);
mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5],
&ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_2);
mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7],
&ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_3);
mtk_ddp_write(cmdq_pkt, coeffs[8] << 16,
&ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_4);
}
static int mtk_disp_ccorr_bind(struct device *dev, struct device *master,
void *data)
{
return 0;
}
static void mtk_disp_ccorr_unbind(struct device *dev, struct device *master,
void *data)
{
}
static const struct component_ops mtk_disp_ccorr_component_ops = {
.bind = mtk_disp_ccorr_bind,
.unbind = mtk_disp_ccorr_unbind,
};
static int mtk_disp_ccorr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_ccorr *priv;
struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get ccorr clk\n");
return PTR_ERR(priv->clk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap ccorr\n");
return PTR_ERR(priv->regs);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
if (ret)
dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif
priv->data = of_device_get_match_data(dev);
platform_set_drvdata(pdev, priv);
ret = component_add(dev, &mtk_disp_ccorr_component_ops);
if (ret)
dev_err(dev, "Failed to add component: %d\n", ret);
return ret;
}
static void mtk_disp_ccorr_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_ccorr_component_ops);
}
static const struct mtk_disp_ccorr_data mt8183_ccorr_driver_data = {
.matrix_bits = 10,
};
static const struct mtk_disp_ccorr_data mt8192_ccorr_driver_data = {
.matrix_bits = 11,
};
static const struct of_device_id mtk_disp_ccorr_driver_dt_match[] = {
{ .compatible = "mediatek,mt8183-disp-ccorr",
.data = &mt8183_ccorr_driver_data},
{ .compatible = "mediatek,mt8192-disp-ccorr",
.data = &mt8192_ccorr_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match);
struct platform_driver mtk_disp_ccorr_driver = {
.probe = mtk_disp_ccorr_probe,
.remove_new = mtk_disp_ccorr_remove,
.driver = {
.name = "mediatek-disp-ccorr",
.owner = THIS_MODULE,
.of_match_table = mtk_disp_ccorr_driver_dt_match,
},
};
| linux-master | drivers/gpu/drm/mediatek/mtk_disp_ccorr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 MediaTek Inc.
*/
#include <linux/dma-buf.h>
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
static const struct vm_operations_struct vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
.free = mtk_drm_gem_free_object,
.get_sg_table = mtk_gem_prime_get_sg_table,
.vmap = mtk_drm_gem_prime_vmap,
.vunmap = mtk_drm_gem_prime_vunmap,
.mmap = mtk_drm_gem_object_mmap,
.vm_ops = &vm_ops,
};
static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
struct mtk_drm_gem_obj *mtk_gem_obj;
int ret;
size = round_up(size, PAGE_SIZE);
mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
if (!mtk_gem_obj)
return ERR_PTR(-ENOMEM);
mtk_gem_obj->base.funcs = &mtk_drm_gem_object_funcs;
ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
if (ret < 0) {
DRM_ERROR("failed to initialize gem object\n");
kfree(mtk_gem_obj);
return ERR_PTR(ret);
}
return mtk_gem_obj;
}
struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
size_t size, bool alloc_kmap)
{
struct mtk_drm_private *priv = dev->dev_private;
struct mtk_drm_gem_obj *mtk_gem;
struct drm_gem_object *obj;
int ret;
mtk_gem = mtk_drm_gem_init(dev, size);
if (IS_ERR(mtk_gem))
return ERR_CAST(mtk_gem);
obj = &mtk_gem->base;
mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;
if (!alloc_kmap)
mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
&mtk_gem->dma_addr, GFP_KERNEL,
mtk_gem->dma_attrs);
if (!mtk_gem->cookie) {
DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
ret = -ENOMEM;
goto err_gem_free;
}
if (alloc_kmap)
mtk_gem->kvaddr = mtk_gem->cookie;
DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
mtk_gem->cookie, &mtk_gem->dma_addr,
size);
return mtk_gem;
err_gem_free:
drm_gem_object_release(obj);
kfree(mtk_gem);
return ERR_PTR(ret);
}
void mtk_drm_gem_free_object(struct drm_gem_object *obj)
{
struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct mtk_drm_private *priv = obj->dev->dev_private;
if (mtk_gem->sg)
drm_prime_gem_destroy(obj, mtk_gem->sg);
else
dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
mtk_gem->dma_addr, mtk_gem->dma_attrs);
/* release file pointer to gem object. */
drm_gem_object_release(obj);
kfree(mtk_gem);
}
int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct mtk_drm_gem_obj *mtk_gem;
int ret;
args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
args->size = args->pitch * args->height;
mtk_gem = mtk_drm_gem_create(dev, args->size, false);
if (IS_ERR(mtk_gem))
return PTR_ERR(mtk_gem);
/*
* Allocate an ID in the IDR table where the object is registered;
* the handle returned to userspace carries that ID.
*/
ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
if (ret)
goto err_handle_create;
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(&mtk_gem->base);
return 0;
err_handle_create:
mtk_drm_gem_free_object(&mtk_gem->base);
return ret;
}
static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
int ret;
struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct mtk_drm_private *priv = obj->dev->dev_private;
/*
* Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
* whole buffer from the start.
*/
vma->vm_pgoff = 0;
/*
* dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
return ret;
}
/*
* Allocate a sg_table for this GEM object.
* Note: Both the table's contents, and the sg_table itself must be freed by
* the caller.
* Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
*/
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct mtk_drm_private *priv = obj->dev->dev_private;
struct sg_table *sgt;
int ret;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
return ERR_PTR(-ENOMEM);
ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
mtk_gem->dma_addr, obj->size,
mtk_gem->dma_attrs);
if (ret) {
DRM_ERROR("failed to allocate sgt, %d\n", ret);
kfree(sgt);
return ERR_PTR(ret);
}
return sgt;
}
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
struct mtk_drm_gem_obj *mtk_gem;
/* check if the entries in the sg_table are contiguous */
if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
DRM_ERROR("sg_table is not contiguous");
return ERR_PTR(-EINVAL);
}
mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
if (IS_ERR(mtk_gem))
return ERR_CAST(mtk_gem);
mtk_gem->dma_addr = sg_dma_address(sg->sgl);
mtk_gem->sg = sg;
return &mtk_gem->base;
}
int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct sg_table *sgt = NULL;
unsigned int npages;
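/*
 * Buffers created with alloc_kmap already have a kernel mapping from
 * dma_alloc_attrs(); reuse it. On that path sgt stays NULL, so the
 * kfree() at the "out" label below is a harmless no-op.
 */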
if (mtk_gem->kvaddr)
goto out;
sgt = mtk_gem_prime_get_sg_table(obj);
if (IS_ERR(sgt))
return PTR_ERR(sgt);
npages = obj->size >> PAGE_SHIFT;
mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
if (!mtk_gem->pages) {
kfree(sgt);
return -ENOMEM;
}
drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages);
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
if (!mtk_gem->kvaddr) {
kfree(sgt);
kfree(mtk_gem->pages);
return -ENOMEM;
}
out:
kfree(sgt);
iosys_map_set_vaddr(map, mtk_gem->kvaddr);
return 0;
}
void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj,
struct iosys_map *map)
{
struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
void *vaddr = map->vaddr;
if (!mtk_gem->pages)
return;
vunmap(vaddr);
mtk_gem->kvaddr = NULL;
kfree(mtk_gem->pages);
}
| linux-master | drivers/gpu/drm/mediatek/mtk_drm_gem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2021 MediaTek Inc.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_disp_drv.h"
#define DISP_REG_MERGE_CTRL 0x000
#define MERGE_EN 1
#define DISP_REG_MERGE_CFG_0 0x010
#define DISP_REG_MERGE_CFG_1 0x014
#define DISP_REG_MERGE_CFG_4 0x020
#define DISP_REG_MERGE_CFG_10 0x038
/* no swap */
#define SWAP_MODE 0
#define FLD_SWAP_MODE GENMASK(4, 0)
#define DISP_REG_MERGE_CFG_12 0x040
#define CFG_10_10_1PI_2PO_BUF_MODE 6
#define CFG_10_10_2PI_2PO_BUF_MODE 8
#define CFG_11_10_1PI_2PO_MERGE 18
#define FLD_CFG_MERGE_MODE GENMASK(4, 0)
#define DISP_REG_MERGE_CFG_24 0x070
#define DISP_REG_MERGE_CFG_25 0x074
#define DISP_REG_MERGE_CFG_26 0x078
#define DISP_REG_MERGE_CFG_27 0x07c
#define DISP_REG_MERGE_CFG_36 0x0a0
#define ULTRA_EN BIT(0)
#define PREULTRA_EN BIT(4)
#define DISP_REG_MERGE_CFG_37 0x0a4
/* 0: Off, 1: SRAM0, 2: SRAM1, 3: SRAM0 + SRAM1 */
#define BUFFER_MODE 3
#define FLD_BUFFER_MODE GENMASK(1, 0)
/*
* For the ultra and preultra settings, 6us ~ 9us is an empirical value,
* and the maximum mmsys clock frequency is 594 MHz.
*/
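/*
 * The thresholds below are therefore expressed in pixels:
 * threshold = latency_in_us * 594 (pixels per microsecond at 594 MHz).
 */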
#define DISP_REG_MERGE_CFG_40 0x0b0
/* 6 us, 594M pixel/sec */
#define ULTRA_TH_LOW (6 * 594)
/* 8 us, 594M pixel/sec */
#define ULTRA_TH_HIGH (8 * 594)
#define FLD_ULTRA_TH_LOW GENMASK(15, 0)
#define FLD_ULTRA_TH_HIGH GENMASK(31, 16)
#define DISP_REG_MERGE_CFG_41 0x0b4
/* 8 us, 594M pixel/sec */
#define PREULTRA_TH_LOW (8 * 594)
/* 9 us, 594M pixel/sec */
#define PREULTRA_TH_HIGH (9 * 594)
#define FLD_PREULTRA_TH_LOW GENMASK(15, 0)
#define FLD_PREULTRA_TH_HIGH GENMASK(31, 16)
#define DISP_REG_MERGE_MUTE_0 0xf00
struct mtk_disp_merge {
void __iomem *regs;
struct clk *clk;
struct clk *async_clk;
struct cmdq_client_reg cmdq_reg;
bool fifo_en;
bool mute_support;
struct reset_control *reset_ctl;
};
void mtk_merge_start(struct device *dev)
{
mtk_merge_start_cmdq(dev, NULL);
}
void mtk_merge_stop(struct device *dev)
{
mtk_merge_stop_cmdq(dev, NULL);
}
void mtk_merge_start_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_merge *priv = dev_get_drvdata(dev);
if (priv->mute_support)
mtk_ddp_write(cmdq_pkt, 0x0, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_MUTE_0);
mtk_ddp_write(cmdq_pkt, 1, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CTRL);
}
void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_merge *priv = dev_get_drvdata(dev);
if (priv->mute_support)
mtk_ddp_write(cmdq_pkt, 0x1, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_MUTE_0);
mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CTRL);
if (priv->async_clk)
reset_control_reset(priv->reset_ctl);
}
static void mtk_merge_fifo_setting(struct mtk_disp_merge *priv,
struct cmdq_pkt *cmdq_pkt)
{
mtk_ddp_write(cmdq_pkt, ULTRA_EN | PREULTRA_EN,
&priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_36);
mtk_ddp_write_mask(cmdq_pkt, BUFFER_MODE,
&priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_37,
FLD_BUFFER_MODE);
mtk_ddp_write_mask(cmdq_pkt, ULTRA_TH_LOW | ULTRA_TH_HIGH << 16,
&priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_40,
FLD_ULTRA_TH_LOW | FLD_ULTRA_TH_HIGH);
mtk_ddp_write_mask(cmdq_pkt, PREULTRA_TH_LOW | PREULTRA_TH_HIGH << 16,
&priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_41,
FLD_PREULTRA_TH_LOW | FLD_PREULTRA_TH_HIGH);
}
void mtk_merge_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
mtk_merge_advance_config(dev, w, 0, h, vrefresh, bpc, cmdq_pkt);
}
void mtk_merge_advance_config(struct device *dev, unsigned int l_w, unsigned int r_w,
unsigned int h, unsigned int vrefresh, unsigned int bpc,
struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_merge *priv = dev_get_drvdata(dev);
unsigned int mode = CFG_10_10_1PI_2PO_BUF_MODE;
if (!h || !l_w) {
dev_err(dev, "%s: input width(%d) or height(%d) is invalid\n", __func__, l_w, h);
return;
}
if (priv->fifo_en) {
mtk_merge_fifo_setting(priv, cmdq_pkt);
mode = CFG_10_10_2PI_2PO_BUF_MODE;
}
if (r_w)
mode = CFG_11_10_1PI_2PO_MERGE;
mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_0);
mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_1);
mtk_ddp_write(cmdq_pkt, h << 16 | (l_w + r_w), &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_4);
/*
* DISP_REG_MERGE_CFG_24 is the merge SRAM0 w/h and
* DISP_REG_MERGE_CFG_25 is the merge SRAM1 w/h.
* If r_w > 0, the merge is in merge mode (input0 and input1 are merged
* together): input0 goes to SRAM0 and input1 goes to SRAM1.
* If r_w = 0, the merge is in buffer mode: the input goes through SRAM0 and
* then to SRAM1. Both SRAM0 and SRAM1 are set to the same size.
*/
mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_24);
if (r_w)
mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_25);
else
mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_25);
/*
* DISP_REG_MERGE_CFG_26 and DISP_REG_MERGE_CFG_27 are only used in LR merge.
* They only take effect when the merge is set to merge mode.
*/
mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_26);
mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_27);
mtk_ddp_write_mask(cmdq_pkt, SWAP_MODE, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_10, FLD_SWAP_MODE);
mtk_ddp_write_mask(cmdq_pkt, mode, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_12, FLD_CFG_MERGE_MODE);
}
int mtk_merge_clk_enable(struct device *dev)
{
int ret = 0;
struct mtk_disp_merge *priv = dev_get_drvdata(dev);
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(dev, "merge clk prepare enable failed\n");
return ret;
}
ret = clk_prepare_enable(priv->async_clk);
if (ret) {
/* should clean up the state of priv->clk */
clk_disable_unprepare(priv->clk);
dev_err(dev, "async clk prepare enable failed\n");
return ret;
}
return ret;
}
void mtk_merge_clk_disable(struct device *dev)
{
struct mtk_disp_merge *priv = dev_get_drvdata(dev);
clk_disable_unprepare(priv->async_clk);
clk_disable_unprepare(priv->clk);
}
static int mtk_disp_merge_bind(struct device *dev, struct device *master,
void *data)
{
return 0;
}
static void mtk_disp_merge_unbind(struct device *dev, struct device *master,
void *data)
{
}
static const struct component_ops mtk_disp_merge_component_ops = {
.bind = mtk_disp_merge_bind,
.unbind = mtk_disp_merge_unbind,
};
static int mtk_disp_merge_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct mtk_disp_merge *priv;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap merge\n");
return PTR_ERR(priv->regs);
}
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get merge clk\n");
return PTR_ERR(priv->clk);
}
priv->async_clk = devm_clk_get_optional(dev, "merge_async");
if (IS_ERR(priv->async_clk)) {
dev_err(dev, "failed to get merge async clock\n");
return PTR_ERR(priv->async_clk);
}
if (priv->async_clk) {
priv->reset_ctl = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(priv->reset_ctl))
return PTR_ERR(priv->reset_ctl);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
if (ret)
dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif
priv->fifo_en = of_property_read_bool(dev->of_node,
"mediatek,merge-fifo-en");
priv->mute_support = of_property_read_bool(dev->of_node,
"mediatek,merge-mute");
platform_set_drvdata(pdev, priv);
ret = component_add(dev, &mtk_disp_merge_component_ops);
if (ret != 0)
dev_err(dev, "Failed to add component: %d\n", ret);
return ret;
}
static void mtk_disp_merge_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_merge_component_ops);
}
static const struct of_device_id mtk_disp_merge_driver_dt_match[] = {
{ .compatible = "mediatek,mt8195-disp-merge", },
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_merge_driver_dt_match);
struct platform_driver mtk_disp_merge_driver = {
.probe = mtk_disp_merge_probe,
.remove_new = mtk_disp_merge_remove,
.driver = {
.name = "mediatek-disp-merge",
.owner = THIS_MODULE,
.of_match_table = mtk_disp_merge_driver_dt_match,
},
};
| linux-master | drivers/gpu/drm/mediatek/mtk_disp_merge.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017 MediaTek Inc.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_COLOR_CFG_MAIN 0x0400
#define DISP_COLOR_START_MT2701 0x0f00
#define DISP_COLOR_START_MT8167 0x0400
#define DISP_COLOR_START_MT8173 0x0c00
#define DISP_COLOR_START(comp) ((comp)->data->color_offset)
#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50)
#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54)
#define COLOR_BYPASS_ALL BIT(7)
#define COLOR_SEQ_SEL BIT(13)
struct mtk_disp_color_data {
unsigned int color_offset;
};
/*
* struct mtk_disp_color - DISP_COLOR driver structure
* @crtc: associated crtc to report irq events to
* @data: platform colour driver data
*/
struct mtk_disp_color {
struct drm_crtc *crtc;
struct clk *clk;
void __iomem *regs;
struct cmdq_client_reg cmdq_reg;
const struct mtk_disp_color_data *data;
};
int mtk_color_clk_enable(struct device *dev)
{
struct mtk_disp_color *color = dev_get_drvdata(dev);
return clk_prepare_enable(color->clk);
}
void mtk_color_clk_disable(struct device *dev)
{
struct mtk_disp_color *color = dev_get_drvdata(dev);
clk_disable_unprepare(color->clk);
}
void mtk_color_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_color *color = dev_get_drvdata(dev);
mtk_ddp_write(cmdq_pkt, w, &color->cmdq_reg, color->regs, DISP_COLOR_WIDTH(color));
mtk_ddp_write(cmdq_pkt, h, &color->cmdq_reg, color->regs, DISP_COLOR_HEIGHT(color));
}
void mtk_color_start(struct device *dev)
{
struct mtk_disp_color *color = dev_get_drvdata(dev);
writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
color->regs + DISP_COLOR_CFG_MAIN);
writel(0x1, color->regs + DISP_COLOR_START(color));
}
static int mtk_disp_color_bind(struct device *dev, struct device *master,
void *data)
{
return 0;
}
static void mtk_disp_color_unbind(struct device *dev, struct device *master,
void *data)
{
}
static const struct component_ops mtk_disp_color_component_ops = {
.bind = mtk_disp_color_bind,
.unbind = mtk_disp_color_unbind,
};
static int mtk_disp_color_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_color *priv;
struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get color clk\n");
return PTR_ERR(priv->clk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap color\n");
return PTR_ERR(priv->regs);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
if (ret)
dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif
priv->data = of_device_get_match_data(dev);
platform_set_drvdata(pdev, priv);
ret = component_add(dev, &mtk_disp_color_component_ops);
if (ret)
dev_err(dev, "Failed to add component: %d\n", ret);
return ret;
}
static void mtk_disp_color_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_color_component_ops);
}
static const struct mtk_disp_color_data mt2701_color_driver_data = {
.color_offset = DISP_COLOR_START_MT2701,
};
static const struct mtk_disp_color_data mt8167_color_driver_data = {
.color_offset = DISP_COLOR_START_MT8167,
};
static const struct mtk_disp_color_data mt8173_color_driver_data = {
.color_offset = DISP_COLOR_START_MT8173,
};
static const struct of_device_id mtk_disp_color_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-color",
.data = &mt2701_color_driver_data},
{ .compatible = "mediatek,mt8167-disp-color",
.data = &mt8167_color_driver_data},
{ .compatible = "mediatek,mt8173-disp-color",
.data = &mt8173_color_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_color_driver_dt_match);
struct platform_driver mtk_disp_color_driver = {
.probe = mtk_disp_color_probe,
.remove_new = mtk_disp_color_remove,
.driver = {
.name = "mediatek-disp-color",
.owner = THIS_MODULE,
.of_match_table = mtk_disp_color_driver_dt_match,
},
};
| linux-master | drivers/gpu/drm/mediatek/mtk_disp_color.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 MediaTek Inc.
* Authors:
* YT Shen <[email protected]>
* CK Hu <[email protected]>
*/
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <drm/drm_print.h>
#include "mtk_disp_drv.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_plane.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_crtc.h"
#define DISP_REG_DITHER_EN 0x0000
#define DITHER_EN BIT(0)
#define DISP_REG_DITHER_CFG 0x0020
#define DITHER_RELAY_MODE BIT(0)
#define DITHER_ENGINE_EN BIT(1)
#define DISP_DITHERING BIT(2)
#define DISP_REG_DITHER_SIZE 0x0030
#define DISP_REG_DITHER_5 0x0114
#define DISP_REG_DITHER_7 0x011c
#define DISP_REG_DITHER_15 0x013c
#define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28)
#define DITHER_ADD_LSHIFT_R(x) (((x) & 0x7) << 20)
#define DITHER_NEW_BIT_MODE BIT(0)
#define DISP_REG_DITHER_16 0x0140
#define DITHER_LSB_ERR_SHIFT_B(x) (((x) & 0x7) << 28)
#define DITHER_ADD_LSHIFT_B(x) (((x) & 0x7) << 20)
#define DITHER_LSB_ERR_SHIFT_G(x) (((x) & 0x7) << 12)
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
#define DISP_REG_DSC_CON 0x0000
#define DSC_EN BIT(0)
#define DSC_DUAL_INOUT BIT(2)
#define DSC_BYPASS BIT(4)
#define DSC_UFOE_SEL BIT(16)
#define DISP_REG_OD_EN 0x0000
#define DISP_REG_OD_CFG 0x0020
#define OD_RELAYMODE BIT(0)
#define DISP_REG_OD_SIZE 0x0030
#define DISP_REG_POSTMASK_EN 0x0000
#define POSTMASK_EN BIT(0)
#define DISP_REG_POSTMASK_CFG 0x0020
#define POSTMASK_RELAY_MODE BIT(0)
#define DISP_REG_POSTMASK_SIZE 0x0030
#define DISP_REG_UFO_START 0x0000
#define UFO_BYPASS BIT(2)
struct mtk_ddp_comp_dev {
struct clk *clk;
void __iomem *regs;
struct cmdq_client_reg cmdq_reg;
};
void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
unsigned int offset)
{
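/*
 * If a cmdq_pkt is supplied, the write is recorded into the packet and
 * executed later by the GCE (Global Command Engine); otherwise the
 * register is written immediately through MMIO.
 */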
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (cmdq_pkt)
cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys,
cmdq_reg->offset + offset, value);
else
#endif
writel(value, regs + offset);
}
void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
unsigned int offset)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (cmdq_pkt)
cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys,
cmdq_reg->offset + offset, value);
else
#endif
writel_relaxed(value, regs + offset);
}
void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
unsigned int offset, unsigned int mask)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (cmdq_pkt) {
cmdq_pkt_write_mask(cmdq_pkt, cmdq_reg->subsys,
cmdq_reg->offset + offset, value, mask);
} else {
#endif
u32 tmp = readl(regs + offset);
tmp = (tmp & ~mask) | (value & mask);
writel(tmp, regs + offset);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
}
#endif
}
static int mtk_ddp_clk_enable(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
return clk_prepare_enable(priv->clk);
}
static void mtk_ddp_clk_disable(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
clk_disable_unprepare(priv->clk);
}
void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg,
unsigned int bpc, unsigned int cfg,
unsigned int dither_en, struct cmdq_pkt *cmdq_pkt)
{
/* If bpc equals 0, the dithering function is not enabled */
if (bpc == 0)
return;
if (bpc >= MTK_MIN_BPC) {
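/*
 * Program the per-channel LSB error shift and add-left-shift values with
 * the number of bits above the target depth (MTK_MAX_BPC - bpc), so the
 * extra low-order bits are presumably dithered rather than simply
 * truncated.
 */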
mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_5);
mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_7);
mtk_ddp_write(cmdq_pkt,
DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
DITHER_NEW_BIT_MODE,
cmdq_reg, regs, DISP_REG_DITHER_15);
mtk_ddp_write(cmdq_pkt,
DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
cmdq_reg, regs, DISP_REG_DITHER_16);
mtk_ddp_write(cmdq_pkt, dither_en, cmdq_reg, regs, cfg);
}
}
static void mtk_dither_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs,
DISP_REG_DITHER_CFG);
mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG,
DITHER_ENGINE_EN, cmdq_pkt);
}
static void mtk_dither_start(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
writel(DITHER_EN, priv->regs + DISP_REG_DITHER_EN);
}
static void mtk_dither_stop(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
writel_relaxed(0x0, priv->regs + DISP_REG_DITHER_EN);
}
static void mtk_dither_set(struct device *dev, unsigned int bpc,
unsigned int cfg, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, cfg,
DISP_DITHERING, cmdq_pkt);
}
static void mtk_dsc_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
/* dsc bypass mode */
mtk_ddp_write_mask(cmdq_pkt, DSC_BYPASS, &priv->cmdq_reg, priv->regs,
DISP_REG_DSC_CON, DSC_BYPASS);
mtk_ddp_write_mask(cmdq_pkt, DSC_UFOE_SEL, &priv->cmdq_reg, priv->regs,
DISP_REG_DSC_CON, DSC_UFOE_SEL);
mtk_ddp_write_mask(cmdq_pkt, DSC_DUAL_INOUT, &priv->cmdq_reg, priv->regs,
DISP_REG_DSC_CON, DSC_DUAL_INOUT);
}
static void mtk_dsc_start(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
/* write with mask to preserve the values set in mtk_dsc_config */
mtk_ddp_write_mask(NULL, DSC_EN, &priv->cmdq_reg, priv->regs, DISP_REG_DSC_CON, DSC_EN);
}
static void mtk_dsc_stop(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
writel_relaxed(0x0, priv->regs + DISP_REG_DSC_CON);
}
static void mtk_od_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_OD_SIZE);
mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_REG_OD_CFG);
mtk_dither_set(dev, bpc, DISP_REG_OD_CFG, cmdq_pkt);
}
static void mtk_od_start(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
writel(1, priv->regs + DISP_REG_OD_EN);
}
static void mtk_postmask_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs,
DISP_REG_POSTMASK_SIZE);
mtk_ddp_write(cmdq_pkt, POSTMASK_RELAY_MODE, &priv->cmdq_reg,
priv->regs, DISP_REG_POSTMASK_CFG);
}
static void mtk_postmask_start(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
writel(POSTMASK_EN, priv->regs + DISP_REG_POSTMASK_EN);
}
static void mtk_postmask_stop(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
writel_relaxed(0x0, priv->regs + DISP_REG_POSTMASK_EN);
}
static void mtk_ufoe_start(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START);
}
static const struct mtk_ddp_comp_funcs ddp_aal = {
.clk_enable = mtk_aal_clk_enable,
.clk_disable = mtk_aal_clk_disable,
.gamma_set = mtk_aal_gamma_set,
.config = mtk_aal_config,
.start = mtk_aal_start,
.stop = mtk_aal_stop,
};
static const struct mtk_ddp_comp_funcs ddp_ccorr = {
.clk_enable = mtk_ccorr_clk_enable,
.clk_disable = mtk_ccorr_clk_disable,
.config = mtk_ccorr_config,
.start = mtk_ccorr_start,
.stop = mtk_ccorr_stop,
.ctm_set = mtk_ccorr_ctm_set,
};
static const struct mtk_ddp_comp_funcs ddp_color = {
.clk_enable = mtk_color_clk_enable,
.clk_disable = mtk_color_clk_disable,
.config = mtk_color_config,
.start = mtk_color_start,
};
static const struct mtk_ddp_comp_funcs ddp_dither = {
.clk_enable = mtk_ddp_clk_enable,
.clk_disable = mtk_ddp_clk_disable,
.config = mtk_dither_config,
.start = mtk_dither_start,
.stop = mtk_dither_stop,
};
static const struct mtk_ddp_comp_funcs ddp_dpi = {
.start = mtk_dpi_start,
.stop = mtk_dpi_stop,
};
static const struct mtk_ddp_comp_funcs ddp_dsc = {
.clk_enable = mtk_ddp_clk_enable,
.clk_disable = mtk_ddp_clk_disable,
.config = mtk_dsc_config,
.start = mtk_dsc_start,
.stop = mtk_dsc_stop,
};
static const struct mtk_ddp_comp_funcs ddp_dsi = {
.start = mtk_dsi_ddp_start,
.stop = mtk_dsi_ddp_stop,
};
static const struct mtk_ddp_comp_funcs ddp_gamma = {
.clk_enable = mtk_gamma_clk_enable,
.clk_disable = mtk_gamma_clk_disable,
.gamma_set = mtk_gamma_set,
.config = mtk_gamma_config,
.start = mtk_gamma_start,
.stop = mtk_gamma_stop,
};
static const struct mtk_ddp_comp_funcs ddp_merge = {
.clk_enable = mtk_merge_clk_enable,
.clk_disable = mtk_merge_clk_disable,
.start = mtk_merge_start,
.stop = mtk_merge_stop,
.config = mtk_merge_config,
};
static const struct mtk_ddp_comp_funcs ddp_od = {
.clk_enable = mtk_ddp_clk_enable,
.clk_disable = mtk_ddp_clk_disable,
.config = mtk_od_config,
.start = mtk_od_start,
};
static const struct mtk_ddp_comp_funcs ddp_ovl = {
.clk_enable = mtk_ovl_clk_enable,
.clk_disable = mtk_ovl_clk_disable,
.config = mtk_ovl_config,
.start = mtk_ovl_start,
.stop = mtk_ovl_stop,
.register_vblank_cb = mtk_ovl_register_vblank_cb,
.unregister_vblank_cb = mtk_ovl_unregister_vblank_cb,
.enable_vblank = mtk_ovl_enable_vblank,
.disable_vblank = mtk_ovl_disable_vblank,
.supported_rotations = mtk_ovl_supported_rotations,
.layer_nr = mtk_ovl_layer_nr,
.layer_check = mtk_ovl_layer_check,
.layer_config = mtk_ovl_layer_config,
.bgclr_in_on = mtk_ovl_bgclr_in_on,
.bgclr_in_off = mtk_ovl_bgclr_in_off,
.get_formats = mtk_ovl_get_formats,
.get_num_formats = mtk_ovl_get_num_formats,
};
static const struct mtk_ddp_comp_funcs ddp_postmask = {
.clk_enable = mtk_ddp_clk_enable,
.clk_disable = mtk_ddp_clk_disable,
.config = mtk_postmask_config,
.start = mtk_postmask_start,
.stop = mtk_postmask_stop,
};
static const struct mtk_ddp_comp_funcs ddp_rdma = {
.clk_enable = mtk_rdma_clk_enable,
.clk_disable = mtk_rdma_clk_disable,
.config = mtk_rdma_config,
.start = mtk_rdma_start,
.stop = mtk_rdma_stop,
.register_vblank_cb = mtk_rdma_register_vblank_cb,
.unregister_vblank_cb = mtk_rdma_unregister_vblank_cb,
.enable_vblank = mtk_rdma_enable_vblank,
.disable_vblank = mtk_rdma_disable_vblank,
.layer_nr = mtk_rdma_layer_nr,
.layer_config = mtk_rdma_layer_config,
.get_formats = mtk_rdma_get_formats,
.get_num_formats = mtk_rdma_get_num_formats,
};
static const struct mtk_ddp_comp_funcs ddp_ufoe = {
.clk_enable = mtk_ddp_clk_enable,
.clk_disable = mtk_ddp_clk_disable,
.start = mtk_ufoe_start,
};
static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = {
.clk_enable = mtk_ovl_adaptor_clk_enable,
.clk_disable = mtk_ovl_adaptor_clk_disable,
.config = mtk_ovl_adaptor_config,
.start = mtk_ovl_adaptor_start,
.stop = mtk_ovl_adaptor_stop,
.layer_nr = mtk_ovl_adaptor_layer_nr,
.layer_config = mtk_ovl_adaptor_layer_config,
.register_vblank_cb = mtk_ovl_adaptor_register_vblank_cb,
.unregister_vblank_cb = mtk_ovl_adaptor_unregister_vblank_cb,
.enable_vblank = mtk_ovl_adaptor_enable_vblank,
.disable_vblank = mtk_ovl_adaptor_disable_vblank,
.dma_dev_get = mtk_ovl_adaptor_dma_dev_get,
.connect = mtk_ovl_adaptor_connect,
.disconnect = mtk_ovl_adaptor_disconnect,
.add = mtk_ovl_adaptor_add_comp,
.remove = mtk_ovl_adaptor_remove_comp,
.get_formats = mtk_ovl_adaptor_get_formats,
.get_num_formats = mtk_ovl_adaptor_get_num_formats,
};
static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
[MTK_DISP_AAL] = "aal",
[MTK_DISP_BLS] = "bls",
[MTK_DISP_CCORR] = "ccorr",
[MTK_DISP_COLOR] = "color",
[MTK_DISP_DITHER] = "dither",
[MTK_DISP_DSC] = "dsc",
[MTK_DISP_GAMMA] = "gamma",
[MTK_DISP_MERGE] = "merge",
[MTK_DISP_MUTEX] = "mutex",
[MTK_DISP_OD] = "od",
[MTK_DISP_OVL] = "ovl",
[MTK_DISP_OVL_2L] = "ovl-2l",
[MTK_DISP_OVL_ADAPTOR] = "ovl_adaptor",
[MTK_DISP_POSTMASK] = "postmask",
[MTK_DISP_PWM] = "pwm",
[MTK_DISP_RDMA] = "rdma",
[MTK_DISP_UFOE] = "ufoe",
[MTK_DISP_WDMA] = "wdma",
[MTK_DP_INTF] = "dp-intf",
[MTK_DPI] = "dpi",
[MTK_DSI] = "dsi",
};
struct mtk_ddp_comp_match {
enum mtk_ddp_comp_type type;
int alias_id;
const struct mtk_ddp_comp_funcs *funcs;
};
static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_DRM_ID_MAX] = {
[DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal },
[DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
[DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
[DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
[DDP_COMPONENT_DITHER0] = { MTK_DISP_DITHER, 0, &ddp_dither },
[DDP_COMPONENT_DP_INTF0] = { MTK_DP_INTF, 0, &ddp_dpi },
[DDP_COMPONENT_DP_INTF1] = { MTK_DP_INTF, 1, &ddp_dpi },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi },
[DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi },
[DDP_COMPONENT_DRM_OVL_ADAPTOR] = { MTK_DISP_OVL_ADAPTOR, 0, &ddp_ovl_adaptor },
[DDP_COMPONENT_DSC0] = { MTK_DISP_DSC, 0, &ddp_dsc },
[DDP_COMPONENT_DSC1] = { MTK_DISP_DSC, 1, &ddp_dsc },
[DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi },
[DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi },
[DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi },
[DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi },
[DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma },
[DDP_COMPONENT_MERGE0] = { MTK_DISP_MERGE, 0, &ddp_merge },
[DDP_COMPONENT_MERGE1] = { MTK_DISP_MERGE, 1, &ddp_merge },
[DDP_COMPONENT_MERGE2] = { MTK_DISP_MERGE, 2, &ddp_merge },
[DDP_COMPONENT_MERGE3] = { MTK_DISP_MERGE, 3, &ddp_merge },
[DDP_COMPONENT_MERGE4] = { MTK_DISP_MERGE, 4, &ddp_merge },
[DDP_COMPONENT_MERGE5] = { MTK_DISP_MERGE, 5, &ddp_merge },
[DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od },
[DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
[DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl },
[DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl },
[DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl },
[DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl },
[DDP_COMPONENT_OVL_2L2] = { MTK_DISP_OVL_2L, 2, &ddp_ovl },
[DDP_COMPONENT_POSTMASK0] = { MTK_DISP_POSTMASK, 0, &ddp_postmask },
[DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
[DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
[DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
[DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma },
[DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma },
[DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma },
[DDP_COMPONENT_RDMA4] = { MTK_DISP_RDMA, 4, &ddp_rdma },
[DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe },
[DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL },
[DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
};
static bool mtk_drm_find_comp_in_ddp(struct device *dev,
const unsigned int *path,
unsigned int path_len,
struct mtk_ddp_comp *ddp_comp)
{
unsigned int i;
if (path == NULL)
return false;
for (i = 0U; i < path_len; i++)
if (dev == ddp_comp[path[i]].dev)
return true;
return false;
}
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type)
{
int id = of_alias_get_id(node, mtk_ddp_comp_stem[comp_type]);
int i;
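/*
 * If the node has no alias for this component type, of_alias_get_id()
 * returns a negative value and the first entry of the matching type is
 * used; otherwise the alias id must match as well.
 */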
for (i = 0; i < ARRAY_SIZE(mtk_ddp_matches); i++) {
if (comp_type == mtk_ddp_matches[i].type &&
(id < 0 || id == mtk_ddp_matches[i].alias_id))
return i;
}
return -EINVAL;
}
unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
struct device *dev)
{
struct mtk_drm_private *private = drm->dev_private;
unsigned int ret = 0;
if (mtk_drm_find_comp_in_ddp(dev, private->data->main_path, private->data->main_len,
private->ddp_comp))
ret = BIT(0);
else if (mtk_drm_find_comp_in_ddp(dev, private->data->ext_path,
private->data->ext_len, private->ddp_comp))
ret = BIT(1);
else if (mtk_drm_find_comp_in_ddp(dev, private->data->third_path,
private->data->third_len, private->ddp_comp))
ret = BIT(2);
else
DRM_INFO("Failed to find comp in ddp table\n");
return ret;
}
int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
unsigned int comp_id)
{
struct platform_device *comp_pdev;
enum mtk_ddp_comp_type type;
struct mtk_ddp_comp_dev *priv;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
int ret;
#endif
if (comp_id < 0 || comp_id >= DDP_COMPONENT_DRM_ID_MAX)
return -EINVAL;
type = mtk_ddp_matches[comp_id].type;
comp->id = comp_id;
comp->funcs = mtk_ddp_matches[comp_id].funcs;
/* Not all DRM components have a DT device node; ovl_adaptor, for example,
 * is the DRM bring-up sub-driver and has none.
*/
if (!node)
return 0;
comp_pdev = of_find_device_by_node(node);
if (!comp_pdev) {
DRM_INFO("Waiting for device %s\n", node->full_name);
return -EPROBE_DEFER;
}
comp->dev = &comp_pdev->dev;
if (type == MTK_DISP_AAL ||
type == MTK_DISP_BLS ||
type == MTK_DISP_CCORR ||
type == MTK_DISP_COLOR ||
type == MTK_DISP_GAMMA ||
type == MTK_DISP_MERGE ||
type == MTK_DISP_OVL ||
type == MTK_DISP_OVL_2L ||
type == MTK_DISP_PWM ||
type == MTK_DISP_RDMA ||
type == MTK_DPI ||
type == MTK_DP_INTF ||
type == MTK_DSI)
return 0;
priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->regs = of_iomap(node, 0);
priv->clk = of_clk_get(node, 0);
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
ret = cmdq_dev_get_client_reg(comp->dev, &priv->cmdq_reg, 0);
if (ret)
dev_dbg(comp->dev, "get mediatek,gce-client-reg fail!\n");
#endif
platform_set_drvdata(comp_pdev, priv);
return 0;
}
| linux-master | drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021 MediaTek Inc.
*/
#include <drm/drm_fourcc.h>
#include <drm/drm_of.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_ethdr.h"
#define MTK_OVL_ADAPTOR_RDMA_MAX_WIDTH 1920
#define MTK_OVL_ADAPTOR_LAYER_NUM 4
enum mtk_ovl_adaptor_comp_type {
OVL_ADAPTOR_TYPE_RDMA = 0,
OVL_ADAPTOR_TYPE_MERGE,
OVL_ADAPTOR_TYPE_ETHDR,
OVL_ADAPTOR_TYPE_NUM,
};
enum mtk_ovl_adaptor_comp_id {
OVL_ADAPTOR_MDP_RDMA0,
OVL_ADAPTOR_MDP_RDMA1,
OVL_ADAPTOR_MDP_RDMA2,
OVL_ADAPTOR_MDP_RDMA3,
OVL_ADAPTOR_MDP_RDMA4,
OVL_ADAPTOR_MDP_RDMA5,
OVL_ADAPTOR_MDP_RDMA6,
OVL_ADAPTOR_MDP_RDMA7,
OVL_ADAPTOR_MERGE0,
OVL_ADAPTOR_MERGE1,
OVL_ADAPTOR_MERGE2,
OVL_ADAPTOR_MERGE3,
OVL_ADAPTOR_ETHDR0,
OVL_ADAPTOR_ID_MAX
};
struct ovl_adaptor_comp_match {
enum mtk_ovl_adaptor_comp_type type;
int alias_id;
};
struct mtk_disp_ovl_adaptor {
struct device *ovl_adaptor_comp[OVL_ADAPTOR_ID_MAX];
struct device *mmsys_dev;
bool children_bound;
};
static const char * const private_comp_stem[OVL_ADAPTOR_TYPE_NUM] = {
[OVL_ADAPTOR_TYPE_RDMA] = "vdo1-rdma",
[OVL_ADAPTOR_TYPE_MERGE] = "merge",
[OVL_ADAPTOR_TYPE_ETHDR] = "ethdr",
};
static const struct ovl_adaptor_comp_match comp_matches[OVL_ADAPTOR_ID_MAX] = {
[OVL_ADAPTOR_MDP_RDMA0] = { OVL_ADAPTOR_TYPE_RDMA, 0 },
[OVL_ADAPTOR_MDP_RDMA1] = { OVL_ADAPTOR_TYPE_RDMA, 1 },
[OVL_ADAPTOR_MDP_RDMA2] = { OVL_ADAPTOR_TYPE_RDMA, 2 },
[OVL_ADAPTOR_MDP_RDMA3] = { OVL_ADAPTOR_TYPE_RDMA, 3 },
[OVL_ADAPTOR_MDP_RDMA4] = { OVL_ADAPTOR_TYPE_RDMA, 4 },
[OVL_ADAPTOR_MDP_RDMA5] = { OVL_ADAPTOR_TYPE_RDMA, 5 },
[OVL_ADAPTOR_MDP_RDMA6] = { OVL_ADAPTOR_TYPE_RDMA, 6 },
[OVL_ADAPTOR_MDP_RDMA7] = { OVL_ADAPTOR_TYPE_RDMA, 7 },
[OVL_ADAPTOR_MERGE0] = { OVL_ADAPTOR_TYPE_MERGE, 1 },
[OVL_ADAPTOR_MERGE1] = { OVL_ADAPTOR_TYPE_MERGE, 2 },
[OVL_ADAPTOR_MERGE2] = { OVL_ADAPTOR_TYPE_MERGE, 3 },
[OVL_ADAPTOR_MERGE3] = { OVL_ADAPTOR_TYPE_MERGE, 4 },
[OVL_ADAPTOR_ETHDR0] = { OVL_ADAPTOR_TYPE_ETHDR, 0 },
};
void mtk_ovl_adaptor_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
struct mtk_plane_pending_state *pending = &state->pending;
struct mtk_mdp_rdma_cfg rdma_config = {0};
struct device *rdma_l;
struct device *rdma_r;
struct device *merge;
struct device *ethdr;
const struct drm_format_info *fmt_info = drm_format_info(pending->format);
bool use_dual_pipe = false;
unsigned int align_width;
unsigned int l_w = 0;
unsigned int r_w = 0;
dev_dbg(dev, "%s+ idx:%d, enable:%d, fmt:0x%x\n", __func__, idx,
pending->enable, pending->format);
dev_dbg(dev, "addr 0x%pad, fb w:%d, {%d,%d,%d,%d}\n",
&pending->addr, (pending->pitch / fmt_info->cpp[0]),
pending->x, pending->y, pending->width, pending->height);
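/*
 * Each layer owns a pair of RDMAs (indices 2 * idx and 2 * idx + 1) so that
 * it can be split across a left and a right pipe when its width exceeds
 * MTK_OVL_ADAPTOR_RDMA_MAX_WIDTH.
 */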
rdma_l = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MDP_RDMA0 + 2 * idx];
rdma_r = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MDP_RDMA0 + 2 * idx + 1];
merge = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MERGE0 + idx];
ethdr = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0];
if (!pending->enable) {
mtk_merge_stop_cmdq(merge, cmdq_pkt);
mtk_mdp_rdma_stop(rdma_l, cmdq_pkt);
mtk_mdp_rdma_stop(rdma_r, cmdq_pkt);
mtk_ethdr_layer_config(ethdr, idx, state, cmdq_pkt);
return;
}
/* ETHDR is in the 1T2P domain, so the width needs to be aligned to 2 pixels */
align_width = ALIGN_DOWN(pending->width, 2);
if (align_width > MTK_OVL_ADAPTOR_RDMA_MAX_WIDTH)
use_dual_pipe = true;
if (use_dual_pipe) {
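/*
 * Split the aligned width roughly in half; the extra term rounds the left
 * width up to an even value, keeping both halves 2-pixel aligned.
 */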
l_w = (align_width / 2) + ((pending->width / 2) % 2);
r_w = align_width - l_w;
} else {
l_w = align_width;
}
mtk_merge_advance_config(merge, l_w, r_w, pending->height, 0, 0, cmdq_pkt);
mtk_mmsys_merge_async_config(ovl_adaptor->mmsys_dev, idx, align_width / 2,
pending->height, cmdq_pkt);
rdma_config.width = l_w;
rdma_config.height = pending->height;
rdma_config.addr0 = pending->addr;
rdma_config.pitch = pending->pitch;
rdma_config.fmt = pending->format;
rdma_config.color_encoding = pending->color_encoding;
mtk_mdp_rdma_config(rdma_l, &rdma_config, cmdq_pkt);
if (use_dual_pipe) {
rdma_config.x_left = l_w;
rdma_config.width = r_w;
mtk_mdp_rdma_config(rdma_r, &rdma_config, cmdq_pkt);
}
mtk_merge_start_cmdq(merge, cmdq_pkt);
mtk_mdp_rdma_start(rdma_l, cmdq_pkt);
if (use_dual_pipe)
mtk_mdp_rdma_start(rdma_r, cmdq_pkt);
else
mtk_mdp_rdma_stop(rdma_r, cmdq_pkt);
mtk_ethdr_layer_config(ethdr, idx, state, cmdq_pkt);
}
void mtk_ovl_adaptor_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
mtk_ethdr_config(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0], w, h,
vrefresh, bpc, cmdq_pkt);
}
void mtk_ovl_adaptor_start(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
mtk_ethdr_start(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
}
void mtk_ovl_adaptor_stop(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
mtk_ethdr_stop(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
}
int mtk_ovl_adaptor_clk_enable(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
struct device *comp;
int ret;
int i;
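/*
 * Runtime-PM references are only taken on the RDMA components (indices
 * below OVL_ADAPTOR_MERGE0); clocks are then enabled for every
 * sub-component in order: RDMAs, merges, ETHDR.
 */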
for (i = 0; i < OVL_ADAPTOR_MERGE0; i++) {
comp = ovl_adaptor->ovl_adaptor_comp[i];
ret = pm_runtime_get_sync(comp);
if (ret < 0) {
dev_err(dev, "Failed to enable power domain %d, err %d\n", i, ret);
goto pwr_err;
}
}
for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
comp = ovl_adaptor->ovl_adaptor_comp[i];
if (i < OVL_ADAPTOR_MERGE0)
ret = mtk_mdp_rdma_clk_enable(comp);
else if (i < OVL_ADAPTOR_ETHDR0)
ret = mtk_merge_clk_enable(comp);
else
ret = mtk_ethdr_clk_enable(comp);
if (ret) {
dev_err(dev, "Failed to enable clock %d, err %d\n", i, ret);
goto clk_err;
}
}
return ret;
clk_err:
while (--i >= 0) {
comp = ovl_adaptor->ovl_adaptor_comp[i];
if (i < OVL_ADAPTOR_MERGE0)
mtk_mdp_rdma_clk_disable(comp);
else if (i < OVL_ADAPTOR_ETHDR0)
mtk_merge_clk_disable(comp);
else
mtk_ethdr_clk_disable(comp);
}
i = OVL_ADAPTOR_MERGE0;
pwr_err:
while (--i >= 0)
pm_runtime_put(ovl_adaptor->ovl_adaptor_comp[i]);
return ret;
}
void mtk_ovl_adaptor_clk_disable(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
struct device *comp;
int i;
for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
comp = ovl_adaptor->ovl_adaptor_comp[i];
if (i < OVL_ADAPTOR_MERGE0) {
mtk_mdp_rdma_clk_disable(comp);
pm_runtime_put(comp);
} else if (i < OVL_ADAPTOR_ETHDR0) {
mtk_merge_clk_disable(comp);
} else {
mtk_ethdr_clk_disable(comp);
}
}
}
unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev)
{
return MTK_OVL_ADAPTOR_LAYER_NUM;
}
struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
return ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MDP_RDMA0];
}
void mtk_ovl_adaptor_register_vblank_cb(struct device *dev, void (*vblank_cb)(void *),
void *vblank_cb_data)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
mtk_ethdr_register_vblank_cb(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0],
vblank_cb, vblank_cb_data);
}
void mtk_ovl_adaptor_unregister_vblank_cb(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
mtk_ethdr_unregister_vblank_cb(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
}
void mtk_ovl_adaptor_enable_vblank(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
mtk_ethdr_enable_vblank(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
}
void mtk_ovl_adaptor_disable_vblank(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
mtk_ethdr_disable_vblank(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
}
const u32 *mtk_ovl_adaptor_get_formats(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
return mtk_mdp_rdma_get_formats(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MDP_RDMA0]);
}
size_t mtk_ovl_adaptor_get_num_formats(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
return mtk_mdp_rdma_get_num_formats(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MDP_RDMA0]);
}
void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex)
{
mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA0);
mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA1);
mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA2);
mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA3);
mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA4);
mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA5);
mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA6);
mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA7);
mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE1);
mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE2);
mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE3);
mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE4);
mtk_mutex_add_comp(mutex, DDP_COMPONENT_ETHDR_MIXER);
}
void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex)
{
mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA0);
mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA1);
mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA2);
mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA3);
mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA4);
mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA5);
mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA6);
mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA7);
mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE1);
mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE2);
mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE3);
mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE4);
mtk_mutex_remove_comp(mutex, DDP_COMPONENT_ETHDR_MIXER);
}
void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev, unsigned int next)
{
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next);
}
void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev, unsigned int next)
{
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next);
}
static int ovl_adaptor_comp_get_id(struct device *dev, struct device_node *node,
enum mtk_ovl_adaptor_comp_type type)
{
int alias_id = of_alias_get_id(node, private_comp_stem[type]);
int i;
for (i = 0; i < ARRAY_SIZE(comp_matches); i++)
if (comp_matches[i].type == type &&
comp_matches[i].alias_id == alias_id)
return i;
dev_warn(dev, "Failed to get id. type: %d, alias: %d\n", type, alias_id);
return -EINVAL;
}
static const struct of_device_id mtk_ovl_adaptor_comp_dt_ids[] = {
{
.compatible = "mediatek,mt8195-vdo1-rdma",
.data = (void *)OVL_ADAPTOR_TYPE_RDMA,
}, {
.compatible = "mediatek,mt8195-disp-merge",
.data = (void *)OVL_ADAPTOR_TYPE_MERGE,
}, {
.compatible = "mediatek,mt8195-disp-ethdr",
.data = (void *)OVL_ADAPTOR_TYPE_ETHDR,
},
{},
};
static int compare_of(struct device *dev, void *data)
{
return dev->of_node == data;
}
static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match)
{
struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev);
struct device_node *node, *parent;
struct platform_device *comp_pdev;
parent = dev->parent->parent->of_node->parent;
for_each_child_of_node(parent, node) {
const struct of_device_id *of_id;
enum mtk_ovl_adaptor_comp_type type;
int id;
of_id = of_match_node(mtk_ovl_adaptor_comp_dt_ids, node);
if (!of_id)
continue;
if (!of_device_is_available(node)) {
dev_dbg(dev, "Skipping disabled component %pOF\n",
node);
continue;
}
type = (enum mtk_ovl_adaptor_comp_type)(uintptr_t)of_id->data;
id = ovl_adaptor_comp_get_id(dev, node, type);
if (id < 0) {
dev_warn(dev, "Skipping unknown component %pOF\n",
node);
continue;
}
comp_pdev = of_find_device_by_node(node);
if (!comp_pdev)
return -EPROBE_DEFER;
priv->ovl_adaptor_comp[id] = &comp_pdev->dev;
drm_of_component_match_add(dev, match, compare_of, node);
dev_dbg(dev, "Adding component match for %pOF\n", node);
}
if (!*match) {
dev_err(dev, "No match device for ovl_adaptor\n");
return -ENODEV;
}
return 0;
}
static int mtk_disp_ovl_adaptor_comp_bind(struct device *dev, struct device *master,
void *data)
{
struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev);
if (!priv->children_bound)
return -EPROBE_DEFER;
return 0;
}
static void mtk_disp_ovl_adaptor_comp_unbind(struct device *dev, struct device *master,
void *data)
{
}
static const struct component_ops mtk_disp_ovl_adaptor_comp_ops = {
.bind = mtk_disp_ovl_adaptor_comp_bind,
.unbind = mtk_disp_ovl_adaptor_comp_unbind,
};
static int mtk_disp_ovl_adaptor_master_bind(struct device *dev)
{
struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev);
int ret;
ret = component_bind_all(dev, priv->mmsys_dev);
if (ret)
return dev_err_probe(dev, ret, "component_bind_all failed!\n");
priv->children_bound = true;
return 0;
}
static void mtk_disp_ovl_adaptor_master_unbind(struct device *dev)
{
struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev);
priv->children_bound = false;
}
static const struct component_master_ops mtk_disp_ovl_adaptor_master_ops = {
.bind = mtk_disp_ovl_adaptor_master_bind,
.unbind = mtk_disp_ovl_adaptor_master_unbind,
};
static int mtk_disp_ovl_adaptor_probe(struct platform_device *pdev)
{
struct mtk_disp_ovl_adaptor *priv;
struct device *dev = &pdev->dev;
struct component_match *match = NULL;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
ret = ovl_adaptor_comp_init(dev, &match);
if (ret < 0)
return ret;
priv->mmsys_dev = pdev->dev.platform_data;
component_master_add_with_match(dev, &mtk_disp_ovl_adaptor_master_ops, match);
pm_runtime_enable(dev);
ret = component_add(dev, &mtk_disp_ovl_adaptor_comp_ops);
if (ret != 0) {
pm_runtime_disable(dev);
dev_err(dev, "Failed to add component: %d\n", ret);
}
return ret;
}
static int mtk_disp_ovl_adaptor_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &mtk_disp_ovl_adaptor_master_ops);
pm_runtime_disable(&pdev->dev);
return 0;
}
struct platform_driver mtk_disp_ovl_adaptor_driver = {
.probe = mtk_disp_ovl_adaptor_probe,
.remove = mtk_disp_ovl_adaptor_remove,
.driver = {
.name = "mediatek-disp-ovl-adaptor",
.owner = THIS_MODULE,
},
};
| linux-master | drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 MediaTek Inc.
*/
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_REG_OVL_INTEN 0x0004
#define OVL_FME_CPL_INT BIT(1)
#define DISP_REG_OVL_INTSTA 0x0008
#define DISP_REG_OVL_EN 0x000c
#define DISP_REG_OVL_RST 0x0014
#define DISP_REG_OVL_ROI_SIZE 0x0020
#define DISP_REG_OVL_DATAPATH_CON 0x0024
#define OVL_LAYER_SMI_ID_EN BIT(0)
#define OVL_BGCLR_SEL_IN BIT(2)
#define OVL_LAYER_AFBC_EN(n) BIT(4+n)
#define DISP_REG_OVL_ROI_BGCLR 0x0028
#define DISP_REG_OVL_SRC_CON 0x002c
#define DISP_REG_OVL_CON(n) (0x0030 + 0x20 * (n))
#define DISP_REG_OVL_SRC_SIZE(n) (0x0038 + 0x20 * (n))
#define DISP_REG_OVL_OFFSET(n) (0x003c + 0x20 * (n))
#define DISP_REG_OVL_PITCH_MSB(n) (0x0040 + 0x20 * (n))
#define OVL_PITCH_MSB_2ND_SUBBUF BIT(16)
#define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n))
#define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n))
#define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n))
#define DISP_REG_OVL_ADDR_MT2701 0x0040
#define DISP_REG_OVL_CLRFMT_EXT 0x02D0
#define DISP_REG_OVL_ADDR_MT8173 0x0f40
#define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n))
#define DISP_REG_OVL_HDR_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n) + 0x04)
#define DISP_REG_OVL_HDR_PITCH(ovl, n) ((ovl)->data->addr + 0x20 * (n) + 0x08)
#define GMC_THRESHOLD_BITS 16
#define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4)
#define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8)
#define OVL_CON_BYTE_SWAP BIT(24)
#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
#define OVL_CON_CLRFMT_RGB (1 << 12)
#define OVL_CON_CLRFMT_RGBA8888 (2 << 12)
#define OVL_CON_CLRFMT_ARGB8888 (3 << 12)
#define OVL_CON_CLRFMT_UYVY (4 << 12)
#define OVL_CON_CLRFMT_YUYV (5 << 12)
#define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
0 : OVL_CON_CLRFMT_RGB)
#define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
OVL_CON_CLRFMT_RGB : 0)
#define OVL_CON_CLRFMT_BIT_DEPTH_MASK(ovl) (0xFF << 4 * (ovl))
#define OVL_CON_CLRFMT_BIT_DEPTH(depth, ovl) (depth << 4 * (ovl))
#define OVL_CON_CLRFMT_8_BIT 0x00
#define OVL_CON_CLRFMT_10_BIT 0x01
#define OVL_CON_AEN BIT(8)
#define OVL_CON_ALPHA 0xff
#define OVL_CON_VIRT_FLIP BIT(9)
#define OVL_CON_HORZ_FLIP BIT(10)
static const u32 mt8173_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
};
static const u32 mt8195_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRA1010102,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
};
struct mtk_disp_ovl_data {
unsigned int addr;
unsigned int gmc_bits;
unsigned int layer_nr;
bool fmt_rgb565_is_0;
bool smi_id_en;
bool supports_afbc;
const u32 *formats;
size_t num_formats;
bool supports_clrfmt_ext;
};
/*
 * struct mtk_disp_ovl - DISP_OVL driver structure
 * @crtc: associated crtc to report vblank events to
 * @clk: clock for the OVL hardware block
 * @regs: MMIO base of the OVL registers
 * @cmdq_reg: CMDQ client register description for this block
 * @data: platform data
 * @vblank_cb: callback invoked from the frame-completion interrupt
 * @vblank_cb_data: argument passed to @vblank_cb
 */
struct mtk_disp_ovl {
struct drm_crtc *crtc;
struct clk *clk;
void __iomem *regs;
struct cmdq_client_reg cmdq_reg;
const struct mtk_disp_ovl_data *data;
void (*vblank_cb)(void *data);
void *vblank_cb_data;
};
static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
{
struct mtk_disp_ovl *priv = dev_id;
/* Clear frame completion interrupt */
writel(0x0, priv->regs + DISP_REG_OVL_INTSTA);
if (!priv->vblank_cb)
return IRQ_NONE;
priv->vblank_cb(priv->vblank_cb_data);
return IRQ_HANDLED;
}
void mtk_ovl_register_vblank_cb(struct device *dev,
void (*vblank_cb)(void *),
void *vblank_cb_data)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
ovl->vblank_cb = vblank_cb;
ovl->vblank_cb_data = vblank_cb_data;
}
void mtk_ovl_unregister_vblank_cb(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
ovl->vblank_cb = NULL;
ovl->vblank_cb_data = NULL;
}
void mtk_ovl_enable_vblank(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
writel(0x0, ovl->regs + DISP_REG_OVL_INTSTA);
writel_relaxed(OVL_FME_CPL_INT, ovl->regs + DISP_REG_OVL_INTEN);
}
void mtk_ovl_disable_vblank(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN);
}
const u32 *mtk_ovl_get_formats(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
return ovl->data->formats;
}
size_t mtk_ovl_get_num_formats(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
return ovl->data->num_formats;
}
int mtk_ovl_clk_enable(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
return clk_prepare_enable(ovl->clk);
}
void mtk_ovl_clk_disable(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
clk_disable_unprepare(ovl->clk);
}
void mtk_ovl_start(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
if (ovl->data->smi_id_en) {
unsigned int reg;
reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
reg = reg | OVL_LAYER_SMI_ID_EN;
writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
}
writel_relaxed(0x1, ovl->regs + DISP_REG_OVL_EN);
}
void mtk_ovl_stop(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_EN);
if (ovl->data->smi_id_en) {
unsigned int reg;
reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
reg = reg & ~OVL_LAYER_SMI_ID_EN;
writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
}
}
static void mtk_ovl_set_afbc(struct mtk_disp_ovl *ovl, struct cmdq_pkt *cmdq_pkt,
int idx, bool enabled)
{
mtk_ddp_write_mask(cmdq_pkt, enabled ? OVL_LAYER_AFBC_EN(idx) : 0,
&ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_DATAPATH_CON, OVL_LAYER_AFBC_EN(idx));
}
static void mtk_ovl_set_bit_depth(struct device *dev, int idx, u32 format,
struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
unsigned int reg;
unsigned int bit_depth = OVL_CON_CLRFMT_8_BIT;
if (!ovl->data->supports_clrfmt_ext)
return;
reg = readl(ovl->regs + DISP_REG_OVL_CLRFMT_EXT);
reg &= ~OVL_CON_CLRFMT_BIT_DEPTH_MASK(idx);
if (format == DRM_FORMAT_RGBA1010102 ||
format == DRM_FORMAT_BGRA1010102 ||
format == DRM_FORMAT_ARGB2101010)
bit_depth = OVL_CON_CLRFMT_10_BIT;
reg |= OVL_CON_CLRFMT_BIT_DEPTH(bit_depth, idx);
mtk_ddp_write(cmdq_pkt, reg, &ovl->cmdq_reg,
ovl->regs, DISP_REG_OVL_CLRFMT_EXT);
}
void mtk_ovl_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
if (w != 0 && h != 0)
mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_ROI_SIZE);
mtk_ddp_write_relaxed(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_ROI_BGCLR);
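/* Pulse the OVL soft reset bit */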
mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
mtk_ddp_write(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
}
unsigned int mtk_ovl_layer_nr(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
return ovl->data->layer_nr;
}
unsigned int mtk_ovl_supported_rotations(struct device *dev)
{
return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
}
int mtk_ovl_layer_check(struct device *dev, unsigned int idx,
struct mtk_plane_state *mtk_state)
{
struct drm_plane_state *state = &mtk_state->base;
unsigned int rotation = 0;
rotation = drm_rotation_simplify(state->rotation,
DRM_MODE_ROTATE_0 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
rotation &= ~DRM_MODE_ROTATE_0;
/* We can only do reflection, not rotation */
if ((rotation & DRM_MODE_ROTATE_MASK) != 0)
return -EINVAL;
/*
* TODO: Rotating/reflecting YUV buffers is not supported at this time.
* Only RGB[AX] variants are supported.
*/
if (state->fb->format->is_yuv && rotation != 0)
return -EINVAL;
state->rotation = rotation;
return 0;
}
void mtk_ovl_layer_on(struct device *dev, unsigned int idx,
struct cmdq_pkt *cmdq_pkt)
{
unsigned int gmc_thrshd_l;
unsigned int gmc_thrshd_h;
unsigned int gmc_value;
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_RDMA_CTRL(idx));
gmc_thrshd_l = GMC_THRESHOLD_LOW >>
(GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
(GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
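/*
 * The GMC register layout depends on the threshold width: 10-bit
 * hardware takes two 16-bit high-threshold fields, while 8-bit
 * hardware packs low/low/high/high thresholds into the four bytes.
 */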
if (ovl->data->gmc_bits == 10)
gmc_value = gmc_thrshd_h | gmc_thrshd_h << 16;
else
gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
mtk_ddp_write(cmdq_pkt, gmc_value,
&ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RDMA_GMC(idx));
mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_CON, BIT(idx));
}
void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
mtk_ddp_write_mask(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_CON, BIT(idx));
mtk_ddp_write(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_RDMA_CTRL(idx));
}
static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
{
/* The "MEM_MODE_INPUT_FORMAT_XXX" values returned below are defined
 * in the MediaTek hardware data sheet.
 * The letter order in XXX bears no relation to the data arrangement
 * in memory.
 */
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
return OVL_CON_CLRFMT_RGB565(ovl);
case DRM_FORMAT_BGR565:
return OVL_CON_CLRFMT_RGB565(ovl) | OVL_CON_BYTE_SWAP;
case DRM_FORMAT_RGB888:
return OVL_CON_CLRFMT_RGB888(ovl);
case DRM_FORMAT_BGR888:
return OVL_CON_CLRFMT_RGB888(ovl) | OVL_CON_BYTE_SWAP;
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_RGBA8888:
return OVL_CON_CLRFMT_ARGB8888;
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_BGRA1010102:
return OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ARGB2101010:
return OVL_CON_CLRFMT_RGBA8888;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP;
case DRM_FORMAT_UYVY:
return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB;
case DRM_FORMAT_YUYV:
return OVL_CON_CLRFMT_YUYV | OVL_CON_MTX_YUV_TO_RGB;
}
}
void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
unsigned int hdr_addr = pending->hdr_addr;
unsigned int pitch = pending->pitch;
unsigned int hdr_pitch = pending->hdr_pitch;
unsigned int fmt = pending->format;
unsigned int offset = (pending->y << 16) | pending->x;
unsigned int src_size = (pending->height << 16) | pending->width;
unsigned int con;
bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR;
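/*
 * Only the low 16 bits of the pitch go into DISP_REG_OVL_PITCH; the
 * upper bits are programmed separately into DISP_REG_OVL_PITCH_MSB,
 * so split the 32-bit pitch into two halves here.
 */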
union overlay_pitch {
struct split_pitch {
u16 lsb;
u16 msb;
} split_pitch;
u32 pitch;
} overlay_pitch;
overlay_pitch.pitch = pitch;
if (!pending->enable) {
mtk_ovl_layer_off(dev, idx, cmdq_pkt);
return;
}
con = ovl_fmt_convert(ovl, fmt);
if (state->base.fb && state->base.fb->format->has_alpha)
con |= OVL_CON_AEN | OVL_CON_ALPHA;
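/*
 * For reflected scanout, move the base address to the mirrored start
 * of the buffer: the last line for a vertical flip, the end of the
 * first line for a horizontal flip.
 */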
if (pending->rotation & DRM_MODE_REFLECT_Y) {
con |= OVL_CON_VIRT_FLIP;
addr += (pending->height - 1) * pending->pitch;
}
if (pending->rotation & DRM_MODE_REFLECT_X) {
con |= OVL_CON_HORZ_FLIP;
addr += pending->pitch - 1;
}
if (ovl->data->supports_afbc)
mtk_ovl_set_afbc(ovl, cmdq_pkt, idx, is_afbc);
mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_CON(idx));
mtk_ddp_write_relaxed(cmdq_pkt, overlay_pitch.split_pitch.lsb, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_PITCH(idx));
mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_SIZE(idx));
mtk_ddp_write_relaxed(cmdq_pkt, offset, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_OFFSET(idx));
mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_ADDR(ovl, idx));
if (is_afbc) {
mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_HDR_ADDR(ovl, idx));
mtk_ddp_write_relaxed(cmdq_pkt,
OVL_PITCH_MSB_2ND_SUBBUF | overlay_pitch.split_pitch.msb,
&ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_HDR_PITCH(ovl, idx));
} else {
mtk_ddp_write_relaxed(cmdq_pkt,
overlay_pitch.split_pitch.msb,
&ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
}
mtk_ovl_set_bit_depth(dev, idx, fmt, cmdq_pkt);
mtk_ovl_layer_on(dev, idx, cmdq_pkt);
}
void mtk_ovl_bgclr_in_on(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
unsigned int reg;
reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
reg = reg | OVL_BGCLR_SEL_IN;
writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
}
void mtk_ovl_bgclr_in_off(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
unsigned int reg;
reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
reg = reg & ~OVL_BGCLR_SEL_IN;
writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
}
static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
void *data)
{
return 0;
}
static void mtk_disp_ovl_unbind(struct device *dev, struct device *master,
void *data)
{
}
static const struct component_ops mtk_disp_ovl_component_ops = {
.bind = mtk_disp_ovl_bind,
.unbind = mtk_disp_ovl_unbind,
};
static int mtk_disp_ovl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_ovl *priv;
struct resource *res;
int irq;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get ovl clk\n");
return PTR_ERR(priv->clk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap ovl\n");
return PTR_ERR(priv->regs);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
if (ret)
dev_dbg(dev, "failed to get mediatek,gce-client-reg\n");
#endif
priv->data = of_device_get_match_data(dev);
platform_set_drvdata(pdev, priv);
ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
IRQF_TRIGGER_NONE, dev_name(dev), priv);
if (ret < 0) {
dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
return ret;
}
pm_runtime_enable(dev);
ret = component_add(dev, &mtk_disp_ovl_component_ops);
if (ret) {
pm_runtime_disable(dev);
dev_err(dev, "Failed to add component: %d\n", ret);
}
return ret;
}
static void mtk_disp_ovl_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
pm_runtime_disable(&pdev->dev);
}
static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT2701,
.gmc_bits = 8,
.layer_nr = 4,
.fmt_rgb565_is_0 = false,
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT8173,
.gmc_bits = 8,
.layer_nr = 4,
.fmt_rgb565_is_0 = true,
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
static const struct mtk_disp_ovl_data mt8183_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT8173,
.gmc_bits = 10,
.layer_nr = 4,
.fmt_rgb565_is_0 = true,
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT8173,
.gmc_bits = 10,
.layer_nr = 2,
.fmt_rgb565_is_0 = true,
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT8173,
.gmc_bits = 10,
.layer_nr = 4,
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT8173,
.gmc_bits = 10,
.layer_nr = 2,
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
static const struct mtk_disp_ovl_data mt8195_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT8173,
.gmc_bits = 10,
.layer_nr = 4,
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
.supports_afbc = true,
.formats = mt8195_formats,
.num_formats = ARRAY_SIZE(mt8195_formats),
.supports_clrfmt_ext = true,
};
static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-ovl",
.data = &mt2701_ovl_driver_data},
{ .compatible = "mediatek,mt8173-disp-ovl",
.data = &mt8173_ovl_driver_data},
{ .compatible = "mediatek,mt8183-disp-ovl",
.data = &mt8183_ovl_driver_data},
{ .compatible = "mediatek,mt8183-disp-ovl-2l",
.data = &mt8183_ovl_2l_driver_data},
{ .compatible = "mediatek,mt8192-disp-ovl",
.data = &mt8192_ovl_driver_data},
{ .compatible = "mediatek,mt8192-disp-ovl-2l",
.data = &mt8192_ovl_2l_driver_data},
{ .compatible = "mediatek,mt8195-disp-ovl",
.data = &mt8195_ovl_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
struct platform_driver mtk_disp_ovl_driver = {
.probe = mtk_disp_ovl_probe,
.remove_new = mtk_disp_ovl_remove,
.driver = {
.name = "mediatek-disp-ovl",
.owner = THIS_MODULE,
.of_match_table = mtk_disp_ovl_driver_dt_match,
},
};
| linux-master | drivers/gpu/drm/mediatek/mtk_disp_ovl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 MediaTek Inc.
* Author: Jie Qiu <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/types.h>
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>
#include "mtk_disp_drv.h"
#include "mtk_dpi_regs.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
enum mtk_dpi_out_bit_num {
MTK_DPI_OUT_BIT_NUM_8BITS,
MTK_DPI_OUT_BIT_NUM_10BITS,
MTK_DPI_OUT_BIT_NUM_12BITS,
MTK_DPI_OUT_BIT_NUM_16BITS
};
enum mtk_dpi_out_yc_map {
MTK_DPI_OUT_YC_MAP_RGB,
MTK_DPI_OUT_YC_MAP_CYCY,
MTK_DPI_OUT_YC_MAP_YCYC,
MTK_DPI_OUT_YC_MAP_CY,
MTK_DPI_OUT_YC_MAP_YC
};
enum mtk_dpi_out_channel_swap {
MTK_DPI_OUT_CHANNEL_SWAP_RGB,
MTK_DPI_OUT_CHANNEL_SWAP_GBR,
MTK_DPI_OUT_CHANNEL_SWAP_BRG,
MTK_DPI_OUT_CHANNEL_SWAP_RBG,
MTK_DPI_OUT_CHANNEL_SWAP_GRB,
MTK_DPI_OUT_CHANNEL_SWAP_BGR
};
enum mtk_dpi_out_color_format {
MTK_DPI_COLOR_FORMAT_RGB,
MTK_DPI_COLOR_FORMAT_YCBCR_422
};
struct mtk_dpi {
struct drm_encoder encoder;
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
struct drm_connector *connector;
void __iomem *regs;
struct device *dev;
struct device *mmsys_dev;
struct clk *engine_clk;
struct clk *pixel_clk;
struct clk *tvd_clk;
int irq;
struct drm_display_mode mode;
const struct mtk_dpi_conf *conf;
enum mtk_dpi_out_color_format color_format;
enum mtk_dpi_out_yc_map yc_map;
enum mtk_dpi_out_bit_num bit_num;
enum mtk_dpi_out_channel_swap channel_swap;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_gpio;
struct pinctrl_state *pins_dpi;
u32 output_fmt;
int refcount;
};
static inline struct mtk_dpi *bridge_to_dpi(struct drm_bridge *b)
{
return container_of(b, struct mtk_dpi, bridge);
}
enum mtk_dpi_polarity {
MTK_DPI_POLARITY_RISING,
MTK_DPI_POLARITY_FALLING,
};
struct mtk_dpi_polarities {
enum mtk_dpi_polarity de_pol;
enum mtk_dpi_polarity ck_pol;
enum mtk_dpi_polarity hsync_pol;
enum mtk_dpi_polarity vsync_pol;
};
struct mtk_dpi_sync_param {
u32 sync_width;
u32 front_porch;
u32 back_porch;
bool shift_half_line;
};
struct mtk_dpi_yc_limit {
u16 y_top;
u16 y_bottom;
u16 c_top;
u16 c_bottom;
};
/**
* struct mtk_dpi_conf - Configuration of mediatek dpi.
* @cal_factor: Callback function to calculate factor value.
* @reg_h_fre_con: Register address of frequency control.
* @max_clock_khz: Max clock frequency supported by this SoC, in kHz.
* @edge_sel_en: Enable of edge selection.
* @output_fmts: Array of supported output formats.
* @num_output_fmts: Quantity of supported output formats.
* @is_ck_de_pol: Support CK/DE polarity.
* @swap_input_support: Support input swap function.
* @support_direct_pin: IP supports direct connection to dpi panels.
* @input_2pixel: Set when the dp_intf front end feeds two pixels per
*                iteration, so 2-pixel input mode must be enabled.
* @dimension_mask: Mask used for HWIDTH, HPORCH, VSYNC_WIDTH and VSYNC_PORCH
* (no shift).
* @hvsize_mask: Mask of HSIZE and VSIZE (no shift).
* @channel_swap_shift: Shift value of channel swap.
* @yuv422_en_bit: Enable bit of yuv422.
* @csc_enable_bit: Enable bit of CSC.
* @pixels_per_iter: Quantity of transferred pixels per iteration.
* @edge_cfg_in_mmsys: If the edge configuration for DPI's output needs to be set in MMSYS.
*/
struct mtk_dpi_conf {
unsigned int (*cal_factor)(int clock);
u32 reg_h_fre_con;
u32 max_clock_khz;
bool edge_sel_en;
const u32 *output_fmts;
u32 num_output_fmts;
bool is_ck_de_pol;
bool swap_input_support;
bool support_direct_pin;
bool input_2pixel;
u32 dimension_mask;
u32 hvsize_mask;
u32 channel_swap_shift;
u32 yuv422_en_bit;
u32 csc_enable_bit;
u32 pixels_per_iter;
bool edge_cfg_in_mmsys;
};
static void mtk_dpi_mask(struct mtk_dpi *dpi, u32 offset, u32 val, u32 mask)
{
u32 tmp = readl(dpi->regs + offset) & ~mask;
tmp |= (val & mask);
writel(tmp, dpi->regs + offset);
}
static void mtk_dpi_sw_reset(struct mtk_dpi *dpi, bool reset)
{
mtk_dpi_mask(dpi, DPI_RET, reset ? RST : 0, RST);
}
static void mtk_dpi_enable(struct mtk_dpi *dpi)
{
mtk_dpi_mask(dpi, DPI_EN, EN, EN);
}
static void mtk_dpi_disable(struct mtk_dpi *dpi)
{
mtk_dpi_mask(dpi, DPI_EN, 0, EN);
}
static void mtk_dpi_config_hsync(struct mtk_dpi *dpi,
struct mtk_dpi_sync_param *sync)
{
mtk_dpi_mask(dpi, DPI_TGEN_HWIDTH, sync->sync_width << HPW,
dpi->conf->dimension_mask << HPW);
mtk_dpi_mask(dpi, DPI_TGEN_HPORCH, sync->back_porch << HBP,
dpi->conf->dimension_mask << HBP);
mtk_dpi_mask(dpi, DPI_TGEN_HPORCH, sync->front_porch << HFP,
dpi->conf->dimension_mask << HFP);
}
static void mtk_dpi_config_vsync(struct mtk_dpi *dpi,
struct mtk_dpi_sync_param *sync,
u32 width_addr, u32 porch_addr)
{
mtk_dpi_mask(dpi, width_addr,
sync->shift_half_line << VSYNC_HALF_LINE_SHIFT,
VSYNC_HALF_LINE_MASK);
mtk_dpi_mask(dpi, width_addr,
sync->sync_width << VSYNC_WIDTH_SHIFT,
dpi->conf->dimension_mask << VSYNC_WIDTH_SHIFT);
mtk_dpi_mask(dpi, porch_addr,
sync->back_porch << VSYNC_BACK_PORCH_SHIFT,
dpi->conf->dimension_mask << VSYNC_BACK_PORCH_SHIFT);
mtk_dpi_mask(dpi, porch_addr,
sync->front_porch << VSYNC_FRONT_PORCH_SHIFT,
dpi->conf->dimension_mask << VSYNC_FRONT_PORCH_SHIFT);
}
static void mtk_dpi_config_vsync_lodd(struct mtk_dpi *dpi,
struct mtk_dpi_sync_param *sync)
{
mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH, DPI_TGEN_VPORCH);
}
static void mtk_dpi_config_vsync_leven(struct mtk_dpi *dpi,
struct mtk_dpi_sync_param *sync)
{
mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH_LEVEN,
DPI_TGEN_VPORCH_LEVEN);
}
static void mtk_dpi_config_vsync_rodd(struct mtk_dpi *dpi,
struct mtk_dpi_sync_param *sync)
{
mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH_RODD,
DPI_TGEN_VPORCH_RODD);
}
static void mtk_dpi_config_vsync_reven(struct mtk_dpi *dpi,
struct mtk_dpi_sync_param *sync)
{
mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH_REVEN,
DPI_TGEN_VPORCH_REVEN);
}
static void mtk_dpi_config_pol(struct mtk_dpi *dpi,
struct mtk_dpi_polarities *dpi_pol)
{
unsigned int pol;
unsigned int mask;
mask = HSYNC_POL | VSYNC_POL;
pol = (dpi_pol->hsync_pol == MTK_DPI_POLARITY_RISING ? 0 : HSYNC_POL) |
(dpi_pol->vsync_pol == MTK_DPI_POLARITY_RISING ? 0 : VSYNC_POL);
if (dpi->conf->is_ck_de_pol) {
mask |= CK_POL | DE_POL;
pol |= (dpi_pol->ck_pol == MTK_DPI_POLARITY_RISING ?
0 : CK_POL) |
(dpi_pol->de_pol == MTK_DPI_POLARITY_RISING ?
0 : DE_POL);
}
mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, pol, mask);
}
static void mtk_dpi_config_3d(struct mtk_dpi *dpi, bool en_3d)
{
mtk_dpi_mask(dpi, DPI_CON, en_3d ? TDFP_EN : 0, TDFP_EN);
}
static void mtk_dpi_config_interface(struct mtk_dpi *dpi, bool inter)
{
mtk_dpi_mask(dpi, DPI_CON, inter ? INTL_EN : 0, INTL_EN);
}
static void mtk_dpi_config_fb_size(struct mtk_dpi *dpi, u32 width, u32 height)
{
mtk_dpi_mask(dpi, DPI_SIZE, width << HSIZE,
dpi->conf->hvsize_mask << HSIZE);
mtk_dpi_mask(dpi, DPI_SIZE, height << VSIZE,
dpi->conf->hvsize_mask << VSIZE);
}
static void mtk_dpi_config_channel_limit(struct mtk_dpi *dpi)
{
struct mtk_dpi_yc_limit limit;
if (drm_default_rgb_quant_range(&dpi->mode) ==
HDMI_QUANTIZATION_RANGE_LIMITED) {
limit.y_bottom = 0x10;
limit.y_top = 0xfe0;
limit.c_bottom = 0x10;
limit.c_top = 0xfe0;
} else {
limit.y_bottom = 0;
limit.y_top = 0xfff;
limit.c_bottom = 0;
limit.c_top = 0xfff;
}
mtk_dpi_mask(dpi, DPI_Y_LIMIT, limit.y_bottom << Y_LIMINT_BOT,
Y_LIMINT_BOT_MASK);
mtk_dpi_mask(dpi, DPI_Y_LIMIT, limit.y_top << Y_LIMINT_TOP,
Y_LIMINT_TOP_MASK);
mtk_dpi_mask(dpi, DPI_C_LIMIT, limit.c_bottom << C_LIMIT_BOT,
C_LIMIT_BOT_MASK);
mtk_dpi_mask(dpi, DPI_C_LIMIT, limit.c_top << C_LIMIT_TOP,
C_LIMIT_TOP_MASK);
}
static void mtk_dpi_config_bit_num(struct mtk_dpi *dpi,
enum mtk_dpi_out_bit_num num)
{
u32 val;
switch (num) {
case MTK_DPI_OUT_BIT_NUM_8BITS:
val = OUT_BIT_8;
break;
case MTK_DPI_OUT_BIT_NUM_10BITS:
val = OUT_BIT_10;
break;
case MTK_DPI_OUT_BIT_NUM_12BITS:
val = OUT_BIT_12;
break;
case MTK_DPI_OUT_BIT_NUM_16BITS:
val = OUT_BIT_16;
break;
default:
val = OUT_BIT_8;
break;
}
mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, val << OUT_BIT,
OUT_BIT_MASK);
}
static void mtk_dpi_config_yc_map(struct mtk_dpi *dpi,
enum mtk_dpi_out_yc_map map)
{
u32 val;
switch (map) {
case MTK_DPI_OUT_YC_MAP_RGB:
val = YC_MAP_RGB;
break;
case MTK_DPI_OUT_YC_MAP_CYCY:
val = YC_MAP_CYCY;
break;
case MTK_DPI_OUT_YC_MAP_YCYC:
val = YC_MAP_YCYC;
break;
case MTK_DPI_OUT_YC_MAP_CY:
val = YC_MAP_CY;
break;
case MTK_DPI_OUT_YC_MAP_YC:
val = YC_MAP_YC;
break;
default:
val = YC_MAP_RGB;
break;
}
mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, val << YC_MAP, YC_MAP_MASK);
}
static void mtk_dpi_config_channel_swap(struct mtk_dpi *dpi,
enum mtk_dpi_out_channel_swap swap)
{
u32 val;
switch (swap) {
case MTK_DPI_OUT_CHANNEL_SWAP_RGB:
val = SWAP_RGB;
break;
case MTK_DPI_OUT_CHANNEL_SWAP_GBR:
val = SWAP_GBR;
break;
case MTK_DPI_OUT_CHANNEL_SWAP_BRG:
val = SWAP_BRG;
break;
case MTK_DPI_OUT_CHANNEL_SWAP_RBG:
val = SWAP_RBG;
break;
case MTK_DPI_OUT_CHANNEL_SWAP_GRB:
val = SWAP_GRB;
break;
case MTK_DPI_OUT_CHANNEL_SWAP_BGR:
val = SWAP_BGR;
break;
default:
val = SWAP_RGB;
break;
}
mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING,
val << dpi->conf->channel_swap_shift,
CH_SWAP_MASK << dpi->conf->channel_swap_shift);
}
static void mtk_dpi_config_yuv422_enable(struct mtk_dpi *dpi, bool enable)
{
mtk_dpi_mask(dpi, DPI_CON, enable ? dpi->conf->yuv422_en_bit : 0,
dpi->conf->yuv422_en_bit);
}
static void mtk_dpi_config_csc_enable(struct mtk_dpi *dpi, bool enable)
{
mtk_dpi_mask(dpi, DPI_CON, enable ? dpi->conf->csc_enable_bit : 0,
dpi->conf->csc_enable_bit);
}
static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable)
{
mtk_dpi_mask(dpi, DPI_CON, enable ? IN_RB_SWAP : 0, IN_RB_SWAP);
}
static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi)
{
mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
}
static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
{
if (dpi->conf->edge_sel_en)
mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, 0, EDGE_SEL_EN);
}
static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
enum mtk_dpi_out_color_format format)
{
mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
if (format == MTK_DPI_COLOR_FORMAT_YCBCR_422) {
mtk_dpi_config_yuv422_enable(dpi, true);
mtk_dpi_config_csc_enable(dpi, true);
/*
* For horizontal resolutions of 720 pixels or less, convert to
* YUV422 with the RGB_TO_BT601 matrix; otherwise use RGB_TO_JPEG.
*/
mtk_dpi_mask(dpi, DPI_MATRIX_SET, dpi->mode.hdisplay <= 720 ?
MATRIX_SEL_RGB_TO_BT601 : MATRIX_SEL_RGB_TO_JPEG,
INT_MATRIX_SEL_MASK);
} else {
mtk_dpi_config_yuv422_enable(dpi, false);
mtk_dpi_config_csc_enable(dpi, false);
if (dpi->conf->swap_input_support)
mtk_dpi_config_swap_input(dpi, false);
}
}
static void mtk_dpi_dual_edge(struct mtk_dpi *dpi)
{
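/*
 * The RGB888_2X12 bus formats carry each 24-bit pixel as two 12-bit
 * halves, one per clock edge, so enable dual-edge (DDR) output and
 * select which half is driven on which edge; other formats use
 * single-edge output.
 */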
if ((dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_LE) ||
(dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_BE)) {
mtk_dpi_mask(dpi, DPI_DDR_SETTING, DDR_EN | DDR_4PHASE,
DDR_EN | DDR_4PHASE);
mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING,
dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_LE ?
EDGE_SEL : 0, EDGE_SEL);
if (dpi->conf->edge_cfg_in_mmsys)
mtk_mmsys_ddp_dpi_fmt_config(dpi->mmsys_dev, MTK_DPI_RGB888_DDR_CON);
} else {
mtk_dpi_mask(dpi, DPI_DDR_SETTING, DDR_EN | DDR_4PHASE, 0);
if (dpi->conf->edge_cfg_in_mmsys)
mtk_mmsys_ddp_dpi_fmt_config(dpi->mmsys_dev, MTK_DPI_RGB888_SDR_CON);
}
}
static void mtk_dpi_power_off(struct mtk_dpi *dpi)
{
if (WARN_ON(dpi->refcount == 0))
return;
if (--dpi->refcount != 0)
return;
mtk_dpi_disable(dpi);
clk_disable_unprepare(dpi->pixel_clk);
clk_disable_unprepare(dpi->engine_clk);
}
static int mtk_dpi_power_on(struct mtk_dpi *dpi)
{
int ret;
if (++dpi->refcount != 1)
return 0;
ret = clk_prepare_enable(dpi->engine_clk);
if (ret) {
dev_err(dpi->dev, "Failed to enable engine clock: %d\n", ret);
goto err_refcount;
}
ret = clk_prepare_enable(dpi->pixel_clk);
if (ret) {
dev_err(dpi->dev, "Failed to enable pixel clock: %d\n", ret);
goto err_pixel;
}
return 0;
err_pixel:
clk_disable_unprepare(dpi->engine_clk);
err_refcount:
dpi->refcount--;
return ret;
}
static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
struct drm_display_mode *mode)
{
struct mtk_dpi_polarities dpi_pol;
struct mtk_dpi_sync_param hsync;
struct mtk_dpi_sync_param vsync_lodd = { 0 };
struct mtk_dpi_sync_param vsync_leven = { 0 };
struct mtk_dpi_sync_param vsync_rodd = { 0 };
struct mtk_dpi_sync_param vsync_reven = { 0 };
struct videomode vm = { 0 };
unsigned long pll_rate;
unsigned int factor;
/* Keep pll_rate within the valid range of the TVDPLL (1 GHz to 2 GHz) */
factor = dpi->conf->cal_factor(mode->clock);
drm_display_mode_to_videomode(mode, &vm);
pll_rate = vm.pixelclock * factor;
dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
pll_rate, vm.pixelclock);
clk_set_rate(dpi->tvd_clk, pll_rate);
pll_rate = clk_get_rate(dpi->tvd_clk);
/*
* Depending on the IP version, we may output a different amount of
* pixels for each iteration: divide the clock by this number and
* adjust the display porches accordingly.
*/
vm.pixelclock = pll_rate / factor;
vm.pixelclock /= dpi->conf->pixels_per_iter;
if ((dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_LE) ||
(dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_BE))
clk_set_rate(dpi->pixel_clk, vm.pixelclock * 2);
else
clk_set_rate(dpi->pixel_clk, vm.pixelclock);
vm.pixelclock = clk_get_rate(dpi->pixel_clk);
dev_dbg(dpi->dev, "Got PLL %lu Hz, pixel clock %lu Hz\n",
pll_rate, vm.pixelclock);
dpi_pol.ck_pol = MTK_DPI_POLARITY_FALLING;
dpi_pol.de_pol = MTK_DPI_POLARITY_RISING;
dpi_pol.hsync_pol = vm.flags & DISPLAY_FLAGS_HSYNC_HIGH ?
MTK_DPI_POLARITY_FALLING : MTK_DPI_POLARITY_RISING;
dpi_pol.vsync_pol = vm.flags & DISPLAY_FLAGS_VSYNC_HIGH ?
MTK_DPI_POLARITY_FALLING : MTK_DPI_POLARITY_RISING;
/*
* Depending on the IP version, we may output a different amount of
* pixels for each iteration: divide the clock by this number and
* adjust the display porches accordingly.
*/
hsync.sync_width = vm.hsync_len / dpi->conf->pixels_per_iter;
hsync.back_porch = vm.hback_porch / dpi->conf->pixels_per_iter;
hsync.front_porch = vm.hfront_porch / dpi->conf->pixels_per_iter;
hsync.shift_half_line = false;
vsync_lodd.sync_width = vm.vsync_len;
vsync_lodd.back_porch = vm.vback_porch;
vsync_lodd.front_porch = vm.vfront_porch;
vsync_lodd.shift_half_line = false;
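/*
 * Interlaced and 3D modes need extra vsync parameter sets: the
 * even-field sets are shifted by half a line for interlacing, and
 * the right-eye sets are filled in for 3D frame packing.
 */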
if (vm.flags & DISPLAY_FLAGS_INTERLACED &&
mode->flags & DRM_MODE_FLAG_3D_MASK) {
vsync_leven = vsync_lodd;
vsync_rodd = vsync_lodd;
vsync_reven = vsync_lodd;
vsync_leven.shift_half_line = true;
vsync_reven.shift_half_line = true;
} else if (vm.flags & DISPLAY_FLAGS_INTERLACED &&
!(mode->flags & DRM_MODE_FLAG_3D_MASK)) {
vsync_leven = vsync_lodd;
vsync_leven.shift_half_line = true;
} else if (!(vm.flags & DISPLAY_FLAGS_INTERLACED) &&
mode->flags & DRM_MODE_FLAG_3D_MASK) {
vsync_rodd = vsync_lodd;
}
mtk_dpi_sw_reset(dpi, true);
mtk_dpi_config_pol(dpi, &dpi_pol);
mtk_dpi_config_hsync(dpi, &hsync);
mtk_dpi_config_vsync_lodd(dpi, &vsync_lodd);
mtk_dpi_config_vsync_rodd(dpi, &vsync_rodd);
mtk_dpi_config_vsync_leven(dpi, &vsync_leven);
mtk_dpi_config_vsync_reven(dpi, &vsync_reven);
mtk_dpi_config_3d(dpi, !!(mode->flags & DRM_MODE_FLAG_3D_MASK));
mtk_dpi_config_interface(dpi, !!(vm.flags &
DISPLAY_FLAGS_INTERLACED));
if (vm.flags & DISPLAY_FLAGS_INTERLACED)
mtk_dpi_config_fb_size(dpi, vm.hactive, vm.vactive >> 1);
else
mtk_dpi_config_fb_size(dpi, vm.hactive, vm.vactive);
mtk_dpi_config_channel_limit(dpi);
mtk_dpi_config_bit_num(dpi, dpi->bit_num);
mtk_dpi_config_channel_swap(dpi, dpi->channel_swap);
mtk_dpi_config_color_format(dpi, dpi->color_format);
if (dpi->conf->support_direct_pin) {
mtk_dpi_config_yc_map(dpi, dpi->yc_map);
mtk_dpi_config_2n_h_fre(dpi);
mtk_dpi_dual_edge(dpi);
mtk_dpi_config_disable_edge(dpi);
}
if (dpi->conf->input_2pixel) {
mtk_dpi_mask(dpi, DPI_CON, DPINTF_INPUT_2P_EN,
DPINTF_INPUT_2P_EN);
}
mtk_dpi_sw_reset(dpi, false);
return 0;
}
static u32 *mtk_dpi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
unsigned int *num_output_fmts)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
u32 *output_fmts;
*num_output_fmts = 0;
if (!dpi->conf->output_fmts) {
dev_err(dpi->dev, "output_fmts should not be null\n");
return NULL;
}
output_fmts = kcalloc(dpi->conf->num_output_fmts, sizeof(*output_fmts),
GFP_KERNEL);
if (!output_fmts)
return NULL;
*num_output_fmts = dpi->conf->num_output_fmts;
memcpy(output_fmts, dpi->conf->output_fmts,
sizeof(*output_fmts) * dpi->conf->num_output_fmts);
return output_fmts;
}
static u32 *mtk_dpi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
u32 output_fmt,
unsigned int *num_input_fmts)
{
u32 *input_fmts;
*num_input_fmts = 0;
input_fmts = kcalloc(1, sizeof(*input_fmts),
GFP_KERNEL);
if (!input_fmts)
return NULL;
*num_input_fmts = 1;
input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
return input_fmts;
}
static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
unsigned int out_bus_format;
out_bus_format = bridge_state->output_bus_cfg.format;
if (out_bus_format == MEDIA_BUS_FMT_FIXED)
if (dpi->conf->num_output_fmts)
out_bus_format = dpi->conf->output_fmts[0];
dev_dbg(dpi->dev, "input format 0x%04x, output format 0x%04x\n",
bridge_state->input_bus_cfg.format,
bridge_state->output_bus_cfg.format);
dpi->output_fmt = out_bus_format;
dpi->bit_num = MTK_DPI_OUT_BIT_NUM_8BITS;
dpi->channel_swap = MTK_DPI_OUT_CHANNEL_SWAP_RGB;
dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB;
if (out_bus_format == MEDIA_BUS_FMT_YUYV8_1X16)
dpi->color_format = MTK_DPI_COLOR_FORMAT_YCBCR_422;
else
dpi->color_format = MTK_DPI_COLOR_FORMAT_RGB;
return 0;
}
static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
return drm_bridge_attach(bridge->encoder, dpi->next_bridge,
&dpi->bridge, flags);
}
static void mtk_dpi_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
drm_mode_copy(&dpi->mode, adjusted_mode);
}
static void mtk_dpi_bridge_disable(struct drm_bridge *bridge)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
mtk_dpi_power_off(dpi);
if (dpi->pinctrl && dpi->pins_gpio)
pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
}
static void mtk_dpi_bridge_enable(struct drm_bridge *bridge)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
if (dpi->pinctrl && dpi->pins_dpi)
pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
mtk_dpi_power_on(dpi);
mtk_dpi_set_display_mode(dpi, &dpi->mode);
mtk_dpi_enable(dpi);
}
static enum drm_mode_status
mtk_dpi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
if (mode->clock > dpi->conf->max_clock_khz)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = {
.attach = mtk_dpi_bridge_attach,
.mode_set = mtk_dpi_bridge_mode_set,
.mode_valid = mtk_dpi_bridge_mode_valid,
.disable = mtk_dpi_bridge_disable,
.enable = mtk_dpi_bridge_enable,
.atomic_check = mtk_dpi_bridge_atomic_check,
.atomic_get_output_bus_fmts = mtk_dpi_bridge_atomic_get_output_bus_fmts,
.atomic_get_input_bus_fmts = mtk_dpi_bridge_atomic_get_input_bus_fmts,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
};
void mtk_dpi_start(struct device *dev)
{
struct mtk_dpi *dpi = dev_get_drvdata(dev);
mtk_dpi_power_on(dpi);
}
void mtk_dpi_stop(struct device *dev)
{
struct mtk_dpi *dpi = dev_get_drvdata(dev);
mtk_dpi_power_off(dpi);
}
static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
{
struct mtk_dpi *dpi = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct mtk_drm_private *priv = drm_dev->dev_private;
int ret;
dpi->mmsys_dev = priv->mmsys_dev;
ret = drm_simple_encoder_init(drm_dev, &dpi->encoder,
DRM_MODE_ENCODER_TMDS);
if (ret) {
dev_err(dev, "Failed to initialize decoder: %d\n", ret);
return ret;
}
dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->dev);
ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto err_cleanup;
dpi->connector = drm_bridge_connector_init(drm_dev, &dpi->encoder);
if (IS_ERR(dpi->connector)) {
dev_err(dev, "Unable to create bridge connector\n");
ret = PTR_ERR(dpi->connector);
goto err_cleanup;
}
drm_connector_attach_encoder(dpi->connector, &dpi->encoder);
return 0;
err_cleanup:
drm_encoder_cleanup(&dpi->encoder);
return ret;
}
static void mtk_dpi_unbind(struct device *dev, struct device *master,
void *data)
{
struct mtk_dpi *dpi = dev_get_drvdata(dev);
drm_encoder_cleanup(&dpi->encoder);
}
static const struct component_ops mtk_dpi_component_ops = {
.bind = mtk_dpi_bind,
.unbind = mtk_dpi_unbind,
};
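/*
 * The factors below multiply the pixel clock up into the TVDPLL's
 * working range (see mtk_dpi_set_display_mode), so lower pixel
 * clocks need larger factors.
 */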
static unsigned int mt8173_calculate_factor(int clock)
{
if (clock <= 27000)
return 3 << 4;
else if (clock <= 84000)
return 3 << 3;
else if (clock <= 167000)
return 3 << 2;
else
return 3 << 1;
}
static unsigned int mt2701_calculate_factor(int clock)
{
if (clock <= 64000)
return 4;
else if (clock <= 128000)
return 2;
else
return 1;
}
static unsigned int mt8183_calculate_factor(int clock)
{
if (clock <= 27000)
return 8;
else if (clock <= 167000)
return 4;
else
return 2;
}
static unsigned int mt8195_dpintf_calculate_factor(int clock)
{
if (clock < 70000)
return 4;
else if (clock < 200000)
return 2;
else
return 1;
}
static const u32 mt8173_output_fmts[] = {
MEDIA_BUS_FMT_RGB888_1X24,
};
static const u32 mt8183_output_fmts[] = {
MEDIA_BUS_FMT_RGB888_2X12_LE,
MEDIA_BUS_FMT_RGB888_2X12_BE,
};
static const u32 mt8195_output_fmts[] = {
MEDIA_BUS_FMT_RGB888_1X24,
MEDIA_BUS_FMT_YUYV8_1X16,
};
static const struct mtk_dpi_conf mt8173_conf = {
.cal_factor = mt8173_calculate_factor,
.reg_h_fre_con = 0xe0,
.max_clock_khz = 300000,
.output_fmts = mt8173_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8173_output_fmts),
.pixels_per_iter = 1,
.is_ck_de_pol = true,
.swap_input_support = true,
.support_direct_pin = true,
.dimension_mask = HPW_MASK,
.hvsize_mask = HSIZE_MASK,
.channel_swap_shift = CH_SWAP,
.yuv422_en_bit = YUV422_EN,
.csc_enable_bit = CSC_ENABLE,
};
static const struct mtk_dpi_conf mt2701_conf = {
.cal_factor = mt2701_calculate_factor,
.reg_h_fre_con = 0xb0,
.edge_sel_en = true,
.max_clock_khz = 150000,
.output_fmts = mt8173_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8173_output_fmts),
.pixels_per_iter = 1,
.is_ck_de_pol = true,
.swap_input_support = true,
.support_direct_pin = true,
.dimension_mask = HPW_MASK,
.hvsize_mask = HSIZE_MASK,
.channel_swap_shift = CH_SWAP,
.yuv422_en_bit = YUV422_EN,
.csc_enable_bit = CSC_ENABLE,
};
static const struct mtk_dpi_conf mt8183_conf = {
.cal_factor = mt8183_calculate_factor,
.reg_h_fre_con = 0xe0,
.max_clock_khz = 100000,
.output_fmts = mt8183_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
.pixels_per_iter = 1,
.is_ck_de_pol = true,
.swap_input_support = true,
.support_direct_pin = true,
.dimension_mask = HPW_MASK,
.hvsize_mask = HSIZE_MASK,
.channel_swap_shift = CH_SWAP,
.yuv422_en_bit = YUV422_EN,
.csc_enable_bit = CSC_ENABLE,
};
static const struct mtk_dpi_conf mt8186_conf = {
.cal_factor = mt8183_calculate_factor,
.reg_h_fre_con = 0xe0,
.max_clock_khz = 150000,
.output_fmts = mt8183_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
.edge_cfg_in_mmsys = true,
.pixels_per_iter = 1,
.is_ck_de_pol = true,
.swap_input_support = true,
.support_direct_pin = true,
.dimension_mask = HPW_MASK,
.hvsize_mask = HSIZE_MASK,
.channel_swap_shift = CH_SWAP,
.yuv422_en_bit = YUV422_EN,
.csc_enable_bit = CSC_ENABLE,
};
static const struct mtk_dpi_conf mt8188_dpintf_conf = {
.cal_factor = mt8195_dpintf_calculate_factor,
.max_clock_khz = 600000,
.output_fmts = mt8195_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
.pixels_per_iter = 4,
.input_2pixel = false,
.dimension_mask = DPINTF_HPW_MASK,
.hvsize_mask = DPINTF_HSIZE_MASK,
.channel_swap_shift = DPINTF_CH_SWAP,
.yuv422_en_bit = DPINTF_YUV422_EN,
.csc_enable_bit = DPINTF_CSC_ENABLE,
};
static const struct mtk_dpi_conf mt8192_conf = {
.cal_factor = mt8183_calculate_factor,
.reg_h_fre_con = 0xe0,
.max_clock_khz = 150000,
.output_fmts = mt8183_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
.pixels_per_iter = 1,
.is_ck_de_pol = true,
.swap_input_support = true,
.support_direct_pin = true,
.dimension_mask = HPW_MASK,
.hvsize_mask = HSIZE_MASK,
.channel_swap_shift = CH_SWAP,
.yuv422_en_bit = YUV422_EN,
.csc_enable_bit = CSC_ENABLE,
};
static const struct mtk_dpi_conf mt8195_dpintf_conf = {
.cal_factor = mt8195_dpintf_calculate_factor,
.max_clock_khz = 600000,
.output_fmts = mt8195_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
.pixels_per_iter = 4,
.input_2pixel = true,
.dimension_mask = DPINTF_HPW_MASK,
.hvsize_mask = DPINTF_HSIZE_MASK,
.channel_swap_shift = DPINTF_CH_SWAP,
.yuv422_en_bit = DPINTF_YUV422_EN,
.csc_enable_bit = DPINTF_CSC_ENABLE,
};
static int mtk_dpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_dpi *dpi;
int ret;
dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
if (!dpi)
return -ENOMEM;
dpi->dev = dev;
dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev);
dpi->output_fmt = MEDIA_BUS_FMT_RGB888_1X24;
dpi->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(dpi->pinctrl)) {
dpi->pinctrl = NULL;
dev_dbg(&pdev->dev, "Cannot find pinctrl!\n");
}
if (dpi->pinctrl) {
dpi->pins_gpio = pinctrl_lookup_state(dpi->pinctrl, "sleep");
if (IS_ERR(dpi->pins_gpio)) {
dpi->pins_gpio = NULL;
dev_dbg(&pdev->dev, "Cannot find pinctrl idle!\n");
}
if (dpi->pins_gpio)
pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
dpi->pins_dpi = pinctrl_lookup_state(dpi->pinctrl, "default");
if (IS_ERR(dpi->pins_dpi)) {
dpi->pins_dpi = NULL;
dev_dbg(&pdev->dev, "Cannot find pinctrl active!\n");
}
}
dpi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dpi->regs))
return dev_err_probe(dev, PTR_ERR(dpi->regs),
"Failed to ioremap mem resource\n");
dpi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dpi->engine_clk))
return dev_err_probe(dev, PTR_ERR(dpi->engine_clk),
"Failed to get engine clock\n");
dpi->pixel_clk = devm_clk_get(dev, "pixel");
if (IS_ERR(dpi->pixel_clk))
return dev_err_probe(dev, PTR_ERR(dpi->pixel_clk),
"Failed to get pixel clock\n");
dpi->tvd_clk = devm_clk_get(dev, "pll");
if (IS_ERR(dpi->tvd_clk))
return dev_err_probe(dev, PTR_ERR(dpi->tvd_clk),
"Failed to get tvdpll clock\n");
dpi->irq = platform_get_irq(pdev, 0);
if (dpi->irq < 0)
return dpi->irq;
dpi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(dpi->next_bridge))
return dev_err_probe(dev, PTR_ERR(dpi->next_bridge),
"Failed to get bridge\n");
dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node);
platform_set_drvdata(pdev, dpi);
dpi->bridge.funcs = &mtk_dpi_bridge_funcs;
dpi->bridge.of_node = dev->of_node;
dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
ret = devm_drm_bridge_add(dev, &dpi->bridge);
if (ret)
return ret;
ret = component_add(dev, &mtk_dpi_component_ops);
if (ret)
return dev_err_probe(dev, ret, "Failed to add component.\n");
return 0;
}
static void mtk_dpi_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_dpi_component_ops);
}
static const struct of_device_id mtk_dpi_of_ids[] = {
{ .compatible = "mediatek,mt2701-dpi", .data = &mt2701_conf },
{ .compatible = "mediatek,mt8173-dpi", .data = &mt8173_conf },
{ .compatible = "mediatek,mt8183-dpi", .data = &mt8183_conf },
{ .compatible = "mediatek,mt8186-dpi", .data = &mt8186_conf },
{ .compatible = "mediatek,mt8188-dp-intf", .data = &mt8188_dpintf_conf },
{ .compatible = "mediatek,mt8192-dpi", .data = &mt8192_conf },
{ .compatible = "mediatek,mt8195-dp-intf", .data = &mt8195_dpintf_conf },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mtk_dpi_of_ids);
struct platform_driver mtk_dpi_driver = {
.probe = mtk_dpi_probe,
.remove_new = mtk_dpi_remove,
.driver = {
.name = "mediatek-dpi",
.of_match_table = mtk_dpi_of_ids,
},
};
| linux-master | drivers/gpu/drm/mediatek/mtk_dpi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 MediaTek Inc.
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
#include <asm/barrier.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_gem.h"
#include "mtk_drm_plane.h"
/*
* struct mtk_drm_crtc - MediaTek specific crtc structure.
* @base: crtc object.
* @enabled: records whether crtc_enable succeeded
* @planes: array of drm_plane structures, one for each overlay layer
* @pending_planes: whether any plane has pending changes to be applied
* @mmsys_dev: pointer to the mmsys device for configuration registers
* @mutex: handle to one of the ten disp_mutex streams
* @ddp_comp_nr: number of components in ddp_comp
* @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc
*
* TODO: this header is still missing descriptions for several members.
*/
struct mtk_drm_crtc {
struct drm_crtc base;
bool enabled;
bool pending_needs_vblank;
struct drm_pending_vblank_event *event;
struct drm_plane *planes;
unsigned int layer_nr;
bool pending_planes;
bool pending_async_planes;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
struct cmdq_client cmdq_client;
struct cmdq_pkt cmdq_handle;
u32 cmdq_event;
u32 cmdq_vblank_cnt;
wait_queue_head_t cb_blocking_queue;
#endif
struct device *mmsys_dev;
struct device *dma_dev;
struct mtk_mutex *mutex;
unsigned int ddp_comp_nr;
struct mtk_ddp_comp **ddp_comp;
/* lock for display hardware access */
struct mutex hw_lock;
bool config_updating;
};
struct mtk_crtc_state {
struct drm_crtc_state base;
bool pending_config;
unsigned int pending_width;
unsigned int pending_height;
unsigned int pending_vrefresh;
};
static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
{
return container_of(c, struct mtk_drm_crtc, base);
}
static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
{
return container_of(s, struct mtk_crtc_state, base);
}
static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
unsigned long flags;
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
drm_crtc_vblank_put(crtc);
mtk_crtc->event = NULL;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
drm_crtc_handle_vblank(&mtk_crtc->base);
if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
mtk_drm_crtc_finish_page_flip(mtk_crtc);
mtk_crtc->pending_needs_vblank = false;
}
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
size_t size)
{
struct device *dev;
dma_addr_t dma_addr;
pkt->va_base = kzalloc(size, GFP_KERNEL);
if (!pkt->va_base)
return -ENOMEM;
pkt->buf_size = size;
pkt->cl = (void *)client;
dev = client->chan->mbox->dev;
dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
kfree(pkt->va_base);
return -ENOMEM;
}
pkt->pa_base = dma_addr;
return 0;
}
static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
DMA_TO_DEVICE);
kfree(pkt->va_base);
}
#endif
static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
int i;
mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);
if (mtk_crtc->cmdq_client.chan) {
mbox_free_channel(mtk_crtc->cmdq_client.chan);
mtk_crtc->cmdq_client.chan = NULL;
}
#endif
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
struct mtk_ddp_comp *comp;
comp = mtk_crtc->ddp_comp[i];
mtk_ddp_comp_unregister_vblank_cb(comp);
}
drm_crtc_cleanup(crtc);
}
static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
{
struct mtk_crtc_state *state;
if (crtc->state)
__drm_atomic_helper_crtc_destroy_state(crtc->state);
kfree(to_mtk_crtc_state(crtc->state));
crtc->state = NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_crtc_reset(crtc, &state->base);
}
static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct mtk_crtc_state *state;
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
WARN_ON(state->base.crtc != crtc);
state->base.crtc = crtc;
state->pending_config = false;
return &state->base;
}
static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
__drm_atomic_helper_crtc_destroy_state(state);
kfree(to_mtk_crtc_state(state));
}
static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* Nothing to do here, but this callback is mandatory. */
return true;
}
static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);
state->pending_width = crtc->mode.hdisplay;
state->pending_height = crtc->mode.vdisplay;
state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode);
wmb(); /* Make sure the above parameters are set before update */
state->pending_config = true;
}
static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
{
int ret;
int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]);
if (ret) {
DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
goto err;
}
}
return 0;
err:
while (--i >= 0)
mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
return ret;
}
static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
{
int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
}
static
struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
struct drm_plane *plane,
unsigned int *local_layer)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp;
int i, count = 0;
unsigned int local_index = plane - mtk_crtc->planes;
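/*
 * Planes are numbered consecutively across all components of the
 * path; walk the components until the running layer count covers
 * this plane and return its component-local layer index.
 */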
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
comp = mtk_crtc->ddp_comp[i];
if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
*local_layer = local_index - count;
return comp;
}
count += mtk_ddp_comp_layer_nr(comp);
}
WARN(1, "Failed to find component for plane %d\n", plane->index);
return NULL;
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
struct cmdq_cb_data *data = mssg;
struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
struct mtk_crtc_state *state;
unsigned int i;
if (data->sta < 0)
return;
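/*
 * The GCE has executed the packet, so the configuration written in
 * mtk_crtc_ddp_config() has reached the hardware; clear the pending
 * flags that were left set for the CMDQ path.
 */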
state = to_mtk_crtc_state(mtk_crtc->base.state);
state->pending_config = false;
if (mtk_crtc->pending_planes) {
for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
plane_state->pending.config = false;
}
mtk_crtc->pending_planes = false;
}
if (mtk_crtc->pending_async_planes) {
for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
plane_state->pending.async_config = false;
}
mtk_crtc->pending_async_planes = false;
}
mtk_crtc->cmdq_vblank_cnt = 0;
wake_up(&mtk_crtc->cb_blocking_queue);
}
#endif
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_connector_list_iter conn_iter;
unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
int ret;
int i;
if (WARN_ON(!crtc->state))
return -EINVAL;
width = crtc->state->adjusted_mode.hdisplay;
height = crtc->state->adjusted_mode.vdisplay;
vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode);
drm_for_each_encoder(encoder, crtc->dev) {
if (encoder->crtc != crtc)
continue;
drm_connector_list_iter_begin(crtc->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->encoder != encoder)
continue;
if (connector->display_info.bpc != 0 &&
bpc > connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
drm_connector_list_iter_end(&conn_iter);
}
ret = pm_runtime_resume_and_get(crtc->dev->dev);
if (ret < 0) {
DRM_ERROR("Failed to enable power domain: %d\n", ret);
return ret;
}
ret = mtk_mutex_prepare(mtk_crtc->mutex);
if (ret < 0) {
DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
goto err_pm_runtime_put;
}
ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
if (ret < 0) {
DRM_ERROR("Failed to enable component clocks: %d\n", ret);
goto err_mutex_unprepare;
}
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
if (!mtk_ddp_comp_connect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev,
mtk_crtc->ddp_comp[i + 1]->id))
mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
mtk_crtc->ddp_comp[i]->id,
mtk_crtc->ddp_comp[i + 1]->id);
if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
mtk_mutex_add_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
mtk_mutex_enable(mtk_crtc->mutex);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
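/*
 * The second component in the path (a cascaded OVL) takes its
 * background color from the output of the first one rather than
 * from its own background color register.
 */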
if (i == 1)
mtk_ddp_comp_bgclr_in_on(comp);
mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
mtk_ddp_comp_start(comp);
}
/* Initially configure all planes */
for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
struct mtk_ddp_comp *comp;
unsigned int local_layer;
plane_state = to_mtk_plane_state(plane->state);
comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
if (comp)
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state, NULL);
}
return 0;
err_mutex_unprepare:
mtk_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
pm_runtime_put(crtc->dev->dev);
return ret;
}
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_device *drm = mtk_crtc->base.dev;
struct drm_crtc *crtc = &mtk_crtc->base;
int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
if (i == 1)
mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
}
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
mtk_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
mtk_mutex_disable(mtk_crtc->mutex);
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
if (!mtk_ddp_comp_disconnect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev,
mtk_crtc->ddp_comp[i + 1]->id))
mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
mtk_crtc->ddp_comp[i]->id,
mtk_crtc->ddp_comp[i + 1]->id);
if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
mtk_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
mtk_crtc_ddp_clk_disable(mtk_crtc);
mtk_mutex_unprepare(mtk_crtc->mutex);
pm_runtime_put(drm->dev);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
spin_unlock_irq(&crtc->dev->event_lock);
}
}
static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
struct cmdq_pkt *cmdq_handle)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
unsigned int i;
unsigned int local_layer;
/*
* TODO: instead of updating the registers here, we should prepare
* working registers in atomic_commit and let the hardware command
* queue update module registers on vblank.
*/
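/*
 * When a cmdq_handle is used, the pending flags are cleared later
 * from the CMDQ completion callback, once the hardware has actually
 * consumed the configuration.
 */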
if (state->pending_config) {
mtk_ddp_comp_config(comp, state->pending_width,
state->pending_height,
state->pending_vrefresh, 0,
cmdq_handle);
if (!cmdq_handle)
state->pending_config = false;
}
if (mtk_crtc->pending_planes) {
for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
if (!plane_state->pending.config)
continue;
comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
&local_layer);
if (comp)
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state,
cmdq_handle);
if (!cmdq_handle)
plane_state->pending.config = false;
}
if (!cmdq_handle)
mtk_crtc->pending_planes = false;
}
if (mtk_crtc->pending_async_planes) {
for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
if (!plane_state->pending.async_config)
continue;
comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
&local_layer);
if (comp)
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state,
cmdq_handle);
if (!cmdq_handle)
plane_state->pending.async_config = false;
}
if (!cmdq_handle)
mtk_crtc->pending_async_planes = false;
}
}
static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
struct drm_crtc *crtc = &mtk_crtc->base;
struct mtk_drm_private *priv = crtc->dev->dev_private;
unsigned int pending_planes = 0, pending_async_planes = 0;
int i;
mutex_lock(&mtk_crtc->hw_lock);
mtk_crtc->config_updating = true;
if (needs_vblank)
mtk_crtc->pending_needs_vblank = true;
for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
if (plane_state->pending.dirty) {
plane_state->pending.config = true;
plane_state->pending.dirty = false;
pending_planes |= BIT(i);
} else if (plane_state->pending.async_dirty) {
plane_state->pending.async_config = true;
plane_state->pending.async_dirty = false;
pending_async_planes |= BIT(i);
}
}
if (pending_planes)
mtk_crtc->pending_planes = true;
if (pending_async_planes)
mtk_crtc->pending_async_planes = true;
if (priv->data->shadow_register) {
mtk_mutex_acquire(mtk_crtc->mutex);
mtk_crtc_ddp_config(crtc, NULL);
mtk_mutex_release(mtk_crtc->mutex);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (mtk_crtc->cmdq_client.chan) {
mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
cmdq_handle->cmd_buf_size = 0;
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
mtk_crtc_ddp_config(crtc, cmdq_handle);
cmdq_pkt_finalize(cmdq_handle);
dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
cmdq_handle->pa_base,
cmdq_handle->cmd_buf_size,
DMA_TO_DEVICE);
/*
 * A CMDQ command should execute within the next 3 vblanks:
 * occasionally one vblank interrupt passes before the message is
 * sent and one more after CMDQ completes, so only treat it as a
 * timeout after 3 vblank interrupts without completion.
 */
mtk_crtc->cmdq_vblank_cnt = 3;
mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
}
#endif
mtk_crtc->config_updating = false;
mutex_unlock(&mtk_crtc->hw_lock);
}
static void mtk_crtc_ddp_irq(void *data)
{
struct drm_crtc *crtc = data;
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_drm_private *priv = crtc->dev->dev_private;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
mtk_crtc_ddp_config(crtc, NULL);
else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
drm_crtc_index(&mtk_crtc->base));
#else
if (!priv->data->shadow_register)
mtk_crtc_ddp_config(crtc, NULL);
#endif
mtk_drm_finish_page_flip(mtk_crtc);
}
static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
mtk_ddp_comp_enable_vblank(comp);
return 0;
}
static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
mtk_ddp_comp_disable_vblank(comp);
}
int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
struct mtk_plane_state *state)
{
unsigned int local_layer;
struct mtk_ddp_comp *comp;
comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
if (comp)
return mtk_ddp_comp_layer_check(comp, local_layer, state);
return 0;
}
void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
if (!mtk_crtc->enabled)
return;
mtk_drm_crtc_update_config(mtk_crtc, false);
}
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int ret;
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
ret = pm_runtime_resume_and_get(comp->dev);
if (ret < 0) {
DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret);
return;
}
ret = mtk_crtc_ddp_hw_init(mtk_crtc);
if (ret) {
pm_runtime_put(comp->dev);
return;
}
drm_crtc_vblank_on(crtc);
mtk_crtc->enabled = true;
}
static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int i, ret;
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
if (!mtk_crtc->enabled)
return;
/* Set all pending plane state to disabled */
for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
plane_state->pending.enable = false;
plane_state->pending.config = true;
}
mtk_crtc->pending_planes = true;
mtk_drm_crtc_update_config(mtk_crtc, false);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
/* Wait for planes to be disabled by cmdq */
if (mtk_crtc->cmdq_client.chan)
wait_event_timeout(mtk_crtc->cb_blocking_queue,
mtk_crtc->cmdq_vblank_cnt == 0,
msecs_to_jiffies(500));
#endif
/* Wait for planes to be disabled */
drm_crtc_wait_one_vblank(crtc);
drm_crtc_vblank_off(crtc);
mtk_crtc_ddp_hw_fini(mtk_crtc);
ret = pm_runtime_put(comp->dev);
if (ret < 0)
DRM_DEV_ERROR(comp->dev, "Failed to disable power domain: %d\n", ret);
mtk_crtc->enabled = false;
}
static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
if (mtk_crtc->event && mtk_crtc_state->base.event)
DRM_ERROR("new event while there is still a pending event\n");
if (mtk_crtc_state->base.event) {
mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
mtk_crtc->event = mtk_crtc_state->base.event;
mtk_crtc_state->base.event = NULL;
}
}
static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
int i;
if (crtc->state->color_mgmt_changed)
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
}
mtk_drm_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
}
static const struct drm_crtc_funcs mtk_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.destroy = mtk_drm_crtc_destroy,
.reset = mtk_drm_crtc_reset,
.atomic_duplicate_state = mtk_drm_crtc_duplicate_state,
.atomic_destroy_state = mtk_drm_crtc_destroy_state,
.enable_vblank = mtk_drm_crtc_enable_vblank,
.disable_vblank = mtk_drm_crtc_disable_vblank,
};
static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
.mode_fixup = mtk_drm_crtc_mode_fixup,
.mode_set_nofb = mtk_drm_crtc_mode_set_nofb,
.atomic_begin = mtk_drm_crtc_atomic_begin,
.atomic_flush = mtk_drm_crtc_atomic_flush,
.atomic_enable = mtk_drm_crtc_atomic_enable,
.atomic_disable = mtk_drm_crtc_atomic_disable,
};
static int mtk_drm_crtc_init(struct drm_device *drm,
struct mtk_drm_crtc *mtk_crtc,
unsigned int pipe)
{
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
int i, ret;
for (i = 0; i < mtk_crtc->layer_nr; i++) {
if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
primary = &mtk_crtc->planes[i];
else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
cursor = &mtk_crtc->planes[i];
}
ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
&mtk_crtc_funcs, NULL);
if (ret)
goto err_cleanup_crtc;
drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);
return 0;
err_cleanup_crtc:
drm_crtc_cleanup(&mtk_crtc->base);
return ret;
}
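/*
 * Only the first two components in the path can contribute planes;
 * the second one does so only if it supports background-color input
 * (bgclr_in_on).
 */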
static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
int comp_idx)
{
struct mtk_ddp_comp *comp;
if (comp_idx > 1)
return 0;
comp = mtk_crtc->ddp_comp[comp_idx];
if (!comp->funcs)
return 0;
if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
return 0;
return mtk_ddp_comp_layer_nr(comp);
}
static inline
enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
unsigned int num_planes)
{
if (plane_idx == 0)
return DRM_PLANE_TYPE_PRIMARY;
else if (plane_idx == (num_planes - 1))
return DRM_PLANE_TYPE_CURSOR;
else
return DRM_PLANE_TYPE_OVERLAY;
}
static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
struct mtk_drm_crtc *mtk_crtc,
int comp_idx, int pipe)
{
int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
int i, ret;
for (i = 0; i < num_planes; i++) {
ret = mtk_plane_init(drm_dev,
&mtk_crtc->planes[mtk_crtc->layer_nr],
BIT(pipe),
mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
num_planes),
mtk_ddp_comp_supported_rotations(comp),
mtk_ddp_comp_get_formats(comp),
mtk_ddp_comp_get_num_formats(comp));
if (ret)
return ret;
mtk_crtc->layer_nr++;
}
return 0;
}
struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
return mtk_crtc->dma_dev;
}
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const unsigned int *path, unsigned int path_len,
int priv_data_index)
{
struct mtk_drm_private *priv = drm_dev->dev_private;
struct device *dev = drm_dev->dev;
struct mtk_drm_crtc *mtk_crtc;
unsigned int num_comp_planes = 0;
int ret;
int i;
bool has_ctm = false;
uint gamma_lut_size = 0;
struct drm_crtc *tmp;
int crtc_i = 0;
if (!path)
return 0;
priv = priv->all_drm_private[priv_data_index];
drm_for_each_crtc(tmp, drm_dev)
crtc_i++;
for (i = 0; i < path_len; i++) {
enum mtk_ddp_comp_id comp_id = path[i];
struct device_node *node;
struct mtk_ddp_comp *comp;
node = priv->comp_node[comp_id];
comp = &priv->ddp_comp[comp_id];
/* Not all DRM components have a DT device node; one example is
 * ovl_adaptor, which is the DRM bring-up sub-driver.
 */
if (!node && comp_id != DDP_COMPONENT_DRM_OVL_ADAPTOR) {
dev_info(dev,
"Not creating crtc %d because component %d is disabled or missing\n",
crtc_i, comp_id);
return 0;
}
if (!comp->dev) {
dev_err(dev, "Component %pOF not initialized\n", node);
return -ENODEV;
}
}
mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
if (!mtk_crtc)
return -ENOMEM;
mtk_crtc->mmsys_dev = priv->mmsys_dev;
mtk_crtc->ddp_comp_nr = path_len;
mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
sizeof(*mtk_crtc->ddp_comp),
GFP_KERNEL);
if (!mtk_crtc->ddp_comp)
return -ENOMEM;
mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
if (IS_ERR(mtk_crtc->mutex)) {
ret = PTR_ERR(mtk_crtc->mutex);
dev_err(dev, "Failed to get mutex: %d\n", ret);
return ret;
}
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
unsigned int comp_id = path[i];
struct mtk_ddp_comp *comp;
comp = &priv->ddp_comp[comp_id];
mtk_crtc->ddp_comp[i] = comp;
if (comp->funcs) {
if (comp->funcs->gamma_set)
gamma_lut_size = MTK_LUT_SIZE;
if (comp->funcs->ctm_set)
has_ctm = true;
}
mtk_ddp_comp_register_vblank_cb(comp, mtk_crtc_ddp_irq,
&mtk_crtc->base);
}
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);
mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
sizeof(struct drm_plane), GFP_KERNEL);
if (!mtk_crtc->planes)
return -ENOMEM;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
crtc_i);
if (ret)
return ret;
}
/*
 * By default, use the first component as the DMA device.
 * The ovl_adaptor sub-driver instead needs dma_dev_get() to pick a
 * representative DMA device.
 */
mtk_crtc->dma_dev = mtk_ddp_comp_dma_dev_get(&priv->ddp_comp[path[0]]);
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, crtc_i);
if (ret < 0)
return ret;
if (gamma_lut_size)
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
mutex_init(&mtk_crtc->hw_lock);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
i = priv->mbox_index++;
mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev;
mtk_crtc->cmdq_client.client.tx_block = false;
mtk_crtc->cmdq_client.client.knows_txdone = true;
mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb;
mtk_crtc->cmdq_client.chan =
mbox_request_channel(&mtk_crtc->cmdq_client.client, i);
if (IS_ERR(mtk_crtc->cmdq_client.chan)) {
dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
drm_crtc_index(&mtk_crtc->base));
mtk_crtc->cmdq_client.chan = NULL;
}
if (mtk_crtc->cmdq_client.chan) {
ret = of_property_read_u32_index(priv->mutex_node,
"mediatek,gce-events",
i,
&mtk_crtc->cmdq_event);
if (ret) {
dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
drm_crtc_index(&mtk_crtc->base));
mbox_free_channel(mtk_crtc->cmdq_client.chan);
mtk_crtc->cmdq_client.chan = NULL;
} else {
ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
&mtk_crtc->cmdq_handle,
PAGE_SIZE);
if (ret) {
dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
drm_crtc_index(&mtk_crtc->base));
mbox_free_channel(mtk_crtc->cmdq_client.chan);
mtk_crtc->cmdq_client.chan = NULL;
}
}
/* Wait queue used at crtc disable to block until the CMDQ packet completes */
init_waitqueue_head(&mtk_crtc->cb_blocking_queue);
}
#endif
return 0;
}
| linux-master | drivers/gpu/drm/mediatek/mtk_drm_crtc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 MediaTek Inc.
* Author: CK Hu <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <linux/align.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
#include "mtk_drm_plane.h"
static const u64 modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
AFBC_FORMAT_MOD_SPLIT |
AFBC_FORMAT_MOD_SPARSE),
DRM_FORMAT_MOD_INVALID,
};
static void mtk_plane_reset(struct drm_plane *plane)
{
struct mtk_plane_state *state;
if (plane->state) {
__drm_atomic_helper_plane_destroy_state(plane->state);
state = to_mtk_plane_state(plane->state);
memset(state, 0, sizeof(*state));
} else {
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return;
}
__drm_atomic_helper_plane_reset(plane, &state->base);
state->base.plane = plane;
state->pending.format = DRM_FORMAT_RGB565;
state->pending.modifier = DRM_FORMAT_MOD_LINEAR;
}
static struct drm_plane_state *mtk_plane_duplicate_state(struct drm_plane *plane)
{
struct mtk_plane_state *old_state = to_mtk_plane_state(plane->state);
struct mtk_plane_state *state;
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
WARN_ON(state->base.plane != plane);
state->pending = old_state->pending;
return &state->base;
}
static bool mtk_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return true;
if (modifier != DRM_FORMAT_MOD_ARM_AFBC(
AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
AFBC_FORMAT_MOD_SPLIT |
AFBC_FORMAT_MOD_SPARSE))
return false;
if (format != DRM_FORMAT_XRGB8888 &&
format != DRM_FORMAT_ARGB8888 &&
format != DRM_FORMAT_BGRX8888 &&
format != DRM_FORMAT_BGRA8888 &&
format != DRM_FORMAT_ABGR8888 &&
format != DRM_FORMAT_XBGR8888 &&
format != DRM_FORMAT_RGB888 &&
format != DRM_FORMAT_BGR888)
return false;
return true;
}
static void mtk_drm_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
__drm_atomic_helper_plane_destroy_state(state);
kfree(to_mtk_plane_state(state));
}
static int mtk_plane_atomic_async_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_crtc_state *crtc_state;
int ret;
if (plane != new_plane_state->crtc->cursor)
return -EINVAL;
if (!plane->state)
return -EINVAL;
if (!plane->state->fb)
return -EINVAL;
ret = mtk_drm_crtc_plane_check(new_plane_state->crtc, plane,
to_mtk_plane_state(new_plane_state));
if (ret)
return ret;
crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc);
return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
true, true);
}
static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
struct mtk_plane_state *mtk_plane_state)
{
struct drm_framebuffer *fb = new_state->fb;
struct drm_gem_object *gem;
struct mtk_drm_gem_obj *mtk_gem;
unsigned int pitch, format;
u64 modifier;
dma_addr_t addr;
dma_addr_t hdr_addr = 0;
unsigned int hdr_pitch = 0;
gem = fb->obj[0];
mtk_gem = to_mtk_gem_obj(gem);
addr = mtk_gem->dma_addr;
pitch = fb->pitches[0];
format = fb->format->format;
modifier = fb->modifier;
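/*
 * Linear buffers: offset the base address by the source crop.
 * AFBC buffers: the header plane of block descriptors comes first,
 * followed by the data plane, so derive both addresses from the
 * block-aligned crop offsets.
 */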
if (modifier == DRM_FORMAT_MOD_LINEAR) {
addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
addr += (new_state->src.y1 >> 16) * pitch;
} else {
int width_in_blocks = ALIGN(fb->width, AFBC_DATA_BLOCK_WIDTH)
/ AFBC_DATA_BLOCK_WIDTH;
int height_in_blocks = ALIGN(fb->height, AFBC_DATA_BLOCK_HEIGHT)
/ AFBC_DATA_BLOCK_HEIGHT;
int x_offset_in_blocks = (new_state->src.x1 >> 16) / AFBC_DATA_BLOCK_WIDTH;
int y_offset_in_blocks = (new_state->src.y1 >> 16) / AFBC_DATA_BLOCK_HEIGHT;
int hdr_size;
hdr_pitch = width_in_blocks * AFBC_HEADER_BLOCK_SIZE;
pitch = width_in_blocks * AFBC_DATA_BLOCK_WIDTH *
AFBC_DATA_BLOCK_HEIGHT * fb->format->cpp[0];
hdr_size = ALIGN(hdr_pitch * height_in_blocks, AFBC_HEADER_ALIGNMENT);
hdr_addr = addr + hdr_pitch * y_offset_in_blocks +
AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks;
/* The data plane is offset by 1 additional block. */
addr = addr + hdr_size +
pitch * y_offset_in_blocks +
AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT *
fb->format->cpp[0] * (x_offset_in_blocks + 1);
}
mtk_plane_state->pending.enable = true;
mtk_plane_state->pending.pitch = pitch;
mtk_plane_state->pending.hdr_pitch = hdr_pitch;
mtk_plane_state->pending.format = format;
mtk_plane_state->pending.modifier = modifier;
mtk_plane_state->pending.addr = addr;
mtk_plane_state->pending.hdr_addr = hdr_addr;
mtk_plane_state->pending.x = new_state->dst.x1;
mtk_plane_state->pending.y = new_state->dst.y1;
mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
mtk_plane_state->pending.rotation = new_state->rotation;
mtk_plane_state->pending.color_encoding = new_state->color_encoding;
}
static void mtk_plane_atomic_async_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct mtk_plane_state *new_plane_state = to_mtk_plane_state(plane->state);
plane->state->crtc_x = new_state->crtc_x;
plane->state->crtc_y = new_state->crtc_y;
plane->state->crtc_h = new_state->crtc_h;
plane->state->crtc_w = new_state->crtc_w;
plane->state->src_x = new_state->src_x;
plane->state->src_y = new_state->src_y;
plane->state->src_h = new_state->src_h;
plane->state->src_w = new_state->src_w;
swap(plane->state->fb, new_state->fb);
mtk_plane_update_new_state(new_state, new_plane_state);
wmb(); /* Make sure the above parameters are set before update */
new_plane_state->pending.async_dirty = true;
mtk_drm_crtc_async_update(new_state->crtc, plane, state);
}
static const struct drm_plane_funcs mtk_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = mtk_plane_reset,
.atomic_duplicate_state = mtk_plane_duplicate_state,
.atomic_destroy_state = mtk_drm_plane_destroy_state,
.format_mod_supported = mtk_plane_format_mod_supported,
};
static int mtk_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc_state *crtc_state;
int ret;
if (!fb)
return 0;
if (WARN_ON(!new_plane_state->crtc))
return 0;
ret = mtk_drm_crtc_plane_check(new_plane_state->crtc, plane,
to_mtk_plane_state(new_plane_state));
if (ret)
return ret;
crtc_state = drm_atomic_get_crtc_state(state,
new_plane_state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
true, true);
}
static void mtk_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
mtk_plane_state->pending.enable = false;
wmb(); /* Make sure the above parameter is set before update */
mtk_plane_state->pending.dirty = true;
}
static void mtk_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
if (!new_state->crtc || WARN_ON(!new_state->fb))
return;
if (!new_state->visible) {
mtk_plane_atomic_disable(plane, state);
return;
}
mtk_plane_update_new_state(new_state, mtk_plane_state);
wmb(); /* Make sure the above parameters are set before update */
mtk_plane_state->pending.dirty = true;
}
static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
.atomic_check = mtk_plane_atomic_check,
.atomic_update = mtk_plane_atomic_update,
.atomic_disable = mtk_plane_atomic_disable,
.atomic_async_update = mtk_plane_atomic_async_update,
.atomic_async_check = mtk_plane_atomic_async_check,
};
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 *formats,
size_t num_formats)
{
int err;
if (!formats || !num_formats) {
DRM_ERROR("no formats for plane\n");
return -EINVAL;
}
err = drm_universal_plane_init(dev, plane, possible_crtcs,
&mtk_plane_funcs, formats,
num_formats, modifiers, type, NULL);
if (err) {
DRM_ERROR("failed to initialize plane\n");
return err;
}
if (supported_rotations & ~DRM_MODE_ROTATE_0) {
err = drm_plane_create_rotation_property(plane,
DRM_MODE_ROTATE_0,
supported_rotations);
if (err)
DRM_INFO("Create rotation property failed\n");
}
drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
return 0;
}
| linux-master | drivers/gpu/drm/mediatek/mtk_drm_plane.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 MediaTek Inc.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <video/mipi_display.h>
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "mtk_disp_drv.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DSI_START 0x00
#define DSI_INTEN 0x08
#define DSI_INTSTA 0x0c
#define LPRX_RD_RDY_INT_FLAG BIT(0)
#define CMD_DONE_INT_FLAG BIT(1)
#define TE_RDY_INT_FLAG BIT(2)
#define VM_DONE_INT_FLAG BIT(3)
#define EXT_TE_RDY_INT_FLAG BIT(4)
#define DSI_BUSY BIT(31)
#define DSI_CON_CTRL 0x10
#define DSI_RESET BIT(0)
#define DSI_EN BIT(1)
#define DPHY_RESET BIT(2)
#define DSI_MODE_CTRL 0x14
#define MODE (3)
#define CMD_MODE 0
#define SYNC_PULSE_MODE 1
#define SYNC_EVENT_MODE 2
#define BURST_MODE 3
#define FRM_MODE BIT(16)
#define MIX_MODE BIT(17)
#define DSI_TXRX_CTRL 0x18
#define VC_NUM BIT(1)
#define LANE_NUM (0xf << 2)
#define DIS_EOT BIT(6)
#define NULL_EN BIT(7)
#define TE_FREERUN BIT(8)
#define EXT_TE_EN BIT(9)
#define EXT_TE_EDGE BIT(10)
#define MAX_RTN_SIZE (0xf << 12)
#define HSTX_CKLP_EN BIT(16)
#define DSI_PSCTRL 0x1c
#define DSI_PS_WC 0x3fff
#define DSI_PS_SEL (3 << 16)
#define PACKED_PS_16BIT_RGB565 (0 << 16)
#define LOOSELY_PS_18BIT_RGB666 (1 << 16)
#define PACKED_PS_18BIT_RGB666 (2 << 16)
#define PACKED_PS_24BIT_RGB888 (3 << 16)
#define DSI_VSA_NL 0x20
#define DSI_VBP_NL 0x24
#define DSI_VFP_NL 0x28
#define DSI_VACT_NL 0x2C
#define DSI_SIZE_CON 0x38
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
#define DSI_CMDQ_SIZE 0x60
#define CMDQ_SIZE 0x3f
#define DSI_HSTX_CKL_WC 0x64
#define DSI_RX_DATA0 0x74
#define DSI_RX_DATA1 0x78
#define DSI_RX_DATA2 0x7c
#define DSI_RX_DATA3 0x80
#define DSI_RACK 0x84
#define RACK BIT(0)
#define DSI_PHY_LCCON 0x104
#define LC_HS_TX_EN BIT(0)
#define LC_ULPM_EN BIT(1)
#define LC_WAKEUP_EN BIT(2)
#define DSI_PHY_LD0CON 0x108
#define LD0_HS_TX_EN BIT(0)
#define LD0_ULPM_EN BIT(1)
#define LD0_WAKEUP_EN BIT(2)
#define DSI_PHY_TIMECON0 0x110
#define LPX (0xff << 0)
#define HS_PREP (0xff << 8)
#define HS_ZERO (0xff << 16)
#define HS_TRAIL (0xff << 24)
#define DSI_PHY_TIMECON1 0x114
#define TA_GO (0xff << 0)
#define TA_SURE (0xff << 8)
#define TA_GET (0xff << 16)
#define DA_HS_EXIT (0xff << 24)
#define DSI_PHY_TIMECON2 0x118
#define CONT_DET (0xff << 0)
#define CLK_ZERO (0xff << 16)
#define CLK_TRAIL (0xff << 24)
#define DSI_PHY_TIMECON3 0x11c
#define CLK_HS_PREP (0xff << 0)
#define CLK_HS_POST (0xff << 8)
#define CLK_HS_EXIT (0xff << 16)
#define DSI_VM_CMD_CON 0x130
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)
#define DSI_SHADOW_DEBUG 0x190U
#define FORCE_COMMIT BIT(0)
#define BYPASS_SHADOW BIT(1)
#define CONFIG (0xff << 0)
#define SHORT_PACKET 0
#define LONG_PACKET 2
#define BTA BIT(2)
#define DATA_ID (0xff << 8)
#define DATA_0 (0xff << 16)
#define DATA_1 (0xff << 24)
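/* Round up when converting a duration in ns to an integer cycle count */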
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
#define MTK_DSI_HOST_IS_READ(type) \
((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
(type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
(type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
(type == MIPI_DSI_DCS_READ))
struct mtk_phy_timing {
u32 lpx;
u32 da_hs_prepare;
u32 da_hs_zero;
u32 da_hs_trail;
u32 ta_go;
u32 ta_sure;
u32 ta_get;
u32 da_hs_exit;
u32 clk_hs_zero;
u32 clk_hs_trail;
u32 clk_hs_prepare;
u32 clk_hs_post;
u32 clk_hs_exit;
};
struct phy;
struct mtk_dsi_driver_data {
const u32 reg_cmdq_off;
bool has_shadow_ctl;
bool has_size_ctl;
};
struct mtk_dsi {
struct device *dev;
struct mipi_dsi_host host;
struct drm_encoder encoder;
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
struct drm_connector *connector;
struct phy *phy;
void __iomem *regs;
struct clk *engine_clk;
struct clk *digital_clk;
struct clk *hs_clk;
u32 data_rate;
unsigned long mode_flags;
enum mipi_dsi_pixel_format format;
unsigned int lanes;
struct videomode vm;
struct mtk_phy_timing phy_timing;
int refcount;
bool enabled;
bool lanes_ready;
u32 irq_data;
wait_queue_head_t irq_wait_queue;
const struct mtk_dsi_driver_data *driver_data;
};
static inline struct mtk_dsi *bridge_to_dsi(struct drm_bridge *b)
{
return container_of(b, struct mtk_dsi, bridge);
}
static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h)
{
return container_of(h, struct mtk_dsi, host);
}
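/* Read-modify-write helper: update only the register bits selected by mask */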
static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
{
u32 temp = readl(dsi->regs + offset);
writel((temp & ~mask) | (data & mask), dsi->regs + offset);
}
static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
struct mtk_phy_timing *timing = &dsi->phy_timing;
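/* Convert D-PHY timing constraints (given in ns) to byte-clock cycles, based on the data rate */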
timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
timing->da_hs_prepare;
timing->da_hs_trail = timing->da_hs_prepare + 1;
timing->ta_go = 4 * timing->lpx - 2;
timing->ta_sure = timing->lpx + 2;
timing->ta_get = 4 * timing->lpx;
timing->da_hs_exit = 2 * timing->lpx + 1;
timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
timing->clk_hs_post = timing->clk_hs_prepare + 8;
timing->clk_hs_trail = timing->clk_hs_prepare;
timing->clk_hs_zero = timing->clk_hs_trail * 4;
timing->clk_hs_exit = 2 * timing->clk_hs_trail;
timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
timcon1 = timing->ta_go | timing->ta_sure << 8 |
timing->ta_get << 16 | timing->da_hs_exit << 24;
timcon2 = 1 << 8 | timing->clk_hs_zero << 16 |
timing->clk_hs_trail << 24;
timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 |
timing->clk_hs_exit << 16;
writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
writel(timcon2, dsi->regs + DSI_PHY_TIMECON2);
writel(timcon3, dsi->regs + DSI_PHY_TIMECON3);
}
static void mtk_dsi_enable(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, DSI_EN);
}
static void mtk_dsi_disable(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
}
static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}
static void mtk_dsi_reset_dphy(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, DPHY_RESET);
mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, 0);
}
static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
}
static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
}
static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
}
static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
}
static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN;
}
static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
{
if (enter && !mtk_dsi_clk_hs_state(dsi))
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN);
else if (!enter && mtk_dsi_clk_hs_state(dsi))
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
}
static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
{
u32 vid_mode = CMD_MODE;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
vid_mode = BURST_MODE;
else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
vid_mode = SYNC_PULSE_MODE;
else
vid_mode = SYNC_EVENT_MODE;
}
writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
}
static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
}
static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
{
struct videomode *vm = &dsi->vm;
u32 dsi_buf_bpp, ps_wc;
u32 ps_bpp_mode;
if (dsi->format == MIPI_DSI_FMT_RGB565)
dsi_buf_bpp = 2;
else
dsi_buf_bpp = 3;
ps_wc = vm->hactive * dsi_buf_bpp;
ps_bpp_mode = ps_wc;
switch (dsi->format) {
case MIPI_DSI_FMT_RGB888:
ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
break;
case MIPI_DSI_FMT_RGB666:
ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
break;
case MIPI_DSI_FMT_RGB565:
ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
break;
}
writel(vm->vactive, dsi->regs + DSI_VACT_NL);
writel(ps_bpp_mode, dsi->regs + DSI_PSCTRL);
writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
}
static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
{
u32 tmp_reg;
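/* LANE_NUM field: one bit per enabled data lane */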
switch (dsi->lanes) {
case 1:
tmp_reg = 1 << 2;
break;
case 2:
tmp_reg = 3 << 2;
break;
case 3:
tmp_reg = 7 << 2;
break;
case 4:
tmp_reg = 0xf << 2;
break;
default:
tmp_reg = 0xf << 2;
break;
}
if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
tmp_reg |= HSTX_CKLP_EN;
if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
tmp_reg |= DIS_EOT;
writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
}
static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
{
u32 dsi_tmp_buf_bpp;
u32 tmp_reg;
switch (dsi->format) {
case MIPI_DSI_FMT_RGB888:
tmp_reg = PACKED_PS_24BIT_RGB888;
dsi_tmp_buf_bpp = 3;
break;
case MIPI_DSI_FMT_RGB666:
tmp_reg = LOOSELY_PS_18BIT_RGB666;
dsi_tmp_buf_bpp = 3;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
tmp_reg = PACKED_PS_18BIT_RGB666;
dsi_tmp_buf_bpp = 3;
break;
case MIPI_DSI_FMT_RGB565:
tmp_reg = PACKED_PS_16BIT_RGB565;
dsi_tmp_buf_bpp = 2;
break;
default:
tmp_reg = PACKED_PS_24BIT_RGB888;
dsi_tmp_buf_bpp = 3;
break;
}
tmp_reg += dsi->vm.hactive * dsi_tmp_buf_bpp & DSI_PS_WC;
writel(tmp_reg, dsi->regs + DSI_PSCTRL);
}
static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
{
u32 horizontal_sync_active_byte;
u32 horizontal_backporch_byte;
u32 horizontal_frontporch_byte;
u32 horizontal_front_back_byte;
u32 data_phy_cycles_byte;
u32 dsi_tmp_buf_bpp, data_phy_cycles;
u32 delta;
struct mtk_phy_timing *timing = &dsi->phy_timing;
struct videomode *vm = &dsi->vm;
if (dsi->format == MIPI_DSI_FMT_RGB565)
dsi_tmp_buf_bpp = 2;
else
dsi_tmp_buf_bpp = 3;
writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
writel(vm->vactive, dsi->regs + DSI_VACT_NL);
if (dsi->driver_data->has_size_ctl)
writel(vm->vactive << 16 | vm->hactive,
dsi->regs + DSI_SIZE_CON);
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10;
else
horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
dsi_tmp_buf_bpp - 10;
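/*
 * The D-PHY needs part of the horizontal blanking for LP<->HS
 * transitions; shrink HFP and HBP proportionally to make room for
 * those data-lane cycles.
 */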
data_phy_cycles = timing->lpx + timing->da_hs_prepare +
timing->da_hs_zero + timing->da_hs_exit + 3;
delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 2 : 0;
horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta;
if (horizontal_front_back_byte > data_phy_cycles_byte) {
horizontal_frontporch_byte -= data_phy_cycles_byte *
horizontal_frontporch_byte /
horizontal_front_back_byte;
horizontal_backporch_byte -= data_phy_cycles_byte *
horizontal_backporch_byte /
horizontal_front_back_byte;
} else {
DRM_WARN("HFP + HBP less than d-phy, FPS will under 60Hz\n");
}
if ((dsi->mode_flags & MIPI_DSI_HS_PKT_END_ALIGNED) &&
(dsi->lanes == 4)) {
horizontal_sync_active_byte =
roundup(horizontal_sync_active_byte, dsi->lanes) - 2;
horizontal_frontporch_byte =
roundup(horizontal_frontporch_byte, dsi->lanes) - 2;
horizontal_backporch_byte =
roundup(horizontal_backporch_byte, dsi->lanes) - 2;
horizontal_backporch_byte -=
(vm->hactive * dsi_tmp_buf_bpp + 2) % dsi->lanes;
}
writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
mtk_dsi_ps_control(dsi);
}
static void mtk_dsi_start(struct mtk_dsi *dsi)
{
writel(0, dsi->regs + DSI_START);
writel(1, dsi->regs + DSI_START);
}
static void mtk_dsi_stop(struct mtk_dsi *dsi)
{
writel(0, dsi->regs + DSI_START);
}
static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi)
{
writel(CMD_MODE, dsi->regs + DSI_MODE_CTRL);
}
static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi)
{
u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;
writel(inten, dsi->regs + DSI_INTEN);
}
static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit)
{
dsi->irq_data |= irq_bit;
}
static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit)
{
dsi->irq_data &= ~irq_bit;
}
static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag,
unsigned int timeout)
{
s32 ret = 0;
unsigned long jiffies = msecs_to_jiffies(timeout);
ret = wait_event_interruptible_timeout(dsi->irq_wait_queue,
dsi->irq_data & irq_flag,
jiffies);
if (ret == 0) {
DRM_WARN("Wait DSI IRQ(0x%08x) Timeout\n", irq_flag);
mtk_dsi_enable(dsi);
mtk_dsi_reset_engine(dsi);
}
return ret;
}
static irqreturn_t mtk_dsi_irq(int irq, void *dev_id)
{
struct mtk_dsi *dsi = dev_id;
u32 status, tmp;
u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;
status = readl(dsi->regs + DSI_INTSTA) & flag;
if (status) {
do {
mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK);
tmp = readl(dsi->regs + DSI_INTSTA);
} while (tmp & DSI_BUSY);
mtk_dsi_mask(dsi, DSI_INTSTA, status, 0);
mtk_dsi_irq_data_set(dsi, status);
wake_up_interruptible(&dsi->irq_wait_queue);
}
return IRQ_HANDLED;
}
static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
{
mtk_dsi_irq_data_clear(dsi, irq_flag);
mtk_dsi_set_cmd_mode(dsi);
if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) {
DRM_ERROR("failed to switch cmd mode\n");
return -ETIME;
} else {
return 0;
}
}
static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
struct device *dev = dsi->host.dev;
int ret;
u32 bit_per_pixel;
if (++dsi->refcount != 1)
return 0;
switch (dsi->format) {
case MIPI_DSI_FMT_RGB565:
bit_per_pixel = 16;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
bit_per_pixel = 18;
break;
case MIPI_DSI_FMT_RGB666:
case MIPI_DSI_FMT_RGB888:
default:
bit_per_pixel = 24;
break;
}
dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel,
dsi->lanes);
ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
if (ret < 0) {
dev_err(dev, "Failed to set data rate: %d\n", ret);
goto err_refcount;
}
phy_power_on(dsi->phy);
ret = clk_prepare_enable(dsi->engine_clk);
if (ret < 0) {
dev_err(dev, "Failed to enable engine clock: %d\n", ret);
goto err_phy_power_off;
}
ret = clk_prepare_enable(dsi->digital_clk);
if (ret < 0) {
dev_err(dev, "Failed to enable digital clock: %d\n", ret);
goto err_disable_engine_clk;
}
mtk_dsi_enable(dsi);
if (dsi->driver_data->has_shadow_ctl)
writel(FORCE_COMMIT | BYPASS_SHADOW,
dsi->regs + DSI_SHADOW_DEBUG);
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
mtk_dsi_ps_control_vact(dsi);
mtk_dsi_set_vm_cmd(dsi);
mtk_dsi_config_vdo_timing(dsi);
mtk_dsi_set_interrupt_enable(dsi);
return 0;
err_disable_engine_clk:
clk_disable_unprepare(dsi->engine_clk);
err_phy_power_off:
phy_power_off(dsi->phy);
err_refcount:
dsi->refcount--;
return ret;
}
static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
{
if (WARN_ON(dsi->refcount == 0))
return;
if (--dsi->refcount != 0)
return;
/*
 * mtk_dsi_stop() and mtk_dsi_start() are asymmetric: mtk_dsi_stop()
 * must be called after mtk_drm_crtc_atomic_disable(), which still
 * needs the vblank irq, because mtk_dsi_stop() disables that irq.
 * mtk_dsi_start() is called from mtk_output_dsi_enable(), once the
 * DSI block is fully configured.
 */
mtk_dsi_stop(dsi);
mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
mtk_dsi_reset_engine(dsi);
mtk_dsi_lane0_ulp_mode_enter(dsi);
mtk_dsi_clk_ulp_mode_enter(dsi);
/* Set the lane number to 0 to pull the MIPI lanes down */
writel(0, dsi->regs + DSI_TXRX_CTRL);
mtk_dsi_disable(dsi);
clk_disable_unprepare(dsi->engine_clk);
clk_disable_unprepare(dsi->digital_clk);
phy_power_off(dsi->phy);
dsi->lanes_ready = false;
}
static void mtk_dsi_lane_ready(struct mtk_dsi *dsi)
{
if (!dsi->lanes_ready) {
dsi->lanes_ready = true;
mtk_dsi_rxtx_control(dsi);
usleep_range(30, 100);
mtk_dsi_reset_dphy(dsi);
mtk_dsi_clk_ulp_mode_leave(dsi);
mtk_dsi_lane0_ulp_mode_leave(dsi);
mtk_dsi_clk_hs_mode(dsi, 0);
usleep_range(1000, 3000);
/* Give the DSI receiver time to react after the MIPI lanes are pulled up */
}
}
static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
{
if (dsi->enabled)
return;
mtk_dsi_lane_ready(dsi);
mtk_dsi_set_mode(dsi);
mtk_dsi_clk_hs_mode(dsi, 1);
mtk_dsi_start(dsi);
dsi->enabled = true;
}
static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
{
if (!dsi->enabled)
return;
dsi->enabled = false;
}
static int mtk_dsi_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
/* Attach the panel or bridge to the dsi bridge */
return drm_bridge_attach(bridge->encoder, dsi->next_bridge,
&dsi->bridge, flags);
}
static void mtk_dsi_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
drm_display_mode_to_videomode(adjusted, &dsi->vm);
}
static void mtk_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
mtk_output_dsi_disable(dsi);
}
static void mtk_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
if (dsi->refcount == 0)
return;
mtk_output_dsi_enable(dsi);
}
static void mtk_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
int ret;
ret = mtk_dsi_poweron(dsi);
if (ret < 0)
DRM_ERROR("failed to power on dsi\n");
}
static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
mtk_dsi_poweroff(dsi);
}
static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
.attach = mtk_dsi_bridge_attach,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_disable = mtk_dsi_bridge_atomic_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_enable = mtk_dsi_bridge_atomic_enable,
.atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
.atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
.atomic_reset = drm_atomic_helper_bridge_reset,
.mode_set = mtk_dsi_bridge_mode_set,
};
void mtk_dsi_ddp_start(struct device *dev)
{
struct mtk_dsi *dsi = dev_get_drvdata(dev);
mtk_dsi_poweron(dsi);
}
void mtk_dsi_ddp_stop(struct device *dev)
{
struct mtk_dsi *dsi = dev_get_drvdata(dev);
mtk_dsi_poweroff(dsi);
}
static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
{
int ret;
ret = drm_simple_encoder_init(drm, &dsi->encoder,
DRM_MODE_ENCODER_DSI);
if (ret) {
DRM_ERROR("Failed to encoder init to drm\n");
return ret;
}
dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto err_cleanup_encoder;
dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
if (IS_ERR(dsi->connector)) {
DRM_ERROR("Unable to create bridge connector\n");
ret = PTR_ERR(dsi->connector);
goto err_cleanup_encoder;
}
drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
return 0;
err_cleanup_encoder:
drm_encoder_cleanup(&dsi->encoder);
return ret;
}
static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
{
int ret;
struct drm_device *drm = data;
struct mtk_dsi *dsi = dev_get_drvdata(dev);
ret = mtk_dsi_encoder_init(drm, dsi);
if (ret)
return ret;
return device_reset_optional(dev);
}
static void mtk_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
struct mtk_dsi *dsi = dev_get_drvdata(dev);
drm_encoder_cleanup(&dsi->encoder);
}
static const struct component_ops mtk_dsi_component_ops = {
.bind = mtk_dsi_bind,
.unbind = mtk_dsi_unbind,
};
static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct mtk_dsi *dsi = host_to_dsi(host);
struct device *dev = host->dev;
int ret;
dsi->lanes = device->lanes;
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(dsi->next_bridge))
return PTR_ERR(dsi->next_bridge);
drm_bridge_add(&dsi->bridge);
ret = component_add(host->dev, &mtk_dsi_component_ops);
if (ret) {
DRM_ERROR("failed to add dsi_host component: %d\n", ret);
drm_bridge_remove(&dsi->bridge);
return ret;
}
return 0;
}
static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct mtk_dsi *dsi = host_to_dsi(host);
component_del(host->dev, &mtk_dsi_component_ops);
drm_bridge_remove(&dsi->bridge);
return 0;
}
static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
int ret;
u32 val;
ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
4, 2000000);
if (ret) {
DRM_WARN("polling dsi wait not busy timeout!\n");
mtk_dsi_enable(dsi);
mtk_dsi_reset_engine(dsi);
}
}
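/* Return the number of payload bytes carried by a received packet, based on its data type */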
static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
{
switch (type) {
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
return 1;
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
return 2;
case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
return read_data[1] + read_data[2] * 16;
case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
DRM_INFO("type is 0x02, try again\n");
break;
default:
DRM_INFO("type(0x%x) not recognized\n", type);
break;
}
return 0;
}
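/* Write the TX message (packet header plus payload) into the DSI command-queue registers */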
static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
{
const char *tx_buf = msg->tx_buf;
u8 config, cmdq_size, cmdq_off, type = msg->type;
u32 reg_val, cmdq_mask, i;
u32 reg_cmdq_off = dsi->driver_data->reg_cmdq_off;
if (MTK_DSI_HOST_IS_READ(type))
config = BTA;
else
config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET;
if (msg->tx_len > 2) {
cmdq_size = 1 + (msg->tx_len + 3) / 4;
cmdq_off = 4;
cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1;
reg_val = (msg->tx_len << 16) | (type << 8) | config;
} else {
cmdq_size = 1;
cmdq_off = 2;
cmdq_mask = CONFIG | DATA_ID;
reg_val = (type << 8) | config;
}
for (i = 0; i < msg->tx_len; i++)
mtk_dsi_mask(dsi, (reg_cmdq_off + cmdq_off + i) & (~0x3U),
(0xffUL << (((i + cmdq_off) & 3U) * 8U)),
tx_buf[i] << (((i + cmdq_off) & 3U) * 8U));
mtk_dsi_mask(dsi, reg_cmdq_off, cmdq_mask, reg_val);
mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
}
static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi,
const struct mipi_dsi_msg *msg, u8 flag)
{
mtk_dsi_wait_for_idle(dsi);
mtk_dsi_irq_data_clear(dsi, flag);
mtk_dsi_cmdq(dsi, msg);
mtk_dsi_start(dsi);
if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000))
return -ETIME;
else
return 0;
}
static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct mtk_dsi *dsi = host_to_dsi(host);
u32 recv_cnt, i;
u8 read_data[16];
void *src_addr;
u8 irq_flag = CMD_DONE_INT_FLAG;
u32 dsi_mode;
int ret;
dsi_mode = readl(dsi->regs + DSI_MODE_CTRL);
if (dsi_mode & MODE) {
mtk_dsi_stop(dsi);
ret = mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
if (ret)
goto restore_dsi_mode;
}
if (MTK_DSI_HOST_IS_READ(msg->type))
irq_flag |= LPRX_RD_RDY_INT_FLAG;
mtk_dsi_lane_ready(dsi);
ret = mtk_dsi_host_send_cmd(dsi, msg, irq_flag);
if (ret)
goto restore_dsi_mode;
if (!MTK_DSI_HOST_IS_READ(msg->type)) {
recv_cnt = 0;
goto restore_dsi_mode;
}
if (!msg->rx_buf) {
DRM_ERROR("dsi receive buffer size may be NULL\n");
ret = -EINVAL;
goto restore_dsi_mode;
}
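/*
 * Read back the RX FIFO (four 32-bit DSI_RX_DATA registers); long
 * read responses carry their payload from byte 4, short responses
 * from byte 1, and at most 10 payload bytes are returned.
 */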
for (i = 0; i < 16; i++)
*(read_data + i) = readb(dsi->regs + DSI_RX_DATA0 + i);
recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data);
if (recv_cnt > 2)
src_addr = &read_data[4];
else
src_addr = &read_data[1];
if (recv_cnt > 10)
recv_cnt = 10;
if (recv_cnt > msg->rx_len)
recv_cnt = msg->rx_len;
if (recv_cnt)
memcpy(msg->rx_buf, src_addr, recv_cnt);
DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
recv_cnt, *((u8 *)(msg->tx_buf)));
restore_dsi_mode:
if (dsi_mode & MODE) {
mtk_dsi_set_mode(dsi);
mtk_dsi_start(dsi);
}
return ret < 0 ? ret : recv_cnt;
}
static const struct mipi_dsi_host_ops mtk_dsi_ops = {
.attach = mtk_dsi_host_attach,
.detach = mtk_dsi_host_detach,
.transfer = mtk_dsi_host_transfer,
};
static int mtk_dsi_probe(struct platform_device *pdev)
{
struct mtk_dsi *dsi;
struct device *dev = &pdev->dev;
struct resource *regs;
int irq_num;
int ret;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
dsi->host.ops = &mtk_dsi_ops;
dsi->host.dev = dev;
ret = mipi_dsi_host_register(&dsi->host);
if (ret < 0) {
dev_err(dev, "failed to register DSI host: %d\n", ret);
return ret;
}
dsi->driver_data = of_device_get_match_data(dev);
dsi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dsi->engine_clk)) {
ret = PTR_ERR(dsi->engine_clk);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get engine clock: %d\n", ret);
goto err_unregister_host;
}
dsi->digital_clk = devm_clk_get(dev, "digital");
if (IS_ERR(dsi->digital_clk)) {
ret = PTR_ERR(dsi->digital_clk);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get digital clock: %d\n", ret);
goto err_unregister_host;
}
dsi->hs_clk = devm_clk_get(dev, "hs");
if (IS_ERR(dsi->hs_clk)) {
ret = PTR_ERR(dsi->hs_clk);
dev_err(dev, "Failed to get hs clock: %d\n", ret);
goto err_unregister_host;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(dev, regs);
if (IS_ERR(dsi->regs)) {
ret = PTR_ERR(dsi->regs);
dev_err(dev, "Failed to ioremap memory: %d\n", ret);
goto err_unregister_host;
}
dsi->phy = devm_phy_get(dev, "dphy");
if (IS_ERR(dsi->phy)) {
ret = PTR_ERR(dsi->phy);
dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
goto err_unregister_host;
}
irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0) {
ret = irq_num;
goto err_unregister_host;
}
ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
IRQF_TRIGGER_NONE, dev_name(&pdev->dev), dsi);
if (ret) {
dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
goto err_unregister_host;
}
init_waitqueue_head(&dsi->irq_wait_queue);
platform_set_drvdata(pdev, dsi);
dsi->bridge.funcs = &mtk_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
return 0;
err_unregister_host:
mipi_dsi_host_unregister(&dsi->host);
return ret;
}
static void mtk_dsi_remove(struct platform_device *pdev)
{
struct mtk_dsi *dsi = platform_get_drvdata(pdev);
mtk_output_dsi_disable(dsi);
mipi_dsi_host_unregister(&dsi->host);
}
static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
.reg_cmdq_off = 0x200,
};
static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
.reg_cmdq_off = 0x180,
};
static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
.reg_cmdq_off = 0x200,
.has_shadow_ctl = true,
.has_size_ctl = true,
};
static const struct mtk_dsi_driver_data mt8186_dsi_driver_data = {
.reg_cmdq_off = 0xd00,
.has_shadow_ctl = true,
.has_size_ctl = true,
};
static const struct of_device_id mtk_dsi_of_match[] = {
{ .compatible = "mediatek,mt2701-dsi",
.data = &mt2701_dsi_driver_data },
{ .compatible = "mediatek,mt8173-dsi",
.data = &mt8173_dsi_driver_data },
{ .compatible = "mediatek,mt8183-dsi",
.data = &mt8183_dsi_driver_data },
{ .compatible = "mediatek,mt8186-dsi",
.data = &mt8186_dsi_driver_data },
{ },
};
MODULE_DEVICE_TABLE(of, mtk_dsi_of_match);
struct platform_driver mtk_dsi_driver = {
.probe = mtk_dsi_probe,
.remove_new = mtk_dsi_remove,
.driver = {
.name = "mtk-dsi",
.of_match_table = mtk_dsi_of_match,
},
};
| linux-master | drivers/gpu/drm/mediatek/mtk_dsi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021 MediaTek Inc.
*/
#include <drm/drm_fourcc.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_disp_drv.h"
#include "mtk_drm_drv.h"
#include "mtk_mdp_rdma.h"
#define MDP_RDMA_EN 0x000
#define FLD_ROT_ENABLE BIT(0)
#define MDP_RDMA_RESET 0x008
#define MDP_RDMA_CON 0x020
#define FLD_OUTPUT_10B BIT(5)
#define FLD_SIMPLE_MODE BIT(4)
#define MDP_RDMA_GMCIF_CON 0x028
#define FLD_COMMAND_DIV BIT(0)
#define FLD_EXT_PREULTRA_EN BIT(3)
#define FLD_RD_REQ_TYPE GENMASK(7, 4)
#define VAL_RD_REQ_TYPE_BURST_8_ACCESS 7
#define FLD_ULTRA_EN GENMASK(13, 12)
#define VAL_ULTRA_EN_ENABLE 1
#define FLD_PRE_ULTRA_EN GENMASK(17, 16)
#define VAL_PRE_ULTRA_EN_ENABLE 1
#define FLD_EXT_ULTRA_EN BIT(18)
#define MDP_RDMA_SRC_CON 0x030
#define FLD_OUTPUT_ARGB BIT(25)
#define FLD_BIT_NUMBER GENMASK(19, 18)
#define FLD_SWAP BIT(14)
#define FLD_UNIFORM_CONFIG BIT(17)
#define RDMA_INPUT_10BIT BIT(18)
#define FLD_SRC_FORMAT GENMASK(3, 0)
#define MDP_RDMA_COMP_CON 0x038
#define FLD_AFBC_EN BIT(22)
#define FLD_AFBC_YUV_TRANSFORM BIT(21)
#define FLD_UFBDC_EN BIT(12)
#define MDP_RDMA_MF_BKGD_SIZE_IN_BYTE 0x060
#define FLD_MF_BKGD_WB GENMASK(22, 0)
#define MDP_RDMA_MF_SRC_SIZE 0x070
#define FLD_MF_SRC_H GENMASK(30, 16)
#define FLD_MF_SRC_W GENMASK(14, 0)
#define MDP_RDMA_MF_CLIP_SIZE 0x078
#define FLD_MF_CLIP_H GENMASK(30, 16)
#define FLD_MF_CLIP_W GENMASK(14, 0)
#define MDP_RDMA_SRC_OFFSET_0 0x118
#define FLD_SRC_OFFSET_0 GENMASK(31, 0)
#define MDP_RDMA_TRANSFORM_0 0x200
#define FLD_INT_MATRIX_SEL GENMASK(27, 23)
#define FLD_TRANS_EN BIT(16)
#define MDP_RDMA_SRC_BASE_0 0xf00
#define FLD_SRC_BASE_0 GENMASK(31, 0)
#define RDMA_CSC_FULL709_TO_RGB 5
#define RDMA_CSC_BT601_TO_RGB 6
static const u32 formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
};
enum rdma_format {
RDMA_INPUT_FORMAT_RGB565 = 0,
RDMA_INPUT_FORMAT_RGB888 = 1,
RDMA_INPUT_FORMAT_RGBA8888 = 2,
RDMA_INPUT_FORMAT_ARGB8888 = 3,
RDMA_INPUT_FORMAT_UYVY = 4,
RDMA_INPUT_FORMAT_YUY2 = 5,
RDMA_INPUT_FORMAT_Y8 = 7,
RDMA_INPUT_FORMAT_YV12 = 8,
RDMA_INPUT_FORMAT_UYVY_3PL = 9,
RDMA_INPUT_FORMAT_NV12 = 12,
RDMA_INPUT_FORMAT_UYVY_2PL = 13,
RDMA_INPUT_FORMAT_Y410 = 14
};
struct mtk_mdp_rdma {
void __iomem *regs;
struct clk *clk;
struct cmdq_client_reg cmdq_reg;
};
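/* Map a DRM fourcc to the RDMA source-format field, including the byte-swap and 10-bit flags */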
static unsigned int rdma_fmt_convert(unsigned int fmt)
{
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
return RDMA_INPUT_FORMAT_RGB565;
case DRM_FORMAT_BGR565:
return RDMA_INPUT_FORMAT_RGB565 | FLD_SWAP;
case DRM_FORMAT_RGB888:
return RDMA_INPUT_FORMAT_RGB888;
case DRM_FORMAT_BGR888:
return RDMA_INPUT_FORMAT_RGB888 | FLD_SWAP;
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_RGBA8888:
return RDMA_INPUT_FORMAT_ARGB8888;
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_BGRA8888:
return RDMA_INPUT_FORMAT_ARGB8888 | FLD_SWAP;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
return RDMA_INPUT_FORMAT_RGBA8888;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return RDMA_INPUT_FORMAT_RGBA8888 | FLD_SWAP;
case DRM_FORMAT_ABGR2101010:
return RDMA_INPUT_FORMAT_RGBA8888 | FLD_SWAP | RDMA_INPUT_10BIT;
case DRM_FORMAT_ARGB2101010:
return RDMA_INPUT_FORMAT_RGBA8888 | RDMA_INPUT_10BIT;
case DRM_FORMAT_RGBA1010102:
return RDMA_INPUT_FORMAT_ARGB8888 | FLD_SWAP | RDMA_INPUT_10BIT;
case DRM_FORMAT_BGRA1010102:
return RDMA_INPUT_FORMAT_ARGB8888 | RDMA_INPUT_10BIT;
case DRM_FORMAT_UYVY:
return RDMA_INPUT_FORMAT_UYVY;
case DRM_FORMAT_YUYV:
return RDMA_INPUT_FORMAT_YUY2;
}
}
static unsigned int rdma_color_convert(unsigned int color_encoding)
{
switch (color_encoding) {
default:
case DRM_COLOR_YCBCR_BT709:
return RDMA_CSC_FULL709_TO_RGB;
case DRM_COLOR_YCBCR_BT601:
return RDMA_CSC_BT601_TO_RGB;
}
}
static void mtk_mdp_rdma_fifo_config(struct device *dev, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_mdp_rdma *priv = dev_get_drvdata(dev);
mtk_ddp_write_mask(cmdq_pkt, FLD_EXT_ULTRA_EN | VAL_PRE_ULTRA_EN_ENABLE << 16 |
VAL_ULTRA_EN_ENABLE << 12 | VAL_RD_REQ_TYPE_BURST_8_ACCESS << 4 |
FLD_EXT_PREULTRA_EN | FLD_COMMAND_DIV, &priv->cmdq_reg,
priv->regs, MDP_RDMA_GMCIF_CON, FLD_EXT_ULTRA_EN |
FLD_PRE_ULTRA_EN | FLD_ULTRA_EN | FLD_RD_REQ_TYPE |
FLD_EXT_PREULTRA_EN | FLD_COMMAND_DIV);
}
void mtk_mdp_rdma_start(struct device *dev, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_mdp_rdma *priv = dev_get_drvdata(dev);
mtk_ddp_write_mask(cmdq_pkt, FLD_ROT_ENABLE, &priv->cmdq_reg,
priv->regs, MDP_RDMA_EN, FLD_ROT_ENABLE);
}
void mtk_mdp_rdma_stop(struct device *dev, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_mdp_rdma *priv = dev_get_drvdata(dev);
mtk_ddp_write_mask(cmdq_pkt, 0, &priv->cmdq_reg,
priv->regs, MDP_RDMA_EN, FLD_ROT_ENABLE);
mtk_ddp_write(cmdq_pkt, 1, &priv->cmdq_reg, priv->regs, MDP_RDMA_RESET);
mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs, MDP_RDMA_RESET);
}
void mtk_mdp_rdma_config(struct device *dev, struct mtk_mdp_rdma_cfg *cfg,
struct cmdq_pkt *cmdq_pkt)
{
struct mtk_mdp_rdma *priv = dev_get_drvdata(dev);
const struct drm_format_info *fmt_info = drm_format_info(cfg->fmt);
bool csc_enable = fmt_info->is_yuv ? true : false;
unsigned int src_pitch_y = cfg->pitch;
unsigned int offset_y = 0;
mtk_mdp_rdma_fifo_config(dev, cmdq_pkt);
mtk_ddp_write_mask(cmdq_pkt, FLD_UNIFORM_CONFIG, &priv->cmdq_reg, priv->regs,
MDP_RDMA_SRC_CON, FLD_UNIFORM_CONFIG);
mtk_ddp_write_mask(cmdq_pkt, rdma_fmt_convert(cfg->fmt), &priv->cmdq_reg, priv->regs,
MDP_RDMA_SRC_CON, FLD_SWAP | FLD_SRC_FORMAT | FLD_BIT_NUMBER);
if (!csc_enable && fmt_info->has_alpha)
mtk_ddp_write_mask(cmdq_pkt, FLD_OUTPUT_ARGB, &priv->cmdq_reg,
priv->regs, MDP_RDMA_SRC_CON, FLD_OUTPUT_ARGB);
else
mtk_ddp_write_mask(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs,
MDP_RDMA_SRC_CON, FLD_OUTPUT_ARGB);
mtk_ddp_write_mask(cmdq_pkt, cfg->addr0, &priv->cmdq_reg, priv->regs,
MDP_RDMA_SRC_BASE_0, FLD_SRC_BASE_0);
mtk_ddp_write_mask(cmdq_pkt, src_pitch_y, &priv->cmdq_reg, priv->regs,
MDP_RDMA_MF_BKGD_SIZE_IN_BYTE, FLD_MF_BKGD_WB);
mtk_ddp_write_mask(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs, MDP_RDMA_COMP_CON,
FLD_AFBC_YUV_TRANSFORM | FLD_UFBDC_EN | FLD_AFBC_EN);
mtk_ddp_write_mask(cmdq_pkt, FLD_OUTPUT_10B, &priv->cmdq_reg, priv->regs,
MDP_RDMA_CON, FLD_OUTPUT_10B);
mtk_ddp_write_mask(cmdq_pkt, FLD_SIMPLE_MODE, &priv->cmdq_reg, priv->regs,
MDP_RDMA_CON, FLD_SIMPLE_MODE);
if (csc_enable)
mtk_ddp_write_mask(cmdq_pkt, rdma_color_convert(cfg->color_encoding) << 23,
&priv->cmdq_reg, priv->regs, MDP_RDMA_TRANSFORM_0,
FLD_INT_MATRIX_SEL);
mtk_ddp_write_mask(cmdq_pkt, csc_enable << 16, &priv->cmdq_reg, priv->regs,
MDP_RDMA_TRANSFORM_0, FLD_TRANS_EN);
offset_y = cfg->x_left * fmt_info->cpp[0] + cfg->y_top * src_pitch_y;
mtk_ddp_write_mask(cmdq_pkt, offset_y, &priv->cmdq_reg, priv->regs,
MDP_RDMA_SRC_OFFSET_0, FLD_SRC_OFFSET_0);
mtk_ddp_write_mask(cmdq_pkt, cfg->width, &priv->cmdq_reg, priv->regs,
MDP_RDMA_MF_SRC_SIZE, FLD_MF_SRC_W);
mtk_ddp_write_mask(cmdq_pkt, cfg->height << 16, &priv->cmdq_reg, priv->regs,
MDP_RDMA_MF_SRC_SIZE, FLD_MF_SRC_H);
mtk_ddp_write_mask(cmdq_pkt, cfg->width, &priv->cmdq_reg, priv->regs,
MDP_RDMA_MF_CLIP_SIZE, FLD_MF_CLIP_W);
mtk_ddp_write_mask(cmdq_pkt, cfg->height << 16, &priv->cmdq_reg, priv->regs,
MDP_RDMA_MF_CLIP_SIZE, FLD_MF_CLIP_H);
}
const u32 *mtk_mdp_rdma_get_formats(struct device *dev)
{
return formats;
}
size_t mtk_mdp_rdma_get_num_formats(struct device *dev)
{
return ARRAY_SIZE(formats);
}
int mtk_mdp_rdma_clk_enable(struct device *dev)
{
struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev);
	return clk_prepare_enable(rdma->clk);
}
void mtk_mdp_rdma_clk_disable(struct device *dev)
{
struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev);
clk_disable_unprepare(rdma->clk);
}
static int mtk_mdp_rdma_bind(struct device *dev, struct device *master,
void *data)
{
return 0;
}
static void mtk_mdp_rdma_unbind(struct device *dev, struct device *master,
void *data)
{
}
static const struct component_ops mtk_mdp_rdma_component_ops = {
.bind = mtk_mdp_rdma_bind,
.unbind = mtk_mdp_rdma_unbind,
};
static int mtk_mdp_rdma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct mtk_mdp_rdma *priv;
int ret = 0;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap rdma\n");
return PTR_ERR(priv->regs);
}
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get rdma clk\n");
return PTR_ERR(priv->clk);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
if (ret)
dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif
platform_set_drvdata(pdev, priv);
pm_runtime_enable(dev);
ret = component_add(dev, &mtk_mdp_rdma_component_ops);
if (ret != 0) {
pm_runtime_disable(dev);
dev_err(dev, "Failed to add component: %d\n", ret);
}
return ret;
}
static void mtk_mdp_rdma_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_mdp_rdma_component_ops);
pm_runtime_disable(&pdev->dev);
}
static const struct of_device_id mtk_mdp_rdma_driver_dt_match[] = {
{ .compatible = "mediatek,mt8195-vdo1-rdma", },
{},
};
MODULE_DEVICE_TABLE(of, mtk_mdp_rdma_driver_dt_match);
struct platform_driver mtk_mdp_rdma_driver = {
.probe = mtk_mdp_rdma_probe,
.remove_new = mtk_mdp_rdma_remove,
.driver = {
.name = "mediatek-mdp-rdma",
.owner = THIS_MODULE,
.of_match_table = mtk_mdp_rdma_driver_dt_match,
},
};
| linux-master | drivers/gpu/drm/mediatek/mtk_mdp_rdma.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2019-2022 MediaTek Inc.
* Copyright (c) 2022 BayLibre
*/
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
#include <sound/hdmi-codec.h>
#include <video/videomode.h>
#include "mtk_dp_reg.h"
#define MTK_DP_SIP_CONTROL_AARCH32 MTK_SIP_SMC_CMD(0x523)
#define MTK_DP_SIP_ATF_EDP_VIDEO_UNMUTE (BIT(0) | BIT(5))
#define MTK_DP_SIP_ATF_VIDEO_UNMUTE BIT(5)
#define MTK_DP_THREAD_CABLE_STATE_CHG BIT(0)
#define MTK_DP_THREAD_HPD_EVENT BIT(1)
#define MTK_DP_4P1T 4
#define MTK_DP_HDE 2
#define MTK_DP_PIX_PER_ADDR 2
#define MTK_DP_AUX_WAIT_REPLY_COUNT 20
#define MTK_DP_TBC_BUF_READ_START_ADDR 0x8
#define MTK_DP_TRAIN_VOLTAGE_LEVEL_RETRY 5
#define MTK_DP_TRAIN_DOWNSCALE_RETRY 10
#define MTK_DP_VERSION 0x11
#define MTK_DP_SDP_AUI 0x4
enum {
MTK_DP_CAL_GLB_BIAS_TRIM = 0,
MTK_DP_CAL_CLKTX_IMPSE,
MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0,
MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1,
MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2,
MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3,
MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0,
MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1,
MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2,
MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3,
MTK_DP_CAL_MAX,
};
struct mtk_dp_train_info {
bool sink_ssc;
bool cable_plugged_in;
/* link_rate is in multiple of 0.27Gbps */
int link_rate;
int lane_count;
unsigned int channel_eq_pattern;
};
struct mtk_dp_audio_cfg {
bool detect_monitor;
int sad_count;
int sample_rate;
int word_length_bits;
int channels;
};
struct mtk_dp_info {
enum dp_pixelformat format;
struct videomode vm;
struct mtk_dp_audio_cfg audio_cur_cfg;
};
struct mtk_dp_efuse_fmt {
unsigned short idx;
unsigned short shift;
unsigned short mask;
unsigned short min_val;
unsigned short max_val;
unsigned short default_val;
};
struct mtk_dp {
bool enabled;
bool need_debounce;
int irq;
u8 max_lanes;
u8 max_linkrate;
u8 rx_cap[DP_RECEIVER_CAP_SIZE];
u32 cal_data[MTK_DP_CAL_MAX];
u32 irq_thread_handle;
/* irq_thread_lock is used to protect irq_thread_handle */
spinlock_t irq_thread_lock;
struct device *dev;
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
struct drm_connector *conn;
struct drm_device *drm_dev;
struct drm_dp_aux aux;
const struct mtk_dp_data *data;
struct mtk_dp_info info;
struct mtk_dp_train_info train_info;
struct platform_device *phy_dev;
struct phy *phy;
struct regmap *regs;
struct timer_list debounce_timer;
/* For audio */
bool audio_enable;
hdmi_codec_plugged_cb plugged_cb;
struct platform_device *audio_pdev;
struct device *codec_dev;
/* protect the plugged_cb as it's used in both bridge ops and audio */
struct mutex update_plugged_status_lock;
};
struct mtk_dp_data {
int bridge_type;
unsigned int smc_cmd;
const struct mtk_dp_efuse_fmt *efuse_fmt;
bool audio_supported;
};
static const struct mtk_dp_efuse_fmt mt8195_edp_efuse_fmt[MTK_DP_CAL_MAX] = {
[MTK_DP_CAL_GLB_BIAS_TRIM] = {
.idx = 3,
.shift = 27,
.mask = 0x1f,
.min_val = 1,
.max_val = 0x1e,
.default_val = 0xf,
},
[MTK_DP_CAL_CLKTX_IMPSE] = {
.idx = 0,
.shift = 9,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = {
.idx = 2,
.shift = 28,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = {
.idx = 2,
.shift = 20,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = {
.idx = 2,
.shift = 12,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = {
.idx = 2,
.shift = 4,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = {
.idx = 2,
.shift = 24,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = {
.idx = 2,
.shift = 16,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = {
.idx = 2,
.shift = 8,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = {
.idx = 2,
.shift = 0,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
};
static const struct mtk_dp_efuse_fmt mt8195_dp_efuse_fmt[MTK_DP_CAL_MAX] = {
[MTK_DP_CAL_GLB_BIAS_TRIM] = {
.idx = 0,
.shift = 27,
.mask = 0x1f,
.min_val = 1,
.max_val = 0x1e,
.default_val = 0xf,
},
[MTK_DP_CAL_CLKTX_IMPSE] = {
.idx = 0,
.shift = 13,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = {
.idx = 1,
.shift = 28,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = {
.idx = 1,
.shift = 20,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = {
.idx = 1,
.shift = 12,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = {
.idx = 1,
.shift = 4,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = {
.idx = 1,
.shift = 24,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = {
.idx = 1,
.shift = 16,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = {
.idx = 1,
.shift = 8,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = {
.idx = 1,
.shift = 0,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
};
static struct regmap_config mtk_dp_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = SEC_OFFSET + 0x90,
.name = "mtk-dp-registers",
};
static struct mtk_dp *mtk_dp_from_bridge(struct drm_bridge *b)
{
return container_of(b, struct mtk_dp, bridge);
}
static u32 mtk_dp_read(struct mtk_dp *mtk_dp, u32 offset)
{
u32 read_val;
int ret;
ret = regmap_read(mtk_dp->regs, offset, &read_val);
if (ret) {
dev_err(mtk_dp->dev, "Failed to read register 0x%x: %d\n",
offset, ret);
return 0;
}
return read_val;
}
static int mtk_dp_write(struct mtk_dp *mtk_dp, u32 offset, u32 val)
{
int ret = regmap_write(mtk_dp->regs, offset, val);
if (ret)
dev_err(mtk_dp->dev,
"Failed to write register 0x%x with value 0x%x\n",
offset, val);
return ret;
}
static int mtk_dp_update_bits(struct mtk_dp *mtk_dp, u32 offset,
u32 val, u32 mask)
{
int ret = regmap_update_bits(mtk_dp->regs, offset, mask, val);
if (ret)
dev_err(mtk_dp->dev,
"Failed to update register 0x%x with value 0x%x, mask 0x%x\n",
offset, val, mask);
return ret;
}
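/* Write the buffer two bytes at a time, one 16-bit value per 32-bit register */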
static void mtk_dp_bulk_16bit_write(struct mtk_dp *mtk_dp, u32 offset, u8 *buf,
size_t length)
{
int i;
/* 2 bytes per register */
for (i = 0; i < length; i += 2) {
u32 val = buf[i] | (i + 1 < length ? buf[i + 1] << 8 : 0);
if (mtk_dp_write(mtk_dp, offset + i * 2, val))
return;
}
}
static void mtk_dp_msa_bypass_enable(struct mtk_dp *mtk_dp, bool enable)
{
u32 mask = HTOTAL_SEL_DP_ENC0_P0 | VTOTAL_SEL_DP_ENC0_P0 |
HSTART_SEL_DP_ENC0_P0 | VSTART_SEL_DP_ENC0_P0 |
HWIDTH_SEL_DP_ENC0_P0 | VHEIGHT_SEL_DP_ENC0_P0 |
HSP_SEL_DP_ENC0_P0 | HSW_SEL_DP_ENC0_P0 |
VSP_SEL_DP_ENC0_P0 | VSW_SEL_DP_ENC0_P0;
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3030, enable ? 0 : mask, mask);
}
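/* Program the Main Stream Attributes (MSA) and pattern generator timings from the current videomode */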
static void mtk_dp_set_msa(struct mtk_dp *mtk_dp)
{
struct drm_display_mode mode;
struct videomode *vm = &mtk_dp->info.vm;
drm_display_mode_from_videomode(vm, &mode);
/* horizontal */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3010,
mode.htotal, HTOTAL_SW_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3018,
vm->hsync_len + vm->hback_porch,
HSTART_SW_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3028,
vm->hsync_len, HSW_SW_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3028,
0, HSP_SW_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3020,
vm->hactive, HWIDTH_SW_DP_ENC0_P0_MASK);
/* vertical */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3014,
mode.vtotal, VTOTAL_SW_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_301C,
vm->vsync_len + vm->vback_porch,
VSTART_SW_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_302C,
vm->vsync_len, VSW_SW_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_302C,
0, VSP_SW_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3024,
vm->vactive, VHEIGHT_SW_DP_ENC0_P0_MASK);
/* horizontal */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3064,
vm->hactive, HDE_NUM_LAST_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3154,
mode.htotal, PGEN_HTOTAL_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3158,
vm->hfront_porch,
PGEN_HSYNC_RISING_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_315C,
vm->hsync_len,
PGEN_HSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3160,
vm->hback_porch + vm->hsync_len,
PGEN_HFDE_START_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3164,
vm->hactive,
PGEN_HFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK);
/* vertical */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3168,
mode.vtotal,
PGEN_VTOTAL_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_316C,
vm->vfront_porch,
PGEN_VSYNC_RISING_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3170,
vm->vsync_len,
PGEN_VSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3174,
vm->vback_porch + vm->vsync_len,
PGEN_VFDE_START_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3178,
vm->vactive,
PGEN_VFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK);
}
static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
enum dp_pixelformat color_format)
{
u32 val;
/* update MISC0 */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
color_format << DP_TEST_COLOR_FORMAT_SHIFT,
DP_TEST_COLOR_FORMAT_MASK);
switch (color_format) {
case DP_PIXELFORMAT_YUV422:
val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422;
break;
case DP_PIXELFORMAT_RGB:
val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB;
break;
default:
drm_warn(mtk_dp->drm_dev, "Unsupported color format: %d\n",
color_format);
return -EINVAL;
}
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
val, PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK);
return 0;
}
static void mtk_dp_set_color_depth(struct mtk_dp *mtk_dp)
{
/* Only support 8 bits currently */
/* Update MISC0 */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
DP_MSA_MISC_8_BPC, DP_TEST_BIT_DEPTH_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
VIDEO_COLOR_DEPTH_DP_ENC0_P0_8BIT,
VIDEO_COLOR_DEPTH_DP_ENC0_P0_MASK);
}
static void mtk_dp_config_mn_mode(struct mtk_dp *mtk_dp)
{
/* 0: hw mode, 1: sw mode */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
0, VIDEO_M_CODE_SEL_DP_ENC0_P0_MASK);
}
static void mtk_dp_set_sram_read_start(struct mtk_dp *mtk_dp, u32 val)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
val, SRAM_START_READ_THRD_DP_ENC0_P0_MASK);
}
static void mtk_dp_setup_encoder(struct mtk_dp *mtk_dp)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
VIDEO_MN_GEN_EN_DP_ENC0_P0,
VIDEO_MN_GEN_EN_DP_ENC0_P0);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3040,
SDP_DOWN_CNT_DP_ENC0_P0_VAL,
SDP_DOWN_CNT_INIT_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3364,
SDP_DOWN_CNT_IN_HBLANK_DP_ENC1_P0_VAL,
SDP_DOWN_CNT_INIT_IN_HBLANK_DP_ENC1_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3300,
VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_VAL << 8,
VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3364,
FIFO_READ_START_POINT_DP_ENC1_P0_VAL << 12,
FIFO_READ_START_POINT_DP_ENC1_P0_MASK);
mtk_dp_write(mtk_dp, MTK_DP_ENC1_P0_3368, DP_ENC1_P0_3368_VAL);
}
static void mtk_dp_pg_enable(struct mtk_dp *mtk_dp, bool enable)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3038,
enable ? VIDEO_SOURCE_SEL_DP_ENC0_P0_MASK : 0,
VIDEO_SOURCE_SEL_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_31B0,
PGEN_PATTERN_SEL_VAL << 4, PGEN_PATTERN_SEL_MASK);
}
static void mtk_dp_audio_setup_channels(struct mtk_dp *mtk_dp,
struct mtk_dp_audio_cfg *cfg)
{
u32 channel_enable_bits;
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3324,
AUDIO_SOURCE_MUX_DP_ENC1_P0_DPRX,
AUDIO_SOURCE_MUX_DP_ENC1_P0_MASK);
/* audio channel count change reset */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_33F4,
DP_ENC_DUMMY_RW_1, DP_ENC_DUMMY_RW_1);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3304,
AU_PRTY_REGEN_DP_ENC1_P0_MASK |
AU_CH_STS_REGEN_DP_ENC1_P0_MASK |
AUDIO_SAMPLE_PRSENT_REGEN_DP_ENC1_P0_MASK,
AU_PRTY_REGEN_DP_ENC1_P0_MASK |
AU_CH_STS_REGEN_DP_ENC1_P0_MASK |
AUDIO_SAMPLE_PRSENT_REGEN_DP_ENC1_P0_MASK);
switch (cfg->channels) {
case 2:
channel_enable_bits = AUDIO_2CH_SEL_DP_ENC0_P0_MASK |
AUDIO_2CH_EN_DP_ENC0_P0_MASK;
break;
case 8:
default:
channel_enable_bits = AUDIO_8CH_SEL_DP_ENC0_P0_MASK |
AUDIO_8CH_EN_DP_ENC0_P0_MASK;
break;
}
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3088,
channel_enable_bits | AU_EN_DP_ENC0_P0,
AUDIO_2CH_SEL_DP_ENC0_P0_MASK |
AUDIO_2CH_EN_DP_ENC0_P0_MASK |
AUDIO_8CH_SEL_DP_ENC0_P0_MASK |
AUDIO_8CH_EN_DP_ENC0_P0_MASK |
AU_EN_DP_ENC0_P0);
/* audio channel count change reset */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_33F4, 0, DP_ENC_DUMMY_RW_1);
/* enable audio reset */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_33F4,
DP_ENC_DUMMY_RW_1_AUDIO_RST_EN,
DP_ENC_DUMMY_RW_1_AUDIO_RST_EN);
}
static void mtk_dp_audio_channel_status_set(struct mtk_dp *mtk_dp,
struct mtk_dp_audio_cfg *cfg)
{
struct snd_aes_iec958 iec = { 0 };
switch (cfg->sample_rate) {
case 32000:
iec.status[3] = IEC958_AES3_CON_FS_32000;
break;
case 44100:
iec.status[3] = IEC958_AES3_CON_FS_44100;
break;
case 48000:
iec.status[3] = IEC958_AES3_CON_FS_48000;
break;
case 88200:
iec.status[3] = IEC958_AES3_CON_FS_88200;
break;
case 96000:
iec.status[3] = IEC958_AES3_CON_FS_96000;
break;
case 192000:
iec.status[3] = IEC958_AES3_CON_FS_192000;
break;
default:
iec.status[3] = IEC958_AES3_CON_FS_NOTID;
break;
}
switch (cfg->word_length_bits) {
case 16:
iec.status[4] = IEC958_AES4_CON_WORDLEN_20_16;
break;
case 20:
iec.status[4] = IEC958_AES4_CON_WORDLEN_20_16 |
IEC958_AES4_CON_MAX_WORDLEN_24;
break;
case 24:
iec.status[4] = IEC958_AES4_CON_WORDLEN_24_20 |
IEC958_AES4_CON_MAX_WORDLEN_24;
break;
default:
iec.status[4] = IEC958_AES4_CON_WORDLEN_NOTID;
}
/* IEC 60958 consumer channel status bits */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_308C,
0, CH_STATUS_0_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3090,
iec.status[3] << 8, CH_STATUS_1_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3094,
iec.status[4], CH_STATUS_2_DP_ENC0_P0_MASK);
}
static void mtk_dp_audio_sdp_asp_set_channels(struct mtk_dp *mtk_dp,
int channels)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_312C,
(min(8, channels) - 1) << 8,
ASP_HB2_DP_ENC0_P0_MASK | ASP_HB3_DP_ENC0_P0_MASK);
}
static void mtk_dp_audio_set_divider(struct mtk_dp *mtk_dp)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30BC,
AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2,
AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MASK);
}
static void mtk_dp_sdp_trigger_aui(struct mtk_dp *mtk_dp)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3280,
MTK_DP_SDP_AUI, SDP_PACKET_TYPE_DP_ENC1_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3280,
SDP_PACKET_W_DP_ENC1_P0, SDP_PACKET_W_DP_ENC1_P0);
}
static void mtk_dp_sdp_set_data(struct mtk_dp *mtk_dp, u8 *data_bytes)
{
mtk_dp_bulk_16bit_write(mtk_dp, MTK_DP_ENC1_P0_3200,
data_bytes, 0x10);
}
static void mtk_dp_sdp_set_header_aui(struct mtk_dp *mtk_dp,
struct dp_sdp_header *header)
{
u32 db_addr = MTK_DP_ENC0_P0_30D8 + (MTK_DP_SDP_AUI - 1) * 8;
mtk_dp_bulk_16bit_write(mtk_dp, db_addr, (u8 *)header, 4);
}
static void mtk_dp_disable_sdp_aui(struct mtk_dp *mtk_dp)
{
/* Disable periodic send */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30A8 & 0xfffc, 0,
0xff << ((MTK_DP_ENC0_P0_30A8 & 3) * 8));
}
static void mtk_dp_setup_sdp_aui(struct mtk_dp *mtk_dp,
struct dp_sdp *sdp)
{
u32 shift;
mtk_dp_sdp_set_data(mtk_dp, sdp->db);
mtk_dp_sdp_set_header_aui(mtk_dp, &sdp->sdp_header);
mtk_dp_disable_sdp_aui(mtk_dp);
shift = (MTK_DP_ENC0_P0_30A8 & 3) * 8;
mtk_dp_sdp_trigger_aui(mtk_dp);
/* Enable periodic sending */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30A8 & 0xfffc,
0x05 << shift, 0xff << shift);
}
static void mtk_dp_aux_irq_clear(struct mtk_dp *mtk_dp)
{
mtk_dp_write(mtk_dp, MTK_DP_AUX_P0_3640, DP_AUX_P0_3640_VAL);
}
static void mtk_dp_aux_set_cmd(struct mtk_dp *mtk_dp, u8 cmd, u32 addr)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3644,
cmd, MCU_REQUEST_COMMAND_AUX_TX_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3648,
addr, MCU_REQUEST_ADDRESS_LSB_AUX_TX_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_364C,
addr >> 16, MCU_REQUEST_ADDRESS_MSB_AUX_TX_P0_MASK);
}
static void mtk_dp_aux_clear_fifo(struct mtk_dp *mtk_dp)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3650,
MCU_ACK_TRAN_COMPLETE_AUX_TX_P0,
MCU_ACK_TRAN_COMPLETE_AUX_TX_P0 |
PHY_FIFO_RST_AUX_TX_P0_MASK |
MCU_REQ_DATA_NUM_AUX_TX_P0_MASK);
}
static void mtk_dp_aux_request_ready(struct mtk_dp *mtk_dp)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3630,
AUX_TX_REQUEST_READY_AUX_TX_P0,
AUX_TX_REQUEST_READY_AUX_TX_P0);
}
static void mtk_dp_aux_fill_write_fifo(struct mtk_dp *mtk_dp, u8 *buf,
size_t length)
{
mtk_dp_bulk_16bit_write(mtk_dp, MTK_DP_AUX_P0_3708, buf, length);
}
static void mtk_dp_aux_read_rx_fifo(struct mtk_dp *mtk_dp, u8 *buf,
size_t length, int read_delay)
{
int read_pos;
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3620,
0, AUX_RD_MODE_AUX_TX_P0_MASK);
for (read_pos = 0; read_pos < length; read_pos++) {
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3620,
AUX_RX_FIFO_READ_PULSE_TX_P0,
AUX_RX_FIFO_READ_PULSE_TX_P0);
/* Hardware needs time to update the data */
usleep_range(read_delay, read_delay * 2);
buf[read_pos] = (u8)(mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3620) &
AUX_RX_FIFO_READ_DATA_AUX_TX_P0_MASK);
}
}
static void mtk_dp_aux_set_length(struct mtk_dp *mtk_dp, size_t length)
{
if (length > 0) {
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3650,
(length - 1) << 12,
MCU_REQ_DATA_NUM_AUX_TX_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_362C,
0,
AUX_NO_LENGTH_AUX_TX_P0 |
AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK |
AUX_RESERVED_RW_0_AUX_TX_P0_MASK);
} else {
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_362C,
AUX_NO_LENGTH_AUX_TX_P0,
AUX_NO_LENGTH_AUX_TX_P0 |
AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK |
AUX_RESERVED_RW_0_AUX_TX_P0_MASK);
}
}
static int mtk_dp_aux_wait_for_completion(struct mtk_dp *mtk_dp, bool is_read)
{
int wait_reply = MTK_DP_AUX_WAIT_REPLY_COUNT;
while (--wait_reply) {
u32 aux_irq_status;
if (is_read) {
u32 fifo_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3618);
if (fifo_status &
(AUX_RX_FIFO_WRITE_POINTER_AUX_TX_P0_MASK |
AUX_RX_FIFO_FULL_AUX_TX_P0_MASK)) {
return 0;
}
}
aux_irq_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3640);
if (aux_irq_status & AUX_RX_AUX_RECV_COMPLETE_IRQ_AUX_TX_P0)
return 0;
if (aux_irq_status & AUX_400US_TIMEOUT_IRQ_AUX_TX_P0)
return -ETIMEDOUT;
/* Give the hardware a chance to reach completion before retrying */
usleep_range(100, 500);
}
return -ETIMEDOUT;
}
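/*
 * Issue a single AUX request to the sink and wait for its reply; on reads,
 * the received bytes are pulled from the RX FIFO afterwards.
 */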
static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd,
u32 addr, u8 *buf, size_t length, u8 *reply_cmd)
{
int ret;
if (is_read && (length > DP_AUX_MAX_PAYLOAD_BYTES ||
(cmd == DP_AUX_NATIVE_READ && !length)))
return -EINVAL;
if (!is_read)
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3704,
AUX_TX_FIFO_NEW_MODE_EN_AUX_TX_P0,
AUX_TX_FIFO_NEW_MODE_EN_AUX_TX_P0);
/* We need to clear fifo and irq before sending commands to the sink device. */
mtk_dp_aux_clear_fifo(mtk_dp);
mtk_dp_aux_irq_clear(mtk_dp);
mtk_dp_aux_set_cmd(mtk_dp, cmd, addr);
mtk_dp_aux_set_length(mtk_dp, length);
if (!is_read) {
if (length)
mtk_dp_aux_fill_write_fifo(mtk_dp, buf, length);
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3704,
AUX_TX_FIFO_WDATA_NEW_MODE_T_AUX_TX_P0_MASK,
AUX_TX_FIFO_WDATA_NEW_MODE_T_AUX_TX_P0_MASK);
}
mtk_dp_aux_request_ready(mtk_dp);
/* Wait for feedback from sink device. */
ret = mtk_dp_aux_wait_for_completion(mtk_dp, is_read);
*reply_cmd = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3624) &
AUX_RX_REPLY_COMMAND_AUX_TX_P0_MASK;
if (ret) {
u32 phy_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3628) &
AUX_RX_PHY_STATE_AUX_TX_P0_MASK;
if (phy_status != AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE) {
			dev_err(mtk_dp->dev,
				"AUX RX hang, SW reset needed\n");
return -EIO;
}
return -ETIMEDOUT;
}
if (!length) {
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_362C,
0,
AUX_NO_LENGTH_AUX_TX_P0 |
AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK |
AUX_RESERVED_RW_0_AUX_TX_P0_MASK);
} else if (is_read) {
int read_delay;
if (cmd == (DP_AUX_I2C_READ | DP_AUX_I2C_MOT) ||
cmd == DP_AUX_I2C_READ)
read_delay = 500;
else
read_delay = 100;
mtk_dp_aux_read_rx_fifo(mtk_dp, buf, length, read_delay);
}
return 0;
}
static void mtk_dp_set_swing_pre_emphasis(struct mtk_dp *mtk_dp, int lane_num,
int swing_val, int preemphasis)
{
u32 lane_shift = lane_num * DP_TX1_VOLT_SWING_SHIFT;
dev_dbg(mtk_dp->dev,
"link training: swing_val = 0x%x, pre-emphasis = 0x%x\n",
swing_val, preemphasis);
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_SWING_EMP,
swing_val << (DP_TX0_VOLT_SWING_SHIFT + lane_shift),
DP_TX0_VOLT_SWING_MASK << lane_shift);
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_SWING_EMP,
preemphasis << (DP_TX0_PRE_EMPH_SHIFT + lane_shift),
DP_TX0_PRE_EMPH_MASK << lane_shift);
}
static void mtk_dp_reset_swing_pre_emphasis(struct mtk_dp *mtk_dp)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_SWING_EMP,
0,
DP_TX0_VOLT_SWING_MASK |
DP_TX1_VOLT_SWING_MASK |
DP_TX2_VOLT_SWING_MASK |
DP_TX3_VOLT_SWING_MASK |
DP_TX0_PRE_EMPH_MASK |
DP_TX1_PRE_EMPH_MASK |
DP_TX2_PRE_EMPH_MASK |
DP_TX3_PRE_EMPH_MASK);
}
static u32 mtk_dp_swirq_get_clear(struct mtk_dp *mtk_dp)
{
u32 irq_status = mtk_dp_read(mtk_dp, MTK_DP_TRANS_P0_35D0) &
SW_IRQ_FINAL_STATUS_DP_TRANS_P0_MASK;
if (irq_status) {
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35C8,
irq_status, SW_IRQ_CLR_DP_TRANS_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35C8,
0, SW_IRQ_CLR_DP_TRANS_P0_MASK);
}
return irq_status;
}
static u32 mtk_dp_hwirq_get_clear(struct mtk_dp *mtk_dp)
{
u32 irq_status = (mtk_dp_read(mtk_dp, MTK_DP_TRANS_P0_3418) &
IRQ_STATUS_DP_TRANS_P0_MASK) >> 12;
if (irq_status) {
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3418,
irq_status, IRQ_CLR_DP_TRANS_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3418,
0, IRQ_CLR_DP_TRANS_P0_MASK);
}
return irq_status;
}
static void mtk_dp_hwirq_enable(struct mtk_dp *mtk_dp, bool enable)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3418,
enable ? 0 :
IRQ_MASK_DP_TRANS_P0_DISC_IRQ |
IRQ_MASK_DP_TRANS_P0_CONN_IRQ |
IRQ_MASK_DP_TRANS_P0_INT_IRQ,
IRQ_MASK_DP_TRANS_P0_MASK);
}
static void mtk_dp_initialize_settings(struct mtk_dp *mtk_dp)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_342C,
XTAL_FREQ_DP_TRANS_P0_DEFAULT,
XTAL_FREQ_DP_TRANS_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3540,
FEC_CLOCK_EN_MODE_DP_TRANS_P0,
FEC_CLOCK_EN_MODE_DP_TRANS_P0);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_31EC,
AUDIO_CH_SRC_SEL_DP_ENC0_P0,
AUDIO_CH_SRC_SEL_DP_ENC0_P0);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_304C,
0, SDP_VSYNC_RISING_MASK_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_IRQ_MASK,
IRQ_MASK_AUX_TOP_IRQ, IRQ_MASK_AUX_TOP_IRQ);
}
static void mtk_dp_initialize_hpd_detect_settings(struct mtk_dp *mtk_dp)
{
u32 val;
/* Debounce threshold */
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3410,
8, HPD_DEB_THD_DP_TRANS_P0_MASK);
val = (HPD_INT_THD_DP_TRANS_P0_LOWER_500US |
HPD_INT_THD_DP_TRANS_P0_UPPER_1100US) << 4;
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3410,
val, HPD_INT_THD_DP_TRANS_P0_MASK);
/*
* Connect threshold 1.5ms + 5 x 0.1ms = 2ms
* Disconnect threshold 1.5ms + 5 x 0.1ms = 2ms
*/
val = (5 << 8) | (5 << 12);
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3410,
val,
HPD_DISC_THD_DP_TRANS_P0_MASK |
HPD_CONN_THD_DP_TRANS_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3430,
HPD_INT_THD_ECO_DP_TRANS_P0_HIGH_BOUND_EXT,
HPD_INT_THD_ECO_DP_TRANS_P0_MASK);
}
static void mtk_dp_initialize_aux_settings(struct mtk_dp *mtk_dp)
{
/* modify timeout threshold = 0x1595 */
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_360C,
AUX_TIMEOUT_THR_AUX_TX_P0_VAL,
AUX_TIMEOUT_THR_AUX_TX_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3658,
0, AUX_TX_OV_EN_AUX_TX_P0_MASK);
/* 25 for 26M */
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3634,
AUX_TX_OVER_SAMPLE_RATE_FOR_26M << 8,
AUX_TX_OVER_SAMPLE_RATE_AUX_TX_P0_MASK);
/* 13 for 26M */
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3614,
AUX_RX_UI_CNT_THR_AUX_FOR_26M,
AUX_RX_UI_CNT_THR_AUX_TX_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_37C8,
MTK_ATOP_EN_AUX_TX_P0,
MTK_ATOP_EN_AUX_TX_P0);
/* Set complete reply mode for AUX */
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3690,
RX_REPLY_COMPLETE_MODE_AUX_TX_P0,
RX_REPLY_COMPLETE_MODE_AUX_TX_P0);
}
static void mtk_dp_initialize_digital_settings(struct mtk_dp *mtk_dp)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_304C,
0, VBID_VIDEO_MUTE_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3368,
BS2BS_MODE_DP_ENC1_P0_VAL << 12,
BS2BS_MODE_DP_ENC1_P0_MASK);
/* dp tx encoder reset all sw */
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0,
DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0);
/* Wait for sw reset to complete */
usleep_range(1000, 5000);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
0, DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0);
}
static void mtk_dp_digital_sw_reset(struct mtk_dp *mtk_dp)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_340C,
DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0,
DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0);
/* Wait for sw reset to complete */
usleep_range(1000, 5000);
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_340C,
0, DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0);
}
static void mtk_dp_set_lanes(struct mtk_dp *mtk_dp, int lanes)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35F0,
lanes == 0 ? 0 : DP_TRANS_DUMMY_RW_0,
DP_TRANS_DUMMY_RW_0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3000,
lanes, LANE_NUM_DP_ENC0_P0_MASK);
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_34A4,
lanes << 2, LANE_NUM_DP_TRANS_P0_MASK);
}
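/*
 * Read the PHY calibration values from the "dp_calibration_data" nvmem cell;
 * fall back to the per-SoC defaults if the cell is missing or invalid.
 */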
static void mtk_dp_get_calibration_data(struct mtk_dp *mtk_dp)
{
const struct mtk_dp_efuse_fmt *fmt;
struct device *dev = mtk_dp->dev;
struct nvmem_cell *cell;
u32 *cal_data = mtk_dp->cal_data;
u32 *buf;
int i;
size_t len;
cell = nvmem_cell_get(dev, "dp_calibration_data");
if (IS_ERR(cell)) {
dev_warn(dev, "Failed to get nvmem cell dp_calibration_data\n");
goto use_default_val;
}
buf = (u32 *)nvmem_cell_read(cell, &len);
nvmem_cell_put(cell);
if (IS_ERR(buf) || ((len / sizeof(u32)) != 4)) {
dev_warn(dev, "Failed to read nvmem_cell_read\n");
if (!IS_ERR(buf))
kfree(buf);
goto use_default_val;
}
for (i = 0; i < MTK_DP_CAL_MAX; i++) {
fmt = &mtk_dp->data->efuse_fmt[i];
cal_data[i] = (buf[fmt->idx] >> fmt->shift) & fmt->mask;
if (cal_data[i] < fmt->min_val || cal_data[i] > fmt->max_val) {
dev_warn(mtk_dp->dev, "Invalid efuse data, idx = %d\n", i);
kfree(buf);
goto use_default_val;
}
}
kfree(buf);
return;
use_default_val:
dev_warn(mtk_dp->dev, "Use default calibration data\n");
for (i = 0; i < MTK_DP_CAL_MAX; i++)
cal_data[i] = mtk_dp->data->efuse_fmt[i].default_val;
}
static void mtk_dp_set_calibration_data(struct mtk_dp *mtk_dp)
{
u32 *cal_data = mtk_dp->cal_data;
mtk_dp_update_bits(mtk_dp, DP_PHY_GLB_DPAUX_TX,
cal_data[MTK_DP_CAL_CLKTX_IMPSE] << 20,
RG_CKM_PT0_CKTX_IMPSEL);
mtk_dp_update_bits(mtk_dp, DP_PHY_GLB_BIAS_GEN_00,
cal_data[MTK_DP_CAL_GLB_BIAS_TRIM] << 16,
RG_XTP_GLB_BIAS_INTR_CTRL);
mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_0,
cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] << 12,
RG_XTP_LN0_TX_IMPSEL_PMOS);
mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_0,
cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] << 16,
RG_XTP_LN0_TX_IMPSEL_NMOS);
mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_1,
cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] << 12,
RG_XTP_LN1_TX_IMPSEL_PMOS);
mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_1,
cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] << 16,
RG_XTP_LN1_TX_IMPSEL_NMOS);
mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_2,
cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] << 12,
RG_XTP_LN2_TX_IMPSEL_PMOS);
mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_2,
cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] << 16,
RG_XTP_LN2_TX_IMPSEL_NMOS);
mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_3,
cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] << 12,
RG_XTP_LN3_TX_IMPSEL_PMOS);
mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_3,
cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] << 16,
RG_XTP_LN3_TX_IMPSEL_NMOS);
}
static int mtk_dp_phy_configure(struct mtk_dp *mtk_dp,
u32 link_rate, int lane_count)
{
int ret;
union phy_configure_opts phy_opts = {
.dp = {
.link_rate = drm_dp_bw_code_to_link_rate(link_rate) / 100,
.set_rate = 1,
.lanes = lane_count,
.set_lanes = 1,
.ssc = mtk_dp->train_info.sink_ssc,
}
};
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, DP_PWR_STATE_BANDGAP,
DP_PWR_STATE_MASK);
ret = phy_configure(mtk_dp->phy, &phy_opts);
if (ret)
return ret;
mtk_dp_set_calibration_data(mtk_dp);
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
DP_PWR_STATE_BANDGAP_TPLL_LANE, DP_PWR_STATE_MASK);
return 0;
}
static void mtk_dp_set_idle_pattern(struct mtk_dp *mtk_dp, bool enable)
{
u32 val = POST_MISC_DATA_LANE0_OV_DP_TRANS_P0_MASK |
POST_MISC_DATA_LANE1_OV_DP_TRANS_P0_MASK |
POST_MISC_DATA_LANE2_OV_DP_TRANS_P0_MASK |
POST_MISC_DATA_LANE3_OV_DP_TRANS_P0_MASK;
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3580,
enable ? val : 0, val);
}
static void mtk_dp_train_set_pattern(struct mtk_dp *mtk_dp, int pattern)
{
/* TPS1 */
if (pattern == 1)
mtk_dp_set_idle_pattern(mtk_dp, false);
mtk_dp_update_bits(mtk_dp,
MTK_DP_TRANS_P0_3400,
pattern ? BIT(pattern - 1) << 12 : 0,
PATTERN1_EN_DP_TRANS_P0_MASK |
PATTERN2_EN_DP_TRANS_P0_MASK |
PATTERN3_EN_DP_TRANS_P0_MASK |
PATTERN4_EN_DP_TRANS_P0_MASK);
}
static void mtk_dp_set_enhanced_frame_mode(struct mtk_dp *mtk_dp)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3000,
ENHANCED_FRAME_EN_DP_ENC0_P0,
ENHANCED_FRAME_EN_DP_ENC0_P0);
}
static void mtk_dp_training_set_scramble(struct mtk_dp *mtk_dp, bool enable)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3404,
enable ? DP_SCR_EN_DP_TRANS_P0_MASK : 0,
DP_SCR_EN_DP_TRANS_P0_MASK);
}
static void mtk_dp_video_mute(struct mtk_dp *mtk_dp, bool enable)
{
struct arm_smccc_res res;
u32 val = VIDEO_MUTE_SEL_DP_ENC0_P0 |
(enable ? VIDEO_MUTE_SW_DP_ENC0_P0 : 0);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3000,
val,
VIDEO_MUTE_SEL_DP_ENC0_P0 |
VIDEO_MUTE_SW_DP_ENC0_P0);
arm_smccc_smc(MTK_DP_SIP_CONTROL_AARCH32,
mtk_dp->data->smc_cmd, enable,
0, 0, 0, 0, 0, &res);
dev_dbg(mtk_dp->dev, "smc cmd: 0x%x, p1: %s, ret: 0x%lx-0x%lx\n",
mtk_dp->data->smc_cmd, enable ? "enable" : "disable", res.a0, res.a1);
}
static void mtk_dp_audio_mute(struct mtk_dp *mtk_dp, bool mute)
{
u32 val[3];
if (mute) {
val[0] = VBID_AUDIO_MUTE_FLAG_SW_DP_ENC0_P0 |
VBID_AUDIO_MUTE_FLAG_SEL_DP_ENC0_P0;
val[1] = 0;
val[2] = 0;
} else {
val[0] = 0;
val[1] = AU_EN_DP_ENC0_P0;
/* Send one every two frames */
val[2] = 0x0F;
}
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3030,
val[0],
VBID_AUDIO_MUTE_FLAG_SW_DP_ENC0_P0 |
VBID_AUDIO_MUTE_FLAG_SEL_DP_ENC0_P0);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3088,
val[1], AU_EN_DP_ENC0_P0);
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30A4,
val[2], AU_TS_CFG_DP_ENC0_P0_MASK);
}
static void mtk_dp_aux_panel_poweron(struct mtk_dp *mtk_dp, bool pwron)
{
if (pwron) {
/* power on aux */
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
DP_PWR_STATE_BANDGAP_TPLL_LANE,
DP_PWR_STATE_MASK);
/* power on panel */
drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
usleep_range(2000, 5000);
} else {
/* power off panel */
drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
usleep_range(2000, 3000);
/* power off aux */
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
DP_PWR_STATE_BANDGAP_TPLL,
DP_PWR_STATE_MASK);
}
}
static void mtk_dp_power_enable(struct mtk_dp *mtk_dp)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_RESET_AND_PROBE,
0, SW_RST_B_PHYD);
/* Wait for power enable */
usleep_range(10, 200);
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_RESET_AND_PROBE,
SW_RST_B_PHYD, SW_RST_B_PHYD);
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
DP_PWR_STATE_BANDGAP_TPLL, DP_PWR_STATE_MASK);
mtk_dp_write(mtk_dp, MTK_DP_1040,
RG_DPAUX_RX_VALID_DEGLITCH_EN | RG_XTP_GLB_CKDET_EN |
RG_DPAUX_RX_EN);
mtk_dp_update_bits(mtk_dp, MTK_DP_0034, 0, DA_CKM_CKTX0_EN_FORCE_EN);
}
static void mtk_dp_power_disable(struct mtk_dp *mtk_dp)
{
mtk_dp_write(mtk_dp, MTK_DP_TOP_PWR_STATE, 0);
mtk_dp_update_bits(mtk_dp, MTK_DP_0034,
DA_CKM_CKTX0_EN_FORCE_EN, DA_CKM_CKTX0_EN_FORCE_EN);
/* Disable RX */
mtk_dp_write(mtk_dp, MTK_DP_1040, 0);
mtk_dp_write(mtk_dp, MTK_DP_TOP_MEM_PD,
0x550 | FUSE_SEL | MEM_ISO_EN);
}
static void mtk_dp_initialize_priv_data(struct mtk_dp *mtk_dp)
{
bool plugged_in = (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP);
mtk_dp->train_info.link_rate = DP_LINK_BW_5_4;
mtk_dp->train_info.lane_count = mtk_dp->max_lanes;
mtk_dp->train_info.cable_plugged_in = plugged_in;
mtk_dp->info.format = DP_PIXELFORMAT_RGB;
memset(&mtk_dp->info.vm, 0, sizeof(struct videomode));
mtk_dp->audio_enable = false;
}
static void mtk_dp_sdp_set_down_cnt_init(struct mtk_dp *mtk_dp,
u32 sram_read_start)
{
u32 sdp_down_cnt_init = 0;
struct drm_display_mode mode;
struct videomode *vm = &mtk_dp->info.vm;
drm_display_mode_from_videomode(vm, &mode);
if (mode.clock > 0)
sdp_down_cnt_init = sram_read_start *
mtk_dp->train_info.link_rate * 2700 * 8 /
(mode.clock * 4);
switch (mtk_dp->train_info.lane_count) {
case 1:
sdp_down_cnt_init = max_t(u32, sdp_down_cnt_init, 0x1A);
break;
case 2:
		/* case for low resolution and high audio sample rate */
sdp_down_cnt_init = max_t(u32, sdp_down_cnt_init, 0x10);
sdp_down_cnt_init += mode.vtotal <= 525 ? 4 : 0;
break;
case 4:
default:
sdp_down_cnt_init = max_t(u32, sdp_down_cnt_init, 6);
break;
}
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3040,
sdp_down_cnt_init,
SDP_DOWN_CNT_INIT_DP_ENC0_P0_MASK);
}
static void mtk_dp_sdp_set_down_cnt_init_in_hblank(struct mtk_dp *mtk_dp)
{
int pix_clk_mhz;
u32 dc_offset;
u32 spd_down_cnt_init = 0;
struct drm_display_mode mode;
struct videomode *vm = &mtk_dp->info.vm;
drm_display_mode_from_videomode(vm, &mode);
pix_clk_mhz = mtk_dp->info.format == DP_PIXELFORMAT_YUV420 ?
mode.clock / 2000 : mode.clock / 1000;
switch (mtk_dp->train_info.lane_count) {
case 1:
spd_down_cnt_init = 0x20;
break;
case 2:
dc_offset = (mode.vtotal <= 525) ? 0x14 : 0x00;
spd_down_cnt_init = 0x18 + dc_offset;
break;
case 4:
default:
dc_offset = (mode.vtotal <= 525) ? 0x08 : 0x00;
if (pix_clk_mhz > mtk_dp->train_info.link_rate * 27)
spd_down_cnt_init = 0x8;
else
spd_down_cnt_init = 0x10 + dc_offset;
break;
}
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3364, spd_down_cnt_init,
SDP_DOWN_CNT_INIT_IN_HBLANK_DP_ENC1_P0_MASK);
}
static void mtk_dp_setup_tu(struct mtk_dp *mtk_dp)
{
u32 sram_read_start = min_t(u32, MTK_DP_TBC_BUF_READ_START_ADDR,
mtk_dp->info.vm.hactive /
mtk_dp->train_info.lane_count /
MTK_DP_4P1T / MTK_DP_HDE /
MTK_DP_PIX_PER_ADDR);
mtk_dp_set_sram_read_start(mtk_dp, sram_read_start);
mtk_dp_setup_encoder(mtk_dp);
mtk_dp_sdp_set_down_cnt_init_in_hblank(mtk_dp);
mtk_dp_sdp_set_down_cnt_init(mtk_dp, sram_read_start);
}
static void mtk_dp_set_tx_out(struct mtk_dp *mtk_dp)
{
mtk_dp_setup_tu(mtk_dp);
}
static void mtk_dp_train_update_swing_pre(struct mtk_dp *mtk_dp, int lanes,
u8 dpcd_adjust_req[2])
{
int lane;
for (lane = 0; lane < lanes; ++lane) {
u8 val;
u8 swing;
u8 preemphasis;
int index = lane / 2;
int shift = lane % 2 ? DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : 0;
swing = (dpcd_adjust_req[index] >> shift) &
DP_ADJUST_VOLTAGE_SWING_LANE0_MASK;
preemphasis = ((dpcd_adjust_req[index] >> shift) &
DP_ADJUST_PRE_EMPHASIS_LANE0_MASK) >>
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT;
val = swing << DP_TRAIN_VOLTAGE_SWING_SHIFT |
preemphasis << DP_TRAIN_PRE_EMPHASIS_SHIFT;
if (swing == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
val |= DP_TRAIN_MAX_SWING_REACHED;
if (preemphasis == 3)
val |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
mtk_dp_set_swing_pre_emphasis(mtk_dp, lane, swing, preemphasis);
drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_LANE0_SET + lane,
val);
}
}
static void mtk_dp_pattern(struct mtk_dp *mtk_dp, bool is_tps1)
{
int pattern;
unsigned int aux_offset;
if (is_tps1) {
pattern = 1;
aux_offset = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1;
} else {
aux_offset = mtk_dp->train_info.channel_eq_pattern;
switch (mtk_dp->train_info.channel_eq_pattern) {
case DP_TRAINING_PATTERN_4:
pattern = 4;
break;
case DP_TRAINING_PATTERN_3:
pattern = 3;
aux_offset |= DP_LINK_SCRAMBLING_DISABLE;
break;
case DP_TRAINING_PATTERN_2:
default:
pattern = 2;
aux_offset |= DP_LINK_SCRAMBLING_DISABLE;
break;
}
}
mtk_dp_train_set_pattern(mtk_dp, pattern);
drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET, aux_offset);
}
static int mtk_dp_train_setting(struct mtk_dp *mtk_dp, u8 target_link_rate,
u8 target_lane_count)
{
int ret;
drm_dp_dpcd_writeb(&mtk_dp->aux, DP_LINK_BW_SET, target_link_rate);
drm_dp_dpcd_writeb(&mtk_dp->aux, DP_LANE_COUNT_SET,
target_lane_count | DP_LANE_COUNT_ENHANCED_FRAME_EN);
if (mtk_dp->train_info.sink_ssc)
drm_dp_dpcd_writeb(&mtk_dp->aux, DP_DOWNSPREAD_CTRL,
DP_SPREAD_AMP_0_5);
mtk_dp_set_lanes(mtk_dp, target_lane_count / 2);
ret = mtk_dp_phy_configure(mtk_dp, target_link_rate, target_lane_count);
if (ret)
return ret;
dev_dbg(mtk_dp->dev,
"Link train target_link_rate = 0x%x, target_lane_count = 0x%x\n",
target_link_rate, target_lane_count);
return 0;
}
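/* Clock recovery (CR) phase of link training, driven with TPS1 */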
static int mtk_dp_train_cr(struct mtk_dp *mtk_dp, u8 target_lane_count)
{
u8 lane_adjust[2] = {};
u8 link_status[DP_LINK_STATUS_SIZE] = {};
u8 prev_lane_adjust = 0xff;
int train_retries = 0;
int voltage_retries = 0;
mtk_dp_pattern(mtk_dp, true);
/* In DP spec 1.4, the retry count of CR is defined as 10. */
do {
train_retries++;
if (!mtk_dp->train_info.cable_plugged_in) {
mtk_dp_train_set_pattern(mtk_dp, 0);
return -ENODEV;
}
drm_dp_dpcd_read(&mtk_dp->aux, DP_ADJUST_REQUEST_LANE0_1,
lane_adjust, sizeof(lane_adjust));
mtk_dp_train_update_swing_pre(mtk_dp, target_lane_count,
lane_adjust);
drm_dp_link_train_clock_recovery_delay(&mtk_dp->aux,
mtk_dp->rx_cap);
/* check link status from sink device */
drm_dp_dpcd_read_link_status(&mtk_dp->aux, link_status);
if (drm_dp_clock_recovery_ok(link_status,
target_lane_count)) {
dev_dbg(mtk_dp->dev, "Link train CR pass\n");
return 0;
}
/*
		 * In DP spec 1.4, if the current voltage level is the same
		 * as the previous voltage level, we need to retry 5 times.
*/
if (prev_lane_adjust == link_status[4]) {
voltage_retries++;
/*
			 * Conditions for CR failure:
			 * 1. Failed to pass CR using the same voltage
			 *    level more than five times.
			 * 2. Failed to pass CR when the current voltage
			 *    level is the same as the previous one and
			 *    the max voltage level (3) is reached.
*/
if (voltage_retries > MTK_DP_TRAIN_VOLTAGE_LEVEL_RETRY ||
(prev_lane_adjust & DP_ADJUST_VOLTAGE_SWING_LANE0_MASK) == 3) {
dev_dbg(mtk_dp->dev, "Link train CR fail\n");
break;
}
} else {
/*
* If the voltage level is changed, we need to
* re-calculate this retry count.
*/
voltage_retries = 0;
}
prev_lane_adjust = link_status[4];
} while (train_retries < MTK_DP_TRAIN_DOWNSCALE_RETRY);
/* Failed to train CR, and disable pattern. */
drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_DISABLE);
mtk_dp_train_set_pattern(mtk_dp, 0);
return -ETIMEDOUT;
}
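/* Channel equalization (EQ) phase of link training */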
static int mtk_dp_train_eq(struct mtk_dp *mtk_dp, u8 target_lane_count)
{
u8 lane_adjust[2] = {};
u8 link_status[DP_LINK_STATUS_SIZE] = {};
int train_retries = 0;
mtk_dp_pattern(mtk_dp, false);
do {
train_retries++;
if (!mtk_dp->train_info.cable_plugged_in) {
mtk_dp_train_set_pattern(mtk_dp, 0);
return -ENODEV;
}
drm_dp_dpcd_read(&mtk_dp->aux, DP_ADJUST_REQUEST_LANE0_1,
lane_adjust, sizeof(lane_adjust));
mtk_dp_train_update_swing_pre(mtk_dp, target_lane_count,
lane_adjust);
drm_dp_link_train_channel_eq_delay(&mtk_dp->aux,
mtk_dp->rx_cap);
/* check link status from sink device */
drm_dp_dpcd_read_link_status(&mtk_dp->aux, link_status);
if (drm_dp_channel_eq_ok(link_status, target_lane_count)) {
dev_dbg(mtk_dp->dev, "Link train EQ pass\n");
/* Training done, and disable pattern. */
drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_DISABLE);
mtk_dp_train_set_pattern(mtk_dp, 0);
return 0;
}
dev_dbg(mtk_dp->dev, "Link train EQ fail\n");
} while (train_retries < MTK_DP_TRAIN_DOWNSCALE_RETRY);
/* Failed to train EQ, and disable pattern. */
drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_DISABLE);
mtk_dp_train_set_pattern(mtk_dp, 0);
return -ETIMEDOUT;
}
static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
{
u8 val;
ssize_t ret;
/*
	 * If this is an eDP port and the capabilities were already parsed,
	 * we can skip reading them again: eDP panels aren't hotpluggable,
	 * so the caps and training information never change during a boot.
*/
if (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP &&
mtk_dp->rx_cap[DP_MAX_LINK_RATE] &&
mtk_dp->train_info.sink_ssc)
return 0;
ret = drm_dp_read_dpcd_caps(&mtk_dp->aux, mtk_dp->rx_cap);
if (ret < 0)
return ret;
if (drm_dp_tps4_supported(mtk_dp->rx_cap))
mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_4;
else if (drm_dp_tps3_supported(mtk_dp->rx_cap))
mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_3;
else
mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_2;
mtk_dp->train_info.sink_ssc = drm_dp_max_downspread(mtk_dp->rx_cap);
ret = drm_dp_dpcd_readb(&mtk_dp->aux, DP_MSTM_CAP, &val);
if (ret < 1) {
drm_err(mtk_dp->drm_dev, "Read mstm cap failed\n");
return ret == 0 ? -EIO : ret;
}
if (val & DP_MST_CAP) {
/* Clear DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 */
ret = drm_dp_dpcd_readb(&mtk_dp->aux,
DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
&val);
if (ret < 1) {
drm_err(mtk_dp->drm_dev, "Read irq vector failed\n");
return ret == 0 ? -EIO : ret;
}
if (val) {
ret = drm_dp_dpcd_writeb(&mtk_dp->aux,
DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
val);
if (ret < 0)
return ret;
}
}
return 0;
}
static bool mtk_dp_edid_parse_audio_capabilities(struct mtk_dp *mtk_dp,
struct mtk_dp_audio_cfg *cfg)
{
if (!mtk_dp->data->audio_supported)
return false;
if (mtk_dp->info.audio_cur_cfg.sad_count <= 0) {
drm_info(mtk_dp->drm_dev, "The SADs is NULL\n");
return false;
}
return true;
}
static void mtk_dp_train_change_mode(struct mtk_dp *mtk_dp)
{
phy_reset(mtk_dp->phy);
mtk_dp_reset_swing_pre_emphasis(mtk_dp);
}
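/*
 * Full link-training loop: start at the highest supported link rate and lane
 * count, fall back to a lower link rate on CR failure and to fewer lanes on
 * EQ failure, up to the retry limit.
 */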
static int mtk_dp_training(struct mtk_dp *mtk_dp)
{
int ret;
u8 lane_count, link_rate, train_limit, max_link_rate;
link_rate = min_t(u8, mtk_dp->max_linkrate,
mtk_dp->rx_cap[DP_MAX_LINK_RATE]);
max_link_rate = link_rate;
lane_count = min_t(u8, mtk_dp->max_lanes,
drm_dp_max_lane_count(mtk_dp->rx_cap));
/*
	 * TPS are generated by the hardware pattern generator. The hardware
	 * requires the scrambler to be disabled before the TPS pattern
	 * generator is used.
*/
mtk_dp_training_set_scramble(mtk_dp, false);
for (train_limit = 6; train_limit > 0; train_limit--) {
mtk_dp_train_change_mode(mtk_dp);
ret = mtk_dp_train_setting(mtk_dp, link_rate, lane_count);
if (ret)
return ret;
ret = mtk_dp_train_cr(mtk_dp, lane_count);
if (ret == -ENODEV) {
return ret;
} else if (ret) {
/* reduce link rate */
switch (link_rate) {
case DP_LINK_BW_1_62:
lane_count = lane_count / 2;
link_rate = max_link_rate;
if (lane_count == 0)
return -EIO;
break;
case DP_LINK_BW_2_7:
link_rate = DP_LINK_BW_1_62;
break;
case DP_LINK_BW_5_4:
link_rate = DP_LINK_BW_2_7;
break;
case DP_LINK_BW_8_1:
link_rate = DP_LINK_BW_5_4;
break;
default:
return -EINVAL;
}
continue;
}
ret = mtk_dp_train_eq(mtk_dp, lane_count);
if (ret == -ENODEV) {
return ret;
} else if (ret) {
/* reduce lane count */
if (lane_count == 0)
return -EIO;
lane_count /= 2;
continue;
}
		/* If we reach this point, training is done. */
break;
}
if (train_limit == 0)
return -ETIMEDOUT;
mtk_dp->train_info.link_rate = link_rate;
mtk_dp->train_info.lane_count = lane_count;
/*
	 * After training is done, we output the normal stream instead of TPS,
	 * so scrambling must be re-enabled.
*/
mtk_dp_training_set_scramble(mtk_dp, true);
mtk_dp_set_enhanced_frame_mode(mtk_dp);
return 0;
}
static void mtk_dp_video_enable(struct mtk_dp *mtk_dp, bool enable)
{
/* the mute sequence is different between enable and disable */
if (enable) {
mtk_dp_msa_bypass_enable(mtk_dp, false);
mtk_dp_pg_enable(mtk_dp, false);
mtk_dp_set_tx_out(mtk_dp);
mtk_dp_video_mute(mtk_dp, false);
} else {
mtk_dp_video_mute(mtk_dp, true);
mtk_dp_pg_enable(mtk_dp, true);
mtk_dp_msa_bypass_enable(mtk_dp, true);
}
}
static void mtk_dp_audio_sdp_setup(struct mtk_dp *mtk_dp,
struct mtk_dp_audio_cfg *cfg)
{
struct dp_sdp sdp;
struct hdmi_audio_infoframe frame;
hdmi_audio_infoframe_init(&frame);
frame.coding_type = HDMI_AUDIO_CODING_TYPE_PCM;
frame.channels = cfg->channels;
frame.sample_frequency = cfg->sample_rate;
switch (cfg->word_length_bits) {
case 16:
frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
break;
case 20:
frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_20;
break;
case 24:
default:
frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_24;
break;
}
hdmi_audio_infoframe_pack_for_dp(&frame, &sdp, MTK_DP_VERSION);
mtk_dp_audio_sdp_asp_set_channels(mtk_dp, cfg->channels);
mtk_dp_setup_sdp_aui(mtk_dp, &sdp);
}
static void mtk_dp_audio_setup(struct mtk_dp *mtk_dp,
struct mtk_dp_audio_cfg *cfg)
{
mtk_dp_audio_sdp_setup(mtk_dp, cfg);
mtk_dp_audio_channel_status_set(mtk_dp, cfg);
mtk_dp_audio_setup_channels(mtk_dp, cfg);
mtk_dp_audio_set_divider(mtk_dp);
}
static int mtk_dp_video_config(struct mtk_dp *mtk_dp)
{
mtk_dp_config_mn_mode(mtk_dp);
mtk_dp_set_msa(mtk_dp);
mtk_dp_set_color_depth(mtk_dp);
return mtk_dp_set_color_format(mtk_dp, mtk_dp->info.format);
}
static void mtk_dp_init_port(struct mtk_dp *mtk_dp)
{
mtk_dp_set_idle_pattern(mtk_dp, true);
mtk_dp_initialize_priv_data(mtk_dp);
mtk_dp_initialize_settings(mtk_dp);
mtk_dp_initialize_aux_settings(mtk_dp);
mtk_dp_initialize_digital_settings(mtk_dp);
mtk_dp_initialize_hpd_detect_settings(mtk_dp);
mtk_dp_digital_sw_reset(mtk_dp);
}
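/* Threaded IRQ handler: debounce the cable state and notify DRM of HPD changes */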
static irqreturn_t mtk_dp_hpd_event_thread(int hpd, void *dev)
{
struct mtk_dp *mtk_dp = dev;
unsigned long flags;
u32 status;
if (mtk_dp->need_debounce && mtk_dp->train_info.cable_plugged_in)
msleep(100);
spin_lock_irqsave(&mtk_dp->irq_thread_lock, flags);
status = mtk_dp->irq_thread_handle;
mtk_dp->irq_thread_handle = 0;
spin_unlock_irqrestore(&mtk_dp->irq_thread_lock, flags);
if (status & MTK_DP_THREAD_CABLE_STATE_CHG) {
if (mtk_dp->bridge.dev)
drm_helper_hpd_irq_event(mtk_dp->bridge.dev);
if (!mtk_dp->train_info.cable_plugged_in) {
mtk_dp_disable_sdp_aui(mtk_dp);
memset(&mtk_dp->info.audio_cur_cfg, 0,
sizeof(mtk_dp->info.audio_cur_cfg));
mtk_dp->need_debounce = false;
mod_timer(&mtk_dp->debounce_timer,
jiffies + msecs_to_jiffies(100) - 1);
}
}
if (status & MTK_DP_THREAD_HPD_EVENT)
dev_dbg(mtk_dp->dev, "Receive IRQ from sink devices\n");
return IRQ_HANDLED;
}
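/*
 * Hard IRQ handler: read and clear the IRQ status, record the new cable
 * state, and wake the threaded handler to do the remaining work.
 */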
static irqreturn_t mtk_dp_hpd_event(int hpd, void *dev)
{
struct mtk_dp *mtk_dp = dev;
bool cable_sta_chg = false;
unsigned long flags;
u32 irq_status = mtk_dp_swirq_get_clear(mtk_dp) |
mtk_dp_hwirq_get_clear(mtk_dp);
if (!irq_status)
return IRQ_HANDLED;
spin_lock_irqsave(&mtk_dp->irq_thread_lock, flags);
if (irq_status & MTK_DP_HPD_INTERRUPT)
mtk_dp->irq_thread_handle |= MTK_DP_THREAD_HPD_EVENT;
/* Cable state is changed. */
if (irq_status != MTK_DP_HPD_INTERRUPT) {
mtk_dp->irq_thread_handle |= MTK_DP_THREAD_CABLE_STATE_CHG;
cable_sta_chg = true;
}
spin_unlock_irqrestore(&mtk_dp->irq_thread_lock, flags);
	if (cable_sta_chg)
		mtk_dp->train_info.cable_plugged_in =
			!!(mtk_dp_read(mtk_dp, MTK_DP_TRANS_P0_3414) &
			   HPD_DB_DP_TRANS_P0_MASK);
return IRQ_WAKE_THREAD;
}
static int mtk_dp_wait_hpd_asserted(struct drm_dp_aux *mtk_aux, unsigned long wait_us)
{
struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
u32 val;
int ret;
ret = regmap_read_poll_timeout(mtk_dp->regs, MTK_DP_TRANS_P0_3414,
val, !!(val & HPD_DB_DP_TRANS_P0_MASK),
wait_us / 100, wait_us);
if (ret) {
mtk_dp->train_info.cable_plugged_in = false;
return ret;
}
mtk_dp->train_info.cable_plugged_in = true;
ret = mtk_dp_parse_capabilities(mtk_dp);
if (ret) {
drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
return ret;
}
return 0;
}
static int mtk_dp_dt_parse(struct mtk_dp *mtk_dp,
struct platform_device *pdev)
{
struct device_node *endpoint;
struct device *dev = &pdev->dev;
int ret;
void __iomem *base;
u32 linkrate;
int len;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
mtk_dp->regs = devm_regmap_init_mmio(dev, base, &mtk_dp_regmap_config);
if (IS_ERR(mtk_dp->regs))
return PTR_ERR(mtk_dp->regs);
endpoint = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 1, -1);
len = of_property_count_elems_of_size(endpoint,
"data-lanes", sizeof(u32));
if (len < 0 || len > 4 || len == 3) {
dev_err(dev, "invalid data lane size: %d\n", len);
return -EINVAL;
}
mtk_dp->max_lanes = len;
ret = device_property_read_u32(dev, "max-linkrate-mhz", &linkrate);
if (ret) {
dev_err(dev, "failed to read max linkrate: %d\n", ret);
return ret;
}
mtk_dp->max_linkrate = drm_dp_link_rate_to_bw_code(linkrate * 100);
return 0;
}
static void mtk_dp_update_plugged_status(struct mtk_dp *mtk_dp)
{
if (!mtk_dp->data->audio_supported || !mtk_dp->audio_enable)
return;
mutex_lock(&mtk_dp->update_plugged_status_lock);
if (mtk_dp->plugged_cb && mtk_dp->codec_dev)
mtk_dp->plugged_cb(mtk_dp->codec_dev,
				   mtk_dp->enabled &&
mtk_dp->info.audio_cur_cfg.detect_monitor);
mutex_unlock(&mtk_dp->update_plugged_status_lock);
}
static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
enum drm_connector_status ret = connector_status_disconnected;
bool enabled = mtk_dp->enabled;
u8 sink_count = 0;
if (!mtk_dp->train_info.cable_plugged_in)
return ret;
if (!enabled)
mtk_dp_aux_panel_poweron(mtk_dp, true);
/*
	 * Some dongles keep asserting HPD even when no sink device is
	 * connected. To avoid false positives, read the sink count to make
	 * sure a sink device is actually present. After this detect
	 * function, checking the HPD connection is enough to tell whether
	 * a sink device is connected.
*/
drm_dp_dpcd_readb(&mtk_dp->aux, DP_SINK_COUNT, &sink_count);
if (DP_GET_SINK_COUNT(sink_count))
ret = connector_status_connected;
if (!enabled)
mtk_dp_aux_panel_poweron(mtk_dp, false);
return ret;
}
static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
bool enabled = mtk_dp->enabled;
struct edid *new_edid = NULL;
struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg;
struct cea_sad *sads;
if (!enabled) {
drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state);
mtk_dp_aux_panel_poweron(mtk_dp, true);
}
new_edid = drm_get_edid(connector, &mtk_dp->aux.ddc);
/*
* Parse capability here to let atomic_get_input_bus_fmts and
* mode_valid use the capability to calculate sink bitrates.
*/
if (mtk_dp_parse_capabilities(mtk_dp)) {
drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
new_edid = NULL;
}
if (new_edid) {
audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads);
audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid);
}
if (!enabled) {
mtk_dp_aux_panel_poweron(mtk_dp, false);
drm_atomic_bridge_chain_post_disable(bridge, connector->state->state);
}
return new_edid;
}
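/* drm_dp_aux .transfer hook: split the message into chunks the hardware can handle */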
static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
struct drm_dp_aux_msg *msg)
{
struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
bool is_read;
u8 request;
size_t accessed_bytes = 0;
int ret;
if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP &&
!mtk_dp->train_info.cable_plugged_in) {
ret = -EAGAIN;
goto err;
}
switch (msg->request) {
case DP_AUX_I2C_MOT:
case DP_AUX_I2C_WRITE:
case DP_AUX_NATIVE_WRITE:
case DP_AUX_I2C_WRITE_STATUS_UPDATE:
case DP_AUX_I2C_WRITE_STATUS_UPDATE | DP_AUX_I2C_MOT:
request = msg->request & ~DP_AUX_I2C_WRITE_STATUS_UPDATE;
is_read = false;
break;
case DP_AUX_I2C_READ:
case DP_AUX_NATIVE_READ:
case DP_AUX_I2C_READ | DP_AUX_I2C_MOT:
request = msg->request;
is_read = true;
break;
default:
dev_err(mtk_dp->dev, "invalid aux cmd = %d\n",
msg->request);
ret = -EINVAL;
goto err;
}
do {
size_t to_access = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES,
msg->size - accessed_bytes);
ret = mtk_dp_aux_do_transfer(mtk_dp, is_read, request,
msg->address + accessed_bytes,
msg->buffer + accessed_bytes,
to_access, &msg->reply);
if (ret) {
dev_info(mtk_dp->dev,
"Failed to do AUX transfer: %d\n", ret);
goto err;
}
accessed_bytes += to_access;
} while (accessed_bytes < msg->size);
return msg->size;
err:
msg->reply = DP_AUX_NATIVE_REPLY_NACK | DP_AUX_I2C_REPLY_NACK;
return ret;
}
static int mtk_dp_poweron(struct mtk_dp *mtk_dp)
{
int ret;
ret = phy_init(mtk_dp->phy);
if (ret) {
dev_err(mtk_dp->dev, "Failed to initialize phy: %d\n", ret);
return ret;
}
mtk_dp_init_port(mtk_dp);
mtk_dp_power_enable(mtk_dp);
return 0;
}
static void mtk_dp_poweroff(struct mtk_dp *mtk_dp)
{
mtk_dp_power_disable(mtk_dp);
phy_exit(mtk_dp->phy);
}
static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
int ret;
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
dev_err(mtk_dp->dev, "Driver does not provide a connector!");
return -EINVAL;
}
mtk_dp->aux.drm_dev = bridge->dev;
ret = drm_dp_aux_register(&mtk_dp->aux);
if (ret) {
dev_err(mtk_dp->dev,
"failed to register DP AUX channel: %d\n", ret);
return ret;
}
ret = mtk_dp_poweron(mtk_dp);
if (ret)
goto err_aux_register;
if (mtk_dp->next_bridge) {
ret = drm_bridge_attach(bridge->encoder, mtk_dp->next_bridge,
&mtk_dp->bridge, flags);
if (ret) {
drm_warn(mtk_dp->drm_dev,
"Failed to attach external bridge: %d\n", ret);
goto err_bridge_attach;
}
}
mtk_dp->drm_dev = bridge->dev;
if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP) {
irq_clear_status_flags(mtk_dp->irq, IRQ_NOAUTOEN);
enable_irq(mtk_dp->irq);
mtk_dp_hwirq_enable(mtk_dp, true);
}
return 0;
err_bridge_attach:
mtk_dp_poweroff(mtk_dp);
err_aux_register:
drm_dp_aux_unregister(&mtk_dp->aux);
return ret;
}
static void mtk_dp_bridge_detach(struct drm_bridge *bridge)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP) {
mtk_dp_hwirq_enable(mtk_dp, false);
disable_irq(mtk_dp->irq);
}
mtk_dp->drm_dev = NULL;
mtk_dp_poweroff(mtk_dp);
drm_dp_aux_unregister(&mtk_dp->aux);
}
static void mtk_dp_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
int ret;
mtk_dp->conn = drm_atomic_get_new_connector_for_encoder(old_state->base.state,
bridge->encoder);
if (!mtk_dp->conn) {
drm_err(mtk_dp->drm_dev,
"Can't enable bridge as connector is missing\n");
return;
}
mtk_dp_aux_panel_poweron(mtk_dp, true);
/* Training */
ret = mtk_dp_training(mtk_dp);
if (ret) {
drm_err(mtk_dp->drm_dev, "Training failed, %d\n", ret);
goto power_off_aux;
}
ret = mtk_dp_video_config(mtk_dp);
if (ret)
goto power_off_aux;
mtk_dp_video_enable(mtk_dp, true);
mtk_dp->audio_enable =
mtk_dp_edid_parse_audio_capabilities(mtk_dp,
&mtk_dp->info.audio_cur_cfg);
if (mtk_dp->audio_enable) {
mtk_dp_audio_setup(mtk_dp, &mtk_dp->info.audio_cur_cfg);
mtk_dp_audio_mute(mtk_dp, false);
} else {
memset(&mtk_dp->info.audio_cur_cfg, 0,
sizeof(mtk_dp->info.audio_cur_cfg));
}
mtk_dp->enabled = true;
mtk_dp_update_plugged_status(mtk_dp);
return;
power_off_aux:
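/* On failure, power off the AUX by dropping back to the bandgap + TPLL power state */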
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
DP_PWR_STATE_BANDGAP_TPLL,
DP_PWR_STATE_MASK);
}
static void mtk_dp_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
mtk_dp->enabled = false;
mtk_dp_update_plugged_status(mtk_dp);
mtk_dp_video_enable(mtk_dp, false);
mtk_dp_audio_mute(mtk_dp, true);
if (mtk_dp->train_info.cable_plugged_in) {
drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
usleep_range(2000, 3000);
}
/* power off aux */
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
DP_PWR_STATE_BANDGAP_TPLL,
DP_PWR_STATE_MASK);
/* Ensure the sink is muted */
msleep(20);
}
static enum drm_mode_status
mtk_dp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
u32 bpp = info->color_formats & DRM_COLOR_FORMAT_YCBCR422 ? 16 : 24;
u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
drm_dp_max_lane_count(mtk_dp->rx_cap),
drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
mtk_dp->max_lanes);
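/*
* rate approximates the link payload bandwidth in kB/s: the per-lane
* link rate in kHz numerically equals that lane's payload bandwidth
* in kB/s (one data byte per 8b/10b symbol), multiplied by the lane
* count. mode->clock * bpp / 8 is the required pixel data rate in
* kB/s, so modes exceeding the link capacity are rejected.
*/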
if (rate < mode->clock * bpp / 8)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static u32 *mtk_dp_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
unsigned int *num_output_fmts)
{
u32 *output_fmts;
*num_output_fmts = 0;
output_fmts = kmalloc(sizeof(*output_fmts), GFP_KERNEL);
if (!output_fmts)
return NULL;
*num_output_fmts = 1;
output_fmts[0] = MEDIA_BUS_FMT_FIXED;
return output_fmts;
}
static const u32 mt8195_input_fmts[] = {
MEDIA_BUS_FMT_RGB888_1X24,
MEDIA_BUS_FMT_YUV8_1X24,
MEDIA_BUS_FMT_YUYV8_1X16,
};
static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
u32 output_fmt,
unsigned int *num_input_fmts)
{
u32 *input_fmts;
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct drm_display_info *display_info =
&conn_state->connector->display_info;
u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
drm_dp_max_lane_count(mtk_dp->rx_cap),
drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
mtk_dp->max_lanes);
*num_input_fmts = 0;
/*
* If the link rate is smaller than the data rate of RGB888 but larger
* than the data rate of YUV422, and the sink device supports YUV422,
* output the YUV422 format. With this condition we can support more
* resolutions.
*/
if ((rate < (mode->clock * 24 / 8)) &&
(rate > (mode->clock * 16 / 8)) &&
(display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)) {
input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
if (!input_fmts)
return NULL;
*num_input_fmts = 1;
input_fmts[0] = MEDIA_BUS_FMT_YUYV8_1X16;
} else {
input_fmts = kcalloc(ARRAY_SIZE(mt8195_input_fmts),
sizeof(*input_fmts),
GFP_KERNEL);
if (!input_fmts)
return NULL;
*num_input_fmts = ARRAY_SIZE(mt8195_input_fmts);
memcpy(input_fmts, mt8195_input_fmts, sizeof(mt8195_input_fmts));
}
return input_fmts;
}
static int mtk_dp_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
struct drm_crtc *crtc = conn_state->crtc;
unsigned int input_bus_format;
input_bus_format = bridge_state->input_bus_cfg.format;
dev_dbg(mtk_dp->dev, "input format 0x%04x, output format 0x%04x\n",
bridge_state->input_bus_cfg.format,
bridge_state->output_bus_cfg.format);
if (input_bus_format == MEDIA_BUS_FMT_YUYV8_1X16)
mtk_dp->info.format = DP_PIXELFORMAT_YUV422;
else
mtk_dp->info.format = DP_PIXELFORMAT_RGB;
if (!crtc) {
drm_err(mtk_dp->drm_dev,
"Can't enable bridge as connector state doesn't have a crtc\n");
return -EINVAL;
}
drm_display_mode_to_videomode(&crtc_state->adjusted_mode, &mtk_dp->info.vm);
return 0;
}
static const struct drm_bridge_funcs mtk_dp_bridge_funcs = {
.atomic_check = mtk_dp_bridge_atomic_check,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_get_output_bus_fmts = mtk_dp_bridge_atomic_get_output_bus_fmts,
.atomic_get_input_bus_fmts = mtk_dp_bridge_atomic_get_input_bus_fmts,
.atomic_reset = drm_atomic_helper_bridge_reset,
.attach = mtk_dp_bridge_attach,
.detach = mtk_dp_bridge_detach,
.atomic_enable = mtk_dp_bridge_atomic_enable,
.atomic_disable = mtk_dp_bridge_atomic_disable,
.mode_valid = mtk_dp_bridge_mode_valid,
.get_edid = mtk_dp_get_edid,
.detect = mtk_dp_bdg_detect,
};
static void mtk_dp_debounce_timer(struct timer_list *t)
{
struct mtk_dp *mtk_dp = from_timer(mtk_dp, t, debounce_timer);
mtk_dp->need_debounce = true;
}
/*
* HDMI audio codec callbacks
*/
static int mtk_dp_audio_hw_params(struct device *dev, void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
if (!mtk_dp->enabled) {
dev_err(mtk_dp->dev, "%s, DP is not ready!\n", __func__);
return -ENODEV;
}
mtk_dp->info.audio_cur_cfg.channels = params->cea.channels;
mtk_dp->info.audio_cur_cfg.sample_rate = params->sample_rate;
mtk_dp_audio_setup(mtk_dp, &mtk_dp->info.audio_cur_cfg);
return 0;
}
static int mtk_dp_audio_startup(struct device *dev, void *data)
{
struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
mtk_dp_audio_mute(mtk_dp, false);
return 0;
}
static void mtk_dp_audio_shutdown(struct device *dev, void *data)
{
struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
mtk_dp_audio_mute(mtk_dp, true);
}
static int mtk_dp_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
size_t len)
{
struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
if (mtk_dp->enabled)
memcpy(buf, mtk_dp->conn->eld, len);
else
memset(buf, 0, len);
return 0;
}
static int mtk_dp_audio_hook_plugged_cb(struct device *dev, void *data,
hdmi_codec_plugged_cb fn,
struct device *codec_dev)
{
struct mtk_dp *mtk_dp = data;
mutex_lock(&mtk_dp->update_plugged_status_lock);
mtk_dp->plugged_cb = fn;
mtk_dp->codec_dev = codec_dev;
mutex_unlock(&mtk_dp->update_plugged_status_lock);
mtk_dp_update_plugged_status(mtk_dp);
return 0;
}
static const struct hdmi_codec_ops mtk_dp_audio_codec_ops = {
.hw_params = mtk_dp_audio_hw_params,
.audio_startup = mtk_dp_audio_startup,
.audio_shutdown = mtk_dp_audio_shutdown,
.get_eld = mtk_dp_audio_get_eld,
.hook_plugged_cb = mtk_dp_audio_hook_plugged_cb,
.no_capture_mute = 1,
};
static int mtk_dp_register_audio_driver(struct device *dev)
{
struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
struct hdmi_codec_pdata codec_data = {
.ops = &mtk_dp_audio_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
.data = mtk_dp,
};
mtk_dp->audio_pdev = platform_device_register_data(dev,
HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO,
&codec_data,
sizeof(codec_data));
return PTR_ERR_OR_ZERO(mtk_dp->audio_pdev);
}
static int mtk_dp_register_phy(struct mtk_dp *mtk_dp)
{
struct device *dev = mtk_dp->dev;
mtk_dp->phy_dev = platform_device_register_data(dev, "mediatek-dp-phy",
PLATFORM_DEVID_AUTO,
&mtk_dp->regs,
sizeof(struct regmap *));
if (IS_ERR(mtk_dp->phy_dev))
return dev_err_probe(dev, PTR_ERR(mtk_dp->phy_dev),
"Failed to create device mediatek-dp-phy\n");
mtk_dp_get_calibration_data(mtk_dp);
mtk_dp->phy = devm_phy_get(&mtk_dp->phy_dev->dev, "dp");
if (IS_ERR(mtk_dp->phy)) {
platform_device_unregister(mtk_dp->phy_dev);
return dev_err_probe(dev, PTR_ERR(mtk_dp->phy), "Failed to get phy\n");
}
return 0;
}
static int mtk_dp_edp_link_panel(struct drm_dp_aux *mtk_aux)
{
struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
struct device *dev = mtk_aux->dev;
int ret;
mtk_dp->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
/* Power off the DP and AUX: either detection is done, or no panel present */
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
DP_PWR_STATE_BANDGAP_TPLL,
DP_PWR_STATE_MASK);
mtk_dp_power_disable(mtk_dp);
if (IS_ERR(mtk_dp->next_bridge)) {
ret = PTR_ERR(mtk_dp->next_bridge);
mtk_dp->next_bridge = NULL;
return ret;
}
/* For eDP, we add the bridge only if the panel was found */
ret = devm_drm_bridge_add(dev, &mtk_dp->bridge);
if (ret)
return ret;
return 0;
}
static int mtk_dp_probe(struct platform_device *pdev)
{
struct mtk_dp *mtk_dp;
struct device *dev = &pdev->dev;
int ret;
mtk_dp = devm_kzalloc(dev, sizeof(*mtk_dp), GFP_KERNEL);
if (!mtk_dp)
return -ENOMEM;
mtk_dp->dev = dev;
mtk_dp->data = (struct mtk_dp_data *)of_device_get_match_data(dev);
ret = mtk_dp_dt_parse(mtk_dp, pdev);
if (ret)
return dev_err_probe(dev, ret, "Failed to parse dt\n");
/*
* Request the interrupt and install service routine only if we are
* on full DisplayPort.
* For eDP, polling the HPD instead is more convenient because we
* don't expect any (un)plug events during runtime, hence we can
* avoid some locking.
*/
if (mtk_dp->data->bridge_type != DRM_MODE_CONNECTOR_eDP) {
mtk_dp->irq = platform_get_irq(pdev, 0);
if (mtk_dp->irq < 0)
return dev_err_probe(dev, mtk_dp->irq,
"failed to request dp irq resource\n");
spin_lock_init(&mtk_dp->irq_thread_lock);
irq_set_status_flags(mtk_dp->irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(dev, mtk_dp->irq, mtk_dp_hpd_event,
mtk_dp_hpd_event_thread,
IRQ_TYPE_LEVEL_HIGH, dev_name(dev),
mtk_dp);
if (ret)
return dev_err_probe(dev, ret,
"failed to request mediatek dptx irq\n");
mtk_dp->need_debounce = true;
timer_setup(&mtk_dp->debounce_timer, mtk_dp_debounce_timer, 0);
}
mtk_dp->aux.name = "aux_mtk_dp";
mtk_dp->aux.dev = dev;
mtk_dp->aux.transfer = mtk_dp_aux_transfer;
mtk_dp->aux.wait_hpd_asserted = mtk_dp_wait_hpd_asserted;
drm_dp_aux_init(&mtk_dp->aux);
platform_set_drvdata(pdev, mtk_dp);
if (mtk_dp->data->audio_supported) {
mutex_init(&mtk_dp->update_plugged_status_lock);
ret = mtk_dp_register_audio_driver(dev);
if (ret) {
dev_err(dev, "Failed to register audio driver: %d\n",
ret);
return ret;
}
}
ret = mtk_dp_register_phy(mtk_dp);
if (ret)
return ret;
mtk_dp->bridge.funcs = &mtk_dp_bridge_funcs;
mtk_dp->bridge.of_node = dev->of_node;
mtk_dp->bridge.type = mtk_dp->data->bridge_type;
if (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP) {
/*
* Set the data lanes to idle in case the bootloader didn't
* properly close the eDP port, to avoid stalls, and then
* reinitialize, reset and power on the AUX block.
*/
mtk_dp_set_idle_pattern(mtk_dp, true);
mtk_dp_initialize_aux_settings(mtk_dp);
mtk_dp_power_enable(mtk_dp);
/* Disable HW interrupts: we don't need any for eDP */
mtk_dp_hwirq_enable(mtk_dp, false);
/*
* Power on the AUX to allow reading the EDID from the aux-bus:
* please note that it is necessary to power off in the
* .done_probing() callback (mtk_dp_edp_link_panel), as only
* there can we safely assume that the EDID read has finished.
*/
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
DP_PWR_STATE_BANDGAP_TPLL_LANE,
DP_PWR_STATE_MASK);
ret = devm_of_dp_aux_populate_bus(&mtk_dp->aux, mtk_dp_edp_link_panel);
if (ret) {
/* -ENODEV means that the panel is not on the aux-bus */
if (ret == -ENODEV) {
ret = mtk_dp_edp_link_panel(&mtk_dp->aux);
if (ret)
return ret;
} else {
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
DP_PWR_STATE_BANDGAP_TPLL,
DP_PWR_STATE_MASK);
mtk_dp_power_disable(mtk_dp);
return ret;
}
}
} else {
mtk_dp->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
ret = devm_drm_bridge_add(dev, &mtk_dp->bridge);
if (ret)
return dev_err_probe(dev, ret, "Failed to add bridge\n");
}
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
return 0;
}
static void mtk_dp_remove(struct platform_device *pdev)
{
struct mtk_dp *mtk_dp = platform_get_drvdata(pdev);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (mtk_dp->data->bridge_type != DRM_MODE_CONNECTOR_eDP)
del_timer_sync(&mtk_dp->debounce_timer);
platform_device_unregister(mtk_dp->phy_dev);
if (mtk_dp->audio_pdev)
platform_device_unregister(mtk_dp->audio_pdev);
}
#ifdef CONFIG_PM_SLEEP
static int mtk_dp_suspend(struct device *dev)
{
struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
mtk_dp_power_disable(mtk_dp);
if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP)
mtk_dp_hwirq_enable(mtk_dp, false);
pm_runtime_put_sync(dev);
return 0;
}
static int mtk_dp_resume(struct device *dev)
{
struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
pm_runtime_get_sync(dev);
mtk_dp_init_port(mtk_dp);
if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP)
mtk_dp_hwirq_enable(mtk_dp, true);
mtk_dp_power_enable(mtk_dp);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(mtk_dp_pm_ops, mtk_dp_suspend, mtk_dp_resume);
static const struct mtk_dp_data mt8195_edp_data = {
.bridge_type = DRM_MODE_CONNECTOR_eDP,
.smc_cmd = MTK_DP_SIP_ATF_EDP_VIDEO_UNMUTE,
.efuse_fmt = mt8195_edp_efuse_fmt,
.audio_supported = false,
};
static const struct mtk_dp_data mt8195_dp_data = {
.bridge_type = DRM_MODE_CONNECTOR_DisplayPort,
.smc_cmd = MTK_DP_SIP_ATF_VIDEO_UNMUTE,
.efuse_fmt = mt8195_dp_efuse_fmt,
.audio_supported = true,
};
static const struct of_device_id mtk_dp_of_match[] = {
{
.compatible = "mediatek,mt8195-edp-tx",
.data = &mt8195_edp_data,
},
{
.compatible = "mediatek,mt8195-dp-tx",
.data = &mt8195_dp_data,
},
{},
};
MODULE_DEVICE_TABLE(of, mtk_dp_of_match);
static struct platform_driver mtk_dp_driver = {
.probe = mtk_dp_probe,
.remove_new = mtk_dp_remove,
.driver = {
.name = "mediatek-drm-dp",
.of_match_table = mtk_dp_of_match,
.pm = &mtk_dp_pm_ops,
},
};
module_platform_driver(mtk_dp_driver);
MODULE_AUTHOR("Jitao Shi <[email protected]>");
MODULE_AUTHOR("Markus Schneider-Pargmann <[email protected]>");
MODULE_AUTHOR("Bo-Chen Chen <[email protected]>");
MODULE_DESCRIPTION("MediaTek DisplayPort Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/mediatek/mtk_dp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 MediaTek Inc.
*/
#include <drm/drm_fourcc.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_REG_RDMA_INT_ENABLE 0x0000
#define DISP_REG_RDMA_INT_STATUS 0x0004
#define RDMA_TARGET_LINE_INT BIT(5)
#define RDMA_FIFO_UNDERFLOW_INT BIT(4)
#define RDMA_EOF_ABNORMAL_INT BIT(3)
#define RDMA_FRAME_END_INT BIT(2)
#define RDMA_FRAME_START_INT BIT(1)
#define RDMA_REG_UPDATE_INT BIT(0)
#define DISP_REG_RDMA_GLOBAL_CON 0x0010
#define RDMA_ENGINE_EN BIT(0)
#define RDMA_MODE_MEMORY BIT(1)
#define DISP_REG_RDMA_SIZE_CON_0 0x0014
#define RDMA_MATRIX_ENABLE BIT(17)
#define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20)
#define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20)
#define DISP_REG_RDMA_SIZE_CON_1 0x0018
#define DISP_REG_RDMA_TARGET_LINE 0x001c
#define DISP_RDMA_MEM_CON 0x0024
#define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4)
#define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4)
#define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4)
#define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4)
#define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4)
#define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4)
#define MEM_MODE_INPUT_SWAP BIT(8)
#define DISP_RDMA_MEM_SRC_PITCH 0x002c
#define DISP_RDMA_MEM_GMC_SETTING_0 0x0030
#define DISP_REG_RDMA_FIFO_CON 0x0040
#define RDMA_FIFO_UNDERFLOW_EN BIT(31)
#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16)
#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16)
#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size)
#define DISP_RDMA_MEM_START_ADDR 0x0f00
#define RDMA_MEM_GMC 0x40402020
static const u32 mt8173_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
};
struct mtk_disp_rdma_data {
unsigned int fifo_size;
const u32 *formats;
size_t num_formats;
};
/*
* struct mtk_disp_rdma - DISP_RDMA driver structure
* @data: local driver data
*/
struct mtk_disp_rdma {
struct clk *clk;
void __iomem *regs;
struct cmdq_client_reg cmdq_reg;
const struct mtk_disp_rdma_data *data;
void (*vblank_cb)(void *data);
void *vblank_cb_data;
u32 fifo_size;
};
static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id)
{
struct mtk_disp_rdma *priv = dev_id;
/* Clear frame completion interrupt */
writel(0x0, priv->regs + DISP_REG_RDMA_INT_STATUS);
if (!priv->vblank_cb)
return IRQ_NONE;
priv->vblank_cb(priv->vblank_cb_data);
return IRQ_HANDLED;
}
static void rdma_update_bits(struct device *dev, unsigned int reg,
unsigned int mask, unsigned int val)
{
struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
unsigned int tmp = readl(rdma->regs + reg);
tmp = (tmp & ~mask) | (val & mask);
writel(tmp, rdma->regs + reg);
}
void mtk_rdma_register_vblank_cb(struct device *dev,
void (*vblank_cb)(void *),
void *vblank_cb_data)
{
struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
rdma->vblank_cb = vblank_cb;
rdma->vblank_cb_data = vblank_cb_data;
}
void mtk_rdma_unregister_vblank_cb(struct device *dev)
{
struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
rdma->vblank_cb = NULL;
rdma->vblank_cb_data = NULL;
}
void mtk_rdma_enable_vblank(struct device *dev)
{
rdma_update_bits(dev, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT,
RDMA_FRAME_END_INT);
}
void mtk_rdma_disable_vblank(struct device *dev)
{
rdma_update_bits(dev, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0);
}
const u32 *mtk_rdma_get_formats(struct device *dev)
{
struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
return rdma->data->formats;
}
size_t mtk_rdma_get_num_formats(struct device *dev)
{
struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
return rdma->data->num_formats;
}
int mtk_rdma_clk_enable(struct device *dev)
{
struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
return clk_prepare_enable(rdma->clk);
}
void mtk_rdma_clk_disable(struct device *dev)
{
struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
clk_disable_unprepare(rdma->clk);
}
void mtk_rdma_start(struct device *dev)
{
rdma_update_bits(dev, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN,
RDMA_ENGINE_EN);
}
void mtk_rdma_stop(struct device *dev)
{
rdma_update_bits(dev, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN, 0);
}
void mtk_rdma_config(struct device *dev, unsigned int width,
unsigned int height, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
unsigned int threshold;
unsigned int reg;
struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
u32 rdma_fifo_size;
mtk_ddp_write_mask(cmdq_pkt, width, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_SIZE_CON_0, 0xfff);
mtk_ddp_write_mask(cmdq_pkt, height, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_SIZE_CON_1, 0xfffff);
if (rdma->fifo_size)
rdma_fifo_size = rdma->fifo_size;
else
rdma_fifo_size = RDMA_FIFO_SIZE(rdma);
/*
* Enable FIFO underflow since DSI and DPI can't be blocked.
* Keep the FIFO pseudo size at its reset default of 8 KiB. Set the
* output threshold to 70% of the max FIFO size to make sure the
* threshold will not overflow.
*/
threshold = rdma_fifo_size * 7 / 10;
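/*
* Both the FIFO pseudo size and the output valid threshold register
* fields are expressed in 16-byte units (see the macros above).
*/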
reg = RDMA_FIFO_UNDERFLOW_EN |
RDMA_FIFO_PSEUDO_SIZE(rdma_fifo_size) |
RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
mtk_ddp_write(cmdq_pkt, reg, &rdma->cmdq_reg, rdma->regs, DISP_REG_RDMA_FIFO_CON);
}
static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
unsigned int fmt)
{
/* The MEM_MODE_INPUT_FORMAT_XXX values returned by this switch
* are defined in the MediaTek hardware data sheet.
* The letter order in XXX bears no relation to the data
* arrangement in memory.
*/
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
return MEM_MODE_INPUT_FORMAT_RGB565;
case DRM_FORMAT_BGR565:
return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP;
case DRM_FORMAT_RGB888:
return MEM_MODE_INPUT_FORMAT_RGB888;
case DRM_FORMAT_BGR888:
return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP;
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_RGBA8888:
return MEM_MODE_INPUT_FORMAT_ARGB8888;
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_BGRA8888:
return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
return MEM_MODE_INPUT_FORMAT_RGBA8888;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP;
case DRM_FORMAT_UYVY:
return MEM_MODE_INPUT_FORMAT_UYVY;
case DRM_FORMAT_YUYV:
return MEM_MODE_INPUT_FORMAT_YUYV;
}
}
unsigned int mtk_rdma_layer_nr(struct device *dev)
{
return 1;
}
void mtk_rdma_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
unsigned int pitch = pending->pitch & 0xffff;
unsigned int fmt = pending->format;
unsigned int con;
con = rdma_fmt_convert(rdma, fmt);
mtk_ddp_write_relaxed(cmdq_pkt, con, &rdma->cmdq_reg, rdma->regs, DISP_RDMA_MEM_CON);
if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_SIZE_CON_0,
RDMA_MATRIX_ENABLE);
mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_INT_MTX_BT601_to_RGB,
&rdma->cmdq_reg, rdma->regs, DISP_REG_RDMA_SIZE_CON_0,
RDMA_MATRIX_INT_MTX_SEL);
} else {
mtk_ddp_write_mask(cmdq_pkt, 0, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_SIZE_CON_0,
RDMA_MATRIX_ENABLE);
}
mtk_ddp_write_relaxed(cmdq_pkt, addr, &rdma->cmdq_reg, rdma->regs,
DISP_RDMA_MEM_START_ADDR);
mtk_ddp_write_relaxed(cmdq_pkt, pitch, &rdma->cmdq_reg, rdma->regs,
DISP_RDMA_MEM_SRC_PITCH);
mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, &rdma->cmdq_reg, rdma->regs,
DISP_RDMA_MEM_GMC_SETTING_0);
mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY);
}
static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
void *data)
{
return 0;
}
static void mtk_disp_rdma_unbind(struct device *dev, struct device *master,
void *data)
{
}
static const struct component_ops mtk_disp_rdma_component_ops = {
.bind = mtk_disp_rdma_bind,
.unbind = mtk_disp_rdma_unbind,
};
static int mtk_disp_rdma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_rdma *priv;
struct resource *res;
int irq;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get rdma clk\n");
return PTR_ERR(priv->clk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap rdma\n");
return PTR_ERR(priv->regs);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
if (ret)
dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif
if (of_find_property(dev->of_node, "mediatek,rdma-fifo-size", &ret)) {
ret = of_property_read_u32(dev->of_node,
"mediatek,rdma-fifo-size",
&priv->fifo_size);
if (ret) {
dev_err(dev, "Failed to get rdma fifo size\n");
return ret;
}
}
/* Disable and clear pending interrupts */
writel(0x0, priv->regs + DISP_REG_RDMA_INT_ENABLE);
writel(0x0, priv->regs + DISP_REG_RDMA_INT_STATUS);
ret = devm_request_irq(dev, irq, mtk_disp_rdma_irq_handler,
IRQF_TRIGGER_NONE, dev_name(dev), priv);
if (ret < 0) {
dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
return ret;
}
priv->data = of_device_get_match_data(dev);
platform_set_drvdata(pdev, priv);
pm_runtime_enable(dev);
ret = component_add(dev, &mtk_disp_rdma_component_ops);
if (ret) {
pm_runtime_disable(dev);
dev_err(dev, "Failed to add component: %d\n", ret);
}
return ret;
}
static void mtk_disp_rdma_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_rdma_component_ops);
pm_runtime_disable(&pdev->dev);
}
static const struct mtk_disp_rdma_data mt2701_rdma_driver_data = {
.fifo_size = SZ_4K,
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
static const struct mtk_disp_rdma_data mt8173_rdma_driver_data = {
.fifo_size = SZ_8K,
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
static const struct mtk_disp_rdma_data mt8183_rdma_driver_data = {
.fifo_size = 5 * SZ_1K,
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
static const struct mtk_disp_rdma_data mt8195_rdma_driver_data = {
.fifo_size = 1920,
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-rdma",
.data = &mt2701_rdma_driver_data},
{ .compatible = "mediatek,mt8173-disp-rdma",
.data = &mt8173_rdma_driver_data},
{ .compatible = "mediatek,mt8183-disp-rdma",
.data = &mt8183_rdma_driver_data},
{ .compatible = "mediatek,mt8195-disp-rdma",
.data = &mt8195_rdma_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
struct platform_driver mtk_disp_rdma_driver = {
.probe = mtk_disp_rdma_probe,
.remove_new = mtk_disp_rdma_remove,
.driver = {
.name = "mediatek-disp-rdma",
.owner = THIS_MODULE,
.of_match_table = mtk_disp_rdma_driver_dt_match,
},
};
| linux-master | drivers/gpu/drm/mediatek/mtk_disp_rdma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 MediaTek Inc.
* Author: Jie Qiu <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include "mtk_drm_drv.h"
#include "mtk_hdmi.h"
#define SIF1_CLOK (288)
#define DDC_DDCMCTL0 (0x0)
#define DDCM_ODRAIN BIT(31)
#define DDCM_CLK_DIV_OFFSET (16)
#define DDCM_CLK_DIV_MASK (0xfff << 16)
#define DDCM_CS_STATUS BIT(4)
#define DDCM_SCL_STATE BIT(3)
#define DDCM_SDA_STATE BIT(2)
#define DDCM_SM0EN BIT(1)
#define DDCM_SCL_STRECH BIT(0)
#define DDC_DDCMCTL1 (0x4)
#define DDCM_ACK_OFFSET (16)
#define DDCM_ACK_MASK (0xff << 16)
#define DDCM_PGLEN_OFFSET (8)
#define DDCM_PGLEN_MASK (0x7 << 8)
#define DDCM_SIF_MODE_OFFSET (4)
#define DDCM_SIF_MODE_MASK (0x7 << 4)
#define DDCM_START (0x1)
#define DDCM_WRITE_DATA (0x2)
#define DDCM_STOP (0x3)
#define DDCM_READ_DATA_NO_ACK (0x4)
#define DDCM_READ_DATA_ACK (0x5)
#define DDCM_TRI BIT(0)
#define DDC_DDCMD0 (0x8)
#define DDCM_DATA3 (0xff << 24)
#define DDCM_DATA2 (0xff << 16)
#define DDCM_DATA1 (0xff << 8)
#define DDCM_DATA0 (0xff << 0)
#define DDC_DDCMD1 (0xc)
#define DDCM_DATA7 (0xff << 24)
#define DDCM_DATA6 (0xff << 16)
#define DDCM_DATA5 (0xff << 8)
#define DDCM_DATA4 (0xff << 0)
struct mtk_hdmi_ddc {
struct i2c_adapter adap;
struct clk *clk;
void __iomem *regs;
};
static inline void sif_set_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset,
unsigned int val)
{
writel(readl(ddc->regs + offset) | val, ddc->regs + offset);
}
static inline void sif_clr_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset,
unsigned int val)
{
writel(readl(ddc->regs + offset) & ~val, ddc->regs + offset);
}
static inline bool sif_bit_is_set(struct mtk_hdmi_ddc *ddc, unsigned int offset,
unsigned int val)
{
return (readl(ddc->regs + offset) & val) == val;
}
static inline void sif_write_mask(struct mtk_hdmi_ddc *ddc, unsigned int offset,
unsigned int mask, unsigned int shift,
unsigned int val)
{
unsigned int tmp;
tmp = readl(ddc->regs + offset);
tmp &= ~mask;
tmp |= (val << shift) & mask;
writel(tmp, ddc->regs + offset);
}
static inline unsigned int sif_read_mask(struct mtk_hdmi_ddc *ddc,
unsigned int offset, unsigned int mask,
unsigned int shift)
{
return (readl(ddc->regs + offset) & mask) >> shift;
}
static void ddcm_trigger_mode(struct mtk_hdmi_ddc *ddc, int mode)
{
u32 val;
sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_SIF_MODE_MASK,
DDCM_SIF_MODE_OFFSET, mode);
sif_set_bit(ddc, DDC_DDCMCTL1, DDCM_TRI);
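/* Wait (up to 20 ms) for the hardware to clear the trigger bit; a timeout is not treated as an error here */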
readl_poll_timeout(ddc->regs + DDC_DDCMCTL1, val,
(val & DDCM_TRI) != DDCM_TRI, 4, 20000);
}
static int mtk_hdmi_ddc_read_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg)
{
struct device *dev = ddc->adap.dev.parent;
u32 remain_count, ack_count, ack_final, read_count, temp_count;
u32 index = 0;
u32 ack;
int i;
ddcm_trigger_mode(ddc, DDCM_START);
sif_write_mask(ddc, DDC_DDCMD0, 0xff, 0, (msg->addr << 1) | 0x01);
sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET,
0x00);
ddcm_trigger_mode(ddc, DDCM_WRITE_DATA);
ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET);
dev_dbg(dev, "ack = 0x%x\n", ack);
if (ack != 0x01) {
dev_err(dev, "i2c ack err!\n");
return -ENXIO;
}
remain_count = msg->len;
ack_count = (msg->len - 1) / 8;
ack_final = 0;
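/*
* The DDC hardware holds at most 8 data bytes per transfer (registers
* DDC_DDCMD0/DDC_DDCMD1), so the read is split into 8-byte chunks.
* Every chunk but the last is ACKed; the final chunk is read with
* NO_ACK to terminate the I2C read transaction.
*/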
while (remain_count > 0) {
if (ack_count > 0) {
read_count = 8;
ack_final = 0;
ack_count--;
} else {
read_count = remain_count;
ack_final = 1;
}
sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK,
DDCM_PGLEN_OFFSET, read_count - 1);
ddcm_trigger_mode(ddc, (ack_final == 1) ?
DDCM_READ_DATA_NO_ACK :
DDCM_READ_DATA_ACK);
ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK,
DDCM_ACK_OFFSET);
temp_count = 0;
while (((ack & (1 << temp_count)) != 0) && (temp_count < 8))
temp_count++;
if (((ack_final == 1) && (temp_count != (read_count - 1))) ||
((ack_final == 0) && (temp_count != read_count))) {
dev_err(dev, "Address NACK! ACK(0x%x)\n", ack);
break;
}
for (i = read_count; i >= 1; i--) {
int shift;
int offset;
if (i > 4) {
offset = DDC_DDCMD1;
shift = (i - 5) * 8;
} else {
offset = DDC_DDCMD0;
shift = (i - 1) * 8;
}
msg->buf[index + i - 1] = sif_read_mask(ddc, offset,
0xff << shift,
shift);
}
remain_count -= read_count;
index += read_count;
}
return 0;
}
static int mtk_hdmi_ddc_write_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg)
{
struct device *dev = ddc->adap.dev.parent;
u32 ack;
ddcm_trigger_mode(ddc, DDCM_START);
sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA0, 0, msg->addr << 1);
sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA1, 8, msg->buf[0]);
sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET,
0x1);
ddcm_trigger_mode(ddc, DDCM_WRITE_DATA);
ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET);
dev_dbg(dev, "ack = %d\n", ack);
if (ack != 0x03) {
dev_err(dev, "i2c ack err!\n");
return -EIO;
}
return 0;
}
static int mtk_hdmi_ddc_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs, int num)
{
struct mtk_hdmi_ddc *ddc = adapter->algo_data;
struct device *dev = adapter->dev.parent;
int ret;
int i;
if (!ddc) {
dev_err(dev, "invalid arguments\n");
return -EINVAL;
}
sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SCL_STRECH);
sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SM0EN);
sif_clr_bit(ddc, DDC_DDCMCTL0, DDCM_ODRAIN);
if (sif_bit_is_set(ddc, DDC_DDCMCTL1, DDCM_TRI)) {
dev_err(dev, "ddc line is busy!\n");
return -EBUSY;
}
sif_write_mask(ddc, DDC_DDCMCTL0, DDCM_CLK_DIV_MASK,
DDCM_CLK_DIV_OFFSET, SIF1_CLOK);
for (i = 0; i < num; i++) {
struct i2c_msg *msg = &msgs[i];
dev_dbg(dev, "i2c msg, adr:0x%x, flags:%d, len :0x%x\n",
msg->addr, msg->flags, msg->len);
if (msg->flags & I2C_M_RD)
ret = mtk_hdmi_ddc_read_msg(ddc, msg);
else
ret = mtk_hdmi_ddc_write_msg(ddc, msg);
if (ret < 0)
goto xfer_end;
}
ddcm_trigger_mode(ddc, DDCM_STOP);
return i;
xfer_end:
ddcm_trigger_mode(ddc, DDCM_STOP);
dev_err(dev, "ddc failed!\n");
return ret;
}
static u32 mtk_hdmi_ddc_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm mtk_hdmi_ddc_algorithm = {
.master_xfer = mtk_hdmi_ddc_xfer,
.functionality = mtk_hdmi_ddc_func,
};
static int mtk_hdmi_ddc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_hdmi_ddc *ddc;
struct resource *mem;
int ret;
ddc = devm_kzalloc(dev, sizeof(struct mtk_hdmi_ddc), GFP_KERNEL);
if (!ddc)
return -ENOMEM;
ddc->clk = devm_clk_get(dev, "ddc-i2c");
if (IS_ERR(ddc->clk)) {
dev_err(dev, "get ddc_clk failed: %p ,\n", ddc->clk);
return PTR_ERR(ddc->clk);
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ddc->regs = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(ddc->regs))
return PTR_ERR(ddc->regs);
ret = clk_prepare_enable(ddc->clk);
if (ret) {
dev_err(dev, "enable ddc clk failed!\n");
return ret;
}
strscpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name));
ddc->adap.owner = THIS_MODULE;
ddc->adap.class = I2C_CLASS_DDC;
ddc->adap.algo = &mtk_hdmi_ddc_algorithm;
ddc->adap.retries = 3;
ddc->adap.dev.of_node = dev->of_node;
ddc->adap.algo_data = ddc;
ddc->adap.dev.parent = &pdev->dev;
ret = i2c_add_adapter(&ddc->adap);
if (ret < 0) {
dev_err(dev, "failed to add bus to i2c core\n");
goto err_clk_disable;
}
platform_set_drvdata(pdev, ddc);
dev_dbg(dev, "ddc->adap: %p\n", &ddc->adap);
dev_dbg(dev, "ddc->clk: %p\n", ddc->clk);
dev_dbg(dev, "physical adr: %pa, end: %pa\n", &mem->start,
&mem->end);
return 0;
err_clk_disable:
clk_disable_unprepare(ddc->clk);
return ret;
}
static void mtk_hdmi_ddc_remove(struct platform_device *pdev)
{
struct mtk_hdmi_ddc *ddc = platform_get_drvdata(pdev);
i2c_del_adapter(&ddc->adap);
clk_disable_unprepare(ddc->clk);
}
static const struct of_device_id mtk_hdmi_ddc_match[] = {
{ .compatible = "mediatek,mt8173-hdmi-ddc", },
{},
};
MODULE_DEVICE_TABLE(of, mtk_hdmi_ddc_match);
struct platform_driver mtk_hdmi_ddc_driver = {
.probe = mtk_hdmi_ddc_probe,
.remove_new = mtk_hdmi_ddc_remove,
.driver = {
.name = "mediatek-hdmi-ddc",
.of_match_table = mtk_hdmi_ddc_match,
},
};
MODULE_AUTHOR("Jie Qiu <[email protected]>");
MODULE_DESCRIPTION("MediaTek HDMI DDC Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 MediaTek Inc.
* Author: Jie Qiu <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "mtk_cec.h"
#include "mtk_hdmi.h"
#include "mtk_drm_drv.h"
#define TR_CONFIG 0x00
#define CLEAR_CEC_IRQ BIT(15)
#define CEC_CKGEN 0x04
#define CEC_32K_PDN BIT(19)
#define PDN BIT(16)
#define RX_EVENT 0x54
#define HDMI_PORD BIT(25)
#define HDMI_HTPLG BIT(24)
#define HDMI_PORD_INT_EN BIT(9)
#define HDMI_HTPLG_INT_EN BIT(8)
#define RX_GEN_WD 0x58
#define HDMI_PORD_INT_32K_STATUS BIT(26)
#define RX_RISC_INT_32K_STATUS BIT(25)
#define HDMI_HTPLG_INT_32K_STATUS BIT(24)
#define HDMI_PORD_INT_32K_CLR BIT(18)
#define RX_INT_32K_CLR BIT(17)
#define HDMI_HTPLG_INT_32K_CLR BIT(16)
#define HDMI_PORD_INT_32K_STA_MASK BIT(10)
#define RX_RISC_INT_32K_STA_MASK BIT(9)
#define HDMI_HTPLG_INT_32K_STA_MASK BIT(8)
#define HDMI_PORD_INT_32K_EN BIT(2)
#define RX_INT_32K_EN BIT(1)
#define HDMI_HTPLG_INT_32K_EN BIT(0)
#define NORMAL_INT_CTRL 0x5C
#define HDMI_HTPLG_INT_STA BIT(0)
#define HDMI_PORD_INT_STA BIT(1)
#define HDMI_HTPLG_INT_CLR BIT(16)
#define HDMI_PORD_INT_CLR BIT(17)
#define HDMI_FULL_INT_CLR BIT(20)
struct mtk_cec {
void __iomem *regs;
struct clk *clk;
int irq;
bool hpd;
void (*hpd_event)(bool hpd, struct device *dev);
struct device *hdmi_dev;
spinlock_t lock;
};
static void mtk_cec_clear_bits(struct mtk_cec *cec, unsigned int offset,
unsigned int bits)
{
void __iomem *reg = cec->regs + offset;
u32 tmp;
tmp = readl(reg);
tmp &= ~bits;
writel(tmp, reg);
}
static void mtk_cec_set_bits(struct mtk_cec *cec, unsigned int offset,
unsigned int bits)
{
void __iomem *reg = cec->regs + offset;
u32 tmp;
tmp = readl(reg);
tmp |= bits;
writel(tmp, reg);
}
static void mtk_cec_mask(struct mtk_cec *cec, unsigned int offset,
unsigned int val, unsigned int mask)
{
u32 tmp = readl(cec->regs + offset) & ~mask;
tmp |= val & mask;
writel(tmp, cec->regs + offset);
}
void mtk_cec_set_hpd_event(struct device *dev,
void (*hpd_event)(bool hpd, struct device *dev),
struct device *hdmi_dev)
{
struct mtk_cec *cec = dev_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&cec->lock, flags);
cec->hdmi_dev = hdmi_dev;
cec->hpd_event = hpd_event;
spin_unlock_irqrestore(&cec->lock, flags);
}
bool mtk_cec_hpd_high(struct device *dev)
{
struct mtk_cec *cec = dev_get_drvdata(dev);
unsigned int status;
status = readl(cec->regs + RX_EVENT);
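/* HPD is reported high only when both HDMI_PORD and HDMI_HTPLG are set */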
return (status & (HDMI_PORD | HDMI_HTPLG)) == (HDMI_PORD | HDMI_HTPLG);
}
static void mtk_cec_htplg_irq_init(struct mtk_cec *cec)
{
mtk_cec_mask(cec, CEC_CKGEN, 0 | CEC_32K_PDN, PDN | CEC_32K_PDN);
mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
mtk_cec_mask(cec, RX_GEN_WD, 0, HDMI_PORD_INT_32K_CLR | RX_INT_32K_CLR |
HDMI_HTPLG_INT_32K_CLR | HDMI_PORD_INT_32K_EN |
RX_INT_32K_EN | HDMI_HTPLG_INT_32K_EN);
}
static void mtk_cec_htplg_irq_enable(struct mtk_cec *cec)
{
mtk_cec_set_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN);
}
static void mtk_cec_htplg_irq_disable(struct mtk_cec *cec)
{
mtk_cec_clear_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN);
}
static void mtk_cec_clear_htplg_irq(struct mtk_cec *cec)
{
mtk_cec_set_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ);
mtk_cec_set_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR |
HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR);
mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
usleep_range(5, 10);
mtk_cec_clear_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR |
HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR);
mtk_cec_clear_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ);
mtk_cec_clear_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
}
static void mtk_cec_hpd_event(struct mtk_cec *cec, bool hpd)
{
void (*hpd_event)(bool hpd, struct device *dev);
struct device *hdmi_dev;
unsigned long flags;
spin_lock_irqsave(&cec->lock, flags);
hpd_event = cec->hpd_event;
hdmi_dev = cec->hdmi_dev;
spin_unlock_irqrestore(&cec->lock, flags);
if (hpd_event)
hpd_event(hpd, hdmi_dev);
}
static irqreturn_t mtk_cec_htplg_isr_thread(int irq, void *arg)
{
struct device *dev = arg;
struct mtk_cec *cec = dev_get_drvdata(dev);
bool hpd;
mtk_cec_clear_htplg_irq(cec);
hpd = mtk_cec_hpd_high(dev);
if (cec->hpd != hpd) {
dev_dbg(dev, "hotplug event! cur hpd = %d, hpd = %d\n",
cec->hpd, hpd);
cec->hpd = hpd;
mtk_cec_hpd_event(cec, hpd);
}
return IRQ_HANDLED;
}
static int mtk_cec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_cec *cec;
struct resource *res;
int ret;
cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
if (!cec)
return -ENOMEM;
platform_set_drvdata(pdev, cec);
spin_lock_init(&cec->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
cec->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(cec->regs)) {
ret = PTR_ERR(cec->regs);
dev_err(dev, "Failed to ioremap cec: %d\n", ret);
return ret;
}
cec->clk = devm_clk_get(dev, NULL);
if (IS_ERR(cec->clk)) {
ret = PTR_ERR(cec->clk);
dev_err(dev, "Failed to get cec clock: %d\n", ret);
return ret;
}
cec->irq = platform_get_irq(pdev, 0);
if (cec->irq < 0)
return cec->irq;
ret = devm_request_threaded_irq(dev, cec->irq, NULL,
mtk_cec_htplg_isr_thread,
IRQF_SHARED | IRQF_TRIGGER_LOW |
IRQF_ONESHOT, "hdmi hpd", dev);
if (ret) {
dev_err(dev, "Failed to register cec irq: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(cec->clk);
if (ret) {
dev_err(dev, "Failed to enable cec clock: %d\n", ret);
return ret;
}
mtk_cec_htplg_irq_init(cec);
mtk_cec_htplg_irq_enable(cec);
return 0;
}
static void mtk_cec_remove(struct platform_device *pdev)
{
struct mtk_cec *cec = platform_get_drvdata(pdev);
mtk_cec_htplg_irq_disable(cec);
clk_disable_unprepare(cec->clk);
}
static const struct of_device_id mtk_cec_of_ids[] = {
{ .compatible = "mediatek,mt8173-cec", },
{}
};
MODULE_DEVICE_TABLE(of, mtk_cec_of_ids);
struct platform_driver mtk_cec_driver = {
.probe = mtk_cec_probe,
.remove_new = mtk_cec_remove,
.driver = {
.name = "mediatek-cec",
.of_match_table = mtk_cec_of_ids,
},
};
| linux-master | drivers/gpu/drm/mediatek/mtk_cec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021 MediaTek Inc.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_GAMMA_EN 0x0000
#define GAMMA_EN BIT(0)
#define DISP_GAMMA_CFG 0x0020
#define GAMMA_LUT_EN BIT(1)
#define GAMMA_DITHERING BIT(2)
#define DISP_GAMMA_SIZE 0x0030
#define DISP_GAMMA_LUT 0x0700
#define LUT_10BIT_MASK 0x03ff
struct mtk_disp_gamma_data {
bool has_dither;
bool lut_diff;
};
/*
* struct mtk_disp_gamma - DISP_GAMMA driver structure
*/
struct mtk_disp_gamma {
struct clk *clk;
void __iomem *regs;
struct cmdq_client_reg cmdq_reg;
const struct mtk_disp_gamma_data *data;
};
int mtk_gamma_clk_enable(struct device *dev)
{
struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
return clk_prepare_enable(gamma->clk);
}
void mtk_gamma_clk_disable(struct device *dev)
{
struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
clk_disable_unprepare(gamma->clk);
}
void mtk_gamma_set_common(void __iomem *regs, struct drm_crtc_state *state, bool lut_diff)
{
unsigned int i, reg;
struct drm_color_lut *lut;
void __iomem *lut_base;
u32 word;
u32 diff[3] = {0};
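/*
* DRM provides 16-bit LUT entries while the hardware expects 10-bit
* values, hence the ">> 6" truncation below. When lut_diff is set,
* odd entries are programmed as the 10-bit difference from the
* previous entry rather than as absolute values, while even entries
* stay absolute.
*/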
if (state->gamma_lut) {
reg = readl(regs + DISP_GAMMA_CFG);
reg = reg | GAMMA_LUT_EN;
writel(reg, regs + DISP_GAMMA_CFG);
lut_base = regs + DISP_GAMMA_LUT;
lut = (struct drm_color_lut *)state->gamma_lut->data;
for (i = 0; i < MTK_LUT_SIZE; i++) {
if (!lut_diff || (i % 2 == 0)) {
word = (((lut[i].red >> 6) & LUT_10BIT_MASK) << 20) +
(((lut[i].green >> 6) & LUT_10BIT_MASK) << 10) +
((lut[i].blue >> 6) & LUT_10BIT_MASK);
} else {
diff[0] = (lut[i].red >> 6) - (lut[i - 1].red >> 6);
diff[1] = (lut[i].green >> 6) - (lut[i - 1].green >> 6);
diff[2] = (lut[i].blue >> 6) - (lut[i - 1].blue >> 6);
word = ((diff[0] & LUT_10BIT_MASK) << 20) +
((diff[1] & LUT_10BIT_MASK) << 10) +
(diff[2] & LUT_10BIT_MASK);
}
writel(word, (lut_base + i * 4));
}
}
}
void mtk_gamma_set(struct device *dev, struct drm_crtc_state *state)
{
struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
bool lut_diff = false;
if (gamma->data)
lut_diff = gamma->data->lut_diff;
mtk_gamma_set_common(gamma->regs, state, lut_diff);
}
void mtk_gamma_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
mtk_ddp_write(cmdq_pkt, h << 16 | w, &gamma->cmdq_reg, gamma->regs,
DISP_GAMMA_SIZE);
if (gamma->data && gamma->data->has_dither)
mtk_dither_set_common(gamma->regs, &gamma->cmdq_reg, bpc,
DISP_GAMMA_CFG, GAMMA_DITHERING, cmdq_pkt);
}
void mtk_gamma_start(struct device *dev)
{
struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
writel(GAMMA_EN, gamma->regs + DISP_GAMMA_EN);
}
void mtk_gamma_stop(struct device *dev)
{
struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
writel_relaxed(0x0, gamma->regs + DISP_GAMMA_EN);
}
static int mtk_disp_gamma_bind(struct device *dev, struct device *master,
void *data)
{
return 0;
}
static void mtk_disp_gamma_unbind(struct device *dev, struct device *master,
void *data)
{
}
static const struct component_ops mtk_disp_gamma_component_ops = {
.bind = mtk_disp_gamma_bind,
.unbind = mtk_disp_gamma_unbind,
};
static int mtk_disp_gamma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_gamma *priv;
struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get gamma clk\n");
return PTR_ERR(priv->clk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap gamma\n");
return PTR_ERR(priv->regs);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
if (ret)
dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif
priv->data = of_device_get_match_data(dev);
platform_set_drvdata(pdev, priv);
ret = component_add(dev, &mtk_disp_gamma_component_ops);
if (ret)
dev_err(dev, "Failed to add component: %d\n", ret);
return ret;
}
static void mtk_disp_gamma_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_gamma_component_ops);
}
static const struct mtk_disp_gamma_data mt8173_gamma_driver_data = {
.has_dither = true,
};
static const struct mtk_disp_gamma_data mt8183_gamma_driver_data = {
.lut_diff = true,
};
static const struct of_device_id mtk_disp_gamma_driver_dt_match[] = {
{ .compatible = "mediatek,mt8173-disp-gamma",
.data = &mt8173_gamma_driver_data},
{ .compatible = "mediatek,mt8183-disp-gamma",
.data = &mt8183_gamma_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_gamma_driver_dt_match);
struct platform_driver mtk_disp_gamma_driver = {
.probe = mtk_disp_gamma_probe,
.remove_new = mtk_disp_gamma_remove,
.driver = {
.name = "mediatek-disp-gamma",
.owner = THIS_MODULE,
.of_match_table = mtk_disp_gamma_driver_dt_match,
},
};
| linux-master | drivers/gpu/drm/mediatek/mtk_disp_gamma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021 MediaTek Inc.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_AAL_EN 0x0000
#define AAL_EN BIT(0)
#define DISP_AAL_SIZE 0x0030
#define DISP_AAL_OUTPUT_SIZE 0x04d8
struct mtk_disp_aal_data {
bool has_gamma;
};
struct mtk_disp_aal {
struct clk *clk;
void __iomem *regs;
struct cmdq_client_reg cmdq_reg;
const struct mtk_disp_aal_data *data;
};
int mtk_aal_clk_enable(struct device *dev)
{
struct mtk_disp_aal *aal = dev_get_drvdata(dev);
return clk_prepare_enable(aal->clk);
}
void mtk_aal_clk_disable(struct device *dev)
{
struct mtk_disp_aal *aal = dev_get_drvdata(dev);
clk_disable_unprepare(aal->clk);
}
void mtk_aal_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_aal *aal = dev_get_drvdata(dev);
mtk_ddp_write(cmdq_pkt, w << 16 | h, &aal->cmdq_reg, aal->regs, DISP_AAL_SIZE);
mtk_ddp_write(cmdq_pkt, w << 16 | h, &aal->cmdq_reg, aal->regs, DISP_AAL_OUTPUT_SIZE);
}
void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)
{
struct mtk_disp_aal *aal = dev_get_drvdata(dev);
if (aal->data && aal->data->has_gamma)
mtk_gamma_set_common(aal->regs, state, false);
}
void mtk_aal_start(struct device *dev)
{
struct mtk_disp_aal *aal = dev_get_drvdata(dev);
writel(AAL_EN, aal->regs + DISP_AAL_EN);
}
void mtk_aal_stop(struct device *dev)
{
struct mtk_disp_aal *aal = dev_get_drvdata(dev);
writel_relaxed(0x0, aal->regs + DISP_AAL_EN);
}
static int mtk_disp_aal_bind(struct device *dev, struct device *master,
void *data)
{
return 0;
}
static void mtk_disp_aal_unbind(struct device *dev, struct device *master,
void *data)
{
}
static const struct component_ops mtk_disp_aal_component_ops = {
.bind = mtk_disp_aal_bind,
.unbind = mtk_disp_aal_unbind,
};
static int mtk_disp_aal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_aal *priv;
struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get aal clk\n");
return PTR_ERR(priv->clk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap aal\n");
return PTR_ERR(priv->regs);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
if (ret)
dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif
priv->data = of_device_get_match_data(dev);
platform_set_drvdata(pdev, priv);
ret = component_add(dev, &mtk_disp_aal_component_ops);
if (ret)
dev_err(dev, "Failed to add component: %d\n", ret);
return ret;
}
static void mtk_disp_aal_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_aal_component_ops);
}
static const struct mtk_disp_aal_data mt8173_aal_driver_data = {
.has_gamma = true,
};
static const struct of_device_id mtk_disp_aal_driver_dt_match[] = {
{ .compatible = "mediatek,mt8173-disp-aal",
.data = &mt8173_aal_driver_data},
{ .compatible = "mediatek,mt8183-disp-aal"},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_aal_driver_dt_match);
struct platform_driver mtk_disp_aal_driver = {
.probe = mtk_disp_aal_probe,
.remove_new = mtk_disp_aal_remove,
.driver = {
.name = "mediatek-disp-aal",
.owner = THIS_MODULE,
.of_match_table = mtk_disp_aal_driver_dt_match,
},
};
| linux-master | drivers/gpu/drm/mediatek/mtk_disp_aal.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* DRM driver for Solomon SSD130X OLED displays (SPI bus)
*
* Copyright 2022 Red Hat Inc.
* Authors: Javier Martinez Canillas <[email protected]>
*/
#include <linux/spi/spi.h>
#include <linux/module.h>
#include "ssd130x.h"
#define DRIVER_NAME "ssd130x-spi"
#define DRIVER_DESC "DRM driver for Solomon SSD130X OLED displays (SPI)"
struct ssd130x_spi_transport {
struct spi_device *spi;
struct gpio_desc *dc;
};
/*
* The regmap bus .write handler is just a wrapper around spi_write() that
* also toggles the Data/Command control pin (D/C#). A 4-wire SPI interface
* uses a D/C# pin, in contrast with I2C, where a control byte containing
* the D/C# bit is sent prior to every data byte.
*
* These control bytes are considered registers by the ssd130x core driver
* and can be used by the ssd130x SPI driver to determine if the data sent
* is for a command register or for the Graphic Display Data RAM (GDDRAM).
*/
static int ssd130x_spi_write(void *context, const void *data, size_t count)
{
struct ssd130x_spi_transport *t = context;
struct spi_device *spi = t->spi;
const u8 *reg = data;
if (*reg == SSD130X_COMMAND)
gpiod_set_value_cansleep(t->dc, 0);
if (*reg == SSD130X_DATA)
gpiod_set_value_cansleep(t->dc, 1);
/* Remove control byte since is not used in a 4-wire SPI interface */
return spi_write(spi, reg + 1, count - 1);
}
/* The ssd130x driver does not read registers but regmap expects a .read */
static int ssd130x_spi_read(void *context, const void *reg, size_t reg_size,
void *val, size_t val_size)
{
return -EOPNOTSUPP;
}
static const struct regmap_config ssd130x_spi_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.write = ssd130x_spi_write,
.read = ssd130x_spi_read,
.can_multi_write = true,
};
static int ssd130x_spi_probe(struct spi_device *spi)
{
struct ssd130x_spi_transport *t;
struct ssd130x_device *ssd130x;
struct regmap *regmap;
struct gpio_desc *dc;
struct device *dev = &spi->dev;
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc))
return dev_err_probe(dev, PTR_ERR(dc),
"Failed to get dc gpio\n");
t = devm_kzalloc(dev, sizeof(*t), GFP_KERNEL);
if (!t)
return dev_err_probe(dev, -ENOMEM,
"Failed to allocate SPI transport data\n");
t->spi = spi;
t->dc = dc;
regmap = devm_regmap_init(dev, NULL, t, &ssd130x_spi_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
ssd130x = ssd130x_probe(dev, regmap);
if (IS_ERR(ssd130x))
return PTR_ERR(ssd130x);
spi_set_drvdata(spi, ssd130x);
return 0;
}
static void ssd130x_spi_remove(struct spi_device *spi)
{
struct ssd130x_device *ssd130x = spi_get_drvdata(spi);
ssd130x_remove(ssd130x);
}
static void ssd130x_spi_shutdown(struct spi_device *spi)
{
struct ssd130x_device *ssd130x = spi_get_drvdata(spi);
ssd130x_shutdown(ssd130x);
}
static const struct of_device_id ssd130x_of_match[] = {
{
.compatible = "sinowealth,sh1106",
.data = &ssd130x_variants[SH1106_ID],
},
{
.compatible = "solomon,ssd1305",
.data = &ssd130x_variants[SSD1305_ID],
},
{
.compatible = "solomon,ssd1306",
.data = &ssd130x_variants[SSD1306_ID],
},
{
.compatible = "solomon,ssd1307",
.data = &ssd130x_variants[SSD1307_ID],
},
{
.compatible = "solomon,ssd1309",
.data = &ssd130x_variants[SSD1309_ID],
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ssd130x_of_match);
#if IS_MODULE(CONFIG_DRM_SSD130X_SPI)
/*
* The SPI core always reports a MODALIAS uevent of the form "spi:<dev>", even
* if the device was registered via OF. This means that the module will not be
* auto loaded, unless it contains an alias that matches the MODALIAS reported.
*
* To work around this issue, add a SPI device ID table, even though it should
* not be needed for this driver to match the registered SPI devices.
*/
static const struct spi_device_id ssd130x_spi_table[] = {
{ "sh1106", SH1106_ID },
{ "ssd1305", SSD1305_ID },
{ "ssd1306", SSD1306_ID },
{ "ssd1307", SSD1307_ID },
{ "ssd1309", SSD1309_ID },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, ssd130x_spi_table);
#endif
static struct spi_driver ssd130x_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = ssd130x_of_match,
},
.probe = ssd130x_spi_probe,
.remove = ssd130x_spi_remove,
.shutdown = ssd130x_spi_shutdown,
};
module_spi_driver(ssd130x_spi_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Javier Martinez Canillas <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DRM_SSD130X);
| linux-master | drivers/gpu/drm/solomon/ssd130x-spi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* DRM driver for Solomon SSD130x OLED displays (I2C bus)
*
* Copyright 2022 Red Hat Inc.
* Author: Javier Martinez Canillas <[email protected]>
*
* Based on drivers/video/fbdev/ssd1307fb.c
* Copyright 2012 Free Electrons
*/
#include <linux/i2c.h>
#include <linux/module.h>
#include "ssd130x.h"
#define DRIVER_NAME "ssd130x-i2c"
#define DRIVER_DESC "DRM driver for Solomon SSD130x OLED displays (I2C)"
static const struct regmap_config ssd130x_i2c_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
static int ssd130x_i2c_probe(struct i2c_client *client)
{
struct ssd130x_device *ssd130x;
struct regmap *regmap;
regmap = devm_regmap_init_i2c(client, &ssd130x_i2c_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
ssd130x = ssd130x_probe(&client->dev, regmap);
if (IS_ERR(ssd130x))
return PTR_ERR(ssd130x);
i2c_set_clientdata(client, ssd130x);
return 0;
}
static void ssd130x_i2c_remove(struct i2c_client *client)
{
struct ssd130x_device *ssd130x = i2c_get_clientdata(client);
ssd130x_remove(ssd130x);
}
static void ssd130x_i2c_shutdown(struct i2c_client *client)
{
struct ssd130x_device *ssd130x = i2c_get_clientdata(client);
ssd130x_shutdown(ssd130x);
}
static const struct of_device_id ssd130x_of_match[] = {
{
.compatible = "sinowealth,sh1106",
.data = &ssd130x_variants[SH1106_ID],
},
{
.compatible = "solomon,ssd1305",
.data = &ssd130x_variants[SSD1305_ID],
},
{
.compatible = "solomon,ssd1306",
.data = &ssd130x_variants[SSD1306_ID],
},
{
.compatible = "solomon,ssd1307",
.data = &ssd130x_variants[SSD1307_ID],
},
{
.compatible = "solomon,ssd1309",
.data = &ssd130x_variants[SSD1309_ID],
},
/* Deprecated but kept for backward compatibility */
{
.compatible = "solomon,ssd1305fb-i2c",
.data = &ssd130x_variants[SSD1305_ID],
},
{
.compatible = "solomon,ssd1306fb-i2c",
.data = &ssd130x_variants[SSD1306_ID],
},
{
.compatible = "solomon,ssd1307fb-i2c",
.data = &ssd130x_variants[SSD1307_ID],
},
{
.compatible = "solomon,ssd1309fb-i2c",
.data = &ssd130x_variants[SSD1309_ID],
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ssd130x_of_match);
static struct i2c_driver ssd130x_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = ssd130x_of_match,
},
.probe = ssd130x_i2c_probe,
.remove = ssd130x_i2c_remove,
.shutdown = ssd130x_i2c_shutdown,
};
module_i2c_driver(ssd130x_i2c_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Javier Martinez Canillas <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(DRM_SSD130X);
| linux-master | drivers/gpu/drm/solomon/ssd130x-i2c.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* DRM driver for Solomon SSD130x OLED displays
*
* Copyright 2022 Red Hat Inc.
* Author: Javier Martinez Canillas <[email protected]>
*
* Based on drivers/video/fbdev/ssd1307fb.c
* Copyright 2012 Free Electrons
*/
#include <linux/backlight.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
#include <drm/drm_probe_helper.h>
#include "ssd130x.h"
#define DRIVER_NAME "ssd130x"
#define DRIVER_DESC "DRM driver for Solomon SSD130x OLED displays"
#define DRIVER_DATE "20220131"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define SSD130X_PAGE_COL_START_LOW 0x00
#define SSD130X_PAGE_COL_START_HIGH 0x10
#define SSD130X_SET_ADDRESS_MODE 0x20
#define SSD130X_SET_COL_RANGE 0x21
#define SSD130X_SET_PAGE_RANGE 0x22
#define SSD130X_CONTRAST 0x81
#define SSD130X_SET_LOOKUP_TABLE 0x91
#define SSD130X_CHARGE_PUMP 0x8d
#define SSD130X_SET_SEG_REMAP 0xa0
#define SSD130X_DISPLAY_OFF 0xae
#define SSD130X_SET_MULTIPLEX_RATIO 0xa8
#define SSD130X_DISPLAY_ON 0xaf
#define SSD130X_START_PAGE_ADDRESS 0xb0
#define SSD130X_SET_COM_SCAN_DIR 0xc0
#define SSD130X_SET_DISPLAY_OFFSET 0xd3
#define SSD130X_SET_CLOCK_FREQ 0xd5
#define SSD130X_SET_AREA_COLOR_MODE 0xd8
#define SSD130X_SET_PRECHARGE_PERIOD 0xd9
#define SSD130X_SET_COM_PINS_CONFIG 0xda
#define SSD130X_SET_VCOMH 0xdb
#define SSD130X_PAGE_COL_START_MASK GENMASK(3, 0)
#define SSD130X_PAGE_COL_START_HIGH_SET(val) FIELD_PREP(SSD130X_PAGE_COL_START_MASK, (val) >> 4)
#define SSD130X_PAGE_COL_START_LOW_SET(val) FIELD_PREP(SSD130X_PAGE_COL_START_MASK, (val))
#define SSD130X_START_PAGE_ADDRESS_MASK GENMASK(2, 0)
#define SSD130X_START_PAGE_ADDRESS_SET(val) FIELD_PREP(SSD130X_START_PAGE_ADDRESS_MASK, (val))
#define SSD130X_SET_SEG_REMAP_MASK GENMASK(0, 0)
#define SSD130X_SET_SEG_REMAP_SET(val) FIELD_PREP(SSD130X_SET_SEG_REMAP_MASK, (val))
#define SSD130X_SET_COM_SCAN_DIR_MASK GENMASK(3, 3)
#define SSD130X_SET_COM_SCAN_DIR_SET(val) FIELD_PREP(SSD130X_SET_COM_SCAN_DIR_MASK, (val))
#define SSD130X_SET_CLOCK_DIV_MASK GENMASK(3, 0)
#define SSD130X_SET_CLOCK_DIV_SET(val) FIELD_PREP(SSD130X_SET_CLOCK_DIV_MASK, (val))
#define SSD130X_SET_CLOCK_FREQ_MASK GENMASK(7, 4)
#define SSD130X_SET_CLOCK_FREQ_SET(val) FIELD_PREP(SSD130X_SET_CLOCK_FREQ_MASK, (val))
#define SSD130X_SET_PRECHARGE_PERIOD1_MASK GENMASK(3, 0)
#define SSD130X_SET_PRECHARGE_PERIOD1_SET(val) FIELD_PREP(SSD130X_SET_PRECHARGE_PERIOD1_MASK, (val))
#define SSD130X_SET_PRECHARGE_PERIOD2_MASK GENMASK(7, 4)
#define SSD130X_SET_PRECHARGE_PERIOD2_SET(val) FIELD_PREP(SSD130X_SET_PRECHARGE_PERIOD2_MASK, (val))
#define SSD130X_SET_COM_PINS_CONFIG1_MASK GENMASK(4, 4)
#define SSD130X_SET_COM_PINS_CONFIG1_SET(val) FIELD_PREP(SSD130X_SET_COM_PINS_CONFIG1_MASK, (val))
#define SSD130X_SET_COM_PINS_CONFIG2_MASK GENMASK(5, 5)
#define SSD130X_SET_COM_PINS_CONFIG2_SET(val) FIELD_PREP(SSD130X_SET_COM_PINS_CONFIG2_MASK, (val))
#define SSD130X_SET_ADDRESS_MODE_HORIZONTAL 0x00
#define SSD130X_SET_ADDRESS_MODE_VERTICAL 0x01
#define SSD130X_SET_ADDRESS_MODE_PAGE 0x02
#define SSD130X_SET_AREA_COLOR_MODE_ENABLE 0x1e
#define SSD130X_SET_AREA_COLOR_MODE_LOW_POWER 0x05
#define MAX_CONTRAST 255
const struct ssd130x_deviceinfo ssd130x_variants[] = {
[SH1106_ID] = {
.default_vcomh = 0x40,
.default_dclk_div = 1,
.default_dclk_frq = 5,
.default_width = 132,
.default_height = 64,
.page_mode_only = 1,
.page_height = 8,
},
[SSD1305_ID] = {
.default_vcomh = 0x34,
.default_dclk_div = 1,
.default_dclk_frq = 7,
.default_width = 132,
.default_height = 64,
.page_height = 8,
},
[SSD1306_ID] = {
.default_vcomh = 0x20,
.default_dclk_div = 1,
.default_dclk_frq = 8,
.need_chargepump = 1,
.default_width = 128,
.default_height = 64,
.page_height = 8,
},
[SSD1307_ID] = {
.default_vcomh = 0x20,
.default_dclk_div = 2,
.default_dclk_frq = 12,
.need_pwm = 1,
.default_width = 128,
.default_height = 39,
.page_height = 8,
},
[SSD1309_ID] = {
.default_vcomh = 0x34,
.default_dclk_div = 1,
.default_dclk_frq = 10,
.default_width = 128,
.default_height = 64,
.page_height = 8,
}
};
EXPORT_SYMBOL_NS_GPL(ssd130x_variants, DRM_SSD130X);
struct ssd130x_plane_state {
struct drm_shadow_plane_state base;
/* Intermediate buffer to convert pixels from XRGB8888 to HW format */
u8 *buffer;
/* Buffer to store pixels in HW format and written to the panel */
u8 *data_array;
};
static inline struct ssd130x_plane_state *to_ssd130x_plane_state(struct drm_plane_state *state)
{
return container_of(state, struct ssd130x_plane_state, base.base);
}
static inline struct ssd130x_device *drm_to_ssd130x(struct drm_device *drm)
{
return container_of(drm, struct ssd130x_device, drm);
}
/*
* Helper to write data (SSD130X_DATA) to the device.
*/
static int ssd130x_write_data(struct ssd130x_device *ssd130x, u8 *values, int count)
{
return regmap_bulk_write(ssd130x->regmap, SSD130X_DATA, values, count);
}
/*
* Helper to write command (SSD130X_COMMAND). The first variadic argument
* is the command to write and the following are the command options.
*
* Note that the ssd130x protocol requires each command and option to be
* written as a SSD130X_COMMAND device register value. That is why a call
* to regmap_write(..., SSD130X_COMMAND, ...) is done for each argument.
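*
* For example, ssd130x_write_cmd(ssd130x, 2, SSD130X_CONTRAST, contrast)
* results in two consecutive SSD130X_COMMAND register writes: the contrast
* command byte followed by the contrast value.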
*/
static int ssd130x_write_cmd(struct ssd130x_device *ssd130x, int count,
/* u8 cmd, u8 option, ... */...)
{
va_list ap;
u8 value;
int ret;
va_start(ap, count);
do {
value = va_arg(ap, int);
ret = regmap_write(ssd130x->regmap, SSD130X_COMMAND, value);
if (ret)
goto out_end;
} while (--count);
out_end:
va_end(ap);
return ret;
}
/* Set address range for horizontal/vertical addressing modes */
static int ssd130x_set_col_range(struct ssd130x_device *ssd130x,
u8 col_start, u8 cols)
{
u8 col_end = col_start + cols - 1;
int ret;
if (col_start == ssd130x->col_start && col_end == ssd130x->col_end)
return 0;
ret = ssd130x_write_cmd(ssd130x, 3, SSD130X_SET_COL_RANGE, col_start, col_end);
if (ret < 0)
return ret;
ssd130x->col_start = col_start;
ssd130x->col_end = col_end;
return 0;
}
static int ssd130x_set_page_range(struct ssd130x_device *ssd130x,
u8 page_start, u8 pages)
{
u8 page_end = page_start + pages - 1;
int ret;
if (page_start == ssd130x->page_start && page_end == ssd130x->page_end)
return 0;
ret = ssd130x_write_cmd(ssd130x, 3, SSD130X_SET_PAGE_RANGE, page_start, page_end);
if (ret < 0)
return ret;
ssd130x->page_start = page_start;
ssd130x->page_end = page_end;
return 0;
}
/* Set page and column start address for page addressing mode */
static int ssd130x_set_page_pos(struct ssd130x_device *ssd130x,
u8 page_start, u8 col_start)
{
int ret;
u32 page, col_low, col_high;
page = SSD130X_START_PAGE_ADDRESS |
SSD130X_START_PAGE_ADDRESS_SET(page_start);
col_low = SSD130X_PAGE_COL_START_LOW |
SSD130X_PAGE_COL_START_LOW_SET(col_start);
col_high = SSD130X_PAGE_COL_START_HIGH |
SSD130X_PAGE_COL_START_HIGH_SET(col_start);
ret = ssd130x_write_cmd(ssd130x, 3, page, col_low, col_high);
if (ret < 0)
return ret;
return 0;
}
static int ssd130x_pwm_enable(struct ssd130x_device *ssd130x)
{
struct device *dev = ssd130x->dev;
struct pwm_state pwmstate;
ssd130x->pwm = pwm_get(dev, NULL);
if (IS_ERR(ssd130x->pwm)) {
dev_err(dev, "Could not get PWM from firmware description!\n");
return PTR_ERR(ssd130x->pwm);
}
pwm_init_state(ssd130x->pwm, &pwmstate);
pwm_set_relative_duty_cycle(&pwmstate, 50, 100);
pwm_apply_state(ssd130x->pwm, &pwmstate);
/* Enable the PWM */
pwm_enable(ssd130x->pwm);
dev_dbg(dev, "Using PWM%d with a %lluns period.\n",
ssd130x->pwm->pwm, pwm_get_period(ssd130x->pwm));
return 0;
}
static void ssd130x_reset(struct ssd130x_device *ssd130x)
{
if (!ssd130x->reset)
return;
/* Reset the screen */
gpiod_set_value_cansleep(ssd130x->reset, 1);
udelay(4);
gpiod_set_value_cansleep(ssd130x->reset, 0);
udelay(4);
}
static int ssd130x_power_on(struct ssd130x_device *ssd130x)
{
struct device *dev = ssd130x->dev;
int ret;
ssd130x_reset(ssd130x);
ret = regulator_enable(ssd130x->vcc_reg);
if (ret) {
dev_err(dev, "Failed to enable VCC: %d\n", ret);
return ret;
}
if (ssd130x->device_info->need_pwm) {
ret = ssd130x_pwm_enable(ssd130x);
if (ret) {
dev_err(dev, "Failed to enable PWM: %d\n", ret);
regulator_disable(ssd130x->vcc_reg);
return ret;
}
}
return 0;
}
static void ssd130x_power_off(struct ssd130x_device *ssd130x)
{
pwm_disable(ssd130x->pwm);
pwm_put(ssd130x->pwm);
regulator_disable(ssd130x->vcc_reg);
}
static int ssd130x_init(struct ssd130x_device *ssd130x)
{
u32 precharge, dclk, com_invdir, compins, chargepump, seg_remap;
bool scan_mode;
int ret;
/* Set initial contrast */
ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_CONTRAST, ssd130x->contrast);
if (ret < 0)
return ret;
/* Set segment re-map */
seg_remap = (SSD130X_SET_SEG_REMAP |
SSD130X_SET_SEG_REMAP_SET(ssd130x->seg_remap));
ret = ssd130x_write_cmd(ssd130x, 1, seg_remap);
if (ret < 0)
return ret;
/* Set COM direction */
com_invdir = (SSD130X_SET_COM_SCAN_DIR |
SSD130X_SET_COM_SCAN_DIR_SET(ssd130x->com_invdir));
ret = ssd130x_write_cmd(ssd130x, 1, com_invdir);
if (ret < 0)
return ret;
/* Set multiplex ratio value */
ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_MULTIPLEX_RATIO, ssd130x->height - 1);
if (ret < 0)
return ret;
/* set display offset value */
ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_DISPLAY_OFFSET, ssd130x->com_offset);
if (ret < 0)
return ret;
/* Set clock frequency */
dclk = (SSD130X_SET_CLOCK_DIV_SET(ssd130x->dclk_div - 1) |
SSD130X_SET_CLOCK_FREQ_SET(ssd130x->dclk_frq));
ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_CLOCK_FREQ, dclk);
if (ret < 0)
return ret;
/* Set Area Color Mode ON/OFF & Low Power Display Mode */
if (ssd130x->area_color_enable || ssd130x->low_power) {
u32 mode = 0;
if (ssd130x->area_color_enable)
mode |= SSD130X_SET_AREA_COLOR_MODE_ENABLE;
if (ssd130x->low_power)
mode |= SSD130X_SET_AREA_COLOR_MODE_LOW_POWER;
ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_AREA_COLOR_MODE, mode);
if (ret < 0)
return ret;
}
/* Set precharge period in number of ticks from the internal clock */
precharge = (SSD130X_SET_PRECHARGE_PERIOD1_SET(ssd130x->prechargep1) |
SSD130X_SET_PRECHARGE_PERIOD2_SET(ssd130x->prechargep2));
ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_PRECHARGE_PERIOD, precharge);
if (ret < 0)
return ret;
/* Set COM pins configuration */
compins = BIT(1);
/*
* The COM scan mode field values are the inverse of the boolean DT
* property "solomon,com-seq". The value 0b means scan from COM0 to
* COM[N - 1] while 1b means scan from COM[N - 1] to COM0.
*/
scan_mode = !ssd130x->com_seq;
compins |= (SSD130X_SET_COM_PINS_CONFIG1_SET(scan_mode) |
SSD130X_SET_COM_PINS_CONFIG2_SET(ssd130x->com_lrremap));
ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_COM_PINS_CONFIG, compins);
if (ret < 0)
return ret;
/* Set VCOMH */
ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_VCOMH, ssd130x->vcomh);
if (ret < 0)
return ret;
/* Turn on the DC-DC Charge Pump */
chargepump = BIT(4);
if (ssd130x->device_info->need_chargepump)
chargepump |= BIT(2);
ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_CHARGE_PUMP, chargepump);
if (ret < 0)
return ret;
/* Set lookup table */
if (ssd130x->lookup_table_set) {
int i;
ret = ssd130x_write_cmd(ssd130x, 1, SSD130X_SET_LOOKUP_TABLE);
if (ret < 0)
return ret;
for (i = 0; i < ARRAY_SIZE(ssd130x->lookup_table); i++) {
u8 val = ssd130x->lookup_table[i];
if (val < 31 || val > 63)
dev_warn(ssd130x->dev,
"lookup table index %d value out of range 31 <= %d <= 63\n",
i, val);
ret = ssd130x_write_cmd(ssd130x, 1, val);
if (ret < 0)
return ret;
}
}
/* Switch to page addressing mode */
if (ssd130x->page_address_mode)
return ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_ADDRESS_MODE,
SSD130X_SET_ADDRESS_MODE_PAGE);
/* Switch to horizontal addressing mode */
return ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_ADDRESS_MODE,
SSD130X_SET_ADDRESS_MODE_HORIZONTAL);
}
static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
struct ssd130x_plane_state *ssd130x_state,
struct drm_rect *rect)
{
unsigned int x = rect->x1;
unsigned int y = rect->y1;
u8 *buf = ssd130x_state->buffer;
u8 *data_array = ssd130x_state->data_array;
unsigned int width = drm_rect_width(rect);
unsigned int height = drm_rect_height(rect);
unsigned int line_length = DIV_ROUND_UP(width, 8);
unsigned int page_height = ssd130x->device_info->page_height;
unsigned int pages = DIV_ROUND_UP(height, page_height);
struct drm_device *drm = &ssd130x->drm;
u32 array_idx = 0;
int ret, i, j, k;
drm_WARN_ONCE(drm, y % 8 != 0, "y must be aligned to screen page\n");
/*
* The screen is divided in pages, each having a height of 8
* pixels, and the width of the screen. When sending a byte of
* data to the controller, it gives the 8 bits for the current
* column. I.e., the first byte holds the 8 bits of the first
* column, then the 8 bits for the second column, etc.
*
*
* Representation of the screen, assuming it is 5 bits
* wide. Each letter-number combination is a bit that controls
* one pixel.
*
* A0 A1 A2 A3 A4
* B0 B1 B2 B3 B4
* C0 C1 C2 C3 C4
* D0 D1 D2 D3 D4
* E0 E1 E2 E3 E4
* F0 F1 F2 F3 F4
* G0 G1 G2 G3 G4
* H0 H1 H2 H3 H4
*
* If you want to update this screen, you need to send 5 bytes:
* (1) A0 B0 C0 D0 E0 F0 G0 H0
* (2) A1 B1 C1 D1 E1 F1 G1 H1
* (3) A2 B2 C2 D2 E2 F2 G2 H2
* (4) A3 B3 C3 D3 E3 F3 G3 H3
* (5) A4 B4 C4 D4 E4 F4 G4 H4
*/
if (!ssd130x->page_address_mode) {
/* Set address range for horizontal addressing mode */
ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width);
if (ret < 0)
return ret;
ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset + y / 8, pages);
if (ret < 0)
return ret;
}
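/*
* Repack the R1 bitmap into the controller's page layout: each byte covers
* 8 vertical pixels of one column, with bit 0 being the topmost row of the
* page.
*/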
for (i = 0; i < pages; i++) {
int m = 8;
/* Last page may be partial */
if (8 * (y / 8 + i + 1) > ssd130x->height)
m = ssd130x->height % 8;
for (j = 0; j < width; j++) {
u8 data = 0;
for (k = 0; k < m; k++) {
u8 byte = buf[(8 * i + k) * line_length + j / 8];
u8 bit = (byte >> (j % 8)) & 1;
data |= bit << k;
}
data_array[array_idx++] = data;
}
/*
* In page addressing mode, the start address needs to be reset,
* and each page then needs to be written out separately.
*/
if (ssd130x->page_address_mode) {
ret = ssd130x_set_page_pos(ssd130x,
ssd130x->page_offset + i,
ssd130x->col_offset + x);
if (ret < 0)
return ret;
ret = ssd130x_write_data(ssd130x, data_array, width);
if (ret < 0)
return ret;
array_idx = 0;
}
}
/* Write out update in one go if we aren't using page addressing mode */
if (!ssd130x->page_address_mode)
ret = ssd130x_write_data(ssd130x, data_array, width * pages);
return ret;
}
static void ssd130x_clear_screen(struct ssd130x_device *ssd130x,
struct ssd130x_plane_state *ssd130x_state)
{
struct drm_rect fullscreen = {
.x1 = 0,
.x2 = ssd130x->width,
.y1 = 0,
.y2 = ssd130x->height,
};
ssd130x_update_rect(ssd130x, ssd130x_state, &fullscreen);
}
static int ssd130x_fb_blit_rect(struct drm_plane_state *state,
const struct iosys_map *vmap,
struct drm_rect *rect)
{
struct drm_framebuffer *fb = state->fb;
struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
unsigned int page_height = ssd130x->device_info->page_height;
struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(state);
u8 *buf = ssd130x_state->buffer;
struct iosys_map dst;
unsigned int dst_pitch;
int ret = 0;
/* Align y to display page boundaries */
rect->y1 = round_down(rect->y1, page_height);
rect->y2 = min_t(unsigned int, round_up(rect->y2, page_height), ssd130x->height);
dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
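/* Convert the XRGB8888 shadow buffer to 1 bpp monochrome (R1) in the intermediate buffer */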
iosys_map_set_vaddr(&dst, buf);
drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
ssd130x_update_rect(ssd130x, ssd130x_state, rect);
return ret;
}
static int ssd130x_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_device *drm = plane->dev;
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state);
unsigned int page_height = ssd130x->device_info->page_height;
unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
const struct drm_format_info *fi;
unsigned int pitch;
int ret;
ret = drm_plane_helper_atomic_check(plane, state);
if (ret)
return ret;
fi = drm_format_info(DRM_FORMAT_R1);
if (!fi)
return -EINVAL;
pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
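/* buffer holds a full-screen R1 image; data_array holds the same pixels in the controller's page layout */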
ssd130x_state->buffer = kcalloc(pitch, ssd130x->height, GFP_KERNEL);
if (!ssd130x_state->buffer)
return -ENOMEM;
ssd130x_state->data_array = kcalloc(ssd130x->width, pages, GFP_KERNEL);
if (!ssd130x_state->data_array) {
kfree(ssd130x_state->buffer);
/* Set to prevent a double free in .atomic_destroy_state() */
ssd130x_state->buffer = NULL;
return -ENOMEM;
}
return 0;
}
static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_atomic_helper_damage_iter iter;
struct drm_device *drm = plane->dev;
struct drm_rect dst_clip;
struct drm_rect damage;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
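/* Only flush the damaged regions, clipped to the plane's destination rectangle */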
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
dst_clip = plane_state->dst;
if (!drm_rect_intersect(&dst_clip, &damage))
continue;
ssd130x_fb_blit_rect(plane_state, &shadow_plane_state->data[0], &dst_clip);
}
drm_dev_exit(idx);
}
static void ssd130x_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_device *drm = plane->dev;
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane->state);
int idx;
if (!drm_dev_enter(drm, &idx))
return;
ssd130x_clear_screen(ssd130x, ssd130x_state);
drm_dev_exit(idx);
}
/* Called during init to allocate the plane's atomic state. */
static void ssd130x_primary_plane_reset(struct drm_plane *plane)
{
struct ssd130x_plane_state *ssd130x_state;
WARN_ON(plane->state);
ssd130x_state = kzalloc(sizeof(*ssd130x_state), GFP_KERNEL);
if (!ssd130x_state)
return;
__drm_gem_reset_shadow_plane(plane, &ssd130x_state->base);
}
static struct drm_plane_state *ssd130x_primary_plane_duplicate_state(struct drm_plane *plane)
{
struct drm_shadow_plane_state *new_shadow_plane_state;
struct ssd130x_plane_state *old_ssd130x_state;
struct ssd130x_plane_state *ssd130x_state;
if (WARN_ON(!plane->state))
return NULL;
old_ssd130x_state = to_ssd130x_plane_state(plane->state);
ssd130x_state = kmemdup(old_ssd130x_state, sizeof(*ssd130x_state), GFP_KERNEL);
if (!ssd130x_state)
return NULL;
/* The buffers are not duplicated and are allocated in .atomic_check */
ssd130x_state->buffer = NULL;
ssd130x_state->data_array = NULL;
new_shadow_plane_state = &ssd130x_state->base;
__drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
return &new_shadow_plane_state->base;
}
static void ssd130x_primary_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(state);
kfree(ssd130x_state->data_array);
kfree(ssd130x_state->buffer);
__drm_gem_destroy_shadow_plane_state(&ssd130x_state->base);
kfree(ssd130x_state);
}
static const struct drm_plane_helper_funcs ssd130x_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ssd130x_primary_plane_helper_atomic_check,
.atomic_update = ssd130x_primary_plane_helper_atomic_update,
.atomic_disable = ssd130x_primary_plane_helper_atomic_disable,
};
static const struct drm_plane_funcs ssd130x_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = ssd130x_primary_plane_reset,
.atomic_duplicate_state = ssd130x_primary_plane_duplicate_state,
.atomic_destroy_state = ssd130x_primary_plane_destroy_state,
.destroy = drm_plane_cleanup,
};
static enum drm_mode_status ssd130x_crtc_helper_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(crtc->dev);
if (mode->hdisplay != ssd130x->mode.hdisplay &&
mode->vdisplay != ssd130x->mode.vdisplay)
return MODE_ONE_SIZE;
else if (mode->hdisplay != ssd130x->mode.hdisplay)
return MODE_ONE_WIDTH;
else if (mode->vdisplay != ssd130x->mode.vdisplay)
return MODE_ONE_HEIGHT;
return MODE_OK;
}
/*
* The CRTC is always enabled. Screen updates are performed by
* the primary plane's atomic_update function. Disabling clears
* the screen in the primary plane's atomic_disable function.
*/
static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs = {
.mode_valid = ssd130x_crtc_helper_mode_valid,
.atomic_check = drm_crtc_helper_atomic_check,
};
static const struct drm_crtc_funcs ssd130x_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static void ssd130x_encoder_helper_atomic_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct drm_device *drm = encoder->dev;
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
int ret;
ret = ssd130x_power_on(ssd130x);
if (ret)
return;
ret = ssd130x_init(ssd130x);
if (ret)
goto power_off;
ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);
backlight_enable(ssd130x->bl_dev);
return;
power_off:
ssd130x_power_off(ssd130x);
return;
}
static void ssd130x_encoder_helper_atomic_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct drm_device *drm = encoder->dev;
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
backlight_disable(ssd130x->bl_dev);
ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_OFF);
ssd130x_power_off(ssd130x);
}
static const struct drm_encoder_helper_funcs ssd130x_encoder_helper_funcs = {
.atomic_enable = ssd130x_encoder_helper_atomic_enable,
.atomic_disable = ssd130x_encoder_helper_atomic_disable,
};
static const struct drm_encoder_funcs ssd130x_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
static int ssd130x_connector_helper_get_modes(struct drm_connector *connector)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(connector->dev);
struct drm_display_mode *mode;
struct device *dev = ssd130x->dev;
mode = drm_mode_duplicate(connector->dev, &ssd130x->mode);
if (!mode) {
dev_err(dev, "Failed to duplicated mode\n");
return 0;
}
drm_mode_probed_add(connector, mode);
drm_set_preferred_mode(connector, mode->hdisplay, mode->vdisplay);
/* There is only a single mode */
return 1;
}
static const struct drm_connector_helper_funcs ssd130x_connector_helper_funcs = {
.get_modes = ssd130x_connector_helper_get_modes,
};
static const struct drm_connector_funcs ssd130x_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs ssd130x_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static const uint32_t ssd130x_formats[] = {
DRM_FORMAT_XRGB8888,
};
DEFINE_DRM_GEM_FOPS(ssd130x_fops);
static const struct drm_driver ssd130x_drm_driver = {
DRM_GEM_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
.fops = &ssd130x_fops,
};
static int ssd130x_update_bl(struct backlight_device *bdev)
{
struct ssd130x_device *ssd130x = bl_get_data(bdev);
int brightness = backlight_get_brightness(bdev);
int ret;
ssd130x->contrast = brightness;
ret = ssd130x_write_cmd(ssd130x, 1, SSD130X_CONTRAST);
if (ret < 0)
return ret;
ret = ssd130x_write_cmd(ssd130x, 1, ssd130x->contrast);
if (ret < 0)
return ret;
return 0;
}
static const struct backlight_ops ssd130xfb_bl_ops = {
.update_status = ssd130x_update_bl,
};
static void ssd130x_parse_properties(struct ssd130x_device *ssd130x)
{
struct device *dev = ssd130x->dev;
if (device_property_read_u32(dev, "solomon,width", &ssd130x->width))
ssd130x->width = ssd130x->device_info->default_width;
if (device_property_read_u32(dev, "solomon,height", &ssd130x->height))
ssd130x->height = ssd130x->device_info->default_height;
if (device_property_read_u32(dev, "solomon,page-offset", &ssd130x->page_offset))
ssd130x->page_offset = 1;
if (device_property_read_u32(dev, "solomon,col-offset", &ssd130x->col_offset))
ssd130x->col_offset = 0;
if (device_property_read_u32(dev, "solomon,com-offset", &ssd130x->com_offset))
ssd130x->com_offset = 0;
if (device_property_read_u32(dev, "solomon,prechargep1", &ssd130x->prechargep1))
ssd130x->prechargep1 = 2;
if (device_property_read_u32(dev, "solomon,prechargep2", &ssd130x->prechargep2))
ssd130x->prechargep2 = 2;
if (!device_property_read_u8_array(dev, "solomon,lookup-table",
ssd130x->lookup_table,
ARRAY_SIZE(ssd130x->lookup_table)))
ssd130x->lookup_table_set = 1;
ssd130x->seg_remap = !device_property_read_bool(dev, "solomon,segment-no-remap");
ssd130x->com_seq = device_property_read_bool(dev, "solomon,com-seq");
ssd130x->com_lrremap = device_property_read_bool(dev, "solomon,com-lrremap");
ssd130x->com_invdir = device_property_read_bool(dev, "solomon,com-invdir");
ssd130x->area_color_enable =
device_property_read_bool(dev, "solomon,area-color-enable");
ssd130x->low_power = device_property_read_bool(dev, "solomon,low-power");
ssd130x->contrast = 127;
ssd130x->vcomh = ssd130x->device_info->default_vcomh;
/* Setup display timing */
if (device_property_read_u32(dev, "solomon,dclk-div", &ssd130x->dclk_div))
ssd130x->dclk_div = ssd130x->device_info->default_dclk_div;
if (device_property_read_u32(dev, "solomon,dclk-frq", &ssd130x->dclk_frq))
ssd130x->dclk_frq = ssd130x->device_info->default_dclk_frq;
}
static int ssd130x_init_modeset(struct ssd130x_device *ssd130x)
{
struct drm_display_mode *mode = &ssd130x->mode;
struct device *dev = ssd130x->dev;
struct drm_device *drm = &ssd130x->drm;
unsigned long max_width, max_height;
struct drm_plane *primary_plane;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
int ret;
/*
* Modesetting
*/
ret = drmm_mode_config_init(drm);
if (ret) {
dev_err(dev, "DRM mode config init failed: %d\n", ret);
return ret;
}
mode->type = DRM_MODE_TYPE_DRIVER;
mode->clock = 1;
mode->hdisplay = mode->htotal = ssd130x->width;
mode->hsync_start = mode->hsync_end = ssd130x->width;
mode->vdisplay = mode->vtotal = ssd130x->height;
mode->vsync_start = mode->vsync_end = ssd130x->height;
mode->width_mm = 27;
mode->height_mm = 27;
max_width = max_t(unsigned long, mode->hdisplay, DRM_SHADOW_PLANE_MAX_WIDTH);
max_height = max_t(unsigned long, mode->vdisplay, DRM_SHADOW_PLANE_MAX_HEIGHT);
drm->mode_config.min_width = mode->hdisplay;
drm->mode_config.max_width = max_width;
drm->mode_config.min_height = mode->vdisplay;
drm->mode_config.max_height = max_height;
drm->mode_config.preferred_depth = 24;
drm->mode_config.funcs = &ssd130x_mode_config_funcs;
/* Primary plane */
primary_plane = &ssd130x->primary_plane;
ret = drm_universal_plane_init(drm, primary_plane, 0, &ssd130x_primary_plane_funcs,
ssd130x_formats, ARRAY_SIZE(ssd130x_formats),
NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
dev_err(dev, "DRM primary plane init failed: %d\n", ret);
return ret;
}
drm_plane_helper_add(primary_plane, &ssd130x_primary_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(primary_plane);
/* CRTC */
crtc = &ssd130x->crtc;
ret = drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
&ssd130x_crtc_funcs, NULL);
if (ret) {
dev_err(dev, "DRM crtc init failed: %d\n", ret);
return ret;
}
drm_crtc_helper_add(crtc, &ssd130x_crtc_helper_funcs);
/* Encoder */
encoder = &ssd130x->encoder;
ret = drm_encoder_init(drm, encoder, &ssd130x_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
if (ret) {
dev_err(dev, "DRM encoder init failed: %d\n", ret);
return ret;
}
drm_encoder_helper_add(encoder, &ssd130x_encoder_helper_funcs);
encoder->possible_crtcs = drm_crtc_mask(crtc);
/* Connector */
connector = &ssd130x->connector;
ret = drm_connector_init(drm, connector, &ssd130x_connector_funcs,
DRM_MODE_CONNECTOR_Unknown);
if (ret) {
dev_err(dev, "DRM connector init failed: %d\n", ret);
return ret;
}
drm_connector_helper_add(connector, &ssd130x_connector_helper_funcs);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
dev_err(dev, "DRM attach connector to encoder failed: %d\n", ret);
return ret;
}
drm_mode_config_reset(drm);
return 0;
}
static int ssd130x_get_resources(struct ssd130x_device *ssd130x)
{
struct device *dev = ssd130x->dev;
ssd130x->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ssd130x->reset))
return dev_err_probe(dev, PTR_ERR(ssd130x->reset),
"Failed to get reset gpio\n");
ssd130x->vcc_reg = devm_regulator_get(dev, "vcc");
if (IS_ERR(ssd130x->vcc_reg))
return dev_err_probe(dev, PTR_ERR(ssd130x->vcc_reg),
"Failed to get VCC regulator\n");
return 0;
}
struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap)
{
struct ssd130x_device *ssd130x;
struct backlight_device *bl;
struct drm_device *drm;
int ret;
ssd130x = devm_drm_dev_alloc(dev, &ssd130x_drm_driver,
struct ssd130x_device, drm);
if (IS_ERR(ssd130x))
return ERR_PTR(dev_err_probe(dev, PTR_ERR(ssd130x),
"Failed to allocate DRM device\n"));
drm = &ssd130x->drm;
ssd130x->dev = dev;
ssd130x->regmap = regmap;
ssd130x->device_info = device_get_match_data(dev);
if (ssd130x->device_info->page_mode_only)
ssd130x->page_address_mode = 1;
ssd130x_parse_properties(ssd130x);
ret = ssd130x_get_resources(ssd130x);
if (ret)
return ERR_PTR(ret);
bl = devm_backlight_device_register(dev, dev_name(dev), dev, ssd130x,
&ssd130xfb_bl_ops, NULL);
if (IS_ERR(bl))
return ERR_PTR(dev_err_probe(dev, PTR_ERR(bl),
"Unable to register backlight device\n"));
bl->props.brightness = ssd130x->contrast;
bl->props.max_brightness = MAX_CONTRAST;
ssd130x->bl_dev = bl;
ret = ssd130x_init_modeset(ssd130x);
if (ret)
return ERR_PTR(ret);
ret = drm_dev_register(drm, 0);
if (ret)
return ERR_PTR(dev_err_probe(dev, ret, "DRM device register failed\n"));
drm_fbdev_generic_setup(drm, 32);
return ssd130x;
}
EXPORT_SYMBOL_GPL(ssd130x_probe);
void ssd130x_remove(struct ssd130x_device *ssd130x)
{
drm_dev_unplug(&ssd130x->drm);
}
EXPORT_SYMBOL_GPL(ssd130x_remove);
void ssd130x_shutdown(struct ssd130x_device *ssd130x)
{
drm_atomic_helper_shutdown(&ssd130x->drm);
}
EXPORT_SYMBOL_GPL(ssd130x_shutdown);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Javier Martinez Canillas <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/solomon/ssd130x.c |
#define CREATE_TRACE_POINTS
#include "trace.h"
| linux-master | drivers/gpu/drm/tegra/trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012-2013 Avionic Design GmbH
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*
* Based on the KMS/FB DMA helpers
* Copyright (C) 2012 Analog Devices Inc.
*/
#include <linux/console.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include "drm.h"
#include "gem.h"
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
unsigned int index)
{
return to_tegra_bo(drm_gem_fb_get_obj(framebuffer, index));
}
bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
{
struct tegra_bo *bo = tegra_fb_get_plane(framebuffer, 0);
if (bo->flags & TEGRA_BO_BOTTOM_UP)
return true;
return false;
}
int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
struct tegra_bo_tiling *tiling)
{
uint64_t modifier = framebuffer->modifier;
if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
if ((modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) == 0)
tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_TEGRA;
else
tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_GPU;
modifier &= ~DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT;
}
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
tiling->mode = TEGRA_BO_TILING_MODE_PITCH;
tiling->value = 0;
break;
case DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED:
tiling->mode = TEGRA_BO_TILING_MODE_TILED;
tiling->value = 0;
break;
case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0):
tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
tiling->value = 0;
break;
case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1):
tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
tiling->value = 1;
break;
case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2):
tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
tiling->value = 2;
break;
case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3):
tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
tiling->value = 3;
break;
case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4):
tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
tiling->value = 4;
break;
case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5):
tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
tiling->value = 5;
break;
default:
DRM_DEBUG_KMS("unknown format modifier: %llx\n", modifier);
return -EINVAL;
}
return 0;
}
static const struct drm_framebuffer_funcs tegra_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct tegra_bo **planes,
unsigned int num_planes)
{
struct drm_framebuffer *fb;
unsigned int i;
int err;
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (!fb)
return ERR_PTR(-ENOMEM);
drm_helper_mode_fill_fb_struct(drm, fb, mode_cmd);
for (i = 0; i < fb->format->num_planes; i++)
fb->obj[i] = &planes[i]->gem;
err = drm_framebuffer_init(drm, fb, &tegra_fb_funcs);
if (err < 0) {
dev_err(drm->dev, "failed to initialize framebuffer: %d\n",
err);
kfree(fb);
return ERR_PTR(err);
}
return fb;
}
struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *cmd)
{
const struct drm_format_info *info = drm_get_format_info(drm, cmd);
struct tegra_bo *planes[4];
struct drm_gem_object *gem;
struct drm_framebuffer *fb;
unsigned int i;
int err;
for (i = 0; i < info->num_planes; i++) {
unsigned int width = cmd->width / (i ? info->hsub : 1);
unsigned int height = cmd->height / (i ? info->vsub : 1);
unsigned int size, bpp;
gem = drm_gem_object_lookup(file, cmd->handles[i]);
if (!gem) {
err = -ENXIO;
goto unreference;
}
bpp = info->cpp[i];
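/* Smallest buffer that can back this plane: full pitches for all but the last line, plus the plane offset */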
size = (height - 1) * cmd->pitches[i] +
width * bpp + cmd->offsets[i];
if (gem->size < size) {
err = -EINVAL;
goto unreference;
}
planes[i] = to_tegra_bo(gem);
}
fb = tegra_fb_alloc(drm, cmd, planes, i);
if (IS_ERR(fb)) {
err = PTR_ERR(fb);
goto unreference;
}
return fb;
unreference:
while (i--)
drm_gem_object_put(&planes[i]->gem);
return ERR_PTR(err);
}
| linux-master | drivers/gpu/drm/tegra/fb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Avionic Design GmbH
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <soc/tegra/common.h>
#include <soc/tegra/pmc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include "dc.h"
#include "drm.h"
#include "gem.h"
#include "hub.h"
#include "plane.h"
static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
static void tegra_dc_stats_reset(struct tegra_dc_stats *stats)
{
stats->frames = 0;
stats->vblank = 0;
stats->underflow = 0;
stats->overflow = 0;
}
/* Reads the active copy of a register. */
static u32 tegra_dc_readl_active(struct tegra_dc *dc, unsigned long offset)
{
u32 value;
tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
value = tegra_dc_readl(dc, offset);
tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
return value;
}
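/*
* Map a generic window register offset into this plane's per-window register
* aperture (each window's registers start at plane->offset).
*/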
static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
unsigned int offset)
{
if (offset >= 0x500 && offset <= 0x638) {
offset = 0x000 + (offset - 0x500);
return plane->offset + offset;
}
if (offset >= 0x700 && offset <= 0x719) {
offset = 0x180 + (offset - 0x700);
return plane->offset + offset;
}
if (offset >= 0x800 && offset <= 0x839) {
offset = 0x1c0 + (offset - 0x800);
return plane->offset + offset;
}
dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
return plane->offset + offset;
}
static inline u32 tegra_plane_readl(struct tegra_plane *plane,
unsigned int offset)
{
return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
}
static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
unsigned int offset)
{
tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
}
bool tegra_dc_has_output(struct tegra_dc *dc, struct device *dev)
{
struct device_node *np = dc->dev->of_node;
struct of_phandle_iterator it;
int err;
of_for_each_phandle(&it, err, np, "nvidia,outputs", NULL, 0)
if (it.node == dev->of_node)
return true;
return false;
}
/*
* Double-buffered registers have two copies: ASSEMBLY and ACTIVE. When the
* *_ACT_REQ bits are set the ASSEMBLY copy is latched into the ACTIVE copy.
* Latching happens immediately if the display controller is in STOP mode or
* on the next frame boundary otherwise.
*
* Triple-buffered registers have three copies: ASSEMBLY, ARM and ACTIVE. The
* ASSEMBLY copy is latched into the ARM copy immediately after *_UPDATE bits
* are written. When the *_ACT_REQ bits are written, the ARM copy is latched
* into the ACTIVE copy, either immediately if the display controller is in
* STOP mode, or at the next frame boundary otherwise.
*/
void tegra_dc_commit(struct tegra_dc *dc)
{
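/* Latch ASSEMBLY into ARM first, then request the ARM -> ACTIVE latch (see above) */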
tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
}
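/*
* Compute the 20.12 fixed-point DDA increment used by the window scaler to
* map "in" source pixels onto "out" destination pixels, clamped to the
* maximum ratio the filtering hardware supports.
*/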
static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v,
unsigned int bpp)
{
fixed20_12 outf = dfixed_init(out);
fixed20_12 inf = dfixed_init(in);
u32 dda_inc;
int max;
if (v)
max = 15;
else {
switch (bpp) {
case 2:
max = 8;
break;
default:
WARN_ON_ONCE(1);
fallthrough;
case 4:
max = 4;
break;
}
}
outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
inf.full -= dfixed_const(1);
dda_inc = dfixed_div(inf, outf);
dda_inc = min_t(u32, dda_inc, dfixed_const(max));
return dda_inc;
}
static inline u32 compute_initial_dda(unsigned int in)
{
fixed20_12 inf = dfixed_init(in);
return dfixed_frac(inf);
}
static void tegra_plane_setup_blending_legacy(struct tegra_plane *plane)
{
u32 background[3] = {
BLEND_WEIGHT1(0) | BLEND_WEIGHT0(0) | BLEND_COLOR_KEY_NONE,
BLEND_WEIGHT1(0) | BLEND_WEIGHT0(0) | BLEND_COLOR_KEY_NONE,
BLEND_WEIGHT1(0) | BLEND_WEIGHT0(0) | BLEND_COLOR_KEY_NONE,
};
u32 foreground = BLEND_WEIGHT1(255) | BLEND_WEIGHT0(255) |
BLEND_COLOR_KEY_NONE;
u32 blendnokey = BLEND_WEIGHT1(255) | BLEND_WEIGHT0(255);
struct tegra_plane_state *state;
u32 blending[2];
unsigned int i;
/* disable blending for non-overlapping case */
tegra_plane_writel(plane, blendnokey, DC_WIN_BLEND_NOKEY);
tegra_plane_writel(plane, foreground, DC_WIN_BLEND_1WIN);
state = to_tegra_plane_state(plane->base.state);
if (state->opaque) {
/*
* Since custom fixed-weight blending isn't used and the weight of
* the top window is set to max, we can enforce dependent blending.
* In this case the bottom window becomes transparent where the top
* window is opaque, and if the top window enables alpha blending,
* the bottom window gets an alpha value of 1 minus the sum of the
* alpha components of the overlapping plane.
*/
background[0] |= BLEND_CONTROL_DEPENDENT;
background[1] |= BLEND_CONTROL_DEPENDENT;
/*
* The region where three windows overlap is the intersection
* of the two regions where two windows overlap. It contributes
* to the area if all of the windows on top of it have an alpha
* component.
*/
switch (state->base.normalized_zpos) {
case 0:
if (state->blending[0].alpha &&
state->blending[1].alpha)
background[2] |= BLEND_CONTROL_DEPENDENT;
break;
case 1:
background[2] |= BLEND_CONTROL_DEPENDENT;
break;
}
} else {
/*
* Enable alpha blending if pixel format has an alpha
* component.
*/
foreground |= BLEND_CONTROL_ALPHA;
/*
* If any of the windows on top of this window is opaque, it
* will completely conceal this window within that area. If the
* top window has an alpha component, it is blended over the
* bottom window.
*/
for (i = 0; i < 2; i++) {
if (state->blending[i].alpha &&
state->blending[i].top)
background[i] |= BLEND_CONTROL_DEPENDENT;
}
switch (state->base.normalized_zpos) {
case 0:
if (state->blending[0].alpha &&
state->blending[1].alpha)
background[2] |= BLEND_CONTROL_DEPENDENT;
break;
case 1:
/*
* When both middle and topmost windows have an alpha,
* these windows are mixed together and then the result
* is blended over the bottom window.
*/
if (state->blending[0].alpha &&
state->blending[0].top)
background[2] |= BLEND_CONTROL_ALPHA;
if (state->blending[1].alpha &&
state->blending[1].top)
background[2] |= BLEND_CONTROL_ALPHA;
break;
}
}
switch (state->base.normalized_zpos) {
case 0:
tegra_plane_writel(plane, background[0], DC_WIN_BLEND_2WIN_X);
tegra_plane_writel(plane, background[1], DC_WIN_BLEND_2WIN_Y);
tegra_plane_writel(plane, background[2], DC_WIN_BLEND_3WIN_XY);
break;
case 1:
/*
* If window B / C is topmost, then the X / Y registers match the
* order of the blending[...] state indices; otherwise a swap is
* required.
*/
if (!state->blending[0].top && state->blending[1].top) {
blending[0] = foreground;
blending[1] = background[1];
} else {
blending[0] = background[0];
blending[1] = foreground;
}
tegra_plane_writel(plane, blending[0], DC_WIN_BLEND_2WIN_X);
tegra_plane_writel(plane, blending[1], DC_WIN_BLEND_2WIN_Y);
tegra_plane_writel(plane, background[2], DC_WIN_BLEND_3WIN_XY);
break;
case 2:
tegra_plane_writel(plane, foreground, DC_WIN_BLEND_2WIN_X);
tegra_plane_writel(plane, foreground, DC_WIN_BLEND_2WIN_Y);
tegra_plane_writel(plane, foreground, DC_WIN_BLEND_3WIN_XY);
break;
}
}
static void tegra_plane_setup_blending(struct tegra_plane *plane,
const struct tegra_dc_window *window)
{
u32 value;
value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
tegra_plane_writel(plane, value, DC_WIN_BLEND_MATCH_SELECT);
value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
tegra_plane_writel(plane, value, DC_WIN_BLEND_NOMATCH_SELECT);
value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - window->zpos);
tegra_plane_writel(plane, value, DC_WIN_BLEND_LAYER_CONTROL);
}
static bool
tegra_plane_use_horizontal_filtering(struct tegra_plane *plane,
const struct tegra_dc_window *window)
{
struct tegra_dc *dc = plane->dc;
if (window->src.w == window->dst.w)
return false;
if (plane->index == 0 && dc->soc->has_win_a_without_filters)
return false;
return true;
}
static bool
tegra_plane_use_vertical_filtering(struct tegra_plane *plane,
const struct tegra_dc_window *window)
{
struct tegra_dc *dc = plane->dc;
if (window->src.h == window->dst.h)
return false;
if (plane->index == 0 && dc->soc->has_win_a_without_filters)
return false;
if (plane->index == 2 && dc->soc->has_win_c_without_vert_filter)
return false;
return true;
}
static void tegra_dc_setup_window(struct tegra_plane *plane,
const struct tegra_dc_window *window)
{
unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
struct tegra_dc *dc = plane->dc;
unsigned int planes;
u32 value;
bool yuv;
/*
* For YUV planar modes, the number of bytes per pixel takes into
* account only the luma component and therefore is 1.
*/
yuv = tegra_plane_format_is_yuv(window->format, &planes, NULL);
if (!yuv)
bpp = window->bits_per_pixel / 8;
else
bpp = (planes > 1) ? 1 : 2;
tegra_plane_writel(plane, window->format, DC_WIN_COLOR_DEPTH);
tegra_plane_writel(plane, window->swap, DC_WIN_BYTE_SWAP);
value = V_POSITION(window->dst.y) | H_POSITION(window->dst.x);
tegra_plane_writel(plane, value, DC_WIN_POSITION);
value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w);
tegra_plane_writel(plane, value, DC_WIN_SIZE);
h_offset = window->src.x * bpp;
v_offset = window->src.y;
h_size = window->src.w * bpp;
v_size = window->src.h;
if (window->reflect_x)
h_offset += (window->src.w - 1) * bpp;
if (window->reflect_y)
v_offset += window->src.h - 1;
value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
tegra_plane_writel(plane, value, DC_WIN_PRESCALED_SIZE);
/*
* For DDA computations the number of bytes per pixel for YUV planar
* modes needs to take into account all Y, U and V components.
*/
if (yuv && planes > 1)
bpp = 2;
h_dda = compute_dda_inc(window->src.w, window->dst.w, false, bpp);
v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp);
value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
tegra_plane_writel(plane, value, DC_WIN_DDA_INC);
h_dda = compute_initial_dda(window->src.x);
v_dda = compute_initial_dda(window->src.y);
tegra_plane_writel(plane, h_dda, DC_WIN_H_INITIAL_DDA);
tegra_plane_writel(plane, v_dda, DC_WIN_V_INITIAL_DDA);
tegra_plane_writel(plane, 0, DC_WIN_UV_BUF_STRIDE);
tegra_plane_writel(plane, 0, DC_WIN_BUF_STRIDE);
tegra_plane_writel(plane, window->base[0], DC_WINBUF_START_ADDR);
if (yuv && planes > 1) {
tegra_plane_writel(plane, window->base[1], DC_WINBUF_START_ADDR_U);
if (planes > 2)
tegra_plane_writel(plane, window->base[2], DC_WINBUF_START_ADDR_V);
value = window->stride[1] << 16 | window->stride[0];
tegra_plane_writel(plane, value, DC_WIN_LINE_STRIDE);
} else {
tegra_plane_writel(plane, window->stride[0], DC_WIN_LINE_STRIDE);
}
tegra_plane_writel(plane, h_offset, DC_WINBUF_ADDR_H_OFFSET);
tegra_plane_writel(plane, v_offset, DC_WINBUF_ADDR_V_OFFSET);
if (dc->soc->supports_block_linear) {
unsigned long height = window->tiling.value;
switch (window->tiling.mode) {
case TEGRA_BO_TILING_MODE_PITCH:
value = DC_WINBUF_SURFACE_KIND_PITCH;
break;
case TEGRA_BO_TILING_MODE_TILED:
value = DC_WINBUF_SURFACE_KIND_TILED;
break;
case TEGRA_BO_TILING_MODE_BLOCK:
value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
DC_WINBUF_SURFACE_KIND_BLOCK;
break;
}
tegra_plane_writel(plane, value, DC_WINBUF_SURFACE_KIND);
} else {
switch (window->tiling.mode) {
case TEGRA_BO_TILING_MODE_PITCH:
value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
DC_WIN_BUFFER_ADDR_MODE_LINEAR;
break;
case TEGRA_BO_TILING_MODE_TILED:
value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
DC_WIN_BUFFER_ADDR_MODE_TILE;
break;
case TEGRA_BO_TILING_MODE_BLOCK:
/*
* No need to handle this here because ->atomic_check
* will already have filtered it out.
*/
break;
}
tegra_plane_writel(plane, value, DC_WIN_BUFFER_ADDR_MODE);
}
value = WIN_ENABLE;
if (yuv) {
/* setup default colorspace conversion coefficients */
tegra_plane_writel(plane, 0x00f0, DC_WIN_CSC_YOF);
tegra_plane_writel(plane, 0x012a, DC_WIN_CSC_KYRGB);
tegra_plane_writel(plane, 0x0000, DC_WIN_CSC_KUR);
tegra_plane_writel(plane, 0x0198, DC_WIN_CSC_KVR);
tegra_plane_writel(plane, 0x039b, DC_WIN_CSC_KUG);
tegra_plane_writel(plane, 0x032f, DC_WIN_CSC_KVG);
tegra_plane_writel(plane, 0x0204, DC_WIN_CSC_KUB);
tegra_plane_writel(plane, 0x0000, DC_WIN_CSC_KVB);
value |= CSC_ENABLE;
} else if (window->bits_per_pixel < 24) {
value |= COLOR_EXPAND;
}
if (window->reflect_x)
value |= H_DIRECTION;
if (window->reflect_y)
value |= V_DIRECTION;
if (tegra_plane_use_horizontal_filtering(plane, window)) {
/*
* Enable horizontal 6-tap filter and set filtering
* coefficients to the default values defined in TRM.
*/
tegra_plane_writel(plane, 0x00008000, DC_WIN_H_FILTER_P(0));
tegra_plane_writel(plane, 0x3e087ce1, DC_WIN_H_FILTER_P(1));
tegra_plane_writel(plane, 0x3b117ac1, DC_WIN_H_FILTER_P(2));
tegra_plane_writel(plane, 0x591b73aa, DC_WIN_H_FILTER_P(3));
tegra_plane_writel(plane, 0x57256d9a, DC_WIN_H_FILTER_P(4));
tegra_plane_writel(plane, 0x552f668b, DC_WIN_H_FILTER_P(5));
tegra_plane_writel(plane, 0x73385e8b, DC_WIN_H_FILTER_P(6));
tegra_plane_writel(plane, 0x72435583, DC_WIN_H_FILTER_P(7));
tegra_plane_writel(plane, 0x714c4c8b, DC_WIN_H_FILTER_P(8));
tegra_plane_writel(plane, 0x70554393, DC_WIN_H_FILTER_P(9));
tegra_plane_writel(plane, 0x715e389b, DC_WIN_H_FILTER_P(10));
tegra_plane_writel(plane, 0x71662faa, DC_WIN_H_FILTER_P(11));
tegra_plane_writel(plane, 0x536d25ba, DC_WIN_H_FILTER_P(12));
tegra_plane_writel(plane, 0x55731bca, DC_WIN_H_FILTER_P(13));
tegra_plane_writel(plane, 0x387a11d9, DC_WIN_H_FILTER_P(14));
tegra_plane_writel(plane, 0x3c7c08f1, DC_WIN_H_FILTER_P(15));
value |= H_FILTER;
}
if (tegra_plane_use_vertical_filtering(plane, window)) {
unsigned int i, k;
/*
* Enable vertical 2-tap filter and set filtering
* coefficients to the default values defined in TRM.
*/
for (i = 0, k = 128; i < 16; i++, k -= 8)
tegra_plane_writel(plane, k, DC_WIN_V_FILTER_P(i));
value |= V_FILTER;
}
tegra_plane_writel(plane, value, DC_WIN_WIN_OPTIONS);
if (dc->soc->has_legacy_blending)
tegra_plane_setup_blending_legacy(plane);
else
tegra_plane_setup_blending(plane, window);
}
static const u32 tegra20_primary_formats[] = {
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
/* non-native formats */
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
};
static const u64 tegra20_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED,
DRM_FORMAT_MOD_INVALID
};
static const u32 tegra114_primary_formats[] = {
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
/* new on Tegra114 */
DRM_FORMAT_ABGR4444,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_BGRX5551,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
};
static const u32 tegra124_primary_formats[] = {
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
/* new on Tegra114 */
DRM_FORMAT_ABGR4444,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_BGRX5551,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
/* new on Tegra124 */
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRX8888,
};
static const u64 tegra124_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
DRM_FORMAT_MOD_INVALID
};
static int tegra_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
unsigned int supported_rotation = DRM_MODE_ROTATE_0 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y;
unsigned int rotation = new_plane_state->rotation;
struct tegra_bo_tiling *tiling = &plane_state->tiling;
struct tegra_plane *tegra = to_tegra_plane(plane);
struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
int err;
plane_state->peak_memory_bandwidth = 0;
plane_state->avg_memory_bandwidth = 0;
/* no need for further checks if the plane is being disabled */
if (!new_plane_state->crtc) {
plane_state->total_peak_memory_bandwidth = 0;
return 0;
}
err = tegra_plane_format(new_plane_state->fb->format->format,
&plane_state->format,
&plane_state->swap);
if (err < 0)
return err;
/*
* Tegra20 and Tegra30 are special cases here because they support
* only variants of specific formats with an alpha component, but not
* the corresponding opaque formats. However, the opaque formats can
* be emulated by disabling alpha blending for the plane.
*/
if (dc->soc->has_legacy_blending) {
err = tegra_plane_setup_legacy_state(tegra, plane_state);
if (err < 0)
return err;
}
err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
if (err < 0)
return err;
if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
!dc->soc->supports_block_linear) {
DRM_ERROR("hardware doesn't support block linear mode\n");
return -EINVAL;
}
/*
* Older userspace used a custom BO flag in order to specify the Y
* reflection, while modern userspace uses the generic DRM rotation
* property in order to achieve the same result. The legacy BO flag
* duplicates the DRM rotation property when both are set.
*/
if (tegra_fb_is_bottom_up(new_plane_state->fb))
rotation |= DRM_MODE_REFLECT_Y;
rotation = drm_rotation_simplify(rotation, supported_rotation);
if (rotation & DRM_MODE_REFLECT_X)
plane_state->reflect_x = true;
else
plane_state->reflect_x = false;
if (rotation & DRM_MODE_REFLECT_Y)
plane_state->reflect_y = true;
else
plane_state->reflect_y = false;
/*
* Tegra doesn't support different strides for U and V planes so we
* error out if the user tries to display a framebuffer with such a
* configuration.
*/
if (new_plane_state->fb->format->num_planes > 2) {
if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
DRM_ERROR("unsupported UV-plane configuration\n");
return -EINVAL;
}
}
err = tegra_plane_state_add(tegra, new_plane_state);
if (err < 0)
return err;
return 0;
}
static void tegra_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct tegra_plane *p = to_tegra_plane(plane);
u32 value;
/* nothing to do */
if (!old_state || !old_state->crtc)
return;
value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
value &= ~WIN_ENABLE;
tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
}
static void tegra_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
struct drm_framebuffer *fb = new_state->fb;
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_dc_window window;
unsigned int i;
/* nothing to do ("rien ne va plus") */
if (!new_state->crtc || !new_state->fb)
return;
if (!new_state->visible)
return tegra_plane_atomic_disable(plane, state);
memset(&window, 0, sizeof(window));
window.src.x = new_state->src.x1 >> 16;
window.src.y = new_state->src.y1 >> 16;
window.src.w = drm_rect_width(&new_state->src) >> 16;
window.src.h = drm_rect_height(&new_state->src) >> 16;
window.dst.x = new_state->dst.x1;
window.dst.y = new_state->dst.y1;
window.dst.w = drm_rect_width(&new_state->dst);
window.dst.h = drm_rect_height(&new_state->dst);
window.bits_per_pixel = fb->format->cpp[0] * 8;
window.reflect_x = tegra_plane_state->reflect_x;
window.reflect_y = tegra_plane_state->reflect_y;
/* copy from state */
window.zpos = new_state->normalized_zpos;
window.tiling = tegra_plane_state->tiling;
window.format = tegra_plane_state->format;
window.swap = tegra_plane_state->swap;
for (i = 0; i < fb->format->num_planes; i++) {
window.base[i] = tegra_plane_state->iova[i] + fb->offsets[i];
/*
* Tegra uses a shared stride for UV planes. Framebuffers are
* already checked for this in the tegra_plane_atomic_check()
* function, so it's safe to ignore the V-plane pitch here.
*/
if (i < 2)
window.stride[i] = fb->pitches[i];
}
tegra_dc_setup_window(p, &window);
}
static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = {
.prepare_fb = tegra_plane_prepare_fb,
.cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_plane_atomic_check,
.atomic_disable = tegra_plane_atomic_disable,
.atomic_update = tegra_plane_atomic_update,
};
static unsigned long tegra_plane_get_possible_crtcs(struct drm_device *drm)
{
/*
* Ideally this would use drm_crtc_mask(), but that would require the
* CRTC to already be in the mode_config's list of CRTCs. However, it
* will only be added to that list in the drm_crtc_init_with_planes()
* (in tegra_dc_init()), which in turn requires registration of these
* planes. So we have ourselves a nice little chicken and egg problem
* here.
*
* We work around this by manually creating the mask from the number
* of CRTCs that have been registered, and should therefore always be
* the same as drm_crtc_index() after registration.
*/
return 1 << drm->mode_config.num_crtc;
}
static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
struct tegra_dc *dc)
{
unsigned long possible_crtcs = tegra_plane_get_possible_crtcs(drm);
enum drm_plane_type type = DRM_PLANE_TYPE_PRIMARY;
struct tegra_plane *plane;
unsigned int num_formats;
const u64 *modifiers;
const u32 *formats;
int err;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
/* Always use window A as primary window */
plane->offset = 0xa00;
plane->index = 0;
plane->dc = dc;
num_formats = dc->soc->num_primary_formats;
formats = dc->soc->primary_formats;
modifiers = dc->soc->modifiers;
err = tegra_plane_interconnect_init(plane);
if (err) {
kfree(plane);
return ERR_PTR(err);
}
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
num_formats, modifiers, type, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
}
drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
drm_plane_create_zpos_property(&plane->base, plane->index, 0, 255);
err = drm_plane_create_rotation_property(&plane->base,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
if (err < 0)
dev_err(dc->dev, "failed to create rotation property: %d\n",
err);
return &plane->base;
}
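/*
* The hardware cursor consumes a single 32-bit format: the legacy display
* controllers take RGBA ordering while nvdisplay takes ARGB, hence the two
* separate format lists below (selected in tegra_dc_cursor_plane_create()).
*/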
static const u32 tegra_legacy_cursor_plane_formats[] = {
DRM_FORMAT_RGBA8888,
};
static const u32 tegra_cursor_plane_formats[] = {
DRM_FORMAT_ARGB8888,
};
static int tegra_cursor_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
struct tegra_plane *tegra = to_tegra_plane(plane);
int err;
plane_state->peak_memory_bandwidth = 0;
plane_state->avg_memory_bandwidth = 0;
/* no need for further checks if the plane is being disabled */
if (!new_plane_state->crtc) {
plane_state->total_peak_memory_bandwidth = 0;
return 0;
}
/* scaling not supported for cursor */
if ((new_plane_state->src_w >> 16 != new_plane_state->crtc_w) ||
(new_plane_state->src_h >> 16 != new_plane_state->crtc_h))
return -EINVAL;
/* only square cursors supported */
if (new_plane_state->src_w != new_plane_state->src_h)
return -EINVAL;
if (new_plane_state->crtc_w != 32 && new_plane_state->crtc_w != 64 &&
new_plane_state->crtc_w != 128 && new_plane_state->crtc_w != 256)
return -EINVAL;
err = tegra_plane_state_add(tegra, new_plane_state);
if (err < 0)
return err;
return 0;
}
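/*
* Program the hardware cursor: pick the cursor size, write the buffer
* address (the upper IOVA bits go into a separate _HI register on 64-bit
* DMA configurations), enable the cursor with an alpha-blended "normal"
* mode and finally position it. On nvdisplay the cursor is additionally
* clipped in software via the CROPPED_POINT/CROPPED_SIZE registers.
*/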
static void __tegra_cursor_atomic_update(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
struct tegra_drm *tegra = plane->dev->dev_private;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
u64 dma_mask = *dc->dev->dma_mask;
#endif
unsigned int x, y;
u32 value = 0;
/* nothing to do ("rien ne va plus") */
if (!new_state->crtc || !new_state->fb)
return;
/*
* Legacy display supports hardware clipping of the cursor, but
* nvdisplay relies on software to clip the cursor to the screen.
*/
if (!dc->soc->has_nvdisplay)
value |= CURSOR_CLIP_DISPLAY;
switch (new_state->crtc_w) {
case 32:
value |= CURSOR_SIZE_32x32;
break;
case 64:
value |= CURSOR_SIZE_64x64;
break;
case 128:
value |= CURSOR_SIZE_128x128;
break;
case 256:
value |= CURSOR_SIZE_256x256;
break;
default:
WARN(1, "cursor size %ux%u not supported\n",
new_state->crtc_w, new_state->crtc_h);
return;
}
value |= (tegra_plane_state->iova[0] >> 10) & 0x3fffff;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
value = (tegra_plane_state->iova[0] >> 32) & (dma_mask >> 32);
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
#endif
/* enable cursor and set blend mode */
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value |= CURSOR_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
value &= ~CURSOR_DST_BLEND_MASK;
value &= ~CURSOR_SRC_BLEND_MASK;
if (dc->soc->has_nvdisplay)
value &= ~CURSOR_COMPOSITION_MODE_XOR;
else
value |= CURSOR_MODE_NORMAL;
value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
value |= CURSOR_ALPHA;
tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
/* nvdisplay relies on software for clipping */
if (dc->soc->has_nvdisplay) {
struct drm_rect src;
x = new_state->dst.x1;
y = new_state->dst.y1;
drm_rect_fp_to_int(&src, &new_state->src);
value = (src.y1 & tegra->vmask) << 16 | (src.x1 & tegra->hmask);
tegra_dc_writel(dc, value, DC_DISP_PCALC_HEAD_SET_CROPPED_POINT_IN_CURSOR);
value = (drm_rect_height(&src) & tegra->vmask) << 16 |
(drm_rect_width(&src) & tegra->hmask);
tegra_dc_writel(dc, value, DC_DISP_PCALC_HEAD_SET_CROPPED_SIZE_IN_CURSOR);
} else {
x = new_state->crtc_x;
y = new_state->crtc_y;
}
/* position the cursor */
value = ((y & tegra->vmask) << 16) | (x & tegra->hmask);
tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
}
static void tegra_cursor_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
__tegra_cursor_atomic_update(plane, new_state);
}
static void tegra_cursor_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct tegra_dc *dc;
u32 value;
/* nothing to do ("rien ne va plus") */
if (!old_state || !old_state->crtc)
return;
dc = to_tegra_dc(old_state->crtc);
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value &= ~CURSOR_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
}
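/*
* Asynchronous (legacy cursor ioctl) updates are only allowed when nothing
* but the cursor position changes: same CRTC, same framebuffer, same size,
* the CRTC stays active and the plane's visibility doesn't change.
*/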
static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_crtc_state *crtc_state;
int min_scale, max_scale;
int err;
crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
if (!crtc_state->active)
return -EINVAL;
if (plane->state->crtc != new_state->crtc ||
plane->state->src_w != new_state->src_w ||
plane->state->src_h != new_state->src_h ||
plane->state->crtc_w != new_state->crtc_w ||
plane->state->crtc_h != new_state->crtc_h ||
plane->state->fb != new_state->fb ||
plane->state->fb == NULL)
return -EINVAL;
min_scale = (1 << 16) / 8;
max_scale = (8 << 16) / 1;
err = drm_atomic_helper_check_plane_state(new_state, crtc_state, min_scale, max_scale,
true, true);
if (err < 0)
return err;
if (new_state->visible != plane->state->visible)
return -EINVAL;
return 0;
}
static void tegra_cursor_atomic_async_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
plane->state->src_x = new_state->src_x;
plane->state->src_y = new_state->src_y;
plane->state->crtc_x = new_state->crtc_x;
plane->state->crtc_y = new_state->crtc_y;
if (new_state->visible) {
struct tegra_plane *p = to_tegra_plane(plane);
u32 value;
__tegra_cursor_atomic_update(plane, new_state);
value = (WIN_A_ACT_REQ << p->index) << 8 | GENERAL_UPDATE;
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
(void)tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
value = (WIN_A_ACT_REQ << p->index) | GENERAL_ACT_REQ;
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
(void)tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
}
}
static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
.prepare_fb = tegra_plane_prepare_fb,
.cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_cursor_atomic_check,
.atomic_update = tegra_cursor_atomic_update,
.atomic_disable = tegra_cursor_atomic_disable,
.atomic_async_check = tegra_cursor_atomic_async_check,
.atomic_async_update = tegra_cursor_atomic_async_update,
};
static const uint64_t linear_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
struct tegra_dc *dc)
{
unsigned long possible_crtcs = tegra_plane_get_possible_crtcs(drm);
struct tegra_plane *plane;
unsigned int num_formats;
const u32 *formats;
int err;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
/*
* This index is kind of fake. The cursor isn't a regular plane, but
* its update and activation request bits in DC_CMD_STATE_CONTROL do
* use the same programming. Setting this fake index here allows the
* code in tegra_plane_state_add() to do the right thing without the
* need to special-case the cursor plane.
*/
plane->index = 6;
plane->dc = dc;
if (!dc->soc->has_nvdisplay) {
num_formats = ARRAY_SIZE(tegra_legacy_cursor_plane_formats);
formats = tegra_legacy_cursor_plane_formats;
err = tegra_plane_interconnect_init(plane);
if (err) {
kfree(plane);
return ERR_PTR(err);
}
} else {
num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
formats = tegra_cursor_plane_formats;
}
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
num_formats, linear_modifiers,
DRM_PLANE_TYPE_CURSOR, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
}
drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs);
drm_plane_create_zpos_immutable_property(&plane->base, 255);
return &plane->base;
}
static const u32 tegra20_overlay_formats[] = {
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
/* non-native formats */
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
/* planar formats */
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
};
static const u32 tegra114_overlay_formats[] = {
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
/* new on Tegra114 */
DRM_FORMAT_ABGR4444,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_BGRX5551,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
/* planar formats */
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
/* semi-planar formats */
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV16,
DRM_FORMAT_NV61,
DRM_FORMAT_NV24,
DRM_FORMAT_NV42,
};
static const u32 tegra124_overlay_formats[] = {
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
/* new on Tegra114 */
DRM_FORMAT_ABGR4444,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_BGRX5551,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
/* new on Tegra124 */
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRX8888,
/* planar formats */
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUV420, /* YU12 */
DRM_FORMAT_YUV422, /* YU16 */
DRM_FORMAT_YUV444, /* YU24 */
/* semi-planar formats */
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV16,
DRM_FORMAT_NV61,
DRM_FORMAT_NV24,
DRM_FORMAT_NV42,
};
static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int index,
bool cursor)
{
unsigned long possible_crtcs = tegra_plane_get_possible_crtcs(drm);
struct tegra_plane *plane;
unsigned int num_formats;
enum drm_plane_type type;
const u32 *formats;
int err;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
plane->offset = 0xa00 + 0x200 * index;
plane->index = index;
plane->dc = dc;
num_formats = dc->soc->num_overlay_formats;
formats = dc->soc->overlay_formats;
err = tegra_plane_interconnect_init(plane);
if (err) {
kfree(plane);
return ERR_PTR(err);
}
if (!cursor)
type = DRM_PLANE_TYPE_OVERLAY;
else
type = DRM_PLANE_TYPE_CURSOR;
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
num_formats, linear_modifiers,
type, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
}
drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
drm_plane_create_zpos_property(&plane->base, plane->index, 0, 255);
err = drm_plane_create_rotation_property(&plane->base,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
if (err < 0)
dev_err(dc->dev, "failed to create rotation property: %d\n",
err);
return &plane->base;
}
static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
struct tegra_dc *dc)
{
struct drm_plane *plane, *primary = NULL;
unsigned int i, j;
for (i = 0; i < dc->soc->num_wgrps; i++) {
const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
if (wgrp->dc == dc->pipe) {
for (j = 0; j < wgrp->num_windows; j++) {
unsigned int index = wgrp->windows[j];
plane = tegra_shared_plane_create(drm, dc,
wgrp->index,
index);
if (IS_ERR(plane))
return plane;
/*
* Choose the first shared plane owned by this
* head as the primary plane.
*/
if (!primary) {
plane->type = DRM_PLANE_TYPE_PRIMARY;
primary = plane;
}
}
}
}
return primary;
}
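/*
* Legacy display controllers get a dedicated primary plane (window A) plus
* one or two overlay windows: SoCs with a hardware cursor register both
* overlays here, while the others leave one overlay to be registered as
* the cursor plane later in tegra_dc_init().
*/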
static struct drm_plane *tegra_dc_add_planes(struct drm_device *drm,
struct tegra_dc *dc)
{
struct drm_plane *planes[2], *primary;
unsigned int planes_num;
unsigned int i;
int err;
primary = tegra_primary_plane_create(drm, dc);
if (IS_ERR(primary))
return primary;
if (dc->soc->supports_cursor)
planes_num = 2;
else
planes_num = 1;
for (i = 0; i < planes_num; i++) {
planes[i] = tegra_dc_overlay_plane_create(drm, dc, 1 + i,
false);
if (IS_ERR(planes[i])) {
err = PTR_ERR(planes[i]);
while (i--)
planes[i]->funcs->destroy(planes[i]);
primary->funcs->destroy(primary);
return ERR_PTR(err);
}
}
return primary;
}
static void tegra_dc_destroy(struct drm_crtc *crtc)
{
drm_crtc_cleanup(crtc);
}
static void tegra_crtc_reset(struct drm_crtc *crtc)
{
struct tegra_dc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
if (crtc->state)
tegra_crtc_atomic_destroy_state(crtc, crtc->state);
__drm_atomic_helper_crtc_reset(crtc, &state->base);
}
static struct drm_crtc_state *
tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
struct tegra_dc_state *state = to_dc_state(crtc->state);
struct tegra_dc_state *copy;
copy = kmalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, &copy->base);
copy->clk = state->clk;
copy->pclk = state->pclk;
copy->div = state->div;
copy->planes = state->planes;
return &copy->base;
}
static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
__drm_atomic_helper_crtc_destroy_state(state);
kfree(state);
}
#define DEBUGFS_REG32(_name) { .name = #_name, .offset = _name }
static const struct debugfs_reg32 tegra_dc_regs[] = {
DEBUGFS_REG32(DC_CMD_GENERAL_INCR_SYNCPT),
DEBUGFS_REG32(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL),
DEBUGFS_REG32(DC_CMD_GENERAL_INCR_SYNCPT_ERROR),
DEBUGFS_REG32(DC_CMD_WIN_A_INCR_SYNCPT),
DEBUGFS_REG32(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL),
DEBUGFS_REG32(DC_CMD_WIN_A_INCR_SYNCPT_ERROR),
DEBUGFS_REG32(DC_CMD_WIN_B_INCR_SYNCPT),
DEBUGFS_REG32(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL),
DEBUGFS_REG32(DC_CMD_WIN_B_INCR_SYNCPT_ERROR),
DEBUGFS_REG32(DC_CMD_WIN_C_INCR_SYNCPT),
DEBUGFS_REG32(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL),
DEBUGFS_REG32(DC_CMD_WIN_C_INCR_SYNCPT_ERROR),
DEBUGFS_REG32(DC_CMD_CONT_SYNCPT_VSYNC),
DEBUGFS_REG32(DC_CMD_DISPLAY_COMMAND_OPTION0),
DEBUGFS_REG32(DC_CMD_DISPLAY_COMMAND),
DEBUGFS_REG32(DC_CMD_SIGNAL_RAISE),
DEBUGFS_REG32(DC_CMD_DISPLAY_POWER_CONTROL),
DEBUGFS_REG32(DC_CMD_INT_STATUS),
DEBUGFS_REG32(DC_CMD_INT_MASK),
DEBUGFS_REG32(DC_CMD_INT_ENABLE),
DEBUGFS_REG32(DC_CMD_INT_TYPE),
DEBUGFS_REG32(DC_CMD_INT_POLARITY),
DEBUGFS_REG32(DC_CMD_SIGNAL_RAISE1),
DEBUGFS_REG32(DC_CMD_SIGNAL_RAISE2),
DEBUGFS_REG32(DC_CMD_SIGNAL_RAISE3),
DEBUGFS_REG32(DC_CMD_STATE_ACCESS),
DEBUGFS_REG32(DC_CMD_STATE_CONTROL),
DEBUGFS_REG32(DC_CMD_DISPLAY_WINDOW_HEADER),
DEBUGFS_REG32(DC_CMD_REG_ACT_CONTROL),
DEBUGFS_REG32(DC_COM_CRC_CONTROL),
DEBUGFS_REG32(DC_COM_CRC_CHECKSUM),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_ENABLE(0)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_ENABLE(1)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_ENABLE(2)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_ENABLE(3)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_POLARITY(0)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_POLARITY(1)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_POLARITY(2)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_POLARITY(3)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_DATA(0)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_DATA(1)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_DATA(2)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_DATA(3)),
DEBUGFS_REG32(DC_COM_PIN_INPUT_ENABLE(0)),
DEBUGFS_REG32(DC_COM_PIN_INPUT_ENABLE(1)),
DEBUGFS_REG32(DC_COM_PIN_INPUT_ENABLE(2)),
DEBUGFS_REG32(DC_COM_PIN_INPUT_ENABLE(3)),
DEBUGFS_REG32(DC_COM_PIN_INPUT_DATA(0)),
DEBUGFS_REG32(DC_COM_PIN_INPUT_DATA(1)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(0)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(1)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(2)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(3)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(4)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(5)),
DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(6)),
DEBUGFS_REG32(DC_COM_PIN_MISC_CONTROL),
DEBUGFS_REG32(DC_COM_PIN_PM0_CONTROL),
DEBUGFS_REG32(DC_COM_PIN_PM0_DUTY_CYCLE),
DEBUGFS_REG32(DC_COM_PIN_PM1_CONTROL),
DEBUGFS_REG32(DC_COM_PIN_PM1_DUTY_CYCLE),
DEBUGFS_REG32(DC_COM_SPI_CONTROL),
DEBUGFS_REG32(DC_COM_SPI_START_BYTE),
DEBUGFS_REG32(DC_COM_HSPI_WRITE_DATA_AB),
DEBUGFS_REG32(DC_COM_HSPI_WRITE_DATA_CD),
DEBUGFS_REG32(DC_COM_HSPI_CS_DC),
DEBUGFS_REG32(DC_COM_SCRATCH_REGISTER_A),
DEBUGFS_REG32(DC_COM_SCRATCH_REGISTER_B),
DEBUGFS_REG32(DC_COM_GPIO_CTRL),
DEBUGFS_REG32(DC_COM_GPIO_DEBOUNCE_COUNTER),
DEBUGFS_REG32(DC_COM_CRC_CHECKSUM_LATCHED),
DEBUGFS_REG32(DC_DISP_DISP_SIGNAL_OPTIONS0),
DEBUGFS_REG32(DC_DISP_DISP_SIGNAL_OPTIONS1),
DEBUGFS_REG32(DC_DISP_DISP_WIN_OPTIONS),
DEBUGFS_REG32(DC_DISP_DISP_MEM_HIGH_PRIORITY),
DEBUGFS_REG32(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER),
DEBUGFS_REG32(DC_DISP_DISP_TIMING_OPTIONS),
DEBUGFS_REG32(DC_DISP_REF_TO_SYNC),
DEBUGFS_REG32(DC_DISP_SYNC_WIDTH),
DEBUGFS_REG32(DC_DISP_BACK_PORCH),
DEBUGFS_REG32(DC_DISP_ACTIVE),
DEBUGFS_REG32(DC_DISP_FRONT_PORCH),
DEBUGFS_REG32(DC_DISP_H_PULSE0_CONTROL),
DEBUGFS_REG32(DC_DISP_H_PULSE0_POSITION_A),
DEBUGFS_REG32(DC_DISP_H_PULSE0_POSITION_B),
DEBUGFS_REG32(DC_DISP_H_PULSE0_POSITION_C),
DEBUGFS_REG32(DC_DISP_H_PULSE0_POSITION_D),
DEBUGFS_REG32(DC_DISP_H_PULSE1_CONTROL),
DEBUGFS_REG32(DC_DISP_H_PULSE1_POSITION_A),
DEBUGFS_REG32(DC_DISP_H_PULSE1_POSITION_B),
DEBUGFS_REG32(DC_DISP_H_PULSE1_POSITION_C),
DEBUGFS_REG32(DC_DISP_H_PULSE1_POSITION_D),
DEBUGFS_REG32(DC_DISP_H_PULSE2_CONTROL),
DEBUGFS_REG32(DC_DISP_H_PULSE2_POSITION_A),
DEBUGFS_REG32(DC_DISP_H_PULSE2_POSITION_B),
DEBUGFS_REG32(DC_DISP_H_PULSE2_POSITION_C),
DEBUGFS_REG32(DC_DISP_H_PULSE2_POSITION_D),
DEBUGFS_REG32(DC_DISP_V_PULSE0_CONTROL),
DEBUGFS_REG32(DC_DISP_V_PULSE0_POSITION_A),
DEBUGFS_REG32(DC_DISP_V_PULSE0_POSITION_B),
DEBUGFS_REG32(DC_DISP_V_PULSE0_POSITION_C),
DEBUGFS_REG32(DC_DISP_V_PULSE1_CONTROL),
DEBUGFS_REG32(DC_DISP_V_PULSE1_POSITION_A),
DEBUGFS_REG32(DC_DISP_V_PULSE1_POSITION_B),
DEBUGFS_REG32(DC_DISP_V_PULSE1_POSITION_C),
DEBUGFS_REG32(DC_DISP_V_PULSE2_CONTROL),
DEBUGFS_REG32(DC_DISP_V_PULSE2_POSITION_A),
DEBUGFS_REG32(DC_DISP_V_PULSE3_CONTROL),
DEBUGFS_REG32(DC_DISP_V_PULSE3_POSITION_A),
DEBUGFS_REG32(DC_DISP_M0_CONTROL),
DEBUGFS_REG32(DC_DISP_M1_CONTROL),
DEBUGFS_REG32(DC_DISP_DI_CONTROL),
DEBUGFS_REG32(DC_DISP_PP_CONTROL),
DEBUGFS_REG32(DC_DISP_PP_SELECT_A),
DEBUGFS_REG32(DC_DISP_PP_SELECT_B),
DEBUGFS_REG32(DC_DISP_PP_SELECT_C),
DEBUGFS_REG32(DC_DISP_PP_SELECT_D),
DEBUGFS_REG32(DC_DISP_DISP_CLOCK_CONTROL),
DEBUGFS_REG32(DC_DISP_DISP_INTERFACE_CONTROL),
DEBUGFS_REG32(DC_DISP_DISP_COLOR_CONTROL),
DEBUGFS_REG32(DC_DISP_SHIFT_CLOCK_OPTIONS),
DEBUGFS_REG32(DC_DISP_DATA_ENABLE_OPTIONS),
DEBUGFS_REG32(DC_DISP_SERIAL_INTERFACE_OPTIONS),
DEBUGFS_REG32(DC_DISP_LCD_SPI_OPTIONS),
DEBUGFS_REG32(DC_DISP_BORDER_COLOR),
DEBUGFS_REG32(DC_DISP_COLOR_KEY0_LOWER),
DEBUGFS_REG32(DC_DISP_COLOR_KEY0_UPPER),
DEBUGFS_REG32(DC_DISP_COLOR_KEY1_LOWER),
DEBUGFS_REG32(DC_DISP_COLOR_KEY1_UPPER),
DEBUGFS_REG32(DC_DISP_CURSOR_FOREGROUND),
DEBUGFS_REG32(DC_DISP_CURSOR_BACKGROUND),
DEBUGFS_REG32(DC_DISP_CURSOR_START_ADDR),
DEBUGFS_REG32(DC_DISP_CURSOR_START_ADDR_NS),
DEBUGFS_REG32(DC_DISP_CURSOR_POSITION),
DEBUGFS_REG32(DC_DISP_CURSOR_POSITION_NS),
DEBUGFS_REG32(DC_DISP_INIT_SEQ_CONTROL),
DEBUGFS_REG32(DC_DISP_SPI_INIT_SEQ_DATA_A),
DEBUGFS_REG32(DC_DISP_SPI_INIT_SEQ_DATA_B),
DEBUGFS_REG32(DC_DISP_SPI_INIT_SEQ_DATA_C),
DEBUGFS_REG32(DC_DISP_SPI_INIT_SEQ_DATA_D),
DEBUGFS_REG32(DC_DISP_DC_MCCIF_FIFOCTRL),
DEBUGFS_REG32(DC_DISP_MCCIF_DISPLAY0A_HYST),
DEBUGFS_REG32(DC_DISP_MCCIF_DISPLAY0B_HYST),
DEBUGFS_REG32(DC_DISP_MCCIF_DISPLAY1A_HYST),
DEBUGFS_REG32(DC_DISP_MCCIF_DISPLAY1B_HYST),
DEBUGFS_REG32(DC_DISP_DAC_CRT_CTRL),
DEBUGFS_REG32(DC_DISP_DISP_MISC_CONTROL),
DEBUGFS_REG32(DC_DISP_SD_CONTROL),
DEBUGFS_REG32(DC_DISP_SD_CSC_COEFF),
DEBUGFS_REG32(DC_DISP_SD_LUT(0)),
DEBUGFS_REG32(DC_DISP_SD_LUT(1)),
DEBUGFS_REG32(DC_DISP_SD_LUT(2)),
DEBUGFS_REG32(DC_DISP_SD_LUT(3)),
DEBUGFS_REG32(DC_DISP_SD_LUT(4)),
DEBUGFS_REG32(DC_DISP_SD_LUT(5)),
DEBUGFS_REG32(DC_DISP_SD_LUT(6)),
DEBUGFS_REG32(DC_DISP_SD_LUT(7)),
DEBUGFS_REG32(DC_DISP_SD_LUT(8)),
DEBUGFS_REG32(DC_DISP_SD_FLICKER_CONTROL),
DEBUGFS_REG32(DC_DISP_DC_PIXEL_COUNT),
DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(0)),
DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(1)),
DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(2)),
DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(3)),
DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(4)),
DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(5)),
DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(6)),
DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(7)),
DEBUGFS_REG32(DC_DISP_SD_BL_TF(0)),
DEBUGFS_REG32(DC_DISP_SD_BL_TF(1)),
DEBUGFS_REG32(DC_DISP_SD_BL_TF(2)),
DEBUGFS_REG32(DC_DISP_SD_BL_TF(3)),
DEBUGFS_REG32(DC_DISP_SD_BL_CONTROL),
DEBUGFS_REG32(DC_DISP_SD_HW_K_VALUES),
DEBUGFS_REG32(DC_DISP_SD_MAN_K_VALUES),
DEBUGFS_REG32(DC_DISP_CURSOR_START_ADDR_HI),
DEBUGFS_REG32(DC_DISP_BLEND_CURSOR_CONTROL),
DEBUGFS_REG32(DC_WIN_WIN_OPTIONS),
DEBUGFS_REG32(DC_WIN_BYTE_SWAP),
DEBUGFS_REG32(DC_WIN_BUFFER_CONTROL),
DEBUGFS_REG32(DC_WIN_COLOR_DEPTH),
DEBUGFS_REG32(DC_WIN_POSITION),
DEBUGFS_REG32(DC_WIN_SIZE),
DEBUGFS_REG32(DC_WIN_PRESCALED_SIZE),
DEBUGFS_REG32(DC_WIN_H_INITIAL_DDA),
DEBUGFS_REG32(DC_WIN_V_INITIAL_DDA),
DEBUGFS_REG32(DC_WIN_DDA_INC),
DEBUGFS_REG32(DC_WIN_LINE_STRIDE),
DEBUGFS_REG32(DC_WIN_BUF_STRIDE),
DEBUGFS_REG32(DC_WIN_UV_BUF_STRIDE),
DEBUGFS_REG32(DC_WIN_BUFFER_ADDR_MODE),
DEBUGFS_REG32(DC_WIN_DV_CONTROL),
DEBUGFS_REG32(DC_WIN_BLEND_NOKEY),
DEBUGFS_REG32(DC_WIN_BLEND_1WIN),
DEBUGFS_REG32(DC_WIN_BLEND_2WIN_X),
DEBUGFS_REG32(DC_WIN_BLEND_2WIN_Y),
DEBUGFS_REG32(DC_WIN_BLEND_3WIN_XY),
DEBUGFS_REG32(DC_WIN_HP_FETCH_CONTROL),
DEBUGFS_REG32(DC_WINBUF_START_ADDR),
DEBUGFS_REG32(DC_WINBUF_START_ADDR_NS),
DEBUGFS_REG32(DC_WINBUF_START_ADDR_U),
DEBUGFS_REG32(DC_WINBUF_START_ADDR_U_NS),
DEBUGFS_REG32(DC_WINBUF_START_ADDR_V),
DEBUGFS_REG32(DC_WINBUF_START_ADDR_V_NS),
DEBUGFS_REG32(DC_WINBUF_ADDR_H_OFFSET),
DEBUGFS_REG32(DC_WINBUF_ADDR_H_OFFSET_NS),
DEBUGFS_REG32(DC_WINBUF_ADDR_V_OFFSET),
DEBUGFS_REG32(DC_WINBUF_ADDR_V_OFFSET_NS),
DEBUGFS_REG32(DC_WINBUF_UFLOW_STATUS),
DEBUGFS_REG32(DC_WINBUF_AD_UFLOW_STATUS),
DEBUGFS_REG32(DC_WINBUF_BD_UFLOW_STATUS),
DEBUGFS_REG32(DC_WINBUF_CD_UFLOW_STATUS),
};
static int tegra_dc_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_dc *dc = node->info_ent->data;
unsigned int i;
int err = 0;
drm_modeset_lock(&dc->base.mutex, NULL);
if (!dc->base.state->active) {
err = -EBUSY;
goto unlock;
}
for (i = 0; i < ARRAY_SIZE(tegra_dc_regs); i++) {
unsigned int offset = tegra_dc_regs[i].offset;
seq_printf(s, "%-40s %#05x %08x\n", tegra_dc_regs[i].name,
offset, tegra_dc_readl(dc, offset));
}
unlock:
drm_modeset_unlock(&dc->base.mutex);
return err;
}
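/*
* Enable CRC generation over the active data, wait two VBLANKs so a full
* frame has been scanned out, then print the latched checksum and disable
* CRC generation again.
*/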
static int tegra_dc_show_crc(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_dc *dc = node->info_ent->data;
int err = 0;
u32 value;
drm_modeset_lock(&dc->base.mutex, NULL);
if (!dc->base.state->active) {
err = -EBUSY;
goto unlock;
}
value = DC_COM_CRC_CONTROL_ACTIVE_DATA | DC_COM_CRC_CONTROL_ENABLE;
tegra_dc_writel(dc, value, DC_COM_CRC_CONTROL);
tegra_dc_commit(dc);
drm_crtc_wait_one_vblank(&dc->base);
drm_crtc_wait_one_vblank(&dc->base);
value = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM);
seq_printf(s, "%08x\n", value);
tegra_dc_writel(dc, 0, DC_COM_CRC_CONTROL);
unlock:
drm_modeset_unlock(&dc->base.mutex);
return err;
}
static int tegra_dc_show_stats(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_dc *dc = node->info_ent->data;
seq_printf(s, "frames: %lu\n", dc->stats.frames);
seq_printf(s, "vblank: %lu\n", dc->stats.vblank);
seq_printf(s, "underflow: %lu\n", dc->stats.underflow);
seq_printf(s, "overflow: %lu\n", dc->stats.overflow);
seq_printf(s, "frames total: %lu\n", dc->stats.frames_total);
seq_printf(s, "vblank total: %lu\n", dc->stats.vblank_total);
seq_printf(s, "underflow total: %lu\n", dc->stats.underflow_total);
seq_printf(s, "overflow total: %lu\n", dc->stats.overflow_total);
return 0;
}
static struct drm_info_list debugfs_files[] = {
{ "regs", tegra_dc_show_regs, 0, NULL },
{ "crc", tegra_dc_show_crc, 0, NULL },
{ "stats", tegra_dc_show_stats, 0, NULL },
};
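/*
* The debugfs files are registered per CRTC: the template above is
* duplicated so that each instance can carry its own struct tegra_dc in
* the ->data pointer.
*/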
static int tegra_dc_late_register(struct drm_crtc *crtc)
{
unsigned int i, count = ARRAY_SIZE(debugfs_files);
struct drm_minor *minor = crtc->dev->primary;
struct dentry *root;
struct tegra_dc *dc = to_tegra_dc(crtc);
#ifdef CONFIG_DEBUG_FS
root = crtc->debugfs_entry;
#else
root = NULL;
#endif
dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
if (!dc->debugfs_files)
return -ENOMEM;
for (i = 0; i < count; i++)
dc->debugfs_files[i].data = dc;
drm_debugfs_create_files(dc->debugfs_files, count, root, minor);
return 0;
}
static void tegra_dc_early_unregister(struct drm_crtc *crtc)
{
unsigned int count = ARRAY_SIZE(debugfs_files);
struct drm_minor *minor = crtc->dev->primary;
struct tegra_dc *dc = to_tegra_dc(crtc);
drm_debugfs_remove_files(dc->debugfs_files, count, minor);
kfree(dc->debugfs_files);
dc->debugfs_files = NULL;
}
static u32 tegra_dc_get_vblank_counter(struct drm_crtc *crtc)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
/* XXX vblank syncpoints don't work with nvdisplay yet */
if (dc->syncpt && !dc->soc->has_nvdisplay)
return host1x_syncpt_read(dc->syncpt);
/* fall back to the software-emulated VBLANK counter */
return (u32)drm_crtc_vblank_count(&dc->base);
}
static int tegra_dc_enable_vblank(struct drm_crtc *crtc)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
value |= VBLANK_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
return 0;
}
static void tegra_dc_disable_vblank(struct drm_crtc *crtc)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
value &= ~VBLANK_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
}
static const struct drm_crtc_funcs tegra_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.set_config = drm_atomic_helper_set_config,
.destroy = tegra_dc_destroy,
.reset = tegra_crtc_reset,
.atomic_duplicate_state = tegra_crtc_atomic_duplicate_state,
.atomic_destroy_state = tegra_crtc_atomic_destroy_state,
.late_register = tegra_dc_late_register,
.early_unregister = tegra_dc_early_unregister,
.get_vblank_counter = tegra_dc_get_vblank_counter,
.enable_vblank = tegra_dc_enable_vblank,
.disable_vblank = tegra_dc_disable_vblank,
};
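/*
* Program the display timings. Each register packs the vertical value
* into the upper 16 bits and the horizontal value into the lower 16 bits:
* sync width, back porch, front porch and active area are all derived
* from the DRM display mode.
*/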
static int tegra_dc_set_timings(struct tegra_dc *dc,
struct drm_display_mode *mode)
{
unsigned int h_ref_to_sync = 1;
unsigned int v_ref_to_sync = 1;
unsigned long value;
if (!dc->soc->has_nvdisplay) {
tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
value = (v_ref_to_sync << 16) | h_ref_to_sync;
tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC);
}
value = ((mode->vsync_end - mode->vsync_start) << 16) |
((mode->hsync_end - mode->hsync_start) << 0);
tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH);
value = ((mode->vtotal - mode->vsync_end) << 16) |
((mode->htotal - mode->hsync_end) << 0);
tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
value = ((mode->vsync_start - mode->vdisplay) << 16) |
((mode->hsync_start - mode->hdisplay) << 0);
tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH);
value = (mode->vdisplay << 16) | mode->hdisplay;
tegra_dc_writel(dc, value, DC_DISP_ACTIVE);
return 0;
}
/**
* tegra_dc_state_setup_clock - check clock settings and store them in atomic
* state
* @dc: display controller
* @crtc_state: CRTC atomic state
* @clk: parent clock for display controller
* @pclk: pixel clock
* @div: shift clock divider
*
* Returns:
* 0 on success or a negative error-code on failure.
*/
int tegra_dc_state_setup_clock(struct tegra_dc *dc,
struct drm_crtc_state *crtc_state,
struct clk *clk, unsigned long pclk,
unsigned int div)
{
struct tegra_dc_state *state = to_dc_state(crtc_state);
if (!clk_has_parent(dc->clk, clk))
return -EINVAL;
state->clk = clk;
state->pclk = pclk;
state->div = div;
return 0;
}
static void tegra_dc_update_voltage_state(struct tegra_dc *dc,
struct tegra_dc_state *state)
{
unsigned long rate, pstate;
struct dev_pm_opp *opp;
int err;
if (!dc->has_opp_table)
return;
/* calculate the actual pixel clock rate, which depends on the internal divider */
rate = DIV_ROUND_UP(clk_get_rate(dc->clk) * 2, state->div + 2);
/* find suitable OPP for the rate */
opp = dev_pm_opp_find_freq_ceil(dc->dev, &rate);
/*
* Very high resolution modes may result in a clock rate that is
* above the characterized maximum. In this case it's okay to fall
* back to the characterized maximum.
*/
if (opp == ERR_PTR(-ERANGE))
opp = dev_pm_opp_find_freq_floor(dc->dev, &rate);
if (IS_ERR(opp)) {
dev_err(dc->dev, "failed to find OPP for %luHz: %pe\n",
rate, opp);
return;
}
pstate = dev_pm_opp_get_required_pstate(opp, 0);
dev_pm_opp_put(opp);
/*
* The minimum core voltage depends on the pixel clock rate (which
* depends on the internal clock divider of the CRTC) and not on the
* rate of the display controller clock. This is why we're not using
* the dev_pm_opp_set_rate() API and instead controlling the power domain
* directly.
*/
err = dev_pm_genpd_set_performance_state(dc->dev, pstate);
if (err)
dev_err(dc->dev, "failed to set power domain state to %lu: %d\n",
pstate, err);
}
static void tegra_dc_set_clock_rate(struct tegra_dc *dc,
struct tegra_dc_state *state)
{
int err;
err = clk_set_parent(dc->clk, state->clk);
if (err < 0)
dev_err(dc->dev, "failed to set parent clock: %d\n", err);
/*
* Outputs may not want to change the parent clock rate. This is only
* relevant to Tegra20 where only a single display PLL is available.
* Since that PLL would typically be used for HDMI, an internal LVDS
* panel would need to be driven by some other clock such as PLL_P
* which is shared with other peripherals. Changing the clock rate
* should therefore be avoided.
*/
if (state->pclk > 0) {
err = clk_set_rate(state->clk, state->pclk);
if (err < 0)
dev_err(dc->dev,
"failed to set clock rate to %lu Hz\n",
state->pclk);
err = clk_set_rate(dc->clk, state->pclk);
if (err < 0)
dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
dc->clk, state->pclk, err);
}
DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
state->div);
DRM_DEBUG_KMS("pclk: %lu\n", state->pclk);
tegra_dc_update_voltage_state(dc, state);
}
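/*
* tegra_dc_stop() clears the display control mode to stop scan-out,
* tegra_dc_idle() reports whether the controller has actually stopped and
* tegra_dc_wait_idle() polls for that condition with a short sleep
* between reads.
*/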
static void tegra_dc_stop(struct tegra_dc *dc)
{
u32 value;
/* stop the display controller */
value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
value &= ~DISP_CTRL_MODE_MASK;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
tegra_dc_commit(dc);
}
static bool tegra_dc_idle(struct tegra_dc *dc)
{
u32 value;
value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
return (value & DISP_CTRL_MODE_MASK) == 0;
}
static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
{
timeout = jiffies + msecs_to_jiffies(timeout);
while (time_before(jiffies, timeout)) {
if (tegra_dc_idle(dc))
return 0;
usleep_range(1000, 2000);
}
dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
return -ETIMEDOUT;
}
static void
tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
struct drm_atomic_state *state,
bool prepare_bandwidth_transition)
{
const struct tegra_plane_state *old_tegra_state, *new_tegra_state;
u32 i, new_avg_bw, old_avg_bw, new_peak_bw, old_peak_bw;
const struct drm_plane_state *old_plane_state;
const struct drm_crtc_state *old_crtc_state;
struct tegra_dc_window window, old_window;
struct tegra_dc *dc = to_tegra_dc(crtc);
struct tegra_plane *tegra;
struct drm_plane *plane;
if (dc->soc->has_nvdisplay)
return;
old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
if (!crtc->state->active) {
if (!old_crtc_state->active)
return;
/*
* When the CRTC is disabled via DPMS, the state of the attached
* planes is kept unchanged. Hence we need to enforce removal of
* the bandwidths from the ICC paths.
*/
drm_atomic_crtc_for_each_plane(plane, crtc) {
tegra = to_tegra_plane(plane);
icc_set_bw(tegra->icc_mem, 0, 0);
icc_set_bw(tegra->icc_mem_vfilter, 0, 0);
}
return;
}
for_each_old_plane_in_state(old_crtc_state->state, plane,
old_plane_state, i) {
old_tegra_state = to_const_tegra_plane_state(old_plane_state);
new_tegra_state = to_const_tegra_plane_state(plane->state);
tegra = to_tegra_plane(plane);
/*
* We're iterating over the global atomic state and it contains
* planes from other CRTCs, hence we need to filter out the
* planes unrelated to this CRTC.
*/
if (tegra->dc != dc)
continue;
new_avg_bw = new_tegra_state->avg_memory_bandwidth;
old_avg_bw = old_tegra_state->avg_memory_bandwidth;
new_peak_bw = new_tegra_state->total_peak_memory_bandwidth;
old_peak_bw = old_tegra_state->total_peak_memory_bandwidth;
/*
* See the comment related to !crtc->state->active above,
* which explains why bandwidths need to be updated when
* CRTC is turning ON.
*/
if (new_avg_bw == old_avg_bw && new_peak_bw == old_peak_bw &&
old_crtc_state->active)
continue;
window.src.h = drm_rect_height(&plane->state->src) >> 16;
window.dst.h = drm_rect_height(&plane->state->dst);
old_window.src.h = drm_rect_height(&old_plane_state->src) >> 16;
old_window.dst.h = drm_rect_height(&old_plane_state->dst);
/*
* During the preparation phase (atomic_begin), the memory
* freq should go high before the DC changes are committed
* if the bandwidth requirement goes up; otherwise the memory
* freq should stay high if the BW requirement goes down. The
* opposite applies to the completion phase (post_commit).
*/
if (prepare_bandwidth_transition) {
new_avg_bw = max(old_avg_bw, new_avg_bw);
new_peak_bw = max(old_peak_bw, new_peak_bw);
if (tegra_plane_use_vertical_filtering(tegra, &old_window))
window = old_window;
}
icc_set_bw(tegra->icc_mem, new_avg_bw, new_peak_bw);
if (tegra_plane_use_vertical_filtering(tegra, &window))
icc_set_bw(tegra->icc_mem_vfilter, new_avg_bw, new_peak_bw);
else
icc_set_bw(tegra->icc_mem_vfilter, 0, 0);
}
}
static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
int err;
if (!tegra_dc_idle(dc)) {
tegra_dc_stop(dc);
/*
* Ignore the return value, there isn't anything useful to do
* in case this fails.
*/
tegra_dc_wait_idle(dc, 100);
}
/*
* This should really be part of the RGB encoder driver, but clearing
* these bits has the side-effect of stopping the display controller.
* When that happens no VBLANK interrupts will be raised. At the same
* time the encoder is disabled before the display controller, so the
* above code is always going to time out waiting for the controller
* to go idle.
*
* Given the close coupling between the RGB encoder and the display
* controller doing it here is still kind of okay. None of the other
* encoder drivers require these bits to be cleared.
*
* XXX: Perhaps given that the display controller is switched off at
* this point anyway maybe clearing these bits isn't even useful for
* the RGB encoder?
*/
if (dc->rgb) {
value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
}
tegra_dc_stats_reset(&dc->stats);
drm_crtc_vblank_off(crtc);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
err = host1x_client_suspend(&dc->client);
if (err < 0)
dev_err(dc->dev, "failed to suspend: %d\n", err);
if (dc->has_opp_table) {
err = dev_pm_genpd_set_performance_state(dc->dev, 0);
if (err)
dev_err(dc->dev,
"failed to clear power domain state: %d\n", err);
}
}
static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct tegra_dc_state *crtc_state = to_dc_state(crtc->state);
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
int err;
/* apply PLL changes */
tegra_dc_set_clock_rate(dc, crtc_state);
err = host1x_client_resume(&dc->client);
if (err < 0) {
dev_err(dc->dev, "failed to resume: %d\n", err);
return;
}
/* initialize display controller */
if (dc->syncpt) {
u32 syncpt = host1x_syncpt_id(dc->syncpt), enable;
if (dc->soc->has_nvdisplay)
enable = 1 << 31;
else
enable = 1 << 8;
value = SYNCPT_CNTRL_NO_STALL;
tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
value = enable | syncpt;
tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
}
if (dc->soc->has_nvdisplay) {
value = DSC_TO_UF_INT | DSC_BBUF_UF_INT | DSC_RBUF_UF_INT |
DSC_OBUF_UF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
value = DSC_TO_UF_INT | DSC_BBUF_UF_INT | DSC_RBUF_UF_INT |
DSC_OBUF_UF_INT | SD3_BUCKET_WALK_DONE_INT |
HEAD_UF_INT | MSF_INT | REG_TMOUT_INT |
REGION_CRC_INT | V_PULSE2_INT | V_PULSE3_INT |
VBLANK_INT | FRAME_END_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
value = SD3_BUCKET_WALK_DONE_INT | HEAD_UF_INT | VBLANK_INT |
FRAME_END_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
value = HEAD_UF_INT | REG_TMOUT_INT | FRAME_END_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
} else {
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
/* initialize timer */
value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
}
if (dc->soc->supports_background_color)
tegra_dc_writel(dc, 0, DC_DISP_BLEND_BACKGROUND_COLOR);
else
tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
/* apply pixel clock changes */
if (!dc->soc->has_nvdisplay) {
value = SHIFT_CLK_DIVIDER(crtc_state->div) | PIXEL_CLK_DIVIDER_PCD1;
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
}
/* program display mode */
tegra_dc_set_timings(dc, mode);
/* interlacing isn't supported yet, so disable it */
if (dc->soc->supports_interlacing) {
value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
value &= ~INTERLACE_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_INTERLACE_CONTROL);
}
value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
value &= ~DISP_CTRL_MODE_MASK;
value |= DISP_CTRL_MODE_C_DISPLAY;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
if (!dc->soc->has_nvdisplay) {
value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
}
/* enable underflow reporting and display red for missing pixels */
if (dc->soc->has_nvdisplay) {
value = UNDERFLOW_MODE_RED | UNDERFLOW_REPORT_ENABLE;
tegra_dc_writel(dc, value, DC_COM_RG_UNDERFLOW);
}
if (dc->rgb) {
/* XXX: parameterize? */
value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE;
tegra_dc_writel(dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS);
}
tegra_dc_commit(dc);
drm_crtc_vblank_on(crtc);
}
static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
unsigned long flags;
tegra_crtc_update_memory_bandwidth(crtc, state, true);
if (crtc->state->event) {
spin_lock_irqsave(&crtc->dev->event_lock, flags);
if (drm_crtc_vblank_get(crtc) != 0)
drm_crtc_send_vblank_event(crtc, crtc->state->event);
else
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
crtc->state->event = NULL;
}
}
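/*
* Latch the new configuration: first arm the per-window and general update
* bits, then request activation so the hardware applies the armed state at
* the next frame boundary. The read-backs of DC_CMD_STATE_CONTROL appear
* to act as write barriers so the writes reach the hardware in order.
*/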
static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct tegra_dc_state *dc_state = to_dc_state(crtc_state);
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
value = dc_state->planes << 8 | GENERAL_UPDATE;
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
value = dc_state->planes | GENERAL_ACT_REQ;
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
}
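/*
* For bandwidth accounting a plane counts as a cursor either when the SoC
* has a dedicated hardware cursor, or when the overlay used as a cursor is
* unscaled, single-planar and no wider than 256 bytes per line.
*/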
static bool tegra_plane_is_cursor(const struct drm_plane_state *state)
{
const struct tegra_dc_soc_info *soc = to_tegra_dc(state->crtc)->soc;
const struct drm_format_info *fmt = state->fb->format;
unsigned int src_w = drm_rect_width(&state->src) >> 16;
unsigned int dst_w = drm_rect_width(&state->dst);
if (state->plane->type != DRM_PLANE_TYPE_CURSOR)
return false;
if (soc->supports_cursor)
return true;
if (src_w != dst_w || fmt->num_planes != 1 || src_w * fmt->cpp[0] > 256)
return false;
return true;
}
static unsigned long
tegra_plane_overlap_mask(struct drm_crtc_state *state,
const struct drm_plane_state *plane_state)
{
const struct drm_plane_state *other_state;
const struct tegra_plane *tegra;
unsigned long overlap_mask = 0;
struct drm_plane *plane;
struct drm_rect rect;
if (!plane_state->visible || !plane_state->fb)
return 0;
/*
* The data-prefetch FIFO easily overcomes temporary memory pressure
* if another plane overlaps with the cursor plane.
*/
if (tegra_plane_is_cursor(plane_state))
return 0;
drm_atomic_crtc_state_for_each_plane_state(plane, other_state, state) {
rect = plane_state->dst;
tegra = to_tegra_plane(other_state->plane);
if (!other_state->visible || !other_state->fb)
continue;
/*
* Ignore cursor plane overlaps because it's not practical to
* assume that the cursor contributes to the bandwidth in the
* overlapping area if the window width is small.
*/
if (tegra_plane_is_cursor(other_state))
continue;
if (drm_rect_intersect(&rect, &other_state->dst))
overlap_mask |= BIT(tegra->index);
}
return overlap_mask;
}
static int tegra_crtc_calculate_memory_bandwidth(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
ulong overlap_mask[TEGRA_DC_LEGACY_PLANES_NUM] = {}, mask;
u32 plane_peak_bw[TEGRA_DC_LEGACY_PLANES_NUM] = {};
bool all_planes_overlap_simultaneously = true;
const struct tegra_plane_state *tegra_state;
const struct drm_plane_state *plane_state;
struct tegra_dc *dc = to_tegra_dc(crtc);
struct drm_crtc_state *new_state;
struct tegra_plane *tegra;
struct drm_plane *plane;
/*
* The nv-display uses shared planes. The algorithm below assumes a
* maximum of 3 planes per CRTC; this assumption isn't applicable to
* the nv-display. Note that T124 supports additional windows, but
* currently they aren't supported by the driver.
*/
if (dc->soc->has_nvdisplay)
return 0;
new_state = drm_atomic_get_new_crtc_state(state, crtc);
/*
* For overlapping planes, pixel data is fetched for each plane at
* the same time, hence the bandwidths are accumulated in this case.
* This needs to be taken into account when calculating the total
* bandwidth consumed by all planes.
*
* Here we get the overlapping state of each plane, which is a
* bitmask of plane indices telling which planes it overlaps with.
* Note that bitmask[plane] includes BIT(plane) in order
* to make further code nicer and simpler.
*/
drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, new_state) {
tegra_state = to_const_tegra_plane_state(plane_state);
tegra = to_tegra_plane(plane);
if (WARN_ON_ONCE(tegra->index >= TEGRA_DC_LEGACY_PLANES_NUM))
return -EINVAL;
plane_peak_bw[tegra->index] = tegra_state->peak_memory_bandwidth;
mask = tegra_plane_overlap_mask(new_state, plane_state);
overlap_mask[tegra->index] = mask;
if (hweight_long(mask) != 3)
all_planes_overlap_simultaneously = false;
}
/*
* Then we calculate the maximum bandwidth of each plane state.
* The bandwidth includes the plane BW + BW of the "simultaneously"
* overlapping planes, where "simultaneously" means areas where the DC
* fetches from the planes simultaneously during the scan-out process.
*
* For example, if plane A overlaps with planes B and C, but B and C
* don't overlap, then the peak bandwidth will be either in area where
* A-and-B or A-and-C planes overlap.
*
* The plane_peak_bw[] contains peak memory bandwidth values of
* each plane, this information is needed by interconnect provider
* in order to set up latency allowance based on the peak BW, see
* tegra_crtc_update_memory_bandwidth().
*/
drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, new_state) {
u32 i, old_peak_bw, new_peak_bw, overlap_bw = 0;
/*
* Note that the plane's atomic check doesn't touch the
* total_peak_memory_bandwidth of an enabled plane, hence the
* current state contains the old bandwidth state from the
* previous CRTC commit.
*/
tegra_state = to_const_tegra_plane_state(plane_state);
tegra = to_tegra_plane(plane);
for_each_set_bit(i, &overlap_mask[tegra->index], 3) {
if (i == tegra->index)
continue;
if (all_planes_overlap_simultaneously)
overlap_bw += plane_peak_bw[i];
else
overlap_bw = max(overlap_bw, plane_peak_bw[i]);
}
new_peak_bw = plane_peak_bw[tegra->index] + overlap_bw;
old_peak_bw = tegra_state->total_peak_memory_bandwidth;
/*
* If the plane's peak bandwidth changed (for example the plane isn't
* overlapped anymore) and the plane isn't in the atomic state,
* then add the plane to the state so that the bandwidth gets
* updated.
*/
if (old_peak_bw != new_peak_bw) {
struct tegra_plane_state *new_tegra_state;
struct drm_plane_state *new_plane_state;
new_plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(new_plane_state))
return PTR_ERR(new_plane_state);
new_tegra_state = to_tegra_plane_state(new_plane_state);
new_tegra_state->total_peak_memory_bandwidth = new_peak_bw;
}
}
return 0;
}
static int tegra_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
int err;
err = tegra_crtc_calculate_memory_bandwidth(crtc, state);
if (err)
return err;
return 0;
}
void tegra_crtc_atomic_post_commit(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
/*
* Display bandwidth is allowed to go down only once hardware state
* is known to be armed, i.e. state was committed and VBLANK event
* received.
*/
tegra_crtc_update_memory_bandwidth(crtc, state, false);
}
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
.atomic_check = tegra_crtc_atomic_check,
.atomic_begin = tegra_crtc_atomic_begin,
.atomic_flush = tegra_crtc_atomic_flush,
.atomic_enable = tegra_crtc_atomic_enable,
.atomic_disable = tegra_crtc_atomic_disable,
};
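/*
* Interrupt handler: acknowledge all pending status bits and update the
* per-head statistics (frame end, VBLANK, window underflow/overflow and
* head underflow). VBLANK interrupts are forwarded to the DRM core.
*/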
static irqreturn_t tegra_dc_irq(int irq, void *data)
{
struct tegra_dc *dc = data;
unsigned long status;
status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
if (status & FRAME_END_INT) {
/*
dev_dbg(dc->dev, "%s(): frame end\n", __func__);
*/
dc->stats.frames_total++;
dc->stats.frames++;
}
if (status & VBLANK_INT) {
/*
dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
*/
drm_crtc_handle_vblank(&dc->base);
dc->stats.vblank_total++;
dc->stats.vblank++;
}
if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
/*
dev_dbg(dc->dev, "%s(): underflow\n", __func__);
*/
dc->stats.underflow_total++;
dc->stats.underflow++;
}
if (status & (WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT)) {
/*
dev_dbg(dc->dev, "%s(): overflow\n", __func__);
*/
dc->stats.overflow_total++;
dc->stats.overflow++;
}
if (status & HEAD_UF_INT) {
dev_dbg_ratelimited(dc->dev, "%s(): head underflow\n", __func__);
dc->stats.underflow_total++;
dc->stats.underflow++;
}
return IRQ_HANDLED;
}
static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
{
unsigned int i;
if (!dc->soc->wgrps)
return true;
for (i = 0; i < dc->soc->num_wgrps; i++) {
const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
if (wgrp->dc == dc->pipe && wgrp->num_windows > 0)
return true;
}
return false;
}
static int tegra_dc_early_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
tegra->num_crtcs++;
return 0;
}
static int tegra_dc_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->host);
unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
struct tegra_dc *dc = host1x_client_to_dc(client);
struct tegra_drm *tegra = drm->dev_private;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
int err;
/*
* The DC has been reset by now, so the VBLANK syncpoint can be
* released for general use.
*/
host1x_syncpt_release_vblank_reservation(client, 26 + dc->pipe);
/*
* XXX do not register DCs with no window groups because we cannot
* assign a primary plane to them, which in turn will cause KMS to
* crash.
*/
if (!tegra_dc_has_window_groups(dc))
return 0;
/*
* Set the display hub as the host1x client parent for the display
* controller. This is needed for the runtime reference counting that
* ensures the display hub is always powered when any of the display
* controllers are.
*/
if (dc->soc->has_nvdisplay)
client->parent = &tegra->hub->client;
dc->syncpt = host1x_syncpt_request(client, flags);
if (!dc->syncpt)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
err = host1x_client_iommu_attach(client);
if (err < 0 && err != -ENODEV) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
return err;
}
if (dc->soc->wgrps)
primary = tegra_dc_add_shared_planes(drm, dc);
else
primary = tegra_dc_add_planes(drm, dc);
if (IS_ERR(primary)) {
err = PTR_ERR(primary);
goto cleanup;
}
if (dc->soc->supports_cursor) {
cursor = tegra_dc_cursor_plane_create(drm, dc);
if (IS_ERR(cursor)) {
err = PTR_ERR(cursor);
goto cleanup;
}
} else {
/* dedicate one overlay to mouse cursor */
cursor = tegra_dc_overlay_plane_create(drm, dc, 2, true);
if (IS_ERR(cursor)) {
err = PTR_ERR(cursor);
goto cleanup;
}
}
err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor,
&tegra_crtc_funcs, NULL);
if (err < 0)
goto cleanup;
drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
/*
* Keep track of the strictest pitch alignment required by any of the
* display controllers, so that framebuffers satisfy all of them.
*/
if (dc->soc->pitch_align > tegra->pitch_align)
tegra->pitch_align = dc->soc->pitch_align;
/* track maximum resolution */
if (dc->soc->has_nvdisplay)
drm->mode_config.max_width = drm->mode_config.max_height = 16384;
else
drm->mode_config.max_width = drm->mode_config.max_height = 4096;
err = tegra_dc_rgb_init(drm, dc);
if (err < 0 && err != -ENODEV) {
dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
goto cleanup;
}
err = devm_request_irq(dc->dev, dc->irq, tegra_dc_irq, 0,
dev_name(dc->dev), dc);
if (err < 0) {
dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
err);
goto cleanup;
}
/*
* Inherit the DMA parameters (such as maximum segment size) from the
* parent host1x device.
*/
client->dev->dma_parms = client->host->dma_parms;
return 0;
cleanup:
if (!IS_ERR_OR_NULL(cursor))
drm_plane_cleanup(cursor);
if (!IS_ERR(primary))
drm_plane_cleanup(primary);
host1x_client_iommu_detach(client);
host1x_syncpt_put(dc->syncpt);
return err;
}
static int tegra_dc_exit(struct host1x_client *client)
{
struct tegra_dc *dc = host1x_client_to_dc(client);
int err;
if (!tegra_dc_has_window_groups(dc))
return 0;
/* avoid a dangling pointer just in case this disappears */
client->dev->dma_parms = NULL;
devm_free_irq(dc->dev, dc->irq, dc);
err = tegra_dc_rgb_exit(dc);
if (err) {
dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err);
return err;
}
host1x_client_iommu_detach(client);
host1x_syncpt_put(dc->syncpt);
return 0;
}
static int tegra_dc_late_exit(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
tegra->num_crtcs--;
return 0;
}
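/*
* Runtime PM: on suspend the controller is put into reset, the power
* partition (where available) is switched off and the clock is gated.
* Resume reverses this, using the powergate sequence on SoCs that have
* one and a plain clock-enable plus reset-deassert otherwise.
*/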
static int tegra_dc_runtime_suspend(struct host1x_client *client)
{
struct tegra_dc *dc = host1x_client_to_dc(client);
struct device *dev = client->dev;
int err;
err = reset_control_assert(dc->rst);
if (err < 0) {
dev_err(dev, "failed to assert reset: %d\n", err);
return err;
}
if (dc->soc->has_powergate)
tegra_powergate_power_off(dc->powergate);
clk_disable_unprepare(dc->clk);
pm_runtime_put_sync(dev);
return 0;
}
static int tegra_dc_runtime_resume(struct host1x_client *client)
{
struct tegra_dc *dc = host1x_client_to_dc(client);
struct device *dev = client->dev;
int err;
err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
}
if (dc->soc->has_powergate) {
err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
dc->rst);
if (err < 0) {
dev_err(dev, "failed to power partition: %d\n", err);
goto put_rpm;
}
} else {
err = clk_prepare_enable(dc->clk);
if (err < 0) {
dev_err(dev, "failed to enable clock: %d\n", err);
goto put_rpm;
}
err = reset_control_deassert(dc->rst);
if (err < 0) {
dev_err(dev, "failed to deassert reset: %d\n", err);
goto disable_clk;
}
}
return 0;
disable_clk:
clk_disable_unprepare(dc->clk);
put_rpm:
pm_runtime_put_sync(dev);
return err;
}
static const struct host1x_client_ops dc_client_ops = {
.early_init = tegra_dc_early_init,
.init = tegra_dc_init,
.exit = tegra_dc_exit,
.late_exit = tegra_dc_late_exit,
.suspend = tegra_dc_runtime_suspend,
.resume = tegra_dc_runtime_resume,
};
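/*
* Per-SoC capability tables: these describe which features each display
* controller generation supports (cursor, interlacing, block-linear
* scan-out, nvdisplay, ...) along with the supported plane formats and
* framebuffer modifiers.
*/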
static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
.supports_background_color = false,
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
.supports_sector_layout = false,
.has_legacy_blending = true,
.pitch_align = 8,
.has_powergate = false,
.coupled_pm = true,
.has_nvdisplay = false,
.num_primary_formats = ARRAY_SIZE(tegra20_primary_formats),
.primary_formats = tegra20_primary_formats,
.num_overlay_formats = ARRAY_SIZE(tegra20_overlay_formats),
.overlay_formats = tegra20_overlay_formats,
.modifiers = tegra20_modifiers,
.has_win_a_without_filters = true,
.has_win_b_vfilter_mem_client = true,
.has_win_c_without_vert_filter = true,
.plane_tiled_memory_bandwidth_x2 = false,
.has_pll_d2_out0 = false,
};
static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
.supports_background_color = false,
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
.supports_sector_layout = false,
.has_legacy_blending = true,
.pitch_align = 8,
.has_powergate = false,
.coupled_pm = false,
.has_nvdisplay = false,
.num_primary_formats = ARRAY_SIZE(tegra20_primary_formats),
.primary_formats = tegra20_primary_formats,
.num_overlay_formats = ARRAY_SIZE(tegra20_overlay_formats),
.overlay_formats = tegra20_overlay_formats,
.modifiers = tegra20_modifiers,
.has_win_a_without_filters = false,
.has_win_b_vfilter_mem_client = true,
.has_win_c_without_vert_filter = false,
.plane_tiled_memory_bandwidth_x2 = true,
.has_pll_d2_out0 = true,
};
static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
.supports_background_color = false,
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
.supports_sector_layout = false,
.has_legacy_blending = true,
.pitch_align = 64,
.has_powergate = true,
.coupled_pm = false,
.has_nvdisplay = false,
.num_primary_formats = ARRAY_SIZE(tegra114_primary_formats),
.primary_formats = tegra114_primary_formats,
.num_overlay_formats = ARRAY_SIZE(tegra114_overlay_formats),
.overlay_formats = tegra114_overlay_formats,
.modifiers = tegra20_modifiers,
.has_win_a_without_filters = false,
.has_win_b_vfilter_mem_client = false,
.has_win_c_without_vert_filter = false,
.plane_tiled_memory_bandwidth_x2 = true,
.has_pll_d2_out0 = true,
};
static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.supports_background_color = true,
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
.supports_sector_layout = false,
.has_legacy_blending = false,
.pitch_align = 64,
.has_powergate = true,
.coupled_pm = false,
.has_nvdisplay = false,
.num_primary_formats = ARRAY_SIZE(tegra124_primary_formats),
.primary_formats = tegra124_primary_formats,
.num_overlay_formats = ARRAY_SIZE(tegra124_overlay_formats),
.overlay_formats = tegra124_overlay_formats,
.modifiers = tegra124_modifiers,
.has_win_a_without_filters = false,
.has_win_b_vfilter_mem_client = false,
.has_win_c_without_vert_filter = false,
.plane_tiled_memory_bandwidth_x2 = false,
.has_pll_d2_out0 = true,
};
static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
.supports_background_color = true,
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
.supports_sector_layout = false,
.has_legacy_blending = false,
.pitch_align = 64,
.has_powergate = true,
.coupled_pm = false,
.has_nvdisplay = false,
.num_primary_formats = ARRAY_SIZE(tegra114_primary_formats),
.primary_formats = tegra114_primary_formats,
.num_overlay_formats = ARRAY_SIZE(tegra114_overlay_formats),
.overlay_formats = tegra114_overlay_formats,
.modifiers = tegra124_modifiers,
.has_win_a_without_filters = false,
.has_win_b_vfilter_mem_client = false,
.has_win_c_without_vert_filter = false,
.plane_tiled_memory_bandwidth_x2 = false,
.has_pll_d2_out0 = true,
};
static const struct tegra_windowgroup_soc tegra186_dc_wgrps[] = {
{
.index = 0,
.dc = 0,
.windows = (const unsigned int[]) { 0 },
.num_windows = 1,
}, {
.index = 1,
.dc = 1,
.windows = (const unsigned int[]) { 1 },
.num_windows = 1,
}, {
.index = 2,
.dc = 1,
.windows = (const unsigned int[]) { 2 },
.num_windows = 1,
}, {
.index = 3,
.dc = 2,
.windows = (const unsigned int[]) { 3 },
.num_windows = 1,
}, {
.index = 4,
.dc = 2,
.windows = (const unsigned int[]) { 4 },
.num_windows = 1,
}, {
.index = 5,
.dc = 2,
.windows = (const unsigned int[]) { 5 },
.num_windows = 1,
},
};
static const struct tegra_dc_soc_info tegra186_dc_soc_info = {
.supports_background_color = true,
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
.supports_sector_layout = false,
.has_legacy_blending = false,
.pitch_align = 64,
.has_powergate = false,
.coupled_pm = false,
.has_nvdisplay = true,
.wgrps = tegra186_dc_wgrps,
.num_wgrps = ARRAY_SIZE(tegra186_dc_wgrps),
.plane_tiled_memory_bandwidth_x2 = false,
.has_pll_d2_out0 = false,
};
static const struct tegra_windowgroup_soc tegra194_dc_wgrps[] = {
{
.index = 0,
.dc = 0,
.windows = (const unsigned int[]) { 0 },
.num_windows = 1,
}, {
.index = 1,
.dc = 1,
.windows = (const unsigned int[]) { 1 },
.num_windows = 1,
}, {
.index = 2,
.dc = 1,
.windows = (const unsigned int[]) { 2 },
.num_windows = 1,
}, {
.index = 3,
.dc = 2,
.windows = (const unsigned int[]) { 3 },
.num_windows = 1,
}, {
.index = 4,
.dc = 2,
.windows = (const unsigned int[]) { 4 },
.num_windows = 1,
}, {
.index = 5,
.dc = 2,
.windows = (const unsigned int[]) { 5 },
.num_windows = 1,
},
};
static const struct tegra_dc_soc_info tegra194_dc_soc_info = {
.supports_background_color = true,
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
.supports_sector_layout = true,
.has_legacy_blending = false,
.pitch_align = 64,
.has_powergate = false,
.coupled_pm = false,
.has_nvdisplay = true,
.wgrps = tegra194_dc_wgrps,
.num_wgrps = ARRAY_SIZE(tegra194_dc_wgrps),
.plane_tiled_memory_bandwidth_x2 = false,
.has_pll_d2_out0 = false,
};
static const struct of_device_id tegra_dc_of_match[] = {
{
.compatible = "nvidia,tegra194-dc",
.data = &tegra194_dc_soc_info,
}, {
.compatible = "nvidia,tegra186-dc",
.data = &tegra186_dc_soc_info,
}, {
.compatible = "nvidia,tegra210-dc",
.data = &tegra210_dc_soc_info,
}, {
.compatible = "nvidia,tegra124-dc",
.data = &tegra124_dc_soc_info,
}, {
.compatible = "nvidia,tegra114-dc",
.data = &tegra114_dc_soc_info,
}, {
.compatible = "nvidia,tegra30-dc",
.data = &tegra30_dc_soc_info,
}, {
.compatible = "nvidia,tegra20-dc",
.data = &tegra20_dc_soc_info,
}, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, tegra_dc_of_match);
static int tegra_dc_parse_dt(struct tegra_dc *dc)
{
struct device_node *np;
u32 value = 0;
int err;
err = of_property_read_u32(dc->dev->of_node, "nvidia,head", &value);
if (err < 0) {
dev_err(dc->dev, "missing \"nvidia,head\" property\n");
/*
* If the nvidia,head property isn't present, try to find the
* correct head number by looking up the position of this
* display controller's node within the device tree. Assuming
* that the nodes are ordered properly in the DTS file and
* that the translation into a flattened device tree blob
* preserves that ordering this will actually yield the right
* head number.
*
* If those assumptions don't hold, this will still work for
* cases where only a single display controller is used.
*/
for_each_matching_node(np, tegra_dc_of_match) {
if (np == dc->dev->of_node) {
of_node_put(np);
break;
}
value++;
}
}
dc->pipe = value;
return 0;
}
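/*
 * driver_find_device() match callback: selects the display controller
 * instance that drives the given pipe.
 */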
static int tegra_dc_match_by_pipe(struct device *dev, const void *data)
{
struct tegra_dc *dc = dev_get_drvdata(dev);
unsigned int pipe = (unsigned long)(void *)data;
return dc->pipe == pipe;
}
static int tegra_dc_couple(struct tegra_dc *dc)
{
/*
* On Tegra20, DC1 requires DC0 to be taken out of reset in order to
* be enabled, otherwise CPU hangs on writing to CMD_DISPLAY_COMMAND /
* POWER_CONTROL registers during CRTC enabling.
*/
if (dc->soc->coupled_pm && dc->pipe == 1) {
struct device *companion;
struct tegra_dc *parent;
companion = driver_find_device(dc->dev->driver, NULL, (const void *)0,
tegra_dc_match_by_pipe);
if (!companion)
return -EPROBE_DEFER;
parent = dev_get_drvdata(companion);
dc->client.parent = &parent->client;
dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
}
return 0;
}
static int tegra_dc_init_opp_table(struct tegra_dc *dc)
{
struct tegra_core_opp_params opp_params = {};
int err;
err = devm_tegra_core_dev_init_opp_table(dc->dev, &opp_params);
if (err && err != -ENODEV)
return err;
if (err)
dc->has_opp_table = false;
else
dc->has_opp_table = true;
return 0;
}
static int tegra_dc_probe(struct platform_device *pdev)
{
u64 dma_mask = dma_get_mask(pdev->dev.parent);
struct tegra_dc *dc;
int err;
err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
if (err < 0) {
dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
return err;
}
dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
if (!dc)
return -ENOMEM;
dc->soc = of_device_get_match_data(&pdev->dev);
INIT_LIST_HEAD(&dc->list);
dc->dev = &pdev->dev;
err = tegra_dc_parse_dt(dc);
if (err < 0)
return err;
err = tegra_dc_couple(dc);
if (err < 0)
return err;
dc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dc->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(dc->clk);
}
dc->rst = devm_reset_control_get(&pdev->dev, "dc");
if (IS_ERR(dc->rst)) {
dev_err(&pdev->dev, "failed to get reset\n");
return PTR_ERR(dc->rst);
}
/* assert reset and disable clock */
err = clk_prepare_enable(dc->clk);
if (err < 0)
return err;
usleep_range(2000, 4000);
err = reset_control_assert(dc->rst);
if (err < 0) {
clk_disable_unprepare(dc->clk);
return err;
}
usleep_range(2000, 4000);
clk_disable_unprepare(dc->clk);
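/* On SoCs with power gating, head 0 uses the DIS partition and head 1 uses DISB. */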
if (dc->soc->has_powergate) {
if (dc->pipe == 0)
dc->powergate = TEGRA_POWERGATE_DIS;
else
dc->powergate = TEGRA_POWERGATE_DISB;
tegra_powergate_power_off(dc->powergate);
}
err = tegra_dc_init_opp_table(dc);
if (err < 0)
return err;
dc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dc->regs))
return PTR_ERR(dc->regs);
dc->irq = platform_get_irq(pdev, 0);
if (dc->irq < 0)
return -ENXIO;
err = tegra_dc_rgb_probe(dc);
if (err < 0 && err != -ENODEV)
return dev_err_probe(&pdev->dev, err,
"failed to probe RGB output\n");
platform_set_drvdata(pdev, dc);
pm_runtime_enable(&pdev->dev);
INIT_LIST_HEAD(&dc->client.list);
dc->client.ops = &dc_client_ops;
dc->client.dev = &pdev->dev;
err = host1x_client_register(&dc->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
goto disable_pm;
}
return 0;
disable_pm:
pm_runtime_disable(&pdev->dev);
tegra_dc_rgb_remove(dc);
return err;
}
static void tegra_dc_remove(struct platform_device *pdev)
{
struct tegra_dc *dc = platform_get_drvdata(pdev);
host1x_client_unregister(&dc->client);
tegra_dc_rgb_remove(dc);
pm_runtime_disable(&pdev->dev);
}
struct platform_driver tegra_dc_driver = {
.driver = {
.name = "tegra-dc",
.of_match_table = tegra_dc_of_match,
},
.probe = tegra_dc_probe,
.remove_new = tegra_dc_remove,
};
| linux-master | drivers/gpu/drm/tegra/dc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022, NVIDIA Corporation.
*/
#include <linux/dev_printk.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include "riscv.h"
#define RISCV_CPUCTL 0x4388
#define RISCV_CPUCTL_STARTCPU_TRUE (1 << 0)
#define RISCV_BR_RETCODE 0x465c
#define RISCV_BR_RETCODE_RESULT_V(x) ((x) & 0x3)
#define RISCV_BR_RETCODE_RESULT_PASS_V 3
#define RISCV_BCR_CTRL 0x4668
#define RISCV_BCR_CTRL_CORE_SELECT_RISCV (1 << 4)
#define RISCV_BCR_DMACFG 0x466c
#define RISCV_BCR_DMACFG_TARGET_LOCAL_FB (0 << 0)
#define RISCV_BCR_DMACFG_LOCK_LOCKED (1 << 31)
#define RISCV_BCR_DMAADDR_PKCPARAM_LO 0x4670
#define RISCV_BCR_DMAADDR_PKCPARAM_HI 0x4674
#define RISCV_BCR_DMAADDR_FMCCODE_LO 0x4678
#define RISCV_BCR_DMAADDR_FMCCODE_HI 0x467c
#define RISCV_BCR_DMAADDR_FMCDATA_LO 0x4680
#define RISCV_BCR_DMAADDR_FMCDATA_HI 0x4684
#define RISCV_BCR_DMACFG_SEC 0x4694
#define RISCV_BCR_DMACFG_SEC_GSCID(v) ((v) << 16)
static void riscv_writel(struct tegra_drm_riscv *riscv, u32 value, u32 offset)
{
writel(value, riscv->regs + offset);
}
int tegra_drm_riscv_read_descriptors(struct tegra_drm_riscv *riscv)
{
struct tegra_drm_riscv_descriptor *bl = &riscv->bl_desc;
struct tegra_drm_riscv_descriptor *os = &riscv->os_desc;
const struct device_node *np = riscv->dev->of_node;
int err;
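/* Read a required u32 firmware-layout property from the DT node or fail. */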
#define READ_PROP(name, location) \
err = of_property_read_u32(np, name, location); \
if (err) { \
dev_err(riscv->dev, "failed to read " name ": %d\n", err); \
return err; \
}
READ_PROP("nvidia,bl-manifest-offset", &bl->manifest_offset);
READ_PROP("nvidia,bl-code-offset", &bl->code_offset);
READ_PROP("nvidia,bl-data-offset", &bl->data_offset);
READ_PROP("nvidia,os-manifest-offset", &os->manifest_offset);
READ_PROP("nvidia,os-code-offset", &os->code_offset);
READ_PROP("nvidia,os-data-offset", &os->data_offset);
#undef READ_PROP
if (bl->manifest_offset == 0 && bl->code_offset == 0 &&
bl->data_offset == 0 && os->manifest_offset == 0 &&
os->code_offset == 0 && os->data_offset == 0) {
dev_err(riscv->dev, "descriptors not available\n");
return -EINVAL;
}
return 0;
}
int tegra_drm_riscv_boot_bootrom(struct tegra_drm_riscv *riscv, phys_addr_t image_address,
u32 gscid, const struct tegra_drm_riscv_descriptor *desc)
{
phys_addr_t addr;
int err;
u32 val;
riscv_writel(riscv, RISCV_BCR_CTRL_CORE_SELECT_RISCV, RISCV_BCR_CTRL);
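/*
 * Program the boot ROM DMA descriptors. The address registers take the
 * physical address in 256-byte units, hence the >> 8 shifts below.
 */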
addr = image_address + desc->manifest_offset;
riscv_writel(riscv, lower_32_bits(addr >> 8), RISCV_BCR_DMAADDR_PKCPARAM_LO);
riscv_writel(riscv, upper_32_bits(addr >> 8), RISCV_BCR_DMAADDR_PKCPARAM_HI);
addr = image_address + desc->code_offset;
riscv_writel(riscv, lower_32_bits(addr >> 8), RISCV_BCR_DMAADDR_FMCCODE_LO);
riscv_writel(riscv, upper_32_bits(addr >> 8), RISCV_BCR_DMAADDR_FMCCODE_HI);
addr = image_address + desc->data_offset;
riscv_writel(riscv, lower_32_bits(addr >> 8), RISCV_BCR_DMAADDR_FMCDATA_LO);
riscv_writel(riscv, upper_32_bits(addr >> 8), RISCV_BCR_DMAADDR_FMCDATA_HI);
riscv_writel(riscv, RISCV_BCR_DMACFG_SEC_GSCID(gscid), RISCV_BCR_DMACFG_SEC);
riscv_writel(riscv,
RISCV_BCR_DMACFG_TARGET_LOCAL_FB | RISCV_BCR_DMACFG_LOCK_LOCKED, RISCV_BCR_DMACFG);
riscv_writel(riscv, RISCV_CPUCTL_STARTCPU_TRUE, RISCV_CPUCTL);
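/* Wait for the boot ROM to report PASS in its return code register. */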
err = readl_poll_timeout(
riscv->regs + RISCV_BR_RETCODE, val,
RISCV_BR_RETCODE_RESULT_V(val) == RISCV_BR_RETCODE_RESULT_PASS_V,
10, 100000);
if (err) {
dev_err(riscv->dev, "error during bootrom execution. BR_RETCODE=%d\n", val);
return err;
}
return 0;
}
| linux-master | drivers/gpu/drm/tegra/riscv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, NVIDIA Corporation.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <soc/tegra/pmc.h>
#include "drm.h"
#include "falcon.h"
#include "vic.h"
struct vic_config {
const char *firmware;
unsigned int version;
bool supports_sid;
};
struct vic {
struct falcon falcon;
void __iomem *regs;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct device *dev;
struct clk *clk;
struct reset_control *rst;
bool can_use_context;
/* Platform configuration */
const struct vic_config *config;
};
static inline struct vic *to_vic(struct tegra_drm_client *client)
{
return container_of(client, struct vic, client);
}
static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
{
writel(value, vic->regs + offset);
}
static int vic_boot(struct vic *vic)
{
u32 fce_ucode_size, fce_bin_data_offset, stream_id;
void *hdr;
int err = 0;
if (vic->config->supports_sid && tegra_dev_iommu_get_stream_id(vic->dev, &stream_id)) {
u32 value;
value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
TRANSCFG_ATT(0, TRANSCFG_SID_HW);
vic_writel(vic, value, VIC_TFBIF_TRANSCFG);
/*
* STREAMID0 is used for input/output buffers. Initialize it to SID_VIC in case
* context isolation is not enabled, and SID_VIC is used for both firmware and
* data buffers.
*
* If context isolation is enabled, it will be overridden by the SETSTREAMID
* opcode as part of each job.
*/
vic_writel(vic, stream_id, VIC_THI_STREAMID0);
/* STREAMID1 is used for firmware loading. */
vic_writel(vic, stream_id, VIC_THI_STREAMID1);
}
/* setup clockgating registers */
vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
CG_IDLE_CG_EN |
CG_WAKEUP_DLY_CNT(4),
NV_PVIC_MISC_PRI_VIC_CG);
err = falcon_boot(&vic->falcon);
if (err < 0)
return err;
hdr = vic->falcon.firmware.virt;
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
/* Old VIC firmware needs kernel help with setting up FCE microcode. */
if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
hdr = vic->falcon.firmware.virt +
*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
fce_ucode_size);
falcon_execute_method(
&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
(vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);
}
err = falcon_wait_idle(&vic->falcon);
if (err < 0) {
dev_err(vic->dev,
"failed to set application ID and FCE base\n");
return err;
}
return 0;
}
static int vic_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
err = host1x_client_iommu_attach(client);
if (err < 0 && err != -ENODEV) {
dev_err(vic->dev, "failed to attach to domain: %d\n", err);
return err;
}
vic->channel = host1x_channel_request(client);
if (!vic->channel) {
err = -ENOMEM;
goto detach;
}
client->syncpts[0] = host1x_syncpt_request(client, 0);
if (!client->syncpts[0]) {
err = -ENOMEM;
goto free_channel;
}
err = tegra_drm_register_client(tegra, drm);
if (err < 0)
goto free_syncpt;
/*
* Inherit the DMA parameters (such as maximum segment size) from the
* parent host1x device.
*/
client->dev->dma_parms = client->host->dma_parms;
return 0;
free_syncpt:
host1x_syncpt_put(client->syncpts[0]);
free_channel:
host1x_channel_put(vic->channel);
detach:
host1x_client_iommu_detach(client);
return err;
}
static int vic_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
/* avoid a dangling pointer just in case this disappears */
client->dev->dma_parms = NULL;
err = tegra_drm_unregister_client(tegra, drm);
if (err < 0)
return err;
pm_runtime_dont_use_autosuspend(client->dev);
pm_runtime_force_suspend(client->dev);
host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(vic->channel);
host1x_client_iommu_detach(client);
vic->channel = NULL;
if (client->group) {
dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
vic->falcon.firmware.size, DMA_TO_DEVICE);
tegra_drm_free(tegra, vic->falcon.firmware.size,
vic->falcon.firmware.virt,
vic->falcon.firmware.iova);
} else {
dma_free_coherent(vic->dev, vic->falcon.firmware.size,
vic->falcon.firmware.virt,
vic->falcon.firmware.iova);
}
return 0;
}
static const struct host1x_client_ops vic_client_ops = {
.init = vic_init,
.exit = vic_exit,
};
static int vic_load_firmware(struct vic *vic)
{
struct host1x_client *client = &vic->client.base;
struct tegra_drm *tegra = vic->client.drm;
static DEFINE_MUTEX(lock);
u32 fce_bin_data_offset;
dma_addr_t iova;
size_t size;
void *virt;
int err;
mutex_lock(&lock);
if (vic->falcon.firmware.virt) {
err = 0;
goto unlock;
}
err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
if (err < 0)
goto unlock;
size = vic->falcon.firmware.size;
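/*
 * Without an IOMMU group the firmware is placed in a DMA-coherent buffer;
 * otherwise it is allocated from the shared IOVA domain.
 */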
if (!client->group) {
virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
if (!virt) {
err = -ENOMEM;
goto unlock;
}
} else {
virt = tegra_drm_alloc(tegra, size, &iova);
if (IS_ERR(virt)) {
err = PTR_ERR(virt);
goto unlock;
}
}
vic->falcon.firmware.virt = virt;
vic->falcon.firmware.iova = iova;
err = falcon_load_firmware(&vic->falcon);
if (err < 0)
goto cleanup;
/*
* In this case we have received an IOVA from the shared domain, so we
* need to make sure to get the physical address so that the DMA API
* knows what memory pages to flush the cache for.
*/
if (client->group) {
dma_addr_t phys;
phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);
err = dma_mapping_error(vic->dev, phys);
if (err < 0)
goto cleanup;
vic->falcon.firmware.phys = phys;
}
/*
* Check if firmware is new enough to not require mapping firmware
* to data buffer domains.
*/
fce_bin_data_offset = *(u32 *)(virt + VIC_UCODE_FCE_DATA_OFFSET);
if (!vic->config->supports_sid) {
vic->can_use_context = false;
} else if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
/*
* Firmware will access FCE through STREAMID0, so context
* isolation cannot be used.
*/
vic->can_use_context = false;
dev_warn_once(vic->dev, "context isolation disabled due to old firmware\n");
} else {
vic->can_use_context = true;
}
unlock:
mutex_unlock(&lock);
return err;
cleanup:
if (!client->group)
dma_free_coherent(vic->dev, size, virt, iova);
else
tegra_drm_free(tegra, size, virt, iova);
mutex_unlock(&lock);
return err;
}
static int __maybe_unused vic_runtime_resume(struct device *dev)
{
struct vic *vic = dev_get_drvdata(dev);
int err;
err = clk_prepare_enable(vic->clk);
if (err < 0)
return err;
usleep_range(10, 20);
err = reset_control_deassert(vic->rst);
if (err < 0)
goto disable;
usleep_range(10, 20);
err = vic_load_firmware(vic);
if (err < 0)
goto assert;
err = vic_boot(vic);
if (err < 0)
goto assert;
return 0;
assert:
reset_control_assert(vic->rst);
disable:
clk_disable_unprepare(vic->clk);
return err;
}
static int __maybe_unused vic_runtime_suspend(struct device *dev)
{
struct vic *vic = dev_get_drvdata(dev);
int err;
host1x_channel_stop(vic->channel);
err = reset_control_assert(vic->rst);
if (err < 0)
return err;
usleep_range(2000, 4000);
clk_disable_unprepare(vic->clk);
return 0;
}
static int vic_open_channel(struct tegra_drm_client *client,
struct tegra_drm_context *context)
{
struct vic *vic = to_vic(client);
context->channel = host1x_channel_get(vic->channel);
if (!context->channel)
return -ENOMEM;
return 0;
}
static void vic_close_channel(struct tegra_drm_context *context)
{
host1x_channel_put(context->channel);
}
static int vic_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
struct vic *vic = to_vic(client);
int err;
/* This doesn't access HW so it's safe to call without powering up. */
err = vic_load_firmware(vic);
if (err < 0)
return err;
*supported = vic->can_use_context;
return 0;
}
static const struct tegra_drm_client_ops vic_ops = {
.open_channel = vic_open_channel,
.close_channel = vic_close_channel,
.submit = tegra_drm_submit,
.get_streamid_offset = tegra_drm_get_streamid_offset_thi,
.can_use_memory_ctx = vic_can_use_memory_ctx,
};
#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"
static const struct vic_config vic_t124_config = {
.firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
.version = 0x40,
.supports_sid = false,
};
#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"
static const struct vic_config vic_t210_config = {
.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
.version = 0x21,
.supports_sid = false,
};
#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"
static const struct vic_config vic_t186_config = {
.firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
.version = 0x18,
.supports_sid = true,
};
#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"
static const struct vic_config vic_t194_config = {
.firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
.version = 0x19,
.supports_sid = true,
};
#define NVIDIA_TEGRA_234_VIC_FIRMWARE "nvidia/tegra234/vic.bin"
static const struct vic_config vic_t234_config = {
.firmware = NVIDIA_TEGRA_234_VIC_FIRMWARE,
.version = 0x23,
.supports_sid = true,
};
static const struct of_device_id tegra_vic_of_match[] = {
{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
{ .compatible = "nvidia,tegra234-vic", .data = &vic_t234_config },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_vic_of_match);
static int vic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct host1x_syncpt **syncpts;
struct vic *vic;
int err;
/* inherit DMA mask from host1x parent */
err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
if (err < 0) {
dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
return err;
}
vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
if (!vic)
return -ENOMEM;
vic->config = of_device_get_match_data(dev);
syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
if (!syncpts)
return -ENOMEM;
vic->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vic->regs))
return PTR_ERR(vic->regs);
vic->clk = devm_clk_get(dev, NULL);
if (IS_ERR(vic->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(vic->clk);
}
err = clk_set_rate(vic->clk, ULONG_MAX);
if (err < 0) {
dev_err(&pdev->dev, "failed to set clock rate\n");
return err;
}
if (!dev->pm_domain) {
vic->rst = devm_reset_control_get(dev, "vic");
if (IS_ERR(vic->rst)) {
dev_err(&pdev->dev, "failed to get reset\n");
return PTR_ERR(vic->rst);
}
}
vic->falcon.dev = dev;
vic->falcon.regs = vic->regs;
err = falcon_init(&vic->falcon);
if (err < 0)
return err;
platform_set_drvdata(pdev, vic);
INIT_LIST_HEAD(&vic->client.base.list);
vic->client.base.ops = &vic_client_ops;
vic->client.base.dev = dev;
vic->client.base.class = HOST1X_CLASS_VIC;
vic->client.base.syncpts = syncpts;
vic->client.base.num_syncpts = 1;
vic->dev = dev;
INIT_LIST_HEAD(&vic->client.list);
vic->client.version = vic->config->version;
vic->client.ops = &vic_ops;
err = host1x_client_register(&vic->client.base);
if (err < 0) {
dev_err(dev, "failed to register host1x client: %d\n", err);
goto exit_falcon;
}
pm_runtime_enable(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 500);
return 0;
exit_falcon:
falcon_exit(&vic->falcon);
return err;
}
static void vic_remove(struct platform_device *pdev)
{
struct vic *vic = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
host1x_client_unregister(&vic->client.base);
falcon_exit(&vic->falcon);
}
static const struct dev_pm_ops vic_pm_ops = {
RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
struct platform_driver tegra_vic_driver = {
.driver = {
.name = "tegra-vic",
.of_match_table = tegra_vic_of_match,
.pm = &vic_pm_ops
},
.probe = vic_probe,
.remove_new = vic_remove,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_234_VIC_FIRMWARE);
#endif
| linux-master | drivers/gpu/drm/tegra/vic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012-2013 Avionic Design GmbH
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*
* Based on the KMS/FB DMA helpers
* Copyright (C) 2012 Analog Devices Inc.
*/
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include "drm.h"
#include "gem.h"
static int tegra_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *helper = info->par;
struct tegra_bo *bo;
int err;
bo = tegra_fb_get_plane(helper->fb, 0);
err = drm_gem_mmap_obj(&bo->gem, bo->gem.size, vma);
if (err < 0)
return err;
return __tegra_gem_mmap(&bo->gem, vma);
}
static void tegra_fbdev_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
struct drm_framebuffer *fb = helper->fb;
struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
drm_fb_helper_fini(helper);
/* Undo the special mapping we made in fbdev probe. */
if (bo->pages) {
vunmap(bo->vaddr);
bo->vaddr = NULL;
}
drm_framebuffer_remove(fb);
drm_client_release(&helper->client);
drm_fb_helper_unprepare(helper);
kfree(helper);
}
static const struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
__FB_DEFAULT_DMAMEM_OPS_RDWR,
DRM_FB_HELPER_DEFAULT_OPS,
__FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_mmap = tegra_fb_mmap,
.fb_destroy = tegra_fbdev_fb_destroy,
};
static int tegra_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct tegra_drm *tegra = helper->dev->dev_private;
struct drm_device *drm = helper->dev;
struct drm_mode_fb_cmd2 cmd = { 0 };
unsigned int bytes_per_pixel;
struct drm_framebuffer *fb;
unsigned long offset;
struct fb_info *info;
struct tegra_bo *bo;
size_t size;
int err;
bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
cmd.width = sizes->surface_width;
cmd.height = sizes->surface_height;
cmd.pitches[0] = round_up(sizes->surface_width * bytes_per_pixel,
tegra->pitch_align);
cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
size = cmd.pitches[0] * cmd.height;
bo = tegra_bo_create(drm, size, 0);
if (IS_ERR(bo))
return PTR_ERR(bo);
info = drm_fb_helper_alloc_info(helper);
if (IS_ERR(info)) {
dev_err(drm->dev, "failed to allocate framebuffer info\n");
drm_gem_object_put(&bo->gem);
return PTR_ERR(info);
}
fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
if (IS_ERR(fb)) {
err = PTR_ERR(fb);
dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
err);
drm_gem_object_put(&bo->gem);
return PTR_ERR(fb);
}
helper->fb = fb;
helper->info = info;
info->fbops = &tegra_fb_ops;
drm_fb_helper_fill_info(info, helper, sizes);
offset = info->var.xoffset * bytes_per_pixel +
info->var.yoffset * fb->pitches[0];
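/* Page-backed buffers need a kernel virtual mapping for fbdev CPU access. */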
if (bo->pages) {
bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
if (!bo->vaddr) {
dev_err(drm->dev, "failed to vmap() framebuffer\n");
err = -ENOMEM;
goto destroy;
}
}
info->flags |= FBINFO_VIRTFB;
info->screen_buffer = bo->vaddr + offset;
info->screen_size = size;
info->fix.smem_start = (unsigned long)(bo->iova + offset);
info->fix.smem_len = size;
return 0;
destroy:
drm_framebuffer_remove(fb);
return err;
}
static const struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
.fb_probe = tegra_fbdev_probe,
};
/*
* struct drm_client
*/
static void tegra_fbdev_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
if (fb_helper->info) {
drm_fb_helper_unregister_info(fb_helper);
} else {
drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
}
static int tegra_fbdev_client_restore(struct drm_client_dev *client)
{
drm_fb_helper_lastclose(client->dev);
return 0;
}
static int tegra_fbdev_client_hotplug(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
struct drm_device *dev = client->dev;
int ret;
if (dev->fb_helper)
return drm_fb_helper_hotplug_event(dev->fb_helper);
ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto err_drm_err;
if (!drm_drv_uses_atomic_modeset(dev))
drm_helper_disable_unused_functions(dev);
ret = drm_fb_helper_initial_config(fb_helper);
if (ret)
goto err_drm_fb_helper_fini;
return 0;
err_drm_fb_helper_fini:
drm_fb_helper_fini(fb_helper);
err_drm_err:
drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret);
return ret;
}
static const struct drm_client_funcs tegra_fbdev_client_funcs = {
.owner = THIS_MODULE,
.unregister = tegra_fbdev_client_unregister,
.restore = tegra_fbdev_client_restore,
.hotplug = tegra_fbdev_client_hotplug,
};
void tegra_fbdev_setup(struct drm_device *dev)
{
struct drm_fb_helper *helper;
int ret;
drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
helper = kzalloc(sizeof(*helper), GFP_KERNEL);
if (!helper)
return;
drm_fb_helper_prepare(dev, helper, 32, &tegra_fb_helper_funcs);
ret = drm_client_init(dev, &helper->client, "fbdev", &tegra_fbdev_client_funcs);
if (ret)
goto err_drm_client_init;
drm_client_register(&helper->client);
return;
err_drm_client_init:
drm_fb_helper_unprepare(helper);
kfree(helper);
}
| linux-master | drivers/gpu/drm/tegra/fbdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include "drm.h"
#include "dc.h"
#include "plane.h"
#define NFB 24
static const u32 tegra_shared_plane_formats[] = {
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
/* new on Tegra114 */
DRM_FORMAT_ABGR4444,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_BGRX5551,
DRM_FORMAT_BGR565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
/* planar formats */
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
};
static const u64 tegra_shared_plane_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
/*
* The GPU sector layout is only supported on Tegra194, but these will
* be filtered out later on by ->format_mod_supported() on SoCs where
* it isn't supported.
*/
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
/* sentinel */
DRM_FORMAT_MOD_INVALID
};
static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
unsigned int offset)
{
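/*
 * Translate common window register offsets (the 0x500, 0x700 and 0x800
 * based ranges) into this plane's register aperture.
 */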
if (offset >= 0x500 && offset <= 0x581) {
offset = 0x000 + (offset - 0x500);
return plane->offset + offset;
}
if (offset >= 0x700 && offset <= 0x73c) {
offset = 0x180 + (offset - 0x700);
return plane->offset + offset;
}
if (offset >= 0x800 && offset <= 0x83e) {
offset = 0x1c0 + (offset - 0x800);
return plane->offset + offset;
}
dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
return plane->offset + offset;
}
static inline u32 tegra_plane_readl(struct tegra_plane *plane,
unsigned int offset)
{
return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
}
static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
unsigned int offset)
{
tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
}
static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
{
int err = 0;
mutex_lock(&wgrp->lock);
if (wgrp->usecount == 0) {
err = host1x_client_resume(wgrp->parent);
if (err < 0) {
dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
goto unlock;
}
reset_control_deassert(wgrp->rst);
}
wgrp->usecount++;
unlock:
mutex_unlock(&wgrp->lock);
return err;
}
static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
{
int err;
mutex_lock(&wgrp->lock);
if (wgrp->usecount == 1) {
err = reset_control_assert(wgrp->rst);
if (err < 0) {
pr_err("failed to assert reset for window group %u\n",
wgrp->index);
}
host1x_client_suspend(wgrp->parent);
}
wgrp->usecount--;
mutex_unlock(&wgrp->lock);
}
int tegra_display_hub_prepare(struct tegra_display_hub *hub)
{
unsigned int i;
/*
* XXX Enabling/disabling windowgroups needs to happen when the owner
* display controller is disabled. There's currently no good point at
* which this could be executed, so unconditionally enable all window
* groups for now.
*/
for (i = 0; i < hub->soc->num_wgrps; i++) {
struct tegra_windowgroup *wgrp = &hub->wgrps[i];
/* Skip orphaned window group whose parent DC is disabled */
if (wgrp->parent)
tegra_windowgroup_enable(wgrp);
}
return 0;
}
void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
{
unsigned int i;
/*
* XXX Remove this once window groups can be more fine-grainedly
* enabled and disabled.
*/
for (i = 0; i < hub->soc->num_wgrps; i++) {
struct tegra_windowgroup *wgrp = &hub->wgrps[i];
/* Skip orphaned window group whose parent DC is disabled */
if (wgrp->parent)
tegra_windowgroup_disable(wgrp);
}
}
static void tegra_shared_plane_update(struct tegra_plane *plane)
{
struct tegra_dc *dc = plane->dc;
unsigned long timeout;
u32 mask, value;
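/* Request a state update for this window and wait (up to 1s) for the request bits to clear. */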
mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
timeout = jiffies + msecs_to_jiffies(1000);
while (time_before(jiffies, timeout)) {
value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
if ((value & mask) == 0)
break;
usleep_range(100, 400);
}
}
static void tegra_shared_plane_activate(struct tegra_plane *plane)
{
struct tegra_dc *dc = plane->dc;
unsigned long timeout;
u32 mask, value;
mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
timeout = jiffies + msecs_to_jiffies(1000);
while (time_before(jiffies, timeout)) {
value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
if ((value & mask) == 0)
break;
usleep_range(100, 400);
}
}
static unsigned int
tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
{
unsigned int offset =
tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
return tegra_dc_readl(dc, offset) & OWNER_MASK;
}
static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
struct tegra_plane *plane)
{
struct device *dev = dc->dev;
if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
if (plane->dc == dc)
return true;
dev_WARN(dev, "head %u owns window %u but is not attached\n",
dc->pipe, plane->index);
}
return false;
}
static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
struct tegra_dc *new)
{
unsigned int offset =
tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
struct tegra_dc *old = plane->dc, *dc = new ? new : old;
struct device *dev = new ? new->dev : old->dev;
unsigned int owner, index = plane->index;
u32 value;
value = tegra_dc_readl(dc, offset);
owner = value & OWNER_MASK;
if (new && (owner != OWNER_MASK && owner != new->pipe)) {
dev_WARN(dev, "window %u owned by head %u\n", index, owner);
return -EBUSY;
}
/*
* This seems to happen whenever the head has been disabled with one
* or more windows being active. This is harmless because we'll just
* reassign the window to the new head anyway.
*/
if (old && owner == OWNER_MASK)
dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
old->pipe, owner);
value &= ~OWNER_MASK;
if (new)
value |= OWNER(new->pipe);
else
value |= OWNER_MASK;
tegra_dc_writel(dc, value, offset);
plane->dc = new;
return 0;
}
static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
{
static const unsigned int coeffs[192] = {
0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
};
unsigned int ratio, row, column;
for (ratio = 0; ratio <= 2; ratio++) {
for (row = 0; row <= 15; row++) {
for (column = 0; column <= 3; column++) {
unsigned int index = (ratio << 6) + (row << 2) + column;
u32 value;
value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
tegra_plane_writel(plane, value,
DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
}
}
}
}
static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
struct tegra_plane *plane)
{
u32 value;
int err;
if (!tegra_dc_owns_shared_plane(dc, plane)) {
err = tegra_shared_plane_set_owner(plane, dc);
if (err < 0)
return;
}
value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
value |= MODE_FOUR_LINES;
tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
value = SLOTS(1);
tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
/* disable watermark */
value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
value &= ~LATENCY_CTL_MODE_ENABLE;
tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
value |= WATERMARK_MASK;
tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
/* pipe meter */
value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
/* mempool entries */
value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
value = MEMPOOL_ENTRIES(0x331);
tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
value &= ~THREAD_NUM_MASK;
value |= THREAD_NUM(plane->base.index);
value |= THREAD_GROUP_ENABLE;
tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);
tegra_shared_plane_setup_scaler(plane);
tegra_shared_plane_update(plane);
tegra_shared_plane_activate(plane);
}
static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
struct tegra_plane *plane)
{
tegra_shared_plane_set_owner(plane, NULL);
}
static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
struct tegra_bo_tiling *tiling = &plane_state->tiling;
struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
int err;
/* no need for further checks if the plane is being disabled */
if (!new_plane_state->crtc || !new_plane_state->fb)
return 0;
err = tegra_plane_format(new_plane_state->fb->format->format,
&plane_state->format,
&plane_state->swap);
if (err < 0)
return err;
err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
if (err < 0)
return err;
if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
!dc->soc->supports_block_linear) {
DRM_ERROR("hardware doesn't support block linear mode\n");
return -EINVAL;
}
if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
!dc->soc->supports_sector_layout) {
DRM_ERROR("hardware doesn't support GPU sector layout\n");
return -EINVAL;
}
/*
* Tegra doesn't support different strides for U and V planes so we
* error out if the user tries to display a framebuffer with such a
* configuration.
*/
if (new_plane_state->fb->format->num_planes > 2) {
if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
DRM_ERROR("unsupported UV-plane configuration\n");
return -EINVAL;
}
}
/* XXX scaling is not yet supported, add a check here */
err = tegra_plane_state_add(&tegra->base, new_plane_state);
if (err < 0)
return err;
return 0;
}
static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_dc *dc;
u32 value;
int err;
/* nothing to do without a previous CRTC */
if (!old_state || !old_state->crtc)
return;
dc = to_tegra_dc(old_state->crtc);
err = host1x_client_resume(&dc->client);
if (err < 0) {
dev_err(dc->dev, "failed to resume: %d\n", err);
return;
}
/*
* XXX Legacy helpers seem to sometimes call ->atomic_disable() even
* on planes that are already disabled. Make sure we fall back to the
* head for this particular state instead of crashing.
*/
if (WARN_ON(p->dc == NULL))
p->dc = dc;
value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
value &= ~WIN_ENABLE;
tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
tegra_dc_remove_shared_plane(dc, p);
host1x_client_suspend(&dc->client);
}
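/*
 * Compute the scaler phase increment as a fixed-point in/out ratio with
 * NFB fractional bits, rounded to nearest.
 */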
static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
{
u64 tmp, tmp1, tmp2;
tmp = (u64)dfixed_trunc(in);
tmp2 = (u64)out;
tmp1 = (tmp << NFB) + (tmp2 >> 1);
do_div(tmp1, tmp2);
return lower_32_bits(tmp1);
}
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
unsigned int zpos = new_state->normalized_zpos;
struct drm_framebuffer *fb = new_state->fb;
struct tegra_plane *p = to_tegra_plane(plane);
u32 value, min_width, bypass = 0;
dma_addr_t base, addr_flag = 0;
unsigned int bpc, planes;
bool yuv;
int err;
/* nothing to do without a CRTC and framebuffer */
if (!new_state->crtc || !new_state->fb)
return;
if (!new_state->visible) {
tegra_shared_plane_atomic_disable(plane, state);
return;
}
err = host1x_client_resume(&dc->client);
if (err < 0) {
dev_err(dc->dev, "failed to resume: %d\n", err);
return;
}
yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planes, &bpc);
tegra_dc_assign_shared_plane(dc, p);
tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);
/* blending */
value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);
value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);
value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);
/* scaling */
min_width = min(new_state->src_w >> 16, new_state->crtc_w);
value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);
if (min_width < MAX_PIXELS_5TAP444(value)) {
value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
} else {
value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);
if (min_width < MAX_PIXELS_2TAP444(value))
value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
else
dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
}
value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);
if (new_state->src_w != new_state->crtc_w << 16) {
fixed20_12 width = dfixed_init(new_state->src_w >> 16);
u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
u32 init = (1 << (NFB - 1)) + (incr >> 1);
tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
} else {
bypass |= INPUT_SCALER_HBYPASS;
}
if (new_state->src_h != new_state->crtc_h << 16) {
fixed20_12 height = dfixed_init(new_state->src_h >> 16);
u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
u32 init = (1 << (NFB - 1)) + (incr >> 1);
tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
} else {
bypass |= INPUT_SCALER_VBYPASS;
}
tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
/* disable compression */
tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
/*
* Physical address bit 39 in Tegra194 is used as a switch for special
* logic that swizzles the memory using either the legacy Tegra or the
* dGPU sector layout.
*/
if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
addr_flag = BIT_ULL(39);
#endif
base = tegra_plane_state->iova[0] + fb->offsets[0];
base |= addr_flag;
tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
value = V_POSITION(new_state->crtc_y) |
H_POSITION(new_state->crtc_x);
tegra_plane_writel(p, value, DC_WIN_POSITION);
value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
tegra_plane_writel(p, value, DC_WIN_SIZE);
value = WIN_ENABLE | COLOR_EXPAND;
tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);
value = PITCH(fb->pitches[0]);
tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);
if (yuv && planes > 1) {
base = tegra_plane_state->iova[1] + fb->offsets[1];
base |= addr_flag;
tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);
if (planes > 2) {
base = tegra_plane_state->iova[2] + fb->offsets[2];
base |= addr_flag;
tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
}
value = PITCH_U(fb->pitches[1]);
if (planes > 2)
value |= PITCH_V(fb->pitches[2]);
tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
} else {
tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
}
value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;
if (yuv) {
if (bpc < 12)
value |= DEGAMMA_YUV8_10;
else
value |= DEGAMMA_YUV12;
/* XXX parameterize */
value |= COLOR_SPACE_YUV_2020;
} else {
if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
value |= DEGAMMA_SRGB;
}
tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
value = OFFSET_X(new_state->src_y >> 16) |
OFFSET_Y(new_state->src_x >> 16);
tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);
if (dc->soc->supports_block_linear) {
unsigned long height = tegra_plane_state->tiling.value;
/* XXX */
switch (tegra_plane_state->tiling.mode) {
case TEGRA_BO_TILING_MODE_PITCH:
value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
DC_WINBUF_SURFACE_KIND_PITCH;
break;
/* XXX not supported on Tegra186 and later */
case TEGRA_BO_TILING_MODE_TILED:
value = DC_WINBUF_SURFACE_KIND_TILED;
break;
case TEGRA_BO_TILING_MODE_BLOCK:
value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
DC_WINBUF_SURFACE_KIND_BLOCK;
break;
}
tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
}
/* disable gamut CSC */
value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
value &= ~CONTROL_CSC_ENABLE;
tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
host1x_client_suspend(&dc->client);
}
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
.prepare_fb = tegra_plane_prepare_fb,
.cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_shared_plane_atomic_check,
.atomic_update = tegra_shared_plane_atomic_update,
.atomic_disable = tegra_shared_plane_atomic_disable,
};
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int wgrp,
unsigned int index)
{
enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub *hub = tegra->hub;
struct tegra_shared_plane *plane;
unsigned int possible_crtcs;
unsigned int num_formats;
const u64 *modifiers;
struct drm_plane *p;
const u32 *formats;
int err;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
plane->base.offset = 0x0a00 + 0x0300 * index;
plane->base.index = index;
plane->wgrp = &hub->wgrps[wgrp];
plane->wgrp->parent = &dc->client;
p = &plane->base.base;
/* planes can be assigned to arbitrary CRTCs */
possible_crtcs = BIT(tegra->num_crtcs) - 1;
num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
formats = tegra_shared_plane_formats;
modifiers = tegra_shared_plane_modifiers;
err = drm_universal_plane_init(drm, p, possible_crtcs,
&tegra_plane_funcs, formats,
num_formats, modifiers, type, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
}
drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
drm_plane_create_zpos_property(p, 0, 0, 255);
return p;
}
static struct drm_private_state *
tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
{
struct tegra_display_hub_state *state;
state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
return &state->base;
}
static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct tegra_display_hub_state *hub_state =
to_tegra_display_hub_state(state);
kfree(hub_state);
}
static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
.atomic_duplicate_state = tegra_display_hub_duplicate_state,
.atomic_destroy_state = tegra_display_hub_destroy_state,
};
static struct tegra_display_hub_state *
tegra_display_hub_get_state(struct tegra_display_hub *hub,
struct drm_atomic_state *state)
{
struct drm_private_state *priv;
priv = drm_atomic_get_private_obj_state(state, &hub->base);
if (IS_ERR(priv))
return ERR_CAST(priv);
return to_tegra_display_hub_state(priv);
}
int tegra_display_hub_atomic_check(struct drm_device *drm,
struct drm_atomic_state *state)
{
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub_state *hub_state;
struct drm_crtc_state *old, *new;
struct drm_crtc *crtc;
unsigned int i;
if (!tegra->hub)
return 0;
hub_state = tegra_display_hub_get_state(tegra->hub, state);
if (IS_ERR(hub_state))
return PTR_ERR(hub_state);
/*
* The display hub display clock needs to be fed by the display clock
* with the highest frequency to ensure proper functioning of all the
* displays.
*
* Note that this isn't used before Tegra186, but it doesn't hurt and
* conditionalizing it would make the code less clean.
*/
for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
struct tegra_dc_state *dc = to_dc_state(new);
if (new->active) {
if (!hub_state->clk || dc->pclk > hub_state->rate) {
hub_state->dc = to_tegra_dc(dc->base.crtc);
hub_state->clk = hub_state->dc->clk;
hub_state->rate = dc->pclk;
}
}
}
return 0;
}
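/*
 * Worked example for the loop above (illustrative, numbers assumed): with
 * head A active at a 148.5 MHz pixel clock and head B active at 74.25 MHz,
 * whichever order the CRTCs are visited in, the head with the higher pixel
 * clock (A) ends up latched in hub_state->dc, with hub_state->clk pointing
 * at its clock and hub_state->rate set to 148500000 Hz. The commit path
 * below then reparents the hub display clock to that clock.
 */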
static void tegra_display_hub_update(struct tegra_dc *dc)
{
u32 value;
int err;
err = host1x_client_resume(&dc->client);
if (err < 0) {
dev_err(dc->dev, "failed to resume: %d\n", err);
return;
}
value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
value &= ~LATENCY_EVENT;
tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);
value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
value = CURS_SLOTS(1) | WGRP_SLOTS(1);
tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
host1x_client_suspend(&dc->client);
}
void tegra_display_hub_atomic_commit(struct drm_device *drm,
struct drm_atomic_state *state)
{
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub *hub = tegra->hub;
struct tegra_display_hub_state *hub_state;
struct device *dev = hub->client.dev;
int err;
hub_state = to_tegra_display_hub_state(hub->base.state);
if (hub_state->clk) {
err = clk_set_rate(hub_state->clk, hub_state->rate);
if (err < 0)
dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
hub_state->clk, hub_state->rate);
err = clk_set_parent(hub->clk_disp, hub_state->clk);
if (err < 0)
dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
hub->clk_disp, hub_state->clk, err);
}
if (hub_state->dc)
tegra_display_hub_update(hub_state->dc);
}
static int tegra_display_hub_init(struct host1x_client *client)
{
struct tegra_display_hub *hub = to_tegra_display_hub(client);
struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
drm_atomic_private_obj_init(drm, &hub->base, &state->base,
&tegra_display_hub_state_funcs);
tegra->hub = hub;
return 0;
}
static int tegra_display_hub_exit(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
drm_atomic_private_obj_fini(&tegra->hub->base);
tegra->hub = NULL;
return 0;
}
static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
{
struct tegra_display_hub *hub = to_tegra_display_hub(client);
struct device *dev = client->dev;
unsigned int i = hub->num_heads;
int err;
err = reset_control_assert(hub->rst);
if (err < 0)
return err;
while (i--)
clk_disable_unprepare(hub->clk_heads[i]);
clk_disable_unprepare(hub->clk_hub);
clk_disable_unprepare(hub->clk_dsc);
clk_disable_unprepare(hub->clk_disp);
pm_runtime_put_sync(dev);
return 0;
}
static int tegra_display_hub_runtime_resume(struct host1x_client *client)
{
struct tegra_display_hub *hub = to_tegra_display_hub(client);
struct device *dev = client->dev;
unsigned int i;
int err;
err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
}
err = clk_prepare_enable(hub->clk_disp);
if (err < 0)
goto put_rpm;
err = clk_prepare_enable(hub->clk_dsc);
if (err < 0)
goto disable_disp;
err = clk_prepare_enable(hub->clk_hub);
if (err < 0)
goto disable_dsc;
for (i = 0; i < hub->num_heads; i++) {
err = clk_prepare_enable(hub->clk_heads[i]);
if (err < 0)
goto disable_heads;
}
err = reset_control_deassert(hub->rst);
if (err < 0)
goto disable_heads;
return 0;
disable_heads:
while (i--)
clk_disable_unprepare(hub->clk_heads[i]);
clk_disable_unprepare(hub->clk_hub);
disable_dsc:
clk_disable_unprepare(hub->clk_dsc);
disable_disp:
clk_disable_unprepare(hub->clk_disp);
put_rpm:
pm_runtime_put_sync(dev);
return err;
}
static const struct host1x_client_ops tegra_display_hub_ops = {
.init = tegra_display_hub_init,
.exit = tegra_display_hub_exit,
.suspend = tegra_display_hub_runtime_suspend,
.resume = tegra_display_hub_runtime_resume,
};
static int tegra_display_hub_probe(struct platform_device *pdev)
{
u64 dma_mask = dma_get_mask(pdev->dev.parent);
struct device_node *child = NULL;
struct tegra_display_hub *hub;
struct clk *clk;
unsigned int i;
int err;
err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
if (err < 0) {
dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
return err;
}
hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
if (!hub)
return -ENOMEM;
hub->soc = of_device_get_match_data(&pdev->dev);
hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
if (IS_ERR(hub->clk_disp)) {
err = PTR_ERR(hub->clk_disp);
return err;
}
if (hub->soc->supports_dsc) {
hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
if (IS_ERR(hub->clk_dsc)) {
err = PTR_ERR(hub->clk_dsc);
return err;
}
}
hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
if (IS_ERR(hub->clk_hub)) {
err = PTR_ERR(hub->clk_hub);
return err;
}
hub->rst = devm_reset_control_get(&pdev->dev, "misc");
if (IS_ERR(hub->rst)) {
err = PTR_ERR(hub->rst);
return err;
}
hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
sizeof(*hub->wgrps), GFP_KERNEL);
if (!hub->wgrps)
return -ENOMEM;
for (i = 0; i < hub->soc->num_wgrps; i++) {
struct tegra_windowgroup *wgrp = &hub->wgrps[i];
char id[8];
snprintf(id, sizeof(id), "wgrp%u", i);
mutex_init(&wgrp->lock);
wgrp->usecount = 0;
wgrp->index = i;
wgrp->rst = devm_reset_control_get(&pdev->dev, id);
if (IS_ERR(wgrp->rst))
return PTR_ERR(wgrp->rst);
err = reset_control_assert(wgrp->rst);
if (err < 0)
return err;
}
hub->num_heads = of_get_child_count(pdev->dev.of_node);
hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
GFP_KERNEL);
if (!hub->clk_heads)
return -ENOMEM;
for (i = 0; i < hub->num_heads; i++) {
child = of_get_next_child(pdev->dev.of_node, child);
if (!child) {
dev_err(&pdev->dev, "failed to find node for head %u\n",
i);
return -ENODEV;
}
clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock for head %u\n",
i);
of_node_put(child);
return PTR_ERR(clk);
}
hub->clk_heads[i] = clk;
}
of_node_put(child);
/* XXX: enable clock across reset? */
err = reset_control_assert(hub->rst);
if (err < 0)
return err;
platform_set_drvdata(pdev, hub);
pm_runtime_enable(&pdev->dev);
INIT_LIST_HEAD(&hub->client.list);
hub->client.ops = &tegra_display_hub_ops;
hub->client.dev = &pdev->dev;
err = host1x_client_register(&hub->client);
if (err < 0)
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
err = devm_of_platform_populate(&pdev->dev);
if (err < 0)
goto unregister;
return err;
unregister:
host1x_client_unregister(&hub->client);
pm_runtime_disable(&pdev->dev);
return err;
}
static void tegra_display_hub_remove(struct platform_device *pdev)
{
struct tegra_display_hub *hub = platform_get_drvdata(pdev);
unsigned int i;
host1x_client_unregister(&hub->client);
for (i = 0; i < hub->soc->num_wgrps; i++) {
struct tegra_windowgroup *wgrp = &hub->wgrps[i];
mutex_destroy(&wgrp->lock);
}
pm_runtime_disable(&pdev->dev);
}
static const struct tegra_display_hub_soc tegra186_display_hub = {
.num_wgrps = 6,
.supports_dsc = true,
};
static const struct tegra_display_hub_soc tegra194_display_hub = {
.num_wgrps = 6,
.supports_dsc = false,
};
static const struct of_device_id tegra_display_hub_of_match[] = {
{
.compatible = "nvidia,tegra194-display",
.data = &tegra194_display_hub
}, {
.compatible = "nvidia,tegra186-display",
.data = &tegra186_display_hub
}, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
struct platform_driver tegra_display_hub_driver = {
.driver = {
.name = "tegra-display-hub",
.of_match_table = tegra_display_hub_of_match,
},
.probe = tegra_display_hub_probe,
.remove_new = tegra_display_hub_remove,
};
| linux-master | drivers/gpu/drm/tegra/hub.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Avionic Design GmbH
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hdmi.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <soc/tegra/common.h>
#include <sound/hdmi-codec.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "hda.h"
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
#include "trace.h"
#define HDMI_ELD_BUFFER_SIZE 96
struct tmds_config {
unsigned int pclk;
u32 pll0;
u32 pll1;
u32 pe_current;
u32 drive_current;
u32 peak_current;
};
struct tegra_hdmi_config {
const struct tmds_config *tmds;
unsigned int num_tmds;
unsigned long fuse_override_offset;
u32 fuse_override_value;
bool has_sor_io_peak_current;
bool has_hda;
bool has_hbr;
};
struct tegra_hdmi {
struct host1x_client client;
struct tegra_output output;
struct device *dev;
struct regulator *hdmi;
struct regulator *pll;
struct regulator *vdd;
void __iomem *regs;
unsigned int irq;
struct clk *clk_parent;
struct clk *clk;
struct reset_control *rst;
const struct tegra_hdmi_config *config;
unsigned int audio_source;
struct tegra_hda_format format;
unsigned int pixel_clock;
bool stereo;
bool dvi;
struct drm_info_list *debugfs_files;
struct platform_device *audio_pdev;
struct mutex audio_lock;
};
static inline struct tegra_hdmi *
host1x_client_to_hdmi(struct host1x_client *client)
{
return container_of(client, struct tegra_hdmi, client);
}
static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output)
{
return container_of(output, struct tegra_hdmi, output);
}
#define HDMI_AUDIOCLK_FREQ 216000000
#define HDMI_REKEY_DEFAULT 56
enum {
AUTO = 0,
SPDIF,
HDA,
};
static inline u32 tegra_hdmi_readl(struct tegra_hdmi *hdmi,
unsigned int offset)
{
u32 value = readl(hdmi->regs + (offset << 2));
trace_hdmi_readl(hdmi->dev, offset, value);
return value;
}
static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, u32 value,
unsigned int offset)
{
trace_hdmi_writel(hdmi->dev, offset, value);
writel(value, hdmi->regs + (offset << 2));
}
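/*
 * Note (illustrative): the offsets passed to these accessors are 32-bit word
 * indices, which is why they are shifted left by two before being added to
 * the MMIO base. A word offset of 0x0a, for instance, touches byte offset
 * 0x28 in the register aperture.
 */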
struct tegra_hdmi_audio_config {
unsigned int n;
unsigned int cts;
unsigned int aval;
};
static const struct tmds_config tegra20_tmds_config[] = {
{ /* slow pixel clock modes */
.pclk = 27000000,
.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
SOR_PLL_TX_REG_LOAD(3),
.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
PE_CURRENT1(PE_CURRENT_0_0_mA) |
PE_CURRENT2(PE_CURRENT_0_0_mA) |
PE_CURRENT3(PE_CURRENT_0_0_mA),
.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
},
{ /* high pixel clock modes */
.pclk = UINT_MAX,
.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
SOR_PLL_TX_REG_LOAD(3),
.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
.pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
PE_CURRENT1(PE_CURRENT_6_0_mA) |
PE_CURRENT2(PE_CURRENT_6_0_mA) |
PE_CURRENT3(PE_CURRENT_6_0_mA),
.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
},
};
static const struct tmds_config tegra30_tmds_config[] = {
{ /* 480p modes */
.pclk = 27000000,
.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
SOR_PLL_TX_REG_LOAD(0),
.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
PE_CURRENT1(PE_CURRENT_0_0_mA) |
PE_CURRENT2(PE_CURRENT_0_0_mA) |
PE_CURRENT3(PE_CURRENT_0_0_mA),
.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
}, { /* 720p modes */
.pclk = 74250000,
.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
SOR_PLL_TX_REG_LOAD(0),
.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
PE_CURRENT1(PE_CURRENT_5_0_mA) |
PE_CURRENT2(PE_CURRENT_5_0_mA) |
PE_CURRENT3(PE_CURRENT_5_0_mA),
.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
}, { /* 1080p modes */
.pclk = UINT_MAX,
.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) |
SOR_PLL_TX_REG_LOAD(0),
.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
PE_CURRENT1(PE_CURRENT_5_0_mA) |
PE_CURRENT2(PE_CURRENT_5_0_mA) |
PE_CURRENT3(PE_CURRENT_5_0_mA),
.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
},
};
static const struct tmds_config tegra114_tmds_config[] = {
{ /* 480p/576p / 25.2MHz/27MHz modes */
.pclk = 27000000,
.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL,
.pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0),
.pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
PE_CURRENT1(PE_CURRENT_0_mA_T114) |
PE_CURRENT2(PE_CURRENT_0_mA_T114) |
PE_CURRENT3(PE_CURRENT_0_mA_T114),
.drive_current =
DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
}, { /* 720p / 74.25MHz modes */
.pclk = 74250000,
.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL,
.pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
SOR_PLL_TMDS_TERMADJ(0),
.pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) |
PE_CURRENT1(PE_CURRENT_15_mA_T114) |
PE_CURRENT2(PE_CURRENT_15_mA_T114) |
PE_CURRENT3(PE_CURRENT_15_mA_T114),
.drive_current =
DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
}, { /* 1080p / 148.5MHz modes */
.pclk = 148500000,
.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL,
.pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
SOR_PLL_TMDS_TERMADJ(0),
.pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) |
PE_CURRENT1(PE_CURRENT_10_mA_T114) |
PE_CURRENT2(PE_CURRENT_10_mA_T114) |
PE_CURRENT3(PE_CURRENT_10_mA_T114),
.drive_current =
DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) |
DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) |
DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) |
DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114),
.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
}, { /* 225/297MHz modes */
.pclk = UINT_MAX,
.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL,
.pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7)
| SOR_PLL_TMDS_TERM_ENABLE,
.pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
PE_CURRENT1(PE_CURRENT_0_mA_T114) |
PE_CURRENT2(PE_CURRENT_0_mA_T114) |
PE_CURRENT3(PE_CURRENT_0_mA_T114),
.drive_current =
DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) |
DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) |
DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) |
DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114),
.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA),
},
};
static const struct tmds_config tegra124_tmds_config[] = {
{ /* 480p/576p / 25.2MHz/27MHz modes */
.pclk = 27000000,
.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL,
.pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0),
.pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
PE_CURRENT1(PE_CURRENT_0_mA_T114) |
PE_CURRENT2(PE_CURRENT_0_mA_T114) |
PE_CURRENT3(PE_CURRENT_0_mA_T114),
.drive_current =
DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
}, { /* 720p / 74.25MHz modes */
.pclk = 74250000,
.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL,
.pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
SOR_PLL_TMDS_TERMADJ(0),
.pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) |
PE_CURRENT1(PE_CURRENT_15_mA_T114) |
PE_CURRENT2(PE_CURRENT_15_mA_T114) |
PE_CURRENT3(PE_CURRENT_15_mA_T114),
.drive_current =
DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
}, { /* 1080p / 148.5MHz modes */
.pclk = 148500000,
.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL,
.pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
SOR_PLL_TMDS_TERMADJ(0),
.pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) |
PE_CURRENT1(PE_CURRENT_10_mA_T114) |
PE_CURRENT2(PE_CURRENT_10_mA_T114) |
PE_CURRENT3(PE_CURRENT_10_mA_T114),
.drive_current =
DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) |
DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) |
DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) |
DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114),
.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
}, { /* 225/297MHz modes */
.pclk = UINT_MAX,
.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL,
.pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7)
| SOR_PLL_TMDS_TERM_ENABLE,
.pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
PE_CURRENT1(PE_CURRENT_0_mA_T114) |
PE_CURRENT2(PE_CURRENT_0_mA_T114) |
PE_CURRENT3(PE_CURRENT_0_mA_T114),
.drive_current =
DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) |
DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) |
DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) |
DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114),
.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA),
},
};
static void tegra_hdmi_audio_lock(struct tegra_hdmi *hdmi)
{
mutex_lock(&hdmi->audio_lock);
disable_irq(hdmi->irq);
}
static void tegra_hdmi_audio_unlock(struct tegra_hdmi *hdmi)
{
enable_irq(hdmi->irq);
mutex_unlock(&hdmi->audio_lock);
}
static int
tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pix_clock,
struct tegra_hdmi_audio_config *config)
{
const unsigned int afreq = 128 * audio_freq;
const unsigned int min_n = afreq / 1500;
const unsigned int max_n = afreq / 300;
const unsigned int ideal_n = afreq / 1000;
int64_t min_err = (uint64_t)-1 >> 1;
unsigned int min_delta = -1;
int n;
memset(config, 0, sizeof(*config));
config->n = -1;
for (n = min_n; n <= max_n; n++) {
uint64_t cts_f, aval_f;
unsigned int delta;
int64_t cts, err;
/* compute aval in 48.16 fixed point */
aval_f = ((int64_t)24000000 << 16) * n;
do_div(aval_f, afreq);
/* The division must leave no fractional remainder */
if (aval_f & 0xFFFF)
continue;
/* Compute cts in 48.16 fixed point */
cts_f = ((int64_t)pix_clock << 16) * n;
do_div(cts_f, afreq);
/* Round it to the nearest integer */
cts = (cts_f & ~0xFFFF) + ((cts_f & BIT(15)) << 1);
delta = abs(n - ideal_n);
/* Compute the absolute error */
err = abs((int64_t)cts_f - cts);
if (err < min_err || (err == min_err && delta < min_delta)) {
config->n = n;
config->cts = cts >> 16;
config->aval = aval_f >> 16;
min_delta = delta;
min_err = err;
}
}
return config->n != -1 ? 0 : -EINVAL;
}
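/*
 * Worked example (illustrative only): for 48 kHz audio at a 148.5 MHz pixel
 * clock the search above lands on the HDMI-recommended values:
 *
 *   afreq   = 128 * 48000                = 6144000
 *   ideal_n = afreq / 1000               = 6144
 *   aval    = 24000000 * 6144 / 6144000  = 24000  (no remainder, so accepted)
 *   cts     = 148500000 * 6144 / 6144000 = 148500 (error term is zero)
 *
 * so config->n = 6144, config->cts = 148500 and config->aval = 24000.
 */
#if 0	/* hypothetical usage sketch under the assumptions above; not built */
static void tegra_hdmi_audio_config_example(struct tegra_hdmi *hdmi)
{
struct tegra_hdmi_audio_config cfg;
if (tegra_hdmi_get_audio_config(48000, 148500000, &cfg) == 0)
dev_dbg(hdmi->dev, "n=%u cts=%u aval=%u\n", cfg.n, cfg.cts, cfg.aval);
}
#endif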
static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
{
const unsigned int freqs[] = {
32000, 44100, 48000, 88200, 96000, 176400, 192000
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
unsigned int f = freqs[i];
unsigned int eight_half;
unsigned int delta;
u32 value;
if (f > 96000)
delta = 2;
else if (f > 48000)
delta = 6;
else
delta = 9;
eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
value = AUDIO_FS_LOW(eight_half - delta) |
AUDIO_FS_HIGH(eight_half + delta);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i));
}
}
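/*
 * Worked example for the table above (illustrative only): at f = 48000 Hz,
 * eight_half = (8 * 216000000) / (48000 * 128) = 281 and delta = 9, so the
 * AUDIO_FS(2) window is programmed as AUDIO_FS_LOW(272) | AUDIO_FS_HIGH(290).
 */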
static void tegra_hdmi_write_aval(struct tegra_hdmi *hdmi, u32 value)
{
static const struct {
unsigned int sample_rate;
unsigned int offset;
} regs[] = {
{ 32000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 },
{ 44100, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 },
{ 48000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 },
{ 88200, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 },
{ 96000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 },
{ 176400, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 },
{ 192000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 },
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(regs); i++) {
if (regs[i].sample_rate == hdmi->format.sample_rate) {
tegra_hdmi_writel(hdmi, value, regs[i].offset);
break;
}
}
}
static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi)
{
struct tegra_hdmi_audio_config config;
u32 source, value;
int err;
switch (hdmi->audio_source) {
case HDA:
if (hdmi->config->has_hda)
source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
else
return -EINVAL;
break;
case SPDIF:
if (hdmi->config->has_hda)
source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
else
source = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
break;
default:
if (hdmi->config->has_hda)
source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
else
source = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
break;
}
/*
* Tegra30 and later use a slightly modified version of the register
* layout to accommodate changes related to supporting HDA as the
* audio input source for HDMI. The source select field has moved to
* the SOR_AUDIO_CNTRL0 register, but the error tolerance and frames
* per block fields remain in the AUDIO_CNTRL0 register.
*/
if (hdmi->config->has_hda) {
/*
* Inject null samples into the audio FIFO for every frame in
* which the codec did not receive any samples. This applies
* to stereo LPCM only.
*
* XXX: This seems to be a remnant of MCP days when this was
* used to work around issues with monitors not being able to
* play back system startup sounds early. It is possibly not
* needed on Linux at all.
*/
if (hdmi->format.channels == 2)
value = SOR_AUDIO_CNTRL0_INJECT_NULLSMPL;
else
value = 0;
value |= source;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
}
/*
* On Tegra20, HDA is not a supported audio source and the source
* select field is part of the AUDIO_CNTRL0 register.
*/
value = AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) |
AUDIO_CNTRL0_ERROR_TOLERANCE(6);
if (!hdmi->config->has_hda)
value |= source;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
/*
* Advertise support for High Bit-Rate on Tegra114 and later.
*/
if (hdmi->config->has_hbr) {
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
value |= SOR_AUDIO_SPARE0_HBR_ENABLE;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
}
err = tegra_hdmi_get_audio_config(hdmi->format.sample_rate,
hdmi->pixel_clock, &config);
if (err < 0) {
dev_err(hdmi->dev,
"cannot set audio to %u Hz at %u Hz pixel clock\n",
hdmi->format.sample_rate, hdmi->pixel_clock);
return err;
}
dev_dbg(hdmi->dev, "audio: pixclk=%u, n=%u, cts=%u, aval=%u\n",
hdmi->pixel_clock, config.n, config.cts, config.aval);
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE |
AUDIO_N_VALUE(config.n - 1);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config.n) | ACR_ENABLE,
HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config.cts),
HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N);
value &= ~AUDIO_N_RESETF;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
if (hdmi->config->has_hda)
tegra_hdmi_write_aval(hdmi, config.aval);
tegra_hdmi_setup_audio_fs_tables(hdmi);
return 0;
}
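/*
 * Following the 48 kHz example above (illustrative), this function ends up
 * programming AUDIO_N_VALUE(6143) (N minus one), ACR_SUBPACK_N(6144) and
 * ACR_SUBPACK_CTS(148500) into the HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH
 * and _LOW registers, and, on SoCs with HDA support, an AVAL of 24000 into
 * HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 via tegra_hdmi_write_aval().
 */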
static void tegra_hdmi_disable_audio(struct tegra_hdmi *hdmi)
{
u32 value;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
value &= ~GENERIC_CTRL_AUDIO;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
}
static void tegra_hdmi_enable_audio(struct tegra_hdmi *hdmi)
{
u32 value;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
value |= GENERIC_CTRL_AUDIO;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
}
static void tegra_hdmi_write_eld(struct tegra_hdmi *hdmi)
{
size_t length = drm_eld_size(hdmi->output.connector.eld), i;
u32 value;
for (i = 0; i < length; i++)
tegra_hdmi_writel(hdmi, i << 8 | hdmi->output.connector.eld[i],
HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
/*
* The HDA codec will always report an ELD buffer size of 96 bytes and
* the HDA codec driver will check that each byte read from the buffer
* is valid. Therefore every byte must be written, even if fewer than 96
* bytes were parsed from the EDID.
*/
for (i = length; i < HDMI_ELD_BUFFER_SIZE; i++)
tegra_hdmi_writel(hdmi, i << 8 | 0,
HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
value = SOR_AUDIO_HDA_PRESENSE_VALID | SOR_AUDIO_HDA_PRESENSE_PRESENT;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
}
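/*
 * Illustrative note: each BUFWR write encodes the byte index in bits 15:8
 * and the ELD byte itself in bits 7:0, so writing ELD byte 0x10 at index 5
 * results in the value 0x0510 being written to the register.
 */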
static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size)
{
u32 value = 0;
size_t i;
for (i = size; i > 0; i--)
value = (value << 8) | ptr[i - 1];
return value;
}
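/*
 * Worked example (illustrative): packing the four bytes 0x82, 0x02, 0x0d,
 * 0x5f with tegra_hdmi_subpack(ptr, 4) assembles them least-significant
 * byte first, yielding the value 0x5f0d0282.
 */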
static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
size_t size)
{
const u8 *ptr = data;
unsigned long offset;
size_t i, j;
u32 value;
switch (ptr[0]) {
case HDMI_INFOFRAME_TYPE_AVI:
offset = HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER;
break;
case HDMI_INFOFRAME_TYPE_AUDIO:
offset = HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER;
break;
case HDMI_INFOFRAME_TYPE_VENDOR:
offset = HDMI_NV_PDISP_HDMI_GENERIC_HEADER;
break;
default:
dev_err(hdmi->dev, "unsupported infoframe type: %02x\n",
ptr[0]);
return;
}
value = INFOFRAME_HEADER_TYPE(ptr[0]) |
INFOFRAME_HEADER_VERSION(ptr[1]) |
INFOFRAME_HEADER_LEN(ptr[2]);
tegra_hdmi_writel(hdmi, value, offset);
offset++;
/*
* Each subpack contains 7 bytes, divided into:
* - subpack_low: bytes 0 - 3
* - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
*/
for (i = 3, j = 0; i < size; i += 7, j += 8) {
size_t rem = size - i, num = min_t(size_t, rem, 4);
value = tegra_hdmi_subpack(&ptr[i], num);
tegra_hdmi_writel(hdmi, value, offset++);
num = min_t(size_t, rem - num, 3);
value = tegra_hdmi_subpack(&ptr[i + 4], num);
tegra_hdmi_writel(hdmi, value, offset++);
}
}
static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
struct drm_display_mode *mode)
{
struct hdmi_avi_infoframe frame;
u8 buffer[17];
ssize_t err;
err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
&hdmi->output.connector, mode);
if (err < 0) {
dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
return;
}
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
dev_err(hdmi->dev, "failed to pack AVI infoframe: %zd\n", err);
return;
}
tegra_hdmi_write_infopack(hdmi, buffer, err);
}
static void tegra_hdmi_disable_avi_infoframe(struct tegra_hdmi *hdmi)
{
u32 value;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
value &= ~INFOFRAME_CTRL_ENABLE;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
}
static void tegra_hdmi_enable_avi_infoframe(struct tegra_hdmi *hdmi)
{
u32 value;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
value |= INFOFRAME_CTRL_ENABLE;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
}
static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
{
struct hdmi_audio_infoframe frame;
u8 buffer[14];
ssize_t err;
err = hdmi_audio_infoframe_init(&frame);
if (err < 0) {
dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
err);
return;
}
frame.channels = hdmi->format.channels;
err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
dev_err(hdmi->dev, "failed to pack audio infoframe: %zd\n",
err);
return;
}
/*
* The audio infoframe has only one set of subpack registers, so the
* infoframe needs to be truncated. One set of subpack registers can
* contain 7 bytes. Including the 3-byte header, only the first 10
* bytes can be programmed.
*/
tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
}
static void tegra_hdmi_disable_audio_infoframe(struct tegra_hdmi *hdmi)
{
u32 value;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
value &= ~INFOFRAME_CTRL_ENABLE;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
}
static void tegra_hdmi_enable_audio_infoframe(struct tegra_hdmi *hdmi)
{
u32 value;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
value |= INFOFRAME_CTRL_ENABLE;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
}
static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
{
struct hdmi_vendor_infoframe frame;
u8 buffer[10];
ssize_t err;
hdmi_vendor_infoframe_init(&frame);
frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;
err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
dev_err(hdmi->dev, "failed to pack vendor infoframe: %zd\n",
err);
return;
}
tegra_hdmi_write_infopack(hdmi, buffer, err);
}
static void tegra_hdmi_disable_stereo_infoframe(struct tegra_hdmi *hdmi)
{
u32 value;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
value &= ~GENERIC_CTRL_ENABLE;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
}
static void tegra_hdmi_enable_stereo_infoframe(struct tegra_hdmi *hdmi)
{
u32 value;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
value |= GENERIC_CTRL_ENABLE;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
}
static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
const struct tmds_config *tmds)
{
u32 value;
tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0);
tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
tegra_hdmi_writel(hdmi, tmds->drive_current,
HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
value = tegra_hdmi_readl(hdmi, hdmi->config->fuse_override_offset);
value |= hdmi->config->fuse_override_value;
tegra_hdmi_writel(hdmi, value, hdmi->config->fuse_override_offset);
if (hdmi->config->has_sor_io_peak_current)
tegra_hdmi_writel(hdmi, tmds->peak_current,
HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
}
static int tegra_hdmi_reconfigure_audio(struct tegra_hdmi *hdmi)
{
int err;
err = tegra_hdmi_setup_audio(hdmi);
if (err < 0) {
tegra_hdmi_disable_audio_infoframe(hdmi);
tegra_hdmi_disable_audio(hdmi);
} else {
tegra_hdmi_setup_audio_infoframe(hdmi);
tegra_hdmi_enable_audio_infoframe(hdmi);
tegra_hdmi_enable_audio(hdmi);
}
return err;
}
static bool tegra_output_is_hdmi(struct tegra_output *output)
{
return output->connector.display_info.is_hdmi;
}
static enum drm_connector_status
tegra_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
struct tegra_output *output = connector_to_output(connector);
struct tegra_hdmi *hdmi = to_hdmi(output);
enum drm_connector_status status;
status = tegra_output_connector_detect(connector, force);
if (status == connector_status_connected)
return status;
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
return status;
}
#define DEBUGFS_REG32(_name) { .name = #_name, .offset = _name }
static const struct debugfs_reg32 tegra_hdmi_regs[] = {
DEBUGFS_REG32(HDMI_CTXSW),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_STATE0),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_STATE1),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_STATE2),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_AN_MSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_AN_LSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CN_MSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CN_LSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CTRL),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CMODE),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_RI),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CS_MSB),
DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CS_LSB),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_EMU0),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_EMU1),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_EMU2),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_CTRL),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_STATUS),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_HEADER),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_CTRL),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_CTRL),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GCP_CTRL),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GCP_STATUS),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GCP_SUBPACK),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_EMU0),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_EMU1),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_EMU1_RDATA),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_SPARE),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2),
DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CAP),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_PWR),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_TEST),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_PLL0),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_PLL1),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_PLL2),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CSTM),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_LVDS),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CRCA),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CRCB),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_BLANK),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_CTL),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(0)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(1)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(2)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(3)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(4)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(5)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(6)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(7)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(8)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(9)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(10)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(11)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(12)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(13)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(14)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(15)),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_VCRCA0),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_VCRCA1),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CCRCA0),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CCRCA1),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_EDATAA0),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_EDATAA1),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_COUNTA0),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_COUNTA1),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_DEBUGA0),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_DEBUGA1),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_TRIG),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_MSCHECK),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_DEBUG0),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_DEBUG1),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_DEBUG2),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(0)),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(1)),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(2)),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(3)),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(4)),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(5)),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(6)),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_THRESHOLD),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_CNTRL0),
DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_N),
DEBUGFS_REG32(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_REFCLK),
DEBUGFS_REG32(HDMI_NV_PDISP_CRC_CONTROL),
DEBUGFS_REG32(HDMI_NV_PDISP_INPUT_CONTROL),
DEBUGFS_REG32(HDMI_NV_PDISP_SCRATCH),
DEBUGFS_REG32(HDMI_NV_PDISP_PE_CURRENT),
DEBUGFS_REG32(HDMI_NV_PDISP_KEY_CTRL),
DEBUGFS_REG32(HDMI_NV_PDISP_KEY_DEBUG0),
DEBUGFS_REG32(HDMI_NV_PDISP_KEY_DEBUG1),
DEBUGFS_REG32(HDMI_NV_PDISP_KEY_DEBUG2),
DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_0),
DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_1),
DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_2),
DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_3),
DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG),
DEBUGFS_REG32(HDMI_NV_PDISP_KEY_SKEY_INDEX),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_SPARE0),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE),
DEBUGFS_REG32(HDMI_NV_PDISP_INT_STATUS),
DEBUGFS_REG32(HDMI_NV_PDISP_INT_MASK),
DEBUGFS_REG32(HDMI_NV_PDISP_INT_ENABLE),
DEBUGFS_REG32(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT),
};
static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_hdmi *hdmi = node->info_ent->data;
struct drm_crtc *crtc = hdmi->output.encoder.crtc;
struct drm_device *drm = node->minor->dev;
unsigned int i;
int err = 0;
drm_modeset_lock_all(drm);
if (!crtc || !crtc->state->active) {
err = -EBUSY;
goto unlock;
}
for (i = 0; i < ARRAY_SIZE(tegra_hdmi_regs); i++) {
unsigned int offset = tegra_hdmi_regs[i].offset;
seq_printf(s, "%-56s %#05x %08x\n", tegra_hdmi_regs[i].name,
offset, tegra_hdmi_readl(hdmi, offset));
}
unlock:
drm_modeset_unlock_all(drm);
return err;
}
static struct drm_info_list debugfs_files[] = {
{ "regs", tegra_hdmi_show_regs, 0, NULL },
};
static int tegra_hdmi_late_register(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
unsigned int i, count = ARRAY_SIZE(debugfs_files);
struct drm_minor *minor = connector->dev->primary;
struct dentry *root = connector->debugfs_entry;
struct tegra_hdmi *hdmi = to_hdmi(output);
hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
if (!hdmi->debugfs_files)
return -ENOMEM;
for (i = 0; i < count; i++)
hdmi->debugfs_files[i].data = hdmi;
drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor);
return 0;
}
static void tegra_hdmi_early_unregister(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
struct drm_minor *minor = connector->dev->primary;
unsigned int count = ARRAY_SIZE(debugfs_files);
struct tegra_hdmi *hdmi = to_hdmi(output);
drm_debugfs_remove_files(hdmi->debugfs_files, count, minor);
kfree(hdmi->debugfs_files);
hdmi->debugfs_files = NULL;
}
static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = tegra_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = tegra_output_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.late_register = tegra_hdmi_late_register,
.early_unregister = tegra_hdmi_early_unregister,
};
static enum drm_mode_status
tegra_hdmi_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct tegra_output *output = connector_to_output(connector);
struct tegra_hdmi *hdmi = to_hdmi(output);
unsigned long pclk = mode->clock * 1000;
enum drm_mode_status status = MODE_OK;
struct clk *parent;
long err;
parent = clk_get_parent(hdmi->clk_parent);
err = clk_round_rate(parent, pclk * 4);
if (err <= 0)
status = MODE_NOCLOCK;
return status;
}
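/*
 * Illustrative example: a 1920x1080@60 mode reports mode->clock = 148500,
 * i.e. a 148.5 MHz pixel clock, so the check above asks the parent PLL
 * whether it can supply roughly 594 MHz (pclk * 4). If clk_round_rate()
 * cannot produce a positive rate, the mode is rejected with MODE_NOCLOCK.
 */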
static const struct drm_connector_helper_funcs
tegra_hdmi_connector_helper_funcs = {
.get_modes = tegra_output_connector_get_modes,
.mode_valid = tegra_hdmi_connector_mode_valid,
};
static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_hdmi *hdmi = to_hdmi(output);
u32 value;
int err;
tegra_hdmi_audio_lock(hdmi);
/*
* The following accesses registers of the display controller, so make
* sure it's only executed when the output is attached to one.
*/
if (dc) {
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value &= ~HDMI_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
tegra_dc_commit(dc);
}
if (!hdmi->dvi) {
if (hdmi->stereo)
tegra_hdmi_disable_stereo_infoframe(hdmi);
tegra_hdmi_disable_audio_infoframe(hdmi);
tegra_hdmi_disable_avi_infoframe(hdmi);
tegra_hdmi_disable_audio(hdmi);
}
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
hdmi->pixel_clock = 0;
tegra_hdmi_audio_unlock(hdmi);
err = host1x_client_suspend(&hdmi->client);
if (err < 0)
dev_err(hdmi->dev, "failed to suspend: %d\n", err);
}
static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_hdmi *hdmi = to_hdmi(output);
unsigned int pulse_start, div82;
int retries = 1000;
u32 value;
int err;
err = host1x_client_resume(&hdmi->client);
if (err < 0) {
dev_err(hdmi->dev, "failed to resume: %d\n", err);
return;
}
tegra_hdmi_audio_lock(hdmi);
/*
* Enable and unmask the HDA codec SCRATCH0 register interrupt. This
* is used for interoperability between the HDA codec driver and the
* HDMI driver.
*/
tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_ENABLE);
tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_MASK);
hdmi->pixel_clock = mode->clock * 1000;
h_sync_width = mode->hsync_end - mode->hsync_start;
h_back_porch = mode->htotal - mode->hsync_end;
h_front_porch = mode->hsync_start - mode->hdisplay;
err = dev_pm_opp_set_rate(hdmi->dev, hdmi->pixel_clock);
if (err < 0) {
dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
err);
}
DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk));
/* power up sequence */
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
value &= ~SOR_PLL_PDBG;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_PLL0);
usleep_range(10, 20);
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
value &= ~SOR_PLL_PWR;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_PLL0);
tegra_dc_writel(dc, VSYNC_H_POSITION(1),
DC_DISP_DISP_TIMING_OPTIONS);
tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE_888,
DC_DISP_DISP_COLOR_CONTROL);
/* video_preamble uses h_pulse2 */
pulse_start = 1 + h_sync_width + h_back_porch - 10;
tegra_dc_writel(dc, H_PULSE2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
PULSE_LAST_END_A;
tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8);
tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) |
VSYNC_WINDOW_ENABLE;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
if (dc->pipe)
value = HDMI_SRC_DISPLAYB;
else
value = HDMI_SRC_DISPLAYA;
if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) ||
(mode->vdisplay == 576)))
tegra_hdmi_writel(hdmi,
value | ARM_VIDEO_RANGE_FULL,
HDMI_NV_PDISP_INPUT_CONTROL);
else
tegra_hdmi_writel(hdmi,
value | ARM_VIDEO_RANGE_LIMITED,
HDMI_NV_PDISP_INPUT_CONTROL);
div82 = clk_get_rate(hdmi->clk) / 1000000 * 4;
value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
hdmi->dvi = !tegra_output_is_hdmi(output);
if (!hdmi->dvi) {
/*
* Make sure that the audio format has been configured before
* enabling audio, otherwise we may try to divide by zero.
*/
if (hdmi->format.sample_rate > 0) {
err = tegra_hdmi_setup_audio(hdmi);
if (err < 0)
hdmi->dvi = true;
}
}
if (hdmi->config->has_hda)
tegra_hdmi_write_eld(hdmi);
rekey = HDMI_REKEY_DEFAULT;
value = HDMI_CTRL_REKEY(rekey);
value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch +
h_front_porch - rekey - 18) / 32);
if (!hdmi->dvi)
value |= HDMI_CTRL_ENABLE;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
if (!hdmi->dvi) {
tegra_hdmi_setup_avi_infoframe(hdmi, mode);
tegra_hdmi_setup_audio_infoframe(hdmi);
if (hdmi->stereo)
tegra_hdmi_setup_stereo_infoframe(hdmi);
}
/* TMDS CONFIG */
for (i = 0; i < hdmi->config->num_tmds; i++) {
if (hdmi->pixel_clock <= hdmi->config->tmds[i].pclk) {
tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
break;
}
}
tegra_hdmi_writel(hdmi,
SOR_SEQ_PU_PC(0) |
SOR_SEQ_PU_PC_ALT(0) |
SOR_SEQ_PD_PC(8) |
SOR_SEQ_PD_PC_ALT(8),
HDMI_NV_PDISP_SOR_SEQ_CTL);
value = SOR_SEQ_INST_WAIT_TIME(1) |
SOR_SEQ_INST_WAIT_UNITS_VSYNC |
SOR_SEQ_INST_HALT |
SOR_SEQ_INST_PIN_A_LOW |
SOR_SEQ_INST_PIN_B_LOW |
SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0));
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8));
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_CSTM);
value &= ~SOR_CSTM_ROTCLK(~0);
value |= SOR_CSTM_ROTCLK(2);
value |= SOR_CSTM_PLLDIV;
value &= ~SOR_CSTM_LVDS_ENABLE;
value &= ~SOR_CSTM_MODE_MASK;
value |= SOR_CSTM_MODE_TMDS;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
/* start SOR */
tegra_hdmi_writel(hdmi,
SOR_PWR_NORMAL_STATE_PU |
SOR_PWR_NORMAL_START_NORMAL |
SOR_PWR_SAFE_STATE_PD |
SOR_PWR_SETTING_NEW_TRIGGER,
HDMI_NV_PDISP_SOR_PWR);
tegra_hdmi_writel(hdmi,
SOR_PWR_NORMAL_STATE_PU |
SOR_PWR_NORMAL_START_NORMAL |
SOR_PWR_SAFE_STATE_PD |
SOR_PWR_SETTING_NEW_DONE,
HDMI_NV_PDISP_SOR_PWR);
do {
BUG_ON(--retries < 0);
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
} while (value & SOR_PWR_SETTING_NEW_PENDING);
value = SOR_STATE_ASY_CRCMODE_COMPLETE |
SOR_STATE_ASY_OWNER_HEAD0 |
SOR_STATE_ASY_SUBOWNER_BOTH |
SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
SOR_STATE_ASY_DEPOL_POS;
/* setup sync polarities */
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
value |= SOR_STATE_ASY_HSYNCPOL_POS;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
value |= SOR_STATE_ASY_HSYNCPOL_NEG;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
value |= SOR_STATE_ASY_VSYNCPOL_POS;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
value |= SOR_STATE_ASY_VSYNCPOL_NEG;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2);
value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1);
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED,
HDMI_NV_PDISP_SOR_STATE1);
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value |= HDMI_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
tegra_dc_commit(dc);
if (!hdmi->dvi) {
tegra_hdmi_enable_avi_infoframe(hdmi);
tegra_hdmi_enable_audio_infoframe(hdmi);
tegra_hdmi_enable_audio(hdmi);
if (hdmi->stereo)
tegra_hdmi_enable_stereo_infoframe(hdmi);
}
/* TODO: add HDCP support */
tegra_hdmi_audio_unlock(hdmi);
}
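/*
 * Worked example for the timing-derived values above (illustrative, using
 * the standard CEA 1080p60 timings: h_sync_width = 44, h_back_porch = 148,
 * h_front_porch = 88):
 *
 *   pulse_start = 1 + 44 + 148 - 10 = 183
 *   HDMI_CTRL_MAX_AC_PACKET((44 + 148 + 88 - 56 - 18) / 32) = 6
 *
 * and with a 148.5 MHz pixel clock the TMDS loop picks the first table
 * entry whose pclk is at least 148.5 MHz (the UINT_MAX catch-all entry on
 * SoCs without a dedicated 148.5 MHz entry).
 */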
static int
tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
unsigned long pclk = crtc_state->mode.clock * 1000;
struct tegra_hdmi *hdmi = to_hdmi(output);
int err;
err = tegra_dc_state_setup_clock(dc, crtc_state, hdmi->clk_parent,
pclk, 0);
if (err < 0) {
dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
return err;
}
return err;
}
static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
.disable = tegra_hdmi_encoder_disable,
.enable = tegra_hdmi_encoder_enable,
.atomic_check = tegra_hdmi_encoder_atomic_check,
};
static int tegra_hdmi_hw_params(struct device *dev, void *data,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms)
{
struct tegra_hdmi *hdmi = data;
int ret = 0;
tegra_hdmi_audio_lock(hdmi);
hdmi->format.sample_rate = hparms->sample_rate;
hdmi->format.channels = hparms->channels;
if (hdmi->pixel_clock && !hdmi->dvi)
ret = tegra_hdmi_reconfigure_audio(hdmi);
tegra_hdmi_audio_unlock(hdmi);
return ret;
}
static int tegra_hdmi_audio_startup(struct device *dev, void *data)
{
struct tegra_hdmi *hdmi = data;
int ret;
ret = host1x_client_resume(&hdmi->client);
if (ret < 0)
dev_err(hdmi->dev, "failed to resume: %d\n", ret);
return ret;
}
static void tegra_hdmi_audio_shutdown(struct device *dev, void *data)
{
struct tegra_hdmi *hdmi = data;
int ret;
tegra_hdmi_audio_lock(hdmi);
hdmi->format.sample_rate = 0;
hdmi->format.channels = 0;
tegra_hdmi_audio_unlock(hdmi);
ret = host1x_client_suspend(&hdmi->client);
if (ret < 0)
dev_err(hdmi->dev, "failed to suspend: %d\n", ret);
}
static const struct hdmi_codec_ops tegra_hdmi_codec_ops = {
.hw_params = tegra_hdmi_hw_params,
.audio_startup = tegra_hdmi_audio_startup,
.audio_shutdown = tegra_hdmi_audio_shutdown,
};
static int tegra_hdmi_codec_register(struct tegra_hdmi *hdmi)
{
struct hdmi_codec_pdata codec_data = {};
if (hdmi->config->has_hda)
return 0;
codec_data.ops = &tegra_hdmi_codec_ops;
codec_data.data = hdmi;
codec_data.spdif = 1;
hdmi->audio_pdev = platform_device_register_data(hdmi->dev,
HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO,
&codec_data,
sizeof(codec_data));
if (IS_ERR(hdmi->audio_pdev))
return PTR_ERR(hdmi->audio_pdev);
hdmi->format.channels = 2;
return 0;
}
static void tegra_hdmi_codec_unregister(struct tegra_hdmi *hdmi)
{
if (hdmi->audio_pdev)
platform_device_unregister(hdmi->audio_pdev);
}
static int tegra_hdmi_init(struct host1x_client *client)
{
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
struct drm_device *drm = dev_get_drvdata(client->host);
struct drm_connector *connector;
int err;
hdmi->output.dev = client->dev;
drm_simple_encoder_init(drm, &hdmi->output.encoder,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&hdmi->output.encoder,
&tegra_hdmi_encoder_helper_funcs);
if (hdmi->output.bridge) {
err = drm_bridge_attach(&hdmi->output.encoder, hdmi->output.bridge,
NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (err) {
dev_err(client->dev, "failed to attach bridge: %d\n",
err);
return err;
}
connector = drm_bridge_connector_init(drm, &hdmi->output.encoder);
if (IS_ERR(connector)) {
dev_err(client->dev,
"failed to initialize bridge connector: %pe\n",
connector);
return PTR_ERR(connector);
}
drm_connector_attach_encoder(connector, &hdmi->output.encoder);
} else {
drm_connector_init_with_ddc(drm, &hdmi->output.connector,
&tegra_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA,
hdmi->output.ddc);
drm_connector_helper_add(&hdmi->output.connector,
&tegra_hdmi_connector_helper_funcs);
hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
drm_connector_attach_encoder(&hdmi->output.connector,
&hdmi->output.encoder);
drm_connector_register(&hdmi->output.connector);
}
err = tegra_output_init(drm, &hdmi->output);
if (err < 0) {
dev_err(client->dev, "failed to initialize output: %d\n", err);
return err;
}
hdmi->output.encoder.possible_crtcs = 0x3;
err = regulator_enable(hdmi->hdmi);
if (err < 0) {
dev_err(client->dev, "failed to enable HDMI regulator: %d\n",
err);
goto output_exit;
}
err = regulator_enable(hdmi->pll);
if (err < 0) {
dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
goto disable_hdmi;
}
err = regulator_enable(hdmi->vdd);
if (err < 0) {
dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
goto disable_pll;
}
err = tegra_hdmi_codec_register(hdmi);
if (err < 0) {
dev_err(hdmi->dev, "failed to register audio codec: %d\n", err);
goto disable_vdd;
}
return 0;
disable_vdd:
regulator_disable(hdmi->vdd);
disable_pll:
regulator_disable(hdmi->pll);
disable_hdmi:
regulator_disable(hdmi->hdmi);
output_exit:
tegra_output_exit(&hdmi->output);
return err;
}
static int tegra_hdmi_exit(struct host1x_client *client)
{
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
tegra_hdmi_codec_unregister(hdmi);
tegra_output_exit(&hdmi->output);
regulator_disable(hdmi->vdd);
regulator_disable(hdmi->pll);
regulator_disable(hdmi->hdmi);
return 0;
}
static int tegra_hdmi_runtime_suspend(struct host1x_client *client)
{
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
struct device *dev = client->dev;
int err;
err = reset_control_assert(hdmi->rst);
if (err < 0) {
dev_err(dev, "failed to assert reset: %d\n", err);
return err;
}
usleep_range(1000, 2000);
clk_disable_unprepare(hdmi->clk);
pm_runtime_put_sync(dev);
return 0;
}
static int tegra_hdmi_runtime_resume(struct host1x_client *client)
{
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
struct device *dev = client->dev;
int err;
err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
}
err = clk_prepare_enable(hdmi->clk);
if (err < 0) {
dev_err(dev, "failed to enable clock: %d\n", err);
goto put_rpm;
}
usleep_range(1000, 2000);
err = reset_control_deassert(hdmi->rst);
if (err < 0) {
dev_err(dev, "failed to deassert reset: %d\n", err);
goto disable_clk;
}
return 0;
disable_clk:
clk_disable_unprepare(hdmi->clk);
put_rpm:
pm_runtime_put_sync(dev);
return err;
}
static const struct host1x_client_ops hdmi_client_ops = {
.init = tegra_hdmi_init,
.exit = tegra_hdmi_exit,
.suspend = tegra_hdmi_runtime_suspend,
.resume = tegra_hdmi_runtime_resume,
};
static const struct tegra_hdmi_config tegra20_hdmi_config = {
.tmds = tegra20_tmds_config,
.num_tmds = ARRAY_SIZE(tegra20_tmds_config),
.fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = false,
.has_hda = false,
.has_hbr = false,
};
static const struct tegra_hdmi_config tegra30_hdmi_config = {
.tmds = tegra30_tmds_config,
.num_tmds = ARRAY_SIZE(tegra30_tmds_config),
.fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = false,
.has_hda = true,
.has_hbr = false,
};
static const struct tegra_hdmi_config tegra114_hdmi_config = {
.tmds = tegra114_tmds_config,
.num_tmds = ARRAY_SIZE(tegra114_tmds_config),
.fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = true,
.has_hda = true,
.has_hbr = true,
};
static const struct tegra_hdmi_config tegra124_hdmi_config = {
.tmds = tegra124_tmds_config,
.num_tmds = ARRAY_SIZE(tegra124_tmds_config),
.fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = true,
.has_hda = true,
.has_hbr = true,
};
static const struct of_device_id tegra_hdmi_of_match[] = {
{ .compatible = "nvidia,tegra124-hdmi", .data = &tegra124_hdmi_config },
{ .compatible = "nvidia,tegra114-hdmi", .data = &tegra114_hdmi_config },
{ .compatible = "nvidia,tegra30-hdmi", .data = &tegra30_hdmi_config },
{ .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match);
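/*
 * Acknowledge all pending interrupts and, when the HDA codec scratch
 * register reports a new stream format, either reconfigure audio for that
 * format or tear down the audio infoframe and audio engine.
 */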
static irqreturn_t tegra_hdmi_irq(int irq, void *data)
{
struct tegra_hdmi *hdmi = data;
u32 value;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_INT_STATUS);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_INT_STATUS);
if (value & INT_CODEC_SCRATCH0) {
unsigned int format;
u32 value;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
tegra_hda_parse_format(format, &hdmi->format);
tegra_hdmi_reconfigure_audio(hdmi);
} else {
tegra_hdmi_disable_audio_infoframe(hdmi);
tegra_hdmi_disable_audio(hdmi);
}
}
return IRQ_HANDLED;
}
static int tegra_hdmi_probe(struct platform_device *pdev)
{
struct tegra_hdmi *hdmi;
int err;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
hdmi->config = of_device_get_match_data(&pdev->dev);
hdmi->dev = &pdev->dev;
hdmi->audio_source = AUTO;
hdmi->stereo = false;
hdmi->dvi = false;
mutex_init(&hdmi->audio_lock);
hdmi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hdmi->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(hdmi->clk);
}
hdmi->rst = devm_reset_control_get(&pdev->dev, "hdmi");
if (IS_ERR(hdmi->rst)) {
dev_err(&pdev->dev, "failed to get reset\n");
return PTR_ERR(hdmi->rst);
}
hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(hdmi->clk_parent))
return PTR_ERR(hdmi->clk_parent);
err = clk_set_parent(hdmi->clk, hdmi->clk_parent);
if (err < 0) {
dev_err(&pdev->dev, "failed to setup clocks: %d\n", err);
return err;
}
hdmi->hdmi = devm_regulator_get(&pdev->dev, "hdmi");
err = PTR_ERR_OR_ZERO(hdmi->hdmi);
if (err)
return dev_err_probe(&pdev->dev, err,
"failed to get HDMI regulator\n");
hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
err = PTR_ERR_OR_ZERO(hdmi->pll);
if (err)
return dev_err_probe(&pdev->dev, err,
"failed to get PLL regulator\n");
hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
err = PTR_ERR_OR_ZERO(hdmi->vdd);
if (err)
return dev_err_probe(&pdev->dev, err,
"failed to get VDD regulator\n");
hdmi->output.dev = &pdev->dev;
err = tegra_output_probe(&hdmi->output);
if (err < 0)
return err;
hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
err = platform_get_irq(pdev, 0);
if (err < 0)
return err;
hdmi->irq = err;
err = devm_request_irq(hdmi->dev, hdmi->irq, tegra_hdmi_irq, 0,
dev_name(hdmi->dev), hdmi);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
hdmi->irq, err);
return err;
}
platform_set_drvdata(pdev, hdmi);
err = devm_pm_runtime_enable(&pdev->dev);
if (err)
return err;
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
return err;
INIT_LIST_HEAD(&hdmi->client.list);
hdmi->client.ops = &hdmi_client_ops;
hdmi->client.dev = &pdev->dev;
err = host1x_client_register(&hdmi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
return err;
}
return 0;
}
static void tegra_hdmi_remove(struct platform_device *pdev)
{
struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
host1x_client_unregister(&hdmi->client);
tegra_output_remove(&hdmi->output);
}
struct platform_driver tegra_hdmi_driver = {
.driver = {
.name = "tegra-hdmi",
.of_match_table = tegra_hdmi_of_match,
},
.probe = tegra_hdmi_probe,
.remove_new = tegra_hdmi_remove,
};
| linux-master | drivers/gpu/drm/tegra/hdmi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Avionic Design GmbH
* Copyright (C) 2013 NVIDIA Corporation
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <soc/tegra/common.h>
#include <soc/tegra/pmc.h>
#include "drm.h"
#include "gem.h"
#include "gr3d.h"
enum {
RST_MC,
RST_GR3D,
RST_MC2,
RST_GR3D2,
RST_GR3D_MAX,
};
struct gr3d_soc {
unsigned int version;
unsigned int num_clocks;
unsigned int num_resets;
};
struct gr3d {
struct tegra_drm_client client;
struct host1x_channel *channel;
const struct gr3d_soc *soc;
struct clk_bulk_data *clocks;
unsigned int nclocks;
struct reset_control_bulk_data resets[RST_GR3D_MAX];
unsigned int nresets;
DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
};
static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
{
return container_of(client, struct gr3d, client);
}
static int gr3d_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct drm_device *dev = dev_get_drvdata(client->host);
unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
struct gr3d *gr3d = to_gr3d(drm);
int err;
gr3d->channel = host1x_channel_request(client);
if (!gr3d->channel)
return -ENOMEM;
client->syncpts[0] = host1x_syncpt_request(client, flags);
if (!client->syncpts[0]) {
err = -ENOMEM;
dev_err(client->dev, "failed to request syncpoint: %d\n", err);
goto put;
}
err = host1x_client_iommu_attach(client);
if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
err = tegra_drm_register_client(dev->dev_private, drm);
if (err < 0) {
dev_err(client->dev, "failed to register client: %d\n", err);
goto detach_iommu;
}
return 0;
detach_iommu:
host1x_client_iommu_detach(client);
free:
host1x_syncpt_put(client->syncpts[0]);
put:
host1x_channel_put(gr3d->channel);
return err;
}
static int gr3d_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct drm_device *dev = dev_get_drvdata(client->host);
struct gr3d *gr3d = to_gr3d(drm);
int err;
err = tegra_drm_unregister_client(dev->dev_private, drm);
if (err < 0)
return err;
pm_runtime_dont_use_autosuspend(client->dev);
pm_runtime_force_suspend(client->dev);
host1x_client_iommu_detach(client);
host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(gr3d->channel);
gr3d->channel = NULL;
return 0;
}
static const struct host1x_client_ops gr3d_client_ops = {
.init = gr3d_init,
.exit = gr3d_exit,
};
static int gr3d_open_channel(struct tegra_drm_client *client,
struct tegra_drm_context *context)
{
struct gr3d *gr3d = to_gr3d(client);
context->channel = host1x_channel_get(gr3d->channel);
if (!context->channel)
return -ENOMEM;
return 0;
}
static void gr3d_close_channel(struct tegra_drm_context *context)
{
host1x_channel_put(context->channel);
}
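/*
 * Tell the host1x firewall whether a register offset carries a buffer
 * address: offset 0x2b of the host1x class and every GR3D offset set in the
 * addr_regs bitmap are treated as address registers.
 */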
static int gr3d_is_addr_reg(struct device *dev, u32 class, u32 offset)
{
struct gr3d *gr3d = dev_get_drvdata(dev);
switch (class) {
case HOST1X_CLASS_HOST1X:
if (offset == 0x2b)
return 1;
break;
case HOST1X_CLASS_GR3D:
if (offset >= GR3D_NUM_REGS)
break;
if (test_bit(offset, gr3d->addr_regs))
return 1;
break;
}
return 0;
}
static const struct tegra_drm_client_ops gr3d_ops = {
.open_channel = gr3d_open_channel,
.close_channel = gr3d_close_channel,
.is_addr_reg = gr3d_is_addr_reg,
.submit = tegra_drm_submit,
};
static const struct gr3d_soc tegra20_gr3d_soc = {
.version = 0x20,
.num_clocks = 1,
.num_resets = 2,
};
static const struct gr3d_soc tegra30_gr3d_soc = {
.version = 0x30,
.num_clocks = 2,
.num_resets = 4,
};
static const struct gr3d_soc tegra114_gr3d_soc = {
.version = 0x35,
.num_clocks = 1,
.num_resets = 2,
};
static const struct of_device_id tegra_gr3d_match[] = {
{ .compatible = "nvidia,tegra114-gr3d", .data = &tegra114_gr3d_soc },
{ .compatible = "nvidia,tegra30-gr3d", .data = &tegra30_gr3d_soc },
{ .compatible = "nvidia,tegra20-gr3d", .data = &tegra20_gr3d_soc },
{ }
};
MODULE_DEVICE_TABLE(of, tegra_gr3d_match);
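/* GR3D registers carrying buffer addresses, used to seed the addr_regs bitmap. */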
static const u32 gr3d_addr_regs[] = {
GR3D_IDX_ATTRIBUTE( 0),
GR3D_IDX_ATTRIBUTE( 1),
GR3D_IDX_ATTRIBUTE( 2),
GR3D_IDX_ATTRIBUTE( 3),
GR3D_IDX_ATTRIBUTE( 4),
GR3D_IDX_ATTRIBUTE( 5),
GR3D_IDX_ATTRIBUTE( 6),
GR3D_IDX_ATTRIBUTE( 7),
GR3D_IDX_ATTRIBUTE( 8),
GR3D_IDX_ATTRIBUTE( 9),
GR3D_IDX_ATTRIBUTE(10),
GR3D_IDX_ATTRIBUTE(11),
GR3D_IDX_ATTRIBUTE(12),
GR3D_IDX_ATTRIBUTE(13),
GR3D_IDX_ATTRIBUTE(14),
GR3D_IDX_ATTRIBUTE(15),
GR3D_IDX_INDEX_BASE,
GR3D_QR_ZTAG_ADDR,
GR3D_QR_CTAG_ADDR,
GR3D_QR_CZ_ADDR,
GR3D_TEX_TEX_ADDR( 0),
GR3D_TEX_TEX_ADDR( 1),
GR3D_TEX_TEX_ADDR( 2),
GR3D_TEX_TEX_ADDR( 3),
GR3D_TEX_TEX_ADDR( 4),
GR3D_TEX_TEX_ADDR( 5),
GR3D_TEX_TEX_ADDR( 6),
GR3D_TEX_TEX_ADDR( 7),
GR3D_TEX_TEX_ADDR( 8),
GR3D_TEX_TEX_ADDR( 9),
GR3D_TEX_TEX_ADDR(10),
GR3D_TEX_TEX_ADDR(11),
GR3D_TEX_TEX_ADDR(12),
GR3D_TEX_TEX_ADDR(13),
GR3D_TEX_TEX_ADDR(14),
GR3D_TEX_TEX_ADDR(15),
GR3D_DW_MEMORY_OUTPUT_ADDRESS,
GR3D_GLOBAL_SURFADDR( 0),
GR3D_GLOBAL_SURFADDR( 1),
GR3D_GLOBAL_SURFADDR( 2),
GR3D_GLOBAL_SURFADDR( 3),
GR3D_GLOBAL_SURFADDR( 4),
GR3D_GLOBAL_SURFADDR( 5),
GR3D_GLOBAL_SURFADDR( 6),
GR3D_GLOBAL_SURFADDR( 7),
GR3D_GLOBAL_SURFADDR( 8),
GR3D_GLOBAL_SURFADDR( 9),
GR3D_GLOBAL_SURFADDR(10),
GR3D_GLOBAL_SURFADDR(11),
GR3D_GLOBAL_SURFADDR(12),
GR3D_GLOBAL_SURFADDR(13),
GR3D_GLOBAL_SURFADDR(14),
GR3D_GLOBAL_SURFADDR(15),
GR3D_GLOBAL_SPILLSURFADDR,
GR3D_GLOBAL_SURFOVERADDR( 0),
GR3D_GLOBAL_SURFOVERADDR( 1),
GR3D_GLOBAL_SURFOVERADDR( 2),
GR3D_GLOBAL_SURFOVERADDR( 3),
GR3D_GLOBAL_SURFOVERADDR( 4),
GR3D_GLOBAL_SURFOVERADDR( 5),
GR3D_GLOBAL_SURFOVERADDR( 6),
GR3D_GLOBAL_SURFOVERADDR( 7),
GR3D_GLOBAL_SURFOVERADDR( 8),
GR3D_GLOBAL_SURFOVERADDR( 9),
GR3D_GLOBAL_SURFOVERADDR(10),
GR3D_GLOBAL_SURFOVERADDR(11),
GR3D_GLOBAL_SURFOVERADDR(12),
GR3D_GLOBAL_SURFOVERADDR(13),
GR3D_GLOBAL_SURFOVERADDR(14),
GR3D_GLOBAL_SURFOVERADDR(15),
GR3D_GLOBAL_SAMP01SURFADDR( 0),
GR3D_GLOBAL_SAMP01SURFADDR( 1),
GR3D_GLOBAL_SAMP01SURFADDR( 2),
GR3D_GLOBAL_SAMP01SURFADDR( 3),
GR3D_GLOBAL_SAMP01SURFADDR( 4),
GR3D_GLOBAL_SAMP01SURFADDR( 5),
GR3D_GLOBAL_SAMP01SURFADDR( 6),
GR3D_GLOBAL_SAMP01SURFADDR( 7),
GR3D_GLOBAL_SAMP01SURFADDR( 8),
GR3D_GLOBAL_SAMP01SURFADDR( 9),
GR3D_GLOBAL_SAMP01SURFADDR(10),
GR3D_GLOBAL_SAMP01SURFADDR(11),
GR3D_GLOBAL_SAMP01SURFADDR(12),
GR3D_GLOBAL_SAMP01SURFADDR(13),
GR3D_GLOBAL_SAMP01SURFADDR(14),
GR3D_GLOBAL_SAMP01SURFADDR(15),
GR3D_GLOBAL_SAMP23SURFADDR( 0),
GR3D_GLOBAL_SAMP23SURFADDR( 1),
GR3D_GLOBAL_SAMP23SURFADDR( 2),
GR3D_GLOBAL_SAMP23SURFADDR( 3),
GR3D_GLOBAL_SAMP23SURFADDR( 4),
GR3D_GLOBAL_SAMP23SURFADDR( 5),
GR3D_GLOBAL_SAMP23SURFADDR( 6),
GR3D_GLOBAL_SAMP23SURFADDR( 7),
GR3D_GLOBAL_SAMP23SURFADDR( 8),
GR3D_GLOBAL_SAMP23SURFADDR( 9),
GR3D_GLOBAL_SAMP23SURFADDR(10),
GR3D_GLOBAL_SAMP23SURFADDR(11),
GR3D_GLOBAL_SAMP23SURFADDR(12),
GR3D_GLOBAL_SAMP23SURFADDR(13),
GR3D_GLOBAL_SAMP23SURFADDR(14),
GR3D_GLOBAL_SAMP23SURFADDR(15),
};
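/*
 * Power up one of the legacy (pre-GENPD) 3D partitions: look up the matching
 * clock, then run the powergate power-up sequence using a separately
 * requested reset for that partition.
 */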
static int gr3d_power_up_legacy_domain(struct device *dev, const char *name,
unsigned int id)
{
struct gr3d *gr3d = dev_get_drvdata(dev);
struct reset_control *reset;
struct clk *clk;
unsigned int i;
int err;
	/*
	 * The Tegra20 device-tree doesn't specify a name for the 3D clock and
	 * there is only one clock on Tegra20. Tegra30+ device-trees always
	 * specify names for the clocks.
	 */
if (gr3d->nclocks == 1) {
if (id == TEGRA_POWERGATE_3D1)
return 0;
clk = gr3d->clocks[0].clk;
} else {
for (i = 0; i < gr3d->nclocks; i++) {
if (WARN_ON(!gr3d->clocks[i].id))
continue;
if (!strcmp(gr3d->clocks[i].id, name)) {
clk = gr3d->clocks[i].clk;
break;
}
}
if (WARN_ON(i == gr3d->nclocks))
return -EINVAL;
}
	/*
	 * We use an array of resets which includes the MC resets, and an MC
	 * reset shouldn't be asserted while the hardware is gated because
	 * MC flushing will fail for gated hardware. Hence, for the legacy
	 * PD case, we request the individual reset separately.
	 */
reset = reset_control_get_exclusive_released(dev, name);
if (IS_ERR(reset))
return PTR_ERR(reset);
err = reset_control_acquire(reset);
if (err) {
dev_err(dev, "failed to acquire %s reset: %d\n", name, err);
} else {
err = tegra_powergate_sequence_power_up(id, clk, reset);
reset_control_release(reset);
}
reset_control_put(reset);
if (err)
return err;
	/*
	 * tegra_powergate_sequence_power_up() leaves the clock enabled,
	 * while GENPD does not. Hence keep the clock-enable count balanced.
	 */
clk_disable_unprepare(clk);
return 0;
}
static void gr3d_del_link(void *link)
{
device_link_del(link);
}
static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
{
static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL };
const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
struct device **opp_virt_devs, *pd_dev;
struct device_link *link;
unsigned int i;
int err;
err = of_count_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells");
if (err < 0) {
if (err != -ENOENT)
return err;
		/*
		 * Older device-trees don't use GENPD. In this case we need to
		 * toggle the power domains manually.
		 */
err = gr3d_power_up_legacy_domain(dev, "3d",
TEGRA_POWERGATE_3D);
if (err)
return err;
err = gr3d_power_up_legacy_domain(dev, "3d2",
TEGRA_POWERGATE_3D1);
if (err)
return err;
return 0;
}
/*
* The PM domain core automatically attaches a single power domain,
* otherwise it skips attaching completely. We have a single domain
* on Tegra20 and two domains on Tegra30+.
*/
if (dev->pm_domain)
return 0;
err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs);
if (err)
return err;
for (i = 0; opp_genpd_names[i]; i++) {
pd_dev = opp_virt_devs[i];
if (!pd_dev) {
dev_err(dev, "failed to get %s power domain\n",
opp_genpd_names[i]);
return -EINVAL;
}
link = device_link_add(dev, pd_dev, link_flags);
if (!link) {
dev_err(dev, "failed to link to %s\n", dev_name(pd_dev));
return -EINVAL;
}
err = devm_add_action_or_reset(dev, gr3d_del_link, link);
if (err)
return err;
}
return 0;
}
static int gr3d_get_clocks(struct device *dev, struct gr3d *gr3d)
{
int err;
err = devm_clk_bulk_get_all(dev, &gr3d->clocks);
if (err < 0) {
dev_err(dev, "failed to get clock: %d\n", err);
return err;
}
gr3d->nclocks = err;
if (gr3d->nclocks != gr3d->soc->num_clocks) {
dev_err(dev, "invalid number of clocks: %u\n", gr3d->nclocks);
return -ENOENT;
}
return 0;
}
static int gr3d_get_resets(struct device *dev, struct gr3d *gr3d)
{
int err;
gr3d->resets[RST_MC].id = "mc";
gr3d->resets[RST_MC2].id = "mc2";
gr3d->resets[RST_GR3D].id = "3d";
gr3d->resets[RST_GR3D2].id = "3d2";
gr3d->nresets = gr3d->soc->num_resets;
err = devm_reset_control_bulk_get_optional_exclusive_released(
dev, gr3d->nresets, gr3d->resets);
if (err) {
dev_err(dev, "failed to get reset: %d\n", err);
return err;
}
if (WARN_ON(!gr3d->resets[RST_GR3D].rstc) ||
WARN_ON(!gr3d->resets[RST_GR3D2].rstc && gr3d->nresets == 4))
return -ENOENT;
return 0;
}
static int gr3d_probe(struct platform_device *pdev)
{
struct host1x_syncpt **syncpts;
struct gr3d *gr3d;
unsigned int i;
int err;
gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
if (!gr3d)
return -ENOMEM;
platform_set_drvdata(pdev, gr3d);
gr3d->soc = of_device_get_match_data(&pdev->dev);
syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
if (!syncpts)
return -ENOMEM;
err = gr3d_get_clocks(&pdev->dev, gr3d);
if (err)
return err;
err = gr3d_get_resets(&pdev->dev, gr3d);
if (err)
return err;
err = gr3d_init_power(&pdev->dev, gr3d);
if (err)
return err;
INIT_LIST_HEAD(&gr3d->client.base.list);
gr3d->client.base.ops = &gr3d_client_ops;
gr3d->client.base.dev = &pdev->dev;
gr3d->client.base.class = HOST1X_CLASS_GR3D;
gr3d->client.base.syncpts = syncpts;
gr3d->client.base.num_syncpts = 1;
INIT_LIST_HEAD(&gr3d->client.list);
gr3d->client.version = gr3d->soc->version;
gr3d->client.ops = &gr3d_ops;
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
return err;
err = host1x_client_register(&gr3d->client.base);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
return err;
}
/* initialize address register map */
for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++)
set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
return 0;
}
static void gr3d_remove(struct platform_device *pdev)
{
struct gr3d *gr3d = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
host1x_client_unregister(&gr3d->client.base);
}
static int __maybe_unused gr3d_runtime_suspend(struct device *dev)
{
struct gr3d *gr3d = dev_get_drvdata(dev);
int err;
host1x_channel_stop(gr3d->channel);
err = reset_control_bulk_assert(gr3d->nresets, gr3d->resets);
if (err) {
dev_err(dev, "failed to assert reset: %d\n", err);
return err;
}
usleep_range(10, 20);
/*
* Older device-trees don't specify MC resets and power-gating can't
* be done safely in that case. Hence we will keep the power ungated
* for older DTBs. For newer DTBs, GENPD will perform the power-gating.
*/
clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks);
reset_control_bulk_release(gr3d->nresets, gr3d->resets);
return 0;
}
static int __maybe_unused gr3d_runtime_resume(struct device *dev)
{
struct gr3d *gr3d = dev_get_drvdata(dev);
int err;
err = reset_control_bulk_acquire(gr3d->nresets, gr3d->resets);
if (err) {
dev_err(dev, "failed to acquire reset: %d\n", err);
return err;
}
err = clk_bulk_prepare_enable(gr3d->nclocks, gr3d->clocks);
if (err) {
dev_err(dev, "failed to enable clock: %d\n", err);
goto release_reset;
}
err = reset_control_bulk_deassert(gr3d->nresets, gr3d->resets);
if (err) {
dev_err(dev, "failed to deassert reset: %d\n", err);
goto disable_clk;
}
pm_runtime_enable(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 500);
return 0;
disable_clk:
clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks);
release_reset:
reset_control_bulk_release(gr3d->nresets, gr3d->resets);
return err;
}
static const struct dev_pm_ops tegra_gr3d_pm = {
SET_RUNTIME_PM_OPS(gr3d_runtime_suspend, gr3d_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
struct platform_driver tegra_gr3d_driver = {
.driver = {
.name = "tegra-gr3d",
.of_match_table = tegra_gr3d_match,
.pm = &tegra_gr3d_pm,
},
.probe = gr3d_probe,
.remove_new = gr3d_remove,
};
| linux-master | drivers/gpu/drm/tegra/gr3d.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2013, NVIDIA Corporation.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <soc/tegra/common.h>
#include "drm.h"
#include "gem.h"
#include "gr2d.h"
enum {
RST_MC,
RST_GR2D,
RST_GR2D_MAX,
};
struct gr2d_soc {
unsigned int version;
};
struct gr2d {
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk;
struct reset_control_bulk_data resets[RST_GR2D_MAX];
unsigned int nresets;
const struct gr2d_soc *soc;
DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
};
static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
{
return container_of(client, struct gr2d, client);
}
static int gr2d_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct drm_device *dev = dev_get_drvdata(client->host);
unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
struct gr2d *gr2d = to_gr2d(drm);
int err;
gr2d->channel = host1x_channel_request(client);
if (!gr2d->channel)
return -ENOMEM;
client->syncpts[0] = host1x_syncpt_request(client, flags);
if (!client->syncpts[0]) {
err = -ENOMEM;
dev_err(client->dev, "failed to request syncpoint: %d\n", err);
goto put;
}
err = host1x_client_iommu_attach(client);
if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
err = tegra_drm_register_client(dev->dev_private, drm);
if (err < 0) {
dev_err(client->dev, "failed to register client: %d\n", err);
goto detach_iommu;
}
return 0;
detach_iommu:
host1x_client_iommu_detach(client);
free:
host1x_syncpt_put(client->syncpts[0]);
put:
host1x_channel_put(gr2d->channel);
return err;
}
static int gr2d_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct gr2d *gr2d = to_gr2d(drm);
int err;
err = tegra_drm_unregister_client(tegra, drm);
if (err < 0)
return err;
pm_runtime_dont_use_autosuspend(client->dev);
pm_runtime_force_suspend(client->dev);
host1x_client_iommu_detach(client);
host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(gr2d->channel);
gr2d->channel = NULL;
return 0;
}
static const struct host1x_client_ops gr2d_client_ops = {
.init = gr2d_init,
.exit = gr2d_exit,
};
static int gr2d_open_channel(struct tegra_drm_client *client,
struct tegra_drm_context *context)
{
struct gr2d *gr2d = to_gr2d(client);
context->channel = host1x_channel_get(gr2d->channel);
if (!context->channel)
return -ENOMEM;
return 0;
}
static void gr2d_close_channel(struct tegra_drm_context *context)
{
host1x_channel_put(context->channel);
}
static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 offset)
{
struct gr2d *gr2d = dev_get_drvdata(dev);
switch (class) {
case HOST1X_CLASS_HOST1X:
if (offset == 0x2b)
return 1;
break;
case HOST1X_CLASS_GR2D:
case HOST1X_CLASS_GR2D_SB:
if (offset >= GR2D_NUM_REGS)
break;
if (test_bit(offset, gr2d->addr_regs))
return 1;
break;
}
return 0;
}
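/* Command streams may target either of the two GR2D host1x classes. */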
static int gr2d_is_valid_class(u32 class)
{
return (class == HOST1X_CLASS_GR2D ||
class == HOST1X_CLASS_GR2D_SB);
}
static const struct tegra_drm_client_ops gr2d_ops = {
.open_channel = gr2d_open_channel,
.close_channel = gr2d_close_channel,
.is_addr_reg = gr2d_is_addr_reg,
.is_valid_class = gr2d_is_valid_class,
.submit = tegra_drm_submit,
};
static const struct gr2d_soc tegra20_gr2d_soc = {
.version = 0x20,
};
static const struct gr2d_soc tegra30_gr2d_soc = {
.version = 0x30,
};
static const struct gr2d_soc tegra114_gr2d_soc = {
.version = 0x35,
};
static const struct of_device_id gr2d_match[] = {
{ .compatible = "nvidia,tegra114-gr2d", .data = &tegra114_gr2d_soc },
{ .compatible = "nvidia,tegra30-gr2d", .data = &tegra30_gr2d_soc },
{ .compatible = "nvidia,tegra20-gr2d", .data = &tegra20_gr2d_soc },
{ },
};
MODULE_DEVICE_TABLE(of, gr2d_match);
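/* GR2D registers carrying buffer addresses, used to seed the addr_regs bitmap. */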
static const u32 gr2d_addr_regs[] = {
GR2D_UA_BASE_ADDR,
GR2D_VA_BASE_ADDR,
GR2D_PAT_BASE_ADDR,
GR2D_DSTA_BASE_ADDR,
GR2D_DSTB_BASE_ADDR,
GR2D_DSTC_BASE_ADDR,
GR2D_SRCA_BASE_ADDR,
GR2D_SRCB_BASE_ADDR,
GR2D_PATBASE_ADDR,
GR2D_SRC_BASE_ADDR_SB,
GR2D_DSTA_BASE_ADDR_SB,
GR2D_DSTB_BASE_ADDR_SB,
GR2D_UA_BASE_ADDR_SB,
GR2D_VA_BASE_ADDR_SB,
};
static int gr2d_get_resets(struct device *dev, struct gr2d *gr2d)
{
int err;
gr2d->resets[RST_MC].id = "mc";
gr2d->resets[RST_GR2D].id = "2d";
gr2d->nresets = RST_GR2D_MAX;
err = devm_reset_control_bulk_get_optional_exclusive_released(
dev, gr2d->nresets, gr2d->resets);
if (err) {
dev_err(dev, "failed to get reset: %d\n", err);
return err;
}
if (WARN_ON(!gr2d->resets[RST_GR2D].rstc))
return -ENOENT;
return 0;
}
static int gr2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct host1x_syncpt **syncpts;
struct gr2d *gr2d;
unsigned int i;
int err;
gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
if (!gr2d)
return -ENOMEM;
platform_set_drvdata(pdev, gr2d);
gr2d->soc = of_device_get_match_data(dev);
syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
if (!syncpts)
return -ENOMEM;
gr2d->clk = devm_clk_get(dev, NULL);
if (IS_ERR(gr2d->clk)) {
dev_err(dev, "cannot get clock\n");
return PTR_ERR(gr2d->clk);
}
err = gr2d_get_resets(dev, gr2d);
if (err)
return err;
INIT_LIST_HEAD(&gr2d->client.base.list);
gr2d->client.base.ops = &gr2d_client_ops;
gr2d->client.base.dev = dev;
gr2d->client.base.class = HOST1X_CLASS_GR2D;
gr2d->client.base.syncpts = syncpts;
gr2d->client.base.num_syncpts = 1;
INIT_LIST_HEAD(&gr2d->client.list);
gr2d->client.version = gr2d->soc->version;
gr2d->client.ops = &gr2d_ops;
err = devm_tegra_core_dev_init_opp_table_common(dev);
if (err)
return err;
err = host1x_client_register(&gr2d->client.base);
if (err < 0) {
dev_err(dev, "failed to register host1x client: %d\n", err);
return err;
}
/* initialize address register map */
for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++)
set_bit(gr2d_addr_regs[i], gr2d->addr_regs);
return 0;
}
static void gr2d_remove(struct platform_device *pdev)
{
struct gr2d *gr2d = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
host1x_client_unregister(&gr2d->client.base);
}
static int __maybe_unused gr2d_runtime_suspend(struct device *dev)
{
struct gr2d *gr2d = dev_get_drvdata(dev);
int err;
host1x_channel_stop(gr2d->channel);
reset_control_bulk_release(gr2d->nresets, gr2d->resets);
	/*
	 * The GR2D module shouldn't be reset while the hardware is idling,
	 * otherwise host1x's cmdproc will get stuck trying to access any G2
	 * register after the reset. The GR2D module can be either hot-reset
	 * or reset after power-gating of the HEG partition. Hence we only put
	 * the memory client part of the module into reset; the HEG GENPD will
	 * take care of resetting the GR2D module across power-gating.
	 *
	 * On Tegra20 there is no HEG partition, but it's okay to have an
	 * undetermined h/w state since userspace is expected to reprogram the
	 * state on each job submission anyway.
	 */
err = reset_control_acquire(gr2d->resets[RST_MC].rstc);
if (err) {
dev_err(dev, "failed to acquire MC reset: %d\n", err);
goto acquire_reset;
}
err = reset_control_assert(gr2d->resets[RST_MC].rstc);
reset_control_release(gr2d->resets[RST_MC].rstc);
if (err) {
dev_err(dev, "failed to assert MC reset: %d\n", err);
goto acquire_reset;
}
clk_disable_unprepare(gr2d->clk);
return 0;
acquire_reset:
reset_control_bulk_acquire(gr2d->nresets, gr2d->resets);
reset_control_bulk_deassert(gr2d->nresets, gr2d->resets);
return err;
}
static int __maybe_unused gr2d_runtime_resume(struct device *dev)
{
struct gr2d *gr2d = dev_get_drvdata(dev);
int err;
err = reset_control_bulk_acquire(gr2d->nresets, gr2d->resets);
if (err) {
dev_err(dev, "failed to acquire reset: %d\n", err);
return err;
}
err = clk_prepare_enable(gr2d->clk);
if (err) {
dev_err(dev, "failed to enable clock: %d\n", err);
goto release_reset;
}
usleep_range(2000, 4000);
/* this is a reset array which deasserts both 2D MC and 2D itself */
err = reset_control_bulk_deassert(gr2d->nresets, gr2d->resets);
if (err) {
dev_err(dev, "failed to deassert reset: %d\n", err);
goto disable_clk;
}
pm_runtime_enable(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 500);
return 0;
disable_clk:
clk_disable_unprepare(gr2d->clk);
release_reset:
reset_control_bulk_release(gr2d->nresets, gr2d->resets);
return err;
}
static const struct dev_pm_ops tegra_gr2d_pm = {
SET_RUNTIME_PM_OPS(gr2d_runtime_suspend, gr2d_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
struct platform_driver tegra_gr2d_driver = {
.driver = {
.name = "tegra-gr2d",
.of_match_table = gr2d_match,
.pm = &tegra_gr2d_pm,
},
.probe = gr2d_probe,
.remove_new = gr2d_remove,
};
| linux-master | drivers/gpu/drm/tegra/gr2d.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2020 NVIDIA Corporation */
#include "drm.h"
#include "submit.h"
#include "uapi.h"
struct tegra_drm_firewall {
struct tegra_drm_submit_data *submit;
struct tegra_drm_client *client;
u32 *data;
u32 pos;
u32 end;
u32 class;
};
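/* Fetch the next word of the command stream, failing once the end is reached. */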
static int fw_next(struct tegra_drm_firewall *fw, u32 *word)
{
if (fw->pos == fw->end)
return -EINVAL;
*word = fw->data[fw->pos++];
return 0;
}
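/*
 * A value written to an address register is only accepted if it falls within
 * the IOVA range of one of the mappings used by this submission.
 */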
static bool fw_check_addr_valid(struct tegra_drm_firewall *fw, u32 offset)
{
u32 i;
for (i = 0; i < fw->submit->num_used_mappings; i++) {
struct tegra_drm_mapping *m = fw->submit->used_mappings[i].mapping;
if (offset >= m->iova && offset <= m->iova_end)
return true;
}
return false;
}
static int fw_check_reg(struct tegra_drm_firewall *fw, u32 offset)
{
bool is_addr;
u32 word;
int err;
err = fw_next(fw, &word);
if (err)
return err;
if (!fw->client->ops->is_addr_reg)
return 0;
is_addr = fw->client->ops->is_addr_reg(fw->client->base.dev, fw->class,
offset);
if (!is_addr)
return 0;
if (!fw_check_addr_valid(fw, word))
return -EINVAL;
return 0;
}
static int fw_check_regs_seq(struct tegra_drm_firewall *fw, u32 offset,
u32 count, bool incr)
{
u32 i;
for (i = 0; i < count; i++) {
if (fw_check_reg(fw, offset))
return -EINVAL;
if (incr)
offset++;
}
return 0;
}
static int fw_check_regs_mask(struct tegra_drm_firewall *fw, u32 offset,
u16 mask)
{
unsigned long bmask = mask;
unsigned int bit;
for_each_set_bit(bit, &bmask, 16) {
if (fw_check_reg(fw, offset+bit))
return -EINVAL;
}
return 0;
}
static int fw_check_regs_imm(struct tegra_drm_firewall *fw, u32 offset)
{
bool is_addr;
if (!fw->client->ops->is_addr_reg)
return 0;
is_addr = fw->client->ops->is_addr_reg(fw->client->base.dev, fw->class,
offset);
if (is_addr)
return -EINVAL;
return 0;
}
static int fw_check_class(struct tegra_drm_firewall *fw, u32 class)
{
if (!fw->client->ops->is_valid_class) {
if (class == fw->client->base.class)
return 0;
else
return -EINVAL;
}
if (!fw->client->ops->is_valid_class(class))
return -EINVAL;
return 0;
}
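/* Host1x command stream opcodes recognized by the firewall. */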
enum {
HOST1X_OPCODE_SETCLASS = 0x00,
HOST1X_OPCODE_INCR = 0x01,
HOST1X_OPCODE_NONINCR = 0x02,
HOST1X_OPCODE_MASK = 0x03,
HOST1X_OPCODE_IMM = 0x04,
HOST1X_OPCODE_RESTART = 0x05,
HOST1X_OPCODE_GATHER = 0x06,
HOST1X_OPCODE_SETSTRMID = 0x07,
HOST1X_OPCODE_SETAPPID = 0x08,
HOST1X_OPCODE_SETPYLD = 0x09,
HOST1X_OPCODE_INCR_W = 0x0a,
HOST1X_OPCODE_NONINCR_W = 0x0b,
HOST1X_OPCODE_GATHER_W = 0x0c,
HOST1X_OPCODE_RESTART_W = 0x0d,
HOST1X_OPCODE_EXTEND = 0x0e,
};
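/*
 * Walk the command stream from 'start' for 'words' words, decoding each
 * host1x opcode and verifying that class changes target valid classes and
 * that address registers only receive IOVAs backed by the submission's
 * mappings.
 */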
int tegra_drm_fw_validate(struct tegra_drm_client *client, u32 *data, u32 start,
u32 words, struct tegra_drm_submit_data *submit,
u32 *job_class)
{
struct tegra_drm_firewall fw = {
.submit = submit,
.client = client,
.data = data,
.pos = start,
.end = start+words,
.class = *job_class,
};
bool payload_valid = false;
u32 payload;
int err;
while (fw.pos != fw.end) {
u32 word, opcode, offset, count, mask, class;
err = fw_next(&fw, &word);
if (err)
return err;
opcode = (word & 0xf0000000) >> 28;
switch (opcode) {
case HOST1X_OPCODE_SETCLASS:
offset = word >> 16 & 0xfff;
mask = word & 0x3f;
class = (word >> 6) & 0x3ff;
err = fw_check_class(&fw, class);
fw.class = class;
*job_class = class;
if (!err)
err = fw_check_regs_mask(&fw, offset, mask);
if (err)
dev_warn(client->base.dev,
"illegal SETCLASS(offset=0x%x, mask=0x%x, class=0x%x) at word %u",
offset, mask, class, fw.pos-1);
break;
case HOST1X_OPCODE_INCR:
offset = (word >> 16) & 0xfff;
count = word & 0xffff;
err = fw_check_regs_seq(&fw, offset, count, true);
if (err)
dev_warn(client->base.dev,
"illegal INCR(offset=0x%x, count=%u) in class 0x%x at word %u",
offset, count, fw.class, fw.pos-1);
break;
case HOST1X_OPCODE_NONINCR:
offset = (word >> 16) & 0xfff;
count = word & 0xffff;
err = fw_check_regs_seq(&fw, offset, count, false);
if (err)
dev_warn(client->base.dev,
"illegal NONINCR(offset=0x%x, count=%u) in class 0x%x at word %u",
offset, count, fw.class, fw.pos-1);
break;
case HOST1X_OPCODE_MASK:
offset = (word >> 16) & 0xfff;
mask = word & 0xffff;
err = fw_check_regs_mask(&fw, offset, mask);
if (err)
dev_warn(client->base.dev,
"illegal MASK(offset=0x%x, mask=0x%x) in class 0x%x at word %u",
offset, mask, fw.class, fw.pos-1);
break;
case HOST1X_OPCODE_IMM:
/* IMM cannot reasonably be used to write a pointer */
offset = (word >> 16) & 0xfff;
err = fw_check_regs_imm(&fw, offset);
if (err)
dev_warn(client->base.dev,
"illegal IMM(offset=0x%x) in class 0x%x at word %u",
offset, fw.class, fw.pos-1);
break;
case HOST1X_OPCODE_SETPYLD:
payload = word & 0xffff;
payload_valid = true;
break;
case HOST1X_OPCODE_INCR_W:
if (!payload_valid)
return -EINVAL;
offset = word & 0x3fffff;
err = fw_check_regs_seq(&fw, offset, payload, true);
if (err)
dev_warn(client->base.dev,
"illegal INCR_W(offset=0x%x) in class 0x%x at word %u",
offset, fw.class, fw.pos-1);
break;
case HOST1X_OPCODE_NONINCR_W:
if (!payload_valid)
return -EINVAL;
offset = word & 0x3fffff;
err = fw_check_regs_seq(&fw, offset, payload, false);
if (err)
dev_warn(client->base.dev,
"illegal NONINCR(offset=0x%x) in class 0x%x at word %u",
offset, fw.class, fw.pos-1);
break;
default:
dev_warn(client->base.dev, "illegal opcode at word %u",
fw.pos-1);
return -EINVAL;
}
if (err)
return err;
}
return 0;
}
| linux-master | drivers/gpu/drm/tegra/firewall.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Avionic Design GmbH
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/of.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_simple_kms_helper.h>
#include "drm.h"
#include "dc.h"
struct tegra_rgb {
struct tegra_output output;
struct tegra_dc *dc;
struct clk *pll_d_out0;
struct clk *pll_d2_out0;
struct clk *clk_parent;
struct clk *clk;
};
static inline struct tegra_rgb *to_rgb(struct tegra_output *output)
{
return container_of(output, struct tegra_rgb, output);
}
struct reg_entry {
unsigned long offset;
unsigned long value;
};
static const struct reg_entry rgb_enable[] = {
{ DC_COM_PIN_OUTPUT_ENABLE(0), 0x00000000 },
{ DC_COM_PIN_OUTPUT_ENABLE(1), 0x00000000 },
{ DC_COM_PIN_OUTPUT_ENABLE(2), 0x00000000 },
{ DC_COM_PIN_OUTPUT_ENABLE(3), 0x00000000 },
{ DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
{ DC_COM_PIN_OUTPUT_POLARITY(1), 0x01000000 },
{ DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
{ DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
{ DC_COM_PIN_OUTPUT_DATA(0), 0x00000000 },
{ DC_COM_PIN_OUTPUT_DATA(1), 0x00000000 },
{ DC_COM_PIN_OUTPUT_DATA(2), 0x00000000 },
{ DC_COM_PIN_OUTPUT_DATA(3), 0x00000000 },
{ DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
{ DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
{ DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
{ DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
{ DC_COM_PIN_OUTPUT_SELECT(4), 0x00210222 },
{ DC_COM_PIN_OUTPUT_SELECT(5), 0x00002200 },
{ DC_COM_PIN_OUTPUT_SELECT(6), 0x00020000 },
};
static const struct reg_entry rgb_disable[] = {
{ DC_COM_PIN_OUTPUT_SELECT(6), 0x00000000 },
{ DC_COM_PIN_OUTPUT_SELECT(5), 0x00000000 },
{ DC_COM_PIN_OUTPUT_SELECT(4), 0x00000000 },
{ DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
{ DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
{ DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
{ DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
{ DC_COM_PIN_OUTPUT_DATA(3), 0xaaaaaaaa },
{ DC_COM_PIN_OUTPUT_DATA(2), 0xaaaaaaaa },
{ DC_COM_PIN_OUTPUT_DATA(1), 0xaaaaaaaa },
{ DC_COM_PIN_OUTPUT_DATA(0), 0xaaaaaaaa },
{ DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
{ DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
{ DC_COM_PIN_OUTPUT_POLARITY(1), 0x00000000 },
{ DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
{ DC_COM_PIN_OUTPUT_ENABLE(3), 0x55555555 },
{ DC_COM_PIN_OUTPUT_ENABLE(2), 0x55555555 },
{ DC_COM_PIN_OUTPUT_ENABLE(1), 0x55150005 },
{ DC_COM_PIN_OUTPUT_ENABLE(0), 0x55555555 },
};
static void tegra_dc_write_regs(struct tegra_dc *dc,
const struct reg_entry *table,
unsigned int num)
{
unsigned int i;
for (i = 0; i < num; i++)
tegra_dc_writel(dc, table[i].value, table[i].offset);
}
static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_rgb *rgb = to_rgb(output);
tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
tegra_dc_commit(rgb->dc);
}
static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_rgb *rgb = to_rgb(output);
u32 value;
tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
tegra_dc_writel(rgb->dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
/* XXX: parameterize? */
value = tegra_dc_readl(rgb->dc, DC_COM_PIN_OUTPUT_POLARITY(1));
value &= ~LVS_OUTPUT_POLARITY_LOW;
value &= ~LHS_OUTPUT_POLARITY_LOW;
tegra_dc_writel(rgb->dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
/* XXX: parameterize? */
value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
DISP_ORDER_RED_BLUE;
tegra_dc_writel(rgb->dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
tegra_dc_commit(rgb->dc);
}
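/*
 * Changing the parent clock rate is only allowed on SoCs that have a second
 * display PLL (pll_d2_out0) and only if this output is parented to one of
 * the dedicated display PLLs, which aren't shared with other peripherals.
 */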
static bool tegra_rgb_pll_rate_change_allowed(struct tegra_rgb *rgb)
{
if (!rgb->pll_d2_out0)
return false;
if (!clk_is_match(rgb->clk_parent, rgb->pll_d_out0) &&
!clk_is_match(rgb->clk_parent, rgb->pll_d2_out0))
return false;
return true;
}
static int
tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
unsigned long pclk = crtc_state->mode.clock * 1000;
struct tegra_rgb *rgb = to_rgb(output);
unsigned int div;
int err;
/*
* We may not want to change the frequency of the parent clock, since
* it may be a parent for other peripherals. This is due to the fact
* that on Tegra20 there's only a single clock dedicated to display
* (pll_d_out0), whereas later generations have a second one that can
* be used to independently drive a second output (pll_d2_out0).
*
* As a way to support multiple outputs on Tegra20 as well, pll_p is
* typically used as the parent clock for the display controllers.
* But this comes at a cost: pll_p is the parent of several other
* peripherals, so its frequency shouldn't change out of the blue.
*
	 * The best we can do at this point is to use the shift clock divider
	 * and hope that the desired frequency can be matched (or at least
	 * matched sufficiently closely that the panel will still work).
*/
if (tegra_rgb_pll_rate_change_allowed(rgb)) {
/*
* Set display controller clock to x2 of PCLK in order to
* produce higher resolution pulse positions.
*/
div = 2;
pclk *= 2;
} else {
div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2;
pclk = 0;
}
err = tegra_dc_state_setup_clock(dc, crtc_state, rgb->clk_parent,
pclk, div);
if (err < 0) {
dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
return err;
}
return err;
}
static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
.disable = tegra_rgb_encoder_disable,
.enable = tegra_rgb_encoder_enable,
.atomic_check = tegra_rgb_encoder_atomic_check,
};
int tegra_dc_rgb_probe(struct tegra_dc *dc)
{
struct device_node *np;
struct tegra_rgb *rgb;
int err;
np = of_get_child_by_name(dc->dev->of_node, "rgb");
if (!np || !of_device_is_available(np))
return -ENODEV;
rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
if (!rgb)
return -ENOMEM;
rgb->output.dev = dc->dev;
rgb->output.of_node = np;
rgb->dc = dc;
err = tegra_output_probe(&rgb->output);
if (err < 0)
return err;
rgb->clk = devm_clk_get(dc->dev, NULL);
if (IS_ERR(rgb->clk)) {
dev_err(dc->dev, "failed to get clock\n");
return PTR_ERR(rgb->clk);
}
rgb->clk_parent = devm_clk_get(dc->dev, "parent");
if (IS_ERR(rgb->clk_parent)) {
dev_err(dc->dev, "failed to get parent clock\n");
return PTR_ERR(rgb->clk_parent);
}
err = clk_set_parent(rgb->clk, rgb->clk_parent);
if (err < 0) {
dev_err(dc->dev, "failed to set parent clock: %d\n", err);
return err;
}
rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0");
if (IS_ERR(rgb->pll_d_out0)) {
err = PTR_ERR(rgb->pll_d_out0);
dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err);
return err;
}
if (dc->soc->has_pll_d2_out0) {
rgb->pll_d2_out0 = clk_get_sys(NULL, "pll_d2_out0");
if (IS_ERR(rgb->pll_d2_out0)) {
err = PTR_ERR(rgb->pll_d2_out0);
dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err);
return err;
}
}
dc->rgb = &rgb->output;
return 0;
}
void tegra_dc_rgb_remove(struct tegra_dc *dc)
{
struct tegra_rgb *rgb;
if (!dc->rgb)
return;
rgb = to_rgb(dc->rgb);
clk_put(rgb->pll_d2_out0);
clk_put(rgb->pll_d_out0);
tegra_output_remove(dc->rgb);
dc->rgb = NULL;
}
int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
{
struct tegra_output *output = dc->rgb;
struct drm_connector *connector;
int err;
if (!dc->rgb)
return -ENODEV;
drm_simple_encoder_init(drm, &output->encoder, DRM_MODE_ENCODER_LVDS);
drm_encoder_helper_add(&output->encoder,
&tegra_rgb_encoder_helper_funcs);
	/*
	 * Wrap a directly-connected panel into a DRM bridge in order to let
	 * the DRM core handle the panel for us.
	 */
if (output->panel) {
output->bridge = devm_drm_panel_bridge_add(output->dev,
output->panel);
if (IS_ERR(output->bridge)) {
dev_err(output->dev,
"failed to wrap panel into bridge: %pe\n",
output->bridge);
return PTR_ERR(output->bridge);
}
output->panel = NULL;
}
	/*
	 * Tegra devices that have an LVDS panel use an LVDS encoder bridge
	 * to convert up to 28 LCD LVTTL lanes into 5/4 LVDS lanes that go to
	 * the display panel's receiver.
	 *
	 * The encoder usually has a power-down control which needs to be
	 * enabled in order to transmit data to the panel. Historically,
	 * devices using an older device-tree version didn't model the bridge,
	 * assuming that the encoder is turned ON by default, while today's
	 * DRM allows us to model the LVDS encoder properly.
	 *
	 * Newer device-trees use the LVDS encoder bridge, which provides us
	 * with a connector and handles the display panel.
	 *
	 * For older device-trees, the panel was wrapped into a panel-bridge
	 * above.
	 */
if (output->bridge) {
err = drm_bridge_attach(&output->encoder, output->bridge,
NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (err)
return err;
connector = drm_bridge_connector_init(drm, &output->encoder);
if (IS_ERR(connector)) {
dev_err(output->dev,
"failed to initialize bridge connector: %pe\n",
connector);
return PTR_ERR(connector);
}
drm_connector_attach_encoder(connector, &output->encoder);
}
err = tegra_output_init(drm, output);
if (err < 0) {
dev_err(output->dev, "failed to initialize output: %d\n", err);
return err;
}
/*
* Other outputs can be attached to either display controller. The RGB
* outputs are an exception and work only with their parent display
* controller.
*/
output->encoder.possible_crtcs = drm_crtc_mask(&dc->base);
return 0;
}
int tegra_dc_rgb_exit(struct tegra_dc *dc)
{
if (dc->rgb)
tegra_output_exit(dc->rgb);
return 0;
}
| linux-master | drivers/gpu/drm/tegra/rgb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* NVIDIA Tegra DRM GEM helper functions
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
* Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
*
* Based on the GEM/CMA helpers
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
*/
#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>
#include "drm.h"
#include "gem.h"
MODULE_IMPORT_NS(DMA_BUF);
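/*
 * Count the number of discontiguous DMA chunks in an SG list by comparing
 * each mapped entry's DMA address against the end of the previously started
 * chunk.
 */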
static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
dma_addr_t next = ~(dma_addr_t)0;
unsigned int count = 0, i;
struct scatterlist *s;
for_each_sg(sgl, s, nents, i) {
/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
if (!sg_dma_len(s))
continue;
if (sg_dma_address(s) != next) {
next = sg_dma_address(s) + sg_dma_len(s);
count++;
}
}
return count;
}
static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}
static void tegra_bo_put(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
drm_gem_object_put(&obj->gem);
}
static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
enum dma_data_direction direction)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
struct drm_gem_object *gem = &obj->gem;
struct host1x_bo_mapping *map;
int err;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return ERR_PTR(-ENOMEM);
kref_init(&map->ref);
map->bo = host1x_bo_get(bo);
map->direction = direction;
map->dev = dev;
/*
* Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
*/
if (gem->import_attach) {
struct dma_buf *buf = gem->import_attach->dmabuf;
map->attach = dma_buf_attach(buf, dev);
if (IS_ERR(map->attach)) {
err = PTR_ERR(map->attach);
goto free;
}
map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
if (IS_ERR(map->sgt)) {
dma_buf_detach(buf, map->attach);
err = PTR_ERR(map->sgt);
map->sgt = NULL;
goto free;
}
err = sgt_dma_count_chunks(map->sgt);
map->size = gem->size;
goto out;
}
/*
* If we don't have a mapping for this buffer yet, return an SG table
* so that host1x can do the mapping for us via the DMA API.
*/
map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
if (!map->sgt) {
err = -ENOMEM;
goto free;
}
if (obj->pages) {
/*
* If the buffer object was allocated from the explicit IOMMU
* API code paths, construct an SG table from the pages.
*/
err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
GFP_KERNEL);
if (err < 0)
goto free;
} else {
/*
* If the buffer object had no pages allocated and if it was
* not imported, it had to be allocated with the DMA API, so
* the DMA API helper can be used.
*/
err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
if (err < 0)
goto free;
}
err = dma_map_sgtable(dev, map->sgt, direction, 0);
if (err)
goto free_sgt;
out:
/*
* If we've manually mapped the buffer object through the IOMMU, make sure to return the
* existing IOVA address of our mapping.
*/
if (!obj->mm) {
map->phys = sg_dma_address(map->sgt->sgl);
map->chunks = err;
} else {
map->phys = obj->iova;
map->chunks = 1;
}
map->size = gem->size;
return map;
free_sgt:
sg_free_table(map->sgt);
free:
kfree(map->sgt);
kfree(map);
return ERR_PTR(err);
}
static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
if (map->attach) {
dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
map->direction);
dma_buf_detach(map->attach->dmabuf, map->attach);
} else {
dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
sg_free_table(map->sgt);
kfree(map->sgt);
}
host1x_bo_put(map->bo);
kfree(map);
}
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
struct iosys_map map;
int ret;
if (obj->vaddr) {
return obj->vaddr;
} else if (obj->gem.import_attach) {
ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
return ret ? NULL : map.vaddr;
} else {
return vmap(obj->pages, obj->num_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
}
}
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);
if (obj->vaddr)
return;
else if (obj->gem.import_attach)
dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);
else
vunmap(addr);
}
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
drm_gem_object_get(&obj->gem);
return bo;
}
static const struct host1x_bo_ops tegra_bo_ops = {
.get = tegra_bo_get,
.put = tegra_bo_put,
.pin = tegra_bo_pin,
.unpin = tegra_bo_unpin,
.mmap = tegra_bo_mmap,
.munmap = tegra_bo_munmap,
};
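/*
 * Reserve a range from the DRM-managed IOVA space and map the buffer's SG
 * table into the shared IOMMU domain at that address.
 */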
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
int prot = IOMMU_READ | IOMMU_WRITE;
int err;
if (bo->mm)
return -EBUSY;
bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
if (!bo->mm)
return -ENOMEM;
mutex_lock(&tegra->mm_lock);
err = drm_mm_insert_node_generic(&tegra->mm,
bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
if (err < 0) {
dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
err);
goto unlock;
}
bo->iova = bo->mm->start;
bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
if (!bo->size) {
dev_err(tegra->drm->dev, "failed to map buffer\n");
err = -ENOMEM;
goto remove;
}
mutex_unlock(&tegra->mm_lock);
return 0;
remove:
drm_mm_remove_node(bo->mm);
unlock:
mutex_unlock(&tegra->mm_lock);
kfree(bo->mm);
return err;
}
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
if (!bo->mm)
return 0;
mutex_lock(&tegra->mm_lock);
iommu_unmap(tegra->domain, bo->iova, bo->size);
drm_mm_remove_node(bo->mm);
mutex_unlock(&tegra->mm_lock);
kfree(bo->mm);
return 0;
}
static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
.free = tegra_bo_free_object,
.export = tegra_gem_prime_export,
.vm_ops = &tegra_bo_vm_ops,
};
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
size_t size)
{
struct tegra_bo *bo;
int err;
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);
bo->gem.funcs = &tegra_gem_object_funcs;
host1x_bo_init(&bo->base, &tegra_bo_ops);
size = round_up(size, PAGE_SIZE);
err = drm_gem_object_init(drm, &bo->gem, size);
if (err < 0)
goto free;
err = drm_gem_create_mmap_offset(&bo->gem);
if (err < 0)
goto release;
return bo;
release:
drm_gem_object_release(&bo->gem);
free:
kfree(bo);
return ERR_PTR(err);
}
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
if (bo->pages) {
dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
drm_gem_put_pages(&bo->gem, bo->pages, true, true);
sg_free_table(bo->sgt);
kfree(bo->sgt);
} else if (bo->vaddr) {
dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
}
}
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
int err;
bo->pages = drm_gem_get_pages(&bo->gem);
if (IS_ERR(bo->pages))
return PTR_ERR(bo->pages);
bo->num_pages = bo->gem.size >> PAGE_SHIFT;
bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
if (IS_ERR(bo->sgt)) {
err = PTR_ERR(bo->sgt);
goto put_pages;
}
err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
if (err)
goto free_sgt;
return 0;
free_sgt:
sg_free_table(bo->sgt);
kfree(bo->sgt);
put_pages:
drm_gem_put_pages(&bo->gem, bo->pages, false, false);
return err;
}
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
struct tegra_drm *tegra = drm->dev_private;
int err;
if (tegra->domain) {
err = tegra_bo_get_pages(drm, bo);
if (err < 0)
return err;
err = tegra_bo_iommu_map(tegra, bo);
if (err < 0) {
tegra_bo_free(drm, bo);
return err;
}
} else {
size_t size = bo->gem.size;
bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
dev_err(drm->dev,
"failed to allocate buffer of size %zu\n",
size);
return -ENOMEM;
}
}
return 0;
}
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
unsigned long flags)
{
struct tegra_bo *bo;
int err;
bo = tegra_bo_alloc_object(drm, size);
if (IS_ERR(bo))
return bo;
err = tegra_bo_alloc(drm, bo);
if (err < 0)
goto release;
if (flags & DRM_TEGRA_GEM_CREATE_TILED)
bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
bo->flags |= TEGRA_BO_BOTTOM_UP;
return bo;
release:
drm_gem_object_release(&bo->gem);
kfree(bo);
return ERR_PTR(err);
}
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
struct drm_device *drm,
size_t size,
unsigned long flags,
u32 *handle)
{
struct tegra_bo *bo;
int err;
bo = tegra_bo_create(drm, size, flags);
if (IS_ERR(bo))
return bo;
err = drm_gem_handle_create(file, &bo->gem, handle);
if (err) {
tegra_bo_free_object(&bo->gem);
return ERR_PTR(err);
}
drm_gem_object_put(&bo->gem);
return bo;
}
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
struct dma_buf *buf)
{
struct tegra_drm *tegra = drm->dev_private;
struct dma_buf_attachment *attach;
struct tegra_bo *bo;
int err;
bo = tegra_bo_alloc_object(drm, buf->size);
if (IS_ERR(bo))
return bo;
attach = dma_buf_attach(buf, drm->dev);
if (IS_ERR(attach)) {
err = PTR_ERR(attach);
goto free;
}
get_dma_buf(buf);
bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
if (IS_ERR(bo->sgt)) {
err = PTR_ERR(bo->sgt);
goto detach;
}
if (tegra->domain) {
err = tegra_bo_iommu_map(tegra, bo);
if (err < 0)
goto detach;
}
bo->gem.import_attach = attach;
return bo;
detach:
if (!IS_ERR_OR_NULL(bo->sgt))
dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);
dma_buf_detach(buf, attach);
dma_buf_put(buf);
free:
drm_gem_object_release(&bo->gem);
kfree(bo);
return ERR_PTR(err);
}
void tegra_bo_free_object(struct drm_gem_object *gem)
{
struct tegra_drm *tegra = gem->dev->dev_private;
struct host1x_bo_mapping *mapping, *tmp;
struct tegra_bo *bo = to_tegra_bo(gem);
/* remove all mappings of this buffer object from any caches */
list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
if (mapping->cache)
host1x_bo_unpin(mapping);
else
dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
dev_name(mapping->dev));
}
if (tegra->domain)
tegra_bo_iommu_unmap(tegra, bo);
if (gem->import_attach) {
dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
DMA_TO_DEVICE);
drm_prime_gem_destroy(gem, NULL);
} else {
tegra_bo_free(gem->dev, bo);
}
drm_gem_object_release(gem);
kfree(bo);
}
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
struct tegra_drm *tegra = drm->dev_private;
struct tegra_bo *bo;
args->pitch = round_up(min_pitch, tegra->pitch_align);
args->size = args->pitch * args->height;
bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
&args->handle);
if (IS_ERR(bo))
return PTR_ERR(bo);
return 0;
}
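/*
 * Fault handler for page-backed buffers (IOMMU case): insert the page that
 * backs the faulting address. Buffers allocated via the DMA API are mapped
 * up front in __tegra_gem_mmap() and should never fault here.
 */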
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *gem = vma->vm_private_data;
struct tegra_bo *bo = to_tegra_bo(gem);
struct page *page;
pgoff_t offset;
if (!bo->pages)
return VM_FAULT_SIGBUS;
offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
page = bo->pages[offset];
return vmf_insert_page(vma, vmf->address, page);
}
const struct vm_operations_struct tegra_bo_vm_ops = {
.fault = tegra_bo_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
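/*
 * Common mmap implementation shared by the DRM file and PRIME paths:
 * contiguous buffers are mapped through dma_mmap_wc(), while page-backed
 * buffers rely on the fault handler above.
 */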
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
struct tegra_bo *bo = to_tegra_bo(gem);
if (!bo->pages) {
unsigned long vm_pgoff = vma->vm_pgoff;
int err;
/*
* Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
* and set the vm_pgoff (used as a fake buffer offset by DRM)
* to 0 as we want to map the whole buffer.
*/
vm_flags_clear(vma, VM_PFNMAP);
vma->vm_pgoff = 0;
err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
gem->size);
if (err < 0) {
drm_gem_vm_close(vma);
return err;
}
vma->vm_pgoff = vm_pgoff;
} else {
pgprot_t prot = vm_get_page_prot(vma->vm_flags);
vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
vma->vm_page_prot = pgprot_writecombine(prot);
}
return 0;
}
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
struct drm_gem_object *gem;
int err;
err = drm_gem_mmap(file, vma);
if (err < 0)
return err;
gem = vma->vm_private_data;
return __tegra_gem_mmap(gem, vma);
}
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
struct drm_gem_object *gem = attach->dmabuf->priv;
struct tegra_bo *bo = to_tegra_bo(gem);
struct sg_table *sgt;
sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
return NULL;
if (bo->pages) {
if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
0, gem->size, GFP_KERNEL) < 0)
goto free;
} else {
if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
gem->size) < 0)
goto free;
}
if (dma_map_sgtable(attach->dev, sgt, dir, 0))
goto free;
return sgt;
free:
sg_free_table(sgt);
kfree(sgt);
return NULL;
}
static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
struct drm_gem_object *gem = attach->dmabuf->priv;
struct tegra_bo *bo = to_tegra_bo(gem);
if (bo->pages)
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt);
kfree(sgt);
}
static void tegra_gem_prime_release(struct dma_buf *buf)
{
drm_gem_dmabuf_release(buf);
}
static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
enum dma_data_direction direction)
{
struct drm_gem_object *gem = buf->priv;
struct tegra_bo *bo = to_tegra_bo(gem);
struct drm_device *drm = gem->dev;
if (bo->pages)
dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
return 0;
}
static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
enum dma_data_direction direction)
{
struct drm_gem_object *gem = buf->priv;
struct tegra_bo *bo = to_tegra_bo(gem);
struct drm_device *drm = gem->dev;
if (bo->pages)
dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
return 0;
}
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
struct drm_gem_object *gem = buf->priv;
int err;
err = drm_gem_mmap_obj(gem, gem->size, vma);
if (err < 0)
return err;
return __tegra_gem_mmap(gem, vma);
}
static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
struct drm_gem_object *gem = buf->priv;
struct tegra_bo *bo = to_tegra_bo(gem);
void *vaddr;
vaddr = tegra_bo_mmap(&bo->base);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
iosys_map_set_vaddr(map, vaddr);
return 0;
}
static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
struct drm_gem_object *gem = buf->priv;
struct tegra_bo *bo = to_tegra_bo(gem);
tegra_bo_munmap(&bo->base, map->vaddr);
}
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.map_dma_buf = tegra_gem_prime_map_dma_buf,
.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
.release = tegra_gem_prime_release,
.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
.end_cpu_access = tegra_gem_prime_end_cpu_access,
.mmap = tegra_gem_prime_mmap,
.vmap = tegra_gem_prime_vmap,
.vunmap = tegra_gem_prime_vunmap,
};
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
int flags)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.exp_name = KBUILD_MODNAME;
exp_info.owner = gem->dev->driver->fops->owner;
exp_info.ops = &tegra_gem_prime_dmabuf_ops;
exp_info.size = gem->size;
exp_info.flags = flags;
exp_info.priv = gem;
return drm_gem_dmabuf_export(gem->dev, &exp_info);
}
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
struct dma_buf *buf)
{
struct tegra_bo *bo;
if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
struct drm_gem_object *gem = buf->priv;
if (gem->dev == drm) {
drm_gem_object_get(gem);
return gem;
}
}
bo = tegra_bo_import(drm, buf);
if (IS_ERR(bo))
return ERR_CAST(bo);
return &bo->gem;
}
struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
struct drm_gem_object *gem;
struct tegra_bo *bo;
gem = drm_gem_object_lookup(file, handle);
if (!gem)
return NULL;
bo = to_tegra_bo(gem);
return &bo->base;
}
| linux-master | drivers/gpu/drm/tegra/gem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2022, NVIDIA Corporation.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <soc/tegra/mc.h>
#include "drm.h"
#include "falcon.h"
#include "riscv.h"
#include "vic.h"
#define NVDEC_FALCON_DEBUGINFO 0x1094
#define NVDEC_TFBIF_TRANSCFG 0x2c44
struct nvdec_config {
const char *firmware;
unsigned int version;
bool supports_sid;
bool has_riscv;
bool has_extra_clocks;
};
struct nvdec {
struct falcon falcon;
void __iomem *regs;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct device *dev;
struct clk_bulk_data clks[3];
unsigned int num_clks;
struct reset_control *reset;
/* Platform configuration */
const struct nvdec_config *config;
/* RISC-V specific data */
struct tegra_drm_riscv riscv;
phys_addr_t carveout_base;
};
static inline struct nvdec *to_nvdec(struct tegra_drm_client *client)
{
return container_of(client, struct nvdec, client);
}
static inline void nvdec_writel(struct nvdec *nvdec, u32 value,
unsigned int offset)
{
writel(value, nvdec->regs + offset);
}
static int nvdec_boot_falcon(struct nvdec *nvdec)
{
u32 stream_id;
int err;
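	/*
	 * If the SoC supports stream IDs and one is available, program the
	 * TFBIF transfer configuration and the THI stream ID registers so
	 * that the engine's memory transactions carry the proper SMMU
	 * stream ID.
	 */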
if (nvdec->config->supports_sid && tegra_dev_iommu_get_stream_id(nvdec->dev, &stream_id)) {
u32 value;
value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW);
nvdec_writel(nvdec, value, NVDEC_TFBIF_TRANSCFG);
nvdec_writel(nvdec, stream_id, VIC_THI_STREAMID0);
nvdec_writel(nvdec, stream_id, VIC_THI_STREAMID1);
}
err = falcon_boot(&nvdec->falcon);
if (err < 0)
return err;
err = falcon_wait_idle(&nvdec->falcon);
if (err < 0) {
dev_err(nvdec->dev, "falcon boot timed out\n");
return err;
}
return 0;
}
static int nvdec_wait_debuginfo(struct nvdec *nvdec, const char *phase)
{
int err;
u32 val;
err = readl_poll_timeout(nvdec->regs + NVDEC_FALCON_DEBUGINFO, val, val == 0x0, 10, 100000);
if (err) {
dev_err(nvdec->dev, "failed to boot %s, debuginfo=0x%x\n", phase, val);
return err;
}
return 0;
}
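/*
 * The RISC-V core is booted in two stages: the BootROM-based bootloader
 * first, then the engine firmware. Before each stage the driver writes a
 * magic value into the DEBUGINFO scratch register and afterwards polls it
 * until it reads back zero (nvdec_wait_debuginfo()), which indicates that
 * the stage booted successfully.
 */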
static int nvdec_boot_riscv(struct nvdec *nvdec)
{
int err;
err = reset_control_acquire(nvdec->reset);
if (err)
return err;
nvdec_writel(nvdec, 0xabcd1234, NVDEC_FALCON_DEBUGINFO);
err = tegra_drm_riscv_boot_bootrom(&nvdec->riscv, nvdec->carveout_base, 1,
&nvdec->riscv.bl_desc);
if (err) {
dev_err(nvdec->dev, "failed to execute bootloader\n");
goto release_reset;
}
err = nvdec_wait_debuginfo(nvdec, "bootloader");
if (err)
goto release_reset;
err = reset_control_reset(nvdec->reset);
if (err)
goto release_reset;
nvdec_writel(nvdec, 0xabcd1234, NVDEC_FALCON_DEBUGINFO);
err = tegra_drm_riscv_boot_bootrom(&nvdec->riscv, nvdec->carveout_base, 1,
&nvdec->riscv.os_desc);
if (err) {
dev_err(nvdec->dev, "failed to execute firmware\n");
goto release_reset;
}
err = nvdec_wait_debuginfo(nvdec, "firmware");
if (err)
goto release_reset;
release_reset:
reset_control_release(nvdec->reset);
return err;
}
static int nvdec_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct nvdec *nvdec = to_nvdec(drm);
int err;
err = host1x_client_iommu_attach(client);
if (err < 0 && err != -ENODEV) {
dev_err(nvdec->dev, "failed to attach to domain: %d\n", err);
return err;
}
nvdec->channel = host1x_channel_request(client);
if (!nvdec->channel) {
err = -ENOMEM;
goto detach;
}
client->syncpts[0] = host1x_syncpt_request(client, 0);
if (!client->syncpts[0]) {
err = -ENOMEM;
goto free_channel;
}
err = tegra_drm_register_client(tegra, drm);
if (err < 0)
goto free_syncpt;
/*
* Inherit the DMA parameters (such as maximum segment size) from the
* parent host1x device.
*/
client->dev->dma_parms = client->host->dma_parms;
return 0;
free_syncpt:
host1x_syncpt_put(client->syncpts[0]);
free_channel:
host1x_channel_put(nvdec->channel);
detach:
host1x_client_iommu_detach(client);
return err;
}
static int nvdec_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct nvdec *nvdec = to_nvdec(drm);
int err;
/* avoid a dangling pointer just in case this disappears */
client->dev->dma_parms = NULL;
err = tegra_drm_unregister_client(tegra, drm);
if (err < 0)
return err;
pm_runtime_dont_use_autosuspend(client->dev);
pm_runtime_force_suspend(client->dev);
host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(nvdec->channel);
host1x_client_iommu_detach(client);
nvdec->channel = NULL;
if (client->group) {
dma_unmap_single(nvdec->dev, nvdec->falcon.firmware.phys,
nvdec->falcon.firmware.size, DMA_TO_DEVICE);
tegra_drm_free(tegra, nvdec->falcon.firmware.size,
nvdec->falcon.firmware.virt,
nvdec->falcon.firmware.iova);
} else {
dma_free_coherent(nvdec->dev, nvdec->falcon.firmware.size,
nvdec->falcon.firmware.virt,
nvdec->falcon.firmware.iova);
}
return 0;
}
static const struct host1x_client_ops nvdec_client_ops = {
.init = nvdec_init,
.exit = nvdec_exit,
};
static int nvdec_load_falcon_firmware(struct nvdec *nvdec)
{
struct host1x_client *client = &nvdec->client.base;
struct tegra_drm *tegra = nvdec->client.drm;
dma_addr_t iova;
size_t size;
void *virt;
int err;
if (nvdec->falcon.firmware.virt)
return 0;
err = falcon_read_firmware(&nvdec->falcon, nvdec->config->firmware);
if (err < 0)
return err;
size = nvdec->falcon.firmware.size;
if (!client->group) {
virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);
err = dma_mapping_error(nvdec->dev, iova);
if (err < 0)
return err;
} else {
virt = tegra_drm_alloc(tegra, size, &iova);
if (IS_ERR(virt))
return PTR_ERR(virt);
}
nvdec->falcon.firmware.virt = virt;
nvdec->falcon.firmware.iova = iova;
err = falcon_load_firmware(&nvdec->falcon);
if (err < 0)
goto cleanup;
/*
* In this case we have received an IOVA from the shared domain, so we
* need to make sure to get the physical address so that the DMA API
* knows what memory pages to flush the cache for.
*/
if (client->group) {
dma_addr_t phys;
phys = dma_map_single(nvdec->dev, virt, size, DMA_TO_DEVICE);
err = dma_mapping_error(nvdec->dev, phys);
if (err < 0)
goto cleanup;
nvdec->falcon.firmware.phys = phys;
}
return 0;
cleanup:
if (!client->group)
dma_free_coherent(nvdec->dev, size, virt, iova);
else
tegra_drm_free(tegra, size, virt, iova);
return err;
}
static __maybe_unused int nvdec_runtime_resume(struct device *dev)
{
struct nvdec *nvdec = dev_get_drvdata(dev);
int err;
err = clk_bulk_prepare_enable(nvdec->num_clks, nvdec->clks);
if (err < 0)
return err;
usleep_range(10, 20);
if (nvdec->config->has_riscv) {
err = nvdec_boot_riscv(nvdec);
if (err < 0)
goto disable;
} else {
err = nvdec_load_falcon_firmware(nvdec);
if (err < 0)
goto disable;
err = nvdec_boot_falcon(nvdec);
if (err < 0)
goto disable;
}
return 0;
disable:
clk_bulk_disable_unprepare(nvdec->num_clks, nvdec->clks);
return err;
}
static __maybe_unused int nvdec_runtime_suspend(struct device *dev)
{
struct nvdec *nvdec = dev_get_drvdata(dev);
host1x_channel_stop(nvdec->channel);
clk_bulk_disable_unprepare(nvdec->num_clks, nvdec->clks);
return 0;
}
static int nvdec_open_channel(struct tegra_drm_client *client,
struct tegra_drm_context *context)
{
struct nvdec *nvdec = to_nvdec(client);
context->channel = host1x_channel_get(nvdec->channel);
if (!context->channel)
return -ENOMEM;
return 0;
}
static void nvdec_close_channel(struct tegra_drm_context *context)
{
host1x_channel_put(context->channel);
}
static int nvdec_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
*supported = true;
return 0;
}
static const struct tegra_drm_client_ops nvdec_ops = {
.open_channel = nvdec_open_channel,
.close_channel = nvdec_close_channel,
.submit = tegra_drm_submit,
.get_streamid_offset = tegra_drm_get_streamid_offset_thi,
.can_use_memory_ctx = nvdec_can_use_memory_ctx,
};
#define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin"
static const struct nvdec_config nvdec_t210_config = {
.firmware = NVIDIA_TEGRA_210_NVDEC_FIRMWARE,
.version = 0x21,
.supports_sid = false,
};
#define NVIDIA_TEGRA_186_NVDEC_FIRMWARE "nvidia/tegra186/nvdec.bin"
static const struct nvdec_config nvdec_t186_config = {
.firmware = NVIDIA_TEGRA_186_NVDEC_FIRMWARE,
.version = 0x18,
.supports_sid = true,
};
#define NVIDIA_TEGRA_194_NVDEC_FIRMWARE "nvidia/tegra194/nvdec.bin"
static const struct nvdec_config nvdec_t194_config = {
.firmware = NVIDIA_TEGRA_194_NVDEC_FIRMWARE,
.version = 0x19,
.supports_sid = true,
};
static const struct nvdec_config nvdec_t234_config = {
.version = 0x23,
.supports_sid = true,
.has_riscv = true,
.has_extra_clocks = true,
};
static const struct of_device_id tegra_nvdec_of_match[] = {
{ .compatible = "nvidia,tegra210-nvdec", .data = &nvdec_t210_config },
{ .compatible = "nvidia,tegra186-nvdec", .data = &nvdec_t186_config },
{ .compatible = "nvidia,tegra194-nvdec", .data = &nvdec_t194_config },
{ .compatible = "nvidia,tegra234-nvdec", .data = &nvdec_t234_config },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_nvdec_of_match);
static int nvdec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct host1x_syncpt **syncpts;
struct nvdec *nvdec;
u32 host_class;
int err;
/* inherit DMA mask from host1x parent */
err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
if (err < 0) {
dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
return err;
}
nvdec = devm_kzalloc(dev, sizeof(*nvdec), GFP_KERNEL);
if (!nvdec)
return -ENOMEM;
nvdec->config = of_device_get_match_data(dev);
syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
if (!syncpts)
return -ENOMEM;
nvdec->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(nvdec->regs))
return PTR_ERR(nvdec->regs);
nvdec->clks[0].id = "nvdec";
nvdec->num_clks = 1;
if (nvdec->config->has_extra_clocks) {
nvdec->num_clks = 3;
nvdec->clks[1].id = "fuse";
nvdec->clks[2].id = "tsec_pka";
}
err = devm_clk_bulk_get(dev, nvdec->num_clks, nvdec->clks);
if (err) {
dev_err(&pdev->dev, "failed to get clock(s)\n");
return err;
}
err = clk_set_rate(nvdec->clks[0].clk, ULONG_MAX);
if (err < 0) {
dev_err(&pdev->dev, "failed to set clock rate\n");
return err;
}
err = of_property_read_u32(dev->of_node, "nvidia,host1x-class", &host_class);
if (err < 0)
host_class = HOST1X_CLASS_NVDEC;
if (nvdec->config->has_riscv) {
struct tegra_mc *mc;
mc = devm_tegra_memory_controller_get(dev);
if (IS_ERR(mc)) {
dev_err_probe(dev, PTR_ERR(mc),
"failed to get memory controller handle\n");
return PTR_ERR(mc);
}
err = tegra_mc_get_carveout_info(mc, 1, &nvdec->carveout_base, NULL);
if (err) {
dev_err(dev, "failed to get carveout info: %d\n", err);
return err;
}
nvdec->reset = devm_reset_control_get_exclusive_released(dev, "nvdec");
if (IS_ERR(nvdec->reset)) {
dev_err_probe(dev, PTR_ERR(nvdec->reset), "failed to get reset\n");
return PTR_ERR(nvdec->reset);
}
nvdec->riscv.dev = dev;
nvdec->riscv.regs = nvdec->regs;
err = tegra_drm_riscv_read_descriptors(&nvdec->riscv);
if (err < 0)
return err;
} else {
nvdec->falcon.dev = dev;
nvdec->falcon.regs = nvdec->regs;
err = falcon_init(&nvdec->falcon);
if (err < 0)
return err;
}
platform_set_drvdata(pdev, nvdec);
INIT_LIST_HEAD(&nvdec->client.base.list);
nvdec->client.base.ops = &nvdec_client_ops;
nvdec->client.base.dev = dev;
nvdec->client.base.class = host_class;
nvdec->client.base.syncpts = syncpts;
nvdec->client.base.num_syncpts = 1;
nvdec->dev = dev;
INIT_LIST_HEAD(&nvdec->client.list);
nvdec->client.version = nvdec->config->version;
nvdec->client.ops = &nvdec_ops;
err = host1x_client_register(&nvdec->client.base);
if (err < 0) {
dev_err(dev, "failed to register host1x client: %d\n", err);
goto exit_falcon;
}
pm_runtime_enable(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 500);
return 0;
exit_falcon:
falcon_exit(&nvdec->falcon);
return err;
}
static void nvdec_remove(struct platform_device *pdev)
{
struct nvdec *nvdec = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
host1x_client_unregister(&nvdec->client.base);
falcon_exit(&nvdec->falcon);
}
static const struct dev_pm_ops nvdec_pm_ops = {
SET_RUNTIME_PM_OPS(nvdec_runtime_suspend, nvdec_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
struct platform_driver tegra_nvdec_driver = {
.driver = {
.name = "tegra-nvdec",
.of_match_table = tegra_nvdec_of_match,
.pm = &nvdec_pm_ops
},
.probe = nvdec_probe,
.remove_new = nvdec_remove,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVDEC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_NVDEC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_NVDEC_FIRMWARE);
#endif
| linux-master | drivers/gpu/drm/tegra/nvdec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 NVIDIA Corporation
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <soc/tegra/pmc.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
#include "dc.h"
#include "dp.h"
#include "drm.h"
#include "hda.h"
#include "sor.h"
#include "trace.h"
#define SOR_REKEY 0x38
struct tegra_sor_hdmi_settings {
unsigned long frequency;
u8 vcocap;
u8 filter;
u8 ichpmp;
u8 loadadj;
u8 tmds_termadj;
u8 tx_pu_value;
u8 bg_temp_coef;
u8 bg_vref_level;
u8 avdd10_level;
u8 avdd14_level;
u8 sparepll;
u8 drive_current[4];
u8 preemphasis[4];
};
#if 1
static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
{
.frequency = 54000000,
.vcocap = 0x0,
.filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
.tmds_termadj = 0x9,
.tx_pu_value = 0x10,
.bg_temp_coef = 0x3,
.bg_vref_level = 0x8,
.avdd10_level = 0x4,
.avdd14_level = 0x4,
.sparepll = 0x0,
.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 75000000,
.vcocap = 0x3,
.filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
.tmds_termadj = 0x9,
.tx_pu_value = 0x40,
.bg_temp_coef = 0x3,
.bg_vref_level = 0x8,
.avdd10_level = 0x4,
.avdd14_level = 0x4,
.sparepll = 0x0,
.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 150000000,
.vcocap = 0x3,
.filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
.tmds_termadj = 0x9,
.tx_pu_value = 0x66,
.bg_temp_coef = 0x3,
.bg_vref_level = 0x8,
.avdd10_level = 0x4,
.avdd14_level = 0x4,
.sparepll = 0x0,
.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 300000000,
.vcocap = 0x3,
.filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
.tmds_termadj = 0x9,
.tx_pu_value = 0x66,
.bg_temp_coef = 0x3,
.bg_vref_level = 0xa,
.avdd10_level = 0x4,
.avdd14_level = 0x4,
.sparepll = 0x0,
.drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
.preemphasis = { 0x00, 0x17, 0x17, 0x17 },
}, {
.frequency = 600000000,
.vcocap = 0x3,
.filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
.tmds_termadj = 0x9,
.tx_pu_value = 0x66,
.bg_temp_coef = 0x3,
.bg_vref_level = 0x8,
.avdd10_level = 0x4,
.avdd14_level = 0x4,
.sparepll = 0x0,
.drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
},
};
#else
static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
{
.frequency = 75000000,
.vcocap = 0x3,
.filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
.tmds_termadj = 0x9,
.tx_pu_value = 0x40,
.bg_temp_coef = 0x3,
.bg_vref_level = 0x8,
.avdd10_level = 0x4,
.avdd14_level = 0x4,
.sparepll = 0x0,
.drive_current = { 0x29, 0x29, 0x29, 0x29 },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 150000000,
.vcocap = 0x3,
.filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
.tmds_termadj = 0x9,
.tx_pu_value = 0x66,
.bg_temp_coef = 0x3,
.bg_vref_level = 0x8,
.avdd10_level = 0x4,
.avdd14_level = 0x4,
.sparepll = 0x0,
.drive_current = { 0x30, 0x37, 0x37, 0x37 },
.preemphasis = { 0x01, 0x02, 0x02, 0x02 },
}, {
.frequency = 300000000,
.vcocap = 0x3,
.filter = 0x0,
.ichpmp = 0x6,
.loadadj = 0x3,
.tmds_termadj = 0x9,
.tx_pu_value = 0x66,
.bg_temp_coef = 0x3,
.bg_vref_level = 0xf,
.avdd10_level = 0x4,
.avdd14_level = 0x4,
.sparepll = 0x0,
.drive_current = { 0x30, 0x37, 0x37, 0x37 },
.preemphasis = { 0x10, 0x3e, 0x3e, 0x3e },
}, {
.frequency = 600000000,
.vcocap = 0x3,
.filter = 0x0,
.ichpmp = 0xa,
.loadadj = 0x3,
.tmds_termadj = 0xb,
.tx_pu_value = 0x66,
.bg_temp_coef = 0x3,
.bg_vref_level = 0xe,
.avdd10_level = 0x4,
.avdd14_level = 0x4,
.sparepll = 0x0,
.drive_current = { 0x35, 0x3e, 0x3e, 0x3e },
.preemphasis = { 0x02, 0x3f, 0x3f, 0x3f },
},
};
#endif
static const struct tegra_sor_hdmi_settings tegra186_sor_hdmi_defaults[] = {
{
.frequency = 54000000,
.vcocap = 0,
.filter = 5,
.ichpmp = 5,
.loadadj = 3,
.tmds_termadj = 0xf,
.tx_pu_value = 0,
.bg_temp_coef = 3,
.bg_vref_level = 8,
.avdd10_level = 4,
.avdd14_level = 4,
.sparepll = 0x54,
.drive_current = { 0x3a, 0x3a, 0x3a, 0x33 },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 75000000,
.vcocap = 1,
.filter = 5,
.ichpmp = 5,
.loadadj = 3,
.tmds_termadj = 0xf,
.tx_pu_value = 0,
.bg_temp_coef = 3,
.bg_vref_level = 8,
.avdd10_level = 4,
.avdd14_level = 4,
.sparepll = 0x44,
.drive_current = { 0x3a, 0x3a, 0x3a, 0x33 },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 150000000,
.vcocap = 3,
.filter = 5,
.ichpmp = 5,
.loadadj = 3,
.tmds_termadj = 15,
.tx_pu_value = 0x66 /* 0 */,
.bg_temp_coef = 3,
.bg_vref_level = 8,
.avdd10_level = 4,
.avdd14_level = 4,
.sparepll = 0x00, /* 0x34 */
.drive_current = { 0x3a, 0x3a, 0x3a, 0x37 },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 300000000,
.vcocap = 3,
.filter = 5,
.ichpmp = 5,
.loadadj = 3,
.tmds_termadj = 15,
.tx_pu_value = 64,
.bg_temp_coef = 3,
.bg_vref_level = 8,
.avdd10_level = 4,
.avdd14_level = 4,
.sparepll = 0x34,
.drive_current = { 0x3d, 0x3d, 0x3d, 0x33 },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 600000000,
.vcocap = 3,
.filter = 5,
.ichpmp = 5,
.loadadj = 3,
.tmds_termadj = 12,
.tx_pu_value = 96,
.bg_temp_coef = 3,
.bg_vref_level = 8,
.avdd10_level = 4,
.avdd14_level = 4,
.sparepll = 0x34,
.drive_current = { 0x3d, 0x3d, 0x3d, 0x33 },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}
};
static const struct tegra_sor_hdmi_settings tegra194_sor_hdmi_defaults[] = {
{
.frequency = 54000000,
.vcocap = 0,
.filter = 5,
.ichpmp = 5,
.loadadj = 3,
.tmds_termadj = 0xf,
.tx_pu_value = 0,
.bg_temp_coef = 3,
.bg_vref_level = 8,
.avdd10_level = 4,
.avdd14_level = 4,
.sparepll = 0x54,
.drive_current = { 0x3a, 0x3a, 0x3a, 0x33 },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 75000000,
.vcocap = 1,
.filter = 5,
.ichpmp = 5,
.loadadj = 3,
.tmds_termadj = 0xf,
.tx_pu_value = 0,
.bg_temp_coef = 3,
.bg_vref_level = 8,
.avdd10_level = 4,
.avdd14_level = 4,
.sparepll = 0x44,
.drive_current = { 0x3a, 0x3a, 0x3a, 0x33 },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 150000000,
.vcocap = 3,
.filter = 5,
.ichpmp = 5,
.loadadj = 3,
.tmds_termadj = 15,
.tx_pu_value = 0x66 /* 0 */,
.bg_temp_coef = 3,
.bg_vref_level = 8,
.avdd10_level = 4,
.avdd14_level = 4,
.sparepll = 0x00, /* 0x34 */
.drive_current = { 0x3a, 0x3a, 0x3a, 0x37 },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 300000000,
.vcocap = 3,
.filter = 5,
.ichpmp = 5,
.loadadj = 3,
.tmds_termadj = 15,
.tx_pu_value = 64,
.bg_temp_coef = 3,
.bg_vref_level = 8,
.avdd10_level = 4,
.avdd14_level = 4,
.sparepll = 0x34,
.drive_current = { 0x3d, 0x3d, 0x3d, 0x33 },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 600000000,
.vcocap = 3,
.filter = 5,
.ichpmp = 5,
.loadadj = 3,
.tmds_termadj = 12,
.tx_pu_value = 96,
.bg_temp_coef = 3,
.bg_vref_level = 8,
.avdd10_level = 4,
.avdd14_level = 4,
.sparepll = 0x34,
.drive_current = { 0x3d, 0x3d, 0x3d, 0x33 },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}
};
struct tegra_sor_regs {
unsigned int head_state0;
unsigned int head_state1;
unsigned int head_state2;
unsigned int head_state3;
unsigned int head_state4;
unsigned int head_state5;
unsigned int pll0;
unsigned int pll1;
unsigned int pll2;
unsigned int pll3;
unsigned int dp_padctl0;
unsigned int dp_padctl2;
};
struct tegra_sor_soc {
bool supports_lvds;
bool supports_hdmi;
bool supports_dp;
bool supports_audio;
bool supports_hdcp;
const struct tegra_sor_regs *regs;
bool has_nvdisplay;
const struct tegra_sor_hdmi_settings *settings;
unsigned int num_settings;
const u8 *xbar_cfg;
const u8 *lane_map;
const u8 (*voltage_swing)[4][4];
const u8 (*pre_emphasis)[4][4];
const u8 (*post_cursor)[4][4];
const u8 (*tx_pu)[4][4];
};
struct tegra_sor;
struct tegra_sor_ops {
const char *name;
int (*probe)(struct tegra_sor *sor);
void (*audio_enable)(struct tegra_sor *sor);
void (*audio_disable)(struct tegra_sor *sor);
};
struct tegra_sor {
struct host1x_client client;
struct tegra_output output;
struct device *dev;
const struct tegra_sor_soc *soc;
void __iomem *regs;
unsigned int index;
unsigned int irq;
struct reset_control *rst;
struct clk *clk_parent;
struct clk *clk_safe;
struct clk *clk_out;
struct clk *clk_pad;
struct clk *clk_dp;
struct clk *clk;
u8 xbar_cfg[5];
struct drm_dp_link link;
struct drm_dp_aux *aux;
struct drm_info_list *debugfs_files;
const struct tegra_sor_ops *ops;
enum tegra_io_pad pad;
/* for HDMI 2.0 */
struct tegra_sor_hdmi_settings *settings;
unsigned int num_settings;
struct regulator *avdd_io_supply;
struct regulator *vdd_pll_supply;
struct regulator *hdmi_supply;
struct delayed_work scdc;
bool scdc_enabled;
struct tegra_hda_format format;
};
struct tegra_sor_state {
struct drm_connector_state base;
unsigned int link_speed;
unsigned long pclk;
unsigned int bpc;
};
static inline struct tegra_sor_state *
to_sor_state(struct drm_connector_state *state)
{
return container_of(state, struct tegra_sor_state, base);
}
struct tegra_sor_config {
u32 bits_per_pixel;
u32 active_polarity;
u32 active_count;
u32 tu_size;
u32 active_frac;
u32 watermark;
u32 hblank_symbols;
u32 vblank_symbols;
};
static inline struct tegra_sor *
host1x_client_to_sor(struct host1x_client *client)
{
return container_of(client, struct tegra_sor, client);
}
static inline struct tegra_sor *to_sor(struct tegra_output *output)
{
return container_of(output, struct tegra_sor, output);
}
static inline u32 tegra_sor_readl(struct tegra_sor *sor, unsigned int offset)
{
u32 value = readl(sor->regs + (offset << 2));
trace_sor_readl(sor->dev, offset, value);
return value;
}
static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value,
unsigned int offset)
{
trace_sor_writel(sor->dev, offset, value);
writel(value, sor->regs + (offset << 2));
}
static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
{
int err;
clk_disable_unprepare(sor->clk);
err = clk_set_parent(sor->clk_out, parent);
if (err < 0)
return err;
err = clk_prepare_enable(sor->clk);
if (err < 0)
return err;
return 0;
}
struct tegra_clk_sor_pad {
struct clk_hw hw;
struct tegra_sor *sor;
};
static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
{
return container_of(hw, struct tegra_clk_sor_pad, hw);
}
static const char * const tegra_clk_sor_pad_parents[2][2] = {
{ "pll_d_out0", "pll_dp" },
{ "pll_d2_out0", "pll_dp" },
};
/*
* Implementing ->set_parent() here isn't really required because the parent
* will be explicitly selected in the driver code via the DP_CLK_SEL mux in
* the SOR_CLK_CNTRL register. This is primarily for compatibility with the
* Tegra186 and later SoC generations where the BPMP implements this clock
* and doesn't expose the mux via the common clock framework.
*/
static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
{
struct tegra_clk_sor_pad *pad = to_pad(hw);
struct tegra_sor *sor = pad->sor;
u32 value;
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
switch (index) {
case 0:
value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
break;
case 1:
value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
break;
}
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
return 0;
}
static u8 tegra_clk_sor_pad_get_parent(struct clk_hw *hw)
{
struct tegra_clk_sor_pad *pad = to_pad(hw);
struct tegra_sor *sor = pad->sor;
u8 parent = U8_MAX;
u32 value;
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
switch (value & SOR_CLK_CNTRL_DP_CLK_SEL_MASK) {
case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK:
case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_PCLK:
parent = 0;
break;
case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK:
case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK:
parent = 1;
break;
}
return parent;
}
static const struct clk_ops tegra_clk_sor_pad_ops = {
.determine_rate = clk_hw_determine_rate_no_reparent,
.set_parent = tegra_clk_sor_pad_set_parent,
.get_parent = tegra_clk_sor_pad_get_parent,
};
static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
const char *name)
{
struct tegra_clk_sor_pad *pad;
struct clk_init_data init;
struct clk *clk;
pad = devm_kzalloc(sor->dev, sizeof(*pad), GFP_KERNEL);
if (!pad)
return ERR_PTR(-ENOMEM);
pad->sor = sor;
init.name = name;
init.flags = 0;
init.parent_names = tegra_clk_sor_pad_parents[sor->index];
init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents[sor->index]);
init.ops = &tegra_clk_sor_pad_ops;
pad->hw.init = &init;
clk = devm_clk_register(sor->dev, &pad->hw);
return clk;
}
static void tegra_sor_filter_rates(struct tegra_sor *sor)
{
struct drm_dp_link *link = &sor->link;
unsigned int i;
/* Tegra only supports RBR, HBR and HBR2 */
for (i = 0; i < link->num_rates; i++) {
switch (link->rates[i]) {
case 1620000:
case 2700000:
case 5400000:
break;
default:
DRM_DEBUG_KMS("link rate %lu kHz not supported\n",
link->rates[i]);
link->rates[i] = 0;
break;
}
}
drm_dp_link_update_rates(link);
}
static int tegra_sor_power_up_lanes(struct tegra_sor *sor, unsigned int lanes)
{
unsigned long timeout;
u32 value;
/*
* Clear or set the PD_TXD bit corresponding to each lane, depending
* on whether it is used or not.
*/
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
if (lanes <= 2)
value &= ~(SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]));
else
value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]);
if (lanes <= 1)
value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
else
value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
if (lanes == 0)
value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
else
value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
/* start lane sequencer */
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
SOR_LANE_SEQ_CTL_POWER_STATE_UP;
tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
break;
usleep_range(250, 1000);
}
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
return -ETIMEDOUT;
return 0;
}
static int tegra_sor_power_down_lanes(struct tegra_sor *sor)
{
unsigned long timeout;
u32 value;
/* power down all lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
/* start lane sequencer */
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
break;
usleep_range(25, 100);
}
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
return -ETIMEDOUT;
return 0;
}
static void tegra_sor_dp_precharge(struct tegra_sor *sor, unsigned int lanes)
{
u32 value;
/* pre-charge all used lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
if (lanes <= 2)
value &= ~(SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]));
else
value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]);
if (lanes <= 1)
value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
else
value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
if (lanes == 0)
value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
else
value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
usleep_range(15, 100);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
}
static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
{
u32 mask = 0x08, adj = 0, value;
/* enable pad calibration logic */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
value = tegra_sor_readl(sor, sor->soc->regs->pll1);
value |= SOR_PLL1_TMDS_TERM;
tegra_sor_writel(sor, value, sor->soc->regs->pll1);
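	/*
	 * Successively approximate the 4-bit TMDS termination adjustment:
	 * starting from the MSB, set each bit, let the value settle and clear
	 * the bit again if the TERM_COMPOUT comparator output is set.
	 */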
while (mask) {
adj |= mask;
value = tegra_sor_readl(sor, sor->soc->regs->pll1);
value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
value |= SOR_PLL1_TMDS_TERMADJ(adj);
tegra_sor_writel(sor, value, sor->soc->regs->pll1);
usleep_range(100, 200);
value = tegra_sor_readl(sor, sor->soc->regs->pll1);
if (value & SOR_PLL1_TERM_COMPOUT)
adj &= ~mask;
mask >>= 1;
}
value = tegra_sor_readl(sor, sor->soc->regs->pll1);
value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
value |= SOR_PLL1_TMDS_TERMADJ(adj);
tegra_sor_writel(sor, value, sor->soc->regs->pll1);
/* disable pad calibration logic */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_PAD_CAL_PD;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
}
static int tegra_sor_dp_link_apply_training(struct drm_dp_link *link)
{
struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
u32 voltage_swing = 0, pre_emphasis = 0, post_cursor = 0;
const struct tegra_sor_soc *soc = sor->soc;
u32 pattern = 0, tx_pu = 0, value;
unsigned int i;
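	/*
	 * Look up per-lane drive current, pre-emphasis, post-cursor and TX_PU
	 * values from the SoC tables using the requested training levels, and
	 * pack one training pattern byte per lane into the SOR_DP_TPG value.
	 */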
for (value = 0, i = 0; i < link->lanes; i++) {
u8 vs = link->train.request.voltage_swing[i];
u8 pe = link->train.request.pre_emphasis[i];
u8 pc = link->train.request.post_cursor[i];
u8 shift = sor->soc->lane_map[i] << 3;
voltage_swing |= soc->voltage_swing[pc][vs][pe] << shift;
pre_emphasis |= soc->pre_emphasis[pc][vs][pe] << shift;
post_cursor |= soc->post_cursor[pc][vs][pe] << shift;
if (sor->soc->tx_pu[pc][vs][pe] > tx_pu)
tx_pu = sor->soc->tx_pu[pc][vs][pe];
switch (link->train.pattern) {
case DP_TRAINING_PATTERN_DISABLE:
value = SOR_DP_TPG_SCRAMBLER_GALIOS |
SOR_DP_TPG_PATTERN_NONE;
break;
case DP_TRAINING_PATTERN_1:
value = SOR_DP_TPG_SCRAMBLER_NONE |
SOR_DP_TPG_PATTERN_TRAIN1;
break;
case DP_TRAINING_PATTERN_2:
value = SOR_DP_TPG_SCRAMBLER_NONE |
SOR_DP_TPG_PATTERN_TRAIN2;
break;
case DP_TRAINING_PATTERN_3:
value = SOR_DP_TPG_SCRAMBLER_NONE |
SOR_DP_TPG_PATTERN_TRAIN3;
break;
default:
return -EINVAL;
}
if (link->caps.channel_coding)
value |= SOR_DP_TPG_CHANNEL_CODING;
pattern = pattern << 8 | value;
}
tegra_sor_writel(sor, voltage_swing, SOR_LANE_DRIVE_CURRENT0);
tegra_sor_writel(sor, pre_emphasis, SOR_LANE_PREEMPHASIS0);
if (link->caps.tps3_supported)
tegra_sor_writel(sor, post_cursor, SOR_LANE_POSTCURSOR0);
tegra_sor_writel(sor, pattern, SOR_DP_TPG);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_TX_PU_MASK;
value |= SOR_DP_PADCTL_TX_PU_ENABLE;
value |= SOR_DP_PADCTL_TX_PU(tx_pu);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
usleep_range(20, 100);
return 0;
}
static int tegra_sor_dp_link_configure(struct drm_dp_link *link)
{
struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
unsigned int rate, lanes;
u32 value;
int err;
rate = drm_dp_link_rate_to_bw_code(link->rate);
lanes = link->lanes;
/* configure link speed and lane count */
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
if (link->caps.enhanced_framing)
value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
usleep_range(400, 1000);
/* configure load pulse position adjustment */
value = tegra_sor_readl(sor, sor->soc->regs->pll1);
value &= ~SOR_PLL1_LOADADJ_MASK;
switch (rate) {
case DP_LINK_BW_1_62:
value |= SOR_PLL1_LOADADJ(0x3);
break;
case DP_LINK_BW_2_7:
value |= SOR_PLL1_LOADADJ(0x4);
break;
case DP_LINK_BW_5_4:
value |= SOR_PLL1_LOADADJ(0x6);
break;
}
tegra_sor_writel(sor, value, sor->soc->regs->pll1);
/* use alternate scrambler reset for eDP */
value = tegra_sor_readl(sor, SOR_DP_SPARE0);
if (link->edp == 0)
value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
else
value |= SOR_DP_SPARE_PANEL_INTERNAL;
tegra_sor_writel(sor, value, SOR_DP_SPARE0);
err = tegra_sor_power_down_lanes(sor);
if (err < 0) {
dev_err(sor->dev, "failed to power down lanes: %d\n", err);
return err;
}
/* power up and pre-charge lanes */
err = tegra_sor_power_up_lanes(sor, lanes);
if (err < 0) {
dev_err(sor->dev, "failed to power up %u lane%s: %d\n",
lanes, (lanes != 1) ? "s" : "", err);
return err;
}
tegra_sor_dp_precharge(sor, lanes);
return 0;
}
static const struct drm_dp_link_ops tegra_sor_dp_link_ops = {
.apply_training = tegra_sor_dp_link_apply_training,
.configure = tegra_sor_dp_link_configure,
};
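/*
 * Toggling (SUPER_)STATE0 from 0 to 1 and back acts as an update trigger
 * that makes the hardware latch the settings previously programmed into the
 * corresponding (SUPER_)STATE1 register.
 */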
static void tegra_sor_super_update(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
tegra_sor_writel(sor, 1, SOR_SUPER_STATE0);
tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
}
static void tegra_sor_update(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_STATE0);
tegra_sor_writel(sor, 1, SOR_STATE0);
tegra_sor_writel(sor, 0, SOR_STATE0);
}
static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout)
{
u32 value;
value = tegra_sor_readl(sor, SOR_PWM_DIV);
value &= ~SOR_PWM_DIV_MASK;
value |= 0x400; /* period */
tegra_sor_writel(sor, value, SOR_PWM_DIV);
value = tegra_sor_readl(sor, SOR_PWM_CTL);
value &= ~SOR_PWM_CTL_DUTY_CYCLE_MASK;
value |= 0x400; /* duty cycle */
value &= ~SOR_PWM_CTL_CLK_SEL; /* clock source: PCLK */
value |= SOR_PWM_CTL_TRIGGER;
tegra_sor_writel(sor, value, SOR_PWM_CTL);
timeout = jiffies + msecs_to_jiffies(timeout);
while (time_before(jiffies, timeout)) {
value = tegra_sor_readl(sor, SOR_PWM_CTL);
if ((value & SOR_PWM_CTL_TRIGGER) == 0)
return 0;
usleep_range(25, 100);
}
return -ETIMEDOUT;
}
static int tegra_sor_attach(struct tegra_sor *sor)
{
unsigned long value, timeout;
/* wake up in normal mode */
value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value |= SOR_SUPER_STATE_HEAD_MODE_AWAKE;
value |= SOR_SUPER_STATE_MODE_NORMAL;
tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
/* attach */
value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value |= SOR_SUPER_STATE_ATTACHED;
tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
value = tegra_sor_readl(sor, SOR_TEST);
if ((value & SOR_TEST_ATTACHED) != 0)
return 0;
usleep_range(25, 100);
}
return -ETIMEDOUT;
}
static int tegra_sor_wakeup(struct tegra_sor *sor)
{
unsigned long value, timeout;
timeout = jiffies + msecs_to_jiffies(250);
/* wait for head to wake up */
while (time_before(jiffies, timeout)) {
value = tegra_sor_readl(sor, SOR_TEST);
value &= SOR_TEST_HEAD_MODE_MASK;
if (value == SOR_TEST_HEAD_MODE_AWAKE)
return 0;
usleep_range(25, 100);
}
return -ETIMEDOUT;
}
static int tegra_sor_power_up(struct tegra_sor *sor, unsigned long timeout)
{
u32 value;
value = tegra_sor_readl(sor, SOR_PWR);
value |= SOR_PWR_TRIGGER | SOR_PWR_NORMAL_STATE_PU;
tegra_sor_writel(sor, value, SOR_PWR);
timeout = jiffies + msecs_to_jiffies(timeout);
while (time_before(jiffies, timeout)) {
value = tegra_sor_readl(sor, SOR_PWR);
if ((value & SOR_PWR_TRIGGER) == 0)
return 0;
usleep_range(25, 100);
}
return -ETIMEDOUT;
}
struct tegra_sor_params {
/* number of link clocks per line */
unsigned int num_clocks;
/* ratio between input and output */
u64 ratio;
/* precision factor */
u64 precision;
unsigned int active_polarity;
unsigned int active_count;
unsigned int active_frac;
unsigned int tu_size;
unsigned int error;
};
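/*
 * Fixed-point search for a transfer unit configuration: params->ratio is the
 * input/output bandwidth ratio scaled by the precision factor f. For the
 * given TU size, split ratio * tu_size into an integer active symbol count
 * and a fractional part, then record the candidate if its (non-positive)
 * error is closer to zero than the best one found so far. Returns true once
 * an exact (zero error) solution is found.
 */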
static int tegra_sor_compute_params(struct tegra_sor *sor,
struct tegra_sor_params *params,
unsigned int tu_size)
{
u64 active_sym, active_count, frac, approx;
u32 active_polarity, active_frac = 0;
const u64 f = params->precision;
s64 error;
active_sym = params->ratio * tu_size;
active_count = div_u64(active_sym, f) * f;
frac = active_sym - active_count;
/* fraction < 0.5 */
if (frac >= (f / 2)) {
active_polarity = 1;
frac = f - frac;
} else {
active_polarity = 0;
}
if (frac != 0) {
frac = div_u64(f * f, frac); /* 1/fraction */
if (frac <= (15 * f)) {
active_frac = div_u64(frac, f);
/* round up */
if (active_polarity)
active_frac++;
} else {
active_frac = active_polarity ? 1 : 15;
}
}
if (active_frac == 1)
active_polarity = 0;
if (active_polarity == 1) {
if (active_frac) {
approx = active_count + (active_frac * (f - 1)) * f;
approx = div_u64(approx, active_frac * f);
} else {
approx = active_count + f;
}
} else {
if (active_frac)
approx = active_count + div_u64(f, active_frac);
else
approx = active_count;
}
error = div_s64(active_sym - approx, tu_size);
error *= params->num_clocks;
if (error <= 0 && abs(error) < params->error) {
params->active_count = div_u64(active_count, f);
params->active_polarity = active_polarity;
params->active_frac = active_frac;
params->error = abs(error);
params->tu_size = tu_size;
if (error == 0)
return true;
}
return false;
}
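/*
 * Compute the DP configuration (TU size, active symbol count/fraction,
 * watermark and blanking symbol counts) for a mode based on the ratio of the
 * required pixel bandwidth to the available link bandwidth.
 */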
static int tegra_sor_compute_config(struct tegra_sor *sor,
const struct drm_display_mode *mode,
struct tegra_sor_config *config,
struct drm_dp_link *link)
{
const u64 f = 100000, link_rate = link->rate * 1000;
const u64 pclk = (u64)mode->clock * 1000;
u64 input, output, watermark, num;
struct tegra_sor_params params;
u32 num_syms_per_line;
unsigned int i;
if (!link_rate || !link->lanes || !pclk || !config->bits_per_pixel)
return -EINVAL;
input = pclk * config->bits_per_pixel;
output = link_rate * 8 * link->lanes;
if (input >= output)
return -ERANGE;
	memset(&params, 0, sizeof(params));
params.ratio = div64_u64(input * f, output);
params.num_clocks = div_u64(link_rate * mode->hdisplay, pclk);
params.precision = f;
params.error = 64 * f;
params.tu_size = 64;
for (i = params.tu_size; i >= 32; i--)
		if (tegra_sor_compute_params(sor, &params, i))
break;
if (params.active_frac == 0) {
config->active_polarity = 0;
config->active_count = params.active_count;
if (!params.active_polarity)
config->active_count--;
config->tu_size = params.tu_size;
config->active_frac = 1;
} else {
config->active_polarity = params.active_polarity;
config->active_count = params.active_count;
config->active_frac = params.active_frac;
config->tu_size = params.tu_size;
}
dev_dbg(sor->dev,
"polarity: %d active count: %d tu size: %d active frac: %d\n",
config->active_polarity, config->active_count,
config->tu_size, config->active_frac);
watermark = params.ratio * config->tu_size * (f - params.ratio);
watermark = div_u64(watermark, f);
watermark = div_u64(watermark + params.error, f);
config->watermark = watermark + (config->bits_per_pixel / 8) + 2;
num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) *
(link->lanes * 8);
if (config->watermark > 30) {
config->watermark = 30;
dev_err(sor->dev,
"unable to compute TU size, forcing watermark to %u\n",
config->watermark);
} else if (config->watermark > num_syms_per_line) {
config->watermark = num_syms_per_line;
dev_err(sor->dev, "watermark too high, forcing to %u\n",
config->watermark);
}
/* compute the number of symbols per horizontal blanking interval */
num = ((mode->htotal - mode->hdisplay) - 7) * link_rate;
config->hblank_symbols = div_u64(num, pclk);
if (link->caps.enhanced_framing)
config->hblank_symbols -= 3;
config->hblank_symbols -= 12 / link->lanes;
/* compute the number of symbols per vertical blanking interval */
num = (mode->hdisplay - 25) * link_rate;
config->vblank_symbols = div_u64(num, pclk);
config->vblank_symbols -= 36 / link->lanes + 4;
dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols,
config->vblank_symbols);
return 0;
}
static void tegra_sor_apply_config(struct tegra_sor *sor,
const struct tegra_sor_config *config)
{
u32 value;
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
value |= SOR_DP_LINKCTL_TU_SIZE(config->tu_size);
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
value |= SOR_DP_CONFIG_WATERMARK(config->watermark);
value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config->active_count);
value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config->active_frac);
if (config->active_polarity)
value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
else
value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
value |= config->hblank_symbols & 0xffff;
tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
value |= config->vblank_symbols & 0xffff;
tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
}
static void tegra_sor_mode_set(struct tegra_sor *sor,
const struct drm_display_mode *mode,
struct tegra_sor_state *state)
{
struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc);
unsigned int vbe, vse, hbe, hse, vbs, hbs;
u32 value;
value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
value &= ~SOR_STATE_ASY_OWNER_MASK;
value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
SOR_STATE_ASY_OWNER(dc->pipe + 1);
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
value &= ~SOR_STATE_ASY_HSYNCPOL;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
value |= SOR_STATE_ASY_HSYNCPOL;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
value &= ~SOR_STATE_ASY_VSYNCPOL;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
value |= SOR_STATE_ASY_VSYNCPOL;
switch (state->bpc) {
case 16:
value |= SOR_STATE_ASY_PIXELDEPTH_BPP_48_444;
break;
case 12:
value |= SOR_STATE_ASY_PIXELDEPTH_BPP_36_444;
break;
case 10:
value |= SOR_STATE_ASY_PIXELDEPTH_BPP_30_444;
break;
case 8:
value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
break;
case 6:
value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
break;
default:
value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
break;
}
tegra_sor_writel(sor, value, SOR_STATE1);
/*
* TODO: The video timing programming below doesn't seem to match the
* register definitions.
*/
value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
tegra_sor_writel(sor, value, sor->soc->regs->head_state1 + dc->pipe);
/* sync end = sync width - 1 */
vse = mode->vsync_end - mode->vsync_start - 1;
hse = mode->hsync_end - mode->hsync_start - 1;
value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
tegra_sor_writel(sor, value, sor->soc->regs->head_state2 + dc->pipe);
/* blank end = sync end + back porch */
vbe = vse + (mode->vtotal - mode->vsync_end);
hbe = hse + (mode->htotal - mode->hsync_end);
value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
tegra_sor_writel(sor, value, sor->soc->regs->head_state3 + dc->pipe);
/* blank start = blank end + active */
vbs = vbe + mode->vdisplay;
hbs = hbe + mode->hdisplay;
value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
tegra_sor_writel(sor, value, sor->soc->regs->head_state4 + dc->pipe);
/* XXX interlacing support */
tegra_sor_writel(sor, 0x001, sor->soc->regs->head_state5 + dc->pipe);
}
static int tegra_sor_detach(struct tegra_sor *sor)
{
unsigned long value, timeout;
/* switch to safe mode */
value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value &= ~SOR_SUPER_STATE_MODE_NORMAL;
tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
value = tegra_sor_readl(sor, SOR_PWR);
if (value & SOR_PWR_MODE_SAFE)
break;
}
if ((value & SOR_PWR_MODE_SAFE) == 0)
return -ETIMEDOUT;
/* go to sleep */
value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK;
tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
/* detach */
value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value &= ~SOR_SUPER_STATE_ATTACHED;
tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
value = tegra_sor_readl(sor, SOR_TEST);
if ((value & SOR_TEST_ATTACHED) == 0)
break;
usleep_range(25, 100);
}
if ((value & SOR_TEST_ATTACHED) != 0)
return -ETIMEDOUT;
return 0;
}
static int tegra_sor_power_down(struct tegra_sor *sor)
{
unsigned long value, timeout;
int err;
value = tegra_sor_readl(sor, SOR_PWR);
value &= ~SOR_PWR_NORMAL_STATE_PU;
value |= SOR_PWR_TRIGGER;
tegra_sor_writel(sor, value, SOR_PWR);
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
value = tegra_sor_readl(sor, SOR_PWR);
if ((value & SOR_PWR_TRIGGER) == 0)
return 0;
usleep_range(25, 100);
}
if ((value & SOR_PWR_TRIGGER) != 0)
return -ETIMEDOUT;
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0) {
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
return err;
}
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 100);
value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
tegra_sor_writel(sor, value, sor->soc->regs->pll0);
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_SEQ_PLLCAPPD;
value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 100);
return 0;
}
static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
{
u32 value;
timeout = jiffies + msecs_to_jiffies(timeout);
while (time_before(jiffies, timeout)) {
value = tegra_sor_readl(sor, SOR_CRCA);
if (value & SOR_CRCA_VALID)
return 0;
usleep_range(100, 200);
}
return -ETIMEDOUT;
}
static int tegra_sor_show_crc(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_sor *sor = node->info_ent->data;
struct drm_crtc *crtc = sor->output.encoder.crtc;
struct drm_device *drm = node->minor->dev;
int err = 0;
u32 value;
drm_modeset_lock_all(drm);
if (!crtc || !crtc->state->active) {
err = -EBUSY;
goto unlock;
}
value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
tegra_sor_writel(sor, value, SOR_STATE1);
value = tegra_sor_readl(sor, SOR_CRC_CNTRL);
value |= SOR_CRC_CNTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_CRC_CNTRL);
value = tegra_sor_readl(sor, SOR_TEST);
value &= ~SOR_TEST_CRC_POST_SERIALIZE;
tegra_sor_writel(sor, value, SOR_TEST);
err = tegra_sor_crc_wait(sor, 100);
if (err < 0)
goto unlock;
tegra_sor_writel(sor, SOR_CRCA_RESET, SOR_CRCA);
value = tegra_sor_readl(sor, SOR_CRCB);
seq_printf(s, "%08x\n", value);
unlock:
drm_modeset_unlock_all(drm);
return err;
}
#define DEBUGFS_REG32(_name) { .name = #_name, .offset = _name }
static const struct debugfs_reg32 tegra_sor_regs[] = {
DEBUGFS_REG32(SOR_CTXSW),
DEBUGFS_REG32(SOR_SUPER_STATE0),
DEBUGFS_REG32(SOR_SUPER_STATE1),
DEBUGFS_REG32(SOR_STATE0),
DEBUGFS_REG32(SOR_STATE1),
DEBUGFS_REG32(SOR_HEAD_STATE0(0)),
DEBUGFS_REG32(SOR_HEAD_STATE0(1)),
DEBUGFS_REG32(SOR_HEAD_STATE1(0)),
DEBUGFS_REG32(SOR_HEAD_STATE1(1)),
DEBUGFS_REG32(SOR_HEAD_STATE2(0)),
DEBUGFS_REG32(SOR_HEAD_STATE2(1)),
DEBUGFS_REG32(SOR_HEAD_STATE3(0)),
DEBUGFS_REG32(SOR_HEAD_STATE3(1)),
DEBUGFS_REG32(SOR_HEAD_STATE4(0)),
DEBUGFS_REG32(SOR_HEAD_STATE4(1)),
DEBUGFS_REG32(SOR_HEAD_STATE5(0)),
DEBUGFS_REG32(SOR_HEAD_STATE5(1)),
DEBUGFS_REG32(SOR_CRC_CNTRL),
DEBUGFS_REG32(SOR_DP_DEBUG_MVID),
DEBUGFS_REG32(SOR_CLK_CNTRL),
DEBUGFS_REG32(SOR_CAP),
DEBUGFS_REG32(SOR_PWR),
DEBUGFS_REG32(SOR_TEST),
DEBUGFS_REG32(SOR_PLL0),
DEBUGFS_REG32(SOR_PLL1),
DEBUGFS_REG32(SOR_PLL2),
DEBUGFS_REG32(SOR_PLL3),
DEBUGFS_REG32(SOR_CSTM),
DEBUGFS_REG32(SOR_LVDS),
DEBUGFS_REG32(SOR_CRCA),
DEBUGFS_REG32(SOR_CRCB),
DEBUGFS_REG32(SOR_BLANK),
DEBUGFS_REG32(SOR_SEQ_CTL),
DEBUGFS_REG32(SOR_LANE_SEQ_CTL),
DEBUGFS_REG32(SOR_SEQ_INST(0)),
DEBUGFS_REG32(SOR_SEQ_INST(1)),
DEBUGFS_REG32(SOR_SEQ_INST(2)),
DEBUGFS_REG32(SOR_SEQ_INST(3)),
DEBUGFS_REG32(SOR_SEQ_INST(4)),
DEBUGFS_REG32(SOR_SEQ_INST(5)),
DEBUGFS_REG32(SOR_SEQ_INST(6)),
DEBUGFS_REG32(SOR_SEQ_INST(7)),
DEBUGFS_REG32(SOR_SEQ_INST(8)),
DEBUGFS_REG32(SOR_SEQ_INST(9)),
DEBUGFS_REG32(SOR_SEQ_INST(10)),
DEBUGFS_REG32(SOR_SEQ_INST(11)),
DEBUGFS_REG32(SOR_SEQ_INST(12)),
DEBUGFS_REG32(SOR_SEQ_INST(13)),
DEBUGFS_REG32(SOR_SEQ_INST(14)),
DEBUGFS_REG32(SOR_SEQ_INST(15)),
DEBUGFS_REG32(SOR_PWM_DIV),
DEBUGFS_REG32(SOR_PWM_CTL),
DEBUGFS_REG32(SOR_VCRC_A0),
DEBUGFS_REG32(SOR_VCRC_A1),
DEBUGFS_REG32(SOR_VCRC_B0),
DEBUGFS_REG32(SOR_VCRC_B1),
DEBUGFS_REG32(SOR_CCRC_A0),
DEBUGFS_REG32(SOR_CCRC_A1),
DEBUGFS_REG32(SOR_CCRC_B0),
DEBUGFS_REG32(SOR_CCRC_B1),
DEBUGFS_REG32(SOR_EDATA_A0),
DEBUGFS_REG32(SOR_EDATA_A1),
DEBUGFS_REG32(SOR_EDATA_B0),
DEBUGFS_REG32(SOR_EDATA_B1),
DEBUGFS_REG32(SOR_COUNT_A0),
DEBUGFS_REG32(SOR_COUNT_A1),
DEBUGFS_REG32(SOR_COUNT_B0),
DEBUGFS_REG32(SOR_COUNT_B1),
DEBUGFS_REG32(SOR_DEBUG_A0),
DEBUGFS_REG32(SOR_DEBUG_A1),
DEBUGFS_REG32(SOR_DEBUG_B0),
DEBUGFS_REG32(SOR_DEBUG_B1),
DEBUGFS_REG32(SOR_TRIG),
DEBUGFS_REG32(SOR_MSCHECK),
DEBUGFS_REG32(SOR_XBAR_CTRL),
DEBUGFS_REG32(SOR_XBAR_POL),
DEBUGFS_REG32(SOR_DP_LINKCTL0),
DEBUGFS_REG32(SOR_DP_LINKCTL1),
DEBUGFS_REG32(SOR_LANE_DRIVE_CURRENT0),
DEBUGFS_REG32(SOR_LANE_DRIVE_CURRENT1),
DEBUGFS_REG32(SOR_LANE4_DRIVE_CURRENT0),
DEBUGFS_REG32(SOR_LANE4_DRIVE_CURRENT1),
DEBUGFS_REG32(SOR_LANE_PREEMPHASIS0),
DEBUGFS_REG32(SOR_LANE_PREEMPHASIS1),
DEBUGFS_REG32(SOR_LANE4_PREEMPHASIS0),
DEBUGFS_REG32(SOR_LANE4_PREEMPHASIS1),
DEBUGFS_REG32(SOR_LANE_POSTCURSOR0),
DEBUGFS_REG32(SOR_LANE_POSTCURSOR1),
DEBUGFS_REG32(SOR_DP_CONFIG0),
DEBUGFS_REG32(SOR_DP_CONFIG1),
DEBUGFS_REG32(SOR_DP_MN0),
DEBUGFS_REG32(SOR_DP_MN1),
DEBUGFS_REG32(SOR_DP_PADCTL0),
DEBUGFS_REG32(SOR_DP_PADCTL1),
DEBUGFS_REG32(SOR_DP_PADCTL2),
DEBUGFS_REG32(SOR_DP_DEBUG0),
DEBUGFS_REG32(SOR_DP_DEBUG1),
DEBUGFS_REG32(SOR_DP_SPARE0),
DEBUGFS_REG32(SOR_DP_SPARE1),
DEBUGFS_REG32(SOR_DP_AUDIO_CTRL),
DEBUGFS_REG32(SOR_DP_AUDIO_HBLANK_SYMBOLS),
DEBUGFS_REG32(SOR_DP_AUDIO_VBLANK_SYMBOLS),
DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_HEADER),
DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK0),
DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK1),
DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK2),
DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK3),
DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK4),
DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK5),
DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK6),
DEBUGFS_REG32(SOR_DP_TPG),
DEBUGFS_REG32(SOR_DP_TPG_CONFIG),
DEBUGFS_REG32(SOR_DP_LQ_CSTM0),
DEBUGFS_REG32(SOR_DP_LQ_CSTM1),
DEBUGFS_REG32(SOR_DP_LQ_CSTM2),
};
static int tegra_sor_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_sor *sor = node->info_ent->data;
struct drm_crtc *crtc = sor->output.encoder.crtc;
struct drm_device *drm = node->minor->dev;
unsigned int i;
int err = 0;
drm_modeset_lock_all(drm);
if (!crtc || !crtc->state->active) {
err = -EBUSY;
goto unlock;
}
for (i = 0; i < ARRAY_SIZE(tegra_sor_regs); i++) {
unsigned int offset = tegra_sor_regs[i].offset;
seq_printf(s, "%-38s %#05x %08x\n", tegra_sor_regs[i].name,
offset, tegra_sor_readl(sor, offset));
}
unlock:
drm_modeset_unlock_all(drm);
return err;
}
static const struct drm_info_list debugfs_files[] = {
{ "crc", tegra_sor_show_crc, 0, NULL },
{ "regs", tegra_sor_show_regs, 0, NULL },
};
static int tegra_sor_late_register(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
unsigned int i, count = ARRAY_SIZE(debugfs_files);
struct drm_minor *minor = connector->dev->primary;
struct dentry *root = connector->debugfs_entry;
struct tegra_sor *sor = to_sor(output);
sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
if (!sor->debugfs_files)
return -ENOMEM;
for (i = 0; i < count; i++)
sor->debugfs_files[i].data = sor;
drm_debugfs_create_files(sor->debugfs_files, count, root, minor);
return 0;
}
static void tegra_sor_early_unregister(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
unsigned int count = ARRAY_SIZE(debugfs_files);
struct tegra_sor *sor = to_sor(output);
drm_debugfs_remove_files(sor->debugfs_files, count,
connector->dev->primary);
kfree(sor->debugfs_files);
sor->debugfs_files = NULL;
}
static void tegra_sor_connector_reset(struct drm_connector *connector)
{
struct tegra_sor_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return;
if (connector->state) {
__drm_atomic_helper_connector_destroy_state(connector->state);
kfree(connector->state);
}
__drm_atomic_helper_connector_reset(connector, &state->base);
}
static enum drm_connector_status
tegra_sor_connector_detect(struct drm_connector *connector, bool force)
{
struct tegra_output *output = connector_to_output(connector);
struct tegra_sor *sor = to_sor(output);
if (sor->aux)
return drm_dp_aux_detect(sor->aux);
return tegra_output_connector_detect(connector, force);
}
static struct drm_connector_state *
tegra_sor_connector_duplicate_state(struct drm_connector *connector)
{
struct tegra_sor_state *state = to_sor_state(connector->state);
struct tegra_sor_state *copy;
copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
if (!copy)
return NULL;
__drm_atomic_helper_connector_duplicate_state(connector, &copy->base);
return &copy->base;
}
static const struct drm_connector_funcs tegra_sor_connector_funcs = {
.reset = tegra_sor_connector_reset,
.detect = tegra_sor_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = tegra_output_connector_destroy,
.atomic_duplicate_state = tegra_sor_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.late_register = tegra_sor_late_register,
.early_unregister = tegra_sor_early_unregister,
};
static int tegra_sor_connector_get_modes(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
struct tegra_sor *sor = to_sor(output);
int err;
if (sor->aux)
drm_dp_aux_enable(sor->aux);
err = tegra_output_connector_get_modes(connector);
if (sor->aux)
drm_dp_aux_disable(sor->aux);
return err;
}
static enum drm_mode_status
tegra_sor_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = {
.get_modes = tegra_sor_connector_get_modes,
.mode_valid = tegra_sor_connector_mode_valid,
};
static int
tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_sor_state *state = to_sor_state(conn_state);
struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
unsigned long pclk = crtc_state->mode.clock * 1000;
struct tegra_sor *sor = to_sor(output);
struct drm_display_info *info;
int err;
info = &output->connector.display_info;
/*
* For HBR2 modes, the SOR brick needs to use the x20 multiplier, so
* the pixel clock must be corrected accordingly.
*/
if (pclk >= 340000000) {
state->link_speed = 20;
state->pclk = pclk / 2;
} else {
state->link_speed = 10;
state->pclk = pclk;
}
err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
pclk, 0);
if (err < 0) {
dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
return err;
}
switch (info->bpc) {
case 8:
case 6:
state->bpc = info->bpc;
break;
default:
DRM_DEBUG_KMS("%u bits-per-color not supported\n", info->bpc);
state->bpc = 8;
break;
}
return 0;
}
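/*
 * For reference: the helper below packs up to four infoframe bytes into one
 * 32-bit register value, least-significant byte first. For example
 * (illustrative values only), the bytes { 0x11, 0x22, 0x33, 0x44 } become
 * 0x44332211, matching the little-endian layout expected by the
 * SOR_HDMI_*_INFOFRAME_SUBPACK registers written further below.
 */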
static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
{
u32 value = 0;
size_t i;
for (i = size; i > 0; i--)
value = (value << 8) | ptr[i - 1];
return value;
}
static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
const void *data, size_t size)
{
const u8 *ptr = data;
unsigned long offset;
size_t i, j;
u32 value;
switch (ptr[0]) {
case HDMI_INFOFRAME_TYPE_AVI:
offset = SOR_HDMI_AVI_INFOFRAME_HEADER;
break;
case HDMI_INFOFRAME_TYPE_AUDIO:
offset = SOR_HDMI_AUDIO_INFOFRAME_HEADER;
break;
case HDMI_INFOFRAME_TYPE_VENDOR:
offset = SOR_HDMI_VSI_INFOFRAME_HEADER;
break;
default:
dev_err(sor->dev, "unsupported infoframe type: %02x\n",
ptr[0]);
return;
}
value = INFOFRAME_HEADER_TYPE(ptr[0]) |
INFOFRAME_HEADER_VERSION(ptr[1]) |
INFOFRAME_HEADER_LEN(ptr[2]);
tegra_sor_writel(sor, value, offset);
offset++;
/*
* Each subpack contains 7 bytes, divided into:
* - subpack_low: bytes 0 - 3
* - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
*/
for (i = 3, j = 0; i < size; i += 7, j += 8) {
size_t rem = size - i, num = min_t(size_t, rem, 4);
value = tegra_sor_hdmi_subpack(&ptr[i], num);
tegra_sor_writel(sor, value, offset++);
num = min_t(size_t, rem - num, 3);
value = tegra_sor_hdmi_subpack(&ptr[i + 4], num);
tegra_sor_writel(sor, value, offset++);
}
}
static int
tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
const struct drm_display_mode *mode)
{
u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
struct hdmi_avi_infoframe frame;
u32 value;
int err;
/* disable AVI infoframe */
value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
value &= ~INFOFRAME_CTRL_SINGLE;
value &= ~INFOFRAME_CTRL_OTHER;
value &= ~INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
&sor->output.connector, mode);
if (err < 0) {
dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
return err;
}
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
dev_err(sor->dev, "failed to pack AVI infoframe: %d\n", err);
return err;
}
tegra_sor_hdmi_write_infopack(sor, buffer, err);
/* enable AVI infoframe */
value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
value |= INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
return 0;
}
static void tegra_sor_write_eld(struct tegra_sor *sor)
{
size_t length = drm_eld_size(sor->output.connector.eld), i;
for (i = 0; i < length; i++)
tegra_sor_writel(sor, i << 8 | sor->output.connector.eld[i],
SOR_AUDIO_HDA_ELD_BUFWR);
/*
* The HDA codec will always report an ELD buffer size of 96 bytes and
* the HDA codec driver will check that each byte read from the buffer
 * is valid. Therefore every byte must be written, even if fewer than
 * 96 bytes were parsed from the EDID.
*/
for (i = length; i < 96; i++)
tegra_sor_writel(sor, i << 8 | 0, SOR_AUDIO_HDA_ELD_BUFWR);
}
static void tegra_sor_audio_prepare(struct tegra_sor *sor)
{
u32 value;
/*
 * Enable and unmask the HDA codec SCRATCH0/SCRATCH1 register
 * interrupts. These are used for interoperability between the HDA
 * codec driver and the HDMI/DP driver.
*/
value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
tegra_sor_writel(sor, value, SOR_INT_ENABLE);
tegra_sor_writel(sor, value, SOR_INT_MASK);
tegra_sor_write_eld(sor);
value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
tegra_sor_writel(sor, value, SOR_AUDIO_HDA_PRESENSE);
}
static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
tegra_sor_writel(sor, 0, SOR_INT_MASK);
tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
}
static void tegra_sor_audio_enable(struct tegra_sor *sor)
{
u32 value;
value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
/* select HDA audio input */
value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
/* inject null samples */
if (sor->format.channels != 2)
value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
else
value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
/* enable advertising HBR capability */
tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
}
static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
{
u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
struct hdmi_audio_infoframe frame;
u32 value;
int err;
err = hdmi_audio_infoframe_init(&frame);
if (err < 0) {
dev_err(sor->dev, "failed to setup audio infoframe: %d\n", err);
return err;
}
frame.channels = sor->format.channels;
err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
dev_err(sor->dev, "failed to pack audio infoframe: %d\n", err);
return err;
}
tegra_sor_hdmi_write_infopack(sor, buffer, err);
value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
value |= INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
return 0;
}
static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
{
u32 value;
tegra_sor_audio_enable(sor);
tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
value = SOR_HDMI_SPARE_ACR_PRIORITY_HIGH |
SOR_HDMI_SPARE_CTS_RESET(1) |
SOR_HDMI_SPARE_HW_CTS_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_SPARE);
/* enable HW CTS */
value = SOR_HDMI_ACR_SUBPACK_LOW_SB1(0);
tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_LOW);
/* allow packet to be sent */
value = SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_HIGH);
/* reset N counter and enable lookup */
value = SOR_HDMI_AUDIO_N_RESET | SOR_HDMI_AUDIO_N_LOOKUP;
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
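/*
 * Note: the writes below program the HDMI audio clock regeneration (ACR)
 * parameters for each supported sample-rate family. The AVAL values appear
 * to be scaled against a 24 MHz reference clock, e.g. for a 32 kHz stream
 * the expression just below evaluates to
 * (24000 * 4096) / (128 * 32000 / 1000) = 24000; the 44.1 kHz family uses
 * pre-computed constants instead.
 */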
value = (24000 * 4096) / (128 * sor->format.sample_rate / 1000);
tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0320);
tegra_sor_writel(sor, 4096, SOR_AUDIO_NVAL_0320);
tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0441);
tegra_sor_writel(sor, 4704, SOR_AUDIO_NVAL_0441);
tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0882);
tegra_sor_writel(sor, 9408, SOR_AUDIO_NVAL_0882);
tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_1764);
tegra_sor_writel(sor, 18816, SOR_AUDIO_NVAL_1764);
value = (24000 * 6144) / (128 * sor->format.sample_rate / 1000);
tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0480);
tegra_sor_writel(sor, 6144, SOR_AUDIO_NVAL_0480);
value = (24000 * 12288) / (128 * sor->format.sample_rate / 1000);
tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0960);
tegra_sor_writel(sor, 12288, SOR_AUDIO_NVAL_0960);
value = (24000 * 24576) / (128 * sor->format.sample_rate / 1000);
tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_1920);
tegra_sor_writel(sor, 24576, SOR_AUDIO_NVAL_1920);
value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_N);
value &= ~SOR_HDMI_AUDIO_N_RESET;
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
tegra_sor_hdmi_enable_audio_infoframe(sor);
}
static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
{
u32 value;
value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
value &= ~INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
}
static void tegra_sor_hdmi_audio_disable(struct tegra_sor *sor)
{
tegra_sor_hdmi_disable_audio_infoframe(sor);
}
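/*
 * Note: the lookup below assumes the per-SoC settings table is sorted by
 * ascending frequency; the first entry whose frequency is greater than or
 * equal to the requested pixel clock is used, and NULL is returned if the
 * clock exceeds every entry.
 */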
static struct tegra_sor_hdmi_settings *
tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
{
unsigned int i;
for (i = 0; i < sor->num_settings; i++)
if (frequency <= sor->settings[i].frequency)
return &sor->settings[i];
return NULL;
}
static void tegra_sor_hdmi_disable_scrambling(struct tegra_sor *sor)
{
u32 value;
value = tegra_sor_readl(sor, SOR_HDMI2_CTRL);
value &= ~SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4;
value &= ~SOR_HDMI2_CTRL_SCRAMBLE;
tegra_sor_writel(sor, value, SOR_HDMI2_CTRL);
}
static void tegra_sor_hdmi_scdc_disable(struct tegra_sor *sor)
{
drm_scdc_set_high_tmds_clock_ratio(&sor->output.connector, false);
drm_scdc_set_scrambling(&sor->output.connector, false);
tegra_sor_hdmi_disable_scrambling(sor);
}
static void tegra_sor_hdmi_scdc_stop(struct tegra_sor *sor)
{
if (sor->scdc_enabled) {
cancel_delayed_work_sync(&sor->scdc);
tegra_sor_hdmi_scdc_disable(sor);
}
}
static void tegra_sor_hdmi_enable_scrambling(struct tegra_sor *sor)
{
u32 value;
value = tegra_sor_readl(sor, SOR_HDMI2_CTRL);
value |= SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4;
value |= SOR_HDMI2_CTRL_SCRAMBLE;
tegra_sor_writel(sor, value, SOR_HDMI2_CTRL);
}
static void tegra_sor_hdmi_scdc_enable(struct tegra_sor *sor)
{
drm_scdc_set_high_tmds_clock_ratio(&sor->output.connector, true);
drm_scdc_set_scrambling(&sor->output.connector, true);
tegra_sor_hdmi_enable_scrambling(sor);
}
static void tegra_sor_hdmi_scdc_work(struct work_struct *work)
{
struct tegra_sor *sor = container_of(work, struct tegra_sor, scdc.work);
if (!drm_scdc_get_scrambling_status(&sor->output.connector)) {
DRM_DEBUG_KMS("SCDC not scrambled\n");
tegra_sor_hdmi_scdc_enable(sor);
}
schedule_delayed_work(&sor->scdc, msecs_to_jiffies(5000));
}
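/*
 * Note: HDMI 2.0 requires scrambling (and the 1/40 TMDS clock ratio) for
 * character rates above 340 MHz, which is why SCDC is only started for such
 * modes below. The delayed work re-reads the sink's scrambling status every
 * 5 seconds and re-enables scrambling if the sink has dropped it.
 */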
static void tegra_sor_hdmi_scdc_start(struct tegra_sor *sor)
{
struct drm_scdc *scdc = &sor->output.connector.display_info.hdmi.scdc;
struct drm_display_mode *mode;
mode = &sor->output.encoder.crtc->state->adjusted_mode;
if (mode->clock >= 340000 && scdc->supported) {
schedule_delayed_work(&sor->scdc, msecs_to_jiffies(5000));
tegra_sor_hdmi_scdc_enable(sor);
sor->scdc_enabled = true;
}
}
static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_sor *sor = to_sor(output);
u32 value;
int err;
tegra_sor_audio_unprepare(sor);
tegra_sor_hdmi_scdc_stop(sor);
err = tegra_sor_detach(sor);
if (err < 0)
dev_err(sor->dev, "failed to detach SOR: %d\n", err);
tegra_sor_writel(sor, 0, SOR_STATE1);
tegra_sor_update(sor);
/* disable display to SOR clock */
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
if (!sor->soc->has_nvdisplay)
value &= ~SOR1_TIMING_CYA;
value &= ~SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
tegra_dc_commit(dc);
err = tegra_sor_power_down(sor);
if (err < 0)
dev_err(sor->dev, "failed to power down SOR: %d\n", err);
err = tegra_io_pad_power_disable(sor->pad);
if (err < 0)
dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
host1x_client_suspend(&sor->client);
}
static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_sor_hdmi_settings *settings;
struct tegra_sor *sor = to_sor(output);
struct tegra_sor_state *state;
struct drm_display_mode *mode;
unsigned long rate, pclk;
unsigned int div, i;
u32 value;
int err;
state = to_sor_state(output->connector.state);
mode = &encoder->crtc->state->adjusted_mode;
pclk = mode->clock * 1000;
err = host1x_client_resume(&sor->client);
if (err < 0) {
dev_err(sor->dev, "failed to resume: %d\n", err);
return;
}
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0) {
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
return;
}
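/*
 * Note: the divider computed below appears to express the current SOR clock
 * rate in 0.25 MHz steps; its integer and fractional parts are later split
 * via SOR_REFCLK_DIV_INT()/SOR_REFCLK_DIV_FRAC() when SOR_REFCLK is
 * programmed further down.
 */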
div = clk_get_rate(sor->clk) / 1000000 * 4;
err = tegra_io_pad_power_enable(sor->pad);
if (err < 0)
dev_err(sor->dev, "failed to power on I/O pad: %d\n", err);
usleep_range(20, 100);
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 100);
value = tegra_sor_readl(sor, sor->soc->regs->pll3);
value &= ~SOR_PLL3_PLL_VDD_MODE_3V3;
tegra_sor_writel(sor, value, sor->soc->regs->pll3);
value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value &= ~SOR_PLL0_VCOPD;
value &= ~SOR_PLL0_PWR;
tegra_sor_writel(sor, value, sor->soc->regs->pll0);
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(200, 400);
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
value &= ~SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 100);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
while (true) {
value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
if ((value & SOR_LANE_SEQ_CTL_STATE_BUSY) == 0)
break;
usleep_range(250, 1000);
}
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
SOR_LANE_SEQ_CTL_POWER_STATE_UP | SOR_LANE_SEQ_CTL_DELAY(5);
tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
while (true) {
value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
break;
usleep_range(250, 1000);
}
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
if (mode->clock < 340000) {
DRM_DEBUG_KMS("setting 2.7 GHz link speed\n");
value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
} else {
DRM_DEBUG_KMS("setting 5.4 GHz link speed\n");
value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40;
}
value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
/* SOR pad PLL stabilization time */
usleep_range(250, 1000);
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
value |= SOR_DP_LINKCTL_LANE_COUNT(4);
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
value = tegra_sor_readl(sor, SOR_DP_SPARE0);
value &= ~SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
value &= ~SOR_DP_SPARE_SEQ_ENABLE;
value &= ~SOR_DP_SPARE_MACRO_SOR_CLK;
tegra_sor_writel(sor, value, SOR_DP_SPARE0);
value = SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_CTL_PU_PC_ALT(0) |
SOR_SEQ_CTL_PD_PC(8) | SOR_SEQ_CTL_PD_PC_ALT(8);
tegra_sor_writel(sor, value, SOR_SEQ_CTL);
value = SOR_SEQ_INST_DRIVE_PWM_OUT_LO | SOR_SEQ_INST_HALT |
SOR_SEQ_INST_WAIT_VSYNC | SOR_SEQ_INST_WAIT(1);
tegra_sor_writel(sor, value, SOR_SEQ_INST(0));
tegra_sor_writel(sor, value, SOR_SEQ_INST(8));
if (!sor->soc->has_nvdisplay) {
/* program the reference clock */
value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
tegra_sor_writel(sor, value, SOR_REFCLK);
}
/* XXX not in TRM */
for (value = 0, i = 0; i < 5; i++)
value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) |
SOR_XBAR_CTRL_LINK1_XSEL(i, i);
tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
/*
* Switch the pad clock to the DP clock. Note that we cannot actually
* do this because Tegra186 and later don't support clk_set_parent()
* on the sorX_pad_clkout clocks. We already do the equivalent above
* using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
*/
#if 0
err = clk_set_parent(sor->clk_pad, sor->clk_dp);
if (err < 0) {
dev_err(sor->dev, "failed to select pad parent clock: %d\n",
err);
return;
}
#endif
/* switch the SOR clock to the pad clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
if (err < 0) {
dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
err);
return;
}
/* switch the output clock to the parent pixel clock */
err = clk_set_parent(sor->clk, sor->clk_parent);
if (err < 0) {
dev_err(sor->dev, "failed to select output parent clock: %d\n",
err);
return;
}
/* adjust clock rate for HDMI 2.0 modes */
rate = clk_get_rate(sor->clk_parent);
if (mode->clock >= 340000)
rate /= 2;
DRM_DEBUG_KMS("setting clock to %lu Hz, mode: %lu Hz\n", rate, pclk);
clk_set_rate(sor->clk, rate);
if (!sor->soc->has_nvdisplay) {
value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
/* XXX is this the proper check? */
if (mode->clock < 75000)
value |= SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED;
tegra_sor_writel(sor, value, SOR_INPUT_CONTROL);
}
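/*
 * Note: max_ac below appears to bound the auxiliary/audio packet budget to
 * what fits in the horizontal blanking interval, after subtracting the
 * rekeying window (SOR_REKEY) and a small guard band, expressed in
 * 32-pixel packet slots.
 */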
max_ac = ((mode->htotal - mode->hdisplay) - SOR_REKEY - 18) / 32;
value = SOR_HDMI_CTRL_ENABLE | SOR_HDMI_CTRL_MAX_AC_PACKET(max_ac) |
SOR_HDMI_CTRL_AUDIO_LAYOUT | SOR_HDMI_CTRL_REKEY(SOR_REKEY);
tegra_sor_writel(sor, value, SOR_HDMI_CTRL);
if (!dc->soc->has_nvdisplay) {
/* H_PULSE2 setup */
pulse_start = h_ref_to_sync +
(mode->hsync_end - mode->hsync_start) +
(mode->htotal - mode->hsync_end) - 10;
value = PULSE_LAST_END_A | PULSE_QUAL_VACTIVE |
PULSE_POLARITY_HIGH | PULSE_MODE_NORMAL;
tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
value = PULSE_END(pulse_start + 8) | PULSE_START(pulse_start);
tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
value = tegra_dc_readl(dc, DC_DISP_DISP_SIGNAL_OPTIONS0);
value |= H_PULSE2_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_SIGNAL_OPTIONS0);
}
/* infoframe setup */
err = tegra_sor_hdmi_setup_avi_infoframe(sor, mode);
if (err < 0)
dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
/* XXX HDMI audio support not implemented yet */
tegra_sor_hdmi_disable_audio_infoframe(sor);
/* use single TMDS protocol */
value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
value |= SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A;
tegra_sor_writel(sor, value, SOR_STATE1);
/* power up pad calibration */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
/* production settings */
settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
if (!settings) {
dev_err(sor->dev, "no settings for pixel clock %d Hz\n",
mode->clock * 1000);
return;
}
value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value &= ~SOR_PLL0_ICHPMP_MASK;
value &= ~SOR_PLL0_FILTER_MASK;
value &= ~SOR_PLL0_VCOCAP_MASK;
value |= SOR_PLL0_ICHPMP(settings->ichpmp);
value |= SOR_PLL0_FILTER(settings->filter);
value |= SOR_PLL0_VCOCAP(settings->vcocap);
tegra_sor_writel(sor, value, sor->soc->regs->pll0);
/* XXX not in TRM */
value = tegra_sor_readl(sor, sor->soc->regs->pll1);
value &= ~SOR_PLL1_LOADADJ_MASK;
value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
value |= SOR_PLL1_LOADADJ(settings->loadadj);
value |= SOR_PLL1_TMDS_TERMADJ(settings->tmds_termadj);
value |= SOR_PLL1_TMDS_TERM;
tegra_sor_writel(sor, value, sor->soc->regs->pll1);
value = tegra_sor_readl(sor, sor->soc->regs->pll3);
value &= ~SOR_PLL3_BG_TEMP_COEF_MASK;
value &= ~SOR_PLL3_BG_VREF_LEVEL_MASK;
value &= ~SOR_PLL3_AVDD10_LEVEL_MASK;
value &= ~SOR_PLL3_AVDD14_LEVEL_MASK;
value |= SOR_PLL3_BG_TEMP_COEF(settings->bg_temp_coef);
value |= SOR_PLL3_BG_VREF_LEVEL(settings->bg_vref_level);
value |= SOR_PLL3_AVDD10_LEVEL(settings->avdd10_level);
value |= SOR_PLL3_AVDD14_LEVEL(settings->avdd14_level);
tegra_sor_writel(sor, value, sor->soc->regs->pll3);
value = settings->drive_current[3] << 24 |
settings->drive_current[2] << 16 |
settings->drive_current[1] << 8 |
settings->drive_current[0] << 0;
tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
value = settings->preemphasis[3] << 24 |
settings->preemphasis[2] << 16 |
settings->preemphasis[1] << 8 |
settings->preemphasis[0] << 0;
tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_TX_PU_MASK;
value |= SOR_DP_PADCTL_TX_PU_ENABLE;
value |= SOR_DP_PADCTL_TX_PU(settings->tx_pu_value);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl2);
value &= ~SOR_DP_PADCTL_SPAREPLL_MASK;
value |= SOR_DP_PADCTL_SPAREPLL(settings->sparepll);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl2);
/* power down pad calibration */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_PAD_CAL_PD;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
if (!dc->soc->has_nvdisplay) {
/* miscellaneous display controller settings */
value = VSYNC_H_POSITION(1);
tegra_dc_writel(dc, value, DC_DISP_DISP_TIMING_OPTIONS);
}
value = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL);
value &= ~DITHER_CONTROL_MASK;
value &= ~BASE_COLOR_SIZE_MASK;
switch (state->bpc) {
case 6:
value |= BASE_COLOR_SIZE_666;
break;
case 8:
value |= BASE_COLOR_SIZE_888;
break;
case 10:
value |= BASE_COLOR_SIZE_101010;
break;
case 12:
value |= BASE_COLOR_SIZE_121212;
break;
default:
WARN(1, "%u bits-per-color not supported\n", state->bpc);
value |= BASE_COLOR_SIZE_888;
break;
}
tegra_dc_writel(dc, value, DC_DISP_DISP_COLOR_CONTROL);
/* XXX set display head owner */
value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_OWNER_MASK;
value |= SOR_STATE_ASY_OWNER(1 + dc->pipe);
tegra_sor_writel(sor, value, SOR_STATE1);
err = tegra_sor_power_up(sor, 250);
if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
/* configure dynamic range of output */
value = tegra_sor_readl(sor, sor->soc->regs->head_state0 + dc->pipe);
value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
tegra_sor_writel(sor, value, sor->soc->regs->head_state0 + dc->pipe);
/* configure colorspace */
value = tegra_sor_readl(sor, sor->soc->regs->head_state0 + dc->pipe);
value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
value |= SOR_HEAD_STATE_COLORSPACE_RGB;
tegra_sor_writel(sor, value, sor->soc->regs->head_state0 + dc->pipe);
tegra_sor_mode_set(sor, mode, state);
tegra_sor_update(sor);
/* program preamble timing in SOR (XXX) */
value = tegra_sor_readl(sor, SOR_DP_SPARE0);
value &= ~SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
tegra_sor_writel(sor, value, SOR_DP_SPARE0);
err = tegra_sor_attach(sor);
if (err < 0)
dev_err(sor->dev, "failed to attach SOR: %d\n", err);
/* enable display to SOR clock and generate HDMI preamble */
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
if (!sor->soc->has_nvdisplay)
value |= SOR1_TIMING_CYA;
value |= SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
if (dc->soc->has_nvdisplay) {
value = tegra_dc_readl(dc, DC_DISP_CORE_SOR_SET_CONTROL(sor->index));
value &= ~PROTOCOL_MASK;
value |= PROTOCOL_SINGLE_TMDS_A;
tegra_dc_writel(dc, value, DC_DISP_CORE_SOR_SET_CONTROL(sor->index));
}
tegra_dc_commit(dc);
err = tegra_sor_wakeup(sor);
if (err < 0)
dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
tegra_sor_hdmi_scdc_start(sor);
tegra_sor_audio_prepare(sor);
}
static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
.disable = tegra_sor_hdmi_disable,
.enable = tegra_sor_hdmi_enable,
.atomic_check = tegra_sor_encoder_atomic_check,
};
static void tegra_sor_dp_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_sor *sor = to_sor(output);
u32 value;
int err;
if (output->panel)
drm_panel_disable(output->panel);
/*
* Do not attempt to power down a DP link if we're not connected since
 * the AUX transactions would just time out.
*/
if (output->connector.status != connector_status_disconnected) {
err = drm_dp_link_power_down(sor->aux, &sor->link);
if (err < 0)
dev_err(sor->dev, "failed to power down link: %d\n",
err);
}
err = tegra_sor_detach(sor);
if (err < 0)
dev_err(sor->dev, "failed to detach SOR: %d\n", err);
tegra_sor_writel(sor, 0, SOR_STATE1);
tegra_sor_update(sor);
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value &= ~SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
tegra_dc_commit(dc);
value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
value &= ~SOR_STATE_ASY_SUBOWNER_MASK;
value &= ~SOR_STATE_ASY_OWNER_MASK;
tegra_sor_writel(sor, value, SOR_STATE1);
tegra_sor_update(sor);
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe clock: %d\n", err);
err = tegra_sor_power_down(sor);
if (err < 0)
dev_err(sor->dev, "failed to power down SOR: %d\n", err);
err = tegra_io_pad_power_disable(sor->pad);
if (err < 0)
dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
err = drm_dp_aux_disable(sor->aux);
if (err < 0)
dev_err(sor->dev, "failed disable DPAUX: %d\n", err);
if (output->panel)
drm_panel_unprepare(output->panel);
host1x_client_suspend(&sor->client);
}
static void tegra_sor_dp_enable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_sor *sor = to_sor(output);
struct tegra_sor_config config;
struct tegra_sor_state *state;
struct drm_display_mode *mode;
struct drm_display_info *info;
unsigned int i;
u32 value;
int err;
state = to_sor_state(output->connector.state);
mode = &encoder->crtc->state->adjusted_mode;
info = &output->connector.display_info;
err = host1x_client_resume(&sor->client);
if (err < 0) {
dev_err(sor->dev, "failed to resume: %d\n", err);
return;
}
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
err = tegra_io_pad_power_enable(sor->pad);
if (err < 0)
dev_err(sor->dev, "failed to power on LVDS rail: %d\n", err);
usleep_range(20, 100);
err = drm_dp_aux_enable(sor->aux);
if (err < 0)
dev_err(sor->dev, "failed to enable DPAUX: %d\n", err);
err = drm_dp_link_probe(sor->aux, &sor->link);
if (err < 0)
dev_err(sor->dev, "failed to probe DP link: %d\n", err);
tegra_sor_filter_rates(sor);
err = drm_dp_link_choose(&sor->link, mode, info);
if (err < 0)
dev_err(sor->dev, "failed to choose link: %d\n", err);
if (output->panel)
drm_panel_prepare(output->panel);
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 40);
value = tegra_sor_readl(sor, sor->soc->regs->pll3);
value |= SOR_PLL3_PLL_VDD_MODE_3V3;
tegra_sor_writel(sor, value, sor->soc->regs->pll3);
value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value &= ~(SOR_PLL0_VCOPD | SOR_PLL0_PWR);
tegra_sor_writel(sor, value, sor->soc->regs->pll0);
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
value |= SOR_PLL2_SEQ_PLLCAPPD;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(200, 400);
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
value &= ~SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
if (output->panel)
value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
else
value |= SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
usleep_range(200, 400);
value = tegra_sor_readl(sor, SOR_DP_SPARE0);
/* XXX not in TRM */
if (output->panel)
value |= SOR_DP_SPARE_PANEL_INTERNAL;
else
value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
value |= SOR_DP_SPARE_SEQ_ENABLE;
tegra_sor_writel(sor, value, SOR_DP_SPARE0);
/* XXX not in TRM */
tegra_sor_writel(sor, 0, SOR_LVDS);
value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value &= ~SOR_PLL0_ICHPMP_MASK;
value &= ~SOR_PLL0_VCOCAP_MASK;
value |= SOR_PLL0_ICHPMP(0x1);
value |= SOR_PLL0_VCOCAP(0x3);
value |= SOR_PLL0_RESISTOR_EXT;
tegra_sor_writel(sor, value, sor->soc->regs->pll0);
/* XXX not in TRM */
for (value = 0, i = 0; i < 5; i++)
value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
SOR_XBAR_CTRL_LINK1_XSEL(i, i);
tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
/*
* Switch the pad clock to the DP clock. Note that we cannot actually
* do this because Tegra186 and later don't support clk_set_parent()
* on the sorX_pad_clkout clocks. We already do the equivalent above
* using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
*/
#if 0
err = clk_set_parent(sor->clk_pad, sor->clk_parent);
if (err < 0) {
dev_err(sor->dev, "failed to select pad parent clock: %d\n",
err);
return;
}
#endif
/* switch the SOR clock to the pad clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
if (err < 0) {
dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
err);
return;
}
/* switch the output clock to the parent pixel clock */
err = clk_set_parent(sor->clk, sor->clk_parent);
if (err < 0) {
dev_err(sor->dev, "failed to select output parent clock: %d\n",
err);
return;
}
/* use DP-A protocol */
value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
value |= SOR_STATE_ASY_PROTOCOL_DP_A;
tegra_sor_writel(sor, value, SOR_STATE1);
/* enable port */
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value |= SOR_DP_LINKCTL_ENABLE;
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
tegra_sor_dp_term_calibrate(sor);
err = drm_dp_link_train(&sor->link);
if (err < 0)
dev_err(sor->dev, "link training failed: %d\n", err);
else
dev_dbg(sor->dev, "link training succeeded\n");
err = drm_dp_link_power_up(sor->aux, &sor->link);
if (err < 0)
dev_err(sor->dev, "failed to power up DP link: %d\n", err);
/* compute configuration */
memset(&config, 0, sizeof(config));
config.bits_per_pixel = state->bpc * 3;
err = tegra_sor_compute_config(sor, mode, &config, &sor->link);
if (err < 0)
dev_err(sor->dev, "failed to compute configuration: %d\n", err);
tegra_sor_apply_config(sor, &config);
tegra_sor_mode_set(sor, mode, state);
if (output->panel) {
/* CSTM (LVDS, link A/B, upper) */
value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
SOR_CSTM_UPPER;
tegra_sor_writel(sor, value, SOR_CSTM);
/* PWM setup */
err = tegra_sor_setup_pwm(sor, 250);
if (err < 0)
dev_err(sor->dev, "failed to setup PWM: %d\n", err);
}
tegra_sor_update(sor);
err = tegra_sor_power_up(sor, 250);
if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
/* attach and wake up */
err = tegra_sor_attach(sor);
if (err < 0)
dev_err(sor->dev, "failed to attach SOR: %d\n", err);
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value |= SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
tegra_dc_commit(dc);
err = tegra_sor_wakeup(sor);
if (err < 0)
dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
if (output->panel)
drm_panel_enable(output->panel);
}
static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = {
.disable = tegra_sor_dp_disable,
.enable = tegra_sor_dp_enable,
.atomic_check = tegra_sor_encoder_atomic_check,
};
static void tegra_sor_disable_regulator(void *data)
{
struct regulator *reg = data;
regulator_disable(reg);
}
static int tegra_sor_enable_regulator(struct tegra_sor *sor, struct regulator *reg)
{
int err;
err = regulator_enable(reg);
if (err)
return err;
return devm_add_action_or_reset(sor->dev, tegra_sor_disable_regulator, reg);
}
static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
{
int err;
sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp");
if (IS_ERR(sor->avdd_io_supply))
return dev_err_probe(sor->dev, PTR_ERR(sor->avdd_io_supply),
"cannot get AVDD I/O supply\n");
err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
err);
return err;
}
sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll");
if (IS_ERR(sor->vdd_pll_supply))
return dev_err_probe(sor->dev, PTR_ERR(sor->vdd_pll_supply),
"cannot get VDD PLL supply\n");
err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
err);
return err;
}
sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
if (IS_ERR(sor->hdmi_supply))
return dev_err_probe(sor->dev, PTR_ERR(sor->hdmi_supply),
"cannot get HDMI supply\n");
err = tegra_sor_enable_regulator(sor, sor->hdmi_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
return err;
}
INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
return 0;
}
static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
.name = "HDMI",
.probe = tegra_sor_hdmi_probe,
.audio_enable = tegra_sor_hdmi_audio_enable,
.audio_disable = tegra_sor_hdmi_audio_disable,
};
static int tegra_sor_dp_probe(struct tegra_sor *sor)
{
int err;
sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp");
if (IS_ERR(sor->avdd_io_supply))
return PTR_ERR(sor->avdd_io_supply);
err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply);
if (err < 0)
return err;
sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll");
if (IS_ERR(sor->vdd_pll_supply))
return PTR_ERR(sor->vdd_pll_supply);
err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply);
if (err < 0)
return err;
return 0;
}
static const struct tegra_sor_ops tegra_sor_dp_ops = {
.name = "DP",
.probe = tegra_sor_dp_probe,
};
static int tegra_sor_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->host);
const struct drm_encoder_helper_funcs *helpers = NULL;
struct tegra_sor *sor = host1x_client_to_sor(client);
int connector = DRM_MODE_CONNECTOR_Unknown;
int encoder = DRM_MODE_ENCODER_NONE;
int err;
if (!sor->aux) {
if (sor->ops == &tegra_sor_hdmi_ops) {
connector = DRM_MODE_CONNECTOR_HDMIA;
encoder = DRM_MODE_ENCODER_TMDS;
helpers = &tegra_sor_hdmi_helpers;
} else if (sor->soc->supports_lvds) {
connector = DRM_MODE_CONNECTOR_LVDS;
encoder = DRM_MODE_ENCODER_LVDS;
}
} else {
if (sor->output.panel) {
connector = DRM_MODE_CONNECTOR_eDP;
encoder = DRM_MODE_ENCODER_TMDS;
helpers = &tegra_sor_dp_helpers;
} else {
connector = DRM_MODE_CONNECTOR_DisplayPort;
encoder = DRM_MODE_ENCODER_TMDS;
helpers = &tegra_sor_dp_helpers;
}
sor->link.ops = &tegra_sor_dp_link_ops;
sor->link.aux = sor->aux;
}
sor->output.dev = sor->dev;
drm_connector_init_with_ddc(drm, &sor->output.connector,
&tegra_sor_connector_funcs,
connector,
sor->output.ddc);
drm_connector_helper_add(&sor->output.connector,
&tegra_sor_connector_helper_funcs);
sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
drm_simple_encoder_init(drm, &sor->output.encoder, encoder);
drm_encoder_helper_add(&sor->output.encoder, helpers);
drm_connector_attach_encoder(&sor->output.connector,
&sor->output.encoder);
drm_connector_register(&sor->output.connector);
err = tegra_output_init(drm, &sor->output);
if (err < 0) {
dev_err(client->dev, "failed to initialize output: %d\n", err);
return err;
}
tegra_output_find_possible_crtcs(&sor->output, drm);
if (sor->aux) {
err = drm_dp_aux_attach(sor->aux, &sor->output);
if (err < 0) {
dev_err(sor->dev, "failed to attach DP: %d\n", err);
return err;
}
}
/*
* XXX: Remove this reset once proper hand-over from firmware to
* kernel is possible.
*/
if (sor->rst) {
err = pm_runtime_resume_and_get(sor->dev);
if (err < 0) {
dev_err(sor->dev, "failed to get runtime PM: %d\n", err);
return err;
}
err = reset_control_acquire(sor->rst);
if (err < 0) {
dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
err);
goto rpm_put;
}
err = reset_control_assert(sor->rst);
if (err < 0) {
dev_err(sor->dev, "failed to assert SOR reset: %d\n",
err);
goto rpm_put;
}
}
err = clk_prepare_enable(sor->clk);
if (err < 0) {
dev_err(sor->dev, "failed to enable clock: %d\n", err);
goto rpm_put;
}
usleep_range(1000, 3000);
if (sor->rst) {
err = reset_control_deassert(sor->rst);
if (err < 0) {
dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
err);
clk_disable_unprepare(sor->clk);
goto rpm_put;
}
reset_control_release(sor->rst);
pm_runtime_put(sor->dev);
}
err = clk_prepare_enable(sor->clk_safe);
if (err < 0) {
clk_disable_unprepare(sor->clk);
return err;
}
err = clk_prepare_enable(sor->clk_dp);
if (err < 0) {
clk_disable_unprepare(sor->clk_safe);
clk_disable_unprepare(sor->clk);
return err;
}
return 0;
rpm_put:
if (sor->rst)
pm_runtime_put(sor->dev);
return err;
}
static int tegra_sor_exit(struct host1x_client *client)
{
struct tegra_sor *sor = host1x_client_to_sor(client);
int err;
tegra_output_exit(&sor->output);
if (sor->aux) {
err = drm_dp_aux_detach(sor->aux);
if (err < 0) {
dev_err(sor->dev, "failed to detach DP: %d\n", err);
return err;
}
}
clk_disable_unprepare(sor->clk_safe);
clk_disable_unprepare(sor->clk_dp);
clk_disable_unprepare(sor->clk);
return 0;
}
static int tegra_sor_runtime_suspend(struct host1x_client *client)
{
struct tegra_sor *sor = host1x_client_to_sor(client);
struct device *dev = client->dev;
int err;
if (sor->rst) {
err = reset_control_assert(sor->rst);
if (err < 0) {
dev_err(dev, "failed to assert reset: %d\n", err);
return err;
}
reset_control_release(sor->rst);
}
usleep_range(1000, 2000);
clk_disable_unprepare(sor->clk);
pm_runtime_put_sync(dev);
return 0;
}
static int tegra_sor_runtime_resume(struct host1x_client *client)
{
struct tegra_sor *sor = host1x_client_to_sor(client);
struct device *dev = client->dev;
int err;
err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
}
err = clk_prepare_enable(sor->clk);
if (err < 0) {
dev_err(dev, "failed to enable clock: %d\n", err);
goto put_rpm;
}
usleep_range(1000, 2000);
if (sor->rst) {
err = reset_control_acquire(sor->rst);
if (err < 0) {
dev_err(dev, "failed to acquire reset: %d\n", err);
goto disable_clk;
}
err = reset_control_deassert(sor->rst);
if (err < 0) {
dev_err(dev, "failed to deassert reset: %d\n", err);
goto release_reset;
}
}
return 0;
release_reset:
reset_control_release(sor->rst);
disable_clk:
clk_disable_unprepare(sor->clk);
put_rpm:
pm_runtime_put_sync(dev);
return err;
}
static const struct host1x_client_ops sor_client_ops = {
.init = tegra_sor_init,
.exit = tegra_sor_exit,
.suspend = tegra_sor_runtime_suspend,
.resume = tegra_sor_runtime_resume,
};
static const u8 tegra124_sor_xbar_cfg[5] = {
0, 1, 2, 3, 4
};
static const struct tegra_sor_regs tegra124_sor_regs = {
.head_state0 = 0x05,
.head_state1 = 0x07,
.head_state2 = 0x09,
.head_state3 = 0x0b,
.head_state4 = 0x0d,
.head_state5 = 0x0f,
.pll0 = 0x17,
.pll1 = 0x18,
.pll2 = 0x19,
.pll3 = 0x1a,
.dp_padctl0 = 0x5c,
.dp_padctl2 = 0x73,
};
/* Tegra124 and Tegra132 have lanes 0 and 2 swapped. */
static const u8 tegra124_sor_lane_map[4] = {
2, 1, 0, 3,
};
static const u8 tegra124_sor_voltage_swing[4][4][4] = {
{
{ 0x13, 0x19, 0x1e, 0x28 },
{ 0x1e, 0x25, 0x2d, },
{ 0x28, 0x32, },
{ 0x3c, },
}, {
{ 0x12, 0x17, 0x1b, 0x25 },
{ 0x1c, 0x23, 0x2a, },
{ 0x25, 0x2f, },
{ 0x39, }
}, {
{ 0x12, 0x16, 0x1a, 0x22 },
{ 0x1b, 0x20, 0x27, },
{ 0x24, 0x2d, },
{ 0x36, },
}, {
{ 0x11, 0x14, 0x17, 0x1f },
{ 0x19, 0x1e, 0x24, },
{ 0x22, 0x2a, },
{ 0x32, },
},
};
static const u8 tegra124_sor_pre_emphasis[4][4][4] = {
{
{ 0x00, 0x09, 0x13, 0x25 },
{ 0x00, 0x0f, 0x1e, },
{ 0x00, 0x14, },
{ 0x00, },
}, {
{ 0x00, 0x0a, 0x14, 0x28 },
{ 0x00, 0x0f, 0x1e, },
{ 0x00, 0x14, },
{ 0x00 },
}, {
{ 0x00, 0x0a, 0x14, 0x28 },
{ 0x00, 0x0f, 0x1e, },
{ 0x00, 0x14, },
{ 0x00, },
}, {
{ 0x00, 0x0a, 0x14, 0x28 },
{ 0x00, 0x0f, 0x1e, },
{ 0x00, 0x14, },
{ 0x00, },
},
};
static const u8 tegra124_sor_post_cursor[4][4][4] = {
{
{ 0x00, 0x00, 0x00, 0x00 },
{ 0x00, 0x00, 0x00, },
{ 0x00, 0x00, },
{ 0x00, },
}, {
{ 0x02, 0x02, 0x04, 0x05 },
{ 0x02, 0x04, 0x05, },
{ 0x04, 0x05, },
{ 0x05, },
}, {
{ 0x04, 0x05, 0x08, 0x0b },
{ 0x05, 0x09, 0x0b, },
{ 0x08, 0x0a, },
{ 0x0b, },
}, {
{ 0x05, 0x09, 0x0b, 0x12 },
{ 0x09, 0x0d, 0x12, },
{ 0x0b, 0x0f, },
{ 0x12, },
},
};
static const u8 tegra124_sor_tx_pu[4][4][4] = {
{
{ 0x20, 0x30, 0x40, 0x60 },
{ 0x30, 0x40, 0x60, },
{ 0x40, 0x60, },
{ 0x60, },
}, {
{ 0x20, 0x20, 0x30, 0x50 },
{ 0x30, 0x40, 0x50, },
{ 0x40, 0x50, },
{ 0x60, },
}, {
{ 0x20, 0x20, 0x30, 0x40, },
{ 0x30, 0x30, 0x40, },
{ 0x40, 0x50, },
{ 0x60, },
}, {
{ 0x20, 0x20, 0x20, 0x40, },
{ 0x30, 0x30, 0x40, },
{ 0x40, 0x40, },
{ 0x60, },
},
};
static const struct tegra_sor_soc tegra124_sor = {
.supports_lvds = true,
.supports_hdmi = false,
.supports_dp = true,
.supports_audio = false,
.supports_hdcp = false,
.regs = &tegra124_sor_regs,
.has_nvdisplay = false,
.xbar_cfg = tegra124_sor_xbar_cfg,
.lane_map = tegra124_sor_lane_map,
.voltage_swing = tegra124_sor_voltage_swing,
.pre_emphasis = tegra124_sor_pre_emphasis,
.post_cursor = tegra124_sor_post_cursor,
.tx_pu = tegra124_sor_tx_pu,
};
static const u8 tegra132_sor_pre_emphasis[4][4][4] = {
{
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x01, 0x0e, 0x1d, },
{ 0x01, 0x13, },
{ 0x00, },
}, {
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x00, 0x0e, 0x1d, },
{ 0x00, 0x13, },
{ 0x00 },
}, {
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x00, 0x0e, 0x1d, },
{ 0x00, 0x13, },
{ 0x00, },
}, {
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x00, 0x0e, 0x1d, },
{ 0x00, 0x13, },
{ 0x00, },
},
};
static const struct tegra_sor_soc tegra132_sor = {
.supports_lvds = true,
.supports_hdmi = false,
.supports_dp = true,
.supports_audio = false,
.supports_hdcp = false,
.regs = &tegra124_sor_regs,
.has_nvdisplay = false,
.xbar_cfg = tegra124_sor_xbar_cfg,
.lane_map = tegra124_sor_lane_map,
.voltage_swing = tegra124_sor_voltage_swing,
.pre_emphasis = tegra132_sor_pre_emphasis,
.post_cursor = tegra124_sor_post_cursor,
.tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra210_sor_regs = {
.head_state0 = 0x05,
.head_state1 = 0x07,
.head_state2 = 0x09,
.head_state3 = 0x0b,
.head_state4 = 0x0d,
.head_state5 = 0x0f,
.pll0 = 0x17,
.pll1 = 0x18,
.pll2 = 0x19,
.pll3 = 0x1a,
.dp_padctl0 = 0x5c,
.dp_padctl2 = 0x73,
};
static const u8 tegra210_sor_xbar_cfg[5] = {
2, 1, 0, 3, 4
};
static const u8 tegra210_sor_lane_map[4] = {
0, 1, 2, 3,
};
static const struct tegra_sor_soc tegra210_sor = {
.supports_lvds = false,
.supports_hdmi = false,
.supports_dp = true,
.supports_audio = false,
.supports_hdcp = false,
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
.xbar_cfg = tegra210_sor_xbar_cfg,
.lane_map = tegra210_sor_lane_map,
.voltage_swing = tegra124_sor_voltage_swing,
.pre_emphasis = tegra124_sor_pre_emphasis,
.post_cursor = tegra124_sor_post_cursor,
.tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_soc tegra210_sor1 = {
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
.supports_audio = true,
.supports_hdcp = true,
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
.settings = tegra210_sor_hdmi_defaults,
.xbar_cfg = tegra210_sor_xbar_cfg,
.lane_map = tegra210_sor_lane_map,
.voltage_swing = tegra124_sor_voltage_swing,
.pre_emphasis = tegra124_sor_pre_emphasis,
.post_cursor = tegra124_sor_post_cursor,
.tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra186_sor_regs = {
.head_state0 = 0x151,
.head_state1 = 0x154,
.head_state2 = 0x157,
.head_state3 = 0x15a,
.head_state4 = 0x15d,
.head_state5 = 0x160,
.pll0 = 0x163,
.pll1 = 0x164,
.pll2 = 0x165,
.pll3 = 0x166,
.dp_padctl0 = 0x168,
.dp_padctl2 = 0x16a,
};
static const u8 tegra186_sor_voltage_swing[4][4][4] = {
{
{ 0x13, 0x19, 0x1e, 0x28 },
{ 0x1e, 0x25, 0x2d, },
{ 0x28, 0x32, },
{ 0x39, },
}, {
{ 0x12, 0x16, 0x1b, 0x25 },
{ 0x1c, 0x23, 0x2a, },
{ 0x25, 0x2f, },
{ 0x37, }
}, {
{ 0x12, 0x16, 0x1a, 0x22 },
{ 0x1b, 0x20, 0x27, },
{ 0x24, 0x2d, },
{ 0x35, },
}, {
{ 0x11, 0x14, 0x17, 0x1f },
{ 0x19, 0x1e, 0x24, },
{ 0x22, 0x2a, },
{ 0x32, },
},
};
static const u8 tegra186_sor_pre_emphasis[4][4][4] = {
{
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x01, 0x0e, 0x1d, },
{ 0x01, 0x13, },
{ 0x00, },
}, {
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x00, 0x0e, 0x1d, },
{ 0x00, 0x13, },
{ 0x00 },
}, {
{ 0x00, 0x08, 0x14, 0x24 },
{ 0x00, 0x0e, 0x1d, },
{ 0x00, 0x13, },
{ 0x00, },
}, {
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x00, 0x0e, 0x1d, },
{ 0x00, 0x13, },
{ 0x00, },
},
};
static const struct tegra_sor_soc tegra186_sor = {
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
.supports_audio = true,
.supports_hdcp = true,
.regs = &tegra186_sor_regs,
.has_nvdisplay = true,
.num_settings = ARRAY_SIZE(tegra186_sor_hdmi_defaults),
.settings = tegra186_sor_hdmi_defaults,
.xbar_cfg = tegra124_sor_xbar_cfg,
.lane_map = tegra124_sor_lane_map,
.voltage_swing = tegra186_sor_voltage_swing,
.pre_emphasis = tegra186_sor_pre_emphasis,
.post_cursor = tegra124_sor_post_cursor,
.tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra194_sor_regs = {
.head_state0 = 0x151,
.head_state1 = 0x155,
.head_state2 = 0x159,
.head_state3 = 0x15d,
.head_state4 = 0x161,
.head_state5 = 0x165,
.pll0 = 0x169,
.pll1 = 0x16a,
.pll2 = 0x16b,
.pll3 = 0x16c,
.dp_padctl0 = 0x16e,
.dp_padctl2 = 0x16f,
};
static const struct tegra_sor_soc tegra194_sor = {
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
.supports_audio = true,
.supports_hdcp = true,
.regs = &tegra194_sor_regs,
.has_nvdisplay = true,
.num_settings = ARRAY_SIZE(tegra194_sor_hdmi_defaults),
.settings = tegra194_sor_hdmi_defaults,
.xbar_cfg = tegra210_sor_xbar_cfg,
.lane_map = tegra124_sor_lane_map,
.voltage_swing = tegra186_sor_voltage_swing,
.pre_emphasis = tegra186_sor_pre_emphasis,
.post_cursor = tegra124_sor_post_cursor,
.tx_pu = tegra124_sor_tx_pu,
};
static const struct of_device_id tegra_sor_of_match[] = {
{ .compatible = "nvidia,tegra194-sor", .data = &tegra194_sor },
{ .compatible = "nvidia,tegra186-sor", .data = &tegra186_sor },
{ .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
{ .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
{ .compatible = "nvidia,tegra132-sor", .data = &tegra132_sor },
{ .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
static int tegra_sor_parse_dt(struct tegra_sor *sor)
{
struct device_node *np = sor->dev->of_node;
u32 xbar_cfg[5];
unsigned int i;
u32 value;
int err;
if (sor->soc->has_nvdisplay) {
err = of_property_read_u32(np, "nvidia,interface", &value);
if (err < 0)
return err;
sor->index = value;
/*
* override the default that we already set for Tegra210 and
* earlier
*/
sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index;
} else {
if (!sor->soc->supports_audio)
sor->index = 0;
else
sor->index = 1;
}
err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5);
if (err < 0) {
/* fall back to default per-SoC XBAR configuration */
for (i = 0; i < 5; i++)
sor->xbar_cfg[i] = sor->soc->xbar_cfg[i];
} else {
/* copy cells to SOR XBAR configuration */
for (i = 0; i < 5; i++)
sor->xbar_cfg[i] = xbar_cfg[i];
}
return 0;
}
static irqreturn_t tegra_sor_irq(int irq, void *data)
{
struct tegra_sor *sor = data;
u32 value;
value = tegra_sor_readl(sor, SOR_INT_STATUS);
tegra_sor_writel(sor, value, SOR_INT_STATUS);
if (value & SOR_INT_CODEC_SCRATCH0) {
value = tegra_sor_readl(sor, SOR_AUDIO_HDA_CODEC_SCRATCH0);
if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
unsigned int format;
format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
tegra_hda_parse_format(format, &sor->format);
if (sor->ops->audio_enable)
sor->ops->audio_enable(sor);
} else {
if (sor->ops->audio_disable)
sor->ops->audio_disable(sor);
}
}
return IRQ_HANDLED;
}
static int tegra_sor_probe(struct platform_device *pdev)
{
struct device_node *np;
struct tegra_sor *sor;
int err;
sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
if (!sor)
return -ENOMEM;
sor->soc = of_device_get_match_data(&pdev->dev);
sor->output.dev = sor->dev = &pdev->dev;
sor->settings = devm_kmemdup(&pdev->dev, sor->soc->settings,
sor->soc->num_settings *
sizeof(*sor->settings),
GFP_KERNEL);
if (!sor->settings)
return -ENOMEM;
sor->num_settings = sor->soc->num_settings;
np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0);
if (np) {
sor->aux = drm_dp_aux_find_by_of_node(np);
of_node_put(np);
if (!sor->aux)
return -EPROBE_DEFER;
if (get_device(sor->aux->dev))
sor->output.ddc = &sor->aux->ddc;
}
if (!sor->aux) {
if (sor->soc->supports_hdmi) {
sor->ops = &tegra_sor_hdmi_ops;
sor->pad = TEGRA_IO_PAD_HDMI;
} else if (sor->soc->supports_lvds) {
dev_err(&pdev->dev, "LVDS not supported yet\n");
return -ENODEV;
} else {
dev_err(&pdev->dev, "unknown (non-DP) support\n");
return -ENODEV;
}
} else {
np = of_parse_phandle(pdev->dev.of_node, "nvidia,panel", 0);
/*
* No need to keep this around since we only use it as a check
* to see if a panel is connected (eDP) or not (DP).
*/
of_node_put(np);
sor->ops = &tegra_sor_dp_ops;
sor->pad = TEGRA_IO_PAD_LVDS;
}
err = tegra_sor_parse_dt(sor);
if (err < 0)
goto put_aux;
err = tegra_output_probe(&sor->output);
if (err < 0) {
dev_err_probe(&pdev->dev, err, "failed to probe output\n");
goto put_aux;
}
if (sor->ops && sor->ops->probe) {
err = sor->ops->probe(sor);
if (err < 0) {
dev_err(&pdev->dev, "failed to probe %s: %d\n",
sor->ops->name, err);
goto remove;
}
}
sor->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sor->regs)) {
err = PTR_ERR(sor->regs);
goto remove;
}
err = platform_get_irq(pdev, 0);
if (err < 0)
goto remove;
sor->irq = err;
err = devm_request_irq(sor->dev, sor->irq, tegra_sor_irq, 0,
dev_name(sor->dev), sor);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
goto remove;
}
sor->rst = devm_reset_control_get_exclusive_released(&pdev->dev, "sor");
if (IS_ERR(sor->rst)) {
err = PTR_ERR(sor->rst);
if (err != -EBUSY || WARN_ON(!pdev->dev.pm_domain)) {
dev_err(&pdev->dev, "failed to get reset control: %d\n",
err);
goto remove;
}
/*
* At this point, the reset control is most likely being used
* by the generic power domain implementation. With any luck
* the power domain will have taken care of resetting the SOR
* and we don't have to do anything.
*/
sor->rst = NULL;
}
sor->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sor->clk)) {
err = PTR_ERR(sor->clk);
dev_err(&pdev->dev, "failed to get module clock: %d\n", err);
goto remove;
}
if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
struct device_node *np = pdev->dev.of_node;
const char *name;
/*
* For backwards compatibility with Tegra210 device trees,
* fall back to the old clock name "source" if the new "out"
* clock is not available.
*/
if (of_property_match_string(np, "clock-names", "out") < 0)
name = "source";
else
name = "out";
sor->clk_out = devm_clk_get(&pdev->dev, name);
if (IS_ERR(sor->clk_out)) {
err = PTR_ERR(sor->clk_out);
dev_err(sor->dev, "failed to get %s clock: %d\n",
name, err);
goto remove;
}
} else {
/* fall back to the module clock on SOR0 (eDP/LVDS only) */
sor->clk_out = sor->clk;
}
sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(sor->clk_parent)) {
err = PTR_ERR(sor->clk_parent);
dev_err(&pdev->dev, "failed to get parent clock: %d\n", err);
goto remove;
}
sor->clk_safe = devm_clk_get(&pdev->dev, "safe");
if (IS_ERR(sor->clk_safe)) {
err = PTR_ERR(sor->clk_safe);
dev_err(&pdev->dev, "failed to get safe clock: %d\n", err);
goto remove;
}
sor->clk_dp = devm_clk_get(&pdev->dev, "dp");
if (IS_ERR(sor->clk_dp)) {
err = PTR_ERR(sor->clk_dp);
dev_err(&pdev->dev, "failed to get DP clock: %d\n", err);
goto remove;
}
/*
* Starting with Tegra186, the BPMP provides an implementation for
* the pad output clock, so we have to look it up from device tree.
*/
sor->clk_pad = devm_clk_get(&pdev->dev, "pad");
if (IS_ERR(sor->clk_pad)) {
if (sor->clk_pad != ERR_PTR(-ENOENT)) {
err = PTR_ERR(sor->clk_pad);
goto remove;
}
/*
* If the pad output clock is not available, then we assume
* we're on Tegra210 or earlier and have to provide our own
* implementation.
*/
sor->clk_pad = NULL;
}
/*
 * The bootloader may have set up the SOR such that its module clock
* is sourced by one of the display PLLs. However, that doesn't work
* without properly having set up other bits of the SOR.
*/
err = clk_set_parent(sor->clk_out, sor->clk_safe);
if (err < 0) {
dev_err(&pdev->dev, "failed to use safe clock: %d\n", err);
goto remove;
}
platform_set_drvdata(pdev, sor);
pm_runtime_enable(&pdev->dev);
host1x_client_init(&sor->client);
sor->client.ops = &sor_client_ops;
sor->client.dev = &pdev->dev;
/*
* On Tegra210 and earlier, provide our own implementation for the
* pad output clock.
*/
if (!sor->clk_pad) {
char *name;
name = devm_kasprintf(sor->dev, GFP_KERNEL, "sor%u_pad_clkout",
sor->index);
if (!name) {
err = -ENOMEM;
goto uninit;
}
err = host1x_client_resume(&sor->client);
if (err < 0) {
dev_err(sor->dev, "failed to resume: %d\n", err);
goto uninit;
}
sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
host1x_client_suspend(&sor->client);
}
if (IS_ERR(sor->clk_pad)) {
err = PTR_ERR(sor->clk_pad);
dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
err);
goto uninit;
}
err = __host1x_client_register(&sor->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
goto uninit;
}
return 0;
uninit:
host1x_client_exit(&sor->client);
pm_runtime_disable(&pdev->dev);
remove:
if (sor->aux)
sor->output.ddc = NULL;
tegra_output_remove(&sor->output);
put_aux:
if (sor->aux)
put_device(sor->aux->dev);
return err;
}
static void tegra_sor_remove(struct platform_device *pdev)
{
struct tegra_sor *sor = platform_get_drvdata(pdev);
host1x_client_unregister(&sor->client);
pm_runtime_disable(&pdev->dev);
if (sor->aux) {
put_device(sor->aux->dev);
sor->output.ddc = NULL;
}
tegra_output_remove(&sor->output);
}
static int __maybe_unused tegra_sor_suspend(struct device *dev)
{
struct tegra_sor *sor = dev_get_drvdata(dev);
int err;
err = tegra_output_suspend(&sor->output);
if (err < 0) {
dev_err(dev, "failed to suspend output: %d\n", err);
return err;
}
if (sor->hdmi_supply) {
err = regulator_disable(sor->hdmi_supply);
if (err < 0) {
tegra_output_resume(&sor->output);
return err;
}
}
return 0;
}
static int __maybe_unused tegra_sor_resume(struct device *dev)
{
struct tegra_sor *sor = dev_get_drvdata(dev);
int err;
if (sor->hdmi_supply) {
err = regulator_enable(sor->hdmi_supply);
if (err < 0)
return err;
}
err = tegra_output_resume(&sor->output);
if (err < 0) {
dev_err(dev, "failed to resume output: %d\n", err);
if (sor->hdmi_supply)
regulator_disable(sor->hdmi_supply);
return err;
}
return 0;
}
static const struct dev_pm_ops tegra_sor_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_sor_suspend, tegra_sor_resume)
};
struct platform_driver tegra_sor_driver = {
.driver = {
.name = "tegra-sor",
.of_match_table = tegra_sor_of_match,
.pm = &tegra_sor_pm_ops,
},
.probe = tegra_sor_probe,
.remove_new = tegra_sor_remove,
};
| linux-master | drivers/gpu/drm/tegra/sor.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 NVIDIA Corporation */
#include <linux/dma-fence-array.h>
#include <linux/dma-mapping.h>
#include <linux/file.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/sync_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include "drm.h"
#include "gem.h"
#include "submit.h"
#include "uapi.h"
#define SUBMIT_ERR(context, fmt, ...) \
dev_err_ratelimited(context->client->base.dev, \
"%s: job submission failed: " fmt "\n", \
current->comm, ##__VA_ARGS__)
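/*
 * A gather BO wraps the coherent DMA buffer that holds the command words
 * copied from userspace for a single job submission. It is reference
 * counted and exposed to host1x through the host1x_bo_ops below.
 */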
struct gather_bo {
struct host1x_bo base;
struct kref ref;
struct device *dev;
u32 *gather_data;
dma_addr_t gather_data_dma;
size_t gather_data_words;
};
static struct host1x_bo *gather_bo_get(struct host1x_bo *host_bo)
{
struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
kref_get(&bo->ref);
return host_bo;
}
static void gather_bo_release(struct kref *ref)
{
struct gather_bo *bo = container_of(ref, struct gather_bo, ref);
dma_free_attrs(bo->dev, bo->gather_data_words * 4, bo->gather_data, bo->gather_data_dma,
0);
kfree(bo);
}
static void gather_bo_put(struct host1x_bo *host_bo)
{
struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
kref_put(&bo->ref, gather_bo_release);
}
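/*
 * "Pinning" the gather BO for a client device builds a scatterlist for
 * the coherent allocation and maps it into that device's DMA (IOMMU)
 * address space so the command words can be fetched from it.
 */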
static struct host1x_bo_mapping *
gather_bo_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction)
{
struct gather_bo *gather = container_of(bo, struct gather_bo, base);
struct host1x_bo_mapping *map;
int err;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return ERR_PTR(-ENOMEM);
kref_init(&map->ref);
map->bo = host1x_bo_get(bo);
map->direction = direction;
map->dev = dev;
map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
if (!map->sgt) {
err = -ENOMEM;
goto free;
}
err = dma_get_sgtable(gather->dev, map->sgt, gather->gather_data, gather->gather_data_dma,
gather->gather_data_words * 4);
if (err)
goto free_sgt;
err = dma_map_sgtable(dev, map->sgt, direction, 0);
if (err)
goto free_sgt;
map->phys = sg_dma_address(map->sgt->sgl);
map->size = gather->gather_data_words * 4;
map->chunks = err;
return map;
free_sgt:
sg_free_table(map->sgt);
kfree(map->sgt);
free:
kfree(map);
return ERR_PTR(err);
}
static void gather_bo_unpin(struct host1x_bo_mapping *map)
{
if (!map)
return;
dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
sg_free_table(map->sgt);
kfree(map->sgt);
host1x_bo_put(map->bo);
kfree(map);
}
static void *gather_bo_mmap(struct host1x_bo *host_bo)
{
struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
return bo->gather_data;
}
static void gather_bo_munmap(struct host1x_bo *host_bo, void *addr)
{
}
static const struct host1x_bo_ops gather_bo_ops = {
.get = gather_bo_get,
.put = gather_bo_put,
.pin = gather_bo_pin,
.unpin = gather_bo_unpin,
.mmap = gather_bo_mmap,
.munmap = gather_bo_munmap,
};
static struct tegra_drm_mapping *
tegra_drm_mapping_get(struct tegra_drm_context *context, u32 id)
{
struct tegra_drm_mapping *mapping;
xa_lock(&context->mappings);
mapping = xa_load(&context->mappings, id);
if (mapping)
kref_get(&mapping->ref);
xa_unlock(&context->mappings);
return mapping;
}
static void *alloc_copy_user_array(void __user *from, size_t count, size_t size)
{
size_t copy_len;
void *data;
if (check_mul_overflow(count, size, &copy_len))
return ERR_PTR(-EINVAL);
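/* cap user-supplied arrays at 16 KiB to bound the kernel allocation */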
if (copy_len > 0x4000)
return ERR_PTR(-E2BIG);
data = vmemdup_user(from, copy_len);
if (IS_ERR(data))
return ERR_CAST(data);
return data;
}
static int submit_copy_gather_data(struct gather_bo **pbo, struct device *dev,
struct tegra_drm_context *context,
struct drm_tegra_channel_submit *args)
{
struct gather_bo *bo;
size_t copy_len;
if (args->gather_data_words == 0) {
SUBMIT_ERR(context, "gather_data_words cannot be zero");
return -EINVAL;
}
if (check_mul_overflow((size_t)args->gather_data_words, (size_t)4, &copy_len)) {
SUBMIT_ERR(context, "gather_data_words is too large");
return -EINVAL;
}
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo) {
SUBMIT_ERR(context, "failed to allocate memory for bo info");
return -ENOMEM;
}
host1x_bo_init(&bo->base, &gather_bo_ops);
kref_init(&bo->ref);
bo->dev = dev;
bo->gather_data = dma_alloc_attrs(dev, copy_len, &bo->gather_data_dma,
GFP_KERNEL | __GFP_NOWARN, 0);
if (!bo->gather_data) {
SUBMIT_ERR(context, "failed to allocate memory for gather data");
kfree(bo);
return -ENOMEM;
}
if (copy_from_user(bo->gather_data, u64_to_user_ptr(args->gather_data_ptr), copy_len)) {
SUBMIT_ERR(context, "failed to copy gather data from userspace");
dma_free_attrs(dev, copy_len, bo->gather_data, bo->gather_data_dma, 0);
kfree(bo);
return -EFAULT;
}
bo->gather_data_words = args->gather_data_words;
*pbo = bo;
return 0;
}
static int submit_write_reloc(struct tegra_drm_context *context, struct gather_bo *bo,
struct drm_tegra_submit_buf *buf, struct tegra_drm_mapping *mapping)
{
/* TODO check that target_offset is within bounds */
dma_addr_t iova = mapping->iova + buf->reloc.target_offset;
u32 written_ptr;
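/*
 * With 40-bit DMA addresses, bit 39 of the patched IOVA requests the
 * alternate sector layout for this access (see the
 * DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT flag).
 */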
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (buf->flags & DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT)
iova |= BIT_ULL(39);
#endif
written_ptr = iova >> buf->reloc.shift;
if (buf->reloc.gather_offset_words >= bo->gather_data_words) {
SUBMIT_ERR(context,
"relocation has too large gather offset (%u vs gather length %zu)",
buf->reloc.gather_offset_words, bo->gather_data_words);
return -EINVAL;
}
buf->reloc.gather_offset_words = array_index_nospec(buf->reloc.gather_offset_words,
bo->gather_data_words);
bo->gather_data[buf->reloc.gather_offset_words] = written_ptr;
return 0;
}
static int submit_process_bufs(struct tegra_drm_context *context, struct gather_bo *bo,
struct drm_tegra_channel_submit *args,
struct tegra_drm_submit_data *job_data)
{
struct tegra_drm_used_mapping *mappings;
struct drm_tegra_submit_buf *bufs;
int err;
u32 i;
bufs = alloc_copy_user_array(u64_to_user_ptr(args->bufs_ptr), args->num_bufs,
sizeof(*bufs));
if (IS_ERR(bufs)) {
SUBMIT_ERR(context, "failed to copy bufs array from userspace");
return PTR_ERR(bufs);
}
mappings = kcalloc(args->num_bufs, sizeof(*mappings), GFP_KERNEL);
if (!mappings) {
SUBMIT_ERR(context, "failed to allocate memory for mapping info");
err = -ENOMEM;
goto done;
}
for (i = 0; i < args->num_bufs; i++) {
struct drm_tegra_submit_buf *buf = &bufs[i];
struct tegra_drm_mapping *mapping;
if (buf->flags & ~DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT) {
SUBMIT_ERR(context, "invalid flag specified for buffer");
err = -EINVAL;
goto drop_refs;
}
mapping = tegra_drm_mapping_get(context, buf->mapping);
if (!mapping) {
SUBMIT_ERR(context, "invalid mapping ID '%u' for buffer", buf->mapping);
err = -EINVAL;
goto drop_refs;
}
err = submit_write_reloc(context, bo, buf, mapping);
if (err) {
tegra_drm_mapping_put(mapping);
goto drop_refs;
}
mappings[i].mapping = mapping;
mappings[i].flags = buf->flags;
}
job_data->used_mappings = mappings;
job_data->num_used_mappings = i;
err = 0;
goto done;
drop_refs:
while (i--)
tegra_drm_mapping_put(mappings[i].mapping);
kfree(mappings);
job_data->used_mappings = NULL;
done:
kvfree(bufs);
return err;
}
static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,
struct xarray *syncpoints, struct drm_tegra_channel_submit *args)
{
struct host1x_syncpt *sp;
if (args->syncpt.flags) {
SUBMIT_ERR(context, "invalid flag specified for syncpt");
return -EINVAL;
}
/* Syncpt ref will be dropped on job release */
sp = xa_load(syncpoints, args->syncpt.id);
if (!sp) {
SUBMIT_ERR(context, "syncpoint specified in syncpt was not allocated");
return -EINVAL;
}
job->syncpt = host1x_syncpt_get(sp);
job->syncpt_incrs = args->syncpt.increments;
return 0;
}
static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,
struct drm_tegra_submit_cmd_gather_uptr *cmd,
struct gather_bo *bo, u32 *offset,
struct tegra_drm_submit_data *job_data,
u32 *class)
{
u32 next_offset;
if (cmd->reserved[0] || cmd->reserved[1] || cmd->reserved[2]) {
SUBMIT_ERR(context, "non-zero reserved field in GATHER_UPTR command");
return -EINVAL;
}
/* Check for maximum gather size */
if (cmd->words > 16383) {
SUBMIT_ERR(context, "too many words in GATHER_UPTR command");
return -EINVAL;
}
if (check_add_overflow(*offset, cmd->words, &next_offset)) {
SUBMIT_ERR(context, "too many total words in job");
return -EINVAL;
}
if (next_offset > bo->gather_data_words) {
SUBMIT_ERR(context, "GATHER_UPTR command overflows gather data");
return -EINVAL;
}
if (tegra_drm_fw_validate(context->client, bo->gather_data, *offset,
cmd->words, job_data, class)) {
SUBMIT_ERR(context, "job was rejected by firewall");
return -EINVAL;
}
host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);
*offset = next_offset;
return 0;
}
static struct host1x_job *
submit_create_job(struct tegra_drm_context *context, struct gather_bo *bo,
struct drm_tegra_channel_submit *args, struct tegra_drm_submit_data *job_data,
struct xarray *syncpoints)
{
struct drm_tegra_submit_cmd *cmds;
u32 i, gather_offset = 0, class;
struct host1x_job *job;
int err;
/* Set initial class for firewall. */
class = context->client->base.class;
cmds = alloc_copy_user_array(u64_to_user_ptr(args->cmds_ptr), args->num_cmds,
sizeof(*cmds));
if (IS_ERR(cmds)) {
SUBMIT_ERR(context, "failed to copy cmds array from userspace");
return ERR_CAST(cmds);
}
job = host1x_job_alloc(context->channel, args->num_cmds, 0, true);
if (!job) {
SUBMIT_ERR(context, "failed to allocate memory for job");
job = ERR_PTR(-ENOMEM);
goto done;
}
err = submit_get_syncpt(context, job, syncpoints, args);
if (err < 0)
goto free_job;
job->client = &context->client->base;
job->class = context->client->base.class;
job->serialize = true;
for (i = 0; i < args->num_cmds; i++) {
struct drm_tegra_submit_cmd *cmd = &cmds[i];
if (cmd->flags) {
SUBMIT_ERR(context, "unknown flags given for cmd");
err = -EINVAL;
goto free_job;
}
if (cmd->type == DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR) {
err = submit_job_add_gather(job, context, &cmd->gather_uptr, bo,
&gather_offset, job_data, &class);
if (err)
goto free_job;
} else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT) {
if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
SUBMIT_ERR(context, "non-zero reserved value");
err = -EINVAL;
goto free_job;
}
host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
false, class);
} else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE) {
if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
SUBMIT_ERR(context, "non-zero reserved value");
err = -EINVAL;
goto free_job;
}
if (cmd->wait_syncpt.id != args->syncpt.id) {
SUBMIT_ERR(context, "syncpoint ID in CMD_WAIT_SYNCPT_RELATIVE is not used by the job");
err = -EINVAL;
goto free_job;
}
host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
true, class);
} else {
SUBMIT_ERR(context, "unknown cmd type");
err = -EINVAL;
goto free_job;
}
}
if (gather_offset == 0) {
SUBMIT_ERR(context, "job must have at least one gather");
err = -EINVAL;
goto free_job;
}
goto done;
free_job:
host1x_job_put(job);
job = ERR_PTR(err);
done:
kvfree(cmds);
return job;
}
static void release_job(struct host1x_job *job)
{
struct tegra_drm_client *client = container_of(job->client, struct tegra_drm_client, base);
struct tegra_drm_submit_data *job_data = job->user_data;
u32 i;
if (job->memory_context)
host1x_memory_context_put(job->memory_context);
for (i = 0; i < job_data->num_used_mappings; i++)
tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
kfree(job_data->used_mappings);
kfree(job_data);
pm_runtime_mark_last_busy(client->base.dev);
pm_runtime_put_autosuspend(client->base.dev);
}
int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_channel_submit *args = data;
struct tegra_drm_submit_data *job_data;
struct drm_syncobj *syncobj = NULL;
struct tegra_drm_context *context;
struct host1x_job *job;
struct gather_bo *bo;
u32 i;
int err;
mutex_lock(&fpriv->lock);
context = xa_load(&fpriv->contexts, args->context);
if (!context) {
mutex_unlock(&fpriv->lock);
pr_err_ratelimited("%s: %s: invalid channel context '%#x'", __func__,
current->comm, args->context);
return -EINVAL;
}
if (args->syncobj_in) {
struct dma_fence *fence;
err = drm_syncobj_find_fence(file, args->syncobj_in, 0, 0, &fence);
if (err) {
SUBMIT_ERR(context, "invalid syncobj_in '%#x'", args->syncobj_in);
goto unlock;
}
err = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(10000));
dma_fence_put(fence);
if (err) {
SUBMIT_ERR(context, "wait for syncobj_in timed out");
goto unlock;
}
}
if (args->syncobj_out) {
syncobj = drm_syncobj_find(file, args->syncobj_out);
if (!syncobj) {
SUBMIT_ERR(context, "invalid syncobj_out '%#x'", args->syncobj_out);
err = -ENOENT;
goto unlock;
}
}
/* Allocate gather BO and copy gather words in. */
err = submit_copy_gather_data(&bo, drm->dev, context, args);
if (err)
goto unlock;
job_data = kzalloc(sizeof(*job_data), GFP_KERNEL);
if (!job_data) {
SUBMIT_ERR(context, "failed to allocate memory for job data");
err = -ENOMEM;
goto put_bo;
}
/* Get data buffer mappings and do relocation patching. */
err = submit_process_bufs(context, bo, args, job_data);
if (err)
goto free_job_data;
/* Allocate host1x_job and add gathers and waits to it. */
job = submit_create_job(context, bo, args, job_data, &fpriv->syncpoints);
if (IS_ERR(job)) {
err = PTR_ERR(job);
goto free_job_data;
}
/* Map gather data for Host1x. */
err = host1x_job_pin(job, context->client->base.dev);
if (err) {
SUBMIT_ERR(context, "failed to pin job: %d", err);
goto put_job;
}
if (context->client->ops->get_streamid_offset) {
err = context->client->ops->get_streamid_offset(
context->client, &job->engine_streamid_offset);
if (err) {
SUBMIT_ERR(context, "failed to get streamid offset: %d", err);
goto unpin_job;
}
}
if (context->memory_context && context->client->ops->can_use_memory_ctx) {
bool supported;
err = context->client->ops->can_use_memory_ctx(context->client, &supported);
if (err) {
SUBMIT_ERR(context, "failed to detect if engine can use memory context: %d", err);
goto unpin_job;
}
if (supported) {
job->memory_context = context->memory_context;
host1x_memory_context_get(job->memory_context);
}
} else if (context->client->ops->get_streamid_offset) {
/*
* Job submission will need to temporarily change stream ID,
* so we need to tell it what to change it back to.
*/
if (!tegra_dev_iommu_get_stream_id(context->client->base.dev,
&job->engine_fallback_streamid))
job->engine_fallback_streamid = TEGRA_STREAM_ID_BYPASS;
}
/* Boot engine. */
err = pm_runtime_resume_and_get(context->client->base.dev);
if (err < 0) {
SUBMIT_ERR(context, "could not power up engine: %d", err);
goto put_memory_context;
}
job->user_data = job_data;
job->release = release_job;
job->timeout = 10000;
/*
* job_data is now part of job reference counting, so don't release
* it from here.
*/
job_data = NULL;
/* Submit job to hardware. */
err = host1x_job_submit(job);
if (err) {
SUBMIT_ERR(context, "host1x job submission failed: %d", err);
goto unpin_job;
}
/* Return postfences to userspace and add fences to DMA reservations. */
args->syncpt.value = job->syncpt_end;
if (syncobj) {
struct dma_fence *fence = host1x_fence_create(job->syncpt, job->syncpt_end, true);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
SUBMIT_ERR(context, "failed to create postfence: %d", err);
}
drm_syncobj_replace_fence(syncobj, fence);
}
goto put_job;
put_memory_context:
if (job->memory_context)
host1x_memory_context_put(job->memory_context);
unpin_job:
host1x_job_unpin(job);
put_job:
host1x_job_put(job);
free_job_data:
if (job_data && job_data->used_mappings) {
for (i = 0; i < job_data->num_used_mappings; i++)
tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
kfree(job_data->used_mappings);
}
kfree(job_data);
put_bo:
gather_bo_put(&bo->base);
unlock:
if (syncobj)
drm_syncobj_put(syncobj);
mutex_unlock(&fpriv->lock);
return err;
}
| linux-master | drivers/gpu/drm/tegra/submit.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, NVIDIA Corporation.
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/pci_ids.h>
#include <linux/iopoll.h>
#include "falcon.h"
#include "drm.h"
enum falcon_memory {
FALCON_MEMORY_IMEM,
FALCON_MEMORY_DATA,
};
static void falcon_writel(struct falcon *falcon, u32 value, u32 offset)
{
writel(value, falcon->regs + offset);
}
int falcon_wait_idle(struct falcon *falcon)
{
u32 value;
return readl_poll_timeout(falcon->regs + FALCON_IDLESTATE, value,
(value == 0), 10, 100000);
}
static int falcon_dma_wait_idle(struct falcon *falcon)
{
u32 value;
return readl_poll_timeout(falcon->regs + FALCON_DMATRFCMD, value,
(value & FALCON_DMATRFCMD_IDLE), 10, 100000);
}
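/*
 * Issue a single 256-byte DMA transfer: "base" is the offset into the
 * external firmware buffer (relative to FALCON_DMATRFBASE) and "offset"
 * is the destination offset within the Falcon's IMEM or DMEM.
 */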
static int falcon_copy_chunk(struct falcon *falcon,
phys_addr_t base,
unsigned long offset,
enum falcon_memory target)
{
u32 cmd = FALCON_DMATRFCMD_SIZE_256B;
if (target == FALCON_MEMORY_IMEM)
cmd |= FALCON_DMATRFCMD_IMEM;
/*
* Use second DMA context (i.e. the one for firmware). Strictly
* speaking, at this point both DMA contexts point to the firmware
* stream ID, but this register's value will be reused by the firmware
* for later DMA transactions, so we need to use the correct value.
*/
cmd |= FALCON_DMATRFCMD_DMACTX(1);
falcon_writel(falcon, offset, FALCON_DMATRFMOFFS);
falcon_writel(falcon, base, FALCON_DMATRFFBOFFS);
falcon_writel(falcon, cmd, FALCON_DMATRFCMD);
return falcon_dma_wait_idle(falcon);
}
static void falcon_copy_firmware_image(struct falcon *falcon,
const struct firmware *firmware)
{
u32 *virt = falcon->firmware.virt;
size_t i;
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
virt[i] = le32_to_cpu(((__le32 *)firmware->data)[i]);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
{
struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt;
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
if (bin->magic != PCI_VENDOR_ID_NVIDIA && bin->magic != 0x10fe) {
dev_err(falcon->dev, "incorrect firmware magic\n");
return -EINVAL;
}
/* currently only version 1 is supported */
if (bin->version != 1) {
dev_err(falcon->dev, "unsupported firmware version\n");
return -EINVAL;
}
/* check that the firmware size is consistent */
if (bin->size > falcon->firmware.size) {
dev_err(falcon->dev, "firmware image size inconsistency\n");
return -EINVAL;
}
os = falcon->firmware.virt + bin->os_header_offset;
falcon->firmware.bin_data.size = bin->os_size;
falcon->firmware.bin_data.offset = bin->os_data_offset;
falcon->firmware.code.offset = os->code_offset;
falcon->firmware.code.size = os->code_size;
falcon->firmware.data.offset = os->data_offset;
falcon->firmware.data.size = os->data_size;
return 0;
}
int falcon_read_firmware(struct falcon *falcon, const char *name)
{
int err;
/* request_firmware prints error if it fails */
err = request_firmware(&falcon->firmware.firmware, name, falcon->dev);
if (err < 0)
return err;
falcon->firmware.size = falcon->firmware.firmware->size;
return 0;
}
int falcon_load_firmware(struct falcon *falcon)
{
const struct firmware *firmware = falcon->firmware.firmware;
int err;
/* copy the firmware image into the local buffer; this also takes care of endianness */
falcon_copy_firmware_image(falcon, firmware);
/* parse the image data */
err = falcon_parse_firmware_image(falcon);
if (err < 0) {
dev_err(falcon->dev, "failed to parse firmware image\n");
return err;
}
release_firmware(firmware);
falcon->firmware.firmware = NULL;
return 0;
}
int falcon_init(struct falcon *falcon)
{
falcon->firmware.virt = NULL;
return 0;
}
void falcon_exit(struct falcon *falcon)
{
if (falcon->firmware.firmware)
release_firmware(falcon->firmware.firmware);
}
int falcon_boot(struct falcon *falcon)
{
unsigned long offset;
u32 value;
int err;
if (!falcon->firmware.virt)
return -EINVAL;
err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
(value & (FALCON_DMACTL_IMEM_SCRUBBING |
FALCON_DMACTL_DMEM_SCRUBBING)) == 0,
10, 10000);
if (err < 0)
return err;
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
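/* the base register holds the address in units of 256 bytes, hence the shift by 8 */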
falcon_writel(falcon, (falcon->firmware.iova +
falcon->firmware.bin_data.offset) >> 8,
FALCON_DMATRFBASE);
/* copy the data segment into Falcon internal memory */
for (offset = 0; offset < falcon->firmware.data.size; offset += 256)
falcon_copy_chunk(falcon,
falcon->firmware.data.offset + offset,
offset, FALCON_MEMORY_DATA);
/* copy the code segment into Falcon internal memory */
for (offset = 0; offset < falcon->firmware.code.size; offset += 256)
falcon_copy_chunk(falcon, falcon->firmware.code.offset + offset,
offset, FALCON_MEMORY_IMEM);
/* setup falcon interrupts */
falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
FALCON_IRQMSET_SWGEN1 |
FALCON_IRQMSET_SWGEN0 |
FALCON_IRQMSET_EXTERR |
FALCON_IRQMSET_HALT |
FALCON_IRQMSET_WDTMR,
FALCON_IRQMSET);
falcon_writel(falcon, FALCON_IRQDEST_EXT(0xff) |
FALCON_IRQDEST_SWGEN1 |
FALCON_IRQDEST_SWGEN0 |
FALCON_IRQDEST_EXTERR |
FALCON_IRQDEST_HALT,
FALCON_IRQDEST);
/* enable interface */
falcon_writel(falcon, FALCON_ITFEN_MTHDEN |
FALCON_ITFEN_CTXEN,
FALCON_ITFEN);
/* boot falcon */
falcon_writel(falcon, 0x00000000, FALCON_BOOTVEC);
falcon_writel(falcon, FALCON_CPUCTL_STARTCPU, FALCON_CPUCTL);
err = falcon_wait_idle(falcon);
if (err < 0) {
dev_err(falcon->dev, "Falcon boot failed due to timeout\n");
return err;
}
return 0;
}
void falcon_execute_method(struct falcon *falcon, u32 method, u32 data)
{
falcon_writel(falcon, method >> 2, FALCON_UCLASS_METHOD_OFFSET);
falcon_writel(falcon, data, FALCON_UCLASS_METHOD_DATA);
}
| linux-master | drivers/gpu/drm/tegra/falcon.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 NVIDIA Corporation
*/
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
#include "dc.h"
#include "drm.h"
#include "dsi.h"
#include "mipi-phy.h"
#include "trace.h"
struct tegra_dsi_state {
struct drm_connector_state base;
struct mipi_dphy_timing timing;
unsigned long period;
unsigned int vrefresh;
unsigned int lanes;
unsigned long pclk;
unsigned long bclk;
enum tegra_dsi_format format;
unsigned int mul;
unsigned int div;
};
static inline struct tegra_dsi_state *
to_dsi_state(struct drm_connector_state *state)
{
return container_of(state, struct tegra_dsi_state, base);
}
struct tegra_dsi {
struct host1x_client client;
struct tegra_output output;
struct device *dev;
void __iomem *regs;
struct reset_control *rst;
struct clk *clk_parent;
struct clk *clk_lp;
struct clk *clk;
struct drm_info_list *debugfs_files;
unsigned long flags;
enum mipi_dsi_pixel_format format;
unsigned int lanes;
struct tegra_mipi_device *mipi;
struct mipi_dsi_host host;
struct regulator *vdd;
unsigned int video_fifo_depth;
unsigned int host_fifo_depth;
/* for ganged-mode support */
struct tegra_dsi *master;
struct tegra_dsi *slave;
};
static inline struct tegra_dsi *
host1x_client_to_dsi(struct host1x_client *client)
{
return container_of(client, struct tegra_dsi, client);
}
static inline struct tegra_dsi *host_to_tegra(struct mipi_dsi_host *host)
{
return container_of(host, struct tegra_dsi, host);
}
static inline struct tegra_dsi *to_dsi(struct tegra_output *output)
{
return container_of(output, struct tegra_dsi, output);
}
static struct tegra_dsi_state *tegra_dsi_get_state(struct tegra_dsi *dsi)
{
return to_dsi_state(dsi->output.connector.state);
}
static inline u32 tegra_dsi_readl(struct tegra_dsi *dsi, unsigned int offset)
{
u32 value = readl(dsi->regs + (offset << 2));
trace_dsi_readl(dsi->dev, offset, value);
return value;
}
static inline void tegra_dsi_writel(struct tegra_dsi *dsi, u32 value,
unsigned int offset)
{
trace_dsi_writel(dsi->dev, offset, value);
writel(value, dsi->regs + (offset << 2));
}
#define DEBUGFS_REG32(_name) { .name = #_name, .offset = _name }
static const struct debugfs_reg32 tegra_dsi_regs[] = {
DEBUGFS_REG32(DSI_INCR_SYNCPT),
DEBUGFS_REG32(DSI_INCR_SYNCPT_CONTROL),
DEBUGFS_REG32(DSI_INCR_SYNCPT_ERROR),
DEBUGFS_REG32(DSI_CTXSW),
DEBUGFS_REG32(DSI_RD_DATA),
DEBUGFS_REG32(DSI_WR_DATA),
DEBUGFS_REG32(DSI_POWER_CONTROL),
DEBUGFS_REG32(DSI_INT_ENABLE),
DEBUGFS_REG32(DSI_INT_STATUS),
DEBUGFS_REG32(DSI_INT_MASK),
DEBUGFS_REG32(DSI_HOST_CONTROL),
DEBUGFS_REG32(DSI_CONTROL),
DEBUGFS_REG32(DSI_SOL_DELAY),
DEBUGFS_REG32(DSI_MAX_THRESHOLD),
DEBUGFS_REG32(DSI_TRIGGER),
DEBUGFS_REG32(DSI_TX_CRC),
DEBUGFS_REG32(DSI_STATUS),
DEBUGFS_REG32(DSI_INIT_SEQ_CONTROL),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_0),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_1),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_2),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_3),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_4),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_5),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_6),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_7),
DEBUGFS_REG32(DSI_PKT_SEQ_0_LO),
DEBUGFS_REG32(DSI_PKT_SEQ_0_HI),
DEBUGFS_REG32(DSI_PKT_SEQ_1_LO),
DEBUGFS_REG32(DSI_PKT_SEQ_1_HI),
DEBUGFS_REG32(DSI_PKT_SEQ_2_LO),
DEBUGFS_REG32(DSI_PKT_SEQ_2_HI),
DEBUGFS_REG32(DSI_PKT_SEQ_3_LO),
DEBUGFS_REG32(DSI_PKT_SEQ_3_HI),
DEBUGFS_REG32(DSI_PKT_SEQ_4_LO),
DEBUGFS_REG32(DSI_PKT_SEQ_4_HI),
DEBUGFS_REG32(DSI_PKT_SEQ_5_LO),
DEBUGFS_REG32(DSI_PKT_SEQ_5_HI),
DEBUGFS_REG32(DSI_DCS_CMDS),
DEBUGFS_REG32(DSI_PKT_LEN_0_1),
DEBUGFS_REG32(DSI_PKT_LEN_2_3),
DEBUGFS_REG32(DSI_PKT_LEN_4_5),
DEBUGFS_REG32(DSI_PKT_LEN_6_7),
DEBUGFS_REG32(DSI_PHY_TIMING_0),
DEBUGFS_REG32(DSI_PHY_TIMING_1),
DEBUGFS_REG32(DSI_PHY_TIMING_2),
DEBUGFS_REG32(DSI_BTA_TIMING),
DEBUGFS_REG32(DSI_TIMEOUT_0),
DEBUGFS_REG32(DSI_TIMEOUT_1),
DEBUGFS_REG32(DSI_TO_TALLY),
DEBUGFS_REG32(DSI_PAD_CONTROL_0),
DEBUGFS_REG32(DSI_PAD_CONTROL_CD),
DEBUGFS_REG32(DSI_PAD_CD_STATUS),
DEBUGFS_REG32(DSI_VIDEO_MODE_CONTROL),
DEBUGFS_REG32(DSI_PAD_CONTROL_1),
DEBUGFS_REG32(DSI_PAD_CONTROL_2),
DEBUGFS_REG32(DSI_PAD_CONTROL_3),
DEBUGFS_REG32(DSI_PAD_CONTROL_4),
DEBUGFS_REG32(DSI_GANGED_MODE_CONTROL),
DEBUGFS_REG32(DSI_GANGED_MODE_START),
DEBUGFS_REG32(DSI_GANGED_MODE_SIZE),
DEBUGFS_REG32(DSI_RAW_DATA_BYTE_COUNT),
DEBUGFS_REG32(DSI_ULTRA_LOW_POWER_CONTROL),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_8),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_9),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_10),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_11),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_12),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_13),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_14),
DEBUGFS_REG32(DSI_INIT_SEQ_DATA_15),
};
static int tegra_dsi_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_dsi *dsi = node->info_ent->data;
struct drm_crtc *crtc = dsi->output.encoder.crtc;
struct drm_device *drm = node->minor->dev;
unsigned int i;
int err = 0;
drm_modeset_lock_all(drm);
if (!crtc || !crtc->state->active) {
err = -EBUSY;
goto unlock;
}
for (i = 0; i < ARRAY_SIZE(tegra_dsi_regs); i++) {
unsigned int offset = tegra_dsi_regs[i].offset;
seq_printf(s, "%-32s %#05x %08x\n", tegra_dsi_regs[i].name,
offset, tegra_dsi_readl(dsi, offset));
}
unlock:
drm_modeset_unlock_all(drm);
return err;
}
static struct drm_info_list debugfs_files[] = {
{ "regs", tegra_dsi_show_regs, 0, NULL },
};
static int tegra_dsi_late_register(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
unsigned int i, count = ARRAY_SIZE(debugfs_files);
struct drm_minor *minor = connector->dev->primary;
struct dentry *root = connector->debugfs_entry;
struct tegra_dsi *dsi = to_dsi(output);
dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
if (!dsi->debugfs_files)
return -ENOMEM;
for (i = 0; i < count; i++)
dsi->debugfs_files[i].data = dsi;
drm_debugfs_create_files(dsi->debugfs_files, count, root, minor);
return 0;
}
static void tegra_dsi_early_unregister(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
unsigned int count = ARRAY_SIZE(debugfs_files);
struct tegra_dsi *dsi = to_dsi(output);
drm_debugfs_remove_files(dsi->debugfs_files, count,
connector->dev->primary);
kfree(dsi->debugfs_files);
dsi->debugfs_files = NULL;
}
#define PKT_ID0(id) ((((id) & 0x3f) << 3) | (1 << 9))
#define PKT_LEN0(len) (((len) & 0x07) << 0)
#define PKT_ID1(id) ((((id) & 0x3f) << 13) | (1 << 19))
#define PKT_LEN1(len) (((len) & 0x07) << 10)
#define PKT_ID2(id) ((((id) & 0x3f) << 23) | (1 << 29))
#define PKT_LEN2(len) (((len) & 0x07) << 20)
#define PKT_LP (1 << 30)
#define NUM_PKT_SEQ 12
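/*
 * Each packet sequencer word encodes up to three packet slots. A slot
 * consists of a 6-bit data ID, an enable bit and a 3-bit index selecting
 * one of the lengths programmed in the DSI_PKT_LEN_* registers; PKT_LP
 * marks the line for transmission in low-power mode.
 */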
/*
* non-burst mode with sync pulses
*/
static const u32 pkt_seq_video_non_burst_sync_pulses[NUM_PKT_SEQ] = {
[ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
PKT_LP,
[ 1] = 0,
[ 2] = PKT_ID0(MIPI_DSI_V_SYNC_END) | PKT_LEN0(0) |
PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
PKT_LP,
[ 3] = 0,
[ 4] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
PKT_LP,
[ 5] = 0,
[ 6] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
[ 7] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
[ 8] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
PKT_LP,
[ 9] = 0,
[10] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
[11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
};
/*
* non-burst mode with sync events
*/
static const u32 pkt_seq_video_non_burst_sync_events[NUM_PKT_SEQ] = {
[ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
PKT_LP,
[ 1] = 0,
[ 2] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
PKT_LP,
[ 3] = 0,
[ 4] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
PKT_LP,
[ 5] = 0,
[ 6] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(2) |
PKT_ID2(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN2(3),
[ 7] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4),
[ 8] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
PKT_LP,
[ 9] = 0,
[10] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(2) |
PKT_ID2(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN2(3),
[11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4),
};
static const u32 pkt_seq_command_mode[NUM_PKT_SEQ] = {
[ 0] = 0,
[ 1] = 0,
[ 2] = 0,
[ 3] = 0,
[ 4] = 0,
[ 5] = 0,
[ 6] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(3) | PKT_LP,
[ 7] = 0,
[ 8] = 0,
[ 9] = 0,
[10] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(5) | PKT_LP,
[11] = 0,
};
static void tegra_dsi_set_phy_timing(struct tegra_dsi *dsi,
unsigned long period,
const struct mipi_dphy_timing *timing)
{
u32 value;
value = DSI_TIMING_FIELD(timing->hsexit, period, 1) << 24 |
DSI_TIMING_FIELD(timing->hstrail, period, 0) << 16 |
DSI_TIMING_FIELD(timing->hszero, period, 3) << 8 |
DSI_TIMING_FIELD(timing->hsprepare, period, 1);
tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_0);
value = DSI_TIMING_FIELD(timing->clktrail, period, 1) << 24 |
DSI_TIMING_FIELD(timing->clkpost, period, 1) << 16 |
DSI_TIMING_FIELD(timing->clkzero, period, 1) << 8 |
DSI_TIMING_FIELD(timing->lpx, period, 1);
tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_1);
value = DSI_TIMING_FIELD(timing->clkprepare, period, 1) << 16 |
DSI_TIMING_FIELD(timing->clkpre, period, 1) << 8 |
DSI_TIMING_FIELD(0xff * period, period, 0) << 0;
tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_2);
value = DSI_TIMING_FIELD(timing->taget, period, 1) << 16 |
DSI_TIMING_FIELD(timing->tasure, period, 1) << 8 |
DSI_TIMING_FIELD(timing->tago, period, 1);
tegra_dsi_writel(dsi, value, DSI_BTA_TIMING);
if (dsi->slave)
tegra_dsi_set_phy_timing(dsi->slave, period, timing);
}
static int tegra_dsi_get_muldiv(enum mipi_dsi_pixel_format format,
unsigned int *mulp, unsigned int *divp)
{
switch (format) {
case MIPI_DSI_FMT_RGB666_PACKED:
case MIPI_DSI_FMT_RGB888:
*mulp = 3;
*divp = 1;
break;
case MIPI_DSI_FMT_RGB565:
*mulp = 2;
*divp = 1;
break;
case MIPI_DSI_FMT_RGB666:
*mulp = 9;
*divp = 4;
break;
default:
return -EINVAL;
}
return 0;
}
static int tegra_dsi_get_format(enum mipi_dsi_pixel_format format,
enum tegra_dsi_format *fmt)
{
switch (format) {
case MIPI_DSI_FMT_RGB888:
*fmt = TEGRA_DSI_FORMAT_24P;
break;
case MIPI_DSI_FMT_RGB666:
*fmt = TEGRA_DSI_FORMAT_18NP;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
*fmt = TEGRA_DSI_FORMAT_18P;
break;
case MIPI_DSI_FMT_RGB565:
*fmt = TEGRA_DSI_FORMAT_16P;
break;
default:
return -EINVAL;
}
return 0;
}
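/*
 * Program the horizontal start position and width (in pixels) of the part
 * of each scanline that this controller drives in ganged mode.
 */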
static void tegra_dsi_ganged_enable(struct tegra_dsi *dsi, unsigned int start,
unsigned int size)
{
u32 value;
tegra_dsi_writel(dsi, start, DSI_GANGED_MODE_START);
tegra_dsi_writel(dsi, size << 16 | size, DSI_GANGED_MODE_SIZE);
value = DSI_GANGED_MODE_CONTROL_ENABLE;
tegra_dsi_writel(dsi, value, DSI_GANGED_MODE_CONTROL);
}
static void tegra_dsi_enable(struct tegra_dsi *dsi)
{
u32 value;
value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
value |= DSI_POWER_CONTROL_ENABLE;
tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
if (dsi->slave)
tegra_dsi_enable(dsi->slave);
}
static unsigned int tegra_dsi_get_lanes(struct tegra_dsi *dsi)
{
if (dsi->master)
return dsi->master->lanes + dsi->lanes;
if (dsi->slave)
return dsi->lanes + dsi->slave->lanes;
return dsi->lanes;
}
static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
const struct drm_display_mode *mode)
{
unsigned int hact, hsw, hbp, hfp, i, mul, div;
struct tegra_dsi_state *state;
const u32 *pkt_seq;
u32 value;
/* XXX: pass in state into this function? */
if (dsi->master)
state = tegra_dsi_get_state(dsi->master);
else
state = tegra_dsi_get_state(dsi);
mul = state->mul;
div = state->div;
if (dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
DRM_DEBUG_KMS("Non-burst video mode with sync pulses\n");
pkt_seq = pkt_seq_video_non_burst_sync_pulses;
} else if (dsi->flags & MIPI_DSI_MODE_VIDEO) {
DRM_DEBUG_KMS("Non-burst video mode with sync events\n");
pkt_seq = pkt_seq_video_non_burst_sync_events;
} else {
DRM_DEBUG_KMS("Command mode\n");
pkt_seq = pkt_seq_command_mode;
}
value = DSI_CONTROL_CHANNEL(0) |
DSI_CONTROL_FORMAT(state->format) |
DSI_CONTROL_LANES(dsi->lanes - 1) |
DSI_CONTROL_SOURCE(pipe);
tegra_dsi_writel(dsi, value, DSI_CONTROL);
tegra_dsi_writel(dsi, dsi->video_fifo_depth, DSI_MAX_THRESHOLD);
value = DSI_HOST_CONTROL_HS;
tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
value = tegra_dsi_readl(dsi, DSI_CONTROL);
if (dsi->flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
value |= DSI_CONTROL_HS_CLK_CTRL;
value &= ~DSI_CONTROL_TX_TRIG(3);
/* enable DCS commands for command mode */
if (dsi->flags & MIPI_DSI_MODE_VIDEO)
value &= ~DSI_CONTROL_DCS_ENABLE;
else
value |= DSI_CONTROL_DCS_ENABLE;
value |= DSI_CONTROL_VIDEO_ENABLE;
value &= ~DSI_CONTROL_HOST_ENABLE;
tegra_dsi_writel(dsi, value, DSI_CONTROL);
for (i = 0; i < NUM_PKT_SEQ; i++)
tegra_dsi_writel(dsi, pkt_seq[i], DSI_PKT_SEQ_0_LO + i);
if (dsi->flags & MIPI_DSI_MODE_VIDEO) {
/* horizontal active pixels */
hact = mode->hdisplay * mul / div;
/* horizontal sync width */
hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
/* horizontal back porch */
hbp = (mode->htotal - mode->hsync_end) * mul / div;
if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0)
hbp += hsw;
/* horizontal front porch */
hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
/* subtract packet overhead */
hsw -= 10;
hbp -= 14;
hfp -= 8;
tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
/* set SOL delay (for non-burst mode only) */
tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
/* TODO: implement ganged mode */
} else {
u16 bytes;
if (dsi->master || dsi->slave) {
/*
* For ganged mode, assume symmetric left-right mode.
*/
bytes = 1 + (mode->hdisplay / 2) * mul / div;
} else {
/* 1 byte (DCS command) + pixel data */
bytes = 1 + mode->hdisplay * mul / div;
}
tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1);
tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_2_3);
tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_4_5);
tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_6_7);
value = MIPI_DCS_WRITE_MEMORY_START << 8 |
MIPI_DCS_WRITE_MEMORY_CONTINUE;
tegra_dsi_writel(dsi, value, DSI_DCS_CMDS);
/* set SOL delay */
if (dsi->master || dsi->slave) {
unsigned long delay, bclk, bclk_ganged;
unsigned int lanes = state->lanes;
/* SOL to valid, valid to FIFO and FIFO write delay */
delay = 4 + 4 + 2;
delay = DIV_ROUND_UP(delay * mul, div * lanes);
/* FIFO read delay */
delay = delay + 6;
bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes);
bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes);
value = bclk - bclk_ganged + delay + 20;
} else {
/* TODO: revisit for non-ganged mode */
value = 8 * mul / div;
}
tegra_dsi_writel(dsi, value, DSI_SOL_DELAY);
}
if (dsi->slave) {
tegra_dsi_configure(dsi->slave, pipe, mode);
/*
* TODO: Support modes other than symmetrical left-right
* split.
*/
tegra_dsi_ganged_enable(dsi, 0, mode->hdisplay / 2);
tegra_dsi_ganged_enable(dsi->slave, mode->hdisplay / 2,
mode->hdisplay / 2);
}
}
static int tegra_dsi_wait_idle(struct tegra_dsi *dsi, unsigned long timeout)
{
u32 value;
timeout = jiffies + msecs_to_jiffies(timeout);
while (time_before(jiffies, timeout)) {
value = tegra_dsi_readl(dsi, DSI_STATUS);
if (value & DSI_STATUS_IDLE)
return 0;
usleep_range(1000, 2000);
}
return -ETIMEDOUT;
}
static void tegra_dsi_video_disable(struct tegra_dsi *dsi)
{
u32 value;
value = tegra_dsi_readl(dsi, DSI_CONTROL);
value &= ~DSI_CONTROL_VIDEO_ENABLE;
tegra_dsi_writel(dsi, value, DSI_CONTROL);
if (dsi->slave)
tegra_dsi_video_disable(dsi->slave);
}
static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi)
{
tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START);
tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE);
tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
}
static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
{
u32 value;
value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
return 0;
}
static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
{
u32 value;
int err;
/*
* XXX Is this still needed? The module reset is deasserted right
* before this function is called.
*/
tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
/* start calibration */
tegra_dsi_pad_enable(dsi);
value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
DSI_PAD_OUT_CLK(0x0);
tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
err = tegra_mipi_start_calibration(dsi->mipi);
if (err < 0)
return err;
return tegra_mipi_finish_calibration(dsi->mipi);
}
static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
unsigned int vrefresh)
{
unsigned int timeout;
u32 value;
/* one frame high-speed transmission timeout */
timeout = (bclk / vrefresh) / 512;
value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
/* 2 ms peripheral timeout for panel */
timeout = 2 * bclk / 512 * 1000;
value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
if (dsi->slave)
tegra_dsi_set_timeout(dsi->slave, bclk, vrefresh);
}
static void tegra_dsi_disable(struct tegra_dsi *dsi)
{
u32 value;
if (dsi->slave) {
tegra_dsi_ganged_disable(dsi->slave);
tegra_dsi_ganged_disable(dsi);
}
value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
value &= ~DSI_POWER_CONTROL_ENABLE;
tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
if (dsi->slave)
tegra_dsi_disable(dsi->slave);
usleep_range(5000, 10000);
}
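/*
 * Toggle the power-control enable bit to soft-reset the controller and
 * clear any host trigger that may still be pending.
 */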
static void tegra_dsi_soft_reset(struct tegra_dsi *dsi)
{
u32 value;
value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
value &= ~DSI_POWER_CONTROL_ENABLE;
tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
usleep_range(300, 1000);
value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
value |= DSI_POWER_CONTROL_ENABLE;
tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
usleep_range(300, 1000);
value = tegra_dsi_readl(dsi, DSI_TRIGGER);
if (value)
tegra_dsi_writel(dsi, 0, DSI_TRIGGER);
if (dsi->slave)
tegra_dsi_soft_reset(dsi->slave);
}
static void tegra_dsi_connector_reset(struct drm_connector *connector)
{
struct tegra_dsi_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return;
if (connector->state) {
__drm_atomic_helper_connector_destroy_state(connector->state);
kfree(connector->state);
}
__drm_atomic_helper_connector_reset(connector, &state->base);
}
static struct drm_connector_state *
tegra_dsi_connector_duplicate_state(struct drm_connector *connector)
{
struct tegra_dsi_state *state = to_dsi_state(connector->state);
struct tegra_dsi_state *copy;
copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
if (!copy)
return NULL;
__drm_atomic_helper_connector_duplicate_state(connector,
&copy->base);
return &copy->base;
}
static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
.reset = tegra_dsi_connector_reset,
.detect = tegra_output_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = tegra_output_connector_destroy,
.atomic_duplicate_state = tegra_dsi_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.late_register = tegra_dsi_late_register,
.early_unregister = tegra_dsi_early_unregister,
};
static enum drm_mode_status
tegra_dsi_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = {
.get_modes = tegra_output_connector_get_modes,
.mode_valid = tegra_dsi_connector_mode_valid,
};
static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
{
int err;
if (dsi->slave)
tegra_dsi_unprepare(dsi->slave);
err = tegra_mipi_disable(dsi->mipi);
if (err < 0)
dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n",
err);
err = host1x_client_suspend(&dsi->client);
if (err < 0)
dev_err(dsi->dev, "failed to suspend: %d\n", err);
}
static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_dsi *dsi = to_dsi(output);
u32 value;
int err;
if (output->panel)
drm_panel_disable(output->panel);
tegra_dsi_video_disable(dsi);
/*
* The following accesses registers of the display controller, so make
* sure it's only executed when the output is attached to one.
*/
if (dc) {
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value &= ~DSI_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
tegra_dc_commit(dc);
}
err = tegra_dsi_wait_idle(dsi, 100);
if (err < 0)
dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
tegra_dsi_soft_reset(dsi);
if (output->panel)
drm_panel_unprepare(output->panel);
tegra_dsi_disable(dsi);
tegra_dsi_unprepare(dsi);
}
static int tegra_dsi_prepare(struct tegra_dsi *dsi)
{
int err;
err = host1x_client_resume(&dsi->client);
if (err < 0) {
dev_err(dsi->dev, "failed to resume: %d\n", err);
return err;
}
err = tegra_mipi_enable(dsi->mipi);
if (err < 0)
dev_err(dsi->dev, "failed to enable MIPI calibration: %d\n",
err);
err = tegra_dsi_pad_calibrate(dsi);
if (err < 0)
dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
if (dsi->slave)
tegra_dsi_prepare(dsi->slave);
return 0;
}
static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_dsi *dsi = to_dsi(output);
struct tegra_dsi_state *state;
u32 value;
int err;
/* If the bootloader enabled DSI it needs to be disabled
* in order for the panel initialization commands to be
* properly sent.
*/
value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
if (value & DSI_POWER_CONTROL_ENABLE)
tegra_dsi_disable(dsi);
err = tegra_dsi_prepare(dsi);
if (err < 0) {
dev_err(dsi->dev, "failed to prepare: %d\n", err);
return;
}
state = tegra_dsi_get_state(dsi);
tegra_dsi_set_timeout(dsi, state->bclk, state->vrefresh);
/*
* The D-PHY timing fields are expressed in byte-clock cycles, so
* multiply the period by 8.
*/
tegra_dsi_set_phy_timing(dsi, state->period * 8, &state->timing);
if (output->panel)
drm_panel_prepare(output->panel);
tegra_dsi_configure(dsi, dc->pipe, mode);
/* enable display controller */
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value |= DSI_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
tegra_dc_commit(dc);
/* enable DSI controller */
tegra_dsi_enable(dsi);
if (output->panel)
drm_panel_enable(output->panel);
}
static int
tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dsi_state *state = to_dsi_state(conn_state);
struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
struct tegra_dsi *dsi = to_dsi(output);
unsigned int scdiv;
unsigned long plld;
int err;
state->pclk = crtc_state->mode.clock * 1000;
err = tegra_dsi_get_muldiv(dsi->format, &state->mul, &state->div);
if (err < 0)
return err;
state->lanes = tegra_dsi_get_lanes(dsi);
err = tegra_dsi_get_format(dsi->format, &state->format);
if (err < 0)
return err;
state->vrefresh = drm_mode_vrefresh(&crtc_state->mode);
/* compute byte clock */
state->bclk = (state->pclk * state->mul) / (state->div * state->lanes);
DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", state->mul, state->div,
state->lanes);
DRM_DEBUG_KMS("format: %u, vrefresh: %u\n", state->format,
state->vrefresh);
DRM_DEBUG_KMS("bclk: %lu\n", state->bclk);
/*
* Compute bit clock and round up to the next MHz.
*/
plld = DIV_ROUND_UP(state->bclk * 8, USEC_PER_SEC) * USEC_PER_SEC;
state->period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, plld);
err = mipi_dphy_timing_get_default(&state->timing, state->period);
if (err < 0)
return err;
err = mipi_dphy_timing_validate(&state->timing, state->period);
if (err < 0) {
dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err);
return err;
}
/*
* We divide the frequency by two here, but we make up for that by
* setting the shift clock divider (further below) to half of the
* correct value.
*/
plld /= 2;
/*
* Derive pixel clock from bit clock using the shift clock divider.
* Note that this is only half of what we would expect, but we need
* that to make up for the fact that we divided the bit clock by a
* factor of two above.
*
* It's not clear exactly why this is necessary, but the display is
* not working properly otherwise. Perhaps the PLLs cannot generate
* frequencies sufficiently high.
*/
scdiv = ((8 * state->mul) / (state->div * state->lanes)) - 2;
err = tegra_dc_state_setup_clock(dc, crtc_state, dsi->clk_parent,
plld, scdiv);
if (err < 0) {
dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
return err;
}
return err;
}
static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
.disable = tegra_dsi_encoder_disable,
.enable = tegra_dsi_encoder_enable,
.atomic_check = tegra_dsi_encoder_atomic_check,
};
static int tegra_dsi_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
int err;
/* Gangsters must not register their own outputs. */
if (!dsi->master) {
dsi->output.dev = client->dev;
drm_connector_init(drm, &dsi->output.connector,
&tegra_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(&dsi->output.connector,
&tegra_dsi_connector_helper_funcs);
dsi->output.connector.dpms = DRM_MODE_DPMS_OFF;
drm_simple_encoder_init(drm, &dsi->output.encoder,
DRM_MODE_ENCODER_DSI);
drm_encoder_helper_add(&dsi->output.encoder,
&tegra_dsi_encoder_helper_funcs);
drm_connector_attach_encoder(&dsi->output.connector,
&dsi->output.encoder);
drm_connector_register(&dsi->output.connector);
err = tegra_output_init(drm, &dsi->output);
if (err < 0)
dev_err(dsi->dev, "failed to initialize output: %d\n",
err);
dsi->output.encoder.possible_crtcs = 0x3;
}
return 0;
}
static int tegra_dsi_exit(struct host1x_client *client)
{
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
tegra_output_exit(&dsi->output);
return 0;
}
static int tegra_dsi_runtime_suspend(struct host1x_client *client)
{
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
struct device *dev = client->dev;
int err;
if (dsi->rst) {
err = reset_control_assert(dsi->rst);
if (err < 0) {
dev_err(dev, "failed to assert reset: %d\n", err);
return err;
}
}
usleep_range(1000, 2000);
clk_disable_unprepare(dsi->clk_lp);
clk_disable_unprepare(dsi->clk);
regulator_disable(dsi->vdd);
pm_runtime_put_sync(dev);
return 0;
}
static int tegra_dsi_runtime_resume(struct host1x_client *client)
{
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
struct device *dev = client->dev;
int err;
err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
}
err = regulator_enable(dsi->vdd);
if (err < 0) {
dev_err(dev, "failed to enable VDD supply: %d\n", err);
goto put_rpm;
}
err = clk_prepare_enable(dsi->clk);
if (err < 0) {
dev_err(dev, "cannot enable DSI clock: %d\n", err);
goto disable_vdd;
}
err = clk_prepare_enable(dsi->clk_lp);
if (err < 0) {
dev_err(dev, "cannot enable low-power clock: %d\n", err);
goto disable_clk;
}
usleep_range(1000, 2000);
if (dsi->rst) {
err = reset_control_deassert(dsi->rst);
if (err < 0) {
dev_err(dev, "cannot assert reset: %d\n", err);
goto disable_clk_lp;
}
}
return 0;
disable_clk_lp:
clk_disable_unprepare(dsi->clk_lp);
disable_clk:
clk_disable_unprepare(dsi->clk);
disable_vdd:
regulator_disable(dsi->vdd);
put_rpm:
pm_runtime_put_sync(dev);
return err;
}
static const struct host1x_client_ops dsi_client_ops = {
.init = tegra_dsi_init,
.exit = tegra_dsi_exit,
.suspend = tegra_dsi_runtime_suspend,
.resume = tegra_dsi_runtime_resume,
};
static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
{
struct clk *parent;
int err;
parent = clk_get_parent(dsi->clk);
if (!parent)
return -EINVAL;
err = clk_set_parent(parent, dsi->clk_parent);
if (err < 0)
return err;
return 0;
}
static const char * const error_report[16] = {
"SoT Error",
"SoT Sync Error",
"EoT Sync Error",
"Escape Mode Entry Command Error",
"Low-Power Transmit Sync Error",
"Peripheral Timeout Error",
"False Control Error",
"Contention Detected",
"ECC Error, single-bit",
"ECC Error, multi-bit",
"Checksum Error",
"DSI Data Type Not Recognized",
"DSI VC ID Invalid",
"Invalid Transmission Length",
"Reserved",
"DSI Protocol Violation",
};
static ssize_t tegra_dsi_read_response(struct tegra_dsi *dsi,
const struct mipi_dsi_msg *msg,
size_t count)
{
u8 *rx = msg->rx_buf;
unsigned int i, j, k;
size_t size = 0;
u16 errors;
u32 value;
/* read and parse packet header */
value = tegra_dsi_readl(dsi, DSI_RD_DATA);
switch (value & 0x3f) {
case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
errors = (value >> 8) & 0xffff;
dev_dbg(dsi->dev, "Acknowledge and error report: %04x\n",
errors);
for (i = 0; i < ARRAY_SIZE(error_report); i++)
if (errors & BIT(i))
dev_dbg(dsi->dev, " %2u: %s\n", i,
error_report[i]);
break;
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
rx[0] = (value >> 8) & 0xff;
size = 1;
break;
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
rx[0] = (value >> 8) & 0xff;
rx[1] = (value >> 16) & 0xff;
size = 2;
break;
case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
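/* the 16-bit word count is in bytes 1 and 2 of the long packet header */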
size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff);
break;
case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff);
break;
default:
dev_err(dsi->dev, "unhandled response type: %02x\n",
value & 0x3f);
return -EPROTO;
}
size = min(size, msg->rx_len);
if (msg->rx_buf && size > 0) {
for (i = 0, j = 0; i < count - 1; i++, j += 4) {
u8 *rx = msg->rx_buf + j;
value = tegra_dsi_readl(dsi, DSI_RD_DATA);
for (k = 0; k < 4 && (j + k) < msg->rx_len; k++)
rx[j + k] = (value >> (k << 3)) & 0xff;
}
}
return size;
}
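/*
 * Trigger transmission of the packet queued in the FIFO and wait for the
 * hardware to clear the trigger bit again (or time out).
 */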
static int tegra_dsi_transmit(struct tegra_dsi *dsi, unsigned long timeout)
{
tegra_dsi_writel(dsi, DSI_TRIGGER_HOST, DSI_TRIGGER);
timeout = jiffies + msecs_to_jiffies(timeout);
while (time_before(jiffies, timeout)) {
u32 value = tegra_dsi_readl(dsi, DSI_TRIGGER);
if ((value & DSI_TRIGGER_HOST) == 0)
return 0;
usleep_range(1000, 2000);
}
DRM_DEBUG_KMS("timeout waiting for transmission to complete\n");
return -ETIMEDOUT;
}
static int tegra_dsi_wait_for_response(struct tegra_dsi *dsi,
unsigned long timeout)
{
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
u32 value = tegra_dsi_readl(dsi, DSI_STATUS);
u8 count = value & 0x1f;
if (count > 0)
return count;
usleep_range(1000, 2000);
}
DRM_DEBUG_KMS("peripheral returned no data\n");
return -ETIMEDOUT;
}
static void tegra_dsi_writesl(struct tegra_dsi *dsi, unsigned long offset,
const void *buffer, size_t size)
{
const u8 *buf = buffer;
size_t i, j;
u32 value;
for (j = 0; j < size; j += 4) {
value = 0;
for (i = 0; i < 4 && j + i < size; i++)
value |= buf[j + i] << (i << 3);
tegra_dsi_writel(dsi, value, DSI_WR_DATA);
}
}
static ssize_t tegra_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct tegra_dsi *dsi = host_to_tegra(host);
struct mipi_dsi_packet packet;
const u8 *header;
size_t count;
ssize_t err;
u32 value;
err = mipi_dsi_create_packet(&packet, msg);
if (err < 0)
return err;
header = packet.header;
/* maximum FIFO depth is 1920 words */
if (packet.size > dsi->video_fifo_depth * 4)
return -ENOSPC;
/* reset underflow/overflow flags */
value = tegra_dsi_readl(dsi, DSI_STATUS);
if (value & (DSI_STATUS_UNDERFLOW | DSI_STATUS_OVERFLOW)) {
value = DSI_HOST_CONTROL_FIFO_RESET;
tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
usleep_range(10, 20);
}
value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
value |= DSI_POWER_CONTROL_ENABLE;
tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
usleep_range(5000, 10000);
value = DSI_HOST_CONTROL_CRC_RESET | DSI_HOST_CONTROL_TX_TRIG_HOST |
DSI_HOST_CONTROL_CS | DSI_HOST_CONTROL_ECC;
if ((msg->flags & MIPI_DSI_MSG_USE_LPM) == 0)
value |= DSI_HOST_CONTROL_HS;
/*
* The host FIFO has a maximum of 64 words, so larger transmissions
* need to use the video FIFO.
*/
if (packet.size > dsi->host_fifo_depth * 4)
value |= DSI_HOST_CONTROL_FIFO_SEL;
tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
/*
* For reads and messages with explicitly requested ACK, generate a
* BTA sequence after the transmission of the packet.
*/
if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) ||
(msg->rx_buf && msg->rx_len > 0)) {
value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
value |= DSI_HOST_CONTROL_PKT_BTA;
tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
}
value = DSI_CONTROL_LANES(0) | DSI_CONTROL_HOST_ENABLE;
tegra_dsi_writel(dsi, value, DSI_CONTROL);
/* write packet header, ECC is generated by hardware */
value = header[2] << 16 | header[1] << 8 | header[0];
tegra_dsi_writel(dsi, value, DSI_WR_DATA);
/* write payload (if any) */
if (packet.payload_length > 0)
tegra_dsi_writesl(dsi, DSI_WR_DATA, packet.payload,
packet.payload_length);
err = tegra_dsi_transmit(dsi, 250);
if (err < 0)
return err;
if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) ||
(msg->rx_buf && msg->rx_len > 0)) {
err = tegra_dsi_wait_for_response(dsi, 250);
if (err < 0)
return err;
count = err;
value = tegra_dsi_readl(dsi, DSI_RD_DATA);
switch (value) {
case 0x84:
/*
dev_dbg(dsi->dev, "ACK\n");
*/
break;
case 0x87:
/*
dev_dbg(dsi->dev, "ESCAPE\n");
*/
break;
default:
dev_err(dsi->dev, "unknown status: %08x\n", value);
break;
}
if (count > 1) {
err = tegra_dsi_read_response(dsi, msg, count);
if (err < 0)
dev_err(dsi->dev,
"failed to parse response: %zd\n",
err);
else {
/*
* For read commands, return the number of
* bytes returned by the peripheral.
*/
count = err;
}
}
} else {
/*
* For write commands, we have transmitted the 4-byte header
* plus the variable-length payload.
*/
count = 4 + packet.payload_length;
}
return count;
}
static int tegra_dsi_ganged_setup(struct tegra_dsi *dsi)
{
struct clk *parent;
int err;
/* make sure both DSI controllers share the same PLL */
parent = clk_get_parent(dsi->slave->clk);
if (!parent)
return -EINVAL;
err = clk_set_parent(parent, dsi->clk_parent);
if (err < 0)
return err;
return 0;
}
static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct tegra_dsi *dsi = host_to_tegra(host);
dsi->flags = device->mode_flags;
dsi->format = device->format;
dsi->lanes = device->lanes;
if (dsi->slave) {
int err;
dev_dbg(dsi->dev, "attaching dual-channel device %s\n",
dev_name(&device->dev));
err = tegra_dsi_ganged_setup(dsi);
if (err < 0) {
dev_err(dsi->dev, "failed to set up ganged mode: %d\n",
err);
return err;
}
}
/*
 * Slaves don't have a panel associated with them, so they merely
 * provide the second channel.
*/
if (!dsi->master) {
struct tegra_output *output = &dsi->output;
output->panel = of_drm_find_panel(device->dev.of_node);
if (IS_ERR(output->panel))
output->panel = NULL;
if (output->panel && output->connector.dev)
drm_helper_hpd_irq_event(output->connector.dev);
}
return 0;
}
static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct tegra_dsi *dsi = host_to_tegra(host);
struct tegra_output *output = &dsi->output;
if (output->panel && &device->dev == output->panel->dev) {
output->panel = NULL;
if (output->connector.dev)
drm_helper_hpd_irq_event(output->connector.dev);
}
return 0;
}
static const struct mipi_dsi_host_ops tegra_dsi_host_ops = {
.attach = tegra_dsi_host_attach,
.detach = tegra_dsi_host_detach,
.transfer = tegra_dsi_host_transfer,
};
static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
{
struct device_node *np;
np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0);
if (np) {
struct platform_device *gangster = of_find_device_by_node(np);
dsi->slave = platform_get_drvdata(gangster);
of_node_put(np);
if (!dsi->slave) {
put_device(&gangster->dev);
return -EPROBE_DEFER;
}
dsi->slave->master = dsi;
}
return 0;
}
static int tegra_dsi_probe(struct platform_device *pdev)
{
struct tegra_dsi *dsi;
struct resource *regs;
int err;
dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
dsi->output.dev = dsi->dev = &pdev->dev;
dsi->video_fifo_depth = 1920;
dsi->host_fifo_depth = 64;
err = tegra_dsi_ganged_probe(dsi);
if (err < 0)
return err;
err = tegra_output_probe(&dsi->output);
if (err < 0)
return err;
dsi->output.connector.polled = DRM_CONNECTOR_POLL_HPD;
/*
* Assume these values by default. When a DSI peripheral driver
* attaches to the DSI host, the parameters will be taken from
* the attached device.
*/
dsi->flags = MIPI_DSI_MODE_VIDEO;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = 4;
if (!pdev->dev.pm_domain) {
dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
if (IS_ERR(dsi->rst))
return PTR_ERR(dsi->rst);
}
dsi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dsi->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk),
"cannot get DSI clock\n");
dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
if (IS_ERR(dsi->clk_lp))
return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp),
"cannot get low-power clock\n");
dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(dsi->clk_parent))
return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent),
"cannot get parent clock\n");
dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
if (IS_ERR(dsi->vdd))
return dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd),
"cannot get VDD supply\n");
err = tegra_dsi_setup_clocks(dsi);
if (err < 0) {
dev_err(&pdev->dev, "cannot setup clocks\n");
return err;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(dsi->regs))
return PTR_ERR(dsi->regs);
dsi->mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node);
if (IS_ERR(dsi->mipi))
return PTR_ERR(dsi->mipi);
dsi->host.ops = &tegra_dsi_host_ops;
dsi->host.dev = &pdev->dev;
err = mipi_dsi_host_register(&dsi->host);
if (err < 0) {
dev_err(&pdev->dev, "failed to register DSI host: %d\n", err);
goto mipi_free;
}
platform_set_drvdata(pdev, dsi);
pm_runtime_enable(&pdev->dev);
INIT_LIST_HEAD(&dsi->client.list);
dsi->client.ops = &dsi_client_ops;
dsi->client.dev = &pdev->dev;
err = host1x_client_register(&dsi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
goto unregister;
}
return 0;
unregister:
mipi_dsi_host_unregister(&dsi->host);
mipi_free:
tegra_mipi_free(dsi->mipi);
return err;
}
static void tegra_dsi_remove(struct platform_device *pdev)
{
struct tegra_dsi *dsi = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
host1x_client_unregister(&dsi->client);
tegra_output_remove(&dsi->output);
mipi_dsi_host_unregister(&dsi->host);
tegra_mipi_free(dsi->mipi);
}
static const struct of_device_id tegra_dsi_of_match[] = {
{ .compatible = "nvidia,tegra210-dsi", },
{ .compatible = "nvidia,tegra132-dsi", },
{ .compatible = "nvidia,tegra124-dsi", },
{ .compatible = "nvidia,tegra114-dsi", },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_dsi_of_match);
struct platform_driver tegra_dsi_driver = {
.driver = {
.name = "tegra-dsi",
.of_match_table = tegra_dsi_of_match,
},
.probe = tegra_dsi_probe,
.remove_new = tegra_dsi_remove,
};
| linux-master | drivers/gpu/drm/tegra/dsi.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 NVIDIA Corporation */
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>
#include "drm.h"
#include "uapi.h"
static void tegra_drm_mapping_release(struct kref *ref)
{
struct tegra_drm_mapping *mapping =
container_of(ref, struct tegra_drm_mapping, ref);
host1x_bo_unpin(mapping->map);
host1x_bo_put(mapping->bo);
kfree(mapping);
}
void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)
{
kref_put(&mapping->ref, tegra_drm_mapping_release);
}
static void tegra_drm_channel_context_close(struct tegra_drm_context *context)
{
struct tegra_drm_mapping *mapping;
unsigned long id;
if (context->memory_context)
host1x_memory_context_put(context->memory_context);
xa_for_each(&context->mappings, id, mapping)
tegra_drm_mapping_put(mapping);
xa_destroy(&context->mappings);
host1x_channel_put(context->channel);
kfree(context);
}
void tegra_drm_uapi_close_file(struct tegra_drm_file *file)
{
struct tegra_drm_context *context;
struct host1x_syncpt *sp;
unsigned long id;
xa_for_each(&file->contexts, id, context)
tegra_drm_channel_context_close(context);
xa_for_each(&file->syncpoints, id, sp)
host1x_syncpt_put(sp);
xa_destroy(&file->contexts);
xa_destroy(&file->syncpoints);
}
static struct tegra_drm_client *tegra_drm_find_client(struct tegra_drm *tegra, u32 class)
{
struct tegra_drm_client *client;
list_for_each_entry(client, &tegra->clients, list)
if (client->base.class == class)
return client;
return NULL;
}
int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_file *file)
{
struct host1x *host = tegra_drm_to_host1x(drm->dev_private);
struct tegra_drm_file *fpriv = file->driver_priv;
struct tegra_drm *tegra = drm->dev_private;
struct drm_tegra_channel_open *args = data;
struct tegra_drm_client *client = NULL;
struct tegra_drm_context *context;
int err;
if (args->flags)
return -EINVAL;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
client = tegra_drm_find_client(tegra, args->host1x_class);
if (!client) {
err = -ENODEV;
goto free;
}
if (client->shared_channel) {
context->channel = host1x_channel_get(client->shared_channel);
} else {
context->channel = host1x_channel_request(&client->base);
if (!context->channel) {
err = -EBUSY;
goto free;
}
}
/* Only allocate context if the engine supports context isolation. */
if (device_iommu_mapped(client->base.dev) && client->ops->can_use_memory_ctx) {
bool supported;
err = client->ops->can_use_memory_ctx(client, &supported);
if (err)
goto put_channel;
if (supported)
context->memory_context = host1x_memory_context_alloc(
host, client->base.dev, get_task_pid(current, PIDTYPE_TGID));
if (IS_ERR(context->memory_context)) {
if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) {
err = PTR_ERR(context->memory_context);
goto put_channel;
} else {
/*
* OK, HW does not support contexts or contexts
* are disabled.
*/
context->memory_context = NULL;
}
}
}
err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),
GFP_KERNEL);
if (err < 0)
goto put_memctx;
context->client = client;
xa_init_flags(&context->mappings, XA_FLAGS_ALLOC1);
args->version = client->version;
args->capabilities = 0;
if (device_get_dma_attr(client->base.dev) == DEV_DMA_COHERENT)
args->capabilities |= DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT;
return 0;
put_memctx:
if (context->memory_context)
host1x_memory_context_put(context->memory_context);
put_channel:
host1x_channel_put(context->channel);
free:
kfree(context);
return err;
}
int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data, struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_channel_close *args = data;
struct tegra_drm_context *context;
mutex_lock(&fpriv->lock);
context = xa_load(&fpriv->contexts, args->context);
if (!context) {
mutex_unlock(&fpriv->lock);
return -EINVAL;
}
xa_erase(&fpriv->contexts, args->context);
mutex_unlock(&fpriv->lock);
tegra_drm_channel_context_close(context);
return 0;
}
int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_channel_map *args = data;
struct tegra_drm_mapping *mapping;
struct tegra_drm_context *context;
enum dma_data_direction direction;
struct device *mapping_dev;
int err = 0;
if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
return -EINVAL;
mutex_lock(&fpriv->lock);
context = xa_load(&fpriv->contexts, args->context);
if (!context) {
mutex_unlock(&fpriv->lock);
return -EINVAL;
}
mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping) {
err = -ENOMEM;
goto unlock;
}
kref_init(&mapping->ref);
if (context->memory_context)
mapping_dev = &context->memory_context->dev;
else
mapping_dev = context->client->base.dev;
mapping->bo = tegra_gem_lookup(file, args->handle);
if (!mapping->bo) {
err = -EINVAL;
goto free;
}
switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
direction = DMA_BIDIRECTIONAL;
break;
case DRM_TEGRA_CHANNEL_MAP_WRITE:
direction = DMA_FROM_DEVICE;
break;
case DRM_TEGRA_CHANNEL_MAP_READ:
direction = DMA_TO_DEVICE;
break;
default:
err = -EINVAL;
goto put_gem;
}
mapping->map = host1x_bo_pin(mapping_dev, mapping->bo, direction, NULL);
if (IS_ERR(mapping->map)) {
err = PTR_ERR(mapping->map);
goto put_gem;
}
mapping->iova = mapping->map->phys;
mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;
err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
GFP_KERNEL);
if (err < 0)
goto unpin;
mutex_unlock(&fpriv->lock);
return 0;
unpin:
host1x_bo_unpin(mapping->map);
put_gem:
host1x_bo_put(mapping->bo);
free:
kfree(mapping);
unlock:
mutex_unlock(&fpriv->lock);
return err;
}
int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data, struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_channel_unmap *args = data;
struct tegra_drm_mapping *mapping;
struct tegra_drm_context *context;
mutex_lock(&fpriv->lock);
context = xa_load(&fpriv->contexts, args->context);
if (!context) {
mutex_unlock(&fpriv->lock);
return -EINVAL;
}
mapping = xa_erase(&context->mappings, args->mapping);
mutex_unlock(&fpriv->lock);
if (!mapping)
return -EINVAL;
tegra_drm_mapping_put(mapping);
return 0;
}
int tegra_drm_ioctl_syncpoint_allocate(struct drm_device *drm, void *data, struct drm_file *file)
{
struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_syncpoint_allocate *args = data;
struct host1x_syncpt *sp;
int err;
if (args->id)
return -EINVAL;
sp = host1x_syncpt_alloc(host1x, HOST1X_SYNCPT_CLIENT_MANAGED, current->comm);
if (!sp)
return -EBUSY;
args->id = host1x_syncpt_id(sp);
err = xa_insert(&fpriv->syncpoints, args->id, sp, GFP_KERNEL);
if (err) {
host1x_syncpt_put(sp);
return err;
}
return 0;
}
int tegra_drm_ioctl_syncpoint_free(struct drm_device *drm, void *data, struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_syncpoint_allocate *args = data;
struct host1x_syncpt *sp;
mutex_lock(&fpriv->lock);
sp = xa_erase(&fpriv->syncpoints, args->id);
mutex_unlock(&fpriv->lock);
if (!sp)
return -EINVAL;
host1x_syncpt_put(sp);
return 0;
}
int tegra_drm_ioctl_syncpoint_wait(struct drm_device *drm, void *data, struct drm_file *file)
{
struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
struct drm_tegra_syncpoint_wait *args = data;
signed long timeout_jiffies;
struct host1x_syncpt *sp;
if (args->padding != 0)
return -EINVAL;
sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
if (!sp)
return -EINVAL;
timeout_jiffies = drm_timeout_abs_to_jiffies(args->timeout_ns);
return host1x_syncpt_wait(sp, args->threshold, timeout_jiffies, &args->value);
}
| linux-master | drivers/gpu/drm/tegra/uapi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 NVIDIA Corporation
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include "mipi-phy.h"
/*
* Default D-PHY timings based on MIPI D-PHY specification. Derived from the
* valid ranges specified in Section 6.9, Table 14, Page 40 of the D-PHY
* specification (v1.2) with minor adjustments.
*/
int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
unsigned long period)
{
timing->clkmiss = 0;
timing->clkpost = 70 + 52 * period;
timing->clkpre = 8;
timing->clkprepare = 65;
timing->clksettle = 95;
timing->clktermen = 0;
timing->clktrail = 80;
timing->clkzero = 260;
timing->dtermen = 0;
timing->eot = 0;
timing->hsexit = 120;
timing->hsprepare = 65 + 5 * period;
timing->hszero = 145 + 5 * period;
timing->hssettle = 85 + 6 * period;
timing->hsskip = 40;
/*
* The MIPI D-PHY specification (Section 6.9, v1.2, Table 14, Page 40)
* contains this formula as:
*
* T_HS-TRAIL = max(n * 8 * period, 60 + n * 4 * period)
*
* where n = 1 for forward-direction HS mode and n = 4 for reverse-
* direction HS mode. There's only one setting and this function does
 * not parameterize on anything other than period, so this code assumes
 * that reverse-direction HS mode is supported and uses n = 4.
*/
timing->hstrail = max(4 * 8 * period, 60 + 4 * 4 * period);
timing->init = 100000;
timing->lpx = 60;
timing->taget = 5 * timing->lpx;
timing->tago = 4 * timing->lpx;
timing->tasure = 2 * timing->lpx;
timing->wakeup = 1000000;
return 0;
}
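/*
 * Worked example (for illustration only, not part of the driver): assuming
 * the period is given in nanoseconds, as the nanosecond-valued constants in
 * this file suggest, a 4 ns bit period yields a default high-speed trail
 * time of
 *
 *   hstrail = max(4 * 8 * 4, 60 + 4 * 4 * 4) = max(128, 124) = 128
 */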
/*
 * Validate D-PHY timing according to the MIPI D-PHY specification (v1.2,
 * Section 6.9 "Global Operation Timing Parameters").
*/
int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing,
unsigned long period)
{
if (timing->clkmiss > 60)
return -EINVAL;
if (timing->clkpost < (60 + 52 * period))
return -EINVAL;
if (timing->clkpre < 8)
return -EINVAL;
if (timing->clkprepare < 38 || timing->clkprepare > 95)
return -EINVAL;
if (timing->clksettle < 95 || timing->clksettle > 300)
return -EINVAL;
if (timing->clktermen > 38)
return -EINVAL;
if (timing->clktrail < 60)
return -EINVAL;
if (timing->clkprepare + timing->clkzero < 300)
return -EINVAL;
if (timing->dtermen > 35 + 4 * period)
return -EINVAL;
if (timing->eot > 105 + 12 * period)
return -EINVAL;
if (timing->hsexit < 100)
return -EINVAL;
if (timing->hsprepare < 40 + 4 * period ||
timing->hsprepare > 85 + 6 * period)
return -EINVAL;
if (timing->hsprepare + timing->hszero < 145 + 10 * period)
return -EINVAL;
if ((timing->hssettle < 85 + 6 * period) ||
(timing->hssettle > 145 + 10 * period))
return -EINVAL;
if (timing->hsskip < 40 || timing->hsskip > 55 + 4 * period)
return -EINVAL;
if (timing->hstrail < max(8 * period, 60 + 4 * period))
return -EINVAL;
if (timing->init < 100000)
return -EINVAL;
if (timing->lpx < 50)
return -EINVAL;
if (timing->taget != 5 * timing->lpx)
return -EINVAL;
if (timing->tago != 4 * timing->lpx)
return -EINVAL;
if (timing->tasure < timing->lpx || timing->tasure > 2 * timing->lpx)
return -EINVAL;
if (timing->wakeup < 1000000)
return -EINVAL;
return 0;
}
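/*
 * Usage sketch (for illustration only): a caller would typically derive the
 * defaults for a given bit period and then validate them using the two
 * helpers above:
 *
 *	struct mipi_dphy_timing timing;
 *	int err;
 *
 *	mipi_dphy_timing_get_default(&timing, period);
 *	err = mipi_dphy_timing_validate(&timing, period);
 *	if (err < 0)
 *		return err;
 */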
| linux-master | drivers/gpu/drm/tegra/mipi-phy.c |
// SPDX-License-Identifier: MIT
/*
* Copyright (C) 2019 NVIDIA Corporation
*/
#include <linux/bug.h>
#include <sound/hda_verbs.h>
#include "hda.h"
void tegra_hda_parse_format(unsigned int format, struct tegra_hda_format *fmt)
{
unsigned int mul, div, bits, channels;
if (format & AC_FMT_TYPE_NON_PCM)
fmt->pcm = false;
else
fmt->pcm = true;
if (format & AC_FMT_BASE_44K)
fmt->sample_rate = 44100;
else
fmt->sample_rate = 48000;
mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
fmt->sample_rate *= (mul + 1) / (div + 1);
switch (format & AC_FMT_BITS_MASK) {
case AC_FMT_BITS_8:
fmt->bits = 8;
break;
case AC_FMT_BITS_16:
fmt->bits = 16;
break;
case AC_FMT_BITS_20:
fmt->bits = 20;
break;
case AC_FMT_BITS_24:
fmt->bits = 24;
break;
case AC_FMT_BITS_32:
fmt->bits = 32;
break;
default:
bits = (format & AC_FMT_BITS_MASK) >> AC_FMT_BITS_SHIFT;
WARN(1, "invalid number of bits: %#x\n", bits);
fmt->bits = 8;
break;
}
channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
/* channels are encoded as n - 1 */
fmt->channels = channels + 1;
}
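/*
 * Worked example (for illustration only): a stream format with the 48 kHz
 * base rate, a multiplier field of 1 (x2), a divisor field of 0 (/1),
 * 16-bit samples and a channel field of 1 decodes to pcm = true,
 * sample_rate = 48000 * 2 / 1 = 96000, bits = 16 and channels = 2.
 */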
| linux-master | drivers/gpu/drm/tegra/hda.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Avionic Design GmbH
* Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif
#include "dc.h"
#include "drm.h"
#include "gem.h"
#include "uapi.h"
#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383
static int tegra_atomic_check(struct drm_device *drm,
struct drm_atomic_state *state)
{
int err;
err = drm_atomic_helper_check(drm, state);
if (err < 0)
return err;
return tegra_display_hub_atomic_check(drm, state);
}
static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
.fb_create = tegra_fb_create,
.atomic_check = tegra_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
static void tegra_atomic_post_commit(struct drm_device *drm,
struct drm_atomic_state *old_state)
{
struct drm_crtc_state *old_crtc_state __maybe_unused;
struct drm_crtc *crtc;
unsigned int i;
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
tegra_crtc_atomic_post_commit(crtc, old_state);
}
static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *drm = old_state->dev;
struct tegra_drm *tegra = drm->dev_private;
if (tegra->hub) {
bool fence_cookie = dma_fence_begin_signalling();
drm_atomic_helper_commit_modeset_disables(drm, old_state);
tegra_display_hub_atomic_commit(drm, old_state);
drm_atomic_helper_commit_planes(drm, old_state, 0);
drm_atomic_helper_commit_modeset_enables(drm, old_state);
drm_atomic_helper_commit_hw_done(old_state);
dma_fence_end_signalling(fence_cookie);
drm_atomic_helper_wait_for_vblanks(drm, old_state);
drm_atomic_helper_cleanup_planes(drm, old_state);
} else {
drm_atomic_helper_commit_tail_rpm(old_state);
}
tegra_atomic_post_commit(drm, old_state);
}
static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
.atomic_commit_tail = tegra_atomic_commit_tail,
};
static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
struct tegra_drm_file *fpriv;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
if (!fpriv)
return -ENOMEM;
idr_init_base(&fpriv->legacy_contexts, 1);
xa_init_flags(&fpriv->contexts, XA_FLAGS_ALLOC1);
xa_init(&fpriv->syncpoints);
mutex_init(&fpriv->lock);
filp->driver_priv = fpriv;
return 0;
}
static void tegra_drm_context_free(struct tegra_drm_context *context)
{
context->client->ops->close_channel(context);
pm_runtime_put(context->client->base.dev);
kfree(context);
}
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
struct drm_tegra_reloc __user *src,
struct drm_device *drm,
struct drm_file *file)
{
u32 cmdbuf, target;
int err;
err = get_user(cmdbuf, &src->cmdbuf.handle);
if (err < 0)
return err;
err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
if (err < 0)
return err;
err = get_user(target, &src->target.handle);
if (err < 0)
return err;
err = get_user(dest->target.offset, &src->target.offset);
if (err < 0)
return err;
err = get_user(dest->shift, &src->shift);
if (err < 0)
return err;
dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
dest->cmdbuf.bo = tegra_gem_lookup(file, cmdbuf);
if (!dest->cmdbuf.bo)
return -ENOENT;
dest->target.bo = tegra_gem_lookup(file, target);
if (!dest->target.bo)
return -ENOENT;
return 0;
}
int tegra_drm_submit(struct tegra_drm_context *context,
struct drm_tegra_submit *args, struct drm_device *drm,
struct drm_file *file)
{
struct host1x_client *client = &context->client->base;
unsigned int num_cmdbufs = args->num_cmdbufs;
unsigned int num_relocs = args->num_relocs;
struct drm_tegra_cmdbuf __user *user_cmdbufs;
struct drm_tegra_reloc __user *user_relocs;
struct drm_tegra_syncpt __user *user_syncpt;
struct drm_tegra_syncpt syncpt;
struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
struct drm_gem_object **refs;
struct host1x_syncpt *sp = NULL;
struct host1x_job *job;
unsigned int num_refs;
int err;
user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
user_relocs = u64_to_user_ptr(args->relocs);
user_syncpt = u64_to_user_ptr(args->syncpts);
/* We don't yet support other than one syncpt_incr struct per submit */
if (args->num_syncpts != 1)
return -EINVAL;
/* We don't yet support waitchks */
if (args->num_waitchks != 0)
return -EINVAL;
job = host1x_job_alloc(context->channel, args->num_cmdbufs,
args->num_relocs, false);
if (!job)
return -ENOMEM;
job->num_relocs = args->num_relocs;
job->client = client;
job->class = client->class;
job->serialize = true;
job->syncpt_recovery = true;
/*
* Track referenced BOs so that they can be unreferenced after the
* submission is complete.
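 * Each command buffer holds one such reference and each relocation
 * holds two (one for the cmdbuf BO, one for the target BO), hence the
 * num_cmdbufs + num_relocs * 2 worst-case count below.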
*/
num_refs = num_cmdbufs + num_relocs * 2;
refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
if (!refs) {
err = -ENOMEM;
goto put;
}
/* reuse as an iterator later */
num_refs = 0;
while (num_cmdbufs) {
struct drm_tegra_cmdbuf cmdbuf;
struct host1x_bo *bo;
struct tegra_bo *obj;
u64 offset;
if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
err = -EFAULT;
goto fail;
}
/*
* The maximum number of CDMA gather fetches is 16383, a higher
 * value means the word count is malformed.
*/
if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
err = -EINVAL;
goto fail;
}
bo = tegra_gem_lookup(file, cmdbuf.handle);
if (!bo) {
err = -ENOENT;
goto fail;
}
offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
obj = host1x_to_tegra_bo(bo);
refs[num_refs++] = &obj->gem;
/*
 * The gather buffer base address must be 4-byte aligned; an unaligned
 * offset is malformed and causes command stream corruption during the
 * buffer address relocation.
*/
if (offset & 3 || offset > obj->gem.size) {
err = -EINVAL;
goto fail;
}
host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
num_cmdbufs--;
user_cmdbufs++;
}
/* copy and resolve relocations from submit */
while (num_relocs--) {
struct host1x_reloc *reloc;
struct tegra_bo *obj;
err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
&user_relocs[num_relocs], drm,
file);
if (err < 0)
goto fail;
reloc = &job->relocs[num_relocs];
obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
refs[num_refs++] = &obj->gem;
/*
 * An unaligned cmdbuf offset would cause an unaligned write during
 * relocation patching, corrupting the command stream.
*/
if (reloc->cmdbuf.offset & 3 ||
reloc->cmdbuf.offset >= obj->gem.size) {
err = -EINVAL;
goto fail;
}
obj = host1x_to_tegra_bo(reloc->target.bo);
refs[num_refs++] = &obj->gem;
if (reloc->target.offset >= obj->gem.size) {
err = -EINVAL;
goto fail;
}
}
if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
err = -EFAULT;
goto fail;
}
/* Syncpoint ref will be dropped on job release. */
sp = host1x_syncpt_get_by_id(host1x, syncpt.id);
if (!sp) {
err = -ENOENT;
goto fail;
}
job->is_addr_reg = context->client->ops->is_addr_reg;
job->is_valid_class = context->client->ops->is_valid_class;
job->syncpt_incrs = syncpt.incrs;
job->syncpt = sp;
job->timeout = 10000;
if (args->timeout && args->timeout < 10000)
job->timeout = args->timeout;
err = host1x_job_pin(job, context->client->base.dev);
if (err)
goto fail;
err = host1x_job_submit(job);
if (err) {
host1x_job_unpin(job);
goto fail;
}
args->fence = job->syncpt_end;
fail:
while (num_refs--)
drm_gem_object_put(refs[num_refs]);
kfree(refs);
put:
host1x_job_put(job);
return err;
}
#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct drm_tegra_gem_create *args = data;
struct tegra_bo *bo;
bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
&args->handle);
if (IS_ERR(bo))
return PTR_ERR(bo);
return 0;
}
static int tegra_gem_mmap(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct drm_tegra_gem_mmap *args = data;
struct drm_gem_object *gem;
struct tegra_bo *bo;
gem = drm_gem_object_lookup(file, args->handle);
if (!gem)
return -EINVAL;
bo = to_tegra_bo(gem);
args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
drm_gem_object_put(gem);
return 0;
}
static int tegra_syncpt_read(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct host1x *host = dev_get_drvdata(drm->dev->parent);
struct drm_tegra_syncpt_read *args = data;
struct host1x_syncpt *sp;
sp = host1x_syncpt_get_by_id_noref(host, args->id);
if (!sp)
return -EINVAL;
args->value = host1x_syncpt_read_min(sp);
return 0;
}
static int tegra_syncpt_incr(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
struct drm_tegra_syncpt_incr *args = data;
struct host1x_syncpt *sp;
sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
if (!sp)
return -EINVAL;
return host1x_syncpt_incr(sp);
}
static int tegra_syncpt_wait(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
struct drm_tegra_syncpt_wait *args = data;
struct host1x_syncpt *sp;
sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
if (!sp)
return -EINVAL;
return host1x_syncpt_wait(sp, args->thresh,
msecs_to_jiffies(args->timeout),
&args->value);
}
static int tegra_client_open(struct tegra_drm_file *fpriv,
struct tegra_drm_client *client,
struct tegra_drm_context *context)
{
int err;
err = pm_runtime_resume_and_get(client->base.dev);
if (err)
return err;
err = client->ops->open_channel(client, context);
if (err < 0) {
pm_runtime_put(client->base.dev);
return err;
}
err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL);
if (err < 0) {
client->ops->close_channel(context);
pm_runtime_put(client->base.dev);
return err;
}
context->client = client;
context->id = err;
return 0;
}
static int tegra_open_channel(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
struct tegra_drm *tegra = drm->dev_private;
struct drm_tegra_open_channel *args = data;
struct tegra_drm_context *context;
struct tegra_drm_client *client;
int err = -ENODEV;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
mutex_lock(&fpriv->lock);
list_for_each_entry(client, &tegra->clients, list)
if (client->base.class == args->client) {
err = tegra_client_open(fpriv, client, context);
if (err < 0)
break;
args->context = context->id;
break;
}
if (err < 0)
kfree(context);
mutex_unlock(&fpriv->lock);
return err;
}
static int tegra_close_channel(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_close_channel *args = data;
struct tegra_drm_context *context;
int err = 0;
mutex_lock(&fpriv->lock);
context = idr_find(&fpriv->legacy_contexts, args->context);
if (!context) {
err = -EINVAL;
goto unlock;
}
idr_remove(&fpriv->legacy_contexts, context->id);
tegra_drm_context_free(context);
unlock:
mutex_unlock(&fpriv->lock);
return err;
}
static int tegra_get_syncpt(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_get_syncpt *args = data;
struct tegra_drm_context *context;
struct host1x_syncpt *syncpt;
int err = 0;
mutex_lock(&fpriv->lock);
context = idr_find(&fpriv->legacy_contexts, args->context);
if (!context) {
err = -ENODEV;
goto unlock;
}
if (args->index >= context->client->base.num_syncpts) {
err = -EINVAL;
goto unlock;
}
syncpt = context->client->base.syncpts[args->index];
args->id = host1x_syncpt_id(syncpt);
unlock:
mutex_unlock(&fpriv->lock);
return err;
}
static int tegra_submit(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_submit *args = data;
struct tegra_drm_context *context;
int err;
mutex_lock(&fpriv->lock);
context = idr_find(&fpriv->legacy_contexts, args->context);
if (!context) {
err = -ENODEV;
goto unlock;
}
err = context->client->ops->submit(context, args, drm, file);
unlock:
mutex_unlock(&fpriv->lock);
return err;
}
static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_get_syncpt_base *args = data;
struct tegra_drm_context *context;
struct host1x_syncpt_base *base;
struct host1x_syncpt *syncpt;
int err = 0;
mutex_lock(&fpriv->lock);
context = idr_find(&fpriv->legacy_contexts, args->context);
if (!context) {
err = -ENODEV;
goto unlock;
}
if (args->syncpt >= context->client->base.num_syncpts) {
err = -EINVAL;
goto unlock;
}
syncpt = context->client->base.syncpts[args->syncpt];
base = host1x_syncpt_get_base(syncpt);
if (!base) {
err = -ENXIO;
goto unlock;
}
args->id = host1x_syncpt_base_id(base);
unlock:
mutex_unlock(&fpriv->lock);
return err;
}
static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct drm_tegra_gem_set_tiling *args = data;
enum tegra_bo_tiling_mode mode;
struct drm_gem_object *gem;
unsigned long value = 0;
struct tegra_bo *bo;
switch (args->mode) {
case DRM_TEGRA_GEM_TILING_MODE_PITCH:
mode = TEGRA_BO_TILING_MODE_PITCH;
if (args->value != 0)
return -EINVAL;
break;
case DRM_TEGRA_GEM_TILING_MODE_TILED:
mode = TEGRA_BO_TILING_MODE_TILED;
if (args->value != 0)
return -EINVAL;
break;
case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
mode = TEGRA_BO_TILING_MODE_BLOCK;
if (args->value > 5)
return -EINVAL;
value = args->value;
break;
default:
return -EINVAL;
}
gem = drm_gem_object_lookup(file, args->handle);
if (!gem)
return -ENOENT;
bo = to_tegra_bo(gem);
bo->tiling.mode = mode;
bo->tiling.value = value;
drm_gem_object_put(gem);
return 0;
}
static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct drm_tegra_gem_get_tiling *args = data;
struct drm_gem_object *gem;
struct tegra_bo *bo;
int err = 0;
gem = drm_gem_object_lookup(file, args->handle);
if (!gem)
return -ENOENT;
bo = to_tegra_bo(gem);
switch (bo->tiling.mode) {
case TEGRA_BO_TILING_MODE_PITCH:
args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
args->value = 0;
break;
case TEGRA_BO_TILING_MODE_TILED:
args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
args->value = 0;
break;
case TEGRA_BO_TILING_MODE_BLOCK:
args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
args->value = bo->tiling.value;
break;
default:
err = -EINVAL;
break;
}
drm_gem_object_put(gem);
return err;
}
static int tegra_gem_set_flags(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct drm_tegra_gem_set_flags *args = data;
struct drm_gem_object *gem;
struct tegra_bo *bo;
if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
return -EINVAL;
gem = drm_gem_object_lookup(file, args->handle);
if (!gem)
return -ENOENT;
bo = to_tegra_bo(gem);
bo->flags = 0;
if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
bo->flags |= TEGRA_BO_BOTTOM_UP;
drm_gem_object_put(gem);
return 0;
}
static int tegra_gem_get_flags(struct drm_device *drm, void *data,
struct drm_file *file)
{
struct drm_tegra_gem_get_flags *args = data;
struct drm_gem_object *gem;
struct tegra_bo *bo;
gem = drm_gem_object_lookup(file, args->handle);
if (!gem)
return -ENOENT;
bo = to_tegra_bo(gem);
args->flags = 0;
if (bo->flags & TEGRA_BO_BOTTOM_UP)
args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
drm_gem_object_put(gem);
return 0;
}
#endif
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_OPEN, tegra_drm_ioctl_channel_open,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_CLOSE, tegra_drm_ioctl_channel_close,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_MAP, tegra_drm_ioctl_channel_map,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_UNMAP, tegra_drm_ioctl_channel_unmap,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_SUBMIT, tegra_drm_ioctl_channel_submit,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_ALLOCATE, tegra_drm_ioctl_syncpoint_allocate,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_FREE, tegra_drm_ioctl_syncpoint_free,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_WAIT, tegra_drm_ioctl_syncpoint_wait,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
DRM_RENDER_ALLOW),
#endif
};
static const struct file_operations tegra_drm_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = tegra_drm_mmap,
.poll = drm_poll,
.read = drm_read,
.compat_ioctl = drm_compat_ioctl,
.llseek = noop_llseek,
};
static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
struct tegra_drm_context *context = p;
tegra_drm_context_free(context);
return 0;
}
static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
mutex_lock(&fpriv->lock);
idr_for_each(&fpriv->legacy_contexts, tegra_drm_context_cleanup, NULL);
tegra_drm_uapi_close_file(fpriv);
mutex_unlock(&fpriv->lock);
idr_destroy(&fpriv->legacy_contexts);
mutex_destroy(&fpriv->lock);
kfree(fpriv);
}
#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct drm_device *drm = node->minor->dev;
struct drm_framebuffer *fb;
mutex_lock(&drm->mode_config.fb_lock);
list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
fb->base.id, fb->width, fb->height,
fb->format->depth,
fb->format->cpp[0] * 8,
drm_framebuffer_read_refcount(fb));
}
mutex_unlock(&drm->mode_config.fb_lock);
return 0;
}
static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct drm_device *drm = node->minor->dev;
struct tegra_drm *tegra = drm->dev_private;
struct drm_printer p = drm_seq_file_printer(s);
if (tegra->domain) {
mutex_lock(&tegra->mm_lock);
drm_mm_print(&tegra->mm, &p);
mutex_unlock(&tegra->mm_lock);
}
return 0;
}
static struct drm_info_list tegra_debugfs_list[] = {
{ "framebuffers", tegra_debugfs_framebuffers, 0 },
{ "iova", tegra_debugfs_iova, 0 },
};
static void tegra_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_create_files(tegra_debugfs_list,
ARRAY_SIZE(tegra_debugfs_list),
minor->debugfs_root, minor);
}
#endif
static const struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC | DRIVER_RENDER | DRIVER_SYNCOBJ,
.open = tegra_drm_open,
.postclose = tegra_drm_postclose,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = tegra_debugfs_init,
#endif
.gem_prime_import = tegra_gem_prime_import,
.dumb_create = tegra_bo_dumb_create,
.ioctls = tegra_drm_ioctls,
.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
.fops = &tegra_drm_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
int tegra_drm_register_client(struct tegra_drm *tegra,
struct tegra_drm_client *client)
{
/*
* When MLOCKs are implemented, change to allocate a shared channel
* only when MLOCKs are disabled.
*/
client->shared_channel = host1x_channel_request(&client->base);
if (!client->shared_channel)
return -EBUSY;
mutex_lock(&tegra->clients_lock);
list_add_tail(&client->list, &tegra->clients);
client->drm = tegra;
mutex_unlock(&tegra->clients_lock);
return 0;
}
int tegra_drm_unregister_client(struct tegra_drm *tegra,
struct tegra_drm_client *client)
{
mutex_lock(&tegra->clients_lock);
list_del_init(&client->list);
client->drm = NULL;
mutex_unlock(&tegra->clients_lock);
if (client->shared_channel)
host1x_channel_put(client->shared_channel);
return 0;
}
int host1x_client_iommu_attach(struct host1x_client *client)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_group *group = NULL;
int err;
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
if (client->dev->archdata.mapping) {
struct dma_iommu_mapping *mapping =
to_dma_iommu_mapping(client->dev);
arm_iommu_detach_device(client->dev);
arm_iommu_release_mapping(mapping);
domain = iommu_get_domain_for_dev(client->dev);
}
#endif
/*
* If the host1x client is already attached to an IOMMU domain that is
* not the shared IOMMU domain, don't try to attach it to a different
* domain. This allows using the IOMMU-backed DMA API.
*/
if (domain && domain != tegra->domain)
return 0;
if (tegra->domain) {
group = iommu_group_get(client->dev);
if (!group)
return -ENODEV;
if (domain != tegra->domain) {
err = iommu_attach_group(tegra->domain, group);
if (err < 0) {
iommu_group_put(group);
return err;
}
}
tegra->use_explicit_iommu = true;
}
client->group = group;
return 0;
}
void host1x_client_iommu_detach(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_domain *domain;
if (client->group) {
/*
* Devices that are part of the same group may no longer be
* attached to a domain at this point because their group may
* have been detached by an earlier client.
*/
domain = iommu_get_domain_for_dev(client->dev);
if (domain)
iommu_detach_group(tegra->domain, client->group);
iommu_group_put(client->group);
client->group = NULL;
}
}
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
struct iova *alloc;
void *virt;
gfp_t gfp;
int err;
if (tegra->domain)
size = iova_align(&tegra->carveout.domain, size);
else
size = PAGE_ALIGN(size);
gfp = GFP_KERNEL | __GFP_ZERO;
if (!tegra->domain) {
/*
* Many units only support 32-bit addresses, even on 64-bit
* SoCs. If there is no IOMMU to translate into a 32-bit IO
* virtual address space, force allocations to be in the
* lower 32-bit range.
*/
gfp |= GFP_DMA;
}
virt = (void *)__get_free_pages(gfp, get_order(size));
if (!virt)
return ERR_PTR(-ENOMEM);
if (!tegra->domain) {
/*
* If IOMMU is disabled, devices address physical memory
* directly.
*/
*dma = virt_to_phys(virt);
return virt;
}
alloc = alloc_iova(&tegra->carveout.domain,
size >> tegra->carveout.shift,
tegra->carveout.limit, true);
if (!alloc) {
err = -EBUSY;
goto free_pages;
}
*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
size, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
if (err < 0)
goto free_iova;
return virt;
free_iova:
__free_iova(&tegra->carveout.domain, alloc);
free_pages:
free_pages((unsigned long)virt, get_order(size));
return ERR_PTR(err);
}
void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
dma_addr_t dma)
{
if (tegra->domain)
size = iova_align(&tegra->carveout.domain, size);
else
size = PAGE_ALIGN(size);
if (tegra->domain) {
iommu_unmap(tegra->domain, dma, size);
free_iova(&tegra->carveout.domain,
iova_pfn(&tegra->carveout.domain, dma));
}
free_pages((unsigned long)virt, get_order(size));
}
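/*
 * Usage sketch (for illustration only) for the two helpers above: callers
 * pair tegra_drm_alloc() with tegra_drm_free(), passing back the same size
 * and the DMA address returned by the allocation:
 *
 *	dma_addr_t dma;
 *	void *virt;
 *
 *	virt = tegra_drm_alloc(tegra, size, &dma);
 *	if (IS_ERR(virt))
 *		return PTR_ERR(virt);
 *	...
 *	tegra_drm_free(tegra, size, virt, dma);
 */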
static bool host1x_drm_wants_iommu(struct host1x_device *dev)
{
struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
struct iommu_domain *domain;
/* Our IOMMU usage policy doesn't currently play well with GART */
if (of_machine_is_compatible("nvidia,tegra20"))
return false;
/*
* If the Tegra DRM clients are backed by an IOMMU, push buffers are
* likely to be allocated beyond the 32-bit boundary if sufficient
* system memory is available. This is problematic on earlier Tegra
* generations where host1x supports a maximum of 32 address bits in
* the GATHER opcode. In this case, unless host1x is behind an IOMMU
* as well it won't be able to process buffers allocated beyond the
* 32-bit boundary.
*
* The DMA API will use bounce buffers in this case, so that could
* perhaps still be made to work, even if less efficient, but there
* is another catch: in order to perform cache maintenance on pages
* allocated for discontiguous buffers we need to map and unmap the
* SG table representing these buffers. This is fine for something
* small like a push buffer, but it exhausts the bounce buffer pool
* (typically on the order of a few MiB) for framebuffers (many MiB
* for any modern resolution).
*
* Work around this by making sure that Tegra DRM clients only use
* an IOMMU if the parent host1x also uses an IOMMU.
*
* Note that there's still a small gap here that we don't cover: if
* the DMA API is backed by an IOMMU there's no way to control which
* device is attached to an IOMMU and which isn't, except via wiring
 * up the device tree appropriately. This is considered a problem
* of integration, so care must be taken for the DT to be consistent.
*/
domain = iommu_get_domain_for_dev(dev->dev.parent);
/*
* Tegra20 and Tegra30 don't support addressing memory beyond the
* 32-bit boundary, so the regular GATHER opcodes will always be
* sufficient and whether or not the host1x is attached to an IOMMU
* doesn't matter.
*/
if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
return true;
return domain != NULL;
}
static int host1x_drm_probe(struct host1x_device *dev)
{
struct tegra_drm *tegra;
struct drm_device *drm;
int err;
drm = drm_dev_alloc(&tegra_drm_driver, &dev->dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
if (!tegra) {
err = -ENOMEM;
goto put;
}
if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
tegra->domain = iommu_domain_alloc(&platform_bus_type);
if (!tegra->domain) {
err = -ENOMEM;
goto free;
}
err = iova_cache_get();
if (err < 0)
goto domain;
}
mutex_init(&tegra->clients_lock);
INIT_LIST_HEAD(&tegra->clients);
dev_set_drvdata(&dev->dev, drm);
drm->dev_private = tegra;
tegra->drm = drm;
drm_mode_config_init(drm);
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = 0;
drm->mode_config.max_height = 0;
drm->mode_config.normalize_zpos = true;
drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
drm_kms_helper_poll_init(drm);
err = host1x_device_init(dev);
if (err < 0)
goto poll;
/*
 * Now that all display controllers have been initialized, the maximum
* supported resolution is known and the bitmask for horizontal and
* vertical bitfields can be computed.
*/
tegra->hmask = drm->mode_config.max_width - 1;
tegra->vmask = drm->mode_config.max_height - 1;
if (tegra->use_explicit_iommu) {
u64 carveout_start, carveout_end, gem_start, gem_end;
u64 dma_mask = dma_get_mask(&dev->dev);
dma_addr_t start, end;
unsigned long order;
start = tegra->domain->geometry.aperture_start & dma_mask;
end = tegra->domain->geometry.aperture_end & dma_mask;
gem_start = start;
gem_end = end - CARVEOUT_SZ;
carveout_start = gem_end + 1;
carveout_end = end;
order = __ffs(tegra->domain->pgsize_bitmap);
init_iova_domain(&tegra->carveout.domain, 1UL << order,
carveout_start >> order);
tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
mutex_init(&tegra->mm_lock);
DRM_DEBUG_DRIVER("IOMMU apertures:\n");
DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end);
DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start,
carveout_end);
} else if (tegra->domain) {
iommu_domain_free(tegra->domain);
tegra->domain = NULL;
iova_cache_put();
}
if (tegra->hub) {
err = tegra_display_hub_prepare(tegra->hub);
if (err < 0)
goto device;
}
/* syncpoints are used for full 32-bit hardware VBLANK counters */
drm->max_vblank_count = 0xffffffff;
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (err < 0)
goto hub;
drm_mode_config_reset(drm);
err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
if (err < 0)
goto hub;
err = drm_dev_register(drm, 0);
if (err < 0)
goto hub;
tegra_fbdev_setup(drm);
return 0;
hub:
if (tegra->hub)
tegra_display_hub_cleanup(tegra->hub);
device:
if (tegra->domain) {
mutex_destroy(&tegra->mm_lock);
drm_mm_takedown(&tegra->mm);
put_iova_domain(&tegra->carveout.domain);
iova_cache_put();
}
host1x_device_exit(dev);
poll:
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
domain:
if (tegra->domain)
iommu_domain_free(tegra->domain);
free:
kfree(tegra);
put:
drm_dev_put(drm);
return err;
}
static int host1x_drm_remove(struct host1x_device *dev)
{
struct drm_device *drm = dev_get_drvdata(&dev->dev);
struct tegra_drm *tegra = drm->dev_private;
int err;
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
if (tegra->hub)
tegra_display_hub_cleanup(tegra->hub);
err = host1x_device_exit(dev);
if (err < 0)
dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);
if (tegra->domain) {
mutex_destroy(&tegra->mm_lock);
drm_mm_takedown(&tegra->mm);
put_iova_domain(&tegra->carveout.domain);
iova_cache_put();
iommu_domain_free(tegra->domain);
}
kfree(tegra);
drm_dev_put(drm);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(drm);
}
static int host1x_drm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
return drm_mode_config_helper_resume(drm);
}
#endif
static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
host1x_drm_resume);
static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra20-dc", },
{ .compatible = "nvidia,tegra20-hdmi", },
{ .compatible = "nvidia,tegra20-gr2d", },
{ .compatible = "nvidia,tegra20-gr3d", },
{ .compatible = "nvidia,tegra30-dc", },
{ .compatible = "nvidia,tegra30-hdmi", },
{ .compatible = "nvidia,tegra30-gr2d", },
{ .compatible = "nvidia,tegra30-gr3d", },
{ .compatible = "nvidia,tegra114-dc", },
{ .compatible = "nvidia,tegra114-dsi", },
{ .compatible = "nvidia,tegra114-hdmi", },
{ .compatible = "nvidia,tegra114-gr2d", },
{ .compatible = "nvidia,tegra114-gr3d", },
{ .compatible = "nvidia,tegra124-dc", },
{ .compatible = "nvidia,tegra124-sor", },
{ .compatible = "nvidia,tegra124-hdmi", },
{ .compatible = "nvidia,tegra124-dsi", },
{ .compatible = "nvidia,tegra124-vic", },
{ .compatible = "nvidia,tegra132-dsi", },
{ .compatible = "nvidia,tegra210-dc", },
{ .compatible = "nvidia,tegra210-dsi", },
{ .compatible = "nvidia,tegra210-sor", },
{ .compatible = "nvidia,tegra210-sor1", },
{ .compatible = "nvidia,tegra210-vic", },
{ .compatible = "nvidia,tegra210-nvdec", },
{ .compatible = "nvidia,tegra186-display", },
{ .compatible = "nvidia,tegra186-dc", },
{ .compatible = "nvidia,tegra186-sor", },
{ .compatible = "nvidia,tegra186-sor1", },
{ .compatible = "nvidia,tegra186-vic", },
{ .compatible = "nvidia,tegra186-nvdec", },
{ .compatible = "nvidia,tegra194-display", },
{ .compatible = "nvidia,tegra194-dc", },
{ .compatible = "nvidia,tegra194-sor", },
{ .compatible = "nvidia,tegra194-vic", },
{ .compatible = "nvidia,tegra194-nvdec", },
{ .compatible = "nvidia,tegra234-vic", },
{ .compatible = "nvidia,tegra234-nvdec", },
{ /* sentinel */ }
};
static struct host1x_driver host1x_drm_driver = {
.driver = {
.name = "drm",
.pm = &host1x_drm_pm_ops,
},
.probe = host1x_drm_probe,
.remove = host1x_drm_remove,
.subdevs = host1x_drm_subdevs,
};
static struct platform_driver * const drivers[] = {
&tegra_display_hub_driver,
&tegra_dc_driver,
&tegra_hdmi_driver,
&tegra_dsi_driver,
&tegra_dpaux_driver,
&tegra_sor_driver,
&tegra_gr2d_driver,
&tegra_gr3d_driver,
&tegra_vic_driver,
&tegra_nvdec_driver,
};
static int __init host1x_drm_init(void)
{
int err;
if (drm_firmware_drivers_only())
return -ENODEV;
err = host1x_driver_register(&host1x_drm_driver);
if (err < 0)
return err;
err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
if (err < 0)
goto unregister_host1x;
return 0;
unregister_host1x:
host1x_driver_unregister(&host1x_drm_driver);
return err;
}
module_init(host1x_drm_init);
static void __exit host1x_drm_exit(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);
MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/tegra/drm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 NVIDIA Corporation
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/workqueue.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/drm_panel.h>
#include "dp.h"
#include "dpaux.h"
#include "drm.h"
#include "trace.h"
static DEFINE_MUTEX(dpaux_lock);
static LIST_HEAD(dpaux_list);
struct tegra_dpaux_soc {
unsigned int cmh;
unsigned int drvz;
unsigned int drvi;
};
struct tegra_dpaux {
struct drm_dp_aux aux;
struct device *dev;
const struct tegra_dpaux_soc *soc;
void __iomem *regs;
int irq;
struct tegra_output *output;
struct reset_control *rst;
struct clk *clk_parent;
struct clk *clk;
struct regulator *vdd;
struct completion complete;
struct work_struct work;
struct list_head list;
#ifdef CONFIG_GENERIC_PINCONF
struct pinctrl_dev *pinctrl;
struct pinctrl_desc desc;
#endif
};
static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux)
{
return container_of(aux, struct tegra_dpaux, aux);
}
static inline struct tegra_dpaux *work_to_dpaux(struct work_struct *work)
{
return container_of(work, struct tegra_dpaux, work);
}
static inline u32 tegra_dpaux_readl(struct tegra_dpaux *dpaux,
unsigned int offset)
{
u32 value = readl(dpaux->regs + (offset << 2));
trace_dpaux_readl(dpaux->dev, offset, value);
return value;
}
static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux,
u32 value, unsigned int offset)
{
trace_dpaux_writel(dpaux->dev, offset, value);
writel(value, dpaux->regs + (offset << 2));
}
static void tegra_dpaux_write_fifo(struct tegra_dpaux *dpaux, const u8 *buffer,
size_t size)
{
size_t i, j;
for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
size_t num = min_t(size_t, size - i * 4, 4);
u32 value = 0;
for (j = 0; j < num; j++)
value |= buffer[i * 4 + j] << (j * 8);
tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXDATA_WRITE(i));
}
}
static void tegra_dpaux_read_fifo(struct tegra_dpaux *dpaux, u8 *buffer,
size_t size)
{
size_t i, j;
for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
size_t num = min_t(size_t, size - i * 4, 4);
u32 value;
value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXDATA_READ(i));
for (j = 0; j < num; j++)
buffer[i * 4 + j] = value >> (j * 8);
}
}
static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
unsigned long timeout = msecs_to_jiffies(250);
struct tegra_dpaux *dpaux = to_dpaux(aux);
unsigned long status;
ssize_t ret = 0;
u8 reply = 0;
u32 value;
/* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. */
if (msg->size > 16)
return -EINVAL;
/*
* Allow zero-sized messages only for I2C, in which case they specify
* address-only transactions.
*/
if (msg->size < 1) {
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_I2C_WRITE_STATUS_UPDATE:
case DP_AUX_I2C_WRITE:
case DP_AUX_I2C_READ:
value = DPAUX_DP_AUXCTL_CMD_ADDRESS_ONLY;
break;
default:
return -EINVAL;
}
} else {
/* For non-zero-sized messages, set the CMDLEN field. */
value = DPAUX_DP_AUXCTL_CMDLEN(msg->size - 1);
}
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_I2C_WRITE:
if (msg->request & DP_AUX_I2C_MOT)
value |= DPAUX_DP_AUXCTL_CMD_MOT_WR;
else
value |= DPAUX_DP_AUXCTL_CMD_I2C_WR;
break;
case DP_AUX_I2C_READ:
if (msg->request & DP_AUX_I2C_MOT)
value |= DPAUX_DP_AUXCTL_CMD_MOT_RD;
else
value |= DPAUX_DP_AUXCTL_CMD_I2C_RD;
break;
case DP_AUX_I2C_WRITE_STATUS_UPDATE:
if (msg->request & DP_AUX_I2C_MOT)
value |= DPAUX_DP_AUXCTL_CMD_MOT_RQ;
else
value |= DPAUX_DP_AUXCTL_CMD_I2C_RQ;
break;
case DP_AUX_NATIVE_WRITE:
value |= DPAUX_DP_AUXCTL_CMD_AUX_WR;
break;
case DP_AUX_NATIVE_READ:
value |= DPAUX_DP_AUXCTL_CMD_AUX_RD;
break;
default:
return -EINVAL;
}
tegra_dpaux_writel(dpaux, msg->address, DPAUX_DP_AUXADDR);
tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL);
if ((msg->request & DP_AUX_I2C_READ) == 0) {
tegra_dpaux_write_fifo(dpaux, msg->buffer, msg->size);
ret = msg->size;
}
/* start transaction */
value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXCTL);
value |= DPAUX_DP_AUXCTL_TRANSACTREQ;
tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL);
status = wait_for_completion_timeout(&dpaux->complete, timeout);
if (!status)
return -ETIMEDOUT;
/* read status and clear errors */
value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);
tegra_dpaux_writel(dpaux, 0xf00, DPAUX_DP_AUXSTAT);
if (value & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR)
return -ETIMEDOUT;
if ((value & DPAUX_DP_AUXSTAT_RX_ERROR) ||
(value & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR) ||
(value & DPAUX_DP_AUXSTAT_NO_STOP_ERROR))
return -EIO;
switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) {
case 0x00:
reply = DP_AUX_NATIVE_REPLY_ACK;
break;
case 0x01:
reply = DP_AUX_NATIVE_REPLY_NACK;
break;
case 0x02:
reply = DP_AUX_NATIVE_REPLY_DEFER;
break;
case 0x04:
reply = DP_AUX_I2C_REPLY_NACK;
break;
case 0x08:
reply = DP_AUX_I2C_REPLY_DEFER;
break;
}
if ((msg->size > 0) && (msg->reply == DP_AUX_NATIVE_REPLY_ACK)) {
if (msg->request & DP_AUX_I2C_READ) {
size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK;
/*
* There might be a smarter way to do this, but since
* the DP helpers will already retry transactions for
* an -EBUSY return value, simply reuse that instead.
*/
if (count != msg->size) {
ret = -EBUSY;
goto out;
}
tegra_dpaux_read_fifo(dpaux, msg->buffer, count);
ret = count;
}
}
msg->reply = reply;
out:
return ret;
}
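/*
 * Illustrative note (not part of the driver): since the transmit and receive
 * FIFOs hold 4 x 4 bytes, tegra_dpaux_transfer() rejects messages larger
 * than 16 bytes; the DRM DP AUX helpers normally split larger DPCD accesses
 * into chunks of at most that size.
 */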
static void tegra_dpaux_hotplug(struct work_struct *work)
{
struct tegra_dpaux *dpaux = work_to_dpaux(work);
if (dpaux->output)
drm_helper_hpd_irq_event(dpaux->output->connector.dev);
}
static irqreturn_t tegra_dpaux_irq(int irq, void *data)
{
struct tegra_dpaux *dpaux = data;
u32 value;
/* clear interrupts */
value = tegra_dpaux_readl(dpaux, DPAUX_INTR_AUX);
tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX);
if (value & (DPAUX_INTR_PLUG_EVENT | DPAUX_INTR_UNPLUG_EVENT))
schedule_work(&dpaux->work);
if (value & DPAUX_INTR_IRQ_EVENT) {
/* TODO: handle this */
}
if (value & DPAUX_INTR_AUX_DONE)
complete(&dpaux->complete);
return IRQ_HANDLED;
}
enum tegra_dpaux_functions {
DPAUX_PADCTL_FUNC_AUX,
DPAUX_PADCTL_FUNC_I2C,
DPAUX_PADCTL_FUNC_OFF,
};
static void tegra_dpaux_pad_power_down(struct tegra_dpaux *dpaux)
{
u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
}
static void tegra_dpaux_pad_power_up(struct tegra_dpaux *dpaux)
{
u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
}
static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
{
u32 value;
switch (function) {
case DPAUX_PADCTL_FUNC_AUX:
value = DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
DPAUX_HYBRID_PADCTL_MODE_AUX;
break;
case DPAUX_PADCTL_FUNC_I2C:
value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_MODE_I2C;
break;
case DPAUX_PADCTL_FUNC_OFF:
tegra_dpaux_pad_power_down(dpaux);
return 0;
default:
return -ENOTSUPP;
}
tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
tegra_dpaux_pad_power_up(dpaux);
return 0;
}
#ifdef CONFIG_GENERIC_PINCONF
static const struct pinctrl_pin_desc tegra_dpaux_pins[] = {
PINCTRL_PIN(0, "DP_AUX_CHx_P"),
PINCTRL_PIN(1, "DP_AUX_CHx_N"),
};
static const unsigned tegra_dpaux_pin_numbers[] = { 0, 1 };
static const char * const tegra_dpaux_groups[] = {
"dpaux-io",
};
static const char * const tegra_dpaux_functions[] = {
"aux",
"i2c",
"off",
};
static int tegra_dpaux_get_groups_count(struct pinctrl_dev *pinctrl)
{
return ARRAY_SIZE(tegra_dpaux_groups);
}
static const char *tegra_dpaux_get_group_name(struct pinctrl_dev *pinctrl,
unsigned int group)
{
return tegra_dpaux_groups[group];
}
static int tegra_dpaux_get_group_pins(struct pinctrl_dev *pinctrl,
unsigned group, const unsigned **pins,
unsigned *num_pins)
{
*pins = tegra_dpaux_pin_numbers;
*num_pins = ARRAY_SIZE(tegra_dpaux_pin_numbers);
return 0;
}
static const struct pinctrl_ops tegra_dpaux_pinctrl_ops = {
.get_groups_count = tegra_dpaux_get_groups_count,
.get_group_name = tegra_dpaux_get_group_name,
.get_group_pins = tegra_dpaux_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
.dt_free_map = pinconf_generic_dt_free_map,
};
static int tegra_dpaux_get_functions_count(struct pinctrl_dev *pinctrl)
{
return ARRAY_SIZE(tegra_dpaux_functions);
}
static const char *tegra_dpaux_get_function_name(struct pinctrl_dev *pinctrl,
unsigned int function)
{
return tegra_dpaux_functions[function];
}
static int tegra_dpaux_get_function_groups(struct pinctrl_dev *pinctrl,
unsigned int function,
const char * const **groups,
unsigned * const num_groups)
{
*num_groups = ARRAY_SIZE(tegra_dpaux_groups);
*groups = tegra_dpaux_groups;
return 0;
}
static int tegra_dpaux_set_mux(struct pinctrl_dev *pinctrl,
unsigned int function, unsigned int group)
{
struct tegra_dpaux *dpaux = pinctrl_dev_get_drvdata(pinctrl);
return tegra_dpaux_pad_config(dpaux, function);
}
static const struct pinmux_ops tegra_dpaux_pinmux_ops = {
.get_functions_count = tegra_dpaux_get_functions_count,
.get_function_name = tegra_dpaux_get_function_name,
.get_function_groups = tegra_dpaux_get_function_groups,
.set_mux = tegra_dpaux_set_mux,
};
#endif
static int tegra_dpaux_probe(struct platform_device *pdev)
{
struct tegra_dpaux *dpaux;
u32 value;
int err;
dpaux = devm_kzalloc(&pdev->dev, sizeof(*dpaux), GFP_KERNEL);
if (!dpaux)
return -ENOMEM;
dpaux->soc = of_device_get_match_data(&pdev->dev);
INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);
init_completion(&dpaux->complete);
INIT_LIST_HEAD(&dpaux->list);
dpaux->dev = &pdev->dev;
dpaux->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dpaux->regs))
return PTR_ERR(dpaux->regs);
dpaux->irq = platform_get_irq(pdev, 0);
if (dpaux->irq < 0)
return dpaux->irq;
if (!pdev->dev.pm_domain) {
dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
if (IS_ERR(dpaux->rst)) {
dev_err(&pdev->dev,
"failed to get reset control: %ld\n",
PTR_ERR(dpaux->rst));
return PTR_ERR(dpaux->rst);
}
}
dpaux->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dpaux->clk)) {
dev_err(&pdev->dev, "failed to get module clock: %ld\n",
PTR_ERR(dpaux->clk));
return PTR_ERR(dpaux->clk);
}
dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(dpaux->clk_parent)) {
dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
PTR_ERR(dpaux->clk_parent));
return PTR_ERR(dpaux->clk_parent);
}
err = clk_set_rate(dpaux->clk_parent, 270000000);
if (err < 0) {
dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n",
err);
return err;
}
dpaux->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
if (IS_ERR(dpaux->vdd)) {
if (PTR_ERR(dpaux->vdd) != -ENODEV) {
if (PTR_ERR(dpaux->vdd) != -EPROBE_DEFER)
dev_err(&pdev->dev,
"failed to get VDD supply: %ld\n",
PTR_ERR(dpaux->vdd));
return PTR_ERR(dpaux->vdd);
}
dpaux->vdd = NULL;
}
platform_set_drvdata(pdev, dpaux);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
dev_name(dpaux->dev), dpaux);
if (err < 0) {
dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
dpaux->irq, err);
return err;
}
disable_irq(dpaux->irq);
dpaux->aux.transfer = tegra_dpaux_transfer;
dpaux->aux.dev = &pdev->dev;
drm_dp_aux_init(&dpaux->aux);
/*
* Assume that by default the DPAUX/I2C pads will be used for HDMI,
* so power them up and configure them in I2C mode.
*
* The DPAUX code paths reconfigure the pads in AUX mode, but there
* is no possibility to perform the I2C mode configuration in the
* HDMI path.
*/
err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C);
if (err < 0)
return err;
#ifdef CONFIG_GENERIC_PINCONF
dpaux->desc.name = dev_name(&pdev->dev);
dpaux->desc.pins = tegra_dpaux_pins;
dpaux->desc.npins = ARRAY_SIZE(tegra_dpaux_pins);
dpaux->desc.pctlops = &tegra_dpaux_pinctrl_ops;
dpaux->desc.pmxops = &tegra_dpaux_pinmux_ops;
dpaux->desc.owner = THIS_MODULE;
dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
if (IS_ERR(dpaux->pinctrl)) {
dev_err(&pdev->dev, "failed to register pincontrol\n");
return PTR_ERR(dpaux->pinctrl);
}
#endif
/* enable and clear all interrupts */
value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
tegra_dpaux_writel(dpaux, value, DPAUX_INTR_EN_AUX);
tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX);
mutex_lock(&dpaux_lock);
list_add_tail(&dpaux->list, &dpaux_list);
mutex_unlock(&dpaux_lock);
err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux);
if (err < 0) {
dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err);
return err;
}
return 0;
}
static void tegra_dpaux_remove(struct platform_device *pdev)
{
struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
cancel_work_sync(&dpaux->work);
/* make sure pads are powered down when not in use */
tegra_dpaux_pad_power_down(dpaux);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
mutex_lock(&dpaux_lock);
list_del(&dpaux->list);
mutex_unlock(&dpaux_lock);
}
static int tegra_dpaux_suspend(struct device *dev)
{
struct tegra_dpaux *dpaux = dev_get_drvdata(dev);
int err = 0;
if (dpaux->rst) {
err = reset_control_assert(dpaux->rst);
if (err < 0) {
dev_err(dev, "failed to assert reset: %d\n", err);
return err;
}
}
usleep_range(1000, 2000);
clk_disable_unprepare(dpaux->clk_parent);
clk_disable_unprepare(dpaux->clk);
return err;
}
static int tegra_dpaux_resume(struct device *dev)
{
struct tegra_dpaux *dpaux = dev_get_drvdata(dev);
int err;
err = clk_prepare_enable(dpaux->clk);
if (err < 0) {
dev_err(dev, "failed to enable clock: %d\n", err);
return err;
}
err = clk_prepare_enable(dpaux->clk_parent);
if (err < 0) {
dev_err(dev, "failed to enable parent clock: %d\n", err);
goto disable_clk;
}
usleep_range(1000, 2000);
if (dpaux->rst) {
err = reset_control_deassert(dpaux->rst);
if (err < 0) {
dev_err(dev, "failed to deassert reset: %d\n", err);
goto disable_parent;
}
usleep_range(1000, 2000);
}
return 0;
disable_parent:
clk_disable_unprepare(dpaux->clk_parent);
disable_clk:
clk_disable_unprepare(dpaux->clk);
return err;
}
static const struct dev_pm_ops tegra_dpaux_pm_ops = {
RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL)
};
static const struct tegra_dpaux_soc tegra124_dpaux_soc = {
.cmh = 0x02,
.drvz = 0x04,
.drvi = 0x18,
};
static const struct tegra_dpaux_soc tegra210_dpaux_soc = {
.cmh = 0x02,
.drvz = 0x04,
.drvi = 0x30,
};
static const struct tegra_dpaux_soc tegra194_dpaux_soc = {
.cmh = 0x02,
.drvz = 0x04,
.drvi = 0x2c,
};
static const struct of_device_id tegra_dpaux_of_match[] = {
{ .compatible = "nvidia,tegra194-dpaux", .data = &tegra194_dpaux_soc },
{ .compatible = "nvidia,tegra186-dpaux", .data = &tegra210_dpaux_soc },
{ .compatible = "nvidia,tegra210-dpaux", .data = &tegra210_dpaux_soc },
{ .compatible = "nvidia,tegra124-dpaux", .data = &tegra124_dpaux_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
struct platform_driver tegra_dpaux_driver = {
.driver = {
.name = "tegra-dpaux",
.of_match_table = tegra_dpaux_of_match,
.pm = pm_ptr(&tegra_dpaux_pm_ops),
},
.probe = tegra_dpaux_probe,
.remove_new = tegra_dpaux_remove,
};
struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np)
{
struct tegra_dpaux *dpaux;
mutex_lock(&dpaux_lock);
list_for_each_entry(dpaux, &dpaux_list, list)
if (np == dpaux->dev->of_node) {
mutex_unlock(&dpaux_lock);
return &dpaux->aux;
}
mutex_unlock(&dpaux_lock);
return NULL;
}
int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
{
struct tegra_dpaux *dpaux = to_dpaux(aux);
unsigned long timeout;
int err;
aux->drm_dev = output->connector.dev;
err = drm_dp_aux_register(aux);
if (err < 0)
return err;
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
dpaux->output = output;
if (output->panel) {
enum drm_connector_status status;
if (dpaux->vdd) {
err = regulator_enable(dpaux->vdd);
if (err < 0)
return err;
}
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
status = drm_dp_aux_detect(aux);
if (status == connector_status_connected)
break;
usleep_range(1000, 2000);
}
if (status != connector_status_connected)
return -ETIMEDOUT;
}
enable_irq(dpaux->irq);
return 0;
}
int drm_dp_aux_detach(struct drm_dp_aux *aux)
{
struct tegra_dpaux *dpaux = to_dpaux(aux);
unsigned long timeout;
int err;
drm_dp_aux_unregister(aux);
disable_irq(dpaux->irq);
if (dpaux->output->panel) {
enum drm_connector_status status;
if (dpaux->vdd) {
err = regulator_disable(dpaux->vdd);
if (err < 0)
return err;
}
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
status = drm_dp_aux_detect(aux);
if (status == connector_status_disconnected)
break;
usleep_range(1000, 2000);
}
if (status != connector_status_disconnected)
return -ETIMEDOUT;
dpaux->output = NULL;
}
return 0;
}
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
{
struct tegra_dpaux *dpaux = to_dpaux(aux);
u32 value;
value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);
if (value & DPAUX_DP_AUXSTAT_HPD_STATUS)
return connector_status_connected;
return connector_status_disconnected;
}
int drm_dp_aux_enable(struct drm_dp_aux *aux)
{
struct tegra_dpaux *dpaux = to_dpaux(aux);
return tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_AUX);
}
int drm_dp_aux_disable(struct drm_dp_aux *aux)
{
struct tegra_dpaux *dpaux = to_dpaux(aux);
tegra_dpaux_pad_power_down(dpaux);
return 0;
}
| linux-master | drivers/gpu/drm/tegra/dpaux.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Avionic Design GmbH
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/i2c.h>
#include <linux/of.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
#include "drm.h"
#include "dc.h"
#include <media/cec-notifier.h>
int tegra_output_connector_get_modes(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
struct edid *edid = NULL;
int err = 0;
/*
* If the panel provides one or more modes, use them exclusively and
* ignore any other means of obtaining a mode.
*/
if (output->panel) {
err = drm_panel_get_modes(output->panel, connector);
if (err > 0)
return err;
}
if (output->edid)
edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
else if (output->ddc)
edid = drm_get_edid(connector, output->ddc);
cec_notifier_set_phys_addr_from_edid(output->cec, edid);
drm_connector_update_edid_property(connector, edid);
if (edid) {
err = drm_add_edid_modes(connector, edid);
kfree(edid);
}
return err;
}
enum drm_connector_status
tegra_output_connector_detect(struct drm_connector *connector, bool force)
{
struct tegra_output *output = connector_to_output(connector);
enum drm_connector_status status = connector_status_unknown;
if (output->hpd_gpio) {
if (gpiod_get_value(output->hpd_gpio) == 0)
status = connector_status_disconnected;
else
status = connector_status_connected;
} else {
if (!output->panel)
status = connector_status_disconnected;
else
status = connector_status_connected;
}
if (status != connector_status_connected)
cec_notifier_phys_addr_invalidate(output->cec);
return status;
}
void tegra_output_connector_destroy(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
if (output->cec)
cec_notifier_conn_unregister(output->cec);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
static irqreturn_t hpd_irq(int irq, void *data)
{
struct tegra_output *output = data;
if (output->connector.dev)
drm_helper_hpd_irq_event(output->connector.dev);
return IRQ_HANDLED;
}
int tegra_output_probe(struct tegra_output *output)
{
struct device_node *ddc, *panel;
unsigned long flags;
int err, size;
if (!output->of_node)
output->of_node = output->dev->of_node;
err = drm_of_find_panel_or_bridge(output->of_node, -1, -1,
&output->panel, &output->bridge);
if (err && err != -ENODEV)
return err;
panel = of_parse_phandle(output->of_node, "nvidia,panel", 0);
if (panel) {
/*
* Don't mix nvidia,panel phandle with the graph in a
* device-tree.
*/
WARN_ON(output->panel || output->bridge);
output->panel = of_drm_find_panel(panel);
of_node_put(panel);
if (IS_ERR(output->panel))
return PTR_ERR(output->panel);
}
output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
if (ddc) {
output->ddc = of_get_i2c_adapter_by_node(ddc);
of_node_put(ddc);
if (!output->ddc) {
err = -EPROBE_DEFER;
return err;
}
}
output->hpd_gpio = devm_fwnode_gpiod_get(output->dev,
of_fwnode_handle(output->of_node),
"nvidia,hpd",
GPIOD_IN,
"HDMI hotplug detect");
if (IS_ERR(output->hpd_gpio)) {
if (PTR_ERR(output->hpd_gpio) != -ENOENT)
return PTR_ERR(output->hpd_gpio);
output->hpd_gpio = NULL;
}
if (output->hpd_gpio) {
err = gpiod_to_irq(output->hpd_gpio);
if (err < 0) {
dev_err(output->dev, "gpiod_to_irq(): %d\n", err);
return err;
}
output->hpd_irq = err;
flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
IRQF_ONESHOT;
err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq,
flags, "hpd", output);
if (err < 0) {
dev_err(output->dev, "failed to request IRQ#%u: %d\n",
output->hpd_irq, err);
return err;
}
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
/*
* Disable the interrupt until the connector has been
* initialized to avoid a race in the hotplug interrupt
* handler.
*/
disable_irq(output->hpd_irq);
}
return 0;
}
void tegra_output_remove(struct tegra_output *output)
{
if (output->hpd_gpio)
free_irq(output->hpd_irq, output);
if (output->ddc)
i2c_put_adapter(output->ddc);
}
int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
{
int connector_type;
/*
* The connector is now registered and ready to receive hotplug events
* so the hotplug interrupt can be enabled.
*/
if (output->hpd_gpio)
enable_irq(output->hpd_irq);
connector_type = output->connector.connector_type;
/*
* Create a CEC notifier for HDMI connector.
*/
if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector_type == DRM_MODE_CONNECTOR_HDMIB) {
struct cec_connector_info conn_info;
cec_fill_conn_info_from_drm(&conn_info, &output->connector);
output->cec = cec_notifier_conn_register(output->dev, NULL,
&conn_info);
if (!output->cec)
return -ENOMEM;
}
return 0;
}
void tegra_output_exit(struct tegra_output *output)
{
/*
* The connector is going away, so the interrupt must be disabled to
* prevent the hotplug interrupt handler from potentially crashing.
*/
if (output->hpd_gpio)
disable_irq(output->hpd_irq);
}
void tegra_output_find_possible_crtcs(struct tegra_output *output,
struct drm_device *drm)
{
struct device *dev = output->dev;
struct drm_crtc *crtc;
unsigned int mask = 0;
drm_for_each_crtc(crtc, drm) {
struct tegra_dc *dc = to_tegra_dc(crtc);
if (tegra_dc_has_output(dc, dev))
mask |= drm_crtc_mask(crtc);
}
if (mask == 0) {
dev_warn(dev, "missing output definition for heads in DT\n");
mask = 0x3;
}
output->encoder.possible_crtcs = mask;
}
int tegra_output_suspend(struct tegra_output *output)
{
if (output->hpd_irq)
disable_irq(output->hpd_irq);
return 0;
}
int tegra_output_resume(struct tegra_output *output)
{
if (output->hpd_irq)
enable_irq(output->hpd_irq);
return 0;
}
| linux-master | drivers/gpu/drm/tegra/output.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/interconnect.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include "dc.h"
#include "plane.h"
static void tegra_plane_destroy(struct drm_plane *plane)
{
struct tegra_plane *p = to_tegra_plane(plane);
drm_plane_cleanup(plane);
kfree(p);
}
static void tegra_plane_reset(struct drm_plane *plane)
{
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_plane_state *state;
unsigned int i;
if (plane->state)
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(plane->state);
plane->state = NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state) {
plane->state = &state->base;
plane->state->plane = plane;
plane->state->zpos = p->index;
plane->state->normalized_zpos = p->index;
for (i = 0; i < 3; i++)
state->iova[i] = DMA_MAPPING_ERROR;
}
}
static struct drm_plane_state *
tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
{
struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
struct tegra_plane_state *copy;
unsigned int i;
copy = kmalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
copy->tiling = state->tiling;
copy->format = state->format;
copy->swap = state->swap;
copy->reflect_x = state->reflect_x;
copy->reflect_y = state->reflect_y;
copy->opaque = state->opaque;
copy->total_peak_memory_bandwidth = state->total_peak_memory_bandwidth;
copy->peak_memory_bandwidth = state->peak_memory_bandwidth;
copy->avg_memory_bandwidth = state->avg_memory_bandwidth;
for (i = 0; i < 2; i++)
copy->blending[i] = state->blending[i];
for (i = 0; i < 3; i++) {
copy->iova[i] = DMA_MAPPING_ERROR;
copy->map[i] = NULL;
}
return &copy->base;
}
static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
__drm_atomic_helper_plane_destroy_state(state);
kfree(state);
}
static bool tegra_plane_supports_sector_layout(struct drm_plane *plane)
{
struct drm_crtc *crtc;
drm_for_each_crtc(crtc, plane->dev) {
if (plane->possible_crtcs & drm_crtc_mask(crtc)) {
struct tegra_dc *dc = to_tegra_dc(crtc);
if (!dc->soc->supports_sector_layout)
return false;
}
}
return true;
}
static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
{
const struct drm_format_info *info = drm_format_info(format);
if (modifier == DRM_FORMAT_MOD_LINEAR)
return true;
/* check for the sector layout bit */
if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
if (!tegra_plane_supports_sector_layout(plane))
return false;
}
}
if (info->num_planes == 1)
return true;
return false;
}
const struct drm_plane_funcs tegra_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = tegra_plane_destroy,
.reset = tegra_plane_reset,
.atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
.atomic_destroy_state = tegra_plane_atomic_destroy_state,
.format_mod_supported = tegra_plane_format_mod_supported,
};
static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
unsigned int i;
int err;
for (i = 0; i < state->base.fb->format->num_planes; i++) {
struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
struct host1x_bo_mapping *map;
map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE, &dc->client.cache);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto unpin;
}
if (!dc->client.group) {
/*
* The display controller needs contiguous memory, so
* fail if the buffer is discontiguous and we fail to
* map its SG table to a single contiguous chunk of
* I/O virtual memory.
*/
if (map->chunks > 1) {
err = -EINVAL;
goto unpin;
}
state->iova[i] = map->phys;
} else {
state->iova[i] = bo->iova;
}
state->map[i] = map;
}
return 0;
unpin:
dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);
while (i--) {
host1x_bo_unpin(state->map[i]);
state->iova[i] = DMA_MAPPING_ERROR;
state->map[i] = NULL;
}
return err;
}
static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
unsigned int i;
for (i = 0; i < state->base.fb->format->num_planes; i++) {
host1x_bo_unpin(state->map[i]);
state->iova[i] = DMA_MAPPING_ERROR;
state->map[i] = NULL;
}
}
int tegra_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct tegra_dc *dc = to_tegra_dc(state->crtc);
int err;
if (!state->fb)
return 0;
err = drm_gem_plane_helper_prepare_fb(plane, state);
if (err < 0)
return err;
return tegra_dc_pin(dc, to_tegra_plane_state(state));
}
void tegra_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct tegra_dc *dc = to_tegra_dc(state->crtc);
if (dc)
tegra_dc_unpin(dc, to_tegra_plane_state(state));
}
static int tegra_plane_calculate_memory_bandwidth(struct drm_plane_state *state)
{
struct tegra_plane_state *tegra_state = to_tegra_plane_state(state);
unsigned int i, bpp, dst_w, dst_h, src_w, src_h, mul;
const struct tegra_dc_soc_info *soc;
const struct drm_format_info *fmt;
struct drm_crtc_state *crtc_state;
u64 avg_bandwidth, peak_bandwidth;
if (!state->visible)
return 0;
crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
if (!crtc_state)
return -EINVAL;
src_w = drm_rect_width(&state->src) >> 16;
src_h = drm_rect_height(&state->src) >> 16;
dst_w = drm_rect_width(&state->dst);
dst_h = drm_rect_height(&state->dst);
fmt = state->fb->format;
soc = to_tegra_dc(state->crtc)->soc;
/*
* Note that the real memory bandwidth varies depending on the format and
* memory layout; we do not take that into account because a small
* estimation error is not important since the bandwidth is rounded up
* anyway.
*/
for (i = 0, bpp = 0; i < fmt->num_planes; i++) {
unsigned int bpp_plane = fmt->cpp[i] * 8;
/*
* Sub-sampling is relevant for chroma planes only and vertical
* readouts are not cached, hence only horizontal sub-sampling
* matters.
*/
if (i > 0)
bpp_plane /= fmt->hsub;
bpp += bpp_plane;
}
/* average bandwidth in kbytes/sec */
avg_bandwidth = min(src_w, dst_w) * min(src_h, dst_h);
avg_bandwidth *= drm_mode_vrefresh(&crtc_state->adjusted_mode);
avg_bandwidth = DIV_ROUND_UP(avg_bandwidth * bpp, 8) + 999;
do_div(avg_bandwidth, 1000);
/* mode.clock in kHz, peak bandwidth in kbytes/sec */
peak_bandwidth = DIV_ROUND_UP(crtc_state->adjusted_mode.clock * bpp, 8);
/*
* The Tegra30/114 memory controller can't interleave DC memory requests
* for tiled windows because the DC uses a 16-byte atom while DDR3 uses
* a 32-byte atom. Hence there is a 2x memory overfetch for tiled
* framebuffers with DDR3 on these SoCs.
*/
if (soc->plane_tiled_memory_bandwidth_x2 &&
tegra_state->tiling.mode == TEGRA_BO_TILING_MODE_TILED)
mul = 2;
else
mul = 1;
/* ICC bandwidth in kbytes/sec */
tegra_state->peak_memory_bandwidth = kBps_to_icc(peak_bandwidth) * mul;
tegra_state->avg_memory_bandwidth = kBps_to_icc(avg_bandwidth) * mul;
return 0;
}
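/*
* Rough worked example (illustrative only): a full-screen 1920x1080
* ARGB8888 plane (bpp = 32) on a 60 Hz mode with a 148500 kHz pixel clock
* and no scaling yields approximately:
*
*     avg_bandwidth  = 1920 * 1080 * 60 * 32 / 8 / 1000 ~= 497664 kB/s
*     peak_bandwidth = 148500 * 32 / 8                   = 594000 kB/s
*
* Both values are then converted to ICC units and doubled on SoCs with the
* tiled-framebuffer overfetch described above.
*/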
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state)
{
struct drm_crtc_state *crtc_state;
struct tegra_dc_state *tegra;
int err;
/* Propagate errors from allocation or locking failures. */
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
/* Check plane state for visibility and calculate clipping bounds */
err = drm_atomic_helper_check_plane_state(state, crtc_state,
0, INT_MAX, true, true);
if (err < 0)
return err;
err = tegra_plane_calculate_memory_bandwidth(state);
if (err < 0)
return err;
tegra = to_dc_state(crtc_state);
tegra->planes |= WIN_A_ACT_REQ << plane->index;
return 0;
}
int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap)
{
/* assume no swapping of fetched data */
if (swap)
*swap = BYTE_SWAP_NOSWAP;
switch (fourcc) {
case DRM_FORMAT_ARGB4444:
*format = WIN_COLOR_DEPTH_B4G4R4A4;
break;
case DRM_FORMAT_ARGB1555:
*format = WIN_COLOR_DEPTH_B5G5R5A1;
break;
case DRM_FORMAT_RGB565:
*format = WIN_COLOR_DEPTH_B5G6R5;
break;
case DRM_FORMAT_RGBA5551:
*format = WIN_COLOR_DEPTH_A1B5G5R5;
break;
case DRM_FORMAT_ARGB8888:
*format = WIN_COLOR_DEPTH_B8G8R8A8;
break;
case DRM_FORMAT_ABGR8888:
*format = WIN_COLOR_DEPTH_R8G8B8A8;
break;
case DRM_FORMAT_ABGR4444:
*format = WIN_COLOR_DEPTH_R4G4B4A4;
break;
case DRM_FORMAT_ABGR1555:
*format = WIN_COLOR_DEPTH_R5G5B5A;
break;
case DRM_FORMAT_BGRA5551:
*format = WIN_COLOR_DEPTH_AR5G5B5;
break;
case DRM_FORMAT_XRGB1555:
*format = WIN_COLOR_DEPTH_B5G5R5X1;
break;
case DRM_FORMAT_RGBX5551:
*format = WIN_COLOR_DEPTH_X1B5G5R5;
break;
case DRM_FORMAT_XBGR1555:
*format = WIN_COLOR_DEPTH_R5G5B5X1;
break;
case DRM_FORMAT_BGRX5551:
*format = WIN_COLOR_DEPTH_X1R5G5B5;
break;
case DRM_FORMAT_BGR565:
*format = WIN_COLOR_DEPTH_R5G6B5;
break;
case DRM_FORMAT_BGRA8888:
*format = WIN_COLOR_DEPTH_A8R8G8B8;
break;
case DRM_FORMAT_RGBA8888:
*format = WIN_COLOR_DEPTH_A8B8G8R8;
break;
case DRM_FORMAT_XRGB8888:
*format = WIN_COLOR_DEPTH_B8G8R8X8;
break;
case DRM_FORMAT_XBGR8888:
*format = WIN_COLOR_DEPTH_R8G8B8X8;
break;
case DRM_FORMAT_UYVY:
*format = WIN_COLOR_DEPTH_YCbCr422;
break;
case DRM_FORMAT_YUYV:
if (!swap)
return -EINVAL;
*format = WIN_COLOR_DEPTH_YCbCr422;
*swap = BYTE_SWAP_SWAP2;
break;
case DRM_FORMAT_YVYU:
if (!swap)
return -EINVAL;
*format = WIN_COLOR_DEPTH_YCbCr422;
*swap = BYTE_SWAP_SWAP4;
break;
case DRM_FORMAT_VYUY:
if (!swap)
return -EINVAL;
*format = WIN_COLOR_DEPTH_YCbCr422;
*swap = BYTE_SWAP_SWAP4HW;
break;
case DRM_FORMAT_YUV420:
*format = WIN_COLOR_DEPTH_YCbCr420P;
break;
case DRM_FORMAT_YUV422:
*format = WIN_COLOR_DEPTH_YCbCr422P;
break;
case DRM_FORMAT_YUV444:
*format = WIN_COLOR_DEPTH_YCbCr444P;
break;
case DRM_FORMAT_NV12:
*format = WIN_COLOR_DEPTH_YCbCr420SP;
break;
case DRM_FORMAT_NV21:
*format = WIN_COLOR_DEPTH_YCrCb420SP;
break;
case DRM_FORMAT_NV16:
*format = WIN_COLOR_DEPTH_YCbCr422SP;
break;
case DRM_FORMAT_NV61:
*format = WIN_COLOR_DEPTH_YCrCb422SP;
break;
case DRM_FORMAT_NV24:
*format = WIN_COLOR_DEPTH_YCbCr444SP;
break;
case DRM_FORMAT_NV42:
*format = WIN_COLOR_DEPTH_YCrCb444SP;
break;
default:
return -EINVAL;
}
return 0;
}
bool tegra_plane_format_is_indexed(unsigned int format)
{
switch (format) {
case WIN_COLOR_DEPTH_P1:
case WIN_COLOR_DEPTH_P2:
case WIN_COLOR_DEPTH_P4:
case WIN_COLOR_DEPTH_P8:
return true;
}
return false;
}
bool tegra_plane_format_is_yuv(unsigned int format, unsigned int *planes, unsigned int *bpc)
{
switch (format) {
case WIN_COLOR_DEPTH_YCbCr422:
case WIN_COLOR_DEPTH_YUV422:
if (planes)
*planes = 1;
if (bpc)
*bpc = 8;
return true;
case WIN_COLOR_DEPTH_YCbCr420P:
case WIN_COLOR_DEPTH_YUV420P:
case WIN_COLOR_DEPTH_YCbCr422P:
case WIN_COLOR_DEPTH_YUV422P:
case WIN_COLOR_DEPTH_YCbCr422R:
case WIN_COLOR_DEPTH_YUV422R:
case WIN_COLOR_DEPTH_YCbCr422RA:
case WIN_COLOR_DEPTH_YUV422RA:
case WIN_COLOR_DEPTH_YCbCr444P:
if (planes)
*planes = 3;
if (bpc)
*bpc = 8;
return true;
case WIN_COLOR_DEPTH_YCrCb420SP:
case WIN_COLOR_DEPTH_YCbCr420SP:
case WIN_COLOR_DEPTH_YCrCb422SP:
case WIN_COLOR_DEPTH_YCbCr422SP:
case WIN_COLOR_DEPTH_YCrCb444SP:
case WIN_COLOR_DEPTH_YCbCr444SP:
if (planes)
*planes = 2;
if (bpc)
*bpc = 8;
return true;
}
if (planes)
*planes = 1;
return false;
}
static bool __drm_format_has_alpha(u32 format)
{
switch (format) {
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_RGBA5551:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_ARGB8888:
return true;
}
return false;
}
static int tegra_plane_format_get_alpha(unsigned int opaque,
unsigned int *alpha)
{
if (tegra_plane_format_is_yuv(opaque, NULL, NULL)) {
*alpha = opaque;
return 0;
}
switch (opaque) {
case WIN_COLOR_DEPTH_B5G5R5X1:
*alpha = WIN_COLOR_DEPTH_B5G5R5A1;
return 0;
case WIN_COLOR_DEPTH_X1B5G5R5:
*alpha = WIN_COLOR_DEPTH_A1B5G5R5;
return 0;
case WIN_COLOR_DEPTH_R8G8B8X8:
*alpha = WIN_COLOR_DEPTH_R8G8B8A8;
return 0;
case WIN_COLOR_DEPTH_B8G8R8X8:
*alpha = WIN_COLOR_DEPTH_B8G8R8A8;
return 0;
case WIN_COLOR_DEPTH_B5G6R5:
*alpha = opaque;
return 0;
}
return -EINVAL;
}
/*
* This is applicable to Tegra20 and Tegra30 only where the opaque formats can
* be emulated using the alpha formats and alpha blending disabled.
*/
static int tegra_plane_setup_opacity(struct tegra_plane *tegra,
struct tegra_plane_state *state)
{
unsigned int format;
int err;
switch (state->format) {
case WIN_COLOR_DEPTH_B5G5R5A1:
case WIN_COLOR_DEPTH_A1B5G5R5:
case WIN_COLOR_DEPTH_R8G8B8A8:
case WIN_COLOR_DEPTH_B8G8R8A8:
state->opaque = false;
break;
default:
err = tegra_plane_format_get_alpha(state->format, &format);
if (err < 0)
return err;
state->format = format;
state->opaque = true;
break;
}
return 0;
}
static int tegra_plane_check_transparency(struct tegra_plane *tegra,
struct tegra_plane_state *state)
{
struct drm_plane_state *old, *plane_state;
struct drm_plane *plane;
old = drm_atomic_get_old_plane_state(state->base.state, &tegra->base);
/* check if zpos / transparency changed */
if (old->normalized_zpos == state->base.normalized_zpos &&
to_tegra_plane_state(old)->opaque == state->opaque)
return 0;
/* include all sibling planes into this commit */
drm_for_each_plane(plane, tegra->base.dev) {
struct tegra_plane *p = to_tegra_plane(plane);
/* skip this plane and planes on different CRTCs */
if (p == tegra || p->dc != tegra->dc)
continue;
plane_state = drm_atomic_get_plane_state(state->base.state,
plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
}
return 1;
}
static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
struct tegra_plane *other)
{
unsigned int index = 0, i;
WARN_ON(plane == other);
for (i = 0; i < 3; i++) {
if (i == plane->index)
continue;
if (i == other->index)
break;
index++;
}
return index;
}
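/*
* Illustrative example: with three windows A, B and C (indices 0, 1, 2),
* the overlap index enumerates the other two windows from the point of
* view of @plane. For plane A (index 0), other B (index 1) maps to 0 and
* other C (index 2) maps to 1; for plane B (index 1), other A maps to 0
* and other C maps to 1.
*/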
static void tegra_plane_update_transparency(struct tegra_plane *tegra,
struct tegra_plane_state *state)
{
struct drm_plane_state *new;
struct drm_plane *plane;
unsigned int i;
for_each_new_plane_in_state(state->base.state, plane, new, i) {
struct tegra_plane *p = to_tegra_plane(plane);
unsigned index;
/* skip this plane and planes on different CRTCs */
if (p == tegra || p->dc != tegra->dc)
continue;
index = tegra_plane_get_overlap_index(tegra, p);
if (new->fb && __drm_format_has_alpha(new->fb->format->format))
state->blending[index].alpha = true;
else
state->blending[index].alpha = false;
if (new->normalized_zpos > state->base.normalized_zpos)
state->blending[index].top = true;
else
state->blending[index].top = false;
/*
* A missing framebuffer means that the plane is disabled; in
* that case mark the B / C window as top so that the window
* index order with respect to zpos can be differentiated when
* programming the X / Y registers of the middle window.
*/
if (!new->fb)
state->blending[index].top = (index == 1);
}
}
static int tegra_plane_setup_transparency(struct tegra_plane *tegra,
struct tegra_plane_state *state)
{
struct tegra_plane_state *tegra_state;
struct drm_plane_state *new;
struct drm_plane *plane;
int err;
/*
* If the plane's zpos / transparency changed, the blending state of the
* sibling planes may require adjustment, in which case they are included
* in this atomic commit; otherwise the blending state is unchanged.
*/
err = tegra_plane_check_transparency(tegra, state);
if (err <= 0)
return err;
/*
* All planes are now in the atomic state; walk through them and update
* the transparency state for each plane.
*/
drm_for_each_plane(plane, tegra->base.dev) {
struct tegra_plane *p = to_tegra_plane(plane);
/* skip planes on different CRTCs */
if (p->dc != tegra->dc)
continue;
new = drm_atomic_get_new_plane_state(state->base.state, plane);
tegra_state = to_tegra_plane_state(new);
/*
* There is no need to update blending state for the disabled
* plane.
*/
if (new->fb)
tegra_plane_update_transparency(p, tegra_state);
}
return 0;
}
int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
struct tegra_plane_state *state)
{
int err;
err = tegra_plane_setup_opacity(tegra, state);
if (err < 0)
return err;
err = tegra_plane_setup_transparency(tegra, state);
if (err < 0)
return err;
return 0;
}
static const char * const tegra_plane_icc_names[TEGRA_DC_LEGACY_PLANES_NUM] = {
"wina", "winb", "winc", NULL, NULL, NULL, "cursor",
};
int tegra_plane_interconnect_init(struct tegra_plane *plane)
{
const char *icc_name = tegra_plane_icc_names[plane->index];
struct device *dev = plane->dc->dev;
struct tegra_dc *dc = plane->dc;
int err;
if (WARN_ON(plane->index >= TEGRA_DC_LEGACY_PLANES_NUM) ||
WARN_ON(!tegra_plane_icc_names[plane->index]))
return -EINVAL;
plane->icc_mem = devm_of_icc_get(dev, icc_name);
err = PTR_ERR_OR_ZERO(plane->icc_mem);
if (err)
return dev_err_probe(dev, err, "failed to get %s interconnect\n",
icc_name);
/* plane B on T20/30 has a dedicated memory client for a 6-tap vertical filter */
if (plane->index == 1 && dc->soc->has_win_b_vfilter_mem_client) {
plane->icc_mem_vfilter = devm_of_icc_get(dev, "winb-vfilter");
err = PTR_ERR_OR_ZERO(plane->icc_mem_vfilter);
if (err)
return dev_err_probe(dev, err, "failed to get %s interconnect\n",
"winb-vfilter");
}
return 0;
}
| linux-master | drivers/gpu/drm/tegra/plane.c |
// SPDX-License-Identifier: MIT
/*
* Copyright (C) 2013-2019 NVIDIA Corporation
* Copyright (C) 2015 Rob Clark
*/
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include "dp.h"
static const u8 drm_dp_edp_revisions[] = { 0x11, 0x12, 0x13, 0x14 };
static void drm_dp_link_caps_reset(struct drm_dp_link_caps *caps)
{
caps->enhanced_framing = false;
caps->tps3_supported = false;
caps->fast_training = false;
caps->channel_coding = false;
caps->alternate_scrambler_reset = false;
}
void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
const struct drm_dp_link_caps *src)
{
dest->enhanced_framing = src->enhanced_framing;
dest->tps3_supported = src->tps3_supported;
dest->fast_training = src->fast_training;
dest->channel_coding = src->channel_coding;
dest->alternate_scrambler_reset = src->alternate_scrambler_reset;
}
static void drm_dp_link_reset(struct drm_dp_link *link)
{
unsigned int i;
if (!link)
return;
link->revision = 0;
link->max_rate = 0;
link->max_lanes = 0;
drm_dp_link_caps_reset(&link->caps);
link->aux_rd_interval.cr = 0;
link->aux_rd_interval.ce = 0;
link->edp = 0;
link->rate = 0;
link->lanes = 0;
for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++)
link->rates[i] = 0;
link->num_rates = 0;
}
/**
* drm_dp_link_add_rate() - add a rate to the list of supported rates
* @link: the link to add the rate to
* @rate: the rate to add
*
* Add a link rate to the list of supported link rates.
*
* Returns:
* 0 on success or one of the following negative error codes on failure:
* - ENOSPC if the maximum number of supported rates has been reached
* - EEXIST if the link already supports this rate
*
* See also:
* drm_dp_link_remove_rate()
*/
int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate)
{
unsigned int i, pivot;
if (link->num_rates == DP_MAX_SUPPORTED_RATES)
return -ENOSPC;
for (pivot = 0; pivot < link->num_rates; pivot++)
if (rate <= link->rates[pivot])
break;
if (pivot != link->num_rates && rate == link->rates[pivot])
return -EEXIST;
for (i = link->num_rates; i > pivot; i--)
link->rates[i] = link->rates[i - 1];
link->rates[pivot] = rate;
link->num_rates++;
return 0;
}
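/*
* Illustrative usage sketch: the rates are kept sorted in ascending order,
* so adding 270000 and then 162000 to an empty link leaves link->rates[] as
* { 162000, 270000 } with link->num_rates == 2; adding 162000 a second time
* would return -EEXIST.
*/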
/**
* drm_dp_link_remove_rate() - remove a rate from the list of supported rates
* @link: the link from which to remove the rate
* @rate: the rate to remove
*
* Removes a link rate from the list of supported link rates.
*
* Returns:
* 0 on success or one of the following negative error codes on failure:
* - EINVAL if the specified rate is not among the supported rates
*
* See also:
* drm_dp_link_add_rate()
*/
int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate)
{
unsigned int i;
for (i = 0; i < link->num_rates; i++)
if (rate == link->rates[i])
break;
if (i == link->num_rates)
return -EINVAL;
link->num_rates--;
while (i < link->num_rates) {
link->rates[i] = link->rates[i + 1];
i++;
}
return 0;
}
/**
* drm_dp_link_update_rates() - normalize the supported link rates array
* @link: the link for which to normalize the supported link rates
*
* Users should call this function after they've manually modified the array
* of supported link rates. This function removes any stale entries, compacts
* the array and updates the supported link rate count. Note that calling the
* drm_dp_link_remove_rate() function already does this janitorial work.
*
* See also:
* drm_dp_link_add_rate(), drm_dp_link_remove_rate()
*/
void drm_dp_link_update_rates(struct drm_dp_link *link)
{
unsigned int i, count = 0;
for (i = 0; i < link->num_rates; i++) {
if (link->rates[i] != 0)
link->rates[count++] = link->rates[i];
}
for (i = count; i < link->num_rates; i++)
link->rates[i] = 0;
link->num_rates = count;
}
/**
* drm_dp_link_probe() - probe a DisplayPort link for capabilities
* @aux: DisplayPort AUX channel
* @link: pointer to structure in which to return link capabilities
*
* The structure filled in by this function can usually be passed directly
* into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
* configure the link based on the link's capabilities.
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
{
u8 dpcd[DP_RECEIVER_CAP_SIZE], value;
unsigned int rd_interval;
int err;
drm_dp_link_reset(link);
err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
if (err < 0)
return err;
link->revision = dpcd[DP_DPCD_REV];
link->max_rate = drm_dp_max_link_rate(dpcd);
link->max_lanes = drm_dp_max_lane_count(dpcd);
link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(dpcd);
link->caps.tps3_supported = drm_dp_tps3_supported(dpcd);
link->caps.fast_training = drm_dp_fast_training_cap(dpcd);
link->caps.channel_coding = drm_dp_channel_coding_supported(dpcd);
if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
link->caps.alternate_scrambler_reset = true;
err = drm_dp_dpcd_readb(aux, DP_EDP_DPCD_REV, &value);
if (err < 0)
return err;
if (value >= ARRAY_SIZE(drm_dp_edp_revisions))
DRM_ERROR("unsupported eDP version: %02x\n", value);
else
link->edp = drm_dp_edp_revisions[value];
}
/*
* The DPCD stores the AUX read interval in units of 4 ms. There are
* two special cases:
*
* 1) if the TRAINING_AUX_RD_INTERVAL field is 0, the clock recovery
* and channel equalization should use 100 us or 400 us AUX read
* intervals, respectively
*
* 2) for DP v1.4 and above, clock recovery should always use 100 us
* AUX read intervals
*/
rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
DP_TRAINING_AUX_RD_MASK;
if (rd_interval > 4) {
DRM_DEBUG_KMS("AUX interval %u out of range (max. 4)\n",
rd_interval);
rd_interval = 4;
}
rd_interval *= 4 * USEC_PER_MSEC;
if (rd_interval == 0 || link->revision >= DP_DPCD_REV_14)
link->aux_rd_interval.cr = 100;
else
link->aux_rd_interval.cr = rd_interval;
if (rd_interval == 0)
link->aux_rd_interval.ce = 400;
else
link->aux_rd_interval.ce = rd_interval;
link->rate = link->max_rate;
link->lanes = link->max_lanes;
/* Parse SUPPORTED_LINK_RATES from eDP 1.4 */
if (link->edp >= 0x14) {
u8 supported_rates[DP_MAX_SUPPORTED_RATES * 2];
unsigned int i;
u16 rate;
err = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES,
supported_rates,
sizeof(supported_rates));
if (err < 0)
return err;
for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) {
rate = supported_rates[i * 2 + 1] << 8 |
supported_rates[i * 2 + 0];
drm_dp_link_add_rate(link, rate * 200);
}
}
return 0;
}
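/*
* Illustrative example of the AUX read interval handling above: a DPCD
* TRAINING_AUX_RD_INTERVAL value of 0 yields 100 us for clock recovery and
* 400 us for channel equalization, while a value of 1 yields 4 ms (4000 us)
* for both on pre-DP 1.4 sinks; DP 1.4 and later sinks always use 100 us
* for clock recovery.
*/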
/**
* drm_dp_link_power_up() - power up a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
{
u8 value;
int err;
/* DP_SET_POWER register is only available on DPCD v1.1 and later */
if (link->revision < 0x11)
return 0;
err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
if (err < 0)
return err;
value &= ~DP_SET_POWER_MASK;
value |= DP_SET_POWER_D0;
err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
if (err < 0)
return err;
/*
* According to the DP 1.1 specification, a "Sink Device must exit the
* power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
* Control Field" (register 0x600).
*/
usleep_range(1000, 2000);
return 0;
}
/**
* drm_dp_link_power_down() - power down a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
{
u8 value;
int err;
/* DP_SET_POWER register is only available on DPCD v1.1 and later */
if (link->revision < 0x11)
return 0;
err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
if (err < 0)
return err;
value &= ~DP_SET_POWER_MASK;
value |= DP_SET_POWER_D3;
err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
if (err < 0)
return err;
return 0;
}
/**
* drm_dp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
{
u8 values[2], value;
int err;
if (link->ops && link->ops->configure) {
err = link->ops->configure(link);
if (err < 0) {
DRM_ERROR("failed to configure DP link: %d\n", err);
return err;
}
}
values[0] = drm_dp_link_rate_to_bw_code(link->rate);
values[1] = link->lanes;
if (link->caps.enhanced_framing)
values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
if (err < 0)
return err;
if (link->caps.channel_coding)
value = DP_SET_ANSI_8B10B;
else
value = 0;
err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
if (err < 0)
return err;
if (link->caps.alternate_scrambler_reset) {
err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET,
DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
if (err < 0)
return err;
}
return 0;
}
/**
* drm_dp_link_choose() - choose the lowest possible configuration for a mode
* @link: DRM DP link object
* @mode: DRM display mode
* @info: DRM display information
*
* According to the eDP specification, a source should select a configuration
* with the lowest number of lanes and the lowest possible link rate that can
* match the bitrate requirements of a video mode. However it must ensure not
* to exceed the capabilities of the sink.
*
* Returns: 0 on success or a negative error code on failure.
*/
int drm_dp_link_choose(struct drm_dp_link *link,
const struct drm_display_mode *mode,
const struct drm_display_info *info)
{
/* available link symbol clock rates */
static const unsigned int rates[3] = { 162000, 270000, 540000 };
/* available number of lanes */
static const unsigned int lanes[3] = { 1, 2, 4 };
unsigned long requirement, capacity;
unsigned int rate = link->max_rate;
unsigned int i, j;
/* bandwidth requirement */
requirement = mode->clock * info->bpc * 3;
for (i = 0; i < ARRAY_SIZE(lanes) && lanes[i] <= link->max_lanes; i++) {
for (j = 0; j < ARRAY_SIZE(rates) && rates[j] <= rate; j++) {
/*
* Capacity for this combination of lanes and rate,
* factoring in the ANSI 8B/10B encoding.
*
* Link rates in the DRM DP helpers are really link
* symbol frequencies, so a tenth of the actual rate
* of the link.
*/
capacity = lanes[i] * (rates[j] * 10) * 8 / 10;
if (capacity >= requirement) {
DRM_DEBUG_KMS("using %u lanes at %u kHz (%lu/%lu kbps)\n",
lanes[i], rates[j], requirement,
capacity);
link->lanes = lanes[i];
link->rate = rates[j];
return 0;
}
}
}
return -ERANGE;
}
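/*
* Illustrative example (assuming a sink that supports 4 lanes at up to
* 540000 kHz): a 1920x1080@60 mode with a 148500 kHz pixel clock and 8 bpc
* needs 148500 * 8 * 3 = 3564000 kbps. One lane at 162000 or 270000 kHz
* provides only 1296000 or 2160000 kbps after 8b/10b encoding, but one lane
* at 540000 kHz provides 4320000 kbps, so the loop above settles on a
* single lane at 540000 kHz before ever trying two lanes.
*/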
/**
* DOC: Link training
*
* These functions contain common logic and helpers to implement DisplayPort
* link training.
*/
/**
* drm_dp_link_train_init() - initialize DisplayPort link training state
* @train: DisplayPort link training state
*/
void drm_dp_link_train_init(struct drm_dp_link_train *train)
{
struct drm_dp_link_train_set *request = &train->request;
struct drm_dp_link_train_set *adjust = &train->adjust;
unsigned int i;
for (i = 0; i < 4; i++) {
request->voltage_swing[i] = 0;
adjust->voltage_swing[i] = 0;
request->pre_emphasis[i] = 0;
adjust->pre_emphasis[i] = 0;
request->post_cursor[i] = 0;
adjust->post_cursor[i] = 0;
}
train->pattern = DP_TRAINING_PATTERN_DISABLE;
train->clock_recovered = false;
train->channel_equalized = false;
}
static bool drm_dp_link_train_valid(const struct drm_dp_link_train *train)
{
return train->clock_recovered && train->channel_equalized;
}
static int drm_dp_link_apply_training(struct drm_dp_link *link)
{
struct drm_dp_link_train_set *request = &link->train.request;
unsigned int lanes = link->lanes, *vs, *pe, *pc, i;
struct drm_dp_aux *aux = link->aux;
u8 values[4], pattern = 0;
int err;
err = link->ops->apply_training(link);
if (err < 0) {
DRM_ERROR("failed to apply link training: %d\n", err);
return err;
}
vs = request->voltage_swing;
pe = request->pre_emphasis;
pc = request->post_cursor;
/* write currently selected voltage-swing and pre-emphasis levels */
for (i = 0; i < lanes; i++)
values[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL(vs[i]) |
DP_TRAIN_PRE_EMPHASIS_LEVEL(pe[i]);
err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, lanes);
if (err < 0) {
DRM_ERROR("failed to set training parameters: %d\n", err);
return err;
}
/* write currently selected post-cursor level (if supported) */
if (link->revision >= 0x12 && link->rate == 540000) {
values[0] = values[1] = 0;
for (i = 0; i < lanes; i++)
values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]);
err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_1_SET2, values,
DIV_ROUND_UP(lanes, 2));
if (err < 0) {
DRM_ERROR("failed to set post-cursor: %d\n", err);
return err;
}
}
/* write link pattern */
if (link->train.pattern != DP_TRAINING_PATTERN_DISABLE)
pattern |= DP_LINK_SCRAMBLING_DISABLE;
pattern |= link->train.pattern;
err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
if (err < 0) {
DRM_ERROR("failed to set training pattern: %d\n", err);
return err;
}
return 0;
}
static void drm_dp_link_train_wait(struct drm_dp_link *link)
{
unsigned long min = 0;
switch (link->train.pattern) {
case DP_TRAINING_PATTERN_1:
min = link->aux_rd_interval.cr;
break;
case DP_TRAINING_PATTERN_2:
case DP_TRAINING_PATTERN_3:
min = link->aux_rd_interval.ce;
break;
default:
break;
}
if (min > 0)
usleep_range(min, 2 * min);
}
static void drm_dp_link_get_adjustments(struct drm_dp_link *link,
u8 status[DP_LINK_STATUS_SIZE])
{
struct drm_dp_link_train_set *adjust = &link->train.adjust;
unsigned int i;
u8 post_cursor;
int err;
err = drm_dp_dpcd_read(link->aux, DP_ADJUST_REQUEST_POST_CURSOR2,
&post_cursor, sizeof(post_cursor));
if (err < 0) {
DRM_ERROR("failed to read post_cursor2: %d\n", err);
post_cursor = 0;
}
for (i = 0; i < link->lanes; i++) {
adjust->voltage_swing[i] =
drm_dp_get_adjust_request_voltage(status, i) >>
DP_TRAIN_VOLTAGE_SWING_SHIFT;
adjust->pre_emphasis[i] =
drm_dp_get_adjust_request_pre_emphasis(status, i) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT;
adjust->post_cursor[i] =
(post_cursor >> (i << 1)) & 0x3;
}
}
static void drm_dp_link_train_adjust(struct drm_dp_link_train *train)
{
struct drm_dp_link_train_set *request = &train->request;
struct drm_dp_link_train_set *adjust = &train->adjust;
unsigned int i;
for (i = 0; i < 4; i++)
if (request->voltage_swing[i] != adjust->voltage_swing[i])
request->voltage_swing[i] = adjust->voltage_swing[i];
for (i = 0; i < 4; i++)
if (request->pre_emphasis[i] != adjust->pre_emphasis[i])
request->pre_emphasis[i] = adjust->pre_emphasis[i];
for (i = 0; i < 4; i++)
if (request->post_cursor[i] != adjust->post_cursor[i])
request->post_cursor[i] = adjust->post_cursor[i];
}
static int drm_dp_link_recover_clock(struct drm_dp_link *link)
{
u8 status[DP_LINK_STATUS_SIZE];
int err;
err = drm_dp_link_apply_training(link);
if (err < 0)
return err;
drm_dp_link_train_wait(link);
err = drm_dp_dpcd_read_link_status(link->aux, status);
if (err < 0) {
DRM_ERROR("failed to read link status: %d\n", err);
return err;
}
if (!drm_dp_clock_recovery_ok(status, link->lanes))
drm_dp_link_get_adjustments(link, status);
else
link->train.clock_recovered = true;
return 0;
}
static int drm_dp_link_clock_recovery(struct drm_dp_link *link)
{
unsigned int repeat;
int err;
/* start clock recovery using training pattern 1 */
link->train.pattern = DP_TRAINING_PATTERN_1;
for (repeat = 1; repeat < 5; repeat++) {
err = drm_dp_link_recover_clock(link);
if (err < 0) {
DRM_ERROR("failed to recover clock: %d\n", err);
return err;
}
if (link->train.clock_recovered)
break;
drm_dp_link_train_adjust(&link->train);
}
return 0;
}
static int drm_dp_link_equalize_channel(struct drm_dp_link *link)
{
struct drm_dp_aux *aux = link->aux;
u8 status[DP_LINK_STATUS_SIZE];
int err;
err = drm_dp_link_apply_training(link);
if (err < 0)
return err;
drm_dp_link_train_wait(link);
err = drm_dp_dpcd_read_link_status(aux, status);
if (err < 0) {
DRM_ERROR("failed to read link status: %d\n", err);
return err;
}
if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
DRM_ERROR("clock recovery lost while equalizing channel\n");
link->train.clock_recovered = false;
return 0;
}
if (!drm_dp_channel_eq_ok(status, link->lanes))
drm_dp_link_get_adjustments(link, status);
else
link->train.channel_equalized = true;
return 0;
}
static int drm_dp_link_channel_equalization(struct drm_dp_link *link)
{
unsigned int repeat;
int err;
/* start channel equalization using pattern 2 or 3 */
if (link->caps.tps3_supported)
link->train.pattern = DP_TRAINING_PATTERN_3;
else
link->train.pattern = DP_TRAINING_PATTERN_2;
for (repeat = 1; repeat < 5; repeat++) {
err = drm_dp_link_equalize_channel(link);
if (err < 0) {
DRM_ERROR("failed to equalize channel: %d\n", err);
return err;
}
if (link->train.channel_equalized)
break;
drm_dp_link_train_adjust(&link->train);
}
return 0;
}
static int drm_dp_link_downgrade(struct drm_dp_link *link)
{
switch (link->rate) {
case 162000:
return -EINVAL;
case 270000:
link->rate = 162000;
break;
case 540000:
link->rate = 270000;
return 0;
}
return 0;
}
static void drm_dp_link_train_disable(struct drm_dp_link *link)
{
int err;
link->train.pattern = DP_TRAINING_PATTERN_DISABLE;
err = drm_dp_link_apply_training(link);
if (err < 0)
DRM_ERROR("failed to disable link training: %d\n", err);
}
static int drm_dp_link_train_full(struct drm_dp_link *link)
{
int err;
retry:
DRM_DEBUG_KMS("full-training link: %u lane%s at %u MHz\n",
link->lanes, (link->lanes > 1) ? "s" : "",
link->rate / 100);
err = drm_dp_link_configure(link->aux, link);
if (err < 0) {
DRM_ERROR("failed to configure DP link: %d\n", err);
return err;
}
err = drm_dp_link_clock_recovery(link);
if (err < 0) {
DRM_ERROR("clock recovery failed: %d\n", err);
goto out;
}
if (!link->train.clock_recovered) {
DRM_ERROR("clock recovery failed, downgrading link\n");
err = drm_dp_link_downgrade(link);
if (err < 0)
goto out;
goto retry;
}
DRM_DEBUG_KMS("clock recovery succeeded\n");
err = drm_dp_link_channel_equalization(link);
if (err < 0) {
DRM_ERROR("channel equalization failed: %d\n", err);
goto out;
}
if (!link->train.channel_equalized) {
DRM_ERROR("channel equalization failed, downgrading link\n");
err = drm_dp_link_downgrade(link);
if (err < 0)
goto out;
goto retry;
}
DRM_DEBUG_KMS("channel equalization succeeded\n");
out:
drm_dp_link_train_disable(link);
return err;
}
static int drm_dp_link_train_fast(struct drm_dp_link *link)
{
u8 status[DP_LINK_STATUS_SIZE];
int err;
DRM_DEBUG_KMS("fast-training link: %u lane%s at %u MHz\n",
link->lanes, (link->lanes > 1) ? "s" : "",
link->rate / 100);
err = drm_dp_link_configure(link->aux, link);
if (err < 0) {
DRM_ERROR("failed to configure DP link: %d\n", err);
return err;
}
/* transmit training pattern 1 for 500 microseconds */
link->train.pattern = DP_TRAINING_PATTERN_1;
err = drm_dp_link_apply_training(link);
if (err < 0)
goto out;
usleep_range(500, 1000);
/* transmit training pattern 2 or 3 for 500 microseconds */
if (link->caps.tps3_supported)
link->train.pattern = DP_TRAINING_PATTERN_3;
else
link->train.pattern = DP_TRAINING_PATTERN_2;
err = drm_dp_link_apply_training(link);
if (err < 0)
goto out;
usleep_range(500, 1000);
err = drm_dp_dpcd_read_link_status(link->aux, status);
if (err < 0) {
DRM_ERROR("failed to read link status: %d\n", err);
goto out;
}
if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
DRM_ERROR("clock recovery failed\n");
err = -EIO;
}
if (!drm_dp_channel_eq_ok(status, link->lanes)) {
DRM_ERROR("channel equalization failed\n");
err = -EIO;
}
out:
drm_dp_link_train_disable(link);
return err;
}
/**
* drm_dp_link_train() - perform DisplayPort link training
* @link: a DP link object
*
* Uses the context stored in the DP link object to perform link training. It
* is expected that drivers will call drm_dp_link_probe() to obtain the link
* capabilities before performing link training.
*
* If the sink supports fast link training (no AUX CH handshake) and valid
* training settings are available, this function will try to perform fast
* link training and fall back to full link training on failure.
*
* Returns: 0 on success or a negative error code on failure.
*/
int drm_dp_link_train(struct drm_dp_link *link)
{
int err;
drm_dp_link_train_init(&link->train);
if (link->caps.fast_training) {
if (drm_dp_link_train_valid(&link->train)) {
err = drm_dp_link_train_fast(link);
if (err < 0)
DRM_ERROR("fast link training failed: %d\n",
err);
else
return 0;
} else {
DRM_DEBUG_KMS("training parameters not available\n");
}
} else {
DRM_DEBUG_KMS("fast link training not supported\n");
}
err = drm_dp_link_train_full(link);
if (err < 0)
DRM_ERROR("full link training failed: %d\n", err);
return err;
}
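/*
* Rough usage sketch for these helpers (hedged; error handling elided and
* exact call sites are driver specific). The driver is assumed to have set
* link.aux and link.ops beforehand:
*
*     struct drm_dp_link link;
*
*     drm_dp_link_probe(aux, &link);
*     drm_dp_link_choose(&link, mode, info);   (optional)
*     drm_dp_link_power_up(aux, &link);
*     drm_dp_link_train(&link);
*     ...
*     drm_dp_link_power_down(aux, &link);
*
* Note that drm_dp_link_train() calls drm_dp_link_configure() internally as
* part of both the fast and full training paths.
*/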
| linux-master | drivers/gpu/drm/tegra/dp.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "nouveau_drv.h"
#include "nouveau_usif.h"
#include "nouveau_abi16.h"
#include <nvif/unpack.h>
#include <nvif/client.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>
#include <nvif/cl0080.h>
struct usif_object {
struct list_head head;
u8 route;
u64 token;
};
static void
usif_object_dtor(struct usif_object *object)
{
list_del(&object->head);
kfree(object);
}
static int
usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16)
{
struct nouveau_cli *cli = nouveau_cli(f);
struct nvif_client *client = &cli->base;
union {
struct nvif_ioctl_new_v0 v0;
} *args = data;
struct usif_object *object;
int ret = -ENOSYS;
if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true)))
return ret;
switch (args->v0.oclass) {
case NV_DMA_FROM_MEMORY:
case NV_DMA_TO_MEMORY:
case NV_DMA_IN_MEMORY:
return -EINVAL;
case NV_DEVICE: {
union {
struct nv_device_v0 v0;
} *args = data;
if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false)))
return ret;
args->v0.priv = false;
break;
}
default:
if (!parent_abi16)
return -EINVAL;
break;
}
if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
return -ENOMEM;
list_add(&object->head, &cli->objects);
object->route = args->v0.route;
object->token = args->v0.token;
args->v0.route = NVDRM_OBJECT_USIF;
args->v0.token = (unsigned long)(void *)object;
ret = nvif_client_ioctl(client, argv, argc);
if (ret) {
usif_object_dtor(object);
return ret;
}
args->v0.token = object->token;
args->v0.route = object->route;
return 0;
}
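/* Dispatch an NVIF ioctl submitted through the DRM file: copy the argument
* buffer in, fix up object ownership/routing, hand the request to the NVIF
* client (or the ABI16 shim), then copy the result back to userspace.
*/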
int
usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
{
struct nouveau_cli *cli = nouveau_cli(filp);
struct nvif_client *client = &cli->base;
void *data = kmalloc(argc, GFP_KERNEL);
u32 size = argc;
union {
struct nvif_ioctl_v0 v0;
} *argv = data;
struct usif_object *object;
bool abi16 = false;
u8 owner;
int ret;
if (ret = -ENOMEM, !argv)
goto done;
if (ret = -EFAULT, copy_from_user(argv, user, size))
goto done;
if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
/* block access to objects not created via this interface */
owner = argv->v0.owner;
if (argv->v0.object == 0ULL &&
argv->v0.type != NVIF_IOCTL_V0_DEL)
argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
else
argv->v0.owner = NVDRM_OBJECT_USIF;
} else
goto done;
/* USIF slightly abuses some return-only ioctl members in order
* to provide interoperability with the older ABI16 objects
*/
mutex_lock(&cli->mutex);
if (argv->v0.route) {
if (ret = -EINVAL, argv->v0.route == 0xff)
ret = nouveau_abi16_usif(filp, argv, argc);
if (ret) {
mutex_unlock(&cli->mutex);
goto done;
}
abi16 = true;
}
switch (argv->v0.type) {
case NVIF_IOCTL_V0_NEW:
ret = usif_object_new(filp, data, size, argv, argc, abi16);
break;
default:
ret = nvif_client_ioctl(client, argv, argc);
break;
}
if (argv->v0.route == NVDRM_OBJECT_USIF) {
object = (void *)(unsigned long)argv->v0.token;
argv->v0.route = object->route;
argv->v0.token = object->token;
if (ret == 0 && argv->v0.type == NVIF_IOCTL_V0_DEL) {
list_del(&object->head);
kfree(object);
}
} else {
argv->v0.route = NVIF_IOCTL_V0_ROUTE_HIDDEN;
argv->v0.token = 0;
}
argv->v0.owner = owner;
mutex_unlock(&cli->mutex);
if (copy_to_user(user, argv, argc))
ret = -EFAULT;
done:
kfree(argv);
return ret;
}
void
usif_client_fini(struct nouveau_cli *cli)
{
struct usif_object *object, *otemp;
list_for_each_entry_safe(object, otemp, &cli->objects, head) {
usif_object_dtor(object);
}
}
void
usif_client_init(struct nouveau_cli *cli)
{
INIT_LIST_HEAD(&cli->objects);
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_usif.c |
/*
* Copyright 2009 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <drm/display/drm_dp_helper.h>
#include "nouveau_drv.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include <nvif/if0011.h>
MODULE_PARM_DESC(mst, "Enable DisplayPort multi-stream (default: enabled)");
static int nouveau_mst = 1;
module_param_named(mst, nouveau_mst, int, 0400);
static bool
nouveau_dp_has_sink_count(struct drm_connector *connector,
struct nouveau_encoder *outp)
{
return drm_dp_read_sink_count_cap(connector, outp->dp.dpcd, &outp->dp.desc);
}
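/* Read the sink's DPCD caps, descriptor, MST capability and (if supported)
* sink count over AUX; reports disconnected when a branch device has no
* downstream sink attached.
*/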
static enum drm_connector_status
nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector,
struct nouveau_encoder *outp)
{
struct drm_connector *connector = &nv_connector->base;
struct drm_dp_aux *aux = &nv_connector->aux;
struct nv50_mstm *mstm = NULL;
enum drm_connector_status status = connector_status_disconnected;
int ret;
u8 *dpcd = outp->dp.dpcd;
ret = drm_dp_read_dpcd_caps(aux, dpcd);
if (ret < 0)
goto out;
ret = drm_dp_read_desc(aux, &outp->dp.desc, drm_dp_is_branch(dpcd));
if (ret < 0)
goto out;
if (nouveau_mst) {
mstm = outp->dp.mstm;
if (mstm)
mstm->can_mst = drm_dp_read_mst_cap(aux, dpcd);
}
if (nouveau_dp_has_sink_count(connector, outp)) {
ret = drm_dp_read_sink_count(aux);
if (ret < 0)
goto out;
outp->dp.sink_count = ret;
/*
* Dongle connected, but no display. Don't bother reading
* downstream port info
*/
if (!outp->dp.sink_count)
return connector_status_disconnected;
}
ret = drm_dp_read_downstream_info(aux, dpcd,
outp->dp.downstream_ports);
if (ret < 0)
goto out;
status = connector_status_connected;
out:
if (status != connector_status_connected) {
/* Clear any cached info */
outp->dp.sink_count = 0;
}
return status;
}
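/* Probe a DP/eDP connector. Returns NOUVEAU_DP_MST or NOUVEAU_DP_SST when a
* sink is present (after clamping link rate and lane count to what both the
* sink and the encoder support), or NOUVEAU_DP_NONE otherwise.
*/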
int
nouveau_dp_detect(struct nouveau_connector *nv_connector,
struct nouveau_encoder *nv_encoder)
{
struct drm_device *dev = nv_encoder->base.base.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector = &nv_connector->base;
struct nv50_mstm *mstm = nv_encoder->dp.mstm;
enum drm_connector_status status;
u8 *dpcd = nv_encoder->dp.dpcd;
int ret = NOUVEAU_DP_NONE, hpd;
/* If we've already read the DPCD on an eDP device, we don't need to
* reread it as it won't change
*/
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
dpcd[DP_DPCD_REV] != 0)
return NOUVEAU_DP_SST;
mutex_lock(&nv_encoder->dp.hpd_irq_lock);
if (mstm) {
/* If we're not ready to handle MST state changes yet, just
* report the last status of the connector. We'll reprobe it
* once we've resumed.
*/
if (mstm->suspended) {
if (mstm->is_mst)
ret = NOUVEAU_DP_MST;
else if (connector->status ==
connector_status_connected)
ret = NOUVEAU_DP_SST;
goto out;
}
}
/* Check status of HPD pin before attempting an AUX transaction that
* would result in a number of (futile) retries on a connector which
* has no display plugged.
*
* TODO: look into checking this before probing I2C to detect DVI/HDMI
*/
hpd = nvif_conn_hpd_status(&nv_connector->conn);
if (hpd == NVIF_CONN_HPD_STATUS_NOT_PRESENT) {
nvif_outp_dp_aux_pwr(&nv_encoder->outp, false);
goto out;
}
nvif_outp_dp_aux_pwr(&nv_encoder->outp, true);
status = nouveau_dp_probe_dpcd(nv_connector, nv_encoder);
if (status == connector_status_disconnected) {
nvif_outp_dp_aux_pwr(&nv_encoder->outp, false);
goto out;
}
/* If we're in MST mode, we're done here */
if (mstm && mstm->can_mst && mstm->is_mst) {
ret = NOUVEAU_DP_MST;
goto out;
}
nv_encoder->dp.link_bw = 27000 * dpcd[DP_MAX_LINK_RATE];
nv_encoder->dp.link_nr =
dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && dpcd[DP_DPCD_REV] >= 0x13) {
struct drm_dp_aux *aux = &nv_connector->aux;
int ret, i;
u8 sink_rates[16];
ret = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES, sink_rates, sizeof(sink_rates));
if (ret == sizeof(sink_rates)) {
for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
int val = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;
if (val && (i == 0 || val > nv_encoder->dp.link_bw))
nv_encoder->dp.link_bw = val;
}
}
}
NV_DEBUG(drm, "display: %dx%d dpcd 0x%02x\n",
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw,
dpcd[DP_DPCD_REV]);
NV_DEBUG(drm, "encoder: %dx%d\n",
nv_encoder->dcb->dpconf.link_nr,
nv_encoder->dcb->dpconf.link_bw);
if (nv_encoder->dcb->dpconf.link_nr < nv_encoder->dp.link_nr)
nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw)
nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw;
NV_DEBUG(drm, "maximum: %dx%d\n",
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
if (mstm && mstm->can_mst) {
ret = nv50_mstm_detect(nv_encoder);
if (ret == 1) {
ret = NOUVEAU_DP_MST;
goto out;
} else if (ret != 0) {
nvif_outp_dp_aux_pwr(&nv_encoder->outp, false);
goto out;
}
}
ret = NOUVEAU_DP_SST;
out:
if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST)
nv50_mstm_remove(mstm);
mutex_unlock(&nv_encoder->dp.hpd_irq_lock);
return ret;
}
bool
nouveau_dp_link_check(struct nouveau_connector *nv_connector)
{
struct nouveau_encoder *nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
if (!nv_encoder || nv_encoder->outp.or.id < 0)
return true;
return nvif_outp_dp_retrain(&nv_encoder->outp) == 0;
}
void
nouveau_dp_irq(struct work_struct *work)
{
struct nouveau_connector *nv_connector =
container_of(work, typeof(*nv_connector), irq_work);
struct drm_connector *connector = &nv_connector->base;
struct nouveau_encoder *outp = find_encoder(connector, DCB_OUTPUT_DP);
struct nouveau_drm *drm;
struct nv50_mstm *mstm;
u64 hpd = 0;
int ret;
if (!outp)
return;
drm = nouveau_drm(outp->base.base.dev);
mstm = outp->dp.mstm;
NV_DEBUG(drm, "service %s\n", connector->name);
mutex_lock(&outp->dp.hpd_irq_lock);
if (mstm && mstm->is_mst) {
if (!nv50_mstm_service(drm, nv_connector, mstm))
hpd |= NVIF_CONN_EVENT_V0_UNPLUG;
} else {
drm_dp_cec_irq(&nv_connector->aux);
if (nouveau_dp_has_sink_count(connector, outp)) {
ret = drm_dp_read_sink_count(&nv_connector->aux);
if (ret != outp->dp.sink_count)
hpd |= NVIF_CONN_EVENT_V0_PLUG;
if (ret >= 0)
outp->dp.sink_count = ret;
}
}
mutex_unlock(&outp->dp.hpd_irq_lock);
nouveau_connector_hpd(nv_connector, NVIF_CONN_EVENT_V0_IRQ | hpd);
}
/* TODO:
* - Validate against the DP caps advertised by the GPU (we don't check these
* yet)
*/
enum drm_mode_status
nv50_dp_mode_valid(struct nouveau_encoder *outp,
const struct drm_display_mode *mode,
unsigned *out_clock)
{
const unsigned int min_clock = 25000;
unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock;
/* Always check with the minimum bpc, so we can advertise better modes.
* In particular, not doing this causes modes to be dropped on HDR
* displays as we might otherwise check with a bpc as high as 16.
*/
const u8 bpp = 6 * 3;
if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
return MODE_NO_INTERLACE;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
clock *= 2;
max_rate = outp->dp.link_nr * outp->dp.link_bw;
mode_rate = DIV_ROUND_UP(clock * bpp, 8);
if (mode_rate > max_rate)
return MODE_CLOCK_HIGH;
ds_max_dotclock = drm_dp_downstream_max_dotclock(outp->dp.dpcd, outp->dp.downstream_ports);
if (ds_max_dotclock && clock > ds_max_dotclock)
return MODE_CLOCK_HIGH;
if (clock < min_clock)
return MODE_CLOCK_LOW;
if (out_clock)
*out_clock = clock;
return MODE_OK;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_dp.c |
// SPDX-License-Identifier: MIT
#include <linux/slab.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_syncobj.h>
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_abi16.h"
#include "nouveau_sched.h"
/* FIXME
*
* We want to make sure that jobs currently executing can't be deferred by
* other jobs competing for the hardware. Otherwise we might end up with job
* timeouts just because of too many clients submitting too many jobs. We don't
* want jobs to time out because of system load, but because of the job being
* too bulky.
*
* For now allow for up to 16 concurrent jobs in flight until we know how many
* rings the hardware can process in parallel.
*/
#define NOUVEAU_SCHED_HW_SUBMISSIONS 16
#define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000
int
nouveau_job_init(struct nouveau_job *job,
struct nouveau_job_args *args)
{
struct nouveau_sched_entity *entity = args->sched_entity;
int ret;
job->file_priv = args->file_priv;
job->cli = nouveau_cli(args->file_priv);
job->entity = entity;
job->sync = args->sync;
job->resv_usage = args->resv_usage;
job->ops = args->ops;
job->in_sync.count = args->in_sync.count;
if (job->in_sync.count) {
if (job->sync)
return -EINVAL;
job->in_sync.data = kmemdup(args->in_sync.s,
sizeof(*args->in_sync.s) *
args->in_sync.count,
GFP_KERNEL);
if (!job->in_sync.data)
return -ENOMEM;
}
job->out_sync.count = args->out_sync.count;
if (job->out_sync.count) {
if (job->sync) {
ret = -EINVAL;
goto err_free_in_sync;
}
job->out_sync.data = kmemdup(args->out_sync.s,
sizeof(*args->out_sync.s) *
args->out_sync.count,
GFP_KERNEL);
if (!job->out_sync.data) {
ret = -ENOMEM;
goto err_free_in_sync;
}
job->out_sync.objs = kcalloc(job->out_sync.count,
sizeof(*job->out_sync.objs),
GFP_KERNEL);
if (!job->out_sync.objs) {
ret = -ENOMEM;
goto err_free_out_sync;
}
job->out_sync.chains = kcalloc(job->out_sync.count,
sizeof(*job->out_sync.chains),
GFP_KERNEL);
if (!job->out_sync.chains) {
ret = -ENOMEM;
goto err_free_objs;
}
}
ret = drm_sched_job_init(&job->base, &entity->base, NULL);
if (ret)
goto err_free_chains;
job->state = NOUVEAU_JOB_INITIALIZED;
return 0;
err_free_chains:
kfree(job->out_sync.chains);
err_free_objs:
kfree(job->out_sync.objs);
err_free_out_sync:
kfree(job->out_sync.data);
err_free_in_sync:
kfree(job->in_sync.data);
return ret;
}
void
nouveau_job_free(struct nouveau_job *job)
{
kfree(job->in_sync.data);
kfree(job->out_sync.data);
kfree(job->out_sync.objs);
kfree(job->out_sync.chains);
}
void nouveau_job_fini(struct nouveau_job *job)
{
dma_fence_put(job->done_fence);
drm_sched_job_cleanup(&job->base);
job->ops->free(job);
}
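/* Resolve a userspace sync entry to the dma_fence behind its (timeline)
* syncobj; other sync types are rejected.
*/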
static int
sync_find_fence(struct nouveau_job *job,
struct drm_nouveau_sync *sync,
struct dma_fence **fence)
{
u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
u64 point = 0;
int ret;
if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
return -EOPNOTSUPP;
if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
point = sync->timeline_value;
ret = drm_syncobj_find_fence(job->file_priv,
sync->handle, point,
0 /* flags */, fence);
if (ret)
return ret;
return 0;
}
static int
nouveau_job_add_deps(struct nouveau_job *job)
{
struct dma_fence *in_fence = NULL;
int ret, i;
for (i = 0; i < job->in_sync.count; i++) {
struct drm_nouveau_sync *sync = &job->in_sync.data[i];
ret = sync_find_fence(job, sync, &in_fence);
if (ret) {
NV_PRINTK(warn, job->cli,
"Failed to find syncobj (-> in): handle=%d\n",
sync->handle);
return ret;
}
ret = drm_sched_job_add_dependency(&job->base, in_fence);
if (ret)
return ret;
}
return 0;
}
static void
nouveau_job_fence_attach_cleanup(struct nouveau_job *job)
{
int i;
for (i = 0; i < job->out_sync.count; i++) {
struct drm_syncobj *obj = job->out_sync.objs[i];
struct dma_fence_chain *chain = job->out_sync.chains[i];
if (obj)
drm_syncobj_put(obj);
if (chain)
dma_fence_chain_free(chain);
}
}
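/* Look up all out-sync syncobjs and pre-allocate fence chains for timeline
* syncobjs up front, so attaching the job's done_fence later cannot fail.
*/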
static int
nouveau_job_fence_attach_prepare(struct nouveau_job *job)
{
int i, ret;
for (i = 0; i < job->out_sync.count; i++) {
struct drm_nouveau_sync *sync = &job->out_sync.data[i];
struct drm_syncobj **pobj = &job->out_sync.objs[i];
struct dma_fence_chain **pchain = &job->out_sync.chains[i];
u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
ret = -EINVAL;
goto err_sync_cleanup;
}
*pobj = drm_syncobj_find(job->file_priv, sync->handle);
if (!*pobj) {
NV_PRINTK(warn, job->cli,
"Failed to find syncobj (-> out): handle=%d\n",
sync->handle);
ret = -ENOENT;
goto err_sync_cleanup;
}
if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
*pchain = dma_fence_chain_alloc();
if (!*pchain) {
ret = -ENOMEM;
goto err_sync_cleanup;
}
}
}
return 0;
err_sync_cleanup:
nouveau_job_fence_attach_cleanup(job);
return ret;
}
static void
nouveau_job_fence_attach(struct nouveau_job *job)
{
struct dma_fence *fence = job->done_fence;
int i;
for (i = 0; i < job->out_sync.count; i++) {
struct drm_nouveau_sync *sync = &job->out_sync.data[i];
struct drm_syncobj **pobj = &job->out_sync.objs[i];
struct dma_fence_chain **pchain = &job->out_sync.chains[i];
u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
drm_syncobj_add_point(*pobj, *pchain, fence,
sync->timeline_value);
} else {
drm_syncobj_replace_fence(*pobj, fence);
}
drm_syncobj_put(*pobj);
*pobj = NULL;
*pchain = NULL;
}
}
int
nouveau_job_submit(struct nouveau_job *job)
{
struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity);
struct dma_fence *done_fence = NULL;
int ret;
ret = nouveau_job_add_deps(job);
if (ret)
goto err;
ret = nouveau_job_fence_attach_prepare(job);
if (ret)
goto err;
/* Make sure the job appears on the sched_entity's queue in the same
* order as it was submitted.
*/
mutex_lock(&entity->mutex);
/* Guarantee we won't fail after the submit() callback returned
* successfully.
*/
if (job->ops->submit) {
ret = job->ops->submit(job);
if (ret)
goto err_cleanup;
}
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
if (job->sync)
done_fence = dma_fence_get(job->done_fence);
/* If a sched job depends on a dma-fence from a job from the same GPU
* scheduler instance, but a different scheduler entity, the GPU
* scheduler only waits for the particular job to be scheduled,
* rather than for the job to fully complete. This is due to the GPU
* scheduler assuming that there is a scheduler instance per ring.
* However, the current implementation, in order to avoid arbitrary
* amounts of kthreads, has a single scheduler instance while scheduler
* entities represent rings.
*
* As a workaround, set the DRM_SCHED_FENCE_DONT_PIPELINE for all
* out-fences in order to force the scheduler to wait for full job
* completion for dependent jobs from different entities and same
* scheduler instance.
*
* There is some work in progress [1] to address the issues of firmware
* schedulers; once it is in-tree the scheduler topology in Nouveau
* should be re-worked accordingly.
*
* [1] https://lore.kernel.org/dri-devel/[email protected]/
*/
set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags);
if (job->ops->armed_submit)
job->ops->armed_submit(job);
nouveau_job_fence_attach(job);
/* Set job state before pushing the job to the scheduler,
* such that we do not overwrite the job state set in run().
*/
job->state = NOUVEAU_JOB_SUBMIT_SUCCESS;
drm_sched_entity_push_job(&job->base);
mutex_unlock(&entity->mutex);
if (done_fence) {
dma_fence_wait(done_fence, true);
dma_fence_put(done_fence);
}
return 0;
err_cleanup:
mutex_unlock(&entity->mutex);
nouveau_job_fence_attach_cleanup(job);
err:
job->state = NOUVEAU_JOB_SUBMIT_FAILED;
return ret;
}
bool
nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
struct work_struct *work)
{
return queue_work(entity->sched_wq, work);
}
static struct dma_fence *
nouveau_job_run(struct nouveau_job *job)
{
struct dma_fence *fence;
fence = job->ops->run(job);
if (IS_ERR(fence))
job->state = NOUVEAU_JOB_RUN_FAILED;
else
job->state = NOUVEAU_JOB_RUN_SUCCESS;
return fence;
}
static struct dma_fence *
nouveau_sched_run_job(struct drm_sched_job *sched_job)
{
struct nouveau_job *job = to_nouveau_job(sched_job);
return nouveau_job_run(job);
}
static enum drm_gpu_sched_stat
nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
{
struct drm_gpu_scheduler *sched = sched_job->sched;
struct nouveau_job *job = to_nouveau_job(sched_job);
enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_NOMINAL;
drm_sched_stop(sched, sched_job);
if (job->ops->timeout)
stat = job->ops->timeout(job);
else
NV_PRINTK(warn, job->cli, "Generic job timeout.\n");
drm_sched_start(sched, true);
return stat;
}
static void
nouveau_sched_free_job(struct drm_sched_job *sched_job)
{
struct nouveau_job *job = to_nouveau_job(sched_job);
nouveau_job_fini(job);
}
int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
struct drm_gpu_scheduler *sched,
struct workqueue_struct *sched_wq)
{
mutex_init(&entity->mutex);
spin_lock_init(&entity->job.list.lock);
INIT_LIST_HEAD(&entity->job.list.head);
init_waitqueue_head(&entity->job.wq);
entity->sched_wq = sched_wq;
return drm_sched_entity_init(&entity->base,
DRM_SCHED_PRIORITY_NORMAL,
&sched, 1, NULL);
}
void
nouveau_sched_entity_fini(struct nouveau_sched_entity *entity)
{
drm_sched_entity_destroy(&entity->base);
}
static const struct drm_sched_backend_ops nouveau_sched_ops = {
.run_job = nouveau_sched_run_job,
.timedout_job = nouveau_sched_timedout_job,
.free_job = nouveau_sched_free_job,
};
int nouveau_sched_init(struct nouveau_drm *drm)
{
struct drm_gpu_scheduler *sched = &drm->sched;
long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq");
if (!drm->sched_wq)
return -ENOMEM;
return drm_sched_init(sched, &nouveau_sched_ops,
NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
NULL, NULL, "nouveau_sched", drm->dev->dev);
}
void nouveau_sched_fini(struct nouveau_drm *drm)
{
destroy_workqueue(drm->sched_wq);
drm_sched_fini(&drm->sched);
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_sched.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nv10_fence.h"
#include <nvif/push006c.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvhw/class/cl176e.h>
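/* Order execution between two channels with a shared semaphore: the previous
* channel releases value + 1 once it reaches this point, and the target
* channel acquires it before continuing (then releases value + 2).
*/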
int
nv17_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
struct nouveau_cli *cli = (void *)prev->user.client;
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx = chan->fence;
struct nvif_push *ppush = prev->chan.push;
struct nvif_push *npush = chan->chan.push;
u32 value;
int ret;
if (!mutex_trylock(&cli->mutex))
return -EBUSY;
spin_lock(&priv->lock);
value = priv->sequence;
priv->sequence += 2;
spin_unlock(&priv->lock);
ret = PUSH_WAIT(ppush, 5);
if (!ret) {
PUSH_MTHD(ppush, NV176E, SET_CONTEXT_DMA_SEMAPHORE, fctx->sema.handle,
SEMAPHORE_OFFSET, 0,
SEMAPHORE_ACQUIRE, value + 0,
SEMAPHORE_RELEASE, value + 1);
PUSH_KICK(ppush);
}
if (!ret && !(ret = PUSH_WAIT(npush, 5))) {
PUSH_MTHD(npush, NV176E, SET_CONTEXT_DMA_SEMAPHORE, fctx->sema.handle,
SEMAPHORE_OFFSET, 0,
SEMAPHORE_ACQUIRE, value + 1,
SEMAPHORE_RELEASE, value + 2);
PUSH_KICK(npush);
}
mutex_unlock(&cli->mutex);
return 0;
}
static int
nv17_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_priv *priv = chan->drm->fence;
struct ttm_resource *reg = priv->bo->bo.resource;
struct nv10_fence_chan *fctx;
u32 start = reg->start * PAGE_SIZE;
u32 limit = start + priv->bo->bo.base.size - 1;
int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
nouveau_fence_context_new(chan, &fctx->base);
fctx->base.emit = nv10_fence_emit;
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv17_fence_sync;
ret = nvif_object_ctor(&chan->user, "fenceCtxDma", NvSema,
NV_DMA_FROM_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = start,
.limit = limit,
}, sizeof(struct nv_dma_v0),
&fctx->sema);
if (ret)
nv10_fence_context_del(chan);
return ret;
}
void
nv17_fence_resume(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv = drm->fence;
nouveau_bo_wr32(priv->bo, 0, priv->sequence);
}
int
nv17_fence_create(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv;
int ret = 0;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base.dtor = nv10_fence_destroy;
priv->base.resume = nv17_fence_resume;
priv->base.context_new = nv17_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(priv->bo);
if (ret)
nouveau_bo_unpin(priv->bo);
}
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
if (ret) {
nv10_fence_destroy(drm);
return ret;
}
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nv17_fence.c |
/*
* Copyright (C) 2008 Maarten Maathuis.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <acpi/button.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_atomic.h>
#include "nouveau_reg.h"
#include "nouveau_drv.h"
#include "dispnv04/hw.h"
#include "dispnv50/disp.h"
#include "nouveau_acpi.h"
#include "nouveau_display.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include <nvif/class.h>
#include <nvif/if0011.h>
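/* Pick the panel's "native" mode from the probed mode list: use the
* EDID-preferred mode if there is one, otherwise the largest usable
* resolution (breaking ties on vertical refresh).
*/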
struct drm_display_mode *
nouveau_conn_native_mode(struct drm_connector *connector)
{
const struct drm_connector_helper_funcs *helper = connector->helper_private;
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *largest = NULL;
int high_w = 0, high_h = 0, high_v = 0;
list_for_each_entry(mode, &connector->probed_modes, head) {
if (helper->mode_valid(connector, mode) != MODE_OK ||
(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
/* Use preferred mode if there is one.. */
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
NV_DEBUG(drm, "native mode from preferred\n");
return drm_mode_duplicate(dev, mode);
}
/* Otherwise, take the resolution with the largest width, then
* height, then vertical refresh
*/
if (mode->hdisplay < high_w)
continue;
if (mode->hdisplay == high_w && mode->vdisplay < high_h)
continue;
if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
drm_mode_vrefresh(mode) < high_v)
continue;
high_w = mode->hdisplay;
high_h = mode->vdisplay;
high_v = drm_mode_vrefresh(mode);
largest = mode;
}
NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
high_w, high_h, high_v);
return largest ? drm_mode_duplicate(dev, largest) : NULL;
}
int
nouveau_conn_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property, u64 *val)
{
struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
struct nouveau_display *disp = nouveau_display(connector->dev);
struct drm_device *dev = connector->dev;
if (property == dev->mode_config.scaling_mode_property)
*val = asyc->scaler.mode;
else if (property == disp->underscan_property)
*val = asyc->scaler.underscan.mode;
else if (property == disp->underscan_hborder_property)
*val = asyc->scaler.underscan.hborder;
else if (property == disp->underscan_vborder_property)
*val = asyc->scaler.underscan.vborder;
else if (property == disp->dithering_mode)
*val = asyc->dither.mode;
else if (property == disp->dithering_depth)
*val = asyc->dither.depth;
else if (property == disp->vibrant_hue_property)
*val = asyc->procamp.vibrant_hue;
else if (property == disp->color_vibrance_property)
*val = asyc->procamp.color_vibrance;
else
return -EINVAL;
return 0;
}
int
nouveau_conn_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property, u64 val)
{
struct drm_device *dev = connector->dev;
struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
struct nouveau_display *disp = nouveau_display(dev);
if (property == dev->mode_config.scaling_mode_property) {
switch (val) {
case DRM_MODE_SCALE_NONE:
/* We allow 'None' for EDID modes, even on a fixed
* panel (some exist with support for lower refresh
* rates, which people might want to use for power-
* saving purposes).
*
* Non-EDID modes will force the use of GPU scaling
* to the native mode regardless of this setting.
*/
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
/* ... except prior to G80, where the code
* doesn't support such things.
*/
if (disp->disp.object.oclass < NV50_DISP)
return -EINVAL;
break;
default:
break;
}
break;
case DRM_MODE_SCALE_FULLSCREEN:
case DRM_MODE_SCALE_CENTER:
case DRM_MODE_SCALE_ASPECT:
break;
default:
return -EINVAL;
}
if (asyc->scaler.mode != val) {
asyc->scaler.mode = val;
asyc->set.scaler = true;
}
} else
if (property == disp->underscan_property) {
if (asyc->scaler.underscan.mode != val) {
asyc->scaler.underscan.mode = val;
asyc->set.scaler = true;
}
} else
if (property == disp->underscan_hborder_property) {
if (asyc->scaler.underscan.hborder != val) {
asyc->scaler.underscan.hborder = val;
asyc->set.scaler = true;
}
} else
if (property == disp->underscan_vborder_property) {
if (asyc->scaler.underscan.vborder != val) {
asyc->scaler.underscan.vborder = val;
asyc->set.scaler = true;
}
} else
if (property == disp->dithering_mode) {
if (asyc->dither.mode != val) {
asyc->dither.mode = val;
asyc->set.dither = true;
}
} else
if (property == disp->dithering_depth) {
if (asyc->dither.depth != val) {
asyc->dither.depth = val;
asyc->set.dither = true;
}
} else
if (property == disp->vibrant_hue_property) {
if (asyc->procamp.vibrant_hue != val) {
asyc->procamp.vibrant_hue = val;
asyc->set.procamp = true;
}
} else
if (property == disp->color_vibrance_property) {
if (asyc->procamp.color_vibrance != val) {
asyc->procamp.color_vibrance = val;
asyc->set.procamp = true;
}
} else {
return -EINVAL;
}
return 0;
}
void
nouveau_conn_atomic_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
__drm_atomic_helper_connector_destroy_state(&asyc->state);
kfree(asyc);
}
struct drm_connector_state *
nouveau_conn_atomic_duplicate_state(struct drm_connector *connector)
{
struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
struct nouveau_conn_atom *asyc;
if (!(asyc = kmalloc(sizeof(*asyc), GFP_KERNEL)))
return NULL;
__drm_atomic_helper_connector_duplicate_state(connector, &asyc->state);
asyc->dither = armc->dither;
asyc->scaler = armc->scaler;
asyc->procamp = armc->procamp;
asyc->set.mask = 0;
return &asyc->state;
}
void
nouveau_conn_reset(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_conn_atom *asyc;
if (drm_drv_uses_atomic_modeset(connector->dev)) {
if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
return;
if (connector->state)
nouveau_conn_atomic_destroy_state(connector,
connector->state);
__drm_atomic_helper_connector_reset(connector, &asyc->state);
} else {
asyc = &nv_connector->properties_state;
}
asyc->dither.mode = DITHERING_MODE_AUTO;
asyc->dither.depth = DITHERING_DEPTH_AUTO;
asyc->scaler.mode = DRM_MODE_SCALE_NONE;
asyc->scaler.underscan.mode = UNDERSCAN_OFF;
asyc->procamp.color_vibrance = 150;
asyc->procamp.vibrant_hue = 90;
if (nouveau_display(connector->dev)->disp.object.oclass < NV50_DISP) {
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_LVDS:
/* See note in nouveau_conn_atomic_set_property(). */
asyc->scaler.mode = DRM_MODE_SCALE_FULLSCREEN;
break;
default:
break;
}
}
}
void
nouveau_conn_attach_properties(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_conn_atom *armc;
if (drm_drv_uses_atomic_modeset(connector->dev))
armc = nouveau_conn_atom(connector->state);
else
armc = &nv_connector->properties_state;
/* Init DVI-I specific properties. */
if (connector->connector_type == DRM_MODE_CONNECTOR_DVII)
drm_object_attach_property(&connector->base, dev->mode_config.
dvi_i_subconnector_property, 0);
/* Add overscan compensation options to digital outputs. */
if (disp->underscan_property &&
(connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)) {
drm_object_attach_property(&connector->base,
disp->underscan_property,
UNDERSCAN_OFF);
drm_object_attach_property(&connector->base,
disp->underscan_hborder_property, 0);
drm_object_attach_property(&connector->base,
disp->underscan_vborder_property, 0);
}
/* Add hue and saturation options. */
if (disp->vibrant_hue_property)
drm_object_attach_property(&connector->base,
disp->vibrant_hue_property,
armc->procamp.vibrant_hue);
if (disp->color_vibrance_property)
drm_object_attach_property(&connector->base,
disp->color_vibrance_property,
armc->procamp.color_vibrance);
/* Scaling mode property. */
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_TV:
break;
case DRM_MODE_CONNECTOR_VGA:
if (disp->disp.object.oclass < NV50_DISP)
break; /* Can only scale on DFPs. */
fallthrough;
default:
drm_object_attach_property(&connector->base, dev->mode_config.
scaling_mode_property,
armc->scaler.mode);
break;
}
/* Dithering properties. */
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_TV:
case DRM_MODE_CONNECTOR_VGA:
break;
default:
if (disp->dithering_mode) {
drm_object_attach_property(&connector->base,
disp->dithering_mode,
armc->dither.mode);
}
if (disp->dithering_depth) {
drm_object_attach_property(&connector->base,
disp->dithering_depth,
armc->dither.depth);
}
break;
}
}
MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
int nouveau_tv_disable = 0;
module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
int nouveau_ignorelid = 0;
module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (default: enabled)");
int nouveau_duallink = 1;
module_param_named(duallink, nouveau_duallink, int, 0400);
MODULE_PARM_DESC(hdmimhz, "Force a maximum HDMI pixel clock (in MHz)");
int nouveau_hdmimhz = 0;
module_param_named(hdmimhz, nouveau_hdmimhz, int, 0400);
struct nouveau_encoder *
find_encoder(struct drm_connector *connector, int type)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *enc;
drm_connector_for_each_possible_encoder(connector, enc) {
nv_encoder = nouveau_encoder(enc);
if (type == DCB_OUTPUT_ANY ||
(nv_encoder->dcb && nv_encoder->dcb->type == type))
return nv_encoder;
}
return NULL;
}
static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
nvif_event_dtor(&nv_connector->irq);
nvif_event_dtor(&nv_connector->hpd);
kfree(nv_connector->edid);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
if (nv_connector->aux.transfer) {
drm_dp_cec_unregister_connector(&nv_connector->aux);
kfree(nv_connector->aux.name);
}
nvif_conn_dtor(&nv_connector->conn);
kfree(connector);
}
static struct nouveau_encoder *
nouveau_connector_ddc_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
struct drm_encoder *encoder;
int ret;
bool switcheroo_ddc = false;
drm_connector_for_each_possible_encoder(connector, encoder) {
nv_encoder = nouveau_encoder(encoder);
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_DP:
ret = nouveau_dp_detect(nouveau_connector(connector),
nv_encoder);
if (ret == NOUVEAU_DP_MST)
return NULL;
else if (ret == NOUVEAU_DP_SST)
found = nv_encoder;
break;
case DCB_OUTPUT_LVDS:
switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
VGA_SWITCHEROO_CAN_SWITCH_DDC);
fallthrough;
default:
if (!nv_encoder->i2c)
break;
if (switcheroo_ddc)
vga_switcheroo_lock_ddc(pdev);
if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
found = nv_encoder;
if (switcheroo_ddc)
vga_switcheroo_unlock_ddc(pdev);
break;
}
if (found)
break;
}
return found;
}
static struct nouveau_encoder *
nouveau_connector_of_detect(struct drm_connector *connector)
{
#ifdef __powerpc__
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder;
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct device_node *cn, *dn = pci_device_to_OF_node(pdev);
if (!dn ||
!((nv_encoder = find_encoder(connector, DCB_OUTPUT_TMDS)) ||
(nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG))))
return NULL;
for_each_child_of_node(dn, cn) {
const char *name = of_get_property(cn, "name", NULL);
const void *edid = of_get_property(cn, "EDID", NULL);
int idx = name ? name[strlen(name) - 1] - 'A' : 0;
if (nv_encoder->dcb->i2c_index == idx && edid) {
nv_connector->edid =
kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
of_node_put(cn);
return nv_encoder;
}
}
#endif
return NULL;
}
static void
nouveau_connector_set_encoder(struct drm_connector *connector,
struct nouveau_encoder *nv_encoder)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct drm_device *dev = connector->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (nv_connector->detected_encoder == nv_encoder)
return;
nv_connector->detected_encoder = nv_encoder;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
connector->interlace_allowed =
nv_encoder->caps.dp_interlace;
else
connector->interlace_allowed =
drm->client.device.info.family < NV_DEVICE_INFO_V0_VOLTA;
connector->doublescan_allowed = true;
} else
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
nv_encoder->dcb->type == DCB_OUTPUT_TMDS) {
connector->doublescan_allowed = false;
connector->interlace_allowed = false;
} else {
connector->doublescan_allowed = true;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_KELVIN ||
(drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
(pdev->device & 0x0ff0) != 0x0100 &&
(pdev->device & 0x0ff0) != 0x0150))
/* HW is broken */
connector->interlace_allowed = false;
else
connector->interlace_allowed = true;
}
if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
drm_object_property_set_value(&connector->base,
dev->mode_config.dvi_i_subconnector_property,
nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
DRM_MODE_SUBCONNECTOR_DVID :
DRM_MODE_SUBCONNECTOR_DVIA);
}
}
static void
nouveau_connector_set_edid(struct nouveau_connector *nv_connector,
struct edid *edid)
{
if (nv_connector->edid != edid) {
struct edid *old_edid = nv_connector->edid;
drm_connector_update_edid_property(&nv_connector->base, edid);
kfree(old_edid);
nv_connector->edid = edid;
}
}
static enum drm_connector_status
nouveau_connector_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
struct nouveau_encoder *nv_partner;
struct i2c_adapter *i2c;
int type;
int ret;
enum drm_connector_status conn_status = connector_status_disconnected;
/* Outputs are only polled while runtime active, so resuming the
* device here is unnecessary (and would deadlock upon runtime suspend
* because it waits for polling to finish). We do, however, want to
* prevent the autosuspend timer from elapsing during this operation
* if possible.
*/
if (drm_kms_helper_is_poll_worker()) {
pm_runtime_get_noresume(dev->dev);
} else {
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(dev->dev);
nouveau_connector_set_edid(nv_connector, NULL);
return conn_status;
}
}
nv_encoder = nouveau_connector_ddc_detect(connector);
if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
struct edid *new_edid;
if ((vga_switcheroo_handler_flags() &
VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
nv_connector->type == DCB_CONNECTOR_LVDS)
new_edid = drm_get_edid_switcheroo(connector, i2c);
else
new_edid = drm_get_edid(connector, i2c);
nouveau_connector_set_edid(nv_connector, new_edid);
if (!nv_connector->edid) {
NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
connector->name);
goto detect_analog;
}
/* Override encoder type for DVI-I based on whether EDID
* says the display is digital or analog, both use the
* same i2c channel so the value returned from ddc_detect
* isn't necessarily correct.
*/
nv_partner = NULL;
if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS)
nv_partner = find_encoder(connector, DCB_OUTPUT_ANALOG);
if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
nv_partner = find_encoder(connector, DCB_OUTPUT_TMDS);
if (nv_partner && ((nv_encoder->dcb->type == DCB_OUTPUT_ANALOG &&
nv_partner->dcb->type == DCB_OUTPUT_TMDS) ||
(nv_encoder->dcb->type == DCB_OUTPUT_TMDS &&
nv_partner->dcb->type == DCB_OUTPUT_ANALOG))) {
if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
type = DCB_OUTPUT_TMDS;
else
type = DCB_OUTPUT_ANALOG;
nv_encoder = find_encoder(connector, type);
}
nouveau_connector_set_encoder(connector, nv_encoder);
conn_status = connector_status_connected;
if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
drm_dp_cec_set_edid(&nv_connector->aux, nv_connector->edid);
goto out;
} else {
nouveau_connector_set_edid(nv_connector, NULL);
}
nv_encoder = nouveau_connector_of_detect(connector);
if (nv_encoder) {
nouveau_connector_set_encoder(connector, nv_encoder);
conn_status = connector_status_connected;
goto out;
}
detect_analog:
nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG);
if (!nv_encoder && !nouveau_tv_disable)
nv_encoder = find_encoder(connector, DCB_OUTPUT_TV);
if (nv_encoder && force) {
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
const struct drm_encoder_helper_funcs *helper =
encoder->helper_private;
if (helper->detect(encoder, connector) ==
connector_status_connected) {
nouveau_connector_set_encoder(connector, nv_encoder);
conn_status = connector_status_connected;
goto out;
}
}
out:
if (!nv_connector->edid)
drm_dp_cec_unset_edid(&nv_connector->aux);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return conn_status;
}
static enum drm_connector_status
nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
if (!nv_encoder)
goto out;
/* Try retrieving EDID via DDC */
if (!drm->vbios.fp_no_ddc) {
status = nouveau_connector_detect(connector, force);
if (status == connector_status_connected) {
edid = nv_connector->edid;
goto out;
}
}
/* On some laptops (Sony, I'm looking at you) there appears to
* be no direct way of accessing the panel's EDID. The only
* option available to us appears to be to ask ACPI for help...
*
* It's important this check happens before trying straps: one of
* said manufacturer's laptops is configured in such a way that
* nouveau decides an entry in the VBIOS FP mode table is
* valid - it's not (rh#613284).
*/
if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
edid = nouveau_acpi_edid(dev, connector);
if (edid) {
status = connector_status_connected;
goto out;
}
}
/* If no EDID found above, and the VBIOS indicates a hardcoded
* modeline is available for the panel, set it as the panel's
* native mode and exit.
*/
if (nouveau_bios_fp_mode(dev, NULL) && (drm->vbios.fp_no_ddc ||
nv_encoder->dcb->lvdsconf.use_straps_for_mode)) {
status = connector_status_connected;
goto out;
}
/* Still nothing? Some VBIOS images have a hardcoded EDID block
* for the panel stored in them.
*/
if (!drm->vbios.fp_no_ddc) {
edid = (struct edid *)nouveau_bios_embedded_edid(dev);
if (edid) {
edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
if (edid)
status = connector_status_connected;
}
}
out:
#if defined(CONFIG_ACPI_BUTTON) || \
(defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
if (status == connector_status_connected &&
!nouveau_ignorelid && !acpi_lid_open())
status = connector_status_unknown;
#endif
nouveau_connector_set_edid(nv_connector, edid);
if (nv_encoder)
nouveau_connector_set_encoder(connector, nv_encoder);
return status;
}
static void
nouveau_connector_force(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder;
int type;
if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
if (connector->force == DRM_FORCE_ON_DIGITAL)
type = DCB_OUTPUT_TMDS;
else
type = DCB_OUTPUT_ANALOG;
} else
type = DCB_OUTPUT_ANY;
nv_encoder = find_encoder(connector, type);
if (!nv_encoder) {
NV_ERROR(drm, "can't find encoder to force %s on!\n",
connector->name);
connector->status = connector_status_disconnected;
return;
}
nouveau_connector_set_encoder(connector, nv_encoder);
}
static int
nouveau_connector_set_property(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct nouveau_conn_atom *asyc = &nv_connector->properties_state;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
int ret;
ret = connector->funcs->atomic_set_property(&nv_connector->base,
&asyc->state,
property, value);
if (ret) {
if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
return get_slave_funcs(encoder)->set_property(
encoder, connector, property, value);
return ret;
}
nv_connector->scaling_mode = asyc->scaler.mode;
nv_connector->dithering_mode = asyc->dither.mode;
if (connector->encoder && connector->encoder->crtc) {
ret = drm_crtc_helper_set_mode(connector->encoder->crtc,
&connector->encoder->crtc->mode,
connector->encoder->crtc->x,
connector->encoder->crtc->y,
NULL);
if (!ret)
return -EINVAL;
}
return 0;
}
struct moderec {
int hdisplay;
int vdisplay;
};
static struct moderec scaler_modes[] = {
{ 1920, 1200 },
{ 1920, 1080 },
{ 1680, 1050 },
{ 1600, 1200 },
{ 1400, 1050 },
{ 1280, 1024 },
{ 1280, 960 },
{ 1152, 864 },
{ 1024, 768 },
{ 800, 600 },
{ 720, 400 },
{ 640, 480 },
{ 640, 400 },
{ 640, 350 },
{}
};
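/* Add common modes smaller than the native panel resolution, so they can be
* displayed via GPU scaling.
*/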
static int
nouveau_connector_scaler_modes_add(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct drm_display_mode *native = nv_connector->native_mode, *m;
struct drm_device *dev = connector->dev;
struct moderec *mode = &scaler_modes[0];
int modes = 0;
if (!native)
return 0;
while (mode->hdisplay) {
if (mode->hdisplay <= native->hdisplay &&
mode->vdisplay <= native->vdisplay &&
(mode->hdisplay != native->hdisplay ||
mode->vdisplay != native->vdisplay)) {
m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay,
drm_mode_vrefresh(native), false,
false, false);
if (!m)
continue;
drm_mode_probed_add(connector, m);
modes++;
}
mode++;
}
return modes;
}
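/* Work out the display's bit depth when the EDID doesn't provide one: eDP
* panels default to 6bpc, non-LVDS outputs to 8bpc, and LVDS panels consult
* the VBIOS straps / dual-link tables.
*/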
static void
nouveau_connector_detect_depth(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct nvbios *bios = &drm->vbios;
struct drm_display_mode *mode = nv_connector->native_mode;
bool duallink;
/* if the edid is feeling nice enough to provide this info, use it */
if (nv_connector->edid && connector->display_info.bpc)
return;
/* EDID 1.4 is *supposed* to be supported on eDP, but, Apple... */
if (nv_connector->type == DCB_CONNECTOR_eDP) {
connector->display_info.bpc = 6;
return;
}
/* we're out of options unless we're LVDS; default to 8bpc */
if (nv_encoder->dcb->type != DCB_OUTPUT_LVDS) {
connector->display_info.bpc = 8;
return;
}
connector->display_info.bpc = 6;
/* LVDS: panel straps */
if (bios->fp_no_ddc) {
if (bios->fp.if_is_24bit)
connector->display_info.bpc = 8;
return;
}
/* LVDS: DDC panel, need to first determine the number of links to
* know which if_is_24bit flag to check...
*/
if (nv_connector->edid &&
nv_connector->type == DCB_CONNECTOR_LVDS_SPWG)
duallink = ((u8 *)nv_connector->edid)[121] == 2;
else
duallink = mode->clock >= bios->fp.duallink_transition_clk;
if ((!duallink && (bios->fp.strapless_is_24bit & 1)) ||
( duallink && (bios->fp.strapless_is_24bit & 2)))
connector->display_info.bpc = 8;
}
static int
nouveau_connector_late_register(struct drm_connector *connector)
{
int ret;
ret = nouveau_backlight_init(connector);
if (ret)
return ret;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
ret = drm_dp_aux_register(&nouveau_connector(connector)->aux);
if (ret)
goto backlight_fini;
}
return 0;
backlight_fini:
nouveau_backlight_fini(connector);
return ret;
}
static void
nouveau_connector_early_unregister(struct drm_connector *connector)
{
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
drm_dp_aux_unregister(&nouveau_connector(connector)->aux);
nouveau_backlight_fini(connector);
}
static int
nouveau_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
int ret = 0;
/* Destroy the native mode; the attached monitor could have changed.
*/
if (nv_connector->native_mode) {
drm_mode_destroy(dev, nv_connector->native_mode);
nv_connector->native_mode = NULL;
}
if (nv_connector->edid)
ret = drm_add_edid_modes(connector, nv_connector->edid);
else
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
(nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
drm->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
struct drm_display_mode mode;
nouveau_bios_fp_mode(dev, &mode);
nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
}
/* Determine display colour depth for everything except LVDS now;
* DP requires this before mode_valid() is called.
*/
if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
nouveau_connector_detect_depth(connector);
/* Find the native mode if this is a digital panel. If we didn't
* find any modes through DDC previously, add the native mode to
* the list of modes.
*/
if (!nv_connector->native_mode)
nv_connector->native_mode = nouveau_conn_native_mode(connector);
if (ret == 0 && nv_connector->native_mode) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, nv_connector->native_mode);
drm_mode_probed_add(connector, mode);
ret = 1;
}
/* Determine LVDS colour depth; this must happen after determining the
* "native" mode, as some VBIOS tables require us to use the
* pixel clock as part of the lookup...
*/
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
nouveau_connector_detect_depth(connector);
if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
if (nv_connector->type == DCB_CONNECTOR_LVDS ||
nv_connector->type == DCB_CONNECTOR_LVDS_SPWG ||
nv_connector->type == DCB_CONNECTOR_eDP)
ret += nouveau_connector_scaler_modes_add(connector);
return ret;
}
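/* Maximum pixel clock (in kHz) the TMDS link can carry, derived from the
* chipset generation, the monitor's HDMI/SCDC capabilities and whether
* dual-link is possible.
*/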
static unsigned
get_tmds_link_bandwidth(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct dcb_output *dcb = nv_connector->detected_encoder->dcb;
struct drm_display_info *info = NULL;
unsigned duallink_scale =
nouveau_duallink && nv_encoder->dcb->duallink_possible ? 2 : 1;
if (drm_detect_hdmi_monitor(nv_connector->edid)) {
info = &nv_connector->base.display_info;
duallink_scale = 1;
}
if (info) {
if (nouveau_hdmimhz > 0)
return nouveau_hdmimhz * 1000;
/* Note: these limits are conservative; some Fermis
* can do 297 MHz. Unclear how this can be determined.
*/
if (drm->client.device.info.chipset >= 0x120) {
const int max_tmds_clock =
info->hdmi.scdc.scrambling.supported ?
594000 : 340000;
return info->max_tmds_clock ?
min(info->max_tmds_clock, max_tmds_clock) :
max_tmds_clock;
}
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KEPLER)
return 297000;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
return 225000;
}
if (dcb->location != DCB_LOC_ON_CHIP ||
drm->client.device.info.chipset >= 0x46)
return 165000 * duallink_scale;
else if (drm->client.device.info.chipset >= 0x40)
return 155000 * duallink_scale;
else if (drm->client.device.info.chipset >= 0x18)
return 135000 * duallink_scale;
else
return 112000 * duallink_scale;
}
static enum drm_mode_status
nouveau_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
unsigned int min_clock = 25000, max_clock = min_clock, clock = mode->clock;
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_LVDS:
if (nv_connector->native_mode &&
(mode->hdisplay > nv_connector->native_mode->hdisplay ||
mode->vdisplay > nv_connector->native_mode->vdisplay))
return MODE_PANEL;
min_clock = 0;
max_clock = 400000;
break;
case DCB_OUTPUT_TMDS:
max_clock = get_tmds_link_bandwidth(connector);
break;
case DCB_OUTPUT_ANALOG:
max_clock = nv_encoder->dcb->crtconf.maxfreq;
if (!max_clock)
max_clock = 350000;
break;
case DCB_OUTPUT_TV:
return get_slave_funcs(encoder)->mode_valid(encoder, mode);
case DCB_OUTPUT_DP:
return nv50_dp_mode_valid(nv_encoder, mode, NULL);
default:
BUG();
return MODE_BAD;
}
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
clock *= 2;
if (clock < min_clock)
return MODE_CLOCK_LOW;
if (clock > max_clock)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static struct drm_encoder *
nouveau_connector_best_encoder(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
if (nv_connector->detected_encoder)
return to_drm_encoder(nv_connector->detected_encoder);
return NULL;
}
static int
nouveau_connector_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state)
{
struct nouveau_connector *nv_conn = nouveau_connector(connector);
struct drm_connector_state *conn_state =
drm_atomic_get_new_connector_state(state, connector);
if (!nv_conn->dp_encoder || !nv50_has_mst(nouveau_drm(connector->dev)))
return 0;
return drm_dp_mst_root_conn_atomic_check(conn_state, &nv_conn->dp_encoder->dp.mstm->mgr);
}
static const struct drm_connector_helper_funcs
nouveau_connector_helper_funcs = {
.get_modes = nouveau_connector_get_modes,
.mode_valid = nouveau_connector_mode_valid,
.best_encoder = nouveau_connector_best_encoder,
.atomic_check = nouveau_connector_atomic_check,
};
static const struct drm_connector_funcs
nouveau_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.reset = nouveau_conn_reset,
.detect = nouveau_connector_detect,
.force = nouveau_connector_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = nouveau_connector_set_property,
.destroy = nouveau_connector_destroy,
.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
.atomic_set_property = nouveau_conn_atomic_set_property,
.atomic_get_property = nouveau_conn_atomic_get_property,
.late_register = nouveau_connector_late_register,
.early_unregister = nouveau_connector_early_unregister,
};
static const struct drm_connector_funcs
nouveau_connector_funcs_lvds = {
.dpms = drm_helper_connector_dpms,
.reset = nouveau_conn_reset,
.detect = nouveau_connector_detect_lvds,
.force = nouveau_connector_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = nouveau_connector_set_property,
.destroy = nouveau_connector_destroy,
.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
.atomic_set_property = nouveau_conn_atomic_set_property,
.atomic_get_property = nouveau_conn_atomic_get_property,
.late_register = nouveau_connector_late_register,
.early_unregister = nouveau_connector_early_unregister,
};
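/* Record hotplug bits for this connector and kick the shared hpd worker,
 * unless an event for it is already pending.
 */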
void
nouveau_connector_hpd(struct nouveau_connector *nv_connector, u64 bits)
{
struct nouveau_drm *drm = nouveau_drm(nv_connector->base.dev);
u32 mask = drm_connector_mask(&nv_connector->base);
unsigned long flags;
spin_lock_irqsave(&drm->hpd_lock, flags);
if (!(drm->hpd_pending & mask)) {
nv_connector->hpd_pending |= bits;
drm->hpd_pending |= mask;
schedule_work(&drm->hpd_work);
}
spin_unlock_irqrestore(&drm->hpd_lock, flags);
}
static int
nouveau_connector_irq(struct nvif_event *event, void *repv, u32 repc)
{
struct nouveau_connector *nv_connector = container_of(event, typeof(*nv_connector), irq);
schedule_work(&nv_connector->irq_work);
return NVIF_EVENT_KEEP;
}
static int
nouveau_connector_hotplug(struct nvif_event *event, void *repv, u32 repc)
{
struct nouveau_connector *nv_connector = container_of(event, typeof(*nv_connector), hpd);
struct nvif_conn_event_v0 *rep = repv;
nouveau_connector_hpd(nv_connector, rep->types);
return NVIF_EVENT_KEEP;
}
static ssize_t
nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
{
struct nouveau_connector *nv_connector =
container_of(obj, typeof(*nv_connector), aux);
struct nouveau_encoder *nv_encoder;
struct nvkm_i2c_aux *aux;
u8 size = msg->size;
int ret;
nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
if (!nv_encoder || !(aux = nv_encoder->aux))
return -ENODEV;
if (WARN_ON(msg->size > 16))
return -E2BIG;
ret = nvkm_i2c_aux_acquire(aux);
if (ret)
return ret;
ret = nvkm_i2c_aux_xfer(aux, false, msg->request, msg->address,
msg->buffer, &size);
nvkm_i2c_aux_release(aux);
if (ret >= 0) {
msg->reply = ret;
return size;
}
return ret;
}
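/* Translate a DCB connector type from the VBIOS into the matching DRM
 * connector type; anything unrecognised maps to DRM_MODE_CONNECTOR_Unknown.
 */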
static int
drm_conntype_from_dcb(enum dcb_connector_type dcb)
{
switch (dcb) {
case DCB_CONNECTOR_VGA : return DRM_MODE_CONNECTOR_VGA;
case DCB_CONNECTOR_TV_0 :
case DCB_CONNECTOR_TV_1 :
case DCB_CONNECTOR_TV_3 : return DRM_MODE_CONNECTOR_TV;
case DCB_CONNECTOR_DMS59_0 :
case DCB_CONNECTOR_DMS59_1 :
case DCB_CONNECTOR_DVI_I : return DRM_MODE_CONNECTOR_DVII;
case DCB_CONNECTOR_DVI_D : return DRM_MODE_CONNECTOR_DVID;
case DCB_CONNECTOR_LVDS :
case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
case DCB_CONNECTOR_DMS59_DP0:
case DCB_CONNECTOR_DMS59_DP1:
case DCB_CONNECTOR_DP :
case DCB_CONNECTOR_mDP :
case DCB_CONNECTOR_USB_C : return DRM_MODE_CONNECTOR_DisplayPort;
case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
case DCB_CONNECTOR_HDMI_0 :
case DCB_CONNECTOR_HDMI_1 :
case DCB_CONNECTOR_HDMI_C : return DRM_MODE_CONNECTOR_HDMIA;
case DCB_CONNECTOR_WFD : return DRM_MODE_CONNECTOR_VIRTUAL;
default:
break;
}
return DRM_MODE_CONNECTOR_Unknown;
}
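/* Look up (or create) the DRM connector for a DCB connector index, parsing
 * the VBIOS connector table, applying per-board quirks, and registering the
 * DP aux channel, CEC support and hotplug/IRQ events as appropriate.
 */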
struct drm_connector *
nouveau_connector_create(struct drm_device *dev,
const struct dcb_output *dcbe)
{
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_connector *nv_connector = NULL;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
char aux_name[48] = {0};
int index = dcbe->connector;
int type, ret = 0;
bool dummy;
drm_connector_list_iter_begin(dev, &conn_iter);
nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
nv_connector = nouveau_connector(connector);
if (nv_connector->index == index) {
drm_connector_list_iter_end(&conn_iter);
return connector;
}
}
drm_connector_list_iter_end(&conn_iter);
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
return ERR_PTR(-ENOMEM);
connector = &nv_connector->base;
nv_connector->index = index;
INIT_WORK(&nv_connector->irq_work, nouveau_dp_irq);
/* attempt to parse vbios connector type and hotplug gpio */
nv_connector->dcb = olddcb_conn(dev, index);
if (nv_connector->dcb) {
u32 entry = ROM16(nv_connector->dcb[0]);
if (olddcb_conntab(dev)[3] >= 4)
entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
nv_connector->type = nv_connector->dcb[0];
if (drm_conntype_from_dcb(nv_connector->type) ==
DRM_MODE_CONNECTOR_Unknown) {
NV_WARN(drm, "unknown connector type %02x\n",
nv_connector->type);
nv_connector->type = DCB_CONNECTOR_NONE;
}
/* Gigabyte NX85T */
if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
nv_connector->type = DCB_CONNECTOR_DVI_I;
}
/* Gigabyte GV-NX86T512H */
if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
nv_connector->type = DCB_CONNECTOR_DVI_I;
}
} else {
nv_connector->type = DCB_CONNECTOR_NONE;
}
/* no vbios data, or an unknown dcb connector type - attempt to
* figure out something suitable ourselves
*/
if (nv_connector->type == DCB_CONNECTOR_NONE) {
struct nouveau_drm *drm = nouveau_drm(dev);
struct dcb_table *dcbt = &drm->vbios.dcb;
u32 encoders = 0;
int i;
for (i = 0; i < dcbt->entries; i++) {
if (dcbt->entry[i].connector == nv_connector->index)
encoders |= (1 << dcbt->entry[i].type);
}
if (encoders & (1 << DCB_OUTPUT_DP)) {
if (encoders & (1 << DCB_OUTPUT_TMDS))
nv_connector->type = DCB_CONNECTOR_DP;
else
nv_connector->type = DCB_CONNECTOR_eDP;
} else
if (encoders & (1 << DCB_OUTPUT_TMDS)) {
if (encoders & (1 << DCB_OUTPUT_ANALOG))
nv_connector->type = DCB_CONNECTOR_DVI_I;
else
nv_connector->type = DCB_CONNECTOR_DVI_D;
} else
if (encoders & (1 << DCB_OUTPUT_ANALOG)) {
nv_connector->type = DCB_CONNECTOR_VGA;
} else
if (encoders & (1 << DCB_OUTPUT_LVDS)) {
nv_connector->type = DCB_CONNECTOR_LVDS;
} else
if (encoders & (1 << DCB_OUTPUT_TV)) {
nv_connector->type = DCB_CONNECTOR_TV_0;
}
}
switch ((type = drm_conntype_from_dcb(nv_connector->type))) {
case DRM_MODE_CONNECTOR_LVDS:
ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
if (ret) {
NV_ERROR(drm, "Error parsing LVDS table, disabling\n");
kfree(nv_connector);
return ERR_PTR(ret);
}
funcs = &nouveau_connector_funcs_lvds;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
nv_connector->aux.dev = connector->kdev;
nv_connector->aux.drm_dev = dev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
snprintf(aux_name, sizeof(aux_name), "sor-%04x-%04x",
dcbe->hasht, dcbe->hashm);
nv_connector->aux.name = kstrdup(aux_name, GFP_KERNEL);
if (!nv_connector->aux.name) {
kfree(nv_connector);
return ERR_PTR(-ENOMEM);
}
drm_dp_aux_init(&nv_connector->aux);
break;
default:
funcs = &nouveau_connector_funcs;
break;
}
/* HDMI 3D support */
if ((disp->disp.object.oclass >= G82_DISP)
&& ((type == DRM_MODE_CONNECTOR_DisplayPort)
|| (type == DRM_MODE_CONNECTOR_eDP)
|| (type == DRM_MODE_CONNECTOR_HDMIA)))
connector->stereo_allowed = true;
/* defaults, will get overridden in detect() */
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
drm_connector_init(dev, connector, funcs, type);
drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
if (nv_connector->dcb && (disp->disp.conn_mask & BIT(nv_connector->index))) {
ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index,
&nv_connector->conn);
if (ret) {
goto drm_conn_err;
}
ret = nvif_conn_event_ctor(&nv_connector->conn, "kmsHotplug",
nouveau_connector_hotplug,
NVIF_CONN_EVENT_V0_PLUG | NVIF_CONN_EVENT_V0_UNPLUG,
&nv_connector->hpd);
if (ret == 0)
connector->polled = DRM_CONNECTOR_POLL_HPD;
if (nv_connector->aux.transfer) {
ret = nvif_conn_event_ctor(&nv_connector->conn, "kmsDpIrq",
nouveau_connector_irq, NVIF_CONN_EVENT_V0_IRQ,
&nv_connector->irq);
if (ret) {
nvif_event_dtor(&nv_connector->hpd);
nvif_conn_dtor(&nv_connector->conn);
goto drm_conn_err;
}
}
}
connector->funcs->reset(connector);
nouveau_conn_attach_properties(connector);
/* Default scaling mode */
switch (nv_connector->type) {
case DCB_CONNECTOR_LVDS:
case DCB_CONNECTOR_LVDS_SPWG:
case DCB_CONNECTOR_eDP:
/* see note in nouveau_connector_set_property() */
if (disp->disp.object.oclass < NV50_DISP) {
nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
break;
}
nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
break;
default:
nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
break;
}
/* dithering properties */
switch (nv_connector->type) {
case DCB_CONNECTOR_TV_0:
case DCB_CONNECTOR_TV_1:
case DCB_CONNECTOR_TV_3:
case DCB_CONNECTOR_VGA:
break;
default:
nv_connector->dithering_mode = DITHERING_MODE_AUTO;
break;
}
switch (type) {
case DRM_MODE_CONNECTOR_DisplayPort:
nv_connector->dp_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
fallthrough;
case DRM_MODE_CONNECTOR_eDP:
drm_dp_cec_register_connector(&nv_connector->aux, connector);
break;
}
drm_connector_register(connector);
return connector;
drm_conn_err:
drm_connector_cleanup(connector);
kfree(nv_connector);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_connector.c |
/**
* \file nouveau_ioc32.c
*
* 32-bit ioctl compatibility routines for the nouveau DRM.
*
* \author Dave Airlie <[email protected]> with code from patches by Egbert Eich
*
*
* Copyright (C) Paul Mackerras 2005
* Copyright (C) Egbert Eich 2003,2004
* Copyright (C) Dave Airlie 2005
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <drm/drm.h>
#include <drm/drm_ioctl.h>
#include "nouveau_ioctl.h"
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
*
* \param filp file pointer.
* \param cmd command.
* \param arg user argument.
* \return zero on success or negative number on failure.
*/
long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
drm_ioctl_compat_t *fn = NULL;
int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
#if 0
if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(nouveau_compat_ioctls))
fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
#endif
if (fn != NULL)
ret = (*fn)(filp, cmd, arg);
else
ret = nouveau_drm_ioctl(filp, cmd, arg);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_ioc32.c |
/*
* Copyright 2007 Dave Airlied
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Authors: Dave Airlie <[email protected]>
* Ben Skeggs <[email protected]>
* Jeremy Kolb <[email protected]>
*/
#include "nouveau_bo.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include <nvif/push206e.h>
/*XXX: Fixup class to be compatible with NVIDIA's, which will allow sharing
* code with KeplerDmaCopyA.
*/
int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
struct nvif_push *push = chan->chan.push;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
u32 page_count = PFN_UP(new_reg->size);
int ret;
while (page_count) {
int line_count = (page_count > 8191) ? 8191 : page_count;
ret = PUSH_WAIT(push, 11);
if (ret)
return ret;
PUSH_NVSQ(push, NV85B5, 0x030c, upper_32_bits(src_offset),
0x0310, lower_32_bits(src_offset),
0x0314, upper_32_bits(dst_offset),
0x0318, lower_32_bits(dst_offset),
0x031c, PAGE_SIZE,
0x0320, PAGE_SIZE,
0x0324, PAGE_SIZE,
0x0328, line_count);
PUSH_NVSQ(push, NV85B5, 0x0300, 0x00000110);
page_count -= line_count;
src_offset += (PAGE_SIZE * line_count);
dst_offset += (PAGE_SIZE * line_count);
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_bo85b5.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"
#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/push906f.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>
#include <nvhw/class/cla0b5.h>
#include <linux/sched/mm.h>
#include <linux/hmm.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
/*
* FIXME: this is ugly. Right now we are using TTM to allocate vram and we pin
* it in vram while in use. We likely want to overhaul memory management for
* nouveau to be more page-like (not necessarily with system page size but a
* bigger page size) at the lowest level and have some shim layer on top that would
* provide the same functionality as TTM.
*/
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)
enum nouveau_aper {
NOUVEAU_APER_VIRT,
NOUVEAU_APER_VRAM,
NOUVEAU_APER_HOST,
};
typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
enum nouveau_aper, u64 dst_addr,
enum nouveau_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
enum nouveau_aper, u64 dst_addr);
struct nouveau_dmem_chunk {
struct list_head list;
struct nouveau_bo *bo;
struct nouveau_drm *drm;
unsigned long callocated;
struct dev_pagemap pagemap;
};
struct nouveau_dmem_migrate {
nouveau_migrate_copy_t copy_func;
nouveau_clear_page_t clear_func;
struct nouveau_channel *chan;
};
struct nouveau_dmem {
struct nouveau_drm *drm;
struct nouveau_dmem_migrate migrate;
struct list_head chunks;
struct mutex mutex;
struct page *free_pages;
spinlock_t lock;
};
static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}
static struct nouveau_drm *page_to_drm(struct page *page)
{
struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
return chunk->drm;
}
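/* Physical VRAM address backing a device-private page, derived from the
 * page's offset within its chunk's buffer object.
 */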
unsigned long nouveau_dmem_page_addr(struct page *page)
{
struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
chunk->pagemap.range.start;
return chunk->bo->offset + off;
}
static void nouveau_dmem_page_free(struct page *page)
{
struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
struct nouveau_dmem *dmem = chunk->drm->dmem;
spin_lock(&dmem->lock);
page->zone_device_data = dmem->free_pages;
dmem->free_pages = page;
WARN_ON(!chunk->callocated);
chunk->callocated--;
/*
* FIXME when chunk->callocated reaches 0 we should add the chunk to
* a reclaim list so that it can be freed in case of memory pressure.
*/
spin_unlock(&dmem->lock);
}
static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
if (fence) {
nouveau_fence_wait(*fence, true, false);
nouveau_fence_unref(fence);
} else {
/*
* FIXME wait for channel to be IDLE before finalizing
* the hmem object.
*/
}
}
static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
struct page *dpage, dma_addr_t *dma_addr)
{
struct device *dev = drm->dev->dev;
lock_page(dpage);
*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr))
return -EIO;
if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) {
dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
return -EIO;
}
return 0;
}
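/* CPU fault handler for device-private pages: allocate a system memory page,
 * copy the faulting VRAM page back into it, and let migrate_vma_*() swap the
 * mappings over.
 */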
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
struct nouveau_drm *drm = page_to_drm(vmf->page);
struct nouveau_dmem *dmem = drm->dmem;
struct nouveau_fence *fence;
struct nouveau_svmm *svmm;
struct page *spage, *dpage;
unsigned long src = 0, dst = 0;
dma_addr_t dma_addr = 0;
vm_fault_t ret = 0;
struct migrate_vma args = {
.vma = vmf->vma,
.start = vmf->address,
.end = vmf->address + PAGE_SIZE,
.src = &src,
.dst = &dst,
.pgmap_owner = drm->dev,
.fault_page = vmf->page,
.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
};
/*
* FIXME what we really want is to find some heuristic to migrate more
* than just one page on CPU fault. When such a fault happens it is very
* likely that more surrounding pages will CPU fault too.
*/
if (migrate_vma_setup(&args) < 0)
return VM_FAULT_SIGBUS;
if (!args.cpages)
return 0;
spage = migrate_pfn_to_page(src);
if (!spage || !(src & MIGRATE_PFN_MIGRATE))
goto done;
dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
if (!dpage)
goto done;
dst = migrate_pfn(page_to_pfn(dpage));
svmm = spage->zone_device_data;
mutex_lock(&svmm->mutex);
nouveau_svmm_invalidate(svmm, args.start, args.end);
ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr);
mutex_unlock(&svmm->mutex);
if (ret) {
ret = VM_FAULT_SIGBUS;
goto done;
}
nouveau_fence_new(&fence, dmem->migrate.chan);
migrate_vma_pages(&args);
nouveau_dmem_fence_done(&fence);
dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
migrate_vma_finalize(&args);
return ret;
}
static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
.page_free = nouveau_dmem_page_free,
.migrate_to_ram = nouveau_dmem_migrate_to_ram,
};
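/* Carve out an unused physical address range, back it with a pinned 2MB VRAM
 * buffer object, register it as MEMORY_DEVICE_PRIVATE, and hand one page back
 * to the caller while queueing the remaining pages on the free list.
 */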
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
struct nouveau_dmem_chunk *chunk;
struct resource *res;
struct page *page;
void *ptr;
unsigned long i, pfn_first;
int ret;
chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
if (chunk == NULL) {
ret = -ENOMEM;
goto out;
}
/* Allocate unused physical address space for device private pages. */
res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
"nouveau_dmem");
if (IS_ERR(res)) {
ret = PTR_ERR(res);
goto out_free;
}
chunk->drm = drm;
chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
chunk->pagemap.range.start = res->start;
chunk->pagemap.range.end = res->end;
chunk->pagemap.nr_range = 1;
chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
chunk->pagemap.owner = drm->dev;
ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
&chunk->bo);
if (ret)
goto out_release;
ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
goto out_bo_free;
ptr = memremap_pages(&chunk->pagemap, numa_node_id());
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
goto out_bo_unpin;
}
mutex_lock(&drm->dmem->mutex);
list_add(&chunk->list, &drm->dmem->chunks);
mutex_unlock(&drm->dmem->mutex);
pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
page = pfn_to_page(pfn_first);
spin_lock(&drm->dmem->lock);
for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
page->zone_device_data = drm->dmem->free_pages;
drm->dmem->free_pages = page;
}
*ppage = page;
chunk->callocated++;
spin_unlock(&drm->dmem->lock);
NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
DMEM_CHUNK_SIZE >> 20);
return 0;
out_bo_unpin:
nouveau_bo_unpin(chunk->bo);
out_bo_free:
nouveau_bo_ref(NULL, &chunk->bo);
out_release:
release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
kfree(chunk);
out:
return ret;
}
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
struct nouveau_dmem_chunk *chunk;
struct page *page = NULL;
int ret;
spin_lock(&drm->dmem->lock);
if (drm->dmem->free_pages) {
page = drm->dmem->free_pages;
drm->dmem->free_pages = page->zone_device_data;
chunk = nouveau_page_to_chunk(page);
chunk->callocated++;
spin_unlock(&drm->dmem->lock);
} else {
spin_unlock(&drm->dmem->lock);
ret = nouveau_dmem_chunk_alloc(drm, &page);
if (ret)
return NULL;
}
zone_device_page_init(page);
return page;
}
static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
unlock_page(page);
put_page(page);
}
void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
struct nouveau_dmem_chunk *chunk;
int ret;
if (drm->dmem == NULL)
return;
mutex_lock(&drm->dmem->mutex);
list_for_each_entry(chunk, &drm->dmem->chunks, list) {
ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
/* FIXME handle pin failure */
WARN_ON(ret);
}
mutex_unlock(&drm->dmem->mutex);
}
void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
struct nouveau_dmem_chunk *chunk;
if (drm->dmem == NULL)
return;
mutex_lock(&drm->dmem->mutex);
list_for_each_entry(chunk, &drm->dmem->chunks, list)
nouveau_bo_unpin(chunk->bo);
mutex_unlock(&drm->dmem->mutex);
}
/*
* Evict all pages mapping a chunk.
*/
static void
nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
{
unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
unsigned long *src_pfns, *dst_pfns;
dma_addr_t *dma_addrs;
struct nouveau_fence *fence;
src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
npages);
for (i = 0; i < npages; i++) {
if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
struct page *dpage;
/*
* __GFP_NOFAIL because the GPU is going away and there
* is nothing sensible we can do if we can't copy the
* data back.
*/
dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
nouveau_dmem_copy_one(chunk->drm,
migrate_pfn_to_page(src_pfns[i]), dpage,
&dma_addrs[i]);
}
}
nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
kfree(src_pfns);
kfree(dst_pfns);
for (i = 0; i < npages; i++)
dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
kfree(dma_addrs);
}
void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
struct nouveau_dmem_chunk *chunk, *tmp;
if (drm->dmem == NULL)
return;
mutex_lock(&drm->dmem->mutex);
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
nouveau_dmem_evict_chunk(chunk);
nouveau_bo_unpin(chunk->bo);
nouveau_bo_ref(NULL, &chunk->bo);
WARN_ON(chunk->callocated);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
release_mem_region(chunk->pagemap.range.start,
range_len(&chunk->pagemap.range));
kfree(chunk);
}
mutex_unlock(&drm->dmem->mutex);
}
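/* Submit a (multi-)page copy on the dma-copy class, selecting physical VRAM
 * or coherent sysmem apertures for source and destination as required.
 */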
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
enum nouveau_aper dst_aper, u64 dst_addr,
enum nouveau_aper src_aper, u64 src_addr)
{
struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
u32 launch_dma = 0;
int ret;
ret = PUSH_WAIT(push, 13);
if (ret)
return ret;
if (src_aper != NOUVEAU_APER_VIRT) {
switch (src_aper) {
case NOUVEAU_APER_VRAM:
PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB));
break;
case NOUVEAU_APER_HOST:
PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM));
break;
default:
return -EINVAL;
}
launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL);
}
if (dst_aper != NOUVEAU_APER_VIRT) {
switch (dst_aper) {
case NOUVEAU_APER_VRAM:
PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
break;
case NOUVEAU_APER_HOST:
PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
break;
default:
return -EINVAL;
}
launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
}
PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(src_addr)),
OFFSET_IN_LOWER, lower_32_bits(src_addr),
OFFSET_OUT_UPPER,
NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),
OFFSET_OUT_LOWER, lower_32_bits(dst_addr),
PITCH_IN, PAGE_SIZE,
PITCH_OUT, PAGE_SIZE,
LINE_LENGTH_IN, PAGE_SIZE,
LINE_COUNT, npages);
PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
return 0;
}
static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
enum nouveau_aper dst_aper, u64 dst_addr)
{
struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
u32 launch_dma = 0;
int ret;
ret = PUSH_WAIT(push, 12);
if (ret)
return ret;
switch (dst_aper) {
case NOUVEAU_APER_VRAM:
PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
break;
case NOUVEAU_APER_HOST:
PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
break;
default:
return -EINVAL;
}
launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
PUSH_MTHD(push, NVA0B5, SET_REMAP_CONST_A, 0,
SET_REMAP_CONST_B, 0,
SET_REMAP_COMPONENTS,
NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_X, CONST_A) |
NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_Y, CONST_B) |
NVDEF(NVA0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) |
NVDEF(NVA0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, TWO));
PUSH_MTHD(push, NVA0B5, OFFSET_OUT_UPPER,
NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),
OFFSET_OUT_LOWER, lower_32_bits(dst_addr));
PUSH_MTHD(push, NVA0B5, LINE_LENGTH_IN, length >> 3);
PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) |
NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, TRUE) |
NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
return 0;
}
static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
switch (drm->ttm.copy.oclass) {
case PASCAL_DMA_COPY_A:
case PASCAL_DMA_COPY_B:
case VOLTA_DMA_COPY_A:
case TURING_DMA_COPY_A:
drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
drm->dmem->migrate.chan = drm->ttm.chan;
return 0;
default:
break;
}
return -ENODEV;
}
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
int ret;
/* This only makes sense on PASCAL or newer */
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
return;
if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
return;
drm->dmem->drm = drm;
mutex_init(&drm->dmem->mutex);
INIT_LIST_HEAD(&drm->dmem->chunks);
spin_lock_init(&drm->dmem->lock);
/* Initialize migration dma helpers before registering memory */
ret = nouveau_dmem_migrate_init(drm);
if (ret) {
kfree(drm->dmem);
drm->dmem = NULL;
}
}
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
struct nouveau_svmm *svmm, unsigned long src,
dma_addr_t *dma_addr, u64 *pfn)
{
struct device *dev = drm->dev->dev;
struct page *dpage, *spage;
unsigned long paddr;
spage = migrate_pfn_to_page(src);
if (!(src & MIGRATE_PFN_MIGRATE))
goto out;
dpage = nouveau_dmem_page_alloc_locked(drm);
if (!dpage)
goto out;
paddr = nouveau_dmem_page_addr(dpage);
if (spage) {
*dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr))
goto out_free_page;
if (drm->dmem->migrate.copy_func(drm, 1,
NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
goto out_dma_unmap;
} else {
*dma_addr = DMA_MAPPING_ERROR;
if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
NOUVEAU_APER_VRAM, paddr))
goto out_free_page;
}
dpage->zone_device_data = svmm;
*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
if (src & MIGRATE_PFN_WRITE)
*pfn |= NVIF_VMM_PFNMAP_V0_W;
return migrate_pfn(page_to_pfn(dpage));
out_dma_unmap:
dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
nouveau_dmem_page_free_locked(drm, dpage);
out:
*pfn = NVIF_VMM_PFNMAP_V0_NONE;
return 0;
}
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
struct nouveau_svmm *svmm, struct migrate_vma *args,
dma_addr_t *dma_addrs, u64 *pfns)
{
struct nouveau_fence *fence;
unsigned long addr = args->start, nr_dma = 0, i;
for (i = 0; addr < args->end; i++) {
args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
args->src[i], dma_addrs + nr_dma, pfns + i);
if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
nr_dma++;
addr += PAGE_SIZE;
}
nouveau_fence_new(&fence, drm->dmem->migrate.chan);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
while (nr_dma--) {
dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
DMA_BIDIRECTIONAL);
}
migrate_vma_finalize(args);
}
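/* Migrate an address range to VRAM, working in batches of at most
 * SG_MAX_SINGLE_ALLOC pages per migrate_vma_setup()/finalize() cycle.
 */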
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
struct nouveau_svmm *svmm,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
unsigned long npages = (end - start) >> PAGE_SHIFT;
unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
dma_addr_t *dma_addrs;
struct migrate_vma args = {
.vma = vma,
.start = start,
.pgmap_owner = drm->dev,
.flags = MIGRATE_VMA_SELECT_SYSTEM,
};
unsigned long i;
u64 *pfns;
int ret = -ENOMEM;
if (drm->dmem == NULL)
return -ENODEV;
args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
if (!args.src)
goto out;
args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
if (!args.dst)
goto out_free_src;
dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
if (!dma_addrs)
goto out_free_dst;
pfns = nouveau_pfns_alloc(max);
if (!pfns)
goto out_free_dma;
for (i = 0; i < npages; i += max) {
if (args.start + (max << PAGE_SHIFT) > end)
args.end = end;
else
args.end = args.start + (max << PAGE_SHIFT);
ret = migrate_vma_setup(&args);
if (ret)
goto out_free_pfns;
if (args.cpages)
nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
pfns);
args.start = args.end;
}
ret = 0;
out_free_pfns:
nouveau_pfns_free(pfns);
out_free_dma:
kfree(dma_addrs);
out_free_dst:
kfree(args.dst);
out_free_src:
kfree(args.src);
out:
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_dmem.c |
/*
* Copyright (C) 2009 Red Hat <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*
* Authors:
* Ben Skeggs <[email protected]>
*/
#include <linux/debugfs.h>
#include <nvif/class.h>
#include <nvif/if0001.h>
#include "nouveau_debugfs.h"
#include "nouveau_drv.h"
static int
nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
int i;
for (i = 0; i < drm->vbios.length; i++)
seq_printf(m, "%c", drm->vbios.data[i]);
return 0;
}
static int
nouveau_debugfs_strap_peek(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
int ret;
ret = pm_runtime_get_sync(drm->dev->dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(drm->dev->dev);
return ret;
}
seq_printf(m, "0x%08x\n",
nvif_rd32(&drm->client.device.object, 0x101000));
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_autosuspend(drm->dev->dev);
return 0;
}
static int
nouveau_debugfs_pstate_get(struct seq_file *m, void *data)
{
struct drm_device *drm = m->private;
struct nouveau_debugfs *debugfs = nouveau_debugfs(drm);
struct nvif_object *ctrl;
struct nvif_control_pstate_info_v0 info = {};
int ret, i;
if (!debugfs)
return -ENODEV;
ctrl = &debugfs->ctrl;
ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_INFO, &info, sizeof(info));
if (ret)
return ret;
for (i = 0; i < info.count + 1; i++) {
const s32 state = i < info.count ? i :
NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT;
struct nvif_control_pstate_attr_v0 attr = {
.state = state,
.index = 0,
};
ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_ATTR,
&attr, sizeof(attr));
if (ret)
return ret;
if (i < info.count)
seq_printf(m, "%02x:", attr.state);
else
seq_printf(m, "%s:", info.pwrsrc == 0 ? "DC" :
info.pwrsrc == 1 ? "AC" : "--");
attr.index = 0;
do {
attr.state = state;
ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_ATTR,
&attr, sizeof(attr));
if (ret)
return ret;
seq_printf(m, " %s %d", attr.name, attr.min);
if (attr.min != attr.max)
seq_printf(m, "-%d", attr.max);
seq_printf(m, " %s", attr.unit);
} while (attr.index);
if (state >= 0) {
if (info.ustate_ac == state)
seq_puts(m, " AC");
if (info.ustate_dc == state)
seq_puts(m, " DC");
if (info.pstate == state)
seq_puts(m, " *");
} else {
if (info.ustate_ac < -1)
seq_puts(m, " AC");
if (info.ustate_dc < -1)
seq_puts(m, " DC");
}
seq_putc(m, '\n');
}
return 0;
}
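/* Parse writes to the "pstate" debugfs file.  Accepts an optional "ac:" or
 * "dc:" power-source prefix followed by a performance level in hex, or the
 * keywords "none" and "auto".
 */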
static ssize_t
nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
struct drm_device *drm = m->private;
struct nouveau_debugfs *debugfs = nouveau_debugfs(drm);
struct nvif_control_pstate_user_v0 args = { .pwrsrc = -EINVAL };
char buf[32] = {}, *tmp, *cur = buf;
long value, ret;
if (!debugfs)
return -ENODEV;
if (len >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, ubuf, len))
return -EFAULT;
if ((tmp = strchr(buf, '\n')))
*tmp = '\0';
if (!strncasecmp(cur, "dc:", 3)) {
args.pwrsrc = 0;
cur += 3;
} else
if (!strncasecmp(cur, "ac:", 3)) {
args.pwrsrc = 1;
cur += 3;
}
if (!strcasecmp(cur, "none"))
args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN;
else
if (!strcasecmp(cur, "auto"))
args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON;
else {
ret = kstrtol(cur, 16, &value);
if (ret)
return ret;
args.ustate = value;
}
ret = pm_runtime_get_sync(drm->dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(drm->dev);
return ret;
}
ret = nvif_mthd(&debugfs->ctrl, NVIF_CONTROL_PSTATE_USER,
&args, sizeof(args));
pm_runtime_put_autosuspend(drm->dev);
if (ret < 0)
return ret;
return len;
}
static int
nouveau_debugfs_pstate_open(struct inode *inode, struct file *file)
{
return single_open(file, nouveau_debugfs_pstate_get, inode->i_private);
}
static void
nouveau_debugfs_gpuva_regions(struct seq_file *m, struct nouveau_uvmm *uvmm)
{
MA_STATE(mas, &uvmm->region_mt, 0, 0);
struct nouveau_uvma_region *reg;
seq_puts (m, " VA regions | start | range | end \n");
seq_puts (m, "----------------------------------------------------------------------------\n");
mas_for_each(&mas, reg, ULONG_MAX)
seq_printf(m, " | 0x%016llx | 0x%016llx | 0x%016llx\n",
reg->va.addr, reg->va.range, reg->va.addr + reg->va.range);
}
static int
nouveau_debugfs_gpuva(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
struct nouveau_cli *cli;
mutex_lock(&drm->clients_lock);
list_for_each_entry(cli, &drm->clients, head) {
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
if (!uvmm)
continue;
nouveau_uvmm_lock(uvmm);
drm_debugfs_gpuva_info(m, &uvmm->umgr);
seq_puts(m, "\n");
nouveau_debugfs_gpuva_regions(m, uvmm);
nouveau_uvmm_unlock(uvmm);
}
mutex_unlock(&drm->clients_lock);
return 0;
}
static const struct file_operations nouveau_pstate_fops = {
.owner = THIS_MODULE,
.open = nouveau_debugfs_pstate_open,
.read = seq_read,
.write = nouveau_debugfs_pstate_set,
.release = single_release,
};
static struct drm_info_list nouveau_debugfs_list[] = {
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
{ "strap_peek", nouveau_debugfs_strap_peek, 0, NULL },
DRM_DEBUGFS_GPUVA_INFO(nouveau_debugfs_gpuva, NULL),
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
static const struct nouveau_debugfs_files {
const char *name;
const struct file_operations *fops;
} nouveau_debugfs_files[] = {
{"pstate", &nouveau_pstate_fops},
};
void
nouveau_drm_debugfs_init(struct drm_minor *minor)
{
struct nouveau_drm *drm = nouveau_drm(minor->dev);
struct dentry *dentry;
int i;
for (i = 0; i < ARRAY_SIZE(nouveau_debugfs_files); i++) {
debugfs_create_file(nouveau_debugfs_files[i].name,
S_IRUGO | S_IWUSR,
minor->debugfs_root, minor->dev,
nouveau_debugfs_files[i].fops);
}
drm_debugfs_create_files(nouveau_debugfs_list,
NOUVEAU_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
/* Set the size of the vbios since we know it, and it's confusing to
* userspace if it wants to seek() but the file has a length of 0
*/
dentry = debugfs_lookup("vbios.rom", minor->debugfs_root);
if (!dentry)
return;
d_inode(dentry)->i_size = drm->vbios.length;
dput(dentry);
}
int
nouveau_debugfs_init(struct nouveau_drm *drm)
{
drm->debugfs = kzalloc(sizeof(*drm->debugfs), GFP_KERNEL);
if (!drm->debugfs)
return -ENOMEM;
return nvif_object_ctor(&drm->client.device.object, "debugfsCtrl", 0,
NVIF_CLASS_CONTROL, NULL, 0,
&drm->debugfs->ctrl);
}
void
nouveau_debugfs_fini(struct nouveau_drm *drm)
{
if (drm->debugfs && drm->debugfs->ctrl.priv)
nvif_object_dtor(&drm->debugfs->ctrl);
kfree(drm->debugfs);
drm->debugfs = NULL;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_debugfs.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <nvif/push006c.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/if0020.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_bo.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_svm.h"
MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
void
nouveau_channel_kill(struct nouveau_channel *chan)
{
atomic_set(&chan->killed, 1);
if (chan->fence)
nouveau_fence_context_kill(chan->fence, -ENODEV);
}
static int
nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
{
struct nouveau_channel *chan = container_of(event, typeof(*chan), kill);
struct nouveau_cli *cli = (void *)chan->user.client;
NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
if (unlikely(!atomic_read(&chan->killed)))
nouveau_channel_kill(chan);
return NVIF_EVENT_DROP;
}
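/* Drain a channel by emitting a fence and waiting for it to signal. */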
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
if (likely(chan && chan->fence && !atomic_read(&chan->killed))) {
struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_fence *fence = NULL;
int ret;
ret = nouveau_fence_new(&fence, chan);
if (!ret) {
ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}
if (ret) {
NV_PRINTK(err, cli, "failed to idle channel %d [%s]\n",
chan->chid, nvxx_client(&cli->base)->name);
return ret;
}
}
return 0;
}
void
nouveau_channel_del(struct nouveau_channel **pchan)
{
struct nouveau_channel *chan = *pchan;
if (chan) {
struct nouveau_cli *cli = (void *)chan->user.client;
if (chan->fence)
nouveau_fence(chan->drm)->context_del(chan);
if (cli)
nouveau_svmm_part(chan->vmm->svmm, chan->inst);
nvif_object_dtor(&chan->blit);
nvif_object_dtor(&chan->nvsw);
nvif_object_dtor(&chan->gart);
nvif_object_dtor(&chan->vram);
nvif_event_dtor(&chan->kill);
nvif_object_dtor(&chan->user);
nvif_mem_dtor(&chan->mem_userd);
nvif_object_dtor(&chan->push.ctxdma);
nouveau_vma_del(&chan->push.vma);
nouveau_bo_unmap(chan->push.buffer);
if (chan->push.buffer && chan->push.buffer->bo.pin_count)
nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
kfree(chan);
}
*pchan = NULL;
}
static void
nouveau_channel_kick(struct nvif_push *push)
{
struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
FIRE_RING(chan);
chan->chan._push.bgn = chan->chan._push.cur;
}
static int
nouveau_channel_wait(struct nvif_push *push, u32 size)
{
struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
int ret;
chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
ret = RING_SPACE(chan, size);
if (ret == 0) {
chan->chan._push.bgn = chan->chan._push.mem.object.map.ptr;
chan->chan._push.bgn = chan->chan._push.bgn + chan->dma.cur;
chan->chan._push.cur = chan->chan._push.bgn;
chan->chan._push.end = chan->chan._push.bgn + size;
}
return ret;
}
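/* Allocate, pin and map the DMA push buffer for a new channel and, on
 * pre-Fermi devices, create a ctxdma covering the memory space the pushbuf
 * lives in (VM, VRAM, AGP or system memory).
 */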
static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
u32 size, struct nouveau_channel **pchan)
{
struct nouveau_cli *cli = (void *)device->object.client;
struct nv_dma_v0 args = {};
struct nouveau_channel *chan;
u32 target;
int ret;
chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan)
return -ENOMEM;
chan->device = device;
chan->drm = drm;
chan->vmm = nouveau_cli_vmm(cli);
atomic_set(&chan->killed, 0);
/* allocate memory for dma push buffer */
target = NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
if (nouveau_vram_pushbuf)
target = NOUVEAU_GEM_DOMAIN_VRAM;
ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
&chan->push.buffer);
if (ret == 0) {
ret = nouveau_bo_pin(chan->push.buffer, target, false);
if (ret == 0)
ret = nouveau_bo_map(chan->push.buffer);
}
if (ret) {
nouveau_channel_del(pchan);
return ret;
}
chan->chan._push.mem.object.parent = cli->base.object.parent;
chan->chan._push.mem.object.client = &cli->base;
chan->chan._push.mem.object.name = "chanPush";
chan->chan._push.mem.object.map.ptr = chan->push.buffer->kmap.virtual;
chan->chan._push.wait = nouveau_channel_wait;
chan->chan._push.kick = nouveau_channel_kick;
chan->chan.push = &chan->chan._push;
/* create dma object covering the *entire* memory space that the
* pushbuf lives in; this is because the GEM code requires that
* we be able to call out to other (indirect) push buffers
*/
chan->push.addr = chan->push.buffer->offset;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_vma_new(chan->push.buffer, chan->vmm,
&chan->push.vma);
if (ret) {
nouveau_channel_del(pchan);
return ret;
}
chan->push.addr = chan->push.vma->addr;
if (device->info.family >= NV_DEVICE_INFO_V0_FERMI)
return 0;
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
args.limit = chan->vmm->vmm.limit - 1;
} else
if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
/* nv04 vram pushbuf hack: retarget to its location in
* the framebuffer bar rather than direct vram access.
* No idea why this exists; it came from the -nv ddx.
*/
args.target = NV_DMA_V0_TARGET_PCI;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = nvxx_device(device)->func->
resource_addr(nvxx_device(device), 1);
args.limit = args.start + device->info.ram_user - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
args.limit = device->info.ram_user - 1;
}
} else {
if (chan->drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = chan->drm->agp.base;
args.limit = chan->drm->agp.base +
chan->drm->agp.size - 1;
} else {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
args.limit = chan->vmm->vmm.limit - 1;
}
}
ret = nvif_object_ctor(&device->object, "abi16PushCtxDma", 0,
NV_DMA_FROM_MEMORY, &args, sizeof(args),
&chan->push.ctxdma);
if (ret) {
nouveau_channel_del(pchan);
return ret;
}
return 0;
}
static int
nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool priv, u64 runm,
struct nouveau_channel **pchan)
{
static const struct {
s32 oclass;
int version;
} hosts[] = {
{ AMPERE_CHANNEL_GPFIFO_B, 0 },
{ AMPERE_CHANNEL_GPFIFO_A, 0 },
{ TURING_CHANNEL_GPFIFO_A, 0 },
{ VOLTA_CHANNEL_GPFIFO_A, 0 },
{ PASCAL_CHANNEL_GPFIFO_A, 0 },
{ MAXWELL_CHANNEL_GPFIFO_A, 0 },
{ KEPLER_CHANNEL_GPFIFO_B, 0 },
{ KEPLER_CHANNEL_GPFIFO_A, 0 },
{ FERMI_CHANNEL_GPFIFO , 0 },
{ G82_CHANNEL_GPFIFO , 0 },
{ NV50_CHANNEL_GPFIFO , 0 },
{ NV40_CHANNEL_DMA , 0 },
{ NV17_CHANNEL_DMA , 0 },
{ NV10_CHANNEL_DMA , 0 },
{ NV03_CHANNEL_DMA , 0 },
{}
};
struct {
struct nvif_chan_v0 chan;
char name[TASK_COMM_LEN+16];
} args;
struct nouveau_cli *cli = (void *)device->object.client;
struct nouveau_channel *chan;
const u64 plength = 0x10000;
const u64 ioffset = plength;
const u64 ilength = 0x02000;
char name[TASK_COMM_LEN];
int cid, ret;
u64 size;
cid = nvif_mclass(&device->object, hosts);
if (cid < 0)
return cid;
if (hosts[cid].oclass < NV50_CHANNEL_GPFIFO)
size = plength;
else
size = ioffset + ilength;
/* allocate dma push buffer */
ret = nouveau_channel_prep(drm, device, size, &chan);
*pchan = chan;
if (ret)
return ret;
/* create channel object */
args.chan.version = 0;
args.chan.namelen = sizeof(args.name);
args.chan.runlist = __ffs64(runm);
args.chan.runq = 0;
args.chan.priv = priv;
args.chan.devm = BIT(0);
if (hosts[cid].oclass < NV50_CHANNEL_GPFIFO) {
args.chan.vmm = 0;
args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
args.chan.offset = chan->push.addr;
args.chan.length = 0;
} else {
args.chan.vmm = nvif_handle(&chan->vmm->vmm.object);
if (hosts[cid].oclass < FERMI_CHANNEL_GPFIFO)
args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
else
args.chan.ctxdma = 0;
args.chan.offset = ioffset + chan->push.addr;
args.chan.length = ilength;
}
args.chan.huserd = 0;
args.chan.ouserd = 0;
/* allocate userd */
if (hosts[cid].oclass >= VOLTA_CHANNEL_GPFIFO_A) {
ret = nvif_mem_ctor(&cli->mmu, "abi16ChanUSERD", NVIF_CLASS_MEM_GF100,
NVIF_MEM_VRAM | NVIF_MEM_COHERENT | NVIF_MEM_MAPPABLE,
0, PAGE_SIZE, NULL, 0, &chan->mem_userd);
if (ret)
return ret;
args.chan.huserd = nvif_handle(&chan->mem_userd.object);
args.chan.ouserd = 0;
chan->userd = &chan->mem_userd.object;
} else {
chan->userd = &chan->user;
}
get_task_comm(name, current);
snprintf(args.name, sizeof(args.name), "%s[%d]", name, task_pid_nr(current));
ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass,
&args, sizeof(args), &chan->user);
if (ret) {
nouveau_channel_del(pchan);
return ret;
}
chan->runlist = args.chan.runlist;
chan->chid = args.chan.chid;
chan->inst = args.chan.inst;
chan->token = args.chan.token;
return 0;
}
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
struct nvif_device *device = chan->device;
struct nouveau_drm *drm = chan->drm;
struct nv_dma_v0 args = {};
int ret, i;
ret = nvif_object_map(chan->userd, NULL, 0);
if (ret)
return ret;
if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
struct {
struct nvif_event_v0 base;
struct nvif_chan_event_v0 host;
} args;
args.host.version = 0;
args.host.type = NVIF_CHAN_EVENT_V0_KILLED;
ret = nvif_event_ctor(&chan->user, "abi16ChanKilled", chan->chid,
nouveau_channel_killed, false,
&args.base, sizeof(args), &chan->kill);
if (ret == 0)
ret = nvif_event_allow(&chan->kill);
if (ret) {
NV_ERROR(drm, "Failed to request channel kill "
"notification: %d\n", ret);
return ret;
}
}
/* allocate dma objects to cover all allowed vram, and gart */
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
args.limit = chan->vmm->vmm.limit - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
args.limit = device->info.ram_user - 1;
}
ret = nvif_object_ctor(&chan->user, "abi16ChanVramCtxDma", vram,
NV_DMA_IN_MEMORY, &args, sizeof(args),
&chan->vram);
if (ret)
return ret;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
args.limit = chan->vmm->vmm.limit - 1;
} else
if (chan->drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = chan->drm->agp.base;
args.limit = chan->drm->agp.base +
chan->drm->agp.size - 1;
} else {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
args.limit = chan->vmm->vmm.limit - 1;
}
ret = nvif_object_ctor(&chan->user, "abi16ChanGartCtxDma", gart,
NV_DMA_IN_MEMORY, &args, sizeof(args),
&chan->gart);
if (ret)
return ret;
}
/* initialise dma tracking parameters */
switch (chan->user.oclass & 0x00ff) {
case 0x006b:
case 0x006e:
chan->user_put = 0x40;
chan->user_get = 0x44;
chan->dma.max = (0x10000 / 4) - 2;
break;
default:
chan->user_put = 0x40;
chan->user_get = 0x44;
chan->user_get_hi = 0x60;
chan->dma.ib_base = 0x10000 / 4;
chan->dma.ib_max = (0x02000 / 8) - 1;
chan->dma.ib_put = 0;
chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
chan->dma.max = chan->dma.ib_base;
break;
}
chan->dma.put = 0;
chan->dma.cur = chan->dma.put;
chan->dma.free = chan->dma.max - chan->dma.cur;
ret = PUSH_WAIT(chan->chan.push, NOUVEAU_DMA_SKIPS);
if (ret)
return ret;
for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
PUSH_DATA(chan->chan.push, 0x00000000);
/* allocate software object class (used for fences on <= nv05) */
if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
ret = nvif_object_ctor(&chan->user, "abi16NvswFence", 0x006e,
NVIF_CLASS_SW_NV04,
NULL, 0, &chan->nvsw);
if (ret)
return ret;
ret = PUSH_WAIT(chan->chan.push, 2);
if (ret)
return ret;
PUSH_NVSQ(chan->chan.push, NV_SW, 0x0000, chan->nvsw.handle);
PUSH_KICK(chan->chan.push);
}
/* initialise synchronisation */
return nouveau_fence(chan->drm)->context_new(chan);
}
int
nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
bool priv, u64 runm, u32 vram, u32 gart, struct nouveau_channel **pchan)
{
struct nouveau_cli *cli = (void *)device->object.client;
int ret;
ret = nouveau_channel_ctor(drm, device, priv, runm, pchan);
if (ret) {
NV_PRINTK(dbg, cli, "channel create, %d\n", ret);
return ret;
}
ret = nouveau_channel_init(*pchan, vram, gart);
if (ret) {
NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
nouveau_channel_del(pchan);
return ret;
}
ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
if (ret)
nouveau_channel_del(pchan);
return ret;
}
void
nouveau_channels_fini(struct nouveau_drm *drm)
{
kfree(drm->runl);
}
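/* Query the device for its channel and runlist counts, then set up the
 * per-runlist channel id bases and fence contexts.
 */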
int
nouveau_channels_init(struct nouveau_drm *drm)
{
struct {
struct nv_device_info_v1 m;
struct {
struct nv_device_info_v1_data channels;
struct nv_device_info_v1_data runlists;
} v;
} args = {
.m.version = 1,
.m.count = sizeof(args.v) / sizeof(args.v.channels),
.v.channels.mthd = NV_DEVICE_HOST_CHANNELS,
.v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS,
};
struct nvif_object *device = &drm->client.device.object;
int ret, i;
ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
if (ret ||
args.v.runlists.mthd == NV_DEVICE_INFO_INVALID || !args.v.runlists.data ||
args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
return -ENODEV;
drm->chan_nr = drm->chan_total = args.v.channels.data;
drm->runl_nr = fls64(args.v.runlists.data);
drm->runl = kcalloc(drm->runl_nr, sizeof(*drm->runl), GFP_KERNEL);
if (!drm->runl)
return -ENOMEM;
if (drm->chan_nr == 0) {
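/* A zero global channel count means channels are reported per-runlist:
* query each runlist present in the bitmask and carve out its own
* channel id and fence context ranges.
*/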
for (i = 0; i < drm->runl_nr; i++) {
if (!(args.v.runlists.data & BIT(i)))
continue;
args.v.channels.mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS;
args.v.channels.data = i;
ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
if (ret || args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
return -ENODEV;
drm->runl[i].chan_nr = args.v.channels.data;
drm->runl[i].chan_id_base = drm->chan_total;
drm->runl[i].context_base = dma_fence_context_alloc(drm->runl[i].chan_nr);
drm->chan_total += drm->runl[i].chan_nr;
}
} else {
drm->runl[0].context_base = dma_fence_context_alloc(drm->chan_nr);
for (i = 1; i < drm->runl_nr; i++)
drm->runl[i].context_base = drm->runl[0].context_base;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_chan.c |
/*
* Copyright 2020 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "nouveau_bo.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include <nvif/push906f.h>
/*XXX: Fixup class to be compatible with NVIDIA's, which will allow sharing
* code with KeplerDmaCopyA.
*/
int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
struct nvif_push *push = chan->chan.push;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
u32 page_count = PFN_UP(new_reg->size);
int ret;
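/* Submit the copy in chunks of at most 8191 PAGE_SIZE lines per method call. */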
while (page_count) {
int line_count = (page_count > 8191) ? 8191 : page_count;
ret = PUSH_WAIT(push, 10);
if (ret)
return ret;
PUSH_NVSQ(push, NV90B5, 0x030c, upper_32_bits(src_offset),
0x0310, lower_32_bits(src_offset),
0x0314, upper_32_bits(dst_offset),
0x0318, lower_32_bits(dst_offset),
0x031c, PAGE_SIZE,
0x0320, PAGE_SIZE,
0x0324, PAGE_SIZE,
0x0328, line_count);
PUSH_NVIM(push, NV90B5, 0x0300, 0x0110);
page_count -= line_count;
src_offset += (PAGE_SIZE * line_count);
dst_offset += (PAGE_SIZE * line_count);
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_bo90b5.c |
/*
* Copyright (C) 2009 Red Hat <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*
* Authors:
* Matthew Garrett <[email protected]>
*
* Register locations derived from NVClock by Roderick Colenbrander
*/
#include <linux/apple-gmux.h>
#include <linux/backlight.h>
#include <linux/idr.h>
#include <drm/drm_probe_helper.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_acpi.h"
static struct ida bl_ida;
#define BL_NAME_SIZE 15 // 12 for name + 2 for digits + 1 for '\0'
static bool
nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
struct nouveau_backlight *bl)
{
const int nb = ida_alloc_max(&bl_ida, 99, GFP_KERNEL);
if (nb < 0)
return false;
if (nb > 0)
snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
else
snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight");
bl->id = nb;
return true;
}
static int
nv40_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
NV40_PMC_BACKLIGHT_MASK) >> 16;
return val;
}
static int
nv40_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
int val = bd->props.brightness;
int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);
nvif_wr32(device, NV40_PMC_BACKLIGHT,
(val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
return 0;
}
static const struct backlight_ops nv40_bl_ops = {
.options = BL_CORE_SUSPENDRESUME,
.get_brightness = nv40_get_intensity,
.update_status = nv40_set_intensity,
};
static int
nv40_backlight_init(struct nouveau_encoder *encoder,
struct backlight_properties *props,
const struct backlight_ops **ops)
{
struct nouveau_drm *drm = nouveau_drm(encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
return -ENODEV;
props->max_brightness = 31;
*ops = &nv40_bl_ops;
return 0;
}
static int
nv50_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
int or = ffs(nv_encoder->dcb->or) - 1;
u32 div = 1025;
u32 val;
val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
val &= NV50_PDISP_SOR_PWM_CTL_VAL;
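/* Scale the PWM duty cycle to a 0..100 brightness value, rounding to nearest. */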
return ((val * 100) + (div / 2)) / div;
}
static int
nv50_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
int or = ffs(nv_encoder->dcb->or) - 1;
u32 div = 1025;
u32 val = (bd->props.brightness * div) / 100;
nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
NV50_PDISP_SOR_PWM_CTL_NEW | val);
return 0;
}
static const struct backlight_ops nv50_bl_ops = {
.options = BL_CORE_SUSPENDRESUME,
.get_brightness = nv50_get_intensity,
.update_status = nv50_set_intensity,
};
/*
* eDP brightness callbacks need to happen under lock, since we need to
* enable/disable the backlight ourselves for modesets
*/
static int
nv50_edp_get_brightness(struct backlight_device *bd)
{
struct drm_connector *connector = dev_get_drvdata(bd->dev.parent);
struct drm_device *dev = connector->dev;
struct drm_crtc *crtc;
struct drm_modeset_acquire_ctx ctx;
int ret = 0;
drm_modeset_acquire_init(&ctx, 0);
retry:
ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
if (ret == -EDEADLK)
goto deadlock;
else if (ret < 0)
goto out;
crtc = connector->state->crtc;
if (!crtc)
goto out;
ret = drm_modeset_lock(&crtc->mutex, &ctx);
if (ret == -EDEADLK)
goto deadlock;
else if (ret < 0)
goto out;
if (!crtc->state->active)
goto out;
ret = bd->props.brightness;
out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
deadlock:
drm_modeset_backoff(&ctx);
goto retry;
}
static int
nv50_edp_set_brightness(struct backlight_device *bd)
{
struct drm_connector *connector = dev_get_drvdata(bd->dev.parent);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_crtc *crtc;
struct drm_dp_aux *aux = &nv_connector->aux;
struct nouveau_backlight *nv_bl = nv_connector->backlight;
struct drm_modeset_acquire_ctx ctx;
int ret = 0;
drm_modeset_acquire_init(&ctx, 0);
retry:
ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
if (ret == -EDEADLK)
goto deadlock;
else if (ret < 0)
goto out;
crtc = connector->state->crtc;
if (!crtc)
goto out;
ret = drm_modeset_lock(&crtc->mutex, &ctx);
if (ret == -EDEADLK)
goto deadlock;
else if (ret < 0)
goto out;
if (crtc->state->active)
ret = drm_edp_backlight_set_level(aux, &nv_bl->edp_info, bd->props.brightness);
out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
deadlock:
drm_modeset_backoff(&ctx);
goto retry;
}
static const struct backlight_ops nv50_edp_bl_ops = {
.get_brightness = nv50_edp_get_brightness,
.update_status = nv50_edp_set_brightness,
};
static int
nva3_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
int or = ffs(nv_encoder->dcb->or) - 1;
u32 div, val;
div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
if (div && div >= val)
return ((val * 100) + (div / 2)) / div;
return 100;
}
static int
nva3_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
int or = ffs(nv_encoder->dcb->or) - 1;
u32 div, val;
div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
val = backlight_get_brightness(bd);
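/* Convert the 0..100 brightness into a PWM duty cycle relative to the divider. */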
if (val)
val = (val * div) / 100;
if (div) {
nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
val |
NV50_PDISP_SOR_PWM_CTL_NEW |
NVA3_PDISP_SOR_PWM_CTL_UNK);
return 0;
}
return -EINVAL;
}
static const struct backlight_ops nva3_bl_ops = {
.options = BL_CORE_SUSPENDRESUME,
.get_brightness = nva3_get_intensity,
.update_status = nva3_set_intensity,
};
/* FIXME: perform backlight probing for eDP _before_ this; this only gets called after connector
* registration, which happens after the initial modeset
*/
static int
nv50_backlight_init(struct nouveau_backlight *bl,
struct nouveau_connector *nv_conn,
struct nouveau_encoder *nv_encoder,
struct backlight_properties *props,
const struct backlight_ops **ops)
{
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
/*
* Note when this runs the connectors have not been probed yet,
* so nv_conn->base.status is not set yet.
*/
if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)) ||
drm_helper_probe_detect(&nv_conn->base, NULL, false) != connector_status_connected)
return -ENODEV;
if (nv_conn->type == DCB_CONNECTOR_eDP) {
int ret;
u16 current_level;
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
u8 current_mode;
ret = drm_dp_dpcd_read(&nv_conn->aux, DP_EDP_DPCD_REV, edp_dpcd,
EDP_DISPLAY_CTL_CAP_SIZE);
if (ret < 0)
return ret;
/* TODO: Add support for hybrid PWM/DPCD panels */
if (drm_edp_backlight_supported(edp_dpcd) &&
(edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) {
NV_DEBUG(drm, "DPCD backlight controls supported on %s\n",
nv_conn->base.name);
ret = drm_edp_backlight_init(&nv_conn->aux, &bl->edp_info, 0, edp_dpcd,
&current_level, &current_mode);
if (ret < 0)
return ret;
ret = drm_edp_backlight_enable(&nv_conn->aux, &bl->edp_info, current_level);
if (ret < 0) {
NV_ERROR(drm, "Failed to enable backlight on %s: %d\n",
nv_conn->base.name, ret);
return ret;
}
*ops = &nv50_edp_bl_ops;
props->brightness = current_level;
props->max_brightness = bl->edp_info.max;
bl->uses_dpcd = true;
return 0;
}
}
if (drm->client.device.info.chipset <= 0xa0 ||
drm->client.device.info.chipset == 0xaa ||
drm->client.device.info.chipset == 0xac)
*ops = &nv50_bl_ops;
else
*ops = &nva3_bl_ops;
props->max_brightness = 100;
return 0;
}
int
nouveau_backlight_init(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nouveau_backlight *bl;
struct nouveau_encoder *nv_encoder = NULL;
struct nvif_device *device = &drm->client.device;
char backlight_name[BL_NAME_SIZE];
struct backlight_properties props = {0};
const struct backlight_ops *ops;
int ret;
if (apple_gmux_present()) {
NV_INFO_ONCE(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");
return 0;
}
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
nv_encoder = find_encoder(connector, DCB_OUTPUT_DP);
else
return 0;
if (!nv_encoder)
return 0;
bl = kzalloc(sizeof(*bl), GFP_KERNEL);
if (!bl)
return -ENOMEM;
switch (device->info.family) {
case NV_DEVICE_INFO_V0_CURIE:
ret = nv40_backlight_init(nv_encoder, &props, &ops);
break;
case NV_DEVICE_INFO_V0_TESLA:
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
case NV_DEVICE_INFO_V0_PASCAL:
case NV_DEVICE_INFO_V0_VOLTA:
case NV_DEVICE_INFO_V0_TURING:
case NV_DEVICE_INFO_V0_AMPERE: //XXX: not confirmed
ret = nv50_backlight_init(bl, nouveau_connector(connector),
nv_encoder, &props, &ops);
break;
default:
ret = 0;
goto fail_alloc;
}
if (ret) {
if (ret == -ENODEV)
ret = 0;
goto fail_alloc;
}
if (!nouveau_acpi_video_backlight_use_native()) {
NV_INFO(drm, "Skipping nv_backlight registration\n");
goto fail_alloc;
}
if (!nouveau_get_backlight_name(backlight_name, bl)) {
NV_ERROR(drm, "Failed to retrieve a unique name for the backlight interface\n");
goto fail_alloc;
}
props.type = BACKLIGHT_RAW;
bl->dev = backlight_device_register(backlight_name, connector->kdev,
nv_encoder, ops, &props);
if (IS_ERR(bl->dev)) {
if (bl->id >= 0)
ida_free(&bl_ida, bl->id);
ret = PTR_ERR(bl->dev);
goto fail_alloc;
}
nouveau_connector(connector)->backlight = bl;
if (!bl->dev->props.brightness)
bl->dev->props.brightness =
bl->dev->ops->get_brightness(bl->dev);
backlight_update_status(bl->dev);
return 0;
fail_alloc:
kfree(bl);
/*
* If we get here we have an internal panel, but no nv_backlight,
* try registering an ACPI video backlight device instead.
*/
if (ret == 0)
nouveau_acpi_video_register_backlight();
return ret;
}
void
nouveau_backlight_fini(struct drm_connector *connector)
{
struct nouveau_connector *nv_conn = nouveau_connector(connector);
struct nouveau_backlight *bl = nv_conn->backlight;
if (!bl)
return;
if (bl->id >= 0)
ida_free(&bl_ida, bl->id);
backlight_device_unregister(bl->dev);
nv_conn->backlight = NULL;
kfree(bl);
}
void
nouveau_backlight_ctor(void)
{
ida_init(&bl_ida);
}
void
nouveau_backlight_dtor(void)
{
ida_destroy(&bl_ida);
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_backlight.c |
// SPDX-License-Identifier: MIT
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <drm/ttm/ttm_tt.h>
#include "nouveau_drv.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"
#include "nouveau_bo.h"
struct nouveau_sgdma_be {
/* this has to be the first field so populate/unpopulate in
* nouveau_bo.c work properly; otherwise they have to be moved here
*/
struct ttm_tt ttm;
struct nouveau_mem *mem;
};
void
nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
if (ttm) {
ttm_tt_fini(&nvbe->ttm);
kfree(nvbe);
}
}
int
nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nouveau_mem *mem = nouveau_mem(reg);
int ret;
if (nvbe->mem)
return 0;
ret = nouveau_mem_host(reg, &nvbe->ttm);
if (ret)
return ret;
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
if (ret) {
nouveau_mem_fini(mem);
return ret;
}
}
nvbe->mem = mem;
return 0;
}
void
nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
if (nvbe->mem) {
nouveau_mem_fini(nvbe->mem);
nvbe->mem = NULL;
}
}
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_sgdma_be *nvbe;
enum ttm_caching caching;
if (nvbo->force_coherent)
caching = ttm_uncached;
else if (drm->agp.bridge)
caching = ttm_write_combined;
else
caching = ttm_cached;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
if (!nvbe)
return NULL;
if (ttm_sg_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
kfree(nvbe);
return NULL;
}
return &nvbe->ttm;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_sgdma.c |
/*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_vmm.h"
#include <nvif/user.h>
/* Fetch and adjust GPU GET pointer
*
* Returns:
* value >= 0, the adjusted GET pointer
* -EINVAL if GET pointer currently outside main push buffer
* -EBUSY if timeout exceeded
*/
static inline int
READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
uint64_t val;
val = nvif_rd32(chan->userd, chan->user_get);
if (chan->user_get_hi)
val |= (uint64_t)nvif_rd32(chan->userd, chan->user_get_hi) << 32;
/* reset the counter as long as GET is still advancing; this avoids
* misdetecting a GPU lockup if the GPU happens to
* just be processing an operation that takes a long time
*/
if (val != *prev_get) {
*prev_get = val;
*timeout = 0;
}
if ((++*timeout & 0xff) == 0) {
udelay(1);
if (*timeout > 100000)
return -EBUSY;
}
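/* Convert GET into a dword offset within the push buffer; values
* outside the main push buffer are reported as -EINVAL.
*/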
if (val < chan->push.addr ||
val > chan->push.addr + (chan->dma.max << 2))
return -EINVAL;
return (val - chan->push.addr) >> 2;
}
void
nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
bool no_prefetch)
{
struct nvif_user *user = &chan->drm->client.device.user;
struct nouveau_bo *pb = chan->push.buffer;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
BUG_ON(chan->dma.ib_free < 1);
WARN_ON(length > NV50_DMA_PUSH_MAX_LENGTH);
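/* Each IB entry is two dwords: the low 32 bits of the push buffer address,
* then the upper address bits combined with the length (shifted into bits
* 8 and up) and the optional no-prefetch flag in bit 31.
*/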
nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8 |
(no_prefetch ? (1 << 31) : 0));
chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
mb();
/* Flush writes. */
nouveau_bo_rd32(pb, 0);
nvif_wr32(chan->userd, 0x8c, chan->dma.ib_put);
if (user->func && user->func->doorbell)
user->func->doorbell(user, chan->token);
chan->dma.ib_free--;
}
static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
uint32_t cnt = 0, prev_get = 0;
while (chan->dma.ib_free < count) {
uint32_t get = nvif_rd32(chan->userd, 0x88);
if (get != prev_get) {
prev_get = get;
cnt = 0;
}
if ((++cnt & 0xff) == 0) {
udelay(1);
if (cnt > 100000)
return -EBUSY;
}
chan->dma.ib_free = get - chan->dma.ib_put;
if (chan->dma.ib_free <= 0)
chan->dma.ib_free += chan->dma.ib_max;
}
return 0;
}
static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
uint64_t prev_get = 0;
int ret, cnt = 0;
ret = nv50_dma_push_wait(chan, slots + 1);
if (unlikely(ret))
return ret;
while (chan->dma.free < count) {
int get = READ_GET(chan, &prev_get, &cnt);
if (unlikely(get < 0)) {
if (get == -EINVAL)
continue;
return get;
}
if (get <= chan->dma.cur) {
chan->dma.free = chan->dma.max - chan->dma.cur;
if (chan->dma.free >= count)
break;
FIRE_RING(chan);
do {
get = READ_GET(chan, &prev_get, &cnt);
if (unlikely(get < 0)) {
if (get == -EINVAL)
continue;
return get;
}
} while (get == 0);
chan->dma.cur = 0;
chan->dma.put = 0;
}
chan->dma.free = get - chan->dma.cur - 1;
}
return 0;
}
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
uint64_t prev_get = 0;
int cnt = 0, get;
if (chan->dma.ib_max)
return nv50_dma_wait(chan, slots, size);
while (chan->dma.free < size) {
get = READ_GET(chan, &prev_get, &cnt);
if (unlikely(get == -EBUSY))
return -EBUSY;
/* loop until we have a usable GET pointer. the value
* we read from the GPU may be outside the main ring if
* PFIFO is processing a buffer called from the main ring,
* discard these values until something sensible is seen.
*
* the other case we discard GET is while the GPU is fetching
* from the SKIPS area, so the code below doesn't have to deal
* with some fun corner cases.
*/
if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
continue;
if (get <= chan->dma.cur) {
/* engine is fetching behind us, or is completely
* idle (GET == PUT) so we have free space up until
* the end of the push buffer
*
* we can only hit that path once per call due to
* looping back to the beginning of the push buffer,
* we'll hit the fetching-ahead-of-us path from that
* point on.
*
* the *one* exception to that rule is if we read
* GET==PUT, in which case the below conditional will
* always succeed and break us out of the wait loop.
*/
chan->dma.free = chan->dma.max - chan->dma.cur;
if (chan->dma.free >= size)
break;
/* not enough space left at the end of the push buffer,
* instruct the GPU to jump back to the start right
* after processing the currently pending commands.
*/
OUT_RING(chan, chan->push.addr | 0x20000000);
/* wait for GET to depart from the skips area.
* prevents writing GET==PUT and causing a race
* condition that causes us to think the GPU is
* idle when it's not.
*/
do {
get = READ_GET(chan, &prev_get, &cnt);
if (unlikely(get == -EBUSY))
return -EBUSY;
if (unlikely(get == -EINVAL))
continue;
} while (get <= NOUVEAU_DMA_SKIPS);
WRITE_PUT(NOUVEAU_DMA_SKIPS);
/* we're now submitting commands at the start of
* the push buffer.
*/
chan->dma.cur =
chan->dma.put = NOUVEAU_DMA_SKIPS;
}
/* engine fetching ahead of us, we have space up until the
* current GET pointer. the "- 1" is to ensure there's
* space left to emit a jump back to the beginning of the
* push buffer if we require it. we can never get GET == PUT
* here, so this is safe.
*/
chan->dma.free = get - chan->dma.cur - 1;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_dma.c |
// SPDX-License-Identifier: MIT
#include <drm/drm_exec.h>
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_abi16.h"
#include "nouveau_chan.h"
#include "nouveau_sched.h"
#include "nouveau_uvmm.h"
/**
* DOC: Overview
*
* Nouveau's VM_BIND / EXEC UAPI consists of three ioctls: DRM_NOUVEAU_VM_INIT,
* DRM_NOUVEAU_VM_BIND and DRM_NOUVEAU_EXEC.
*
* To use the UAPI, a user client must first initialize the VA space
* using the DRM_NOUVEAU_VM_INIT ioctl, specifying which region of the VA space
* should be managed by the kernel and which by the UMD.
*
* The DRM_NOUVEAU_VM_BIND ioctl provides clients an interface to manage the
* userspace-manageable portion of the VA space. It provides operations to map
* and unmap memory. Mappings may be flagged as sparse. Sparse mappings are not
* backed by a GEM object and the kernel will ignore GEM handles provided
* alongside a sparse mapping.
*
* Userspace may request memory backed mappings either within or outside of the
* bounds (but not crossing those bounds) of a previously mapped sparse
* mapping. Subsequently requested memory backed mappings within a sparse
* mapping will take precedence over the corresponding range of the sparse
* mapping. If such memory backed mappings are unmapped the kernel will make
* sure that the corresponding sparse mapping will take their place again.
* Requests to unmap a sparse mapping that still contains memory backed mappings
* will result in those memory backed mappings being unmapped first.
*
* Unmap requests are not bound to the range of existing mappings and can even
* overlap the bounds of sparse mappings. For such a request the kernel will
* make sure to unmap all memory backed mappings within the given range,
* splitting up memory backed mappings which are only partially contained
* within the given range. Unmap requests with the sparse flag set must match
* the range of a previously mapped sparse mapping exactly though.
*
* While the kernel generally permits arbitrary sequences and ranges of memory
* backed mappings being mapped and unmapped, either within a single or multiple
* VM_BIND ioctl calls, there are some restrictions for sparse mappings.
*
* The kernel does not permit userspace to:
* - unmap non-existent sparse mappings
* - unmap a sparse mapping and map a new sparse mapping overlapping the range
* of the previously unmapped sparse mapping within the same VM_BIND ioctl
* - unmap a sparse mapping and map new memory backed mappings overlapping the
* range of the previously unmapped sparse mapping within the same VM_BIND
* ioctl
*
* When using the VM_BIND ioctl to request the kernel to map memory to a given
* virtual address in the GPU's VA space there is no guarantee that the actual
* mappings are created in the GPU's MMU. If the given memory is swapped out
* at the time the bind operation is executed the kernel will stash the mapping
* details into its internal allocator and create the actual MMU mappings once
* the memory is swapped back in. While this is transparent for userspace, it is
* guaranteed that all the backing memory is swapped back in and all the memory
* mappings, as requested by userspace previously, are actually mapped once the
* DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
*
* A VM_BIND job can be executed either synchronously or asynchronously. If
* executed asynchronously, userspace may provide a list of syncobjs this job
* will wait for and/or a list of syncobjs the kernel will signal once the
* VM_BIND job finishes execution. If executed synchronously, the ioctl will
* block until the bind job is finished. For synchronous jobs the kernel does
* not permit any syncobjs to be submitted.
*
* To execute a push buffer the UAPI provides the DRM_NOUVEAU_EXEC ioctl. EXEC
* jobs are always executed asynchronously and, like VM_BIND jobs, provide
* the option to synchronize them with syncobjs.
*
* Besides that, EXEC jobs can be scheduled for a specified channel to execute on.
*
* Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
* an up-to-date view of the VA space. However, the actual mappings might still
* be pending. Hence, EXEC jobs need to have the fences of the VM_BIND jobs they
* depend on attached to them.
*/
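/*
* Illustrative sketch (not part of the driver): a minimal userspace-side
* submission against the EXEC UAPI described above, assuming a channel has
* already been created and a push buffer has been mapped via VM_BIND. The
* struct and field names match those consumed by this file; the ioctl macro
* name, fd, channel id and GPU virtual address are placeholders for
* illustration only.
*
*	struct drm_nouveau_exec_push push = {
*		.va     = push_buffer_gpu_va,	// GPU VA previously mapped via VM_BIND
*		.va_len = push_buffer_len,	// length of the commands to fetch
*		.flags  = 0,			// or DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH
*	};
*	struct drm_nouveau_exec exec = {
*		.channel    = channel_id,	// channel to schedule the job on
*		.push_count = 1,
*		.push_ptr   = (__u64)(uintptr_t)&push,
*		// wait_count/wait_ptr and sig_count/sig_ptr may carry syncobjs
*	};
*	drmIoctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);	// ioctl macro name assumed
*/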
static int
nouveau_exec_job_submit(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
struct nouveau_cli *cli = job->cli;
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
struct drm_exec *exec = &job->exec;
struct drm_gem_object *obj;
unsigned long index;
int ret;
/* Create a new fence, but do not emit yet. */
ret = nouveau_fence_create(&exec_job->fence, exec_job->chan);
if (ret)
return ret;
nouveau_uvmm_lock(uvmm);
drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
DRM_EXEC_IGNORE_DUPLICATES);
drm_exec_until_all_locked(exec) {
struct drm_gpuva *va;
drm_gpuva_for_each_va(va, &uvmm->umgr) {
if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
continue;
ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
drm_exec_retry_on_contention(exec);
if (ret)
goto err_uvmm_unlock;
}
}
nouveau_uvmm_unlock(uvmm);
drm_exec_for_each_locked_object(exec, index, obj) {
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
ret = nouveau_bo_validate(nvbo, true, false);
if (ret)
goto err_exec_fini;
}
return 0;
err_uvmm_unlock:
nouveau_uvmm_unlock(uvmm);
err_exec_fini:
drm_exec_fini(exec);
return ret;
}
static void
nouveau_exec_job_armed_submit(struct nouveau_job *job)
{
struct drm_exec *exec = &job->exec;
struct drm_gem_object *obj;
unsigned long index;
drm_exec_for_each_locked_object(exec, index, obj)
dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
drm_exec_fini(exec);
}
static struct dma_fence *
nouveau_exec_job_run(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
struct nouveau_channel *chan = exec_job->chan;
struct nouveau_fence *fence = exec_job->fence;
int i, ret;
ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16);
if (ret) {
NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
return ERR_PTR(ret);
}
for (i = 0; i < exec_job->push.count; i++) {
struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;
nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
}
ret = nouveau_fence_emit(fence);
if (ret) {
nouveau_fence_unref(&exec_job->fence);
NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
return ERR_PTR(ret);
}
/* The fence was emitted successfully; set the job's fence pointer to
* NULL in order to avoid freeing it up when the job is cleaned up.
*/
exec_job->fence = NULL;
return &fence->base;
}
static void
nouveau_exec_job_free(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
nouveau_job_free(job);
kfree(exec_job->fence);
kfree(exec_job->push.s);
kfree(exec_job);
}
static enum drm_gpu_sched_stat
nouveau_exec_job_timeout(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
struct nouveau_channel *chan = exec_job->chan;
if (unlikely(!atomic_read(&chan->killed)))
nouveau_channel_kill(chan);
NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
chan->chid);
nouveau_sched_entity_fini(job->entity);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
static struct nouveau_job_ops nouveau_exec_job_ops = {
.submit = nouveau_exec_job_submit,
.armed_submit = nouveau_exec_job_armed_submit,
.run = nouveau_exec_job_run,
.free = nouveau_exec_job_free,
.timeout = nouveau_exec_job_timeout,
};
int
nouveau_exec_job_init(struct nouveau_exec_job **pjob,
struct nouveau_exec_job_args *__args)
{
struct nouveau_exec_job *job;
struct nouveau_job_args args = {};
int i, ret;
for (i = 0; i < __args->push.count; i++) {
struct drm_nouveau_exec_push *p = &__args->push.s[i];
if (unlikely(p->va_len > NV50_DMA_PUSH_MAX_LENGTH)) {
NV_PRINTK(err, nouveau_cli(__args->file_priv),
"pushbuf size exceeds limit: 0x%x max 0x%x\n",
p->va_len, NV50_DMA_PUSH_MAX_LENGTH);
return -EINVAL;
}
}
job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job)
return -ENOMEM;
job->push.count = __args->push.count;
if (__args->push.count) {
job->push.s = kmemdup(__args->push.s,
sizeof(*__args->push.s) *
__args->push.count,
GFP_KERNEL);
if (!job->push.s) {
ret = -ENOMEM;
goto err_free_job;
}
}
job->chan = __args->chan;
args.sched_entity = __args->sched_entity;
args.file_priv = __args->file_priv;
args.in_sync.count = __args->in_sync.count;
args.in_sync.s = __args->in_sync.s;
args.out_sync.count = __args->out_sync.count;
args.out_sync.s = __args->out_sync.s;
args.ops = &nouveau_exec_job_ops;
args.resv_usage = DMA_RESV_USAGE_WRITE;
ret = nouveau_job_init(&job->base, &args);
if (ret)
goto err_free_pushs;
return 0;
err_free_pushs:
kfree(job->push.s);
err_free_job:
kfree(job);
*pjob = NULL;
return ret;
}
static int
nouveau_exec(struct nouveau_exec_job_args *args)
{
struct nouveau_exec_job *job;
int ret;
ret = nouveau_exec_job_init(&job, args);
if (ret)
return ret;
ret = nouveau_job_submit(&job->base);
if (ret)
goto err_job_fini;
return 0;
err_job_fini:
nouveau_job_fini(&job->base);
return ret;
}
static int
nouveau_exec_ucopy(struct nouveau_exec_job_args *args,
struct drm_nouveau_exec *req)
{
struct drm_nouveau_sync **s;
u32 inc = req->wait_count;
u64 ins = req->wait_ptr;
u32 outc = req->sig_count;
u64 outs = req->sig_ptr;
u32 pushc = req->push_count;
u64 pushs = req->push_ptr;
int ret;
if (pushc) {
args->push.count = pushc;
args->push.s = u_memcpya(pushs, pushc, sizeof(*args->push.s));
if (IS_ERR(args->push.s))
return PTR_ERR(args->push.s);
}
if (inc) {
s = &args->in_sync.s;
args->in_sync.count = inc;
*s = u_memcpya(ins, inc, sizeof(**s));
if (IS_ERR(*s)) {
ret = PTR_ERR(*s);
goto err_free_pushs;
}
}
if (outc) {
s = &args->out_sync.s;
args->out_sync.count = outc;
*s = u_memcpya(outs, outc, sizeof(**s));
if (IS_ERR(*s)) {
ret = PTR_ERR(*s);
goto err_free_ins;
}
}
return 0;
err_free_pushs:
u_free(args->push.s);
err_free_ins:
u_free(args->in_sync.s);
return ret;
}
static void
nouveau_exec_ufree(struct nouveau_exec_job_args *args)
{
u_free(args->push.s);
u_free(args->in_sync.s);
u_free(args->out_sync.s);
}
int
nouveau_exec_ioctl_exec(struct drm_device *dev,
void *data,
struct drm_file *file_priv)
{
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_abi16_chan *chan16;
struct nouveau_channel *chan = NULL;
struct nouveau_exec_job_args args = {};
struct drm_nouveau_exec *req = data;
int ret = 0;
if (unlikely(!abi16))
return -ENOMEM;
/* abi16 locks already */
if (unlikely(!nouveau_cli_uvmm(cli)))
return nouveau_abi16_put(abi16, -ENOSYS);
list_for_each_entry(chan16, &abi16->channels, head) {
if (chan16->chan->chid == req->channel) {
chan = chan16->chan;
break;
}
}
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
if (unlikely(atomic_read(&chan->killed)))
return nouveau_abi16_put(abi16, -ENODEV);
if (!chan->dma.ib_max)
return nouveau_abi16_put(abi16, -ENOSYS);
if (unlikely(req->push_count > NOUVEAU_GEM_MAX_PUSH)) {
NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->push_count, NOUVEAU_GEM_MAX_PUSH);
return nouveau_abi16_put(abi16, -EINVAL);
}
ret = nouveau_exec_ucopy(&args, req);
if (ret)
goto out;
args.sched_entity = &chan16->sched_entity;
args.file_priv = file_priv;
args.chan = chan;
ret = nouveau_exec(&args);
if (ret)
goto out_free_args;
out_free_args:
nouveau_exec_ufree(&args);
out:
return nouveau_abi16_put(abi16, ret);
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_exec.c |
/*
* Copyright (C) 2008 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <drm/drm_gem_ttm_helper.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"
#include <nvif/class.h>
#include <nvif/push206e.h>
static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = vma->vm_private_data;
pgprot_t prot;
vm_fault_t ret;
ret = ttm_bo_vm_reserve(bo, vmf);
if (ret)
return ret;
ret = nouveau_ttm_fault_reserve_notify(bo);
if (ret)
goto error_unlock;
nouveau_bo_del_io_reserve_lru(bo);
prot = vm_get_page_prot(vma->vm_flags);
ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
nouveau_bo_add_io_reserve_lru(bo);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
error_unlock:
dma_resv_unlock(bo->base.resv);
return ret;
}
static const struct vm_operations_struct nouveau_ttm_vm_ops = {
.fault = nouveau_ttm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
.access = ttm_bo_vm_access
};
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
int ret;
ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0 && ret != -EACCES)) {
pm_runtime_put_autosuspend(dev);
return;
}
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
ttm_bo_put(&nvbo->bo);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
struct nouveau_vma *vma;
int ret;
if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return 0;
if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv)
return -EPERM;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return ret;
ret = pm_runtime_get_sync(dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(dev);
goto out;
}
/* only create a VMA on binding */
if (!nouveau_cli_uvmm(cli))
ret = nouveau_vma_new(nvbo, vmm, &vma);
else
ret = 0;
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
out:
ttm_bo_unreserve(&nvbo->bo);
return ret;
}
struct nouveau_gem_object_unmap {
struct nouveau_cli_work work;
struct nouveau_vma *vma;
};
static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
nouveau_fence_unref(&vma->fence);
nouveau_vma_del(&vma);
}
static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
struct nouveau_gem_object_unmap *work =
container_of(w, typeof(*work), work);
nouveau_gem_object_delete(work->vma);
kfree(work);
}
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
struct nouveau_gem_object_unmap *work;
list_del_init(&vma->head);
if (!fence) {
nouveau_gem_object_delete(vma);
return;
}
if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
nouveau_gem_object_delete(vma);
return;
}
work->work.func = nouveau_gem_object_delete_work;
work->vma = vma;
nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
struct nouveau_vma *vma;
int ret;
if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return;
if (nouveau_cli_uvmm(cli))
return;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return;
vma = nouveau_vma_find(nvbo, vmm);
if (vma) {
if (--vma->refs == 0) {
ret = pm_runtime_get_sync(dev);
if (!WARN_ON(ret < 0 && ret != -EACCES)) {
nouveau_gem_object_unmap(nvbo, vma);
pm_runtime_mark_last_busy(dev);
}
pm_runtime_put_autosuspend(dev);
}
}
ttm_bo_unreserve(&nvbo->bo);
}
const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
.free = nouveau_gem_object_del,
.open = nouveau_gem_object_open,
.close = nouveau_gem_object_close,
.export = nouveau_gem_prime_export,
.pin = nouveau_gem_prime_pin,
.unpin = nouveau_gem_prime_unpin,
.get_sg_table = nouveau_gem_prime_get_sg_table,
.vmap = drm_gem_ttm_vmap,
.vunmap = drm_gem_ttm_vunmap,
.mmap = drm_gem_ttm_mmap,
.vm_ops = &nouveau_ttm_vm_ops,
};
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
struct nouveau_bo **pnvbo)
{
struct nouveau_drm *drm = cli->drm;
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
struct dma_resv *resv = NULL;
struct nouveau_bo *nvbo;
int ret;
if (domain & NOUVEAU_GEM_DOMAIN_NO_SHARE) {
if (unlikely(!uvmm))
return -EINVAL;
resv = &uvmm->resv;
}
if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
domain |= NOUVEAU_GEM_DOMAIN_CPU;
nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
tile_flags, false);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
nvbo->no_share = domain & NOUVEAU_GEM_DOMAIN_NO_SHARE;
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
if (ret) {
drm_gem_object_release(&nvbo->bo.base);
kfree(nvbo);
return ret;
}
if (resv)
dma_resv_lock(resv, NULL);
ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv);
if (resv)
dma_resv_unlock(resv);
if (ret)
return ret;
/* we restrict allowed domains on nv50+ to only the types
* that were requested at creation time. not possible on
* earlier chips without busting the ABI.
*/
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
*pnvbo = nvbo;
return 0;
}
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
struct drm_nouveau_gem_info *rep)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
struct nouveau_vma *vma;
if (is_power_of_2(nvbo->valid_domains))
rep->domain = nvbo->valid_domains;
else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
rep->domain = NOUVEAU_GEM_DOMAIN_GART;
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->offset = nvbo->offset;
if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50 &&
!nouveau_cli_uvmm(cli)) {
vma = nouveau_vma_find(nvbo, vmm);
if (!vma)
return -EINVAL;
rep->offset = vma->addr;
} else
rep->offset = 0;
rep->size = nvbo->bo.base.size;
rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
rep->tile_mode = nvbo->mode;
rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
rep->tile_flags |= nvbo->kind << 8;
else
if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
else
rep->tile_flags |= nvbo->zeta;
return 0;
}
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
/* If uvmm hasn't been initialized by now, disable it completely to prevent
* userspace from mixing up UAPIs.
*/
nouveau_cli_disable_uvmm_noinit(cli);
ret = nouveau_gem_new(cli, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
if (ret)
return ret;
ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
&req->info.handle);
if (ret == 0) {
ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
if (ret)
drm_gem_handle_delete(file_priv, req->info.handle);
}
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(&nvbo->bo.base);
return ret;
}
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
uint32_t write_domains, uint32_t valid_domains)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct ttm_buffer_object *bo = &nvbo->bo;
uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
uint32_t pref_domains = 0;
if (!domains)
return -EINVAL;
valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
bo->resource->mem_type == TTM_PL_VRAM)
pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
bo->resource->mem_type == TTM_PL_TT)
pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
else
pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
return 0;
}
struct validate_op {
struct list_head list;
struct ww_acquire_ctx ticket;
};
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
struct nouveau_fence *fence,
struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
struct nouveau_bo *nvbo;
struct drm_nouveau_gem_pushbuf_bo *b;
while (!list_empty(&op->list)) {
nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
b = &pbbo[nvbo->pbbo_index];
if (likely(fence)) {
nouveau_bo_fence(nvbo, fence, !!b->write_domains);
if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
struct nouveau_vma *vma =
(void *)(unsigned long)b->user_priv;
nouveau_fence_unref(&vma->fence);
dma_fence_get(&fence->base);
vma->fence = fence;
}
}
if (unlikely(nvbo->validate_mapped)) {
ttm_bo_kunmap(&nvbo->kmap);
nvbo->validate_mapped = false;
}
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
drm_gem_object_put(&nvbo->bo.base);
}
}
static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
struct nouveau_fence *fence,
struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
validate_fini_no_ticket(op, chan, fence, pbbo);
ww_acquire_fini(&op->ticket);
}
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
struct drm_nouveau_gem_pushbuf_bo *pbbo,
int nr_buffers, struct validate_op *op)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
int trycnt = 0;
int ret = -EINVAL, i;
struct nouveau_bo *res_bo = NULL;
LIST_HEAD(gart_list);
LIST_HEAD(vram_list);
LIST_HEAD(both_list);
ww_acquire_init(&op->ticket, &reservation_ww_class);
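/* Reserve every BO on the list under one ww_acquire ticket; on -EDEADLK,
* back off, reserve the contended BO via the slowpath and restart the loop.
*/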
retry:
if (++trycnt > 100000) {
NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
return -EINVAL;
}
for (i = 0; i < nr_buffers; i++) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
gem = drm_gem_object_lookup(file_priv, b->handle);
if (!gem) {
NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
ret = -ENOENT;
break;
}
nvbo = nouveau_gem_object(gem);
if (nvbo == res_bo) {
res_bo = NULL;
drm_gem_object_put(gem);
continue;
}
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
NV_PRINTK(err, cli, "multiple instances of buffer %d on "
"validation list\n", b->handle);
drm_gem_object_put(gem);
ret = -EINVAL;
break;
}
ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
if (ret) {
list_splice_tail_init(&vram_list, &op->list);
list_splice_tail_init(&gart_list, &op->list);
list_splice_tail_init(&both_list, &op->list);
validate_fini_no_ticket(op, chan, NULL, NULL);
if (unlikely(ret == -EDEADLK)) {
ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
&op->ticket);
if (!ret)
res_bo = nvbo;
}
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "fail reserve\n");
break;
}
}
if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
struct nouveau_vmm *vmm = chan->vmm;
struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
if (!vma) {
NV_PRINTK(err, cli, "vma not found!\n");
ret = -EINVAL;
break;
}
b->user_priv = (uint64_t)(unsigned long)vma;
} else {
b->user_priv = (uint64_t)(unsigned long)nvbo;
}
nvbo->reserved_by = file_priv;
nvbo->pbbo_index = i;
if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
(b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
list_add_tail(&nvbo->entry, &both_list);
else
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
list_add_tail(&nvbo->entry, &vram_list);
else
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
list_add_tail(&nvbo->entry, &gart_list);
else {
NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
b->valid_domains);
list_add_tail(&nvbo->entry, &both_list);
ret = -EINVAL;
break;
}
if (nvbo == res_bo)
goto retry;
}
ww_acquire_done(&op->ticket);
list_splice_tail(&vram_list, &op->list);
list_splice_tail(&gart_list, &op->list);
list_splice_tail(&both_list, &op->list);
if (ret)
validate_fini(op, chan, NULL, NULL);
return ret;
}
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
struct nouveau_drm *drm = chan->drm;
struct nouveau_bo *nvbo;
int ret, relocs = 0;
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
NV_PRINTK(err, cli, "fail set_domain\n");
return ret;
}
ret = nouveau_bo_validate(nvbo, true, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "fail ttm_validate\n");
return ret;
}
ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "fail post-validate sync\n");
return ret;
}
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->offset == b->presumed.offset &&
((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
(nvbo->bo.resource->mem_type == TTM_PL_TT &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
continue;
if (nvbo->bo.resource->mem_type == TTM_PL_TT)
b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
else
b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
b->presumed.offset = nvbo->offset;
b->presumed.valid = 0;
relocs++;
}
}
return relocs;
}
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
struct drm_file *file_priv,
struct drm_nouveau_gem_pushbuf_bo *pbbo,
int nr_buffers,
struct validate_op *op, bool *apply_relocs)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
int ret;
INIT_LIST_HEAD(&op->list);
if (nr_buffers == 0)
return 0;
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validate_init\n");
return ret;
}
ret = validate_list(chan, cli, &op->list, pbbo);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validating bo list\n");
validate_fini(op, chan, NULL, NULL);
return ret;
} else if (ret > 0) {
*apply_relocs = true;
}
return 0;
}
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct drm_nouveau_gem_pushbuf *req,
struct drm_nouveau_gem_pushbuf_reloc *reloc,
struct drm_nouveau_gem_pushbuf_bo *bo)
{
int ret = 0;
unsigned i;
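/* For each relocation: compute the value from the target BO's presumed
* offset (LOW/HIGH/OR flags), wait for the container BO to be idle, then
* write it into the container BO at reloc_bo_offset.
*/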
for (i = 0; i < req->nr_relocs; i++) {
struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
struct drm_nouveau_gem_pushbuf_bo *b;
struct nouveau_bo *nvbo;
uint32_t data;
long lret;
if (unlikely(r->bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
b = &bo[r->bo_index];
if (b->presumed.valid)
continue;
if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
}
nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
if (unlikely(r->reloc_bo_offset + 4 >
nvbo->bo.base.size)) {
NV_PRINTK(err, cli, "reloc outside of bo\n");
ret = -EINVAL;
break;
}
if (!nvbo->kmap.virtual) {
ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
&nvbo->kmap);
if (ret) {
NV_PRINTK(err, cli, "failed kmap for reloc\n");
break;
}
nvbo->validate_mapped = true;
}
if (r->flags & NOUVEAU_GEM_RELOC_LOW)
data = b->presumed.offset + r->data;
else
if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
data = (b->presumed.offset + r->data) >> 32;
else
data = r->data;
if (r->flags & NOUVEAU_GEM_RELOC_OR) {
if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
data |= r->tor;
else
data |= r->vor;
}
lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
DMA_RESV_USAGE_BOOKKEEP,
false, 15 * HZ);
if (!lret)
ret = -EBUSY;
else if (lret > 0)
ret = 0;
else
ret = lret;
if (ret) {
NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n",
ret);
break;
}
nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
}
return ret;
}
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_abi16_chan *temp;
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_nouveau_gem_pushbuf *req = data;
struct drm_nouveau_gem_pushbuf_push *push;
struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
struct drm_nouveau_gem_pushbuf_bo *bo;
struct nouveau_channel *chan = NULL;
struct validate_op op;
struct nouveau_fence *fence = NULL;
int i, j, ret = 0;
bool do_reloc = false, sync = false;
if (unlikely(!abi16))
return -ENOMEM;
if (unlikely(nouveau_cli_uvmm(cli)))
return -ENOSYS;
list_for_each_entry(temp, &abi16->channels, head) {
if (temp->chan->chid == req->channel) {
chan = temp->chan;
break;
}
}
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
if (unlikely(atomic_read(&chan->killed)))
return nouveau_abi16_put(abi16, -ENODEV);
sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;
req->vram_available = drm->gem.vram_available;
req->gart_available = drm->gem.gart_available;
if (unlikely(req->nr_push == 0))
goto out_next;
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
return nouveau_abi16_put(abi16, -EINVAL);
}
push = u_memcpya(req->push, req->nr_push, sizeof(*push));
if (IS_ERR(push))
return nouveau_abi16_put(abi16, PTR_ERR(push));
bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
if (IS_ERR(bo)) {
u_free(push);
return nouveau_abi16_put(abi16, PTR_ERR(bo));
}
/* Ensure all push buffers are on validate list */
for (i = 0; i < req->nr_push; i++) {
if (push[i].bo_index >= req->nr_buffers) {
NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
ret = -EINVAL;
goto out_prevalid;
}
}
/* Validate buffer list */
revalidate:
ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
req->nr_buffers, &op, &do_reloc);
if (ret) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validate: %d\n", ret);
goto out_prevalid;
}
/* Apply any relocations that are required */
if (do_reloc) {
if (!reloc) {
validate_fini(&op, chan, NULL, bo);
reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
if (IS_ERR(reloc)) {
ret = PTR_ERR(reloc);
goto out_prevalid;
}
goto revalidate;
}
ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
if (ret) {
NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
goto out;
}
}
if (chan->dma.ib_max) {
ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
goto out;
}
for (i = 0; i < req->nr_push; i++) {
struct nouveau_vma *vma = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
u64 addr = vma->addr + push[i].offset;
u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
nv50_dma_push(chan, addr, length, no_prefetch);
}
} else
if (drm->client.device.info.chipset >= 0x25) {
ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
if (ret) {
NV_PRINTK(err, cli, "cal_space: %d\n", ret);
goto out;
}
for (i = 0; i < req->nr_push; i++) {
struct nouveau_bo *nvbo = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
PUSH_DATA(chan->chan.push, 0);
}
} else {
ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
goto out;
}
for (i = 0; i < req->nr_push; i++) {
struct nouveau_bo *nvbo = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
uint32_t cmd;
cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
cmd |= 0x20000000;
if (unlikely(cmd != req->suffix0)) {
if (!nvbo->kmap.virtual) {
ret = ttm_bo_kmap(&nvbo->bo, 0,
PFN_UP(nvbo->bo.base.size),
&nvbo->kmap);
if (ret) {
WIND_RING(chan);
goto out;
}
nvbo->validate_mapped = true;
}
nouveau_bo_wr32(nvbo, (push[i].offset +
push[i].length - 8) / 4, cmd);
}
PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
PUSH_DATA(chan->chan.push, 0);
for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
PUSH_DATA(chan->chan.push, 0);
}
}
ret = nouveau_fence_new(&fence, chan);
if (ret) {
NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
}
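/* Userspace requested a synchronous submission: wait for the fence and
 * propagate any error status it carries.
 */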
if (sync) {
if (!(ret = nouveau_fence_wait(fence, false, false))) {
if ((ret = dma_fence_get_status(&fence->base)) == 1)
ret = 0;
}
}
out:
validate_fini(&op, chan, fence, bo);
nouveau_fence_unref(&fence);
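/* Write back any presumed offsets that turned out to be wrong, so
 * userspace can use the corrected values and skip relocations on the
 * next submission.
 */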
if (do_reloc) {
struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
u64_to_user_ptr(req->buffers);
for (i = 0; i < req->nr_buffers; i++) {
if (bo[i].presumed.valid)
continue;
if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
sizeof(bo[i].presumed))) {
ret = -EFAULT;
break;
}
}
}
out_prevalid:
if (!IS_ERR(reloc))
u_free(reloc);
u_free(bo);
u_free(push);
out_next:
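/* Tell userspace what to append to the end of its next push buffer:
 * nothing for IB-mode channels, a return command where the kernel
 * CALLs into the buffer, or a JUMP back into the main ring on older
 * chips.
 */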
if (chan->dma.ib_max) {
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
if (drm->client.device.info.chipset >= 0x25) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
req->suffix0 = 0x20000000 |
(chan->push.addr + ((chan->dma.cur + 2) << 2));
req->suffix1 = 0x00000000;
}
return nouveau_abi16_put(abi16, ret);
}
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gem_cpu_prep *req = data;
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
long lret;
int ret;
gem = drm_gem_object_lookup(file_priv, req->handle);
if (!gem)
return -ENOENT;
nvbo = nouveau_gem_object(gem);
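/* Wait for GPU work on the object to finish: only writers when the CPU
 * will just read, every fence when it will write.  NOWAIT polls,
 * otherwise wait up to 30 seconds.
 */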
lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
dma_resv_usage_rw(write), true,
no_wait ? 0 : 30 * HZ);
if (!lret)
ret = -EBUSY;
else if (lret > 0)
ret = 0;
else
ret = lret;
nouveau_bo_sync_for_cpu(nvbo);
drm_gem_object_put(gem);
return ret;
}
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gem_cpu_fini *req = data;
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
gem = drm_gem_object_lookup(file_priv, req->handle);
if (!gem)
return -ENOENT;
nvbo = nouveau_gem_object(gem);
nouveau_bo_sync_for_device(nvbo);
drm_gem_object_put(gem);
return 0;
}
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gem_info *req = data;
struct drm_gem_object *gem;
int ret;
gem = drm_gem_object_lookup(file_priv, req->handle);
if (!gem)
return -ENOENT;
ret = nouveau_gem_info(file_priv, gem, req);
drm_gem_object_put(gem);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_gem.c |
/*
* Copyright 2007 Dave Airlied
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Authors: Dave Airlied <[email protected]>
* Ben Skeggs <[email protected]>
* Jeremy Kolb <[email protected]>
*/
#include "nouveau_bo.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include <nvif/push906f.h>
#include <nvhw/class/cl9039.h>
int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nvif_push *push = chan->chan.push;
struct nouveau_mem *mem = nouveau_mem(old_reg);
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
u32 page_count = PFN_UP(new_reg->size);
int ret;
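/* Copy the buffer through M2MF one PAGE_SIZE-pitched line per page,
 * at most 2047 lines per LAUNCH_DMA submission.
 */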
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;
ret = PUSH_WAIT(push, 12);
if (ret)
return ret;
PUSH_MTHD(push, NV9039, OFFSET_OUT_UPPER,
NVVAL(NV9039, OFFSET_OUT_UPPER, VALUE, upper_32_bits(dst_offset)),
OFFSET_OUT, lower_32_bits(dst_offset));
PUSH_MTHD(push, NV9039, OFFSET_IN_UPPER,
NVVAL(NV9039, OFFSET_IN_UPPER, VALUE, upper_32_bits(src_offset)),
OFFSET_IN, lower_32_bits(src_offset),
PITCH_IN, PAGE_SIZE,
PITCH_OUT, PAGE_SIZE,
LINE_LENGTH_IN, PAGE_SIZE,
LINE_COUNT, line_count);
PUSH_MTHD(push, NV9039, LAUNCH_DMA,
NVDEF(NV9039, LAUNCH_DMA, SRC_INLINE, FALSE) |
NVDEF(NV9039, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
NVDEF(NV9039, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
NVDEF(NV9039, LAUNCH_DMA, COMPLETION_TYPE, FLUSH_DISABLE) |
NVDEF(NV9039, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
NVDEF(NV9039, LAUNCH_DMA, SEMAPHORE_STRUCT_SIZE, ONE_WORD));
page_count -= line_count;
src_offset += (PAGE_SIZE * line_count);
dst_offset += (PAGE_SIZE * line_count);
}
return 0;
}
int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
struct nvif_push *push = chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 2);
if (ret)
return ret;
PUSH_MTHD(push, NV9039, SET_OBJECT, handle);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_bo9039.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
#include <linux/dynamic_debug.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_vblank.h>
#include <core/gpuobj.h>
#include <core/option.h>
#include <core/pci.h>
#include <core/tegra.h>
#include <nvif/driver.h>
#include <nvif/fifo.h>
#include <nvif/push006c.h>
#include <nvif/user.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_vga.h"
#include "nouveau_led.h"
#include "nouveau_hwmon.h"
#include "nouveau_acpi.h"
#include "nouveau_bios.h"
#include "nouveau_ioctl.h"
#include "nouveau_abi16.h"
#include "nouveau_fence.h"
#include "nouveau_debugfs.h"
#include "nouveau_usif.h"
#include "nouveau_connector.h"
#include "nouveau_platform.h"
#include "nouveau_svm.h"
#include "nouveau_dmem.h"
#include "nouveau_exec.h"
#include "nouveau_uvmm.h"
#include "nouveau_sched.h"
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
"DRM_UT_DRIVER",
"DRM_UT_KMS",
"DRM_UT_PRIME",
"DRM_UT_ATOMIC",
"DRM_UT_VBL",
"DRM_UT_STATE",
"DRM_UT_LEASE",
"DRM_UT_DP",
"DRM_UT_DRMRES");
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
module_param_named(config, nouveau_config, charp, 0400);
MODULE_PARM_DESC(debug, "debug string to pass to driver core");
static char *nouveau_debug;
module_param_named(debug, nouveau_debug, charp, 0400);
MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
static int nouveau_noaccel = 0;
module_param_named(noaccel, nouveau_noaccel, int, 0400);
MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
"0 = disabled, 1 = enabled, 2 = headless)");
int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);
MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
static int nouveau_atomic = 0;
module_param_named(atomic, nouveau_atomic, int, 0400);
MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
static int nouveau_runtime_pm = -1;
module_param_named(runpm, nouveau_runtime_pm, int, 0400);
static struct drm_driver driver_stub;
static struct drm_driver driver_pci;
static struct drm_driver driver_platform;
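/* Pack the PCI domain/bus/slot/function into the 64-bit name used to
 * identify the device to NVKM.
 */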
static u64
nouveau_pci_name(struct pci_dev *pdev)
{
u64 name = (u64)pci_domain_nr(pdev->bus) << 32;
name |= pdev->bus->number << 16;
name |= PCI_SLOT(pdev->devfn) << 8;
return name | PCI_FUNC(pdev->devfn);
}
static u64
nouveau_platform_name(struct platform_device *platformdev)
{
return platformdev->id;
}
static u64
nouveau_name(struct drm_device *dev)
{
if (dev_is_pci(dev->dev))
return nouveau_pci_name(to_pci_dev(dev->dev));
else
return nouveau_platform_name(to_platform_device(dev->dev));
}
static inline bool
nouveau_cli_work_ready(struct dma_fence *fence)
{
bool ret = true;
spin_lock_irq(fence->lock);
if (!dma_fence_is_signaled_locked(fence))
ret = false;
spin_unlock_irq(fence->lock);
if (ret == true)
dma_fence_put(fence);
return ret;
}
static void
nouveau_cli_work(struct work_struct *w)
{
struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
struct nouveau_cli_work *work, *wtmp;
mutex_lock(&cli->lock);
list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
if (!work->fence || nouveau_cli_work_ready(work->fence)) {
list_del(&work->head);
work->func(work);
}
}
mutex_unlock(&cli->lock);
}
static void
nouveau_cli_work_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct nouveau_cli_work *work = container_of(cb, typeof(*work), cb);
schedule_work(&work->cli->work);
}
void
nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
struct nouveau_cli_work *work)
{
work->fence = dma_fence_get(fence);
work->cli = cli;
mutex_lock(&cli->lock);
list_add_tail(&work->head, &cli->worker);
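/* If the fence is already signalled, dma_fence_add_callback() fails and
 * the callback is run immediately to schedule the work.
 */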
if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
nouveau_cli_work_fence(fence, &work->cb);
mutex_unlock(&cli->lock);
}
static void
nouveau_cli_fini(struct nouveau_cli *cli)
{
/* All our channels are dead now, which means all the fences they
* own are signalled, and all callback functions have been called.
*
* So, after flushing the workqueue, there should be nothing left.
*/
flush_work(&cli->work);
WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli);
nouveau_uvmm_fini(&cli->uvmm);
nouveau_sched_entity_fini(&cli->sched_entity);
nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
nvif_mmu_dtor(&cli->mmu);
nvif_device_dtor(&cli->device);
mutex_lock(&cli->drm->master.lock);
nvif_client_dtor(&cli->base);
mutex_unlock(&cli->drm->master.lock);
}
static int
nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
struct nouveau_cli *cli)
{
static const struct nvif_mclass
mems[] = {
{ NVIF_CLASS_MEM_GF100, -1 },
{ NVIF_CLASS_MEM_NV50 , -1 },
{ NVIF_CLASS_MEM_NV04 , -1 },
{}
};
static const struct nvif_mclass
mmus[] = {
{ NVIF_CLASS_MMU_GF100, -1 },
{ NVIF_CLASS_MMU_NV50 , -1 },
{ NVIF_CLASS_MMU_NV04 , -1 },
{}
};
static const struct nvif_mclass
vmms[] = {
{ NVIF_CLASS_VMM_GP100, -1 },
{ NVIF_CLASS_VMM_GM200, -1 },
{ NVIF_CLASS_VMM_GF100, -1 },
{ NVIF_CLASS_VMM_NV50 , -1 },
{ NVIF_CLASS_VMM_NV04 , -1 },
{}
};
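/* The class lists above are ordered newest-first; nvif_mclass() returns
 * the index of the first entry the device supports.
 */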
u64 device = nouveau_name(drm->dev);
int ret;
snprintf(cli->name, sizeof(cli->name), "%s", sname);
cli->drm = drm;
mutex_init(&cli->mutex);
usif_client_init(cli);
INIT_WORK(&cli->work, nouveau_cli_work);
INIT_LIST_HEAD(&cli->worker);
mutex_init(&cli->lock);
if (cli == &drm->master) {
ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
cli->name, device, &cli->base);
} else {
mutex_lock(&drm->master.lock);
ret = nvif_client_ctor(&drm->master.base, cli->name, device,
&cli->base);
mutex_unlock(&drm->master.lock);
}
if (ret) {
NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
goto done;
}
ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
&(struct nv_device_v0) {
.device = ~0,
.priv = true,
}, sizeof(struct nv_device_v0),
&cli->device);
if (ret) {
NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->device.object, mmus);
if (ret < 0) {
NV_PRINTK(err, cli, "No supported MMU class\n");
goto done;
}
ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", mmus[ret].oclass,
&cli->mmu);
if (ret) {
NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->mmu.object, vmms);
if (ret < 0) {
NV_PRINTK(err, cli, "No supported VMM class\n");
goto done;
}
ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
if (ret) {
NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->mmu.object, mems);
if (ret < 0) {
NV_PRINTK(err, cli, "No supported MEM class\n");
goto done;
}
cli->mem = &mems[ret];
ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched,
drm->sched_wq);
if (ret)
goto done;
return 0;
done:
if (ret)
nouveau_cli_fini(cli);
return ret;
}
static void
nouveau_accel_ce_fini(struct nouveau_drm *drm)
{
nouveau_channel_idle(drm->cechan);
nvif_object_dtor(&drm->ttm.copy);
nouveau_channel_del(&drm->cechan);
}
static void
nouveau_accel_ce_init(struct nouveau_drm *drm)
{
struct nvif_device *device = &drm->client.device;
u64 runm;
int ret = 0;
/* Allocate channel that has access to a (preferably async) copy
* engine, to use for TTM buffer moves.
*/
runm = nvif_fifo_runlist_ce(device);
if (!runm) {
NV_DEBUG(drm, "no ce runlist\n");
return;
}
ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
}
static void
nouveau_accel_gr_fini(struct nouveau_drm *drm)
{
nouveau_channel_idle(drm->channel);
nvif_object_dtor(&drm->ntfy);
nvkm_gpuobj_del(&drm->notify);
nouveau_channel_del(&drm->channel);
}
static void
nouveau_accel_gr_init(struct nouveau_drm *drm)
{
struct nvif_device *device = &drm->client.device;
u64 runm;
int ret;
/* Allocate channel that has access to the graphics engine. */
runm = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
if (!runm) {
NV_DEBUG(drm, "no gr runlist\n");
return;
}
ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
if (ret) {
NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
nouveau_accel_gr_fini(drm);
return;
}
/* A SW class is used on pre-NV50 HW to assist with handling the
* synchronisation of page flips, as well as to implement fences
* on TNT/TNT2 HW that lacks any kind of support in host.
*/
if (!drm->channel->nvsw.client && device->info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
NVDRM_NVSW, nouveau_abi16_swclass(drm),
NULL, 0, &drm->channel->nvsw);
if (ret == 0 && device->info.chipset >= 0x11) {
ret = nvif_object_ctor(&drm->channel->user, "drmBlit",
0x005f, 0x009f,
NULL, 0, &drm->channel->blit);
}
if (ret == 0) {
struct nvif_push *push = drm->channel->chan.push;
ret = PUSH_WAIT(push, 8);
if (ret == 0) {
if (device->info.chipset >= 0x11) {
PUSH_NVSQ(push, NV05F, 0x0000, drm->channel->blit.handle);
PUSH_NVSQ(push, NV09F, 0x0120, 0,
0x0124, 1,
0x0128, 2);
}
PUSH_NVSQ(push, NV_SW, 0x0000, drm->channel->nvsw.handle);
}
}
if (ret) {
NV_ERROR(drm, "failed to allocate sw or blit class, %d\n", ret);
nouveau_accel_gr_fini(drm);
return;
}
}
/* NvMemoryToMemoryFormat requires a notifier ctxdma for some reason,
* even if notification is never requested, so, allocate a ctxdma on
* any GPU where it's possible we'll end up using M2MF for BO moves.
*/
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
ret = nvkm_gpuobj_new(nvxx_device(device), 32, 0, false, NULL,
&drm->notify);
if (ret) {
NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
nouveau_accel_gr_fini(drm);
return;
}
ret = nvif_object_ctor(&drm->channel->user, "drmM2mfNtfy",
NvNotify0, NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = drm->notify->addr,
.limit = drm->notify->addr + 31
}, sizeof(struct nv_dma_v0),
&drm->ntfy);
if (ret) {
nouveau_accel_gr_fini(drm);
return;
}
}
}
static void
nouveau_accel_fini(struct nouveau_drm *drm)
{
nouveau_accel_ce_fini(drm);
nouveau_accel_gr_fini(drm);
if (drm->fence)
nouveau_fence(drm)->dtor(drm);
nouveau_channels_fini(drm);
}
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
struct nvif_device *device = &drm->client.device;
struct nvif_sclass *sclass;
int ret, i, n;
if (nouveau_noaccel)
return;
/* Initialise global support for channels, and synchronisation. */
ret = nouveau_channels_init(drm);
if (ret)
return;
/*XXX: this is crap, but the fence/channel stuff is a little
* backwards in some places. this will be fixed.
*/
ret = n = nvif_object_sclass_get(&device->object, &sclass);
if (ret < 0)
return;
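/* Pick a fence implementation based on the channel classes the device
 * exposes.
 */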
for (ret = -ENOSYS, i = 0; i < n; i++) {
switch (sclass[i].oclass) {
case NV03_CHANNEL_DMA:
ret = nv04_fence_create(drm);
break;
case NV10_CHANNEL_DMA:
ret = nv10_fence_create(drm);
break;
case NV17_CHANNEL_DMA:
case NV40_CHANNEL_DMA:
ret = nv17_fence_create(drm);
break;
case NV50_CHANNEL_GPFIFO:
ret = nv50_fence_create(drm);
break;
case G82_CHANNEL_GPFIFO:
ret = nv84_fence_create(drm);
break;
case FERMI_CHANNEL_GPFIFO:
case KEPLER_CHANNEL_GPFIFO_A:
case KEPLER_CHANNEL_GPFIFO_B:
case MAXWELL_CHANNEL_GPFIFO_A:
case PASCAL_CHANNEL_GPFIFO_A:
case VOLTA_CHANNEL_GPFIFO_A:
case TURING_CHANNEL_GPFIFO_A:
case AMPERE_CHANNEL_GPFIFO_A:
case AMPERE_CHANNEL_GPFIFO_B:
ret = nvc0_fence_create(drm);
break;
default:
break;
}
}
nvif_object_sclass_put(&sclass);
if (ret) {
NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
nouveau_accel_fini(drm);
return;
}
/* Volta requires access to a doorbell register for kickoff. */
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
ret = nvif_user_ctor(device, "drmUsermode");
if (ret)
return;
}
/* Allocate channels we need to support various functions. */
nouveau_accel_gr_init(drm);
nouveau_accel_ce_init(drm);
/* Initialise accelerated TTM buffer moves. */
nouveau_bo_move_init(drm);
}
static void __printf(2, 3)
nouveau_drm_errorf(struct nvif_object *object, const char *fmt, ...)
{
struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
struct va_format vaf;
va_list va;
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
NV_ERROR(drm, "%pV", &vaf);
va_end(va);
}
static void __printf(2, 3)
nouveau_drm_debugf(struct nvif_object *object, const char *fmt, ...)
{
struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
struct va_format vaf;
va_list va;
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
NV_DEBUG(drm, "%pV", &vaf);
va_end(va);
}
static const struct nvif_parent_func
nouveau_parent = {
.debugf = nouveau_drm_debugf,
.errorf = nouveau_drm_errorf,
};
static int
nouveau_drm_device_init(struct drm_device *dev)
{
struct nouveau_drm *drm;
int ret;
if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL)))
return -ENOMEM;
dev->dev_private = drm;
drm->dev = dev;
nvif_parent_ctor(&nouveau_parent, &drm->parent);
drm->master.base.object.parent = &drm->parent;
ret = nouveau_sched_init(drm);
if (ret)
goto fail_alloc;
ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
if (ret)
goto fail_sched;
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
goto fail_master;
nvxx_client(&drm->client.base)->debug =
nvkm_dbgopt(nouveau_debug, "DRM");
INIT_LIST_HEAD(&drm->clients);
mutex_init(&drm->clients_lock);
spin_lock_init(&drm->tile.lock);
/* work around an odd issue on nvc1 by disabling the device's
* nosnoop capability. hopefully won't cause issues until a
* better fix is found - assuming there is one...
*/
if (drm->client.device.info.chipset == 0xc1)
nvif_mask(&drm->client.device.object, 0x00088080, 0x00000800, 0x00000000);
nouveau_vga_init(drm);
ret = nouveau_ttm_init(drm);
if (ret)
goto fail_ttm;
ret = nouveau_bios_init(dev);
if (ret)
goto fail_bios;
nouveau_accel_init(drm);
ret = nouveau_display_create(dev);
if (ret)
goto fail_dispctor;
if (dev->mode_config.num_crtc) {
ret = nouveau_display_init(dev, false, false);
if (ret)
goto fail_dispinit;
}
nouveau_debugfs_init(drm);
nouveau_hwmon_init(dev);
nouveau_svm_init(drm);
nouveau_dmem_init(drm);
nouveau_led_init(dev);
if (nouveau_pmops_runtime()) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put(dev->dev);
}
return 0;
fail_dispinit:
nouveau_display_destroy(dev);
fail_dispctor:
nouveau_accel_fini(drm);
nouveau_bios_takedown(dev);
fail_bios:
nouveau_ttm_fini(drm);
fail_ttm:
nouveau_vga_fini(drm);
nouveau_cli_fini(&drm->client);
fail_master:
nouveau_cli_fini(&drm->master);
fail_sched:
nouveau_sched_fini(drm);
fail_alloc:
nvif_parent_dtor(&drm->parent);
kfree(drm);
return ret;
}
static void
nouveau_drm_device_fini(struct drm_device *dev)
{
struct nouveau_cli *cli, *temp_cli;
struct nouveau_drm *drm = nouveau_drm(dev);
if (nouveau_pmops_runtime()) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
}
nouveau_led_fini(dev);
nouveau_dmem_fini(drm);
nouveau_svm_fini(drm);
nouveau_hwmon_fini(dev);
nouveau_debugfs_fini(drm);
if (dev->mode_config.num_crtc)
nouveau_display_fini(dev, false, false);
nouveau_display_destroy(dev);
nouveau_accel_fini(drm);
nouveau_bios_takedown(dev);
nouveau_ttm_fini(drm);
nouveau_vga_fini(drm);
/*
* There may be existing clients from as-yet unclosed files. For now,
* clean them up here rather than deferring until the file is closed,
* but this is likely not correct if we want to support hot-unplugging
* properly.
*/
mutex_lock(&drm->clients_lock);
list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) {
list_del(&cli->head);
mutex_lock(&cli->mutex);
if (cli->abi16)
nouveau_abi16_fini(cli->abi16);
mutex_unlock(&cli->mutex);
nouveau_cli_fini(cli);
kfree(cli);
}
mutex_unlock(&drm->clients_lock);
nouveau_sched_fini(drm);
nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
nvif_parent_dtor(&drm->parent);
mutex_destroy(&drm->clients_lock);
kfree(drm);
}
/*
* On some Intel PCIe bridge controllers doing a
* D0 -> D3hot -> D3cold -> D0 sequence causes Nvidia GPUs to not reappear.
* Skipping the intermediate D3hot step seems to make it work again. This is
* probably caused by not meeting the expectation the involved AML code has
* when the GPU is put into D3hot state before invoking it.
*
* This leads to various manifestations of this issue:
* - AML code execution to power on the GPU hits an infinite loop (as the
* code waits on device memory to change).
* - kernel crashes, as all PCI reads return -1, which most code isn't able
* to handle well enough.
*
* In all cases dmesg will contain at least one line like this:
* 'nouveau 0000:01:00.0: Refused to change power state, currently in D3'
* followed by a lot of nouveau timeouts.
*
* Deeper down, the \_SB.PCI0.PEG0.PG00._OFF code writes bit 0x80 to the
* undocumented PCI config space register 0x248 of the Intel PCIe bridge
* controller (0x1901) in order to change the state of the PCIe link between
* the PCIe port and the GPU. There are alternative code paths using other
* registers, which seem to work fine (executed pre Windows 8):
* - 0xbc bit 0x20 (publicly available documentation claims 'reserved')
* - 0xb0 bit 0x10 (link disable)
* Changing the conditions inside the firmware by poking into the relevant
* addresses does resolve the issue, but it seemed to be ACPI private memory
* and not any device accessible memory at all, so there is no portable way of
* changing the conditions.
* On an XPS 9560 that means bits [0,3] on \CPEX need to be cleared.
*
* The only systems where this behavior can be seen are hybrid graphics laptops
* with a secondary Nvidia Maxwell, Pascal or Turing GPU. It's unclear whether
* this issue only occurs in combination with listed Intel PCIe bridge
* controllers and the mentioned GPUs or other devices as well.
*
* documentation on the PCIe bridge controller can be found in the
* "7th Generation Intel® Processor Families for H Platforms Datasheet Volume 2"
* Section "12 PCI Express* Controller (x16) Registers"
*/
static void quirk_broken_nv_runpm(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct pci_dev *bridge = pci_upstream_bridge(pdev);
if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL)
return;
switch (bridge->device) {
case 0x1901:
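/* Hide the PM capability so the PCI core never puts the GPU into
 * D3hot via config space; see the comment above this function.
 */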
drm->old_pm_cap = pdev->pm_cap;
pdev->pm_cap = 0;
NV_INFO(drm, "Disabling PCI power management to avoid bug\n");
break;
}
}
static int nouveau_drm_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
struct nvkm_device *device;
struct drm_device *drm_dev;
int ret;
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
/* We need to check that the chipset is supported before booting
* fbdev off the hardware, as there's no way to put it back.
*/
ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
true, false, 0, &device);
if (ret)
return ret;
nvkm_device_del(&device);
/* Remove conflicting drivers (vesafb, efifb etc). */
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver_pci);
if (ret)
return ret;
ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
true, true, ~0ULL, &device);
if (ret)
return ret;
pci_set_master(pdev);
if (nouveau_atomic)
driver_pci.driver_features |= DRIVER_ATOMIC;
drm_dev = drm_dev_alloc(&driver_pci, &pdev->dev);
if (IS_ERR(drm_dev)) {
ret = PTR_ERR(drm_dev);
goto fail_nvkm;
}
ret = pci_enable_device(pdev);
if (ret)
goto fail_drm;
pci_set_drvdata(pdev, drm_dev);
ret = nouveau_drm_device_init(drm_dev);
if (ret)
goto fail_pci;
ret = drm_dev_register(drm_dev, pent->driver_data);
if (ret)
goto fail_drm_dev_init;
if (nouveau_drm(drm_dev)->client.device.info.ram_size <= 32 * 1024 * 1024)
drm_fbdev_generic_setup(drm_dev, 8);
else
drm_fbdev_generic_setup(drm_dev, 32);
quirk_broken_nv_runpm(pdev);
return 0;
fail_drm_dev_init:
nouveau_drm_device_fini(drm_dev);
fail_pci:
pci_disable_device(pdev);
fail_drm:
drm_dev_put(drm_dev);
fail_nvkm:
nvkm_device_del(&device);
return ret;
}
void
nouveau_drm_device_remove(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_client *client;
struct nvkm_device *device;
drm_dev_unplug(dev);
client = nvxx_client(&drm->client.base);
device = nvkm_device_find(client->device);
nouveau_drm_device_fini(dev);
drm_dev_put(dev);
nvkm_device_del(&device);
}
static void
nouveau_drm_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct nouveau_drm *drm = nouveau_drm(dev);
/* revert our workaround */
if (drm->old_pm_cap)
pdev->pm_cap = drm->old_pm_cap;
nouveau_drm_device_remove(dev);
pci_disable_device(pdev);
}
static int
nouveau_do_suspend(struct drm_device *dev, bool runtime)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct ttm_resource_manager *man;
int ret;
nouveau_svm_suspend(drm);
nouveau_dmem_suspend(drm);
nouveau_led_suspend(dev);
if (dev->mode_config.num_crtc) {
NV_DEBUG(drm, "suspending display...\n");
ret = nouveau_display_suspend(dev, runtime);
if (ret)
return ret;
}
NV_DEBUG(drm, "evicting buffers...\n");
man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
if (drm->cechan) {
ret = nouveau_channel_idle(drm->cechan);
if (ret)
goto fail_display;
}
if (drm->channel) {
ret = nouveau_channel_idle(drm->channel);
if (ret)
goto fail_display;
}
NV_DEBUG(drm, "suspending fence...\n");
if (drm->fence && nouveau_fence(drm)->suspend) {
if (!nouveau_fence(drm)->suspend(drm)) {
ret = -ENOMEM;
goto fail_display;
}
}
NV_DEBUG(drm, "suspending object tree...\n");
ret = nvif_client_suspend(&drm->master.base);
if (ret)
goto fail_client;
return 0;
fail_client:
if (drm->fence && nouveau_fence(drm)->resume)
nouveau_fence(drm)->resume(drm);
fail_display:
if (dev->mode_config.num_crtc) {
NV_DEBUG(drm, "resuming display...\n");
nouveau_display_resume(dev, runtime);
}
return ret;
}
static int
nouveau_do_resume(struct drm_device *dev, bool runtime)
{
int ret = 0;
struct nouveau_drm *drm = nouveau_drm(dev);
NV_DEBUG(drm, "resuming object tree...\n");
ret = nvif_client_resume(&drm->master.base);
if (ret) {
NV_ERROR(drm, "Client resume failed with error: %d\n", ret);
return ret;
}
NV_DEBUG(drm, "resuming fence...\n");
if (drm->fence && nouveau_fence(drm)->resume)
nouveau_fence(drm)->resume(drm);
nouveau_run_vbios_init(dev);
if (dev->mode_config.num_crtc) {
NV_DEBUG(drm, "resuming display...\n");
nouveau_display_resume(dev, runtime);
}
nouveau_led_resume(dev);
nouveau_dmem_resume(drm);
nouveau_svm_resume(drm);
return 0;
}
int
nouveau_pmops_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
ret = nouveau_do_suspend(drm_dev, false);
if (ret)
return ret;
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
udelay(200);
return 0;
}
int
nouveau_pmops_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
return ret;
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev, false);
/* Monitors may have been connected / disconnected during suspend */
nouveau_display_hpd_resume(drm_dev);
return ret;
}
static int
nouveau_pmops_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return nouveau_do_suspend(drm_dev, false);
}
static int
nouveau_pmops_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return nouveau_do_resume(drm_dev, false);
}
bool
nouveau_pmops_runtime(void)
{
if (nouveau_runtime_pm == -1)
return nouveau_is_optimus() || nouveau_is_v1_dsm();
return nouveau_runtime_pm == 1;
}
static int
nouveau_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
if (!nouveau_pmops_runtime()) {
pm_runtime_forbid(dev);
return -EBUSY;
}
nouveau_switcheroo_optimus_dsm();
ret = nouveau_do_suspend(drm_dev, true);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3cold);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
return ret;
}
static int
nouveau_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
int ret;
if (!nouveau_pmops_runtime()) {
pm_runtime_forbid(dev);
return -EBUSY;
}
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
return ret;
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev, true);
if (ret) {
NV_ERROR(drm, "resume failed with: %d\n", ret);
return ret;
}
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
/* Monitors may have been connected / disconnected during suspend */
nouveau_display_hpd_resume(drm_dev);
return ret;
}
static int
nouveau_pmops_runtime_idle(struct device *dev)
{
if (!nouveau_pmops_runtime()) {
pm_runtime_forbid(dev);
return -EBUSY;
}
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
return 1;
}
static int
nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
char name[32], tmpname[TASK_COMM_LEN];
int ret;
/* need to bring up power immediately if opening device */
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
get_task_comm(tmpname, current);
snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
ret = -ENOMEM;
goto done;
}
ret = nouveau_cli_init(drm, name, cli);
if (ret)
goto done;
fpriv->driver_priv = cli;
mutex_lock(&drm->clients_lock);
list_add(&cli->head, &drm->clients);
mutex_unlock(&drm->clients_lock);
done:
if (ret && cli) {
nouveau_cli_fini(cli);
kfree(cli);
}
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
static void
nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
{
struct nouveau_cli *cli = nouveau_cli(fpriv);
struct nouveau_drm *drm = nouveau_drm(dev);
int dev_index;
/*
* The device is gone, and as it currently stands all clients are
* cleaned up in the removal codepath. In the future this may change
* so that we can support hot-unplugging, but for now we immediately
* return to avoid a double-free situation.
*/
if (!drm_dev_enter(dev, &dev_index))
return;
pm_runtime_get_sync(dev->dev);
mutex_lock(&cli->mutex);
if (cli->abi16)
nouveau_abi16_fini(cli->abi16);
mutex_unlock(&cli->mutex);
mutex_lock(&drm->clients_lock);
list_del(&cli->head);
mutex_unlock(&drm->clients_lock);
nouveau_cli_fini(cli);
kfree(cli);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
drm_dev_exit(dev_index);
}
static const struct drm_ioctl_desc
nouveau_ioctls[] = {
DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_INIT, nouveau_svmm_init, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_BIND, nouveau_svmm_bind, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_VM_INIT, nouveau_uvmm_ioctl_vm_init, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_VM_BIND, nouveau_uvmm_ioctl_vm_bind, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_EXEC, nouveau_exec_ioctl_exec, DRM_RENDER_ALLOW),
};
long
nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct drm_file *filp = file->private_data;
struct drm_device *dev = filp->minor->dev;
long ret;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
case DRM_NOUVEAU_NVIF:
ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
break;
default:
ret = drm_ioctl(file, cmd, arg);
break;
}
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
static const struct file_operations
nouveau_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = nouveau_drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
#if defined(CONFIG_COMPAT)
.compat_ioctl = nouveau_compat_ioctl,
#endif
.llseek = noop_llseek,
};
static struct drm_driver
driver_stub = {
.driver_features = DRIVER_GEM |
DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE |
DRIVER_GEM_GPUVA |
DRIVER_MODESET |
DRIVER_RENDER,
.open = nouveau_drm_open,
.postclose = nouveau_drm_postclose,
.lastclose = nouveau_vga_lastclose,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = nouveau_drm_debugfs_init,
#endif
.ioctls = nouveau_ioctls,
.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
.fops = &nouveau_driver_fops,
.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
.dumb_create = nouveau_display_dumb_create,
.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
#ifdef GIT_REVISION
.date = GIT_REVISION,
#else
.date = DRIVER_DATE,
#endif
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
static struct pci_device_id
nouveau_drm_pci_table[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
.class = PCI_BASE_CLASS_DISPLAY << 16,
.class_mask = 0xff << 16,
},
{
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
.class = PCI_BASE_CLASS_DISPLAY << 16,
.class_mask = 0xff << 16,
},
{}
};
static void nouveau_display_options(void)
{
DRM_DEBUG_DRIVER("Loading Nouveau with parameters:\n");
DRM_DEBUG_DRIVER("... tv_disable : %d\n", nouveau_tv_disable);
DRM_DEBUG_DRIVER("... ignorelid : %d\n", nouveau_ignorelid);
DRM_DEBUG_DRIVER("... duallink : %d\n", nouveau_duallink);
DRM_DEBUG_DRIVER("... config : %s\n", nouveau_config);
DRM_DEBUG_DRIVER("... debug : %s\n", nouveau_debug);
DRM_DEBUG_DRIVER("... noaccel : %d\n", nouveau_noaccel);
DRM_DEBUG_DRIVER("... modeset : %d\n", nouveau_modeset);
DRM_DEBUG_DRIVER("... runpm : %d\n", nouveau_runtime_pm);
DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf);
DRM_DEBUG_DRIVER("... hdmimhz : %d\n", nouveau_hdmimhz);
}
static const struct dev_pm_ops nouveau_pm_ops = {
.suspend = nouveau_pmops_suspend,
.resume = nouveau_pmops_resume,
.freeze = nouveau_pmops_freeze,
.thaw = nouveau_pmops_thaw,
.poweroff = nouveau_pmops_freeze,
.restore = nouveau_pmops_resume,
.runtime_suspend = nouveau_pmops_runtime_suspend,
.runtime_resume = nouveau_pmops_runtime_resume,
.runtime_idle = nouveau_pmops_runtime_idle,
};
static struct pci_driver
nouveau_drm_pci_driver = {
.name = "nouveau",
.id_table = nouveau_drm_pci_table,
.probe = nouveau_drm_probe,
.remove = nouveau_drm_remove,
.driver.pm = &nouveau_pm_ops,
};
struct drm_device *
nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
struct platform_device *pdev,
struct nvkm_device **pdevice)
{
struct drm_device *drm;
int err;
err = nvkm_device_tegra_new(func, pdev, nouveau_config, nouveau_debug,
true, true, ~0ULL, pdevice);
if (err)
goto err_free;
drm = drm_dev_alloc(&driver_platform, &pdev->dev);
if (IS_ERR(drm)) {
err = PTR_ERR(drm);
goto err_free;
}
err = nouveau_drm_device_init(drm);
if (err)
goto err_put;
platform_set_drvdata(pdev, drm);
return drm;
err_put:
drm_dev_put(drm);
err_free:
nvkm_device_del(pdevice);
return ERR_PTR(err);
}
static int __init
nouveau_drm_init(void)
{
driver_pci = driver_stub;
driver_platform = driver_stub;
nouveau_display_options();
if (nouveau_modeset == -1) {
if (drm_firmware_drivers_only())
nouveau_modeset = 0;
}
if (!nouveau_modeset)
return 0;
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
platform_driver_register(&nouveau_platform_driver);
#endif
nouveau_register_dsm_handler();
nouveau_backlight_ctor();
#ifdef CONFIG_PCI
return pci_register_driver(&nouveau_drm_pci_driver);
#else
return 0;
#endif
}
static void __exit
nouveau_drm_exit(void)
{
if (!nouveau_modeset)
return;
#ifdef CONFIG_PCI
pci_unregister_driver(&nouveau_drm_pci_driver);
#endif
nouveau_backlight_dtor();
nouveau_unregister_dsm_handler();
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
platform_driver_unregister(&nouveau_platform_driver);
#endif
if (IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM))
mmu_notifier_synchronize();
}
module_init(nouveau_drm_init);
module_exit(nouveau_drm_exit);
MODULE_DEVICE_TABLE(pci, nouveau_drm_pci_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
| linux-master | drivers/gpu/drm/nouveau/nouveau_drm.c |
/*
* Copyright (C) 2008 Maarten Maathuis.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <acpi/video.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "nouveau_crtc.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nv50_display.h"
#include <nvif/class.h>
#include <nvif/if0011.h>
#include <nvif/if0013.h>
#include <dispnv50/crc.h>
int
nouveau_display_vblank_enable(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc;
nv_crtc = nouveau_crtc(crtc);
nvif_event_allow(&nv_crtc->vblank);
return 0;
}
void
nouveau_display_vblank_disable(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc;
nv_crtc = nouveau_crtc(crtc);
nvif_event_block(&nv_crtc->vblank);
}
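/* Convert the raw scanline counter into a position relative to the start
 * of active scanout, negative while inside the vertical blanking period,
 * which is what the DRM vblank timestamping helpers expect.
 */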
static inline int
calc(int blanks, int blanke, int total, int line)
{
if (blanke >= blanks) {
if (line >= blanks)
line -= total;
} else {
if (line >= blanks)
line -= total;
line -= blanke + 1;
}
return line;
}
static bool
nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime)
{
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
struct nvif_head *head = &nouveau_crtc(crtc)->head;
struct nvif_head_scanoutpos_v0 args;
int retry = 20;
bool ret = false;
args.version = 0;
do {
ret = nvif_mthd(&head->object, NVIF_HEAD_V0_SCANOUTPOS, &args, sizeof(args));
if (ret != 0)
return false;
if (args.vline) {
ret = true;
break;
}
if (retry) ndelay(vblank->linedur_ns);
} while (retry--);
*hpos = args.hline;
*vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
if (stime) *stime = ns_to_ktime(args.time[0]);
if (etime) *etime = ns_to_ktime(args.time[1]);
return ret;
}
bool
nouveau_display_scanoutpos(struct drm_crtc *crtc,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
stime, etime);
}
static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
static void
nouveau_decode_mod(struct nouveau_drm *drm,
uint64_t modifier,
uint32_t *tile_mode,
uint8_t *kind)
{
struct nouveau_display *disp = nouveau_display(drm->dev);
BUG_ON(!tile_mode || !kind);
if (modifier == DRM_FORMAT_MOD_LINEAR) {
/* tile_mode will not be used in this case */
*tile_mode = 0;
*kind = 0;
} else {
/*
* Extract the block height and kind from the corresponding
* modifier fields. See drm_fourcc.h for details.
*/
if ((modifier & (0xffull << 12)) == 0ull) {
/* Legacy modifier. Translate to this dev's 'kind.' */
modifier |= disp->format_modifiers[0] & (0xffull << 12);
}
*tile_mode = (uint32_t)(modifier & 0xF);
*kind = (uint8_t)((modifier >> 12) & 0xFF);
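/* Fermi and newer keep the log2 block height in bits 4:7 of the
 * tile mode, so shift the modifier's value into place.
 */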
if (drm->client.device.info.chipset >= 0xc0)
*tile_mode <<= 4;
}
}
void
nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
uint32_t *tile_mode,
uint8_t *kind)
{
if (fb->flags & DRM_MODE_FB_MODIFIERS) {
struct nouveau_drm *drm = nouveau_drm(fb->dev);
nouveau_decode_mod(drm, fb->modifier, tile_mode, kind);
} else {
const struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
*tile_mode = nvbo->mode;
*kind = nvbo->kind;
}
}
static const u64 legacy_modifiers[] = {
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
DRM_FORMAT_MOD_INVALID
};
static int
nouveau_validate_decode_mod(struct nouveau_drm *drm,
uint64_t modifier,
uint32_t *tile_mode,
uint8_t *kind)
{
struct nouveau_display *disp = nouveau_display(drm->dev);
int mod;
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
return -EINVAL;
}
BUG_ON(!disp->format_modifiers);
for (mod = 0;
(disp->format_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
(disp->format_modifiers[mod] != modifier);
mod++);
if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) {
for (mod = 0;
(legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
(legacy_modifiers[mod] != modifier);
mod++);
if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
return -EINVAL;
}
nouveau_decode_mod(drm, modifier, tile_mode, kind);
return 0;
}
static inline uint32_t
nouveau_get_width_in_blocks(uint32_t stride)
{
/* GOBs per block in the x direction is always one, and GOBs are
* 64 bytes wide
*/
static const uint32_t log_block_width = 6;
return (stride + (1 << log_block_width) - 1) >> log_block_width;
}
static inline uint32_t
nouveau_get_height_in_blocks(struct nouveau_drm *drm,
uint32_t height,
uint32_t log_block_height_in_gobs)
{
uint32_t log_gob_height;
uint32_t log_block_height;
BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
log_gob_height = 2;
else
log_gob_height = 3;
log_block_height = log_block_height_in_gobs + log_gob_height;
return (height + (1 << log_block_height) - 1) >> log_block_height;
}
static int
nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
uint32_t offset, uint32_t stride, uint32_t h,
uint32_t tile_mode)
{
uint32_t gob_size, bw, bh;
uint64_t bl_size;
BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
if (drm->client.device.info.chipset >= 0xc0) {
if (tile_mode & 0xF)
return -EINVAL;
tile_mode >>= 4;
}
if (tile_mode & 0xFFFFFFF0)
return -EINVAL;
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
gob_size = 256;
else
gob_size = 512;
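/* Worst-case footprint of the block-linear layout: width and height
 * rounded up to whole blocks, times GOBs per block, times GOB size.
 */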
bw = nouveau_get_width_in_blocks(stride);
bh = nouveau_get_height_in_blocks(drm, h, tile_mode);
bl_size = bw * bh * (1 << tile_mode) * gob_size;
DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
nvbo->bo.base.size);
if (bl_size + offset > nvbo->bo.base.size)
return -ERANGE;
return 0;
}
int
nouveau_framebuffer_new(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct drm_framebuffer *fb;
const struct drm_format_info *info;
unsigned int height, i;
uint32_t tile_mode;
uint8_t kind;
int ret;
/* YUV overlays have special requirements pre-NV50 */
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
(mode_cmd->pixel_format == DRM_FORMAT_YUYV ||
mode_cmd->pixel_format == DRM_FORMAT_UYVY ||
mode_cmd->pixel_format == DRM_FORMAT_NV12 ||
mode_cmd->pixel_format == DRM_FORMAT_NV21) &&
(mode_cmd->pitches[0] & 0x3f || /* align 64 */
mode_cmd->pitches[0] >= 0x10000 || /* at most 64k pitch */
(mode_cmd->pitches[1] && /* pitches for planes must match */
mode_cmd->pitches[0] != mode_cmd->pitches[1]))) {
DRM_DEBUG_KMS("Unsuitable framebuffer: format: %p4cc; pitches: 0x%x\n 0x%x\n",
&mode_cmd->pixel_format,
mode_cmd->pitches[0], mode_cmd->pitches[1]);
return -EINVAL;
}
if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
if (nouveau_validate_decode_mod(drm, mode_cmd->modifier[0],
&tile_mode, &kind)) {
DRM_DEBUG_KMS("Unsupported modifier: 0x%llx\n",
mode_cmd->modifier[0]);
return -EINVAL;
}
} else {
tile_mode = nvbo->mode;
kind = nvbo->kind;
}
info = drm_get_format_info(dev, mode_cmd);
for (i = 0; i < info->num_planes; i++) {
height = drm_format_info_plane_height(info,
mode_cmd->height,
i);
if (kind) {
ret = nouveau_check_bl_size(drm, nvbo,
mode_cmd->offsets[i],
mode_cmd->pitches[i],
height, tile_mode);
if (ret)
return ret;
} else {
uint32_t size = mode_cmd->pitches[i] * height;
if (size + mode_cmd->offsets[i] > nvbo->bo.base.size)
return -ERANGE;
}
}
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
fb->obj[0] = gem;
ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
if (ret)
kfree(fb);
return ret;
}
struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_framebuffer *fb;
struct drm_gem_object *gem;
int ret;
gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
if (ret == 0)
return fb;
drm_gem_object_put(gem);
return ERR_PTR(ret);
}
static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
};
struct nouveau_drm_prop_enum_list {
u8 gen_mask;
int type;
char *name;
};
static struct nouveau_drm_prop_enum_list underscan[] = {
{ 6, UNDERSCAN_AUTO, "auto" },
{ 6, UNDERSCAN_OFF, "off" },
{ 6, UNDERSCAN_ON, "on" },
{}
};
static struct nouveau_drm_prop_enum_list dither_mode[] = {
{ 7, DITHERING_MODE_AUTO, "auto" },
{ 7, DITHERING_MODE_OFF, "off" },
{ 1, DITHERING_MODE_ON, "on" },
{ 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
{ 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
{ 4, DITHERING_MODE_TEMPORAL, "temporal" },
{}
};
static struct nouveau_drm_prop_enum_list dither_depth[] = {
{ 6, DITHERING_DEPTH_AUTO, "auto" },
{ 6, DITHERING_DEPTH_6BPC, "6 bpc" },
{ 6, DITHERING_DEPTH_8BPC, "8 bpc" },
{}
};
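/* Create an enum property containing only the entries whose gen_mask
 * includes the current display generation.
 */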
#define PROP_ENUM(p,gen,n,list) do { \
struct nouveau_drm_prop_enum_list *l = (list); \
int c = 0; \
while (l->gen_mask) { \
if (l->gen_mask & (1 << (gen))) \
c++; \
l++; \
} \
if (c) { \
p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \
l = (list); \
while (p && l->gen_mask) { \
if (l->gen_mask & (1 << (gen))) { \
drm_property_add_enum(p, l->type, l->name); \
} \
l++; \
} \
} \
} while(0)
void
nouveau_display_hpd_resume(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
spin_lock_irq(&drm->hpd_lock);
drm->hpd_pending = ~0;
spin_unlock_irq(&drm->hpd_lock);
schedule_work(&drm->hpd_work);
}
static void
nouveau_display_hpd_work(struct work_struct *work)
{
struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
struct drm_device *dev = drm->dev;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
u32 pending;
int changed = 0;
struct drm_connector *first_changed_connector = NULL;
pm_runtime_get_sync(dev->dev);
spin_lock_irq(&drm->hpd_lock);
pending = drm->hpd_pending;
drm->hpd_pending = 0;
spin_unlock_irq(&drm->hpd_lock);
/* Nothing to do, exit early without updating the last busy counter */
if (!pending)
goto noop;
mutex_lock(&dev->mode_config.mutex);
drm_connector_list_iter_begin(dev, &conn_iter);
nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
struct nouveau_connector *nv_connector = nouveau_connector(connector);
enum drm_connector_status old_status = connector->status;
u64 bits, old_epoch_counter = connector->epoch_counter;
if (!(pending & drm_connector_mask(connector)))
continue;
spin_lock_irq(&drm->hpd_lock);
bits = nv_connector->hpd_pending;
nv_connector->hpd_pending = 0;
spin_unlock_irq(&drm->hpd_lock);
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] plug:%d unplug:%d irq:%d\n",
connector->base.id, connector->name,
!!(bits & NVIF_CONN_EVENT_V0_PLUG),
!!(bits & NVIF_CONN_EVENT_V0_UNPLUG),
!!(bits & NVIF_CONN_EVENT_V0_IRQ));
if (bits & NVIF_CONN_EVENT_V0_IRQ) {
if (nouveau_dp_link_check(nv_connector))
continue;
}
connector->status = drm_helper_probe_detect(connector, NULL, false);
if (old_epoch_counter == connector->epoch_counter)
continue;
changed++;
if (!first_changed_connector) {
drm_connector_get(connector);
first_changed_connector = connector;
}
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
connector->base.id, connector->name,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status),
old_epoch_counter, connector->epoch_counter);
}
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
if (changed == 1)
drm_kms_helper_connector_hotplug_event(first_changed_connector);
else if (changed > 0)
drm_kms_helper_hotplug_event(dev);
if (first_changed_connector)
drm_connector_put(first_changed_connector);
pm_runtime_mark_last_busy(drm->dev->dev);
noop:
pm_runtime_put_autosuspend(dev->dev);
}
#ifdef CONFIG_ACPI
static int
nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
void *data)
{
struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
struct acpi_bus_event *info = data;
int ret;
if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
ret = pm_runtime_get(drm->dev->dev);
if (ret == 1 || ret == -EACCES) {
/* If the GPU is already awake, or in a state
* where we can't wake it up, it can handle
* its own hotplug events.
*/
pm_runtime_put_autosuspend(drm->dev->dev);
} else if (ret == 0 || ret == -EINPROGRESS) {
/* We've started resuming the GPU already, so
* it will handle scheduling a full reprobe
* itself
*/
NV_DEBUG(drm, "ACPI requested connector reprobe\n");
pm_runtime_put_noidle(drm->dev->dev);
} else {
NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
ret);
}
/* acpi-video should not generate keypresses for this */
return NOTIFY_BAD;
}
}
return NOTIFY_DONE;
}
#endif
int
nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
int ret;
/*
* Enable hotplug interrupts (done as early as possible, since we need
* them for MST)
*/
drm_connector_list_iter_begin(dev, &conn_iter);
nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
struct nouveau_connector *conn = nouveau_connector(connector);
nvif_event_allow(&conn->hpd);
nvif_event_allow(&conn->irq);
}
drm_connector_list_iter_end(&conn_iter);
ret = disp->init(dev, resume, runtime);
if (ret)
return ret;
/* enable connector detection and polling for connectors without HPD
* support
*/
drm_kms_helper_poll_enable(dev);
return ret;
}
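/* Tear the display down for suspend or driver unload: disable all outputs
 * unless we are suspending, block per-connector HPD/IRQ events, flush any
 * pending hotplug work (except on runtime suspend), stop connector polling
 * and finally call the backend's fini hook.
 */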
void
nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
if (!suspend) {
if (drm_drv_uses_atomic_modeset(dev))
drm_atomic_helper_shutdown(dev);
else
drm_helper_force_disable_all(dev);
}
/* disable hotplug interrupts */
drm_connector_list_iter_begin(dev, &conn_iter);
nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
struct nouveau_connector *conn = nouveau_connector(connector);
nvif_event_block(&conn->irq);
nvif_event_block(&conn->hpd);
}
drm_connector_list_iter_end(&conn_iter);
if (!runtime)
cancel_work_sync(&drm->hpd_work);
drm_kms_helper_poll_disable(dev);
disp->fini(dev, runtime, suspend);
}
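/* Attach nouveau-specific connector properties.  The set grows with the
 * display generation: every board gets dithering and underscan controls,
 * while NV50 and newer also get vibrant hue and color vibrance.  The hue
 * and vibrance ranges are stored with an offset, so 0..180 encodes
 * -90..+90 and 0..200 encodes -100..+100.
 */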
static void
nouveau_display_create_properties(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
int gen;
if (disp->disp.object.oclass < NV50_DISP)
gen = 0;
else
if (disp->disp.object.oclass < GF110_DISP)
gen = 1;
else
gen = 2;
PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
disp->underscan_hborder_property =
drm_property_create_range(dev, 0, "underscan hborder", 0, 128);
disp->underscan_vborder_property =
drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
if (gen < 1)
return;
/* -90..+90 */
disp->vibrant_hue_property =
drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
/* -100..+100 */
disp->color_vibrance_property =
drm_property_create_range(dev, 0, "color vibrance", 0, 200);
}
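/* Construct the KMS core.  Mode config limits are derived from the GPU
 * family (2K x 2K before CELSIUS, up to 16K x 16K from FERMI onwards), the
 * nv04- or nv50-style display backend is created when the VBIOS provides a
 * DCB, and vblank (plus CRC capture on NV50-style displays) is initialised
 * for the registered CRTCs.
 */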
int
nouveau_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_display *disp;
int ret;
disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
drm_mode_config_init(dev);
drm_mode_create_scaling_mode_property(dev);
drm_mode_create_dvi_i_properties(dev);
dev->mode_config.funcs = &nouveau_mode_config_funcs;
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
} else
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) {
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
} else {
dev->mode_config.max_width = 16384;
dev->mode_config.max_height = 16384;
}
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
if (drm->client.device.info.chipset < 0x11)
dev->mode_config.async_page_flip = false;
else
dev->mode_config.async_page_flip = true;
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0,
&disp->disp);
if (ret == 0) {
nouveau_display_create_properties(dev);
if (disp->disp.object.oclass < NV50_DISP) {
dev->mode_config.fb_modifiers_not_supported = true;
ret = nv04_display_create(dev);
} else {
ret = nv50_display_create(dev);
}
}
} else {
ret = 0;
}
if (ret)
goto disp_create_err;
drm_mode_config_reset(dev);
if (dev->mode_config.num_crtc) {
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret)
goto vblank_err;
if (disp->disp.object.oclass >= NV50_DISP)
nv50_crc_init(dev);
}
INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
spin_lock_init(&drm->hpd_lock);
#ifdef CONFIG_ACPI
drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
register_acpi_notifier(&drm->acpi_nb);
#endif
return 0;
vblank_err:
disp->dtor(dev);
disp_create_err:
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
return ret;
}
void
nouveau_display_destroy(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
#ifdef CONFIG_ACPI
unregister_acpi_notifier(&drm->acpi_nb);
#endif
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
if (disp->dtor)
disp->dtor(dev);
nvif_disp_dtor(&disp->disp);
drm->display = NULL;
kfree(disp);
}
int
nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
/* Disable console. */
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, true);
if (drm_drv_uses_atomic_modeset(dev)) {
if (!runtime) {
disp->suspend = drm_atomic_helper_suspend(dev);
if (IS_ERR(disp->suspend)) {
int ret = PTR_ERR(disp->suspend);
disp->suspend = NULL;
return ret;
}
}
}
nouveau_display_fini(dev, true, runtime);
return 0;
}
void
nouveau_display_resume(struct drm_device *dev, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
nouveau_display_init(dev, true, runtime);
if (drm_drv_uses_atomic_modeset(dev)) {
if (disp->suspend) {
drm_atomic_helper_resume(dev, disp->suspend);
disp->suspend = NULL;
}
}
/* Enable console. */
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, false);
}
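/* Dumb buffer allocation: the pitch is the line size in bytes rounded up
 * to 256 bytes, and the total size is rounded up to a whole page.  For
 * example, a 1920x1080 buffer at 32bpp gets a 7680-byte pitch and an
 * 8294400-byte allocation (2025 pages with 4KiB pages).  Buffers land in
 * VRAM when the board has any, otherwise in GART.
 */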
int
nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *bo;
uint32_t domain;
int ret;
args->pitch = roundup(args->width * (args->bpp / 8), 256);
args->size = args->pitch * args->height;
args->size = roundup(args->size, PAGE_SIZE);
/* Use VRAM if there is any; otherwise fall back to system memory */
if (nouveau_drm(dev)->client.device.info.ram_size != 0)
domain = NOUVEAU_GEM_DOMAIN_VRAM;
else
domain = NOUVEAU_GEM_DOMAIN_GART;
ret = nouveau_gem_new(cli, args->size, 0, domain, 0, 0, &bo);
if (ret)
return ret;
ret = drm_gem_handle_create(file_priv, &bo->bo.base, &args->handle);
drm_gem_object_put(&bo->bo.base);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_display.c |
/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#endif
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include "nouveau_drv.h"
#include "nouveau_hwmon.h"
#include <nvkm/subdev/iccsense.h>
#include <nvkm/subdev/volt.h>
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
static ssize_t
nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d,
struct device_attribute *a, char *buf)
{
return sysfs_emit(buf, "%d\n", 100);
}
static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, 0444,
nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0);
static ssize_t
nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
return sysfs_emit(buf, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
}
static ssize_t
nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
struct device_attribute *a,
const char *buf, size_t count)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
if (kstrtol(buf, 10, &value))
return -EINVAL;
therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST,
value / 1000);
return count;
}
static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, 0644,
nouveau_hwmon_temp1_auto_point1_temp,
nouveau_hwmon_set_temp1_auto_point1_temp, 0);
static ssize_t
nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
return sysfs_emit(buf, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
}
static ssize_t
nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
struct device_attribute *a,
const char *buf, size_t count)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
if (kstrtol(buf, 10, &value))
return -EINVAL;
therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST,
value / 1000);
return count;
}
static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp_hyst, 0644,
nouveau_hwmon_temp1_auto_point1_temp_hyst,
nouveau_hwmon_set_temp1_auto_point1_temp_hyst, 0);
static ssize_t
nouveau_hwmon_get_pwm1_max(struct device *d,
struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
int ret;
ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY);
if (ret < 0)
return ret;
return sprintf(buf, "%i\n", ret);
}
static ssize_t
nouveau_hwmon_get_pwm1_min(struct device *d,
struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
int ret;
ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY);
if (ret < 0)
return ret;
return sprintf(buf, "%i\n", ret);
}
static ssize_t
nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
const char *buf, size_t count)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
int ret;
if (kstrtol(buf, 10, &value))
return -EINVAL;
ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY, value);
if (ret < 0)
return ret;
return count;
}
static SENSOR_DEVICE_ATTR(pwm1_min, 0644,
nouveau_hwmon_get_pwm1_min,
nouveau_hwmon_set_pwm1_min, 0);
static ssize_t
nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
const char *buf, size_t count)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
int ret;
if (kstrtol(buf, 10, &value))
return -EINVAL;
ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY, value);
if (ret < 0)
return ret;
return count;
}
static SENSOR_DEVICE_ATTR(pwm1_max, 0644,
nouveau_hwmon_get_pwm1_max,
nouveau_hwmon_set_pwm1_max, 0);
static struct attribute *pwm_fan_sensor_attrs[] = {
&sensor_dev_attr_pwm1_min.dev_attr.attr,
&sensor_dev_attr_pwm1_max.dev_attr.attr,
NULL
};
static const struct attribute_group pwm_fan_sensor_group = {
.attrs = pwm_fan_sensor_attrs,
};
static struct attribute *temp1_auto_point_sensor_attrs[] = {
&sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_temp1_auto_point1_temp_hyst.dev_attr.attr,
NULL
};
static const struct attribute_group temp1_auto_point_sensor_group = {
.attrs = temp1_auto_point_sensor_attrs,
};
#define N_ATTR_GROUPS 3
static const struct hwmon_channel_info * const nouveau_info[] = {
HWMON_CHANNEL_INFO(chip,
HWMON_C_UPDATE_INTERVAL),
HWMON_CHANNEL_INFO(temp,
HWMON_T_INPUT |
HWMON_T_MAX | HWMON_T_MAX_HYST |
HWMON_T_CRIT | HWMON_T_CRIT_HYST |
HWMON_T_EMERGENCY | HWMON_T_EMERGENCY_HYST),
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT),
HWMON_CHANNEL_INFO(in,
HWMON_I_INPUT |
HWMON_I_MIN | HWMON_I_MAX |
HWMON_I_LABEL),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
HWMON_CHANNEL_INFO(power,
HWMON_P_INPUT | HWMON_P_CAP_MAX | HWMON_P_CRIT),
NULL
};
static umode_t
nouveau_chip_is_visible(const void *data, u32 attr, int channel)
{
switch (attr) {
case hwmon_chip_update_interval:
return 0444;
default:
return 0;
}
}
static umode_t
nouveau_power_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
if (!iccsense || !iccsense->data_valid || list_empty(&iccsense->rails))
return 0;
switch (attr) {
case hwmon_power_input:
return 0444;
case hwmon_power_max:
if (iccsense->power_w_max)
return 0444;
return 0;
case hwmon_power_crit:
if (iccsense->power_w_crit)
return 0444;
return 0;
default:
return 0;
}
}
static umode_t
nouveau_temp_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
if (!therm || !therm->attr_get || nvkm_therm_temp_get(therm) < 0)
return 0;
switch (attr) {
case hwmon_temp_input:
return 0444;
case hwmon_temp_max:
case hwmon_temp_max_hyst:
case hwmon_temp_crit:
case hwmon_temp_crit_hyst:
case hwmon_temp_emergency:
case hwmon_temp_emergency_hyst:
return 0644;
default:
return 0;
}
}
static umode_t
nouveau_pwm_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
if (!therm || !therm->attr_get || !therm->fan_get ||
therm->fan_get(therm) < 0)
return 0;
switch (attr) {
case hwmon_pwm_enable:
case hwmon_pwm_input:
return 0644;
default:
return 0;
}
}
static umode_t
nouveau_input_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
if (!volt || nvkm_volt_get(volt) < 0)
return 0;
switch (attr) {
case hwmon_in_input:
case hwmon_in_label:
case hwmon_in_min:
case hwmon_in_max:
return 0444;
default:
return 0;
}
}
static umode_t
nouveau_fan_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
if (!therm || !therm->attr_get || nvkm_therm_fan_sense(therm) < 0)
return 0;
switch (attr) {
case hwmon_fan_input:
return 0444;
default:
return 0;
}
}
static int
nouveau_chip_read(struct device *dev, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_chip_update_interval:
*val = 1000;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int
nouveau_temp_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
int ret;
if (!therm || !therm->attr_get)
return -EOPNOTSUPP;
switch (attr) {
case hwmon_temp_input:
if (drm_dev->switch_power_state != DRM_SWITCH_POWER_ON)
return -EINVAL;
ret = nvkm_therm_temp_get(therm);
*val = ret < 0 ? ret : (ret * 1000);
break;
case hwmon_temp_max:
*val = therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK)
* 1000;
break;
case hwmon_temp_max_hyst:
*val = therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST)
* 1000;
break;
case hwmon_temp_crit:
*val = therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL)
* 1000;
break;
case hwmon_temp_crit_hyst:
*val = therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL_HYST)
* 1000;
break;
case hwmon_temp_emergency:
*val = therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN)
* 1000;
break;
case hwmon_temp_emergency_hyst:
*val = therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST)
* 1000;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int
nouveau_fan_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
if (!therm)
return -EOPNOTSUPP;
switch (attr) {
case hwmon_fan_input:
if (drm_dev->switch_power_state != DRM_SWITCH_POWER_ON)
return -EINVAL;
*val = nvkm_therm_fan_sense(therm);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int
nouveau_in_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
int ret;
if (!volt)
return -EOPNOTSUPP;
switch (attr) {
case hwmon_in_input:
if (drm_dev->switch_power_state != DRM_SWITCH_POWER_ON)
return -EINVAL;
ret = nvkm_volt_get(volt);
*val = ret < 0 ? ret : (ret / 1000);
break;
case hwmon_in_min:
*val = volt->min_uv > 0 ? (volt->min_uv / 1000) : -ENODEV;
break;
case hwmon_in_max:
*val = volt->max_uv > 0 ? (volt->max_uv / 1000) : -ENODEV;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int
nouveau_pwm_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
if (!therm || !therm->attr_get || !therm->fan_get)
return -EOPNOTSUPP;
switch (attr) {
case hwmon_pwm_enable:
*val = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MODE);
break;
case hwmon_pwm_input:
if (drm_dev->switch_power_state != DRM_SWITCH_POWER_ON)
return -EINVAL;
*val = therm->fan_get(therm);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int
nouveau_power_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
if (!iccsense)
return -EOPNOTSUPP;
switch (attr) {
case hwmon_power_input:
if (drm_dev->switch_power_state != DRM_SWITCH_POWER_ON)
return -EINVAL;
*val = nvkm_iccsense_read_all(iccsense);
break;
case hwmon_power_max:
*val = iccsense->power_w_max;
break;
case hwmon_power_crit:
*val = iccsense->power_w_crit;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int
nouveau_temp_write(struct device *dev, u32 attr, int channel, long val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
if (!therm || !therm->attr_set)
return -EOPNOTSUPP;
switch (attr) {
case hwmon_temp_max:
return therm->attr_set(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK,
val / 1000);
case hwmon_temp_max_hyst:
return therm->attr_set(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST,
val / 1000);
case hwmon_temp_crit:
return therm->attr_set(therm, NVKM_THERM_ATTR_THRS_CRITICAL,
val / 1000);
case hwmon_temp_crit_hyst:
return therm->attr_set(therm, NVKM_THERM_ATTR_THRS_CRITICAL_HYST,
val / 1000);
case hwmon_temp_emergency:
return therm->attr_set(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN,
val / 1000);
case hwmon_temp_emergency_hyst:
return therm->attr_set(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST,
val / 1000);
default:
return -EOPNOTSUPP;
}
}
static int
nouveau_pwm_write(struct device *dev, u32 attr, int channel, long val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
if (!therm || !therm->attr_set)
return -EOPNOTSUPP;
switch (attr) {
case hwmon_pwm_input:
return therm->fan_set(therm, val);
case hwmon_pwm_enable:
return therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MODE, val);
default:
return -EOPNOTSUPP;
}
}
static umode_t
nouveau_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
int channel)
{
switch (type) {
case hwmon_chip:
return nouveau_chip_is_visible(data, attr, channel);
case hwmon_temp:
return nouveau_temp_is_visible(data, attr, channel);
case hwmon_fan:
return nouveau_fan_is_visible(data, attr, channel);
case hwmon_in:
return nouveau_input_is_visible(data, attr, channel);
case hwmon_pwm:
return nouveau_pwm_is_visible(data, attr, channel);
case hwmon_power:
return nouveau_power_is_visible(data, attr, channel);
default:
return 0;
}
}
static const char input_label[] = "GPU core";
static int
nouveau_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, const char **buf)
{
if (type == hwmon_in && attr == hwmon_in_label) {
*buf = input_label;
return 0;
}
return -EOPNOTSUPP;
}
static int
nouveau_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
switch (type) {
case hwmon_chip:
return nouveau_chip_read(dev, attr, channel, val);
case hwmon_temp:
return nouveau_temp_read(dev, attr, channel, val);
case hwmon_fan:
return nouveau_fan_read(dev, attr, channel, val);
case hwmon_in:
return nouveau_in_read(dev, attr, channel, val);
case hwmon_pwm:
return nouveau_pwm_read(dev, attr, channel, val);
case hwmon_power:
return nouveau_power_read(dev, attr, channel, val);
default:
return -EOPNOTSUPP;
}
}
static int
nouveau_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long val)
{
switch (type) {
case hwmon_temp:
return nouveau_temp_write(dev, attr, channel, val);
case hwmon_pwm:
return nouveau_pwm_write(dev, attr, channel, val);
default:
return -EOPNOTSUPP;
}
}
static const struct hwmon_ops nouveau_hwmon_ops = {
.is_visible = nouveau_is_visible,
.read = nouveau_read,
.read_string = nouveau_read_string,
.write = nouveau_write,
};
static const struct hwmon_chip_info nouveau_chip_info = {
.ops = &nouveau_hwmon_ops,
.info = nouveau_info,
};
#endif
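/* Register the hwmon device.  Registration is skipped entirely when none
 * of the iccsense, therm or volt subdevs are present; the extra sysfs
 * groups (automatic fan curve point and fan PWM min/max) are only exposed
 * when therm can both read and write its attributes and the corresponding
 * sensor reads succeed.
 */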
int
nouveau_hwmon_init(struct drm_device *dev)
{
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
const struct attribute_group *special_groups[N_ATTR_GROUPS];
struct nouveau_hwmon *hwmon;
struct device *hwmon_dev;
int ret = 0;
int i = 0;
if (!iccsense && !therm && !volt) {
NV_DEBUG(drm, "Skipping hwmon registration\n");
return 0;
}
hwmon = drm->hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
return -ENOMEM;
hwmon->dev = dev;
if (therm && therm->attr_get && therm->attr_set) {
if (nvkm_therm_temp_get(therm) >= 0)
special_groups[i++] = &temp1_auto_point_sensor_group;
if (therm->fan_get && therm->fan_get(therm) >= 0)
special_groups[i++] = &pwm_fan_sensor_group;
}
special_groups[i] = NULL;
hwmon_dev = hwmon_device_register_with_info(dev->dev, "nouveau", dev,
&nouveau_chip_info,
special_groups);
if (IS_ERR(hwmon_dev)) {
ret = PTR_ERR(hwmon_dev);
NV_ERROR(drm, "Unable to register hwmon device: %d\n", ret);
return ret;
}
hwmon->hwmon = hwmon_dev;
return 0;
#else
return 0;
#endif
}
void
nouveau_hwmon_fini(struct drm_device *dev)
{
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct nouveau_hwmon *hwmon = nouveau_hwmon(dev);
if (!hwmon)
return;
if (hwmon->hwmon)
hwmon_device_unregister(hwmon->hwmon);
nouveau_drm(dev)->hwmon = NULL;
kfree(hwmon);
#endif
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_hwmon.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include <nvif/os.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nv10_fence.h"
#include "nv50_display.h"
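/* NV50 fences reuse the nv10/nv17 emit/read/sync code, but the shared
 * fence buffer is exposed to each channel through an NvSema DMA object
 * covering the buffer's current VRAM range.
 */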
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_resource *reg = priv->bo->bo.resource;
u32 start = reg->start * PAGE_SIZE;
u32 limit = start + priv->bo->bo.base.size - 1;
int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
nouveau_fence_context_new(chan, &fctx->base);
fctx->base.emit = nv10_fence_emit;
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv17_fence_sync;
ret = nvif_object_ctor(&chan->user, "fenceCtxDma", NvSema,
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = start,
.limit = limit,
}, sizeof(struct nv_dma_v0),
&fctx->sema);
if (ret)
nv10_fence_context_del(chan);
return ret;
}
int
nv50_fence_create(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv;
int ret = 0;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base.dtor = nv10_fence_destroy;
priv->base.resume = nv17_fence_resume;
priv->base.context_new = nv50_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(priv->bo);
if (ret)
nouveau_bo_unpin(priv->bo);
}
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
if (ret) {
nv10_fence_destroy(drm);
return ret;
}
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nv50_fence.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "nouveau_svm.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dmem.h"
#include <nvif/event.h>
#include <nvif/object.h>
#include <nvif/vmm.h>
#include <nvif/class.h>
#include <nvif/clb069.h>
#include <nvif/ifc00d.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>
#include <linux/memremap.h>
#include <linux/rmap.h>
struct nouveau_svm {
struct nouveau_drm *drm;
struct mutex mutex;
struct list_head inst;
struct nouveau_svm_fault_buffer {
int id;
struct nvif_object object;
u32 entries;
u32 getaddr;
u32 putaddr;
u32 get;
u32 put;
struct nvif_event notify;
struct work_struct work;
struct nouveau_svm_fault {
u64 inst;
u64 addr;
u64 time;
u32 engine;
u8 gpc;
u8 hub;
u8 access;
u8 client;
u8 fault;
struct nouveau_svmm *svmm;
} **fault;
int fault_nr;
} buffer[1];
};
#define FAULT_ACCESS_READ 0
#define FAULT_ACCESS_WRITE 1
#define FAULT_ACCESS_ATOMIC 2
#define FAULT_ACCESS_PREFETCH 3
#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
struct nouveau_pfnmap_args {
struct nvif_ioctl_v0 i;
struct nvif_ioctl_mthd_v0 m;
struct nvif_vmm_pfnmap_v0 p;
};
struct nouveau_ivmm {
struct nouveau_svmm *svmm;
u64 inst;
struct list_head head;
};
static struct nouveau_ivmm *
nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
{
struct nouveau_ivmm *ivmm;
list_for_each_entry(ivmm, &svm->inst, head) {
if (ivmm->inst == inst)
return ivmm;
}
return NULL;
}
#define SVMM_DBG(s,f,a...) \
NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
#define SVMM_ERR(s,f,a...) \
NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
int
nouveau_svmm_bind(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_nouveau_svm_bind *args = data;
unsigned target, cmd, priority;
unsigned long addr, end;
struct mm_struct *mm;
args->va_start &= PAGE_MASK;
args->va_end = ALIGN(args->va_end, PAGE_SIZE);
/* Sanity check arguments */
if (args->reserved0 || args->reserved1)
return -EINVAL;
if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
return -EINVAL;
if (args->va_start >= args->va_end)
return -EINVAL;
cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
switch (cmd) {
case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
break;
default:
return -EINVAL;
}
priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;
/* FIXME: support CPU targets, i.e. all target values < GPU_VRAM */
target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
target &= NOUVEAU_SVM_BIND_TARGET_MASK;
switch (target) {
case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
break;
default:
return -EINVAL;
}
/*
* FIXME: For now refuse a non-zero stride; we need to change the migrate
* kernel function to handle strides, to avoid creating a mess within
* each device driver.
*/
if (args->stride)
return -EINVAL;
/*
* Ok, we are asked to do something sane; for now we only support migrate
* commands, but we will add things like memory policy (what to do on a
* page fault) and maybe some other commands.
*/
mm = get_task_mm(current);
if (!mm) {
return -EINVAL;
}
mmap_read_lock(mm);
if (!cli->svm.svmm) {
mmap_read_unlock(mm);
mmput(mm);
return -EINVAL;
}
for (addr = args->va_start, end = args->va_end; addr < end;) {
struct vm_area_struct *vma;
unsigned long next;
vma = find_vma_intersection(mm, addr, end);
if (!vma)
break;
addr = max(addr, vma->vm_start);
next = min(vma->vm_end, end);
/* This is a best effort so we ignore errors */
nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
next);
addr = next;
}
/*
* FIXME: Return the number of pages we have migrated; again, we need to
* update the migrate API to return that information so that we can
* report it to user space.
*/
args->result = 0;
mmap_read_unlock(mm);
mmput(mm);
return 0;
}
/* Unlink channel instance from SVMM. */
void
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
struct nouveau_ivmm *ivmm;
if (svmm) {
mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
if (ivmm) {
list_del(&ivmm->head);
kfree(ivmm);
}
mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
}
}
/* Link channel instance to SVMM. */
int
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
struct nouveau_ivmm *ivmm;
if (svmm) {
if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
return -ENOMEM;
ivmm->svmm = svmm;
ivmm->inst = inst;
mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
}
return 0;
}
/* Invalidate SVMM address-range on GPU. */
void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
if (limit > start) {
nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
&(struct nvif_vmm_pfnclr_v0) {
.addr = start,
.size = limit - start,
}, sizeof(struct nvif_vmm_pfnclr_v0));
}
}
static int
nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *update)
{
struct nouveau_svmm *svmm =
container_of(mn, struct nouveau_svmm, notifier);
unsigned long start = update->start;
unsigned long limit = update->end;
if (!mmu_notifier_range_blockable(update))
return -EAGAIN;
SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
mutex_lock(&svmm->mutex);
if (unlikely(!svmm->vmm))
goto out;
/*
* Ignore invalidation callbacks for device private pages since
* the invalidation is handled as part of the migration process.
*/
if (update->event == MMU_NOTIFY_MIGRATE &&
update->owner == svmm->vmm->cli->drm->dev)
goto out;
if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
if (start < svmm->unmanaged.start) {
nouveau_svmm_invalidate(svmm, start,
svmm->unmanaged.limit);
}
start = svmm->unmanaged.limit;
}
nouveau_svmm_invalidate(svmm, start, limit);
out:
mutex_unlock(&svmm->mutex);
return 0;
}
static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
{
kfree(container_of(mn, struct nouveau_svmm, notifier));
}
static const struct mmu_notifier_ops nouveau_mn_ops = {
.invalidate_range_start = nouveau_svmm_invalidate_range_start,
.free_notifier = nouveau_svmm_free_notifier,
};
void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
struct nouveau_svmm *svmm = *psvmm;
if (svmm) {
mutex_lock(&svmm->mutex);
svmm->vmm = NULL;
mutex_unlock(&svmm->mutex);
mmu_notifier_put(&svmm->notifier);
*psvmm = NULL;
}
}
int
nouveau_svmm_init(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_svmm *svmm;
struct drm_nouveau_svm_init *args = data;
int ret;
/* We need to fail if svm is disabled */
if (!cli->drm->svm)
return -ENOSYS;
/* Allocate tracking for SVM-enabled VMM. */
if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
return -ENOMEM;
svmm->vmm = &cli->svm;
svmm->unmanaged.start = args->unmanaged_addr;
svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
mutex_init(&svmm->mutex);
/* Check that SVM isn't already enabled for the client. */
mutex_lock(&cli->mutex);
if (cli->svm.cli) {
ret = -EBUSY;
goto out_free;
}
/* Allocate a new GPU VMM that can support SVM (managed by the
* client, with replayable faults enabled).
*
* All future channel/memory allocations will make use of this
* VMM instead of the standard one.
*/
ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
cli->vmm.vmm.object.oclass, MANAGED,
args->unmanaged_addr, args->unmanaged_size,
&(struct gp100_vmm_v0) {
.fault_replay = true,
}, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
if (ret)
goto out_free;
mmap_write_lock(current->mm);
svmm->notifier.ops = &nouveau_mn_ops;
ret = __mmu_notifier_register(&svmm->notifier, current->mm);
if (ret)
goto out_mm_unlock;
/* Note, ownership of svmm transfers to mmu_notifier */
cli->svm.svmm = svmm;
cli->svm.cli = cli;
mmap_write_unlock(current->mm);
mutex_unlock(&cli->mutex);
return 0;
out_mm_unlock:
mmap_write_unlock(current->mm);
out_free:
mutex_unlock(&cli->mutex);
kfree(svmm);
return ret;
}
/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
{
SVM_DBG(svm, "replay");
WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
GP100_VMM_VN_FAULT_REPLAY,
&(struct gp100_vmm_fault_replay_vn) {},
sizeof(struct gp100_vmm_fault_replay_vn)));
}
/* Cancel a replayable fault that could not be handled.
*
* Cancelling the fault will trigger recovery to reset the engine
* and kill the offending channel (i.e. GPU SIGSEGV).
*/
static void
nouveau_svm_fault_cancel(struct nouveau_svm *svm,
u64 inst, u8 hub, u8 gpc, u8 client)
{
SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
GP100_VMM_VN_FAULT_CANCEL,
&(struct gp100_vmm_fault_cancel_v0) {
.hub = hub,
.gpc = gpc,
.client = client,
.inst = inst,
}, sizeof(struct gp100_vmm_fault_cancel_v0)));
}
static void
nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
struct nouveau_svm_fault *fault)
{
nouveau_svm_fault_cancel(svm, fault->inst,
fault->hub,
fault->gpc,
fault->client);
}
static int
nouveau_svm_fault_priority(u8 fault)
{
switch (fault) {
case FAULT_ACCESS_PREFETCH:
return 0;
case FAULT_ACCESS_READ:
return 1;
case FAULT_ACCESS_WRITE:
return 2;
case FAULT_ACCESS_ATOMIC:
return 3;
default:
WARN_ON_ONCE(1);
return -1;
}
}
static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
int ret;
if ((ret = (s64)fa->inst - fb->inst))
return ret;
if ((ret = (s64)fa->addr - fb->addr))
return ret;
return nouveau_svm_fault_priority(fa->access) -
nouveau_svm_fault_priority(fb->access);
}
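/* Decode one 32-byte replayable fault entry at 'offset' into a cached
 * struct nouveau_svm_fault, then clear the entry's valid bit (bit 31 of
 * the info word at offset 0x1c) so the hardware can reuse the slot.
 */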
static void
nouveau_svm_fault_cache(struct nouveau_svm *svm,
struct nouveau_svm_fault_buffer *buffer, u32 offset)
{
struct nvif_object *memory = &buffer->object;
const u32 instlo = nvif_rd32(memory, offset + 0x00);
const u32 insthi = nvif_rd32(memory, offset + 0x04);
const u32 addrlo = nvif_rd32(memory, offset + 0x08);
const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
const u32 timelo = nvif_rd32(memory, offset + 0x10);
const u32 timehi = nvif_rd32(memory, offset + 0x14);
const u32 engine = nvif_rd32(memory, offset + 0x18);
const u32 info = nvif_rd32(memory, offset + 0x1c);
const u64 inst = (u64)insthi << 32 | instlo;
const u8 gpc = (info & 0x1f000000) >> 24;
const u8 hub = (info & 0x00100000) >> 20;
const u8 client = (info & 0x00007f00) >> 8;
struct nouveau_svm_fault *fault;
/* XXX: I think we're supposed to spin waiting here */
if (WARN_ON(!(info & 0x80000000)))
return;
nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);
if (!buffer->fault[buffer->fault_nr]) {
fault = kmalloc(sizeof(*fault), GFP_KERNEL);
if (WARN_ON(!fault)) {
nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
return;
}
buffer->fault[buffer->fault_nr] = fault;
}
fault = buffer->fault[buffer->fault_nr++];
fault->inst = inst;
fault->addr = (u64)addrhi << 32 | addrlo;
fault->time = (u64)timehi << 32 | timelo;
fault->engine = engine;
fault->gpc = gpc;
fault->hub = hub;
fault->access = (info & 0x000f0000) >> 16;
fault->client = client;
fault->fault = (info & 0x0000001f);
SVM_DBG(svm, "fault %016llx %016llx %02x",
fault->inst, fault->addr, fault->access);
}
struct svm_notifier {
struct mmu_interval_notifier notifier;
struct nouveau_svmm *svmm;
};
static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
struct svm_notifier *sn =
container_of(mni, struct svm_notifier, notifier);
if (range->event == MMU_NOTIFY_EXCLUSIVE &&
range->owner == sn->svmm->vmm->cli->drm->dev)
return true;
/*
* serializes the update to mni->invalidate_seq done by caller and
* prevents invalidation of the PTE from progressing while HW is being
* programmed. This is very hacky and only works because the normal
* notifier that does invalidation is always called after the range
* notifier.
*/
if (mmu_notifier_range_blockable(range))
mutex_lock(&sn->svmm->mutex);
else if (!mutex_trylock(&sn->svmm->mutex))
return false;
mmu_interval_set_seq(mni, cur_seq);
mutex_unlock(&sn->svmm->mutex);
return true;
}
static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
.invalidate = nouveau_svm_range_invalidate,
};
static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
struct hmm_range *range,
struct nouveau_pfnmap_args *args)
{
struct page *page;
/*
* The address prepared here is passed through nvif_object_ioctl()
* to an eventual DMA map in something like gp100_vmm_pgt_pfn()
*
* This is all just encoding the internal hmm representation into a
* different nouveau internal representation.
*/
if (!(range->hmm_pfns[0] & HMM_PFN_VALID)) {
args->p.phys[0] = 0;
return;
}
page = hmm_pfn_to_page(range->hmm_pfns[0]);
/*
* Only map compound pages to the GPU if the CPU is also mapping the
* page as a compound page. Otherwise, the PTE protections might not be
* consistent (e.g., CPU only maps part of a compound page).
* Note that the underlying page might still be larger than the
* CPU mapping (e.g., a PUD sized compound page partially mapped with
* a PMD sized page table entry).
*/
if (hmm_pfn_to_map_order(range->hmm_pfns[0])) {
unsigned long addr = args->p.addr;
args->p.page = hmm_pfn_to_map_order(range->hmm_pfns[0]) +
PAGE_SHIFT;
args->p.size = 1UL << args->p.page;
args->p.addr &= ~(args->p.size - 1);
page -= (addr - args->p.addr) >> PAGE_SHIFT;
}
if (is_device_private_page(page))
args->p.phys[0] = nouveau_dmem_page_addr(page) |
NVIF_VMM_PFNMAP_V0_V |
NVIF_VMM_PFNMAP_V0_VRAM;
else
args->p.phys[0] = page_to_phys(page) |
NVIF_VMM_PFNMAP_V0_V |
NVIF_VMM_PFNMAP_V0_HOST;
if (range->hmm_pfns[0] & HMM_PFN_WRITE)
args->p.phys[0] |= NVIF_VMM_PFNMAP_V0_W;
}
static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
struct nouveau_drm *drm,
struct nouveau_pfnmap_args *args, u32 size,
struct svm_notifier *notifier)
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
struct mm_struct *mm = svmm->notifier.mm;
struct page *page;
unsigned long start = args->p.addr;
unsigned long notifier_seq;
int ret = 0;
ret = mmu_interval_notifier_insert(¬ifier->notifier, mm,
args->p.addr, args->p.size,
&nouveau_svm_mni_ops);
if (ret)
return ret;
while (true) {
if (time_after(jiffies, timeout)) {
ret = -EBUSY;
goto out;
}
notifier_seq = mmu_interval_read_begin(¬ifier->notifier);
mmap_read_lock(mm);
ret = make_device_exclusive_range(mm, start, start + PAGE_SIZE,
&page, drm->dev);
mmap_read_unlock(mm);
if (ret <= 0 || !page) {
ret = -EINVAL;
goto out;
}
mutex_lock(&svmm->mutex);
if (!mmu_interval_read_retry(¬ifier->notifier,
notifier_seq))
break;
mutex_unlock(&svmm->mutex);
}
/* Map the page on the GPU. */
args->p.page = 12;
args->p.size = PAGE_SIZE;
args->p.addr = start;
args->p.phys[0] = page_to_phys(page) |
NVIF_VMM_PFNMAP_V0_V |
NVIF_VMM_PFNMAP_V0_W |
NVIF_VMM_PFNMAP_V0_A |
NVIF_VMM_PFNMAP_V0_HOST;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
mutex_unlock(&svmm->mutex);
unlock_page(page);
put_page(page);
out:
mmu_interval_notifier_remove(¬ifier->notifier);
return ret;
}
static int nouveau_range_fault(struct nouveau_svmm *svmm,
struct nouveau_drm *drm,
struct nouveau_pfnmap_args *args, u32 size,
unsigned long hmm_flags,
struct svm_notifier *notifier)
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
/* Have HMM fault pages within the fault window to the GPU. */
unsigned long hmm_pfns[1];
struct hmm_range range = {
.notifier = ¬ifier->notifier,
.default_flags = hmm_flags,
.hmm_pfns = hmm_pfns,
.dev_private_owner = drm->dev,
};
struct mm_struct *mm = svmm->notifier.mm;
int ret;
ret = mmu_interval_notifier_insert(¬ifier->notifier, mm,
args->p.addr, args->p.size,
&nouveau_svm_mni_ops);
if (ret)
return ret;
range.start = notifier->notifier.interval_tree.start;
range.end = notifier->notifier.interval_tree.last + 1;
while (true) {
if (time_after(jiffies, timeout)) {
ret = -EBUSY;
goto out;
}
range.notifier_seq = mmu_interval_read_begin(range.notifier);
mmap_read_lock(mm);
ret = hmm_range_fault(&range);
mmap_read_unlock(mm);
if (ret) {
if (ret == -EBUSY)
continue;
goto out;
}
mutex_lock(&svmm->mutex);
if (mmu_interval_read_retry(range.notifier,
range.notifier_seq)) {
mutex_unlock(&svmm->mutex);
continue;
}
break;
}
nouveau_hmm_convert_pfn(drm, &range, args);
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
mutex_unlock(&svmm->mutex);
out:
mmu_interval_notifier_remove(¬ifier->notifier);
return ret;
}
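/* Fault buffer worker: drain pending entries into the local cache and
 * advance the GET pointer, sort the faults by instance/address/access,
 * resolve each instance pointer to its SVMM, then service each fault
 * (cancelling those from non-SVM channels or on failure) and finally ask
 * the GPU to replay the faulting accesses.
 */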
static void
nouveau_svm_fault(struct work_struct *work)
{
struct nouveau_svm_fault_buffer *buffer = container_of(work, typeof(*buffer), work);
struct nouveau_svm *svm = container_of(buffer, typeof(*svm), buffer[buffer->id]);
struct nvif_object *device = &svm->drm->client.device.object;
struct nouveau_svmm *svmm;
struct {
struct nouveau_pfnmap_args i;
u64 phys[1];
} args;
unsigned long hmm_flags;
u64 inst, start, limit;
int fi, fn;
int replay = 0, atomic = 0, ret;
/* Parse available fault buffer entries into a cache, and update
* the GET pointer so HW can reuse the entries.
*/
SVM_DBG(svm, "fault handler");
if (buffer->get == buffer->put) {
buffer->put = nvif_rd32(device, buffer->putaddr);
buffer->get = nvif_rd32(device, buffer->getaddr);
if (buffer->get == buffer->put)
return;
}
buffer->fault_nr = 0;
SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
while (buffer->get != buffer->put) {
nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
if (++buffer->get == buffer->entries)
buffer->get = 0;
}
nvif_wr32(device, buffer->getaddr, buffer->get);
SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);
/* Sort parsed faults by instance pointer to prevent unnecessary
* instance to SVMM translations, followed by address and access
* type to reduce the amount of work when handling the faults.
*/
sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
nouveau_svm_fault_cmp, NULL);
/* Lookup SVMM structure for each unique instance pointer. */
mutex_lock(&svm->mutex);
for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
if (!svmm || buffer->fault[fi]->inst != inst) {
struct nouveau_ivmm *ivmm =
nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
svmm = ivmm ? ivmm->svmm : NULL;
inst = buffer->fault[fi]->inst;
SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
}
buffer->fault[fi]->svmm = svmm;
}
mutex_unlock(&svm->mutex);
/* Process list of faults. */
args.i.i.version = 0;
args.i.i.type = NVIF_IOCTL_V0_MTHD;
args.i.m.version = 0;
args.i.m.method = NVIF_VMM_V0_PFNMAP;
args.i.p.version = 0;
for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
struct svm_notifier notifier;
struct mm_struct *mm;
/* Cancel any faults from non-SVM channels. */
if (!(svmm = buffer->fault[fi]->svmm)) {
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
/* We try to group handling of faults within a small
* window into a single update.
*/
start = buffer->fault[fi]->addr;
limit = start + PAGE_SIZE;
if (start < svmm->unmanaged.limit)
limit = min_t(u64, limit, svmm->unmanaged.start);
/*
* Prepare the GPU-side update of all pages within the
* fault window, determining required pages and access
* permissions based on pending faults.
*/
args.i.p.addr = start;
args.i.p.page = PAGE_SHIFT;
args.i.p.size = PAGE_SIZE;
/*
* Determine required permissions based on GPU fault
* access flags.
*/
switch (buffer->fault[fi]->access) {
case 0: /* READ. */
hmm_flags = HMM_PFN_REQ_FAULT;
break;
case 2: /* ATOMIC. */
atomic = true;
break;
case 3: /* PREFETCH. */
hmm_flags = 0;
break;
default:
hmm_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
break;
}
mm = svmm->notifier.mm;
if (!mmget_not_zero(mm)) {
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
notifier.svmm = svmm;
if (atomic)
ret = nouveau_atomic_range_fault(svmm, svm->drm,
&args.i, sizeof(args),
¬ifier);
else
ret = nouveau_range_fault(svmm, svm->drm, &args.i,
sizeof(args), hmm_flags,
¬ifier);
mmput(mm);
limit = args.i.p.addr + args.i.p.size;
for (fn = fi; ++fn < buffer->fault_nr; ) {
/* It's okay to skip over duplicate addresses from the
* same SVMM as faults are ordered by access type such
* that only the first one needs to be handled.
*
* i.e. WRITE faults appear first, thus any handling of
* pending READ faults will already be satisfied.
* But if a large page is mapped, make sure subsequent
* fault addresses have sufficient access permission.
*/
if (buffer->fault[fn]->svmm != svmm ||
buffer->fault[fn]->addr >= limit ||
(buffer->fault[fi]->access == FAULT_ACCESS_READ &&
!(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
(buffer->fault[fi]->access != FAULT_ACCESS_READ &&
buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
!(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
(buffer->fault[fi]->access != FAULT_ACCESS_READ &&
buffer->fault[fi]->access != FAULT_ACCESS_WRITE &&
buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
!(args.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
break;
}
/* If handling failed completely, cancel all faults. */
if (ret) {
while (fi < fn) {
struct nouveau_svm_fault *fault =
buffer->fault[fi++];
nouveau_svm_fault_cancel_fault(svm, fault);
}
} else
replay++;
}
/* Issue fault replay to the GPU. */
if (replay)
nouveau_svm_fault_replay(svm);
}
static int
nouveau_svm_event(struct nvif_event *event, void *argv, u32 argc)
{
struct nouveau_svm_fault_buffer *buffer = container_of(event, typeof(*buffer), notify);
schedule_work(&buffer->work);
return NVIF_EVENT_KEEP;
}
static struct nouveau_pfnmap_args *
nouveau_pfns_to_args(void *pfns)
{
return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
}
u64 *
nouveau_pfns_alloc(unsigned long npages)
{
struct nouveau_pfnmap_args *args;
args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
if (!args)
return NULL;
args->i.type = NVIF_IOCTL_V0_MTHD;
args->m.method = NVIF_VMM_V0_PFNMAP;
args->p.page = PAGE_SHIFT;
return args->p.phys;
}
void
nouveau_pfns_free(u64 *pfns)
{
struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
kfree(args);
}
void
nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
unsigned long addr, u64 *pfns, unsigned long npages)
{
struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
int ret;
args->p.addr = addr;
args->p.size = npages << PAGE_SHIFT;
mutex_lock(&svmm->mutex);
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args,
struct_size(args, p.phys, npages), NULL);
mutex_unlock(&svmm->mutex);
}
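/* A minimal usage sketch for the pfnmap helpers above (hypothetical
 * caller, shown only to illustrate the API; the in-tree user is the dmem
 * migration code):
 *
 *	u64 *pfns = nouveau_pfns_alloc(npages);
 *	if (pfns) {
 *		... fill pfns[i] with NVIF_VMM_PFNMAP_V0_* encoded addresses ...
 *		nouveau_pfns_map(svmm, mm, addr, pfns, npages);
 *		nouveau_pfns_free(pfns);
 *	}
 */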
static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
nvif_event_block(&buffer->notify);
flush_work(&buffer->work);
}
static int
nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
{
struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
struct nvif_object *device = &svm->drm->client.device.object;
buffer->get = nvif_rd32(device, buffer->getaddr);
buffer->put = nvif_rd32(device, buffer->putaddr);
SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
return nvif_event_allow(&buffer->notify);
}
static void
nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
{
struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
int i;
if (!nvif_object_constructed(&buffer->object))
return;
nouveau_svm_fault_buffer_fini(svm, id);
if (buffer->fault) {
for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
kfree(buffer->fault[i]);
kvfree(buffer->fault);
}
nvif_event_dtor(&buffer->notify);
nvif_object_dtor(&buffer->object);
}
static int
nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
{
struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
struct nouveau_drm *drm = svm->drm;
struct nvif_object *device = &drm->client.device.object;
struct nvif_clb069_v0 args = {};
int ret;
buffer->id = id;
ret = nvif_object_ctor(device, "svmFaultBuffer", 0, oclass, &args,
sizeof(args), &buffer->object);
if (ret < 0) {
SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
return ret;
}
nvif_object_map(&buffer->object, NULL, 0);
buffer->entries = args.entries;
buffer->getaddr = args.get;
buffer->putaddr = args.put;
INIT_WORK(&buffer->work, nouveau_svm_fault);
ret = nvif_event_ctor(&buffer->object, "svmFault", id, nouveau_svm_event, true, NULL, 0,
&buffer->notify);
if (ret)
return ret;
buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
if (!buffer->fault)
return -ENOMEM;
return nouveau_svm_fault_buffer_init(svm, id);
}
void
nouveau_svm_resume(struct nouveau_drm *drm)
{
struct nouveau_svm *svm = drm->svm;
if (svm)
nouveau_svm_fault_buffer_init(svm, 0);
}
void
nouveau_svm_suspend(struct nouveau_drm *drm)
{
struct nouveau_svm *svm = drm->svm;
if (svm)
nouveau_svm_fault_buffer_fini(svm, 0);
}
void
nouveau_svm_fini(struct nouveau_drm *drm)
{
struct nouveau_svm *svm = drm->svm;
if (svm) {
nouveau_svm_fault_buffer_dtor(svm, 0);
kfree(drm->svm);
drm->svm = NULL;
}
}
void
nouveau_svm_init(struct nouveau_drm *drm)
{
static const struct nvif_mclass buffers[] = {
{ VOLTA_FAULT_BUFFER_A, 0 },
{ MAXWELL_FAULT_BUFFER_A, 0 },
{}
};
struct nouveau_svm *svm;
int ret;
/* Disable on Volta and newer until channel recovery is fixed,
* otherwise clients will have a trivial way to trash the GPU
* for everyone.
*/
if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
return;
if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
return;
drm->svm->drm = drm;
mutex_init(&drm->svm->mutex);
INIT_LIST_HEAD(&drm->svm->inst);
ret = nvif_mclass(&drm->client.device.object, buffers);
if (ret < 0) {
SVM_DBG(svm, "No supported fault buffer class");
nouveau_svm_fini(drm);
return;
}
ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
if (ret) {
nouveau_svm_fini(drm);
return;
}
SVM_DBG(svm, "Initialised");
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_svm.c |
/*
* Copyright 2007 Dave Airlied
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Authors: Dave Airlied <[email protected]>
* Ben Skeggs <[email protected]>
* Jeremy Kolb <[email protected]>
*/
#include "nouveau_bo.h"
#include "nouveau_dma.h"
#include "nouveau_drv.h"
#include "nouveau_mem.h"
#include <nvif/push206e.h>
#include <nvhw/class/cl5039.h>
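/* Copy a buffer between placements using the NV50 M2MF engine.  The copy
 * is split into chunks of at most 4 MiB, each emitted as a 2D transfer 64
 * bytes wide and amount/64 lines tall, with either side programmed for
 * pitch or blocklinear layout depending on whether its memory is tiled.
 */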
int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
struct nvif_push *push = chan->chan.push;
u64 length = new_reg->size;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
int src_tiled = !!mem->kind;
int dst_tiled = !!nouveau_mem(new_reg)->kind;
int ret;
while (length) {
u32 amount, stride, height;
ret = PUSH_WAIT(push, 18 + 6 * (src_tiled + dst_tiled));
if (ret)
return ret;
amount = min(length, (u64)(4 * 1024 * 1024));
stride = 16 * 4;
height = amount / stride;
if (src_tiled) {
PUSH_MTHD(push, NV5039, SET_SRC_MEMORY_LAYOUT,
NVDEF(NV5039, SET_SRC_MEMORY_LAYOUT, V, BLOCKLINEAR),
SET_SRC_BLOCK_SIZE,
NVDEF(NV5039, SET_SRC_BLOCK_SIZE, WIDTH, ONE_GOB) |
NVDEF(NV5039, SET_SRC_BLOCK_SIZE, HEIGHT, ONE_GOB) |
NVDEF(NV5039, SET_SRC_BLOCK_SIZE, DEPTH, ONE_GOB),
SET_SRC_WIDTH, stride,
SET_SRC_HEIGHT, height,
SET_SRC_DEPTH, 1,
SET_SRC_LAYER, 0,
SET_SRC_ORIGIN,
NVVAL(NV5039, SET_SRC_ORIGIN, X, 0) |
NVVAL(NV5039, SET_SRC_ORIGIN, Y, 0));
} else {
PUSH_MTHD(push, NV5039, SET_SRC_MEMORY_LAYOUT,
NVDEF(NV5039, SET_SRC_MEMORY_LAYOUT, V, PITCH));
}
if (dst_tiled) {
PUSH_MTHD(push, NV5039, SET_DST_MEMORY_LAYOUT,
NVDEF(NV5039, SET_DST_MEMORY_LAYOUT, V, BLOCKLINEAR),
SET_DST_BLOCK_SIZE,
NVDEF(NV5039, SET_DST_BLOCK_SIZE, WIDTH, ONE_GOB) |
NVDEF(NV5039, SET_DST_BLOCK_SIZE, HEIGHT, ONE_GOB) |
NVDEF(NV5039, SET_DST_BLOCK_SIZE, DEPTH, ONE_GOB),
SET_DST_WIDTH, stride,
SET_DST_HEIGHT, height,
SET_DST_DEPTH, 1,
SET_DST_LAYER, 0,
SET_DST_ORIGIN,
NVVAL(NV5039, SET_DST_ORIGIN, X, 0) |
NVVAL(NV5039, SET_DST_ORIGIN, Y, 0));
} else {
PUSH_MTHD(push, NV5039, SET_DST_MEMORY_LAYOUT,
NVDEF(NV5039, SET_DST_MEMORY_LAYOUT, V, PITCH));
}
PUSH_MTHD(push, NV5039, OFFSET_IN_UPPER,
NVVAL(NV5039, OFFSET_IN_UPPER, VALUE, upper_32_bits(src_offset)),
OFFSET_OUT_UPPER,
NVVAL(NV5039, OFFSET_OUT_UPPER, VALUE, upper_32_bits(dst_offset)));
PUSH_MTHD(push, NV5039, OFFSET_IN, lower_32_bits(src_offset),
OFFSET_OUT, lower_32_bits(dst_offset),
PITCH_IN, stride,
PITCH_OUT, stride,
LINE_LENGTH_IN, stride,
LINE_COUNT, height,
FORMAT,
NVDEF(NV5039, FORMAT, IN, ONE) |
NVDEF(NV5039, FORMAT, OUT, ONE),
BUFFER_NOTIFY,
NVDEF(NV5039, BUFFER_NOTIFY, TYPE, WRITE_ONLY));
PUSH_MTHD(push, NV5039, NO_OPERATION, 0x00000000);
length -= amount;
src_offset += amount;
dst_offset += amount;
}
return 0;
}
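/*
* Illustrative sketch, not part of the original file: the chunking done by
* nv50_bo_move_m2mf() above.  Each loop iteration copies at most 4 MiB as a
* 2D transfer with a fixed 64-byte line stride, so the line count is just the
* chunk size divided by that stride.  The helper name is made up here purely
* for illustration and is not used by the driver.
*/
static inline u32 nv50_m2mf_chunk_lines(u64 remaining)
{
u32 amount = min_t(u64, remaining, 4 * 1024 * 1024);
u32 stride = 16 * 4; /* 16 dwords == 64 bytes, as in the loop above */
return amount / stride;
}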
int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
struct nvif_push *push = chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 6);
if (ret)
return ret;
PUSH_MTHD(push, NV5039, SET_OBJECT, handle);
PUSH_MTHD(push, NV5039, SET_CONTEXT_DMA_NOTIFY, chan->drm->ntfy.handle,
SET_CONTEXT_DMA_BUFFER_IN, chan->vram.handle,
SET_CONTEXT_DMA_BUFFER_OUT, chan->vram.handle);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_bo5039.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include <nvif/if0004.h>
#include <nvif/push006c.h>
struct nv04_fence_chan {
struct nouveau_fence_chan base;
};
struct nv04_fence_priv {
struct nouveau_fence_priv base;
};
static int
nv04_fence_emit(struct nouveau_fence *fence)
{
struct nvif_push *push = fence->channel->chan.push;
int ret = PUSH_WAIT(push, 2);
if (ret == 0) {
PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);
PUSH_KICK(push);
}
return ret;
}
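/*
* Editorial note, not part of the original file: the sequence number written
* above through software method 0x0150 appears to be the same value that
* nv04_fence_read() below fetches back with NV04_NVSW_GET_REF, i.e. fence
* progress on these chips seems to be tracked through the NV_SW software
* object rather than a memory-backed fence buffer.
*/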
static int
nv04_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
return -ENODEV;
}
static u32
nv04_fence_read(struct nouveau_channel *chan)
{
struct nv04_nvsw_get_ref_v0 args = {};
WARN_ON(nvif_object_mthd(&chan->nvsw, NV04_NVSW_GET_REF,
&args, sizeof(args)));
return args.ref;
}
static void
nv04_fence_context_del(struct nouveau_channel *chan)
{
struct nv04_fence_chan *fctx = chan->fence;
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
}
static int
nv04_fence_context_new(struct nouveau_channel *chan)
{
struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (fctx) {
nouveau_fence_context_new(chan, &fctx->base);
fctx->base.emit = nv04_fence_emit;
fctx->base.sync = nv04_fence_sync;
fctx->base.read = nv04_fence_read;
chan->fence = fctx;
return 0;
}
return -ENOMEM;
}
static void
nv04_fence_destroy(struct nouveau_drm *drm)
{
struct nv04_fence_priv *priv = drm->fence;
drm->fence = NULL;
kfree(priv);
}
int
nv04_fence_create(struct nouveau_drm *drm)
{
struct nv04_fence_priv *priv;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base.dtor = nv04_fence_destroy;
priv->base.context_new = nv04_fence_context_new;
priv->base.context_del = nv04_fence_context_del;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nv04_fence.c |
// SPDX-License-Identifier: MIT
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_fb_helper.h>
#include "nouveau_drv.h"
#include "nouveau_acpi.h"
#include "nouveau_vga.h"
static unsigned int
nouveau_vga_set_decode(struct pci_dev *pdev, bool state)
{
struct nouveau_drm *drm = nouveau_drm(pci_get_drvdata(pdev));
struct nvif_object *device = &drm->client.device.object;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE &&
drm->client.device.info.chipset >= 0x4c)
nvif_wr32(device, 0x088060, state);
else
if (drm->client.device.info.chipset >= 0x40)
nvif_wr32(device, 0x088054, state);
else
nvif_wr32(device, 0x001854, state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
else
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
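/*
* Editorial note, not part of the original file: the register poked above to
* toggle legacy VGA decoding depends on the chip generation --
*   Curie (NV4x) with chipset >= 0x4c: 0x088060
*   chipset >= 0x40:                   0x088054
*   older chips:                       0x001854
* and the return value tells the VGA arbiter which legacy/normal resources
* the device still decodes afterwards.
*/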
static void
nouveau_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
pr_err("VGA switcheroo: switched nouveau on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
nouveau_pmops_resume(&pdev->dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
pr_err("VGA switcheroo: switched nouveau off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
nouveau_switcheroo_optimus_dsm();
nouveau_pmops_suspend(&pdev->dev);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
static void
nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_fb_helper_output_poll_changed(dev);
}
static bool
nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
/*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops
nouveau_switcheroo_ops = {
.set_gpu_state = nouveau_switcheroo_set_state,
.reprobe = nouveau_switcheroo_reprobe,
.can_switch = nouveau_switcheroo_can_switch,
};
void
nouveau_vga_init(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
bool runtime = nouveau_pmops_runtime();
struct pci_dev *pdev;
/* only relevant for PCI devices */
if (!dev_is_pci(dev->dev))
return;
pdev = to_pci_dev(dev->dev);
vga_client_register(pdev, nouveau_vga_set_decode);
/* don't register Thunderbolt eGPU with vga_switcheroo */
if (pci_is_thunderbolt_attached(pdev))
return;
vga_switcheroo_register_client(pdev, &nouveau_switcheroo_ops, runtime);
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
}
void
nouveau_vga_fini(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
bool runtime = nouveau_pmops_runtime();
struct pci_dev *pdev;
/* only relevant for PCI devices */
if (!dev_is_pci(dev->dev))
return;
pdev = to_pci_dev(dev->dev);
vga_client_unregister(pdev);
if (pci_is_thunderbolt_attached(pdev))
return;
vga_switcheroo_unregister_client(pdev);
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
}
void
nouveau_vga_lastclose(struct drm_device *dev)
{
vga_switcheroo_process_delayed_switch();
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_vga.c |
/*
* Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
*/
#include <linux/dma-buf.h>
#include <drm/ttm/ttm_tt.h>
#include "nouveau_drv.h"
#include "nouveau_gem.h"
struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages,
nvbo->bo.ttm->num_pages);
}
struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_gem_object *obj;
struct nouveau_bo *nvbo;
struct dma_resv *robj = attach->dmabuf->resv;
u64 size = attach->dmabuf->size;
int align = 0;
int ret;
dma_resv_lock(robj, NULL);
nvbo = nouveau_bo_alloc(&drm->client, &size, &align,
NOUVEAU_GEM_DOMAIN_GART, 0, 0, true);
if (IS_ERR(nvbo)) {
obj = ERR_CAST(nvbo);
goto unlock;
}
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
if (ret) {
nouveau_bo_ref(NULL, &nvbo);
obj = ERR_PTR(-ENOMEM);
goto unlock;
}
ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART,
sg, robj);
if (ret) {
obj = ERR_PTR(ret);
goto unlock;
}
obj = &nvbo->bo.base;
unlock:
dma_resv_unlock(robj);
return obj;
}
int nouveau_gem_prime_pin(struct drm_gem_object *obj)
{
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
int ret;
/* pin buffer into GTT */
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
if (ret)
return -EINVAL;
return 0;
}
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
{
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
nouveau_bo_unpin(nvbo);
}
struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
int flags)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gobj);
if (nvbo->no_share)
return ERR_PTR(-EPERM);
return drm_gem_prime_export(gobj, flags);
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_prime.c |
/*
* Copyright 2005-2006 Erik Waling
* Copyright 2006 Stephane Marchesin
* Copyright 2007-2009 Stuart Bennett
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
#include <linux/io-mapping.h>
#include <linux/firmware.h>
/* these defines are made up */
#define NV_CIO_CRE_44_HEADA 0x0
#define NV_CIO_CRE_44_HEADB 0x3
#define FEATURE_MOBILE 0x10 /* also FEATURE_QUADRO for BMP */
#define EDID1_LEN 128
#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
#define LOG_OLD_VALUE(x)
struct init_exec {
bool execute;
bool repeat;
};
static bool nv_cksum(const uint8_t *data, unsigned int length)
{
/*
* There are a few checksums in the BIOS, so here's a generic checking
* function.  Returns true when the checksum is bad, i.e. the bytes do
* not sum to zero.
*/
int i;
uint8_t sum = 0;
for (i = 0; i < length; i++)
sum += data[i];
if (sum)
return true;
return false;
}
static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
{
int compare_record_len, i = 0;
uint16_t compareclk, scriptptr = 0;
if (bios->major_version < 5) /* pre BIT */
compare_record_len = 3;
else
compare_record_len = 4;
do {
compareclk = ROM16(bios->data[clktable + compare_record_len * i]);
if (pxclk >= compareclk * 10) {
if (bios->major_version < 5) {
uint8_t tmdssub = bios->data[clktable + 2 + compare_record_len * i];
scriptptr = ROM16(bios->data[bios->init_script_tbls_ptr + tmdssub * 2]);
} else
scriptptr = ROM16(bios->data[clktable + 2 + compare_record_len * i]);
break;
}
i++;
} while (compareclk);
return scriptptr;
}
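/*
* Illustrative sketch, not part of the original file: the record layouts that
* clkcmptable() above walks, as implied by its indexing.  The stored clock is
* in 10 kHz units, hence the "* 10" before comparing against pxclk (kHz).
* These structs are illustration only and are not used by the driver.
*/
struct bmp_clk_cmp_rec { /* pre-BIT BIOS, 3 bytes per record */
u16 compareclk; /* little-endian, 10 kHz units */
u8 init_script_tbl_sub; /* index into the init script tables */
} __packed;
struct bit_clk_cmp_rec { /* BIT BIOS, 4 bytes per record */
u16 compareclk; /* little-endian, 10 kHz units */
u16 scriptptr; /* BIOS image offset of the script to run */
} __packed;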
static void
run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
struct dcb_output *dcbent, int head, bool dl)
{
struct nouveau_drm *drm = nouveau_drm(dev);
NV_INFO(drm, "0x%04X: Parsing digital output script table\n",
scriptptr);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, head ? NV_CIO_CRE_44_HEADB :
NV_CIO_CRE_44_HEADA);
nouveau_bios_run_init_table(dev, scriptptr, dcbent, head);
nv04_dfp_bind_head(dev, dcbent, head, dl);
}
static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & DCB_OUTPUT_C ? 1 : 0);
uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
#ifdef __powerpc__
struct pci_dev *pdev = to_pci_dev(dev->dev);
#endif
if (!bios->fp.xlated_entry || !sub || !scriptofs)
return -EINVAL;
run_digital_op_script(dev, scriptofs, dcbent, head, bios->fp.dual_link);
if (script == LVDS_PANEL_OFF) {
/* off-on delay in ms */
mdelay(ROM16(bios->data[bios->fp.xlated_entry + 7]));
}
#ifdef __powerpc__
/* Powerbook specific quirks */
if (script == LVDS_RESET &&
(pdev->device == 0x0179 || pdev->device == 0x0189 ||
pdev->device == 0x0329))
nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
#endif
return 0;
}
static int run_lvds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk)
{
/*
* The BIT LVDS table's header has the information to set up the
* necessary registers.
* A bitmask byte and a dual-link transition pxclk value for use in
* selecting the init script when not using straps; 4 script pointers
* for panel power, selected by output and on/off; and 8 table pointers
* for panel init, the needed one determined by output, and bits in the
* conf byte. These tables are similar to the TMDS tables, consisting
* of a list of pxclks and script pointers.
*/
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
uint16_t scriptptr = 0, clktable;
/*
* For now we assume version 3.0 table - g80 support will need some
* changes
*/
switch (script) {
case LVDS_INIT:
return -ENOSYS;
case LVDS_BACKLIGHT_ON:
case LVDS_PANEL_ON:
scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 7 + outputset * 2]);
break;
case LVDS_BACKLIGHT_OFF:
case LVDS_PANEL_OFF:
scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]);
break;
case LVDS_RESET:
clktable = bios->fp.lvdsmanufacturerpointer + 15;
if (dcbent->or == 4)
clktable += 8;
if (dcbent->lvdsconf.use_straps_for_mode) {
if (bios->fp.dual_link)
clktable += 4;
if (bios->fp.if_is_24bit)
clktable += 2;
} else {
/* using EDID */
int cmpval_24bit = (dcbent->or == 4) ? 4 : 1;
if (bios->fp.dual_link) {
clktable += 4;
cmpval_24bit <<= 1;
}
if (bios->fp.strapless_is_24bit & cmpval_24bit)
clktable += 2;
}
clktable = ROM16(bios->data[clktable]);
if (!clktable) {
NV_ERROR(drm, "Pixel clock comparison table not found\n");
return -ENOENT;
}
scriptptr = clkcmptable(bios, clktable, pxclk);
}
if (!scriptptr) {
NV_ERROR(drm, "LVDS output init script not found\n");
return -ENOENT;
}
run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link);
return 0;
}
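/*
* Illustrative offset map, not part of the original file: the BIT-style LVDS
* table layout implied by run_lvds_table() above and by
* nouveau_bios_parse_lvds_table() below, relative to
* bios->fp.lvdsmanufacturerpointer --
*   + 0: version, +1: header length, +2: record length, +3: max records
*   + 4: strapless 18/24-bit bitmask
*   + 5: dual-link transition pxclk (16 bits, 10 kHz units)
*   + 7: panel-on/backlight-on script pointers (2 x 16 bits, by output set)
*   +11: panel-off/backlight-off script pointers (2 x 16 bits)
*   +15: eight 16-bit pointers to pxclk comparison tables for LVDS_RESET,
*        selected by output set, dual-link and 24-bit flags
*/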
int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk)
{
/*
* LVDS operations are multiplexed in an effort to present a single API
* which works with two vastly differing underlying structures.
* This acts as the demux
*/
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->client.device.object;
struct nvbios *bios = &drm->vbios;
uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
uint32_t sel_clk_binding, sel_clk;
int ret;
if (bios->fp.last_script_invoc == (script << 1 | head) || !lvds_ver ||
(lvds_ver >= 0x30 && script == LVDS_INIT))
return 0;
if (!bios->fp.lvds_init_run) {
bios->fp.lvds_init_run = true;
call_lvds_script(dev, dcbent, head, LVDS_INIT, pxclk);
}
if (script == LVDS_PANEL_ON && bios->fp.reset_after_pclk_change)
call_lvds_script(dev, dcbent, head, LVDS_RESET, pxclk);
if (script == LVDS_RESET && bios->fp.power_off_for_reset)
call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk);
NV_INFO(drm, "Calling LVDS script %d:\n", script);
/* don't let script change pll->head binding */
sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
if (lvds_ver < 0x30)
ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
else
ret = run_lvds_table(dev, dcbent, head, script, pxclk);
bios->fp.last_script_invoc = (script << 1 | head);
sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
/* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
return ret;
}
struct lvdstableheader {
uint8_t lvds_ver, headerlen, recordlen;
};
static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct nvbios *bios, struct lvdstableheader *lth)
{
/*
* BMP version (0xa) LVDS table has a simple header of version and
* record length. The BIT LVDS table has the typical BIT table header:
* version byte, header length byte, record length byte, and a byte for
* the maximum number of records that can be held in the table.
*/
struct nouveau_drm *drm = nouveau_drm(dev);
uint8_t lvds_ver, headerlen, recordlen;
memset(lth, 0, sizeof(struct lvdstableheader));
if (bios->fp.lvdsmanufacturerpointer == 0x0) {
NV_ERROR(drm, "Pointer to LVDS manufacturer table invalid\n");
return -EINVAL;
}
lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
switch (lvds_ver) {
case 0x0a: /* pre NV40 */
headerlen = 2;
recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
break;
case 0x30: /* NV4x */
headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
if (headerlen < 0x1f) {
NV_ERROR(drm, "LVDS table header not understood\n");
return -EINVAL;
}
recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
break;
case 0x40: /* G80/G90 */
headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
if (headerlen < 0x7) {
NV_ERROR(drm, "LVDS table header not understood\n");
return -EINVAL;
}
recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
break;
default:
NV_ERROR(drm,
"LVDS table revision %d.%d not currently supported\n",
lvds_ver >> 4, lvds_ver & 0xf);
return -ENOSYS;
}
lth->lvds_ver = lvds_ver;
lth->headerlen = headerlen;
lth->recordlen = recordlen;
return 0;
}
static int
get_fp_strap(struct drm_device *dev, struct nvbios *bios)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->client.device.object;
/*
* The fp strap is normally dictated by the "User Strap" in
* PEXTDEV_BOOT_0[20:16], but on BMP cards when bit 2 of the
* Internal_Flags struct at 0x48 is set, the user strap gets overridden
* by the PCI subsystem ID during POST, but not before the previous user
* strap has been committed to CR58 for CR57=0xf on head A, which may be
* read and used instead.
*/
if (bios->major_version < 5 && bios->data[0x48] & 0x4)
return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_MAXWELL)
return nvif_rd32(device, 0x001800) & 0x0000000f;
else
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
else
return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
}
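/*
* Editorial summary, not part of the original file: where the flat panel
* strap is read from, as implemented above --
*   BMP BIOS (< v5) with Internal_Flags bit 2 set: CR58[3:0] with CR57 = 0xf
*   Maxwell and newer:                             register 0x001800[3:0]
*   Tesla (G80) and newer:                         PEXTDEV_BOOT_0[27:24]
*   everything else:                               PEXTDEV_BOOT_0[19:16]
*/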
static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
{
struct nouveau_drm *drm = nouveau_drm(dev);
uint8_t *fptable;
uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex;
int ret, ofs, fpstrapping;
struct lvdstableheader lth;
if (bios->fp.fptablepointer == 0x0) {
/* Most laptop cards lack an fp table. They use DDC. */
NV_DEBUG(drm, "Pointer to flat panel table invalid\n");
bios->digital_min_front_porch = 0x4b;
return 0;
}
fptable = &bios->data[bios->fp.fptablepointer];
fptable_ver = fptable[0];
switch (fptable_ver) {
/*
* BMP version 0x5.0x11 BIOSen have version 1 like tables, but no
* version field, and miss one of the spread spectrum/PWM bytes.
* This could affect early GF2Go parts (not seen any appropriate ROMs
* though). Here we assume that a version of 0x05 matches this case
* (combining with a BMP version check would be better), as the
* common case for the panel type field is 0x0005, and that is in
* fact what we are reading the first byte of.
*/
case 0x05: /* some NV10, 11, 15, 16 */
recordlen = 42;
ofs = -1;
break;
case 0x10: /* some NV15/16, and NV11+ */
recordlen = 44;
ofs = 0;
break;
case 0x20: /* NV40+ */
headerlen = fptable[1];
recordlen = fptable[2];
fpentries = fptable[3];
/*
* fptable[4] is the minimum
* RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
*/
bios->digital_min_front_porch = fptable[4];
ofs = -7;
break;
default:
NV_ERROR(drm,
"FP table revision %d.%d not currently supported\n",
fptable_ver >> 4, fptable_ver & 0xf);
return -ENOSYS;
}
if (!bios->is_mobile) /* !mobile only needs digital_min_front_porch */
return 0;
ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
if (ret)
return ret;
if (lth.lvds_ver == 0x30 || lth.lvds_ver == 0x40) {
bios->fp.fpxlatetableptr = bios->fp.lvdsmanufacturerpointer +
lth.headerlen + 1;
bios->fp.xlatwidth = lth.recordlen;
}
if (bios->fp.fpxlatetableptr == 0x0) {
NV_ERROR(drm, "Pointer to flat panel xlat table invalid\n");
return -EINVAL;
}
fpstrapping = get_fp_strap(dev, bios);
fpindex = bios->data[bios->fp.fpxlatetableptr +
fpstrapping * bios->fp.xlatwidth];
if (fpindex > fpentries) {
NV_ERROR(drm, "Bad flat panel table index\n");
return -ENOENT;
}
/* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
if (lth.lvds_ver > 0x10)
bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
/*
* If either the strap or the xlated fpindex value is 0xf, there is no
* panel using a strap-derived bios mode present. This condition
* includes, but is different from, the DDC panel indicator above.
*/
if (fpstrapping == 0xf || fpindex == 0xf)
return 0;
bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen +
recordlen * fpindex + ofs;
NV_INFO(drm, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1,
ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1,
ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10);
return 0;
}
bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
if (!mode) /* just checking whether we can produce a mode */
return bios->fp.mode_ptr;
memset(mode, 0, sizeof(struct drm_display_mode));
/*
* For version 1.0 (version in byte 0):
* bytes 1-2 are "panel type", including bits on whether Colour/mono,
* single/dual link, and type (TFT etc.)
* bytes 3-6 are bits per colour in RGBX
*/
mode->clock = ROM16(mode_entry[7]) * 10;
/* bytes 9-10 is HActive */
mode->hdisplay = ROM16(mode_entry[11]) + 1;
/*
* bytes 13-14 is HValid Start
* bytes 15-16 is HValid End
*/
mode->hsync_start = ROM16(mode_entry[17]) + 1;
mode->hsync_end = ROM16(mode_entry[19]) + 1;
mode->htotal = ROM16(mode_entry[21]) + 1;
/* bytes 23-24, 27-30 similarly, but vertical */
mode->vdisplay = ROM16(mode_entry[25]) + 1;
mode->vsync_start = ROM16(mode_entry[31]) + 1;
mode->vsync_end = ROM16(mode_entry[33]) + 1;
mode->vtotal = ROM16(mode_entry[35]) + 1;
mode->flags |= (mode_entry[37] & 0x10) ?
DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
mode->flags |= (mode_entry[37] & 0x1) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
/*
* bytes 38-39 relate to spread spectrum settings
* bytes 40-43 are something to do with PWM
*/
mode->status = MODE_OK;
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
return bios->fp.mode_ptr;
}
int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, bool *if_is_24bit)
{
/*
* The LVDS table header is (mostly) described in
* parse_lvds_manufacturer_table_header(): the BIT header additionally
* contains the dual-link transition pxclk (in 10s kHz), at byte 5 - if
* straps are not being used for the panel, this specifies the frequency
* at which modes should be set up in the dual link style.
*
* Following the header, the BMP (ver 0xa) table has several records,
* indexed by a separate xlat table, indexed in turn by the fp strap in
* EXTDEV_BOOT. Each record had a config byte, followed by 6 script
* numbers for use by INIT_SUB which controlled panel init and power,
* and finally a dword of ms to sleep between power off and on
* operations.
*
* In the BIT versions, the table following the header serves as an
* integrated config and xlat table: the records in the table are
* indexed by the FP strap nibble in EXTDEV_BOOT, and each record has
* two bytes - the first as a config byte, the second for indexing the
* fp mode table pointed to by the BIT 'D' table
*
* DDC is not used until after card init, so selecting the correct table
* entry and setting the dual link flag for EDID equipped panels,
* requiring tests against the native-mode pixel clock, cannot be done
* until later, when this function should be called with non-zero pxclk
*/
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
struct lvdstableheader lth;
uint16_t lvdsofs;
int ret, chip_version = bios->chip_version;
ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
if (ret)
return ret;
switch (lth.lvds_ver) {
case 0x0a: /* pre NV40 */
lvdsmanufacturerindex = bios->data[
bios->fp.fpxlatemanufacturertableptr +
fpstrapping];
/* we're done if this isn't the EDID panel case */
if (!pxclk)
break;
if (chip_version < 0x25) {
/* nv17 behaviour
*
* It seems the old style lvds script pointer is reused
* to select 18/24 bit colour depth for EDID panels.
*/
lvdsmanufacturerindex =
(bios->legacy.lvds_single_a_script_ptr & 1) ?
2 : 0;
if (pxclk >= bios->fp.duallink_transition_clk)
lvdsmanufacturerindex++;
} else if (chip_version < 0x30) {
/* nv28 behaviour (off-chip encoder)
*
* nv28 does a complex dance of first using byte 121 of
* the EDID to choose the lvdsmanufacturerindex, then
* later attempting to match the EDID manufacturer and
* product IDs in a table (signature 'pidt' (panel id
* table?)), setting an lvdsmanufacturerindex of 0 and
* an fp strap of the match index (or 0xf if none)
*/
lvdsmanufacturerindex = 0;
} else {
/* nv31, nv34 behaviour */
lvdsmanufacturerindex = 0;
if (pxclk >= bios->fp.duallink_transition_clk)
lvdsmanufacturerindex = 2;
if (pxclk >= 140000)
lvdsmanufacturerindex = 3;
}
/*
* nvidia set the high nibble of (cr57=f, cr58) to
* lvdsmanufacturerindex in this case; we don't
*/
break;
case 0x30: /* NV4x */
case 0x40: /* G80/G90 */
lvdsmanufacturerindex = fpstrapping;
break;
default:
NV_ERROR(drm, "LVDS table revision not currently supported\n");
return -ENOSYS;
}
lvdsofs = bios->fp.xlated_entry = bios->fp.lvdsmanufacturerpointer + lth.headerlen + lth.recordlen * lvdsmanufacturerindex;
switch (lth.lvds_ver) {
case 0x0a:
bios->fp.power_off_for_reset = bios->data[lvdsofs] & 1;
bios->fp.reset_after_pclk_change = bios->data[lvdsofs] & 2;
bios->fp.dual_link = bios->data[lvdsofs] & 4;
bios->fp.link_c_increment = bios->data[lvdsofs] & 8;
*if_is_24bit = bios->data[lvdsofs] & 16;
break;
case 0x30:
case 0x40:
/*
* No sign of the "power off for reset" or "reset for panel
* on" bits, but it's safer to assume we should do both
*/
bios->fp.power_off_for_reset = true;
bios->fp.reset_after_pclk_change = true;
/*
* It's OK that lvdsofs is wrong for the nv4x EDID case; dual_link is
* overwritten, and if_is_24bit isn't used
*/
bios->fp.dual_link = bios->data[lvdsofs] & 1;
bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
break;
}
/* set dual_link flag for EDID case */
if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
*dl = bios->fp.dual_link;
return 0;
}
int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
{
/*
* the pxclk parameter is in kHz
*
* This runs the TMDS regs setting code found on BIT bios cards
*
* For ffs(or) == 1 use the first table, for ffs(or) == 2 and
* ffs(or) == 3, use the second.
*/
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->client.device.object;
struct nvbios *bios = &drm->vbios;
int cv = bios->chip_version;
uint16_t clktable = 0, scriptptr;
uint32_t sel_clk_binding, sel_clk;
/* pre-nv17 off-chip tmds uses scripts, post nv17 doesn't */
if (cv >= 0x17 && cv != 0x1a && cv != 0x20 &&
dcbent->location != DCB_LOC_ON_CHIP)
return 0;
switch (ffs(dcbent->or)) {
case 1:
clktable = bios->tmds.output0_script_ptr;
break;
case 2:
case 3:
clktable = bios->tmds.output1_script_ptr;
break;
}
if (!clktable) {
NV_ERROR(drm, "Pixel clock comparison table not found\n");
return -EINVAL;
}
scriptptr = clkcmptable(bios, clktable, pxclk);
if (!scriptptr) {
NV_ERROR(drm, "TMDS output init script not found\n");
return -ENOENT;
}
/* don't let script change pll->head binding */
sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
return 0;
}
static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
{
/*
* Parses the init table segment for pointers used in script execution.
*
* offset + 0 (16 bits): init script tables pointer
* offset + 2 (16 bits): macro index table pointer
* offset + 4 (16 bits): macro table pointer
* offset + 6 (16 bits): condition table pointer
* offset + 8 (16 bits): io condition table pointer
* offset + 10 (16 bits): io flag condition table pointer
* offset + 12 (16 bits): init function table pointer
*/
bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
}
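/*
* Illustrative sketch, not part of the original file: the full pointer block
* described in the comment above.  Only the first word is consumed here; the
* struct exists purely to visualise the layout (all fields are little-endian
* 16-bit offsets into the BIOS image) and is not used by the driver.
*/
struct bios_script_tbl_ptrs {
u16 init_script_tbls;
u16 macro_index_tbl;
u16 macro_tbl;
u16 condition_tbl;
u16 io_condition_tbl;
u16 io_flag_condition_tbl;
u16 init_function_tbl;
} __packed;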
static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
/*
* Parses the load detect values for g80 cards.
*
* offset + 0 (16 bits): loadval table pointer
*/
struct nouveau_drm *drm = nouveau_drm(dev);
uint16_t load_table_ptr;
uint8_t version, headerlen, entrylen, num_entries;
if (bitentry->length != 3) {
NV_ERROR(drm, "Do not understand BIT A table\n");
return -EINVAL;
}
load_table_ptr = ROM16(bios->data[bitentry->offset]);
if (load_table_ptr == 0x0) {
NV_DEBUG(drm, "Pointer to BIT loadval table invalid\n");
return -EINVAL;
}
version = bios->data[load_table_ptr];
if (version != 0x10) {
NV_ERROR(drm, "BIT loadval table version %d.%d not supported\n",
version >> 4, version & 0xF);
return -ENOSYS;
}
headerlen = bios->data[load_table_ptr + 1];
entrylen = bios->data[load_table_ptr + 2];
num_entries = bios->data[load_table_ptr + 3];
if (headerlen != 4 || entrylen != 4 || num_entries != 2) {
NV_ERROR(drm, "Do not understand BIT loadval table\n");
return -EINVAL;
}
/* First entry is normal dac, 2nd tv-out perhaps? */
bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
return 0;
}
static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
/*
* Parses the flat panel table segment that the bit entry points to.
* Starting at bitentry->offset:
*
* offset + 0 (16 bits): ??? table pointer - seems to have 18 byte
* records beginning with a freq.
* offset + 2 (16 bits): mode table pointer
*/
struct nouveau_drm *drm = nouveau_drm(dev);
if (bitentry->length != 4) {
NV_ERROR(drm, "Do not understand BIT display table\n");
return -EINVAL;
}
bios->fp.fptablepointer = ROM16(bios->data[bitentry->offset + 2]);
return 0;
}
static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
/*
* Parses the init table segment that the bit entry points to.
*
* See parse_script_table_pointers for layout
*/
struct nouveau_drm *drm = nouveau_drm(dev);
if (bitentry->length < 14) {
NV_ERROR(drm, "Do not understand init table\n");
return -EINVAL;
}
parse_script_table_pointers(bios, bitentry->offset);
return 0;
}
static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
/*
* BIT 'i' (info?) table
*
* offset + 0 (32 bits): BIOS version dword (as in B table)
* offset + 5 (8 bits): BIOS feature byte (same as for BMP?)
* offset + 13 (16 bits): pointer to table containing DAC load
* detection comparison values
*
* There's other things in the table, purpose unknown
*/
struct nouveau_drm *drm = nouveau_drm(dev);
uint16_t daccmpoffset;
uint8_t dacver, dacheaderlen;
if (bitentry->length < 6) {
NV_ERROR(drm, "BIT i table too short for needed information\n");
return -EINVAL;
}
/*
* bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's
* Quadro identity crisis), other bits possibly as for BMP feature byte
*/
bios->feature_byte = bios->data[bitentry->offset + 5];
bios->is_mobile = bios->feature_byte & FEATURE_MOBILE;
if (bitentry->length < 15) {
NV_WARN(drm, "BIT i table not long enough for DAC load "
"detection comparison table\n");
return -EINVAL;
}
daccmpoffset = ROM16(bios->data[bitentry->offset + 13]);
/* doesn't exist on g80 */
if (!daccmpoffset)
return 0;
/*
* The first value in the table, following the header, is the
* comparison value, the second entry is a comparison value for
* TV load detection.
*/
dacver = bios->data[daccmpoffset];
dacheaderlen = bios->data[daccmpoffset + 1];
if (dacver != 0x00 && dacver != 0x10) {
NV_WARN(drm, "DAC load detection comparison table version "
"%d.%d not known\n", dacver >> 4, dacver & 0xf);
return -ENOSYS;
}
bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
return 0;
}
static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
/*
* Parses the LVDS table segment that the bit entry points to.
* Starting at bitentry->offset:
*
* offset + 0 (16 bits): LVDS strap xlate table pointer
*/
struct nouveau_drm *drm = nouveau_drm(dev);
if (bitentry->length != 2) {
NV_ERROR(drm, "Do not understand BIT LVDS table\n");
return -EINVAL;
}
/*
* No idea if it's still called the LVDS manufacturer table, but
* the concept's close enough.
*/
bios->fp.lvdsmanufacturerpointer = ROM16(bios->data[bitentry->offset]);
return 0;
}
static int
parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
struct bit_entry *bitentry)
{
/*
* offset + 2 (8 bits): number of options in an
* INIT_RAM_RESTRICT_ZM_REG_GROUP opcode option set
* offset + 3 (16 bits): pointer to strap xlate table for RAM
* restrict option selection
*
* There's a bunch of bits in this table other than the RAM restrict
* stuff that we don't use - their use currently unknown
*/
/*
* Older bios versions don't have a sufficiently long table for
* what we want
*/
if (bitentry->length < 0x5)
return 0;
if (bitentry->version < 2) {
bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
} else {
bios->ram_restrict_group_count = bios->data[bitentry->offset + 0];
bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 1]);
}
return 0;
}
static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
/*
* Parses the pointer to the TMDS table
*
* Starting at bitentry->offset:
*
* offset + 0 (16 bits): TMDS table pointer
*
* The TMDS table is typically found just before the DCB table, with a
* characteristic signature of 0x11,0x13 (1.1 being version, 0x13 being
* length?)
*
* At offset +7 is a pointer to a script, which I don't know how to
* run yet.
* At offset +9 is a pointer to another script, likewise
* Offset +11 has a pointer to a table where the first word is a pxclk
* frequency and the second word a pointer to a script, which should be
* run if the comparison pxclk frequency is less than the pxclk desired.
* This repeats for decreasing comparison frequencies
* Offset +13 has a pointer to a similar table
* The selection of table (and possibly +7/+9 script) is dictated by
* "or" from the DCB.
*/
struct nouveau_drm *drm = nouveau_drm(dev);
uint16_t tmdstableptr, script1, script2;
if (bitentry->length != 2) {
NV_ERROR(drm, "Do not understand BIT TMDS table\n");
return -EINVAL;
}
tmdstableptr = ROM16(bios->data[bitentry->offset]);
if (!tmdstableptr) {
NV_INFO(drm, "Pointer to TMDS table not found\n");
return -EINVAL;
}
NV_INFO(drm, "TMDS table version %d.%d\n",
bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
/* nv50+ has v2.0, but we don't parse it atm */
if (bios->data[tmdstableptr] != 0x11)
return -ENOSYS;
/*
* These two scripts are odd: they don't seem to get run even when
* they are not stubbed.
*/
script1 = ROM16(bios->data[tmdstableptr + 7]);
script2 = ROM16(bios->data[tmdstableptr + 9]);
if (bios->data[script1] != 'q' || bios->data[script2] != 'q')
NV_WARN(drm, "TMDS table script pointers not stubbed\n");
bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]);
bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]);
return 0;
}
struct bit_table {
const char id;
int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
};
#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
int
bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
u8 entries, *entry;
if (bios->type != NVBIOS_BIT)
return -ENODEV;
entries = bios->data[bios->offset + 10];
entry = &bios->data[bios->offset + 12];
while (entries--) {
if (entry[0] == id) {
bit->id = entry[0];
bit->version = entry[1];
bit->length = ROM16(entry[2]);
bit->offset = ROM16(entry[4]);
bit->data = ROMPTR(dev, entry[4]);
return 0;
}
entry += bios->data[bios->offset + 9];
}
return -ENOENT;
}
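/*
* Illustrative sketch, not part of the original file: the raw BIT entry
* format that bit_table() above decodes.  Entries start at BIT offset + 12,
* the entry count lives at offset + 10 and the per-entry stride at
* offset + 9.  The struct is illustration only and is not used by the driver.
*/
struct bit_entry_raw {
u8 id; /* table identifier, e.g. 'i', 'A', 'D', 'M' */
u8 version;
u16 length; /* little-endian length of the pointed-to table */
u16 offset; /* little-endian BIOS image offset of that table */
} __packed;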
static int
parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
struct bit_table *table)
{
struct drm_device *dev = bios->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct bit_entry bitentry;
if (bit_table(dev, table->id, &bitentry) == 0)
return table->parse_fn(dev, bios, &bitentry);
NV_INFO(drm, "BIT table '%c' not found\n", table->id);
return -ENOSYS;
}
static int
parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
{
int ret;
/*
* The only restriction on parsing order currently is having 'i' first
* for use of bios->*_version or bios->feature_byte while parsing;
* functions shouldn't be actually *doing* anything apart from pulling
* data from the image into the bios struct, thus no interdependencies
*/
ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('i', i));
if (ret) /* info? */
return ret;
if (bios->major_version >= 0x60) /* g80+ */
parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
if (ret)
return ret;
parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
return 0;
}
static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsigned int offset)
{
/*
* Parses the BMP structure for useful things, but does not act on them
*
* offset + 5: BMP major version
* offset + 6: BMP minor version
* offset + 9: BMP feature byte
* offset + 10: BCD encoded BIOS version
*
* offset + 18: init script table pointer (for bios versions < 5.10h)
* offset + 20: extra init script table pointer (for bios
* versions < 5.10h)
*
* offset + 24: memory init table pointer (used on early bios versions)
* offset + 26: SDR memory sequencing setup data table
* offset + 28: DDR memory sequencing setup data table
*
* offset + 54: index of I2C CRTC pair to use for CRT output
* offset + 55: index of I2C CRTC pair to use for TV output
* offset + 56: index of I2C CRTC pair to use for flat panel output
* offset + 58: write CRTC index for I2C pair 0
* offset + 59: read CRTC index for I2C pair 0
* offset + 60: write CRTC index for I2C pair 1
* offset + 61: read CRTC index for I2C pair 1
*
* offset + 67: maximum internal PLL frequency (single stage PLL)
* offset + 71: minimum internal PLL frequency (single stage PLL)
*
* offset + 75: script table pointers, as described in
* parse_script_table_pointers
*
* offset + 89: TMDS single link output A table pointer
* offset + 91: TMDS single link output B table pointer
* offset + 95: LVDS single link output A table pointer
* offset + 105: flat panel timings table pointer
* offset + 107: flat panel strapping translation table pointer
* offset + 117: LVDS manufacturer panel config table pointer
* offset + 119: LVDS manufacturer strapping translation table pointer
*
* offset + 142: PLL limits table pointer
*
* offset + 156: minimum pixel clock for LVDS dual link
*/
struct nouveau_drm *drm = nouveau_drm(dev);
uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor;
uint16_t bmplength;
uint16_t legacy_scripts_offset, legacy_i2c_offset;
/* load needed defaults in case we can't parse this info */
bios->digital_min_front_porch = 0x4b;
bios->fmaxvco = 256000;
bios->fminvco = 128000;
bios->fp.duallink_transition_clk = 90000;
bmp_version_major = bmp[5];
bmp_version_minor = bmp[6];
NV_INFO(drm, "BMP version %d.%d\n",
bmp_version_major, bmp_version_minor);
/*
* Make sure that 0x36 is blank and can't be mistaken for a DCB
* pointer on early versions
*/
if (bmp_version_major < 5)
*(uint16_t *)&bios->data[0x36] = 0;
/*
* Seems that the minor version was 1 for all major versions prior
* to 5. Version 6 could theoretically exist, but I suspect BIT
* happened instead.
*/
if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) {
NV_ERROR(drm, "You have an unsupported BMP version. "
"Please send in your bios\n");
return -ENOSYS;
}
if (bmp_version_major == 0)
/* nothing that's currently useful in this version */
return 0;
else if (bmp_version_major == 1)
bmplength = 44; /* exact for 1.01 */
else if (bmp_version_major == 2)
bmplength = 48; /* exact for 2.01 */
else if (bmp_version_major == 3)
bmplength = 54;
/* guessed - mem init tables added in this version */
else if (bmp_version_major == 4 || bmp_version_minor < 0x1)
/* don't know if 5.0 exists... */
bmplength = 62;
/* guessed - BMP I2C indices added in version 4*/
else if (bmp_version_minor < 0x6)
bmplength = 67; /* exact for 5.01 */
else if (bmp_version_minor < 0x10)
bmplength = 75; /* exact for 5.06 */
else if (bmp_version_minor == 0x10)
bmplength = 89; /* exact for 5.10h */
else if (bmp_version_minor < 0x14)
bmplength = 118; /* exact for 5.11h */
else if (bmp_version_minor < 0x24)
/*
* Not sure of version where pll limits came in;
* certainly exist by 0x24 though.
*/
/* length not exact: this is long enough to get lvds members */
bmplength = 123;
else if (bmp_version_minor < 0x27)
/*
* Length not exact: this is long enough to get pll limit
* member
*/
bmplength = 144;
else
/*
* Length not exact: this is long enough to get dual link
* transition clock.
*/
bmplength = 158;
/* checksum */
if (nv_cksum(bmp, 8)) {
NV_ERROR(drm, "Bad BMP checksum\n");
return -EINVAL;
}
/*
* Bit 4 seems to indicate either a mobile bios or a quadro card --
* mobile behaviour consistent (nv11+), quadro only seen nv18gl-nv36gl
* (not nv10gl), bit 5 that the flat panel tables are present, and
* bit 6 a tv bios.
*/
bios->feature_byte = bmp[9];
if (bmp_version_major < 5 || bmp_version_minor < 0x10)
bios->old_style_init = true;
legacy_scripts_offset = 18;
if (bmp_version_major < 2)
legacy_scripts_offset -= 4;
bios->init_script_tbls_ptr = ROM16(bmp[legacy_scripts_offset]);
bios->extra_init_script_tbl_ptr = ROM16(bmp[legacy_scripts_offset + 2]);
if (bmp_version_major > 2) { /* appears in BMP 3 */
bios->legacy.mem_init_tbl_ptr = ROM16(bmp[24]);
bios->legacy.sdr_seq_tbl_ptr = ROM16(bmp[26]);
bios->legacy.ddr_seq_tbl_ptr = ROM16(bmp[28]);
}
legacy_i2c_offset = 0x48; /* BMP version 2 & 3 */
if (bmplength > 61)
legacy_i2c_offset = offset + 54;
bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
if (bmplength > 74) {
bios->fmaxvco = ROM32(bmp[67]);
bios->fminvco = ROM32(bmp[71]);
}
if (bmplength > 88)
parse_script_table_pointers(bios, offset + 75);
if (bmplength > 94) {
bios->tmds.output0_script_ptr = ROM16(bmp[89]);
bios->tmds.output1_script_ptr = ROM16(bmp[91]);
/*
* Never observed in use with lvds scripts, but is reused for
* 18/24 bit panel interface default for EDID equipped panels
* (if_is_24bit not set directly to avoid any oscillation).
*/
bios->legacy.lvds_single_a_script_ptr = ROM16(bmp[95]);
}
if (bmplength > 108) {
bios->fp.fptablepointer = ROM16(bmp[105]);
bios->fp.fpxlatetableptr = ROM16(bmp[107]);
bios->fp.xlatwidth = 1;
}
if (bmplength > 120) {
bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
}
#if 0
if (bmplength > 143)
bios->pll_limit_tbl_ptr = ROM16(bmp[142]);
#endif
if (bmplength > 157)
bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;
return 0;
}
static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
{
int i, j;
for (i = 0; i <= (n - len); i++) {
for (j = 0; j < len; j++)
if (data[i + j] != str[j])
break;
if (j == len)
return i;
}
return 0;
}
void *
olddcb_table(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
u8 *dcb = NULL;
if (drm->client.device.info.family > NV_DEVICE_INFO_V0_TNT)
dcb = ROMPTR(dev, drm->vbios.data[0x36]);
if (!dcb) {
NV_WARN(drm, "No DCB data found in VBIOS\n");
return NULL;
}
if (dcb[0] >= 0x42) {
NV_WARN(drm, "DCB version 0x%02x unknown\n", dcb[0]);
return NULL;
} else
if (dcb[0] >= 0x30) {
if (ROM32(dcb[6]) == 0x4edcbdcb)
return dcb;
} else
if (dcb[0] >= 0x20) {
if (ROM32(dcb[4]) == 0x4edcbdcb)
return dcb;
} else
if (dcb[0] >= 0x15) {
if (!memcmp(&dcb[-7], "DEV_REC", 7))
return dcb;
} else {
/*
* v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
* always has the same single (crt) entry, even when tv-out
* present, so the conclusion is this version cannot really
* be used.
*
* v1.2 tables (some NV6/10, and NV15+) normally have the
* same 5 entries, which are not specific to the card and so
* no use.
*
* v1.2 does have an I2C table that read_dcb_i2c_table can
* handle, but cards exist (nv11 in #14821) with a bad i2c
* table pointer, so use the indices parsed in
* parse_bmp_structure.
*
* v1.1 (NV5+, maybe some NV4) is entirely unhelpful
*/
NV_WARN(drm, "No useful DCB data in VBIOS\n");
return NULL;
}
NV_WARN(drm, "DCB header validation failed\n");
return NULL;
}
void *
olddcb_outp(struct drm_device *dev, u8 idx)
{
u8 *dcb = olddcb_table(dev);
if (dcb && dcb[0] >= 0x30) {
if (idx < dcb[2])
return dcb + dcb[1] + (idx * dcb[3]);
} else
if (dcb && dcb[0] >= 0x20) {
u8 *i2c = ROMPTR(dev, dcb[2]);
u8 *ent = dcb + 8 + (idx * 8);
if (i2c && ent < i2c)
return ent;
} else
if (dcb && dcb[0] >= 0x15) {
u8 *i2c = ROMPTR(dev, dcb[2]);
u8 *ent = dcb + 4 + (idx * 10);
if (i2c && ent < i2c)
return ent;
}
return NULL;
}
int
olddcb_outp_foreach(struct drm_device *dev, void *data,
int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
{
int ret, idx = -1;
u8 *outp = NULL;
while ((outp = olddcb_outp(dev, ++idx))) {
if (ROM32(outp[0]) == 0x00000000)
break; /* seen on an NV11 with DCB v1.5 */
if (ROM32(outp[0]) == 0xffffffff)
break; /* seen on an NV17 with DCB v2.0 */
if ((outp[0] & 0x0f) == DCB_OUTPUT_UNUSED)
continue;
if ((outp[0] & 0x0f) == DCB_OUTPUT_EOL)
break;
ret = exec(dev, data, idx, outp);
if (ret)
return ret;
}
return 0;
}
u8 *
olddcb_conntab(struct drm_device *dev)
{
u8 *dcb = olddcb_table(dev);
if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
u8 *conntab = ROMPTR(dev, dcb[0x14]);
if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
return conntab;
}
return NULL;
}
u8 *
olddcb_conn(struct drm_device *dev, u8 idx)
{
u8 *conntab = olddcb_conntab(dev);
if (conntab && idx < conntab[2])
return conntab + conntab[1] + (idx * conntab[3]);
return NULL;
}
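/*
* Editorial note, not part of the original file: olddcb_conntab() and
* olddcb_conn() above imply the connector table layout
*   conntab[0] = version, conntab[1] = header length,
*   conntab[2] = entry count, conntab[3] = entry size,
* with entry i living at conntab + conntab[1] + i * conntab[3].
*/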
static struct dcb_output *new_dcb_entry(struct dcb_table *dcb)
{
struct dcb_output *entry = &dcb->entry[dcb->entries];
memset(entry, 0, sizeof(struct dcb_output));
entry->index = dcb->entries++;
return entry;
}
static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
int heads, int or)
{
struct dcb_output *entry = new_dcb_entry(dcb);
entry->type = type;
entry->i2c_index = i2c;
entry->heads = heads;
if (type != DCB_OUTPUT_ANALOG)
entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
entry->or = or;
}
static bool
parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
uint32_t conn, uint32_t conf, struct dcb_output *entry)
{
struct nouveau_drm *drm = nouveau_drm(dev);
int link = 0;
entry->type = conn & 0xf;
entry->i2c_index = (conn >> 4) & 0xf;
entry->heads = (conn >> 8) & 0xf;
entry->connector = (conn >> 12) & 0xf;
entry->bus = (conn >> 16) & 0xf;
entry->location = (conn >> 20) & 0x3;
entry->or = (conn >> 24) & 0xf;
switch (entry->type) {
case DCB_OUTPUT_ANALOG:
/*
* Although the rest of a CRT conf dword is usually
* zeros, mac biosen have stuff there so we must mask
*/
entry->crtconf.maxfreq = (dcb->version < 0x30) ?
(conf & 0xffff) * 10 :
(conf & 0xff) * 10000;
break;
case DCB_OUTPUT_LVDS:
{
uint32_t mask;
if (conf & 0x1)
entry->lvdsconf.use_straps_for_mode = true;
if (dcb->version < 0x22) {
mask = ~0xd;
/*
* The laptop in bug 14567 lies and claims to not use
* straps when it does, so assume all DCB 2.0 laptops
* use straps, until a broken EDID using one is produced
*/
entry->lvdsconf.use_straps_for_mode = true;
/*
* Both 0x4 and 0x8 show up in v2.0 tables; assume they
* mean the same thing (probably wrong, but might work)
*/
if (conf & 0x4 || conf & 0x8)
entry->lvdsconf.use_power_scripts = true;
} else {
mask = ~0x7;
if (conf & 0x2)
entry->lvdsconf.use_acpi_for_edid = true;
if (conf & 0x4)
entry->lvdsconf.use_power_scripts = true;
entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4;
link = entry->lvdsconf.sor.link;
}
if (conf & mask) {
/*
* Until we even try to use these on G8x, it's
* useless reporting unknown bits. They all are.
*/
if (dcb->version >= 0x40)
break;
NV_ERROR(drm, "Unknown LVDS configuration bits, "
"please report\n");
}
break;
}
case DCB_OUTPUT_TV:
{
if (dcb->version >= 0x30)
entry->tvconf.has_component_output = conf & (0x8 << 4);
else
entry->tvconf.has_component_output = false;
break;
}
case DCB_OUTPUT_DP:
entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
entry->extdev = (conf & 0x0000ff00) >> 8;
switch ((conf & 0x00e00000) >> 21) {
case 0:
entry->dpconf.link_bw = 162000;
break;
case 1:
entry->dpconf.link_bw = 270000;
break;
case 2:
entry->dpconf.link_bw = 540000;
break;
case 3:
default:
entry->dpconf.link_bw = 810000;
break;
}
switch ((conf & 0x0f000000) >> 24) {
case 0xf:
case 0x4:
entry->dpconf.link_nr = 4;
break;
case 0x3:
case 0x2:
entry->dpconf.link_nr = 2;
break;
default:
entry->dpconf.link_nr = 1;
break;
}
link = entry->dpconf.sor.link;
break;
case DCB_OUTPUT_TMDS:
if (dcb->version >= 0x40) {
entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
entry->extdev = (conf & 0x0000ff00) >> 8;
link = entry->tmdsconf.sor.link;
}
else if (dcb->version >= 0x30)
entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8;
else if (dcb->version >= 0x22)
entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
break;
case DCB_OUTPUT_EOL:
/* weird g80 mobile type that "nv" treats as a terminator */
dcb->entries--;
return false;
default:
break;
}
if (dcb->version < 0x40) {
/* Normal entries consist of a single bit, but dual link has
* the next most significant bit set too
*/
entry->duallink_possible =
((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
} else {
entry->duallink_possible = (entry->sorconf.link == 3);
}
/* unsure what DCB version introduces this, 3.0? */
if (conf & 0x100000)
entry->i2c_upper_default = true;
entry->hasht = (entry->extdev << 8) | (entry->location << 4) |
entry->type;
entry->hashm = (entry->heads << 8) | (link << 6) | entry->or;
return true;
}
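/*
* Worked example, not part of the original file: the 'conn' dword decoded
* above packs, from the low bits up, type[3:0], i2c_index[7:4], heads[11:8],
* connector[15:12], bus[19:16], location[21:20] and or[27:24].  For instance
* the quirk value 0x01010312 used further down in apply_dcb_encoder_quirks()
* decodes to a TMDS output on i2c index 1, both heads, connector 0, bus 1,
* on-chip, OR 1 -- matching its "DVI, connector 0" annotation.
*/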
static bool
parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
uint32_t conn, uint32_t conf, struct dcb_output *entry)
{
struct nouveau_drm *drm = nouveau_drm(dev);
switch (conn & 0x0000000f) {
case 0:
entry->type = DCB_OUTPUT_ANALOG;
break;
case 1:
entry->type = DCB_OUTPUT_TV;
break;
case 2:
case 4:
if (conn & 0x10)
entry->type = DCB_OUTPUT_LVDS;
else
entry->type = DCB_OUTPUT_TMDS;
break;
case 3:
entry->type = DCB_OUTPUT_LVDS;
break;
default:
NV_ERROR(drm, "Unknown DCB type %d\n", conn & 0x0000000f);
return false;
}
entry->i2c_index = (conn & 0x0003c000) >> 14;
entry->heads = ((conn & 0x001c0000) >> 18) + 1;
entry->or = entry->heads; /* same as heads, hopefully safe enough */
entry->location = (conn & 0x01e00000) >> 21;
entry->bus = (conn & 0x0e000000) >> 25;
entry->duallink_possible = false;
switch (entry->type) {
case DCB_OUTPUT_ANALOG:
entry->crtconf.maxfreq = (conf & 0xffff) * 10;
break;
case DCB_OUTPUT_TV:
entry->tvconf.has_component_output = false;
break;
case DCB_OUTPUT_LVDS:
if ((conn & 0x00003f00) >> 8 != 0x10)
entry->lvdsconf.use_straps_for_mode = true;
entry->lvdsconf.use_power_scripts = true;
break;
default:
break;
}
return true;
}
static
void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
{
/*
* DCB v2.0 lists each output combination separately.
* Here we merge compatible entries to have fewer outputs, with
* more options
*/
struct nouveau_drm *drm = nouveau_drm(dev);
int i, newentries = 0;
for (i = 0; i < dcb->entries; i++) {
struct dcb_output *ient = &dcb->entry[i];
int j;
for (j = i + 1; j < dcb->entries; j++) {
struct dcb_output *jent = &dcb->entry[j];
if (jent->type == 100) /* already merged entry */
continue;
/* merge heads field when all other fields the same */
if (jent->i2c_index == ient->i2c_index &&
jent->type == ient->type &&
jent->location == ient->location &&
jent->or == ient->or) {
NV_INFO(drm, "Merging DCB entries %d and %d\n",
i, j);
ient->heads |= jent->heads;
jent->type = 100; /* dummy value */
}
}
}
/* Compact entries merged into others out of dcb */
for (i = 0; i < dcb->entries; i++) {
if (dcb->entry[i].type == 100)
continue;
if (newentries != i) {
dcb->entry[newentries] = dcb->entry[i];
dcb->entry[newentries].index = newentries;
}
newentries++;
}
dcb->entries = newentries;
}
static bool
apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct dcb_table *dcb = &drm->vbios.dcb;
/* Dell Precision M6300
* DCB entry 2: 02025312 00000010
* DCB entry 3: 02026312 00000020
*
* Identical, except apparently a different connector on a
* different SOR link. Not a clue how we're supposed to know
* which one is in use if it even shares an i2c line...
*
* Ignore the connector on the second SOR link to prevent
* nasty problems until this is sorted (assuming it's not a
* VBIOS bug).
*/
if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) {
if (*conn == 0x02026312 && *conf == 0x00000020)
return false;
}
/* GeForce3 Ti 200
*
* DCB reports an LVDS output that should be TMDS:
* DCB entry 1: f2005014 ffffffff
*/
if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
if (*conn == 0xf2005014 && *conf == 0xffffffff) {
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
return false;
}
}
/* XFX GT-240X-YA
*
* So many things wrong here, replace the entire encoder table..
*/
if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) {
if (idx == 0) {
*conn = 0x02001300; /* VGA, connector 1 */
*conf = 0x00000028;
} else
if (idx == 1) {
*conn = 0x01010312; /* DVI, connector 0 */
*conf = 0x00020030;
} else
if (idx == 2) {
*conn = 0x01010310; /* VGA, connector 0 */
*conf = 0x00000028;
} else
if (idx == 3) {
*conn = 0x02022362; /* HDMI, connector 2 */
*conf = 0x00020010;
} else {
*conn = 0x0000000e; /* EOL */
*conf = 0x00000000;
}
}
/* Some other twisted XFX board (rhbz#694914)
*
* The DVI/VGA encoder combo that's supposed to represent the
* DVI-I connector actually points at two different ones, and
* the HDMI connector ends up paired with the VGA instead.
*
* The connector table has nothing for VGA at all, pointing it
* at an invalid conntab entry 2, so we figure it out ourselves.
*/
if (nv_match_device(dev, 0x0615, 0x1682, 0x2605)) {
if (idx == 0) {
*conn = 0x02002300; /* VGA, connector 2 */
*conf = 0x00000028;
} else
if (idx == 1) {
*conn = 0x01010312; /* DVI, connector 0 */
*conf = 0x00020030;
} else
if (idx == 2) {
*conn = 0x04020310; /* VGA, connector 0 */
*conf = 0x00000028;
} else
if (idx == 3) {
*conn = 0x02021322; /* HDMI, connector 1 */
*conf = 0x00020010;
} else {
*conn = 0x0000000e; /* EOL */
*conf = 0x00000000;
}
}
/* fdo#50830: connector indices for VGA and DVI-I are backwards */
if (nv_match_device(dev, 0x0421, 0x3842, 0xc793)) {
if (idx == 0 && *conn == 0x02000300)
*conn = 0x02011300;
else
if (idx == 1 && *conn == 0x04011310)
*conn = 0x04000310;
else
if (idx == 2 && *conn == 0x02011312)
*conn = 0x02000312;
}
return true;
}
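/* No DCB table at all (pre-DCB/BMP boards): invent a minimal encoder table
 * from the legacy i2c indices and the TV/TMDS hints in the BIOS.
 */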
static void
fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
{
struct dcb_table *dcb = &bios->dcb;
int all_heads = (nv_two_heads(dev) ? 3 : 1);
#ifdef __powerpc__
/* Apple iMac G4 NV17 */
if (of_machine_is_compatible("PowerMac4,5")) {
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
return;
}
#endif
/* Make up some sane defaults */
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
bios->legacy.i2c_indices.crt, 1, 1);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
bios->legacy.i2c_indices.tv,
all_heads, 0);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
bios->legacy.i2c_indices.panel,
all_heads, 1);
}
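/* olddcb_outp_foreach() callback: apply quirks, then decode one raw DCB
 * entry; returning non-zero stops the iteration.
 */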
static int
parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct dcb_table *dcb = &drm->vbios.dcb;
u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]);
u32 conn = ROM32(outp[0]);
bool ret;
if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) {
struct dcb_output *entry = new_dcb_entry(dcb);
NV_INFO(drm, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
if (dcb->version >= 0x20)
ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
else
ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
entry->id = idx;
if (!ret)
return 1; /* stop parsing */
/* Ignore the I2C index for on-chip TV-out, as there
* are cards with bogus values (nv31m in bug 23212),
* and it's otherwise useless.
*/
if (entry->type == DCB_OUTPUT_TV &&
entry->location == DCB_LOC_ON_CHIP)
entry->i2c_index = 0x0f;
}
return 0;
}
static void
dcb_fake_connectors(struct nvbios *bios)
{
struct dcb_table *dcbt = &bios->dcb;
u8 map[16] = { };
int i, idx = 0;
/* heuristic: if we ever get a non-zero connector field, assume
* that all the indices are valid and we don't need to fake them.
*
* and, as usual, a blacklist of boards with bad bios data..
*/
if (!nv_match_device(bios->dev, 0x0392, 0x107d, 0x20a2)) {
for (i = 0; i < dcbt->entries; i++) {
if (dcbt->entry[i].connector)
return;
}
}
/* no useful connector info available, we need to make it up
* ourselves. the rule here is: anything on the same i2c bus
* is considered to be on the same connector. any output
* without an associated i2c bus is assigned its own unique
* connector index.
*/
for (i = 0; i < dcbt->entries; i++) {
u8 i2c = dcbt->entry[i].i2c_index;
if (i2c == 0x0f) {
dcbt->entry[i].connector = idx++;
} else {
if (!map[i2c])
map[i2c] = ++idx;
dcbt->entry[i].connector = map[i2c] - 1;
}
}
/* if we created more than one connector, destroy the connector
* table - just in case it has random, rather than stub, entries.
*/
if (i > 1) {
u8 *conntab = olddcb_conntab(bios->dev);
if (conntab)
conntab[0] = 0x00;
}
}
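/* Locate and parse the DCB table, falling back to a fabricated encoder table
 * on pre-DCB (BMP) boards, then fix up connector indices where needed.
 */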
static int
parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct dcb_table *dcb = &bios->dcb;
u8 *dcbt, *conn;
int idx;
dcbt = olddcb_table(dev);
if (!dcbt) {
/* handle pre-DCB boards */
if (bios->type == NVBIOS_BMP) {
fabricate_dcb_encoder_table(dev, bios);
return 0;
}
return -EINVAL;
}
NV_INFO(drm, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
dcb->version = dcbt[0];
olddcb_outp_foreach(dev, NULL, parse_dcb_entry);
/*
* Apart from v2.1+ not being known to require merging, this
* guarantees dcbent->index is the index of the entry in the rom image.
*/
if (dcb->version < 0x21)
merge_like_dcb_entries(dev, dcb);
/* dump connector table entries to log, if any exist */
idx = -1;
while ((conn = olddcb_conn(dev, ++idx))) {
if (conn[0] != 0xff) {
if (olddcb_conntab(dev)[3] < 4)
NV_INFO(drm, "DCB conn %02d: %04x\n",
idx, ROM16(conn[0]));
else
NV_INFO(drm, "DCB conn %02d: %08x\n",
idx, ROM32(conn[0]));
}
}
dcb_fake_connectors(bios);
return 0;
}
static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
{
/*
* The header following the "HWSQ" signature has the number of entries,
* and the entry size
*
* An entry consists of a dword to write to the sequencer control reg
* (0x00001304), followed by the ucode bytes, written sequentially,
* starting at reg 0x00001400
*/
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->client.device.object;
uint8_t bytes_to_write;
uint16_t hwsq_entry_offset;
int i;
if (bios->data[hwsq_offset] <= entry) {
NV_ERROR(drm, "Too few entries in HW sequencer table for "
"requested entry\n");
return -ENOENT;
}
bytes_to_write = bios->data[hwsq_offset + 1];
if (bytes_to_write != 36) {
NV_ERROR(drm, "Unknown HW sequencer entry size\n");
return -EINVAL;
}
NV_INFO(drm, "Loading NV17 power sequencing microcode\n");
hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;
/* set sequencer control */
nvif_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
bytes_to_write -= 4;
/* write ucode */
for (i = 0; i < bytes_to_write; i += 4)
nvif_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
/* twiddle NV_PBUS_DEBUG_4 */
nvif_wr32(device, NV_PBUS_DEBUG_4, nvif_rd32(device, NV_PBUS_DEBUG_4) | 0x18);
return 0;
}
static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
struct nvbios *bios)
{
/*
* BMP based cards, from NV17 on, need microcode loaded to correctly
* control the GPIOs etc. for LVDS panels.
*
* BIT based cards seem to do this directly in the init scripts
*
* The microcode entries are found by the "HWSQ" signature.
*/
static const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' };
const int sz = sizeof(hwsq_signature);
int hwsq_offset;
hwsq_offset = findstr(bios->data, bios->length, hwsq_signature, sz);
if (!hwsq_offset)
return 0;
/* always use entry 0? */
return load_nv17_hwsq_ucode_entry(dev, bios, hwsq_offset + sz, 0);
}
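/* Scan the BIOS image for an embedded EDID block (the 8-byte header
 * signature followed by a valid checksum) and cache a pointer to it.
 */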
uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
static const uint8_t edid_sig[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
uint16_t offset = 0;
uint16_t newoffset;
int searchlen = NV_PROM_SIZE;
if (bios->fp.edid)
return bios->fp.edid;
while (searchlen) {
newoffset = findstr(&bios->data[offset], searchlen,
edid_sig, 8);
if (!newoffset)
return NULL;
offset += newoffset;
if (!nv_cksum(&bios->data[offset], EDID1_LEN))
break;
searchlen -= offset;
offset++;
}
NV_INFO(drm, "Found EDID in BIOS\n");
return bios->fp.edid = &bios->data[offset];
}
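/* Wrap the nvkm-provided VBIOS image in the legacy nvbios state and hand it
 * to the BIT or BMP parser, depending on which structure is present.
 */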
static bool NVInitVBIOS(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
struct nvbios *legacy = &drm->vbios;
memset(legacy, 0, sizeof(struct nvbios));
spin_lock_init(&legacy->lock);
legacy->dev = dev;
legacy->data = bios->data;
legacy->length = bios->size;
legacy->major_version = bios->version.major;
legacy->chip_version = bios->version.chip;
if (bios->bit_offset) {
legacy->type = NVBIOS_BIT;
legacy->offset = bios->bit_offset;
return !parse_bit_structure(legacy, legacy->offset + 6);
} else
if (bios->bmp_offset) {
legacy->type = NVBIOS_BMP;
legacy->offset = bios->bmp_offset;
return !parse_bmp_structure(dev, legacy, legacy->offset);
}
return false;
}
int
nouveau_run_vbios_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
/* Reset the BIOS head to 0. */
bios->state.crtchead = 0;
if (bios->major_version < 5) /* BMP only */
load_nv17_hw_sequencer_ucode(dev, bios);
if (bios->execute) {
bios->fp.last_script_invoc = 0;
bios->fp.lvds_init_run = false;
}
return 0;
}
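/* Guess whether the card has already been POSTed: Tesla and later are assumed
 * posted; older chips are checked by reading the CRTC horizontal total, which
 * is zero on an uninitialised adaptor.
 */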
static bool
nouveau_bios_posted(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
unsigned htotal;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
return true;
htotal = NVReadVgaCrtc(dev, 0, 0x06);
htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8;
htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4;
htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10;
htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11;
return (htotal != 0);
}
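/* Driver entry point: parse the VBIOS and DCB, and run the init tables if
 * the adaptor has not been POSTed yet.
 */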
int
nouveau_bios_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
int ret;
/* only relevant for PCI devices */
if (!dev_is_pci(dev->dev))
return 0;
if (!NVInitVBIOS(dev))
return -ENODEV;
ret = parse_dcb_table(dev, bios);
if (ret)
return ret;
if (!bios->major_version) /* we don't run version 0 bios */
return 0;
/* init script execution disabled */
bios->execute = false;
/* ... unless card isn't POSTed already */
if (!nouveau_bios_posted(dev)) {
NV_INFO(drm, "Adaptor not initialised, "
"running VBIOS init tables.\n");
bios->execute = true;
}
ret = nouveau_run_vbios_init(dev);
if (ret)
return ret;
/* feature_byte on BMP is poor, but init always sets CR4B */
if (bios->major_version < 5)
bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40;
/* all BIT systems need p_f_m_t for digital_min_front_porch */
if (bios->is_mobile || bios->major_version >= 5)
ret = parse_fp_mode_table(dev, bios);
/* allow subsequent scripts to execute */
bios->execute = true;
return 0;
}
void
nouveau_bios_takedown(struct drm_device *dev)
{
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_bios.c |