/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <[email protected]>
*
*/
#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
#include "drm_internal.h"
/** @file drm_gem.c
*
* This file provides some of the base ioctls and library routines for
* the graphics memory manager implemented by each device driver.
*
* Because various devices have different requirements in terms of
* synchronization and migration strategies, implementing that is left up to
* the driver, and all that the general API provides should be generic --
* allocating objects, reading/writing data with the cpu, freeing objects.
* Even there, platform-dependent optimizations for reading/writing data with
* the CPU mean we'll likely hook those out to driver-specific calls. However,
* the DRI2 implementation wants to have at least allocate/mmap be generic.
*
* The goal was to have swap-backed object allocation managed through
* struct file. However, file descriptors as handles to a struct file have
* two major failings:
* - Process limits prevent more than 1024 or so being used at a time by
* default.
* - Inability to allocate high fds will aggravate the X Server's select()
* handling, and likely that of many GL client applications as well.
*
* This led to a plan of using our own integer IDs (called handles, following
* DRM terminology) to mimic fds, and implement the fd syscalls we need as
* ioctls. The objects themselves will still include the struct file so
* that we can transition to fds if the required kernel infrastructure shows
* up at a later date, and as our interface with shmfs for memory allocation.
*/
static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}
/**
* drm_gem_init - Initialize the GEM device fields
* @dev: drm_device structure to initialize
*/
int
drm_gem_init(struct drm_device *dev)
{
struct drm_vma_offset_manager *vma_offset_manager;
mutex_init(&dev->object_name_lock);
idr_init_base(&dev->object_name_idr, 1);
vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
GFP_KERNEL);
if (!vma_offset_manager) {
DRM_ERROR("out of memory\n");
return -ENOMEM;
}
dev->vma_offset_manager = vma_offset_manager;
drm_vma_offset_manager_init(vma_offset_manager,
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
return drmm_add_action(dev, drm_gem_init_release, NULL);
}
/**
* drm_gem_object_init - initialize an allocated shmem-backed GEM object
* @dev: drm_device the object should be initialized for
* @obj: drm_gem_object to initialize
* @size: object size
*
* Initialize an already allocated GEM object of the specified size with
* shmfs backing store.
*/
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
struct file *filp;
drm_gem_private_object_init(dev, obj, size);
filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(filp))
return PTR_ERR(filp);
obj->filp = filp;
return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
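/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * driver wrapping drm_gem_object_init() to allocate a shmem-backed object.
 * "my_obj" and "my_gem_funcs" are assumed names.
 *
 *	struct my_obj {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct my_obj *my_obj_create(struct drm_device *dev, size_t size)
 *	{
 *		struct my_obj *obj;
 *		int ret;
 *
 *		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *		if (!obj)
 *			return ERR_PTR(-ENOMEM);
 *		obj->base.funcs = &my_gem_funcs;
 *		ret = drm_gem_object_init(dev, &obj->base, round_up(size, PAGE_SIZE));
 *		if (ret) {
 *			kfree(obj);
 *			return ERR_PTR(ret);
 *		}
 *		return obj;
 *	}
 */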
/**
* drm_gem_private_object_init - initialize an allocated private GEM object
* @dev: drm_device the object should be initialized for
* @obj: drm_gem_object to initialize
* @size: object size
*
* Initialize an already allocated GEM object of the specified size with
* no GEM provided backing store. Instead the caller is responsible for
* backing the object and handling it.
*/
void drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
obj->dev = dev;
obj->filp = NULL;
kref_init(&obj->refcount);
obj->handle_count = 0;
obj->size = size;
dma_resv_init(&obj->_resv);
if (!obj->resv)
obj->resv = &obj->_resv;
if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
drm_gem_gpuva_init(obj);
drm_vma_node_reset(&obj->vma_node);
INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
/**
* drm_gem_private_object_fini - Finalize a failed drm_gem_object
* @obj: drm_gem_object
*
* Uninitialize an already allocated GEM object when its initialization failed.
*/
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
WARN_ON(obj->dma_buf);
dma_resv_fini(&obj->_resv);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
*
* Called after the last handle to the object has been closed.
*
* Removes any name for the object. Note that this must be
* called before drm_gem_object_free or we'll be touching
* freed memory.
*/
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
/* Remove any name for this object */
if (obj->name) {
idr_remove(&dev->object_name_idr, obj->name);
obj->name = 0;
}
}
static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
/* Unbreak the reference cycle if we have an exported dma_buf. */
if (obj->dma_buf) {
dma_buf_put(obj->dma_buf);
obj->dma_buf = NULL;
}
}
static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
bool final = false;
if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
return;
/*
* Must bump handle count first as this may be the last
* ref, in which case the object would disappear before we
* checked for a name
*/
mutex_lock(&dev->object_name_lock);
if (--obj->handle_count == 0) {
drm_gem_object_handle_free(obj);
drm_gem_object_exported_dma_buf_free(obj);
final = true;
}
mutex_unlock(&dev->object_name_lock);
if (final)
drm_gem_object_put(obj);
}
/*
* Called at device or object close to release the file's
* handle references on objects.
*/
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
drm_prime_remove_buf_handle(&file_priv->prime, id);
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
return 0;
}
/**
* drm_gem_handle_delete - deletes the given file-private handle
* @filp: drm file-private structure to use for the handle look up
* @handle: userspace handle to delete
*
* Removes the GEM handle from the @filp lookup table which has been added with
* drm_gem_handle_create(). If this is the last handle also cleans up linked
* resources like GEM names.
*/
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_gem_object *obj;
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
obj = idr_replace(&filp->object_idr, NULL, handle);
spin_unlock(&filp->table_lock);
if (IS_ERR_OR_NULL(obj))
return -EINVAL;
/* Release driver's reference and decrement refcount. */
drm_gem_object_release_handle(handle, obj, filp);
/* And finally make the handle available for future allocations. */
spin_lock(&filp->table_lock);
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
* drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
* @file: drm file-private structure containing the gem object
* @dev: corresponding drm_device
* @handle: gem object handle
* @offset: return location for the fake mmap offset
*
* This implements the &drm_driver.dumb_map_offset kms driver callback for
* drivers which use gem to manage their backing storage.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
u32 handle, u64 *offset)
{
struct drm_gem_object *obj;
int ret;
obj = drm_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
/* Don't allow imported objects to be mapped */
if (obj->import_attach) {
ret = -EINVAL;
goto out;
}
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
drm_gem_object_put(obj);
return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
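/*
 * Usage sketch (illustrative): drivers typically wire this helper directly
 * into their &struct drm_driver; "my_dumb_create" is an assumed callback.
 *
 *	static const struct drm_driver my_driver = {
 *		.dumb_create = my_dumb_create,
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *		...
 *	};
 */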
/**
* drm_gem_handle_create_tail - internal functions to create a handle
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
* @handlep: pointer to return the created handle to the caller
*
* This expects the &drm_device.object_name_lock to be held already and will
* drop it before returning. Used to avoid races in establishing new handles
* when importing an object from either a flink name or a dma-buf.
*
* Handles must be released again through drm_gem_handle_delete(). This is done
* when userspace closes @file_priv for all attached handles, or through the
* GEM_CLOSE ioctl for individual handles.
*/
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep)
{
struct drm_device *dev = obj->dev;
u32 handle;
int ret;
WARN_ON(!mutex_is_locked(&dev->object_name_lock));
if (obj->handle_count++ == 0)
drm_gem_object_get(obj);
/*
* Get the user-visible handle using idr. Preload and perform
* allocation under our spinlock.
*/
idr_preload(GFP_KERNEL);
spin_lock(&file_priv->table_lock);
ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
spin_unlock(&file_priv->table_lock);
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
if (ret < 0)
goto err_unref;
handle = ret;
ret = drm_vma_node_allow(&obj->vma_node, file_priv);
if (ret)
goto err_remove;
if (obj->funcs->open) {
ret = obj->funcs->open(obj, file_priv);
if (ret)
goto err_revoke;
}
*handlep = handle;
return 0;
err_revoke:
drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
spin_lock(&file_priv->table_lock);
idr_remove(&file_priv->object_idr, handle);
spin_unlock(&file_priv->table_lock);
err_unref:
drm_gem_object_handle_put_unlocked(obj);
return ret;
}
/**
* drm_gem_handle_create - create a gem handle for an object
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
* @handlep: pointer to return the created handle to the caller
*
* Create a handle for this object. This adds a handle reference to the object,
* which includes a regular reference count. Callers will likely want to
* drop their reference to the object afterwards.
*
* Since this publishes @obj to userspace it must be fully set up by this point,
* drivers must call this last in their buffer object creation callbacks.
*/
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep)
{
mutex_lock(&obj->dev->object_name_lock);
return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
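/*
 * Usage sketch (illustrative): publishing a new object to userspace from a
 * hypothetical dumb_create callback. The handle must be created last, once
 * the object is fully set up, and the local reference is dropped afterwards
 * because the handle now holds its own reference. "my_obj_create" is the
 * assumed helper sketched near drm_gem_object_init() above.
 *
 *	static int my_dumb_create(struct drm_file *file, struct drm_device *dev,
 *				  struct drm_mode_create_dumb *args)
 *	{
 *		struct my_obj *obj;
 *		int ret;
 *
 *		args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 *		args->size = (u64)args->pitch * args->height;
 *		obj = my_obj_create(dev, args->size);
 *		if (IS_ERR(obj))
 *			return PTR_ERR(obj);
 *		ret = drm_gem_handle_create(file, &obj->base, &args->handle);
 *		drm_gem_object_put(&obj->base);
 *		return ret;
 *	}
 */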
/**
* drm_gem_free_mmap_offset - release a fake mmap offset for an object
* @obj: obj in question
*
* This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
*
* Note that drm_gem_object_release() already calls this function, so drivers
* don't have to take care of releasing the mmap offset themselves when freeing
* the GEM object.
*/
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
/**
* drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
* @obj: obj in question
* @size: the virtual size
*
* GEM memory mapping works by handing back to userspace a fake mmap offset
* it can use in a subsequent mmap(2) call. The DRM core code then looks
* up the object based on the offset and sets up the various memory mapping
* structures.
*
* This routine allocates and attaches a fake offset for @obj, in cases where
* the virtual size differs from the physical size (ie. &drm_gem_object.size).
* Otherwise just use drm_gem_create_mmap_offset().
*
* This function is idempotent and handles an already allocated mmap offset
* transparently. Drivers do not need to check for this case.
*/
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
struct drm_device *dev = obj->dev;
return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
/**
* drm_gem_create_mmap_offset - create a fake mmap offset for an object
* @obj: obj in question
*
* GEM memory mapping works by handing back to userspace a fake mmap offset
* it can use in a subsequent mmap(2) call. The DRM core code then looks
* up the object based on the offset and sets up the various memory mapping
* structures.
*
* This routine allocates and attaches a fake offset for @obj.
*
* Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
* the fake offset again.
*/
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
/*
* Move folios to appropriate lru and release the folios, decrementing the
* ref count of those folios.
*/
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
check_move_unevictable_folios(fbatch);
__folio_batch_release(fbatch);
cond_resched();
}
/**
* drm_gem_get_pages - helper to allocate backing pages for a GEM object
* from shmem
* @obj: obj in question
*
* This reads the page-array of the shmem-backing storage of the given gem
* object. An array of pages is returned. If a page is not allocated or
* swapped-out, this will allocate/swap-in the required pages. Note that the
* whole object is covered by the page-array and pinned in memory.
*
* Use drm_gem_put_pages() to release the array and unpin all pages.
*
* This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
* If you require other GFP-masks, you have to do those allocations yourself.
*
* Note that you are not allowed to change gfp-zones during runtime. That is,
* shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
* set during initialization. If you have special zone constraints, set them
* after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
* to keep pages in the required zone during swap-in.
*
* This function is only valid on objects initialized with
* drm_gem_object_init(), but not for those initialized with
* drm_gem_private_object_init() only.
*/
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
struct address_space *mapping;
struct page **pages;
struct folio *folio;
struct folio_batch fbatch;
int i, j, npages;
if (WARN_ON(!obj->filp))
return ERR_PTR(-EINVAL);
/* This is the shared memory object that backs the GEM resource */
mapping = obj->filp->f_mapping;
/* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless
* driver author is doing something really wrong:
*/
WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
npages = obj->size >> PAGE_SHIFT;
pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (pages == NULL)
return ERR_PTR(-ENOMEM);
mapping_set_unevictable(mapping);
i = 0;
while (i < npages) {
folio = shmem_read_folio_gfp(mapping, i,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
goto fail;
for (j = 0; j < folio_nr_pages(folio); j++, i++)
pages[i] = folio_file_page(folio, i);
/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
* correct region during swapin. Note that this requires
* __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
* so shmem can relocate pages during swapin if required.
*/
BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
(folio_pfn(folio) >= 0x00100000UL));
}
return pages;
fail:
mapping_clear_unevictable(mapping);
folio_batch_init(&fbatch);
j = 0;
while (j < i) {
struct folio *f = page_folio(pages[j]);
if (!folio_batch_add(&fbatch, f))
drm_gem_check_release_batch(&fbatch);
j += folio_nr_pages(f);
}
if (fbatch.nr)
drm_gem_check_release_batch(&fbatch);
kvfree(pages);
return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);
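/*
 * Usage sketch (illustrative): pairing drm_gem_get_pages() with
 * drm_gem_put_pages(). "my_obj" with a "pages" member is an assumed driver
 * structure, not part of this file.
 *
 *	static int my_obj_get_pages(struct my_obj *obj)
 *	{
 *		struct page **pages = drm_gem_get_pages(&obj->base);
 *
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *		obj->pages = pages;
 *		return 0;
 *	}
 *
 *	static void my_obj_put_pages(struct my_obj *obj)
 *	{
 *		drm_gem_put_pages(&obj->base, obj->pages, true, false);
 *		obj->pages = NULL;
 *	}
 */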
/**
* drm_gem_put_pages - helper to free backing pages for a GEM object
* @obj: obj in question
* @pages: pages to free
* @dirty: if true, pages will be marked as dirty
* @accessed: if true, the pages will be marked as accessed
*/
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed)
{
int i, npages;
struct address_space *mapping;
struct folio_batch fbatch;
mapping = file_inode(obj->filp)->i_mapping;
mapping_clear_unevictable(mapping);
/* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless
* driver author is doing something really wrong:
*/
WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
npages = obj->size >> PAGE_SHIFT;
folio_batch_init(&fbatch);
for (i = 0; i < npages; i++) {
struct folio *folio;
if (!pages[i])
continue;
folio = page_folio(pages[i]);
if (dirty)
folio_mark_dirty(folio);
if (accessed)
folio_mark_accessed(folio);
/* Undo the reference we took when populating the table */
if (!folio_batch_add(&fbatch, folio))
drm_gem_check_release_batch(&fbatch);
i += folio_nr_pages(folio) - 1;
}
if (folio_batch_count(&fbatch))
drm_gem_check_release_batch(&fbatch);
kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
struct drm_gem_object **objs)
{
int i, ret = 0;
struct drm_gem_object *obj;
spin_lock(&filp->table_lock);
for (i = 0; i < count; i++) {
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle[i]);
if (!obj) {
ret = -ENOENT;
break;
}
drm_gem_object_get(obj);
objs[i] = obj;
}
spin_unlock(&filp->table_lock);
return ret;
}
/**
* drm_gem_objects_lookup - look up GEM objects from an array of handles
* @filp: DRM file private data
* @bo_handles: user pointer to array of userspace handles
* @count: size of handle array
* @objs_out: returned pointer to array of drm_gem_object pointers
*
* Takes an array of userspace handles and returns a newly allocated array of
* GEM objects.
*
* For a single handle lookup, use drm_gem_object_lookup().
*
* Returns:
*
* @objs_out filled in with GEM object pointers. Returned GEM objects need to be
* released with drm_gem_object_put(). -ENOENT is returned on a lookup
* failure. 0 is returned on success.
*
*/
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out)
{
int ret;
u32 *handles;
struct drm_gem_object **objs;
if (!count)
return 0;
objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
GFP_KERNEL | __GFP_ZERO);
if (!objs)
return -ENOMEM;
*objs_out = objs;
handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
if (!handles) {
ret = -ENOMEM;
goto out;
}
if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
ret = -EFAULT;
DRM_DEBUG("Failed to copy in GEM handles\n");
goto out;
}
ret = objects_lookup(filp, handles, count, objs);
out:
kvfree(handles);
return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
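/*
 * Usage sketch (illustrative): looking up a handle array from a hypothetical
 * submit ioctl. The returned array is zero-initialized, so checking each
 * entry before dropping its reference also covers a partially failed lookup.
 * "args" and its fields are assumptions.
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file, u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	... on success, submit work using objs ...
 *	for (i = 0; objs && i < args->bo_count; i++)
 *		if (objs[i])
 *			drm_gem_object_put(objs[i]);
 *	kvfree(objs);
 */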
/**
* drm_gem_object_lookup - look up a GEM object from its handle
* @filp: DRM file private data
* @handle: userspace handle
*
* Returns:
*
* A reference to the object named by the handle if such exists on @filp, NULL
* otherwise.
*
* If looking up an array of handles, use drm_gem_objects_lookup().
*/
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
struct drm_gem_object *obj = NULL;
objects_lookup(filp, &handle, 1, &obj);
return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
* drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
* shared and/or exclusive fences.
* @filep: DRM file private data
* @handle: userspace handle
* @wait_all: if true, wait on all fences, else wait on just exclusive fence
* @timeout: timeout value in jiffies or zero to return immediately
*
* Returns:
*
* 0 on success, -ETIME if the wait timed out, or a negative error code on
* failure, e.g. -ERESTARTSYS if interrupted or -EINVAL if the handle lookup fails.
*/
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
bool wait_all, unsigned long timeout)
{
long ret;
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(filep, handle);
if (!obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
return -EINVAL;
}
ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
true, timeout);
if (ret == 0)
ret = -ETIME;
else if (ret > 0)
ret = 0;
drm_gem_object_put(obj);
return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
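/*
 * Usage sketch (illustrative): a hypothetical wait ioctl built on this
 * helper; "my_wait_args" and its fields are assumptions.
 *
 *	static int my_wait_ioctl(struct drm_device *dev, void *data,
 *				 struct drm_file *file)
 *	{
 *		struct my_wait_args *args = data;
 *		unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *		return drm_gem_dma_resv_wait(file, args->handle, true, timeout);
 *	}
 */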
/**
* drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
* @dev: drm_device
* @data: ioctl data
* @file_priv: drm file-private structure
*
* Releases the handle to an mm object.
*/
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_close *args = data;
int ret;
if (!drm_core_check_feature(dev, DRIVER_GEM))
return -EOPNOTSUPP;
ret = drm_gem_handle_delete(file_priv, args->handle);
return ret;
}
/**
* drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
* @dev: drm_device
* @data: ioctl data
* @file_priv: drm file-private structure
*
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
* is freed, the name goes away.
*/
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_flink *args = data;
struct drm_gem_object *obj;
int ret;
if (!drm_core_check_feature(dev, DRIVER_GEM))
return -EOPNOTSUPP;
obj = drm_gem_object_lookup(file_priv, args->handle);
if (obj == NULL)
return -ENOENT;
mutex_lock(&dev->object_name_lock);
/* prevent races with concurrent gem_close. */
if (obj->handle_count == 0) {
ret = -ENOENT;
goto err;
}
if (!obj->name) {
ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
if (ret < 0)
goto err;
obj->name = ret;
}
args->name = (uint64_t) obj->name;
ret = 0;
err:
mutex_unlock(&dev->object_name_lock);
drm_gem_object_put(obj);
return ret;
}
/**
* drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
* @dev: drm_device
* @data: ioctl data
* @file_priv: drm file-private structure
*
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_open *args = data;
struct drm_gem_object *obj;
int ret;
u32 handle;
if (!drm_core_check_feature(dev, DRIVER_GEM))
return -EOPNOTSUPP;
mutex_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
if (obj) {
drm_gem_object_get(obj);
} else {
mutex_unlock(&dev->object_name_lock);
return -ENOENT;
}
/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
if (ret)
goto err;
args->handle = handle;
args->size = obj->size;
err:
drm_gem_object_put(obj);
return ret;
}
/**
* drm_gem_open - initializes GEM file-private structures at devnode open time
* @dev: drm_device which is being opened by userspace
* @file_private: drm file-private structure to set up
*
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
idr_init_base(&file_private->object_idr, 1);
spin_lock_init(&file_private->table_lock);
}
/**
* drm_gem_release - release file-private GEM resources
* @dev: drm_device which is being closed by userspace
* @file_private: drm file-private structure to clean up
*
* Called at close time when the filp is going away.
*
* Releases any remaining references on objects by this filp.
*/
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, file_private);
idr_destroy(&file_private->object_idr);
}
/**
* drm_gem_object_release - release GEM buffer object resources
* @obj: GEM buffer object
*
* This releases any structures and resources used by @obj and is the inverse of
* drm_gem_object_init().
*/
void
drm_gem_object_release(struct drm_gem_object *obj)
{
if (obj->filp)
fput(obj->filp);
drm_gem_private_object_fini(obj);
drm_gem_free_mmap_offset(obj);
drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
/**
* drm_gem_object_free - free a GEM object
* @kref: kref of the object to free
*
* Called after the last reference to the object has been lost.
*
* Frees the object
*/
void
drm_gem_object_free(struct kref *kref)
{
struct drm_gem_object *obj =
container_of(kref, struct drm_gem_object, refcount);
if (WARN_ON(!obj->funcs->free))
return;
obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
/**
* drm_gem_vm_open - vma->ops->open implementation for GEM
* @vma: VM area structure
*
* This function implements the #vm_operations_struct open() callback for GEM
* drivers. This must be used together with drm_gem_vm_close().
*/
void drm_gem_vm_open(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);
/**
* drm_gem_vm_close - vma->ops->close implementation for GEM
* @vma: VM area structure
*
* This function implements the #vm_operations_struct close() callback for GEM
* drivers. This must be used together with drm_gem_vm_open().
*/
void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
/**
* drm_gem_mmap_obj - memory map a GEM object
* @obj: the GEM object to map
* @obj_size: the object size to be mapped, in bytes
* @vma: VMA for the area to be mapped
*
* Set up the VMA to prepare mapping of the GEM object using the GEM object's
* vm_ops. Depending on their requirements, GEM objects can either
* provide a fault handler in their vm_ops (in which case any accesses to
* the object will be trapped, to perform migration, GTT binding, surface
* register allocation, or performance monitoring), or mmap the buffer memory
* synchronously after calling drm_gem_mmap_obj.
*
* This function is mainly intended to implement the DMABUF mmap operation, when
* the GEM object is not looked up based on its fake offset. To implement the
* DRM mmap operation, drivers should use the drm_gem_mmap() function.
*
* drm_gem_mmap_obj() assumes the user is granted access to the buffer while
* drm_gem_mmap() prevents unprivileged users from mapping random objects. So
* callers must verify access restrictions before calling this helper.
*
* Returns 0 on success or -EINVAL if the object size is smaller than the VMA
* size, or if no vm_ops are provided.
*/
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma)
{
int ret;
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
* This reference is cleaned up by the corresponding vm_close
* (which should happen whether the vma was created by this call, or
* by a vm_open due to mremap or partial unmap or whatever).
*/
drm_gem_object_get(obj);
vma->vm_private_data = obj;
vma->vm_ops = obj->funcs->vm_ops;
if (obj->funcs->mmap) {
ret = obj->funcs->mmap(obj, vma);
if (ret)
goto err_drm_gem_object_put;
WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
} else {
if (!vma->vm_ops) {
ret = -EINVAL;
goto err_drm_gem_object_put;
}
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
}
return 0;
err_drm_gem_object_put:
drm_gem_object_put(obj);
return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
/**
* drm_gem_mmap - memory map routine for GEM objects
* @filp: DRM file pointer
* @vma: VMA for the area to be mapped
*
* If a driver supports GEM object mapping, mmap calls on the DRM file
* descriptor will end up here.
*
* Look up the GEM object based on the offset passed in (vma->vm_pgoff will
* contain the fake offset we created when the GTT map ioctl was called on
* the object) and map it with a call to drm_gem_mmap_obj().
*
* If the caller is not granted access to the buffer object, the mmap will fail
* with EACCES. Please see the vma manager for more information.
*/
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_gem_object *obj = NULL;
struct drm_vma_offset_node *node;
int ret;
if (drm_dev_is_unplugged(dev))
return -ENODEV;
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
vma->vm_pgoff,
vma_pages(vma));
if (likely(node)) {
obj = container_of(node, struct drm_gem_object, vma_node);
/*
* When the object is being freed, after it hits 0-refcnt it
* proceeds to tear down the object. In the process it will
* attempt to remove the VMA offset and so acquire this
* mgr->vm_lock. Therefore if we find an object with a 0-refcnt
* that matches our range, we know it is in the process of being
* destroyed and will be freed as soon as we release the lock -
* so we have to check for the 0-refcnted object and treat it as
* invalid.
*/
if (!kref_get_unless_zero(&obj->refcount))
obj = NULL;
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
if (!obj)
return -EINVAL;
if (!drm_vma_node_is_allowed(node, priv)) {
drm_gem_object_put(obj);
return -EACCES;
}
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);
drm_gem_object_put(obj);
return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
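/*
 * Usage sketch (illustrative): drivers normally reach drm_gem_mmap() through
 * their file_operations, most simply via the DEFINE_DRM_GEM_FOPS() macro,
 * which sets .mmap = drm_gem_mmap along with the other GEM defaults:
 *
 *	DEFINE_DRM_GEM_FOPS(my_driver_fops);
 *
 *	static const struct drm_driver my_driver = {
 *		.fops = &my_driver_fops,
 *		...
 *	};
 */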
void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj)
{
drm_printf_indent(p, indent, "name=%d\n", obj->name);
drm_printf_indent(p, indent, "refcount=%u\n",
kref_read(&obj->refcount));
drm_printf_indent(p, indent, "start=%08lx\n",
drm_vma_node_start(&obj->vma_node));
drm_printf_indent(p, indent, "size=%zu\n", obj->size);
drm_printf_indent(p, indent, "imported=%s\n",
str_yes_no(obj->import_attach));
if (obj->funcs->print_info)
obj->funcs->print_info(p, indent, obj);
}
int drm_gem_pin(struct drm_gem_object *obj)
{
if (obj->funcs->pin)
return obj->funcs->pin(obj);
return 0;
}
void drm_gem_unpin(struct drm_gem_object *obj)
{
if (obj->funcs->unpin)
obj->funcs->unpin(obj);
}
int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
dma_resv_assert_held(obj->resv);
if (!obj->funcs->vmap)
return -EOPNOTSUPP;
ret = obj->funcs->vmap(obj, map);
if (ret)
return ret;
else if (iosys_map_is_null(map))
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);
void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_assert_held(obj->resv);
if (iosys_map_is_null(map))
return;
if (obj->funcs->vunmap)
obj->funcs->vunmap(obj, map);
/* Always set the mapping to NULL. Callers may rely on this. */
iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);
int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
dma_resv_lock(obj->resv, NULL);
ret = drm_gem_vmap(obj, map);
dma_resv_unlock(obj->resv);
return ret;
}
EXPORT_SYMBOL(drm_gem_vmap_unlocked);
void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_lock(obj->resv, NULL);
drm_gem_vunmap(obj, map);
dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
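/*
 * Usage sketch (illustrative): short-lived CPU access through the unlocked
 * vmap helpers; the iosys_map accessors work for both system and I/O memory.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap_unlocked(obj, &map);
 *	if (ret)
 *		return ret;
 *	iosys_map_memset(&map, 0, 0, obj->size);
 *	drm_gem_vunmap_unlocked(obj, &map);
 */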
/**
* drm_gem_lock_reservations - Sets up the ww context and acquires
* the lock on an array of GEM objects.
*
* Once you've locked your reservations, you'll want to set up space
* for your shared fences (if applicable), submit your job, then
* drm_gem_unlock_reservations().
*
* @objs: drm_gem_objects to lock
* @count: Number of objects in @objs
* @acquire_ctx: struct ww_acquire_ctx that will be initialized as
* part of tracking this set of locked reservations.
*/
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx)
{
int contended = -1;
int i, ret;
ww_acquire_init(acquire_ctx, &reservation_ww_class);
retry:
if (contended != -1) {
struct drm_gem_object *obj = objs[contended];
ret = dma_resv_lock_slow_interruptible(obj->resv,
acquire_ctx);
if (ret) {
ww_acquire_fini(acquire_ctx);
return ret;
}
}
for (i = 0; i < count; i++) {
if (i == contended)
continue;
ret = dma_resv_lock_interruptible(objs[i]->resv,
acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++)
dma_resv_unlock(objs[j]->resv);
if (contended != -1 && contended >= i)
dma_resv_unlock(objs[contended]->resv);
if (ret == -EDEADLK) {
contended = i;
goto retry;
}
ww_acquire_fini(acquire_ctx);
return ret;
}
}
ww_acquire_done(acquire_ctx);
return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);
void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx)
{
int i;
for (i = 0; i < count; i++)
dma_resv_unlock(objs[i]->resv);
ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
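/*
 * Usage sketch (illustrative): the typical job-submission pairing of the two
 * helpers above. "job" and its fields are assumptions; note that fence slots
 * must be reserved before dma_resv_add_fence() may be called.
 *
 *	ret = drm_gem_lock_reservations(job->bos, job->bo_count, &ctx);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < job->bo_count; i++) {
 *		ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
 *		if (ret)
 *			goto unlock;
 *	}
 *	... push the job to the hardware, then publish its done fence ...
 *	for (i = 0; i < job->bo_count; i++)
 *		dma_resv_add_fence(job->bos[i]->resv, job->done_fence,
 *				   DMA_RESV_USAGE_WRITE);
 * unlock:
 *	drm_gem_unlock_reservations(job->bos, job->bo_count, &ctx);
 */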
/**
* drm_gem_lru_init - initialize a LRU
*
* @lru: The LRU to initialize
* @lock: The lock protecting the LRU
*/
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
lru->lock = lock;
lru->count = 0;
INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);
static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
obj->lru->count -= obj->size >> PAGE_SHIFT;
WARN_ON(obj->lru->count < 0);
list_del(&obj->lru_node);
obj->lru = NULL;
}
/**
* drm_gem_lru_remove - remove object from whatever LRU it is in
*
* If the object is currently in any LRU, remove it.
*
* @obj: The GEM object to remove from current LRU
*/
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
struct drm_gem_lru *lru = obj->lru;
if (!lru)
return;
mutex_lock(lru->lock);
drm_gem_lru_remove_locked(obj);
mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);
/**
* drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
*
* Like &drm_gem_lru_move_tail but the LRU lock must already be held
*
* @lru: The LRU to move the object into.
* @obj: The GEM object to move into this LRU
*/
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
lockdep_assert_held_once(lru->lock);
if (obj->lru)
drm_gem_lru_remove_locked(obj);
lru->count += obj->size >> PAGE_SHIFT;
list_add_tail(&obj->lru_node, &lru->list);
obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);
/**
* drm_gem_lru_move_tail - move the object to the tail of the LRU
*
* If the object is already in this LRU it will be moved to the
* tail. Otherwise it will be removed from whichever other LRU
* it is in (if any) and moved into this LRU.
*
* @lru: The LRU to move the object into.
* @obj: The GEM object to move into this LRU
*/
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
mutex_lock(lru->lock);
drm_gem_lru_move_tail_locked(lru, obj);
mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);
/**
* drm_gem_lru_scan - helper to implement shrinker.scan_objects
*
* If the shrink callback succeeds, it is expected that the driver
* move the object out of this LRU.
*
* If the LRU possibly contains active buffers, it is the responsibility
* of the shrink callback to check for this (ie. dma_resv_test_signaled())
* or if necessary block until the buffer becomes idle.
*
* @lru: The LRU to scan
* @nr_to_scan: The number of pages to try to reclaim
* @remaining: The number of pages left to reclaim, should be initialized by caller
* @shrink: Callback to try to shrink/reclaim the object.
*/
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
unsigned int nr_to_scan,
unsigned long *remaining,
bool (*shrink)(struct drm_gem_object *obj))
{
struct drm_gem_lru still_in_lru;
struct drm_gem_object *obj;
unsigned freed = 0;
drm_gem_lru_init(&still_in_lru, lru->lock);
mutex_lock(lru->lock);
while (freed < nr_to_scan) {
obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);
if (!obj)
break;
drm_gem_lru_move_tail_locked(&still_in_lru, obj);
/*
* If it's in the process of being freed, gem_object->free()
* may be blocked on lock waiting to remove it. So just
* skip it.
*/
if (!kref_get_unless_zero(&obj->refcount))
continue;
/*
* Now that we own a reference, we can drop the lock for the
* rest of the loop body, to reduce contention with other
* code paths that need the LRU lock
*/
mutex_unlock(lru->lock);
/*
* Note that this still needs to be trylock, since we can
* hit shrinker in response to trying to get backing pages
* for this obj (ie. while its lock is already held)
*/
if (!dma_resv_trylock(obj->resv)) {
*remaining += obj->size >> PAGE_SHIFT;
goto tail;
}
if (shrink(obj)) {
freed += obj->size >> PAGE_SHIFT;
/*
* If we succeeded in releasing the object's backing
* pages, we expect the driver to have moved the object
* out of this LRU
*/
WARN_ON(obj->lru == &still_in_lru);
WARN_ON(obj->lru == lru);
}
dma_resv_unlock(obj->resv);
tail:
drm_gem_object_put(obj);
mutex_lock(lru->lock);
}
/*
* Move objects we've skipped over out of the temporary still_in_lru
* back into this LRU
*/
list_for_each_entry (obj, &still_in_lru.list, lru_node)
obj->lru = lru;
list_splice_tail(&still_in_lru.list, &lru->list);
lru->count += still_in_lru.count;
mutex_unlock(lru->lock);
return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
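/*
 * Usage sketch (illustrative): a minimal shrinker scan_objects callback built
 * on drm_gem_lru_scan(). "my_dev" and "my_purge" are assumptions; my_purge()
 * is expected to drop the object's backing pages and move it off this LRU.
 *
 *	static unsigned long
 *	my_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 *	{
 *		struct my_dev *dev = container_of(shrinker, struct my_dev, shrinker);
 *		unsigned long remaining = 0;
 *		unsigned long freed;
 *
 *		freed = drm_gem_lru_scan(&dev->purge_lru, sc->nr_to_scan,
 *					 &remaining, my_purge);
 *		return freed ?: SHRINK_STOP;
 *	}
 */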
/**
* drm_gem_evict - helper to evict backing pages for a GEM object
* @obj: obj in question
*/
int drm_gem_evict(struct drm_gem_object *obj)
{
dma_resv_assert_held(obj->resv);
if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
return -EBUSY;
if (obj->funcs->evict)
return obj->funcs->evict(obj);
return 0;
}
EXPORT_SYMBOL(drm_gem_evict);
/* end of drivers/gpu/drm/drm_gem.c; drivers/gpu/drm/drm_format_helper.c follows */
// SPDX-License-Identifier: GPL-2.0 or MIT
/*
* Copyright (C) 2016 Noralf Trønnes
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/io.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <drm/drm_device.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
static unsigned int clip_offset(const struct drm_rect *clip, unsigned int pitch, unsigned int cpp)
{
return clip->y1 * pitch + clip->x1 * cpp;
}
/**
* drm_fb_clip_offset - Returns the clip rectangle's byte offset in a framebuffer
* @pitch: Framebuffer line pitch in bytes
* @format: Framebuffer format
* @clip: Clip rectangle
*
* Returns:
* The byte offset of the clip rectangle's top-left corner within the framebuffer.
*/
unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format,
const struct drm_rect *clip)
{
return clip_offset(clip, pitch, format->cpp[0]);
}
EXPORT_SYMBOL(drm_fb_clip_offset);
/* TODO: Make this function work with multi-plane formats. */
static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
const void *vaddr, const struct drm_framebuffer *fb,
const struct drm_rect *clip, bool vaddr_cached_hint,
void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
unsigned long linepixels = drm_rect_width(clip);
unsigned long lines = drm_rect_height(clip);
size_t sbuf_len = linepixels * fb->format->cpp[0];
void *stmp = NULL;
unsigned long i;
const void *sbuf;
/*
* Some source buffers, such as DMA memory, use write-combine
* caching, so reads are uncached. Speed up access by fetching
* one line at a time.
*/
if (!vaddr_cached_hint) {
stmp = kmalloc(sbuf_len, GFP_KERNEL);
if (!stmp)
return -ENOMEM;
}
if (!dst_pitch)
dst_pitch = drm_rect_width(clip) * dst_pixsize;
vaddr += clip_offset(clip, fb->pitches[0], fb->format->cpp[0]);
for (i = 0; i < lines; ++i) {
if (stmp)
sbuf = memcpy(stmp, vaddr, sbuf_len);
else
sbuf = vaddr;
xfrm_line(dst, sbuf, linepixels);
vaddr += fb->pitches[0];
dst += dst_pitch;
}
kfree(stmp);
return 0;
}
/* TODO: Make this function work with multi-plane formats. */
static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
const void *vaddr, const struct drm_framebuffer *fb,
const struct drm_rect *clip, bool vaddr_cached_hint,
void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
unsigned long linepixels = drm_rect_width(clip);
unsigned long lines = drm_rect_height(clip);
size_t dbuf_len = linepixels * dst_pixsize;
size_t stmp_off = round_up(dbuf_len, ARCH_KMALLOC_MINALIGN); /* for sbuf alignment */
size_t sbuf_len = linepixels * fb->format->cpp[0];
void *stmp = NULL;
unsigned long i;
const void *sbuf;
void *dbuf;
if (vaddr_cached_hint) {
dbuf = kmalloc(dbuf_len, GFP_KERNEL);
} else {
dbuf = kmalloc(stmp_off + sbuf_len, GFP_KERNEL);
stmp = dbuf + stmp_off;
}
if (!dbuf)
return -ENOMEM;
if (!dst_pitch)
dst_pitch = linepixels * dst_pixsize;
vaddr += clip_offset(clip, fb->pitches[0], fb->format->cpp[0]);
for (i = 0; i < lines; ++i) {
if (stmp)
sbuf = memcpy(stmp, vaddr, sbuf_len);
else
sbuf = vaddr;
xfrm_line(dbuf, sbuf, linepixels);
memcpy_toio(dst, dbuf, dbuf_len);
vaddr += fb->pitches[0];
dst += dst_pitch;
}
kfree(dbuf);
return 0;
}
/* TODO: Make this function work with multi-plane formats. */
static int drm_fb_xfrm(struct iosys_map *dst,
const unsigned int *dst_pitch, const u8 *dst_pixsize,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, bool vaddr_cached_hint,
void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
0, 0, 0, 0
};
if (!dst_pitch)
dst_pitch = default_dst_pitch;
/* TODO: handle src in I/O memory here */
if (dst[0].is_iomem)
return __drm_fb_xfrm_toio(dst[0].vaddr_iomem, dst_pitch[0], dst_pixsize[0],
src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line);
else
return __drm_fb_xfrm(dst[0].vaddr, dst_pitch[0], dst_pixsize[0],
src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line);
}
/**
* drm_fb_memcpy - Copy clip buffer
* @dst: Array of destination buffers
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
* @src: Array of source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
* This function copies parts of a framebuffer to display memory. Destination and
* framebuffer formats must match. No conversion takes place. The parameters @dst,
* @dst_pitch and @src refer to arrays. Each array must have at least as many entries
* as there are planes in @fb's format. Each entry stores the value for the format's
* respective color plane at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*/
void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
0, 0, 0, 0
};
const struct drm_format_info *format = fb->format;
unsigned int i, y, lines = drm_rect_height(clip);
if (!dst_pitch)
dst_pitch = default_dst_pitch;
for (i = 0; i < format->num_planes; ++i) {
unsigned int bpp_i = drm_format_info_bpp(format, i);
unsigned int cpp_i = DIV_ROUND_UP(bpp_i, 8);
size_t len_i = DIV_ROUND_UP(drm_rect_width(clip) * bpp_i, 8);
unsigned int dst_pitch_i = dst_pitch[i];
struct iosys_map dst_i = dst[i];
struct iosys_map src_i = src[i];
if (!dst_pitch_i)
dst_pitch_i = len_i;
iosys_map_incr(&src_i, clip_offset(clip, fb->pitches[i], cpp_i));
for (y = 0; y < lines; y++) {
/* TODO: handle src_i in I/O memory here */
iosys_map_memcpy_to(&dst_i, 0, src_i.vaddr, len_i);
iosys_map_incr(&src_i, fb->pitches[i]);
iosys_map_incr(&dst_i, dst_pitch_i);
}
}
}
EXPORT_SYMBOL(drm_fb_memcpy);
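/*
 * Usage sketch (illustrative): copying a damage rectangle into device memory
 * from a hypothetical plane-update path. Since the destination is not
 * clipped, it is first advanced to the clip's byte offset; "dst" and
 * "shadow_map" are assumed iosys_map arrays.
 *
 *	iosys_map_incr(&dst[0], drm_fb_clip_offset(fb->pitches[0], fb->format,
 *						   &damage));
 *	drm_fb_memcpy(dst, fb->pitches, shadow_map, fb, &damage);
 */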
static void drm_fb_swab16_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u16 *dbuf16 = dbuf;
const u16 *sbuf16 = sbuf;
const u16 *send16 = sbuf16 + pixels;
while (sbuf16 < send16)
*dbuf16++ = swab16(*sbuf16++);
}
static void drm_fb_swab32_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u32 *dbuf32 = dbuf;
const u32 *sbuf32 = sbuf;
const u32 *send32 = sbuf32 + pixels;
while (sbuf32 < send32)
*dbuf32++ = swab32(*sbuf32++);
}
/**
* drm_fb_swab - Swap bytes into clip buffer
* @dst: Array of destination buffers
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
* @src: Array of source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
* @cached: Source buffer is mapped cached (eg. not write-combined)
*
* This function copies parts of a framebuffer to display memory and swaps per-pixel
* bytes during the process. Destination and framebuffer formats must match. The
* parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
* least as many entries as there are planes in @fb's format. Each entry stores the
* value for the format's respective color plane at the same index. If @cached is
* false a temporary buffer is used to cache one pixel line at a time to speed up
* slow uncached reads.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*/
void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, bool cached)
{
const struct drm_format_info *format = fb->format;
u8 cpp = DIV_ROUND_UP(drm_format_info_bpp(format, 0), 8);
void (*swab_line)(void *dbuf, const void *sbuf, unsigned int npixels);
switch (cpp) {
case 4:
swab_line = drm_fb_swab32_line;
break;
case 2:
swab_line = drm_fb_swab16_line;
break;
default:
drm_warn_once(fb->dev, "Format %p4cc has unsupported pixel size.\n",
&format->format);
return;
}
drm_fb_xfrm(dst, dst_pitch, &cpp, src, fb, clip, cached, swab_line);
}
EXPORT_SYMBOL(drm_fb_swab);
static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u32 pix;
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
dbuf8[x] = ((pix & 0x00e00000) >> 16) |
((pix & 0x0000e000) >> 11) |
((pix & 0x000000c0) >> 6);
}
}
/**
* drm_fb_xrgb8888_to_rgb332 - Convert XRGB8888 to RGB332 clip buffer
* @dst: Array of RGB332 destination buffers
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. The parameters @dst, @dst_pitch and @src
* refer to arrays. Each array must have at
* least as many entries as there are planes in @fb's format. Each entry stores the
* value for the format's respective color plane at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*
* Drivers can use this function for RGB332 devices that don't support XRGB8888 natively.
*/
void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
1,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
drm_fb_xrgb8888_to_rgb332_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le16 *dbuf16 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
u32 pix;
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
val16 = ((pix & 0x00F80000) >> 8) |
((pix & 0x0000FC00) >> 5) |
((pix & 0x000000F8) >> 3);
dbuf16[x] = cpu_to_le16(val16);
}
}
/* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */
static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
unsigned int pixels)
{
__le16 *dbuf16 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
u32 pix;
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
val16 = ((pix & 0x00F80000) >> 8) |
((pix & 0x0000FC00) >> 5) |
((pix & 0x000000F8) >> 3);
dbuf16[x] = cpu_to_le16(swab16(val16));
}
}
/**
* drm_fb_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer
* @dst: Array of RGB565 destination buffers
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
* @swab: Swap bytes
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. The parameters @dst, @dst_pitch and @src
* refer to arrays. Each array must have at
* least as many entries as there are planes in @fb's format. Each entry stores the
* value for the format's respective color plane at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*
* Drivers can use this function for RGB565 devices that don't support XRGB8888 natively.
*/
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, bool swab)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels);
if (swab)
xfrm_line = drm_fb_xrgb8888_to_rgb565_swab_line;
else
xfrm_line = drm_fb_xrgb8888_to_rgb565_line;
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, xfrm_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
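/*
 * Usage sketch (illustrative): converting the damaged region of an XRGB8888
 * framebuffer for an RGB565-only device, as SPI panel drivers commonly do;
 * "dst" and "src" are assumed iosys_map arrays and "rect" the damage clip.
 *
 *	drm_fb_xrgb8888_to_rgb565(dst, NULL, src, fb, &rect, false);
 */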
static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le16 *dbuf16 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
u32 pix;
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
val16 = ((pix & 0x00f80000) >> 9) |
((pix & 0x0000f800) >> 6) |
((pix & 0x000000f8) >> 3);
dbuf16[x] = cpu_to_le16(val16);
}
}
/**
* drm_fb_xrgb8888_to_xrgb1555 - Convert XRGB8888 to XRGB1555 clip buffer
* @dst: Array of XRGB1555 destination buffers
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
* This function copies parts of a framebuffer to display memory and converts
* the color format during the process. The parameters @dst, @dst_pitch and
* @src refer to arrays. Each array must have at least as many entries as
* there are planes in @fb's format. Each entry stores the value for the
* format's respective color plane at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*
* Drivers can use this function for XRGB1555 devices that don't support
* XRGB8888 natively.
*/
void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
drm_fb_xrgb8888_to_xrgb1555_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555);
static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le16 *dbuf16 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
u32 pix;
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
val16 = BIT(15) | /* set alpha bit */
((pix & 0x00f80000) >> 9) |
((pix & 0x0000f800) >> 6) |
((pix & 0x000000f8) >> 3);
dbuf16[x] = cpu_to_le16(val16);
}
}
/**
* drm_fb_xrgb8888_to_argb1555 - Convert XRGB8888 to ARGB1555 clip buffer
* @dst: Array of ARGB1555 destination buffers
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
* This function copies parts of a framebuffer to display memory and converts
* the color format during the process. The parameters @dst, @dst_pitch and
* @src refer to arrays. Each array must have at least as many entries as
* there are planes in @fb's format. Each entry stores the value for the
* format's respective color plane at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*
* Drivers can use this function for ARGB1555 devices that don't support
* XRGB8888 natively. It sets an opaque alpha channel as part of the conversion.
*/
void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
drm_fb_xrgb8888_to_argb1555_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555);
static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le16 *dbuf16 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
u32 pix;
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
val16 = ((pix & 0x00f80000) >> 8) |
((pix & 0x0000f800) >> 5) |
((pix & 0x000000f8) >> 2) |
BIT(0); /* set alpha bit */
dbuf16[x] = cpu_to_le16(val16);
}
}
/**
* drm_fb_xrgb8888_to_rgba5551 - Convert XRGB8888 to RGBA5551 clip buffer
* @dst: Array of RGBA5551 destination buffers
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
* This function copies parts of a framebuffer to display memory and converts
* the color format during the process. The parameters @dst, @dst_pitch and
* @src refer to arrays. Each array must have at least as many entries as
* there are planes in @fb's format. Each entry stores the value for the
* format's respective color plane at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*
* Drivers can use this function for RGBA5551 devices that don't support
* XRGB8888 natively. It sets an opaque alpha channel as part of the conversion.
*/
void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
drm_fb_xrgb8888_to_rgba5551_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551);
static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u32 pix;
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
/* write blue-green-red to output in little endianness */
*dbuf8++ = (pix & 0x000000FF) >> 0;
*dbuf8++ = (pix & 0x0000FF00) >> 8;
*dbuf8++ = (pix & 0x00FF0000) >> 16;
}
}
/**
* drm_fb_xrgb8888_to_rgb888 - Convert XRGB8888 to RGB888 clip buffer
* @dst: Array of RGB888 destination buffers
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
 * This function copies parts of a framebuffer to display memory and converts
 * the color format during the process. The parameters @dst, @dst_pitch and
 * @src refer to arrays. Each array must have at least as many entries as
 * there are planes in @fb's format. Each entry stores the value for the
 * format's respective color plane at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*
* Drivers can use this function for RGB888 devices that don't natively
* support XRGB8888.
*/
void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
3,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
drm_fb_xrgb8888_to_rgb888_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le32 *dbuf32 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u32 pix;
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
pix |= GENMASK(31, 24); /* fill alpha bits */
dbuf32[x] = cpu_to_le32(pix);
}
}
/**
* drm_fb_xrgb8888_to_argb8888 - Convert XRGB8888 to ARGB8888 clip buffer
* @dst: Array of ARGB8888 destination buffers
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
 * @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. The parameters @dst, @dst_pitch and @src refer
* to arrays. Each array must have at least as many entries as there are planes in
* @fb's format. Each entry stores the value for the format's respective color plane
* at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*
* Drivers can use this function for ARGB8888 devices that don't support XRGB8888
* natively. It sets an opaque alpha channel as part of the conversion.
*/
void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
drm_fb_xrgb8888_to_argb8888_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le32 *dbuf32 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u32 pix;
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
pix = ((pix & 0x00ff0000) >> 16) << 0 |
((pix & 0x0000ff00) >> 8) << 8 |
((pix & 0x000000ff) >> 0) << 16 |
GENMASK(31, 24); /* fill alpha bits */
*dbuf32++ = cpu_to_le32(pix);
}
}
static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src,
const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
drm_fb_xrgb8888_to_abgr8888_line);
}
static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le32 *dbuf32 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u32 pix;
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
pix = ((pix & 0x00ff0000) >> 16) << 0 |
((pix & 0x0000ff00) >> 8) << 8 |
((pix & 0x000000ff) >> 0) << 16 |
((pix & 0xff000000) >> 24) << 24;
*dbuf32++ = cpu_to_le32(pix);
}
}
static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src,
const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
drm_fb_xrgb8888_to_xbgr8888_line);
}
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le32 *dbuf32 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u32 val32;
u32 pix;
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
val32 = ((pix & 0x000000FF) << 2) |
((pix & 0x0000FF00) << 4) |
((pix & 0x00FF0000) << 6);
pix = val32 | ((val32 >> 8) & 0x00300C03);
*dbuf32++ = cpu_to_le32(pix);
}
}
/**
* drm_fb_xrgb8888_to_xrgb2101010 - Convert XRGB8888 to XRGB2101010 clip buffer
* @dst: Array of XRGB2101010 destination buffers
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
 * This function copies parts of a framebuffer to display memory and converts
 * the color format during the process. The parameters @dst, @dst_pitch and
 * @src refer to arrays. Each array must have at least as many entries as
 * there are planes in @fb's format. Each entry stores the value for the
 * format's respective color plane at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*
* Drivers can use this function for XRGB2101010 devices that don't support XRGB8888
* natively.
*/
void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
drm_fb_xrgb8888_to_xrgb2101010_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le32 *dbuf32 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u32 val32;
u32 pix;
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
val32 = ((pix & 0x000000ff) << 2) |
((pix & 0x0000ff00) << 4) |
((pix & 0x00ff0000) << 6);
pix = GENMASK(31, 30) | /* set alpha bits */
val32 | ((val32 >> 8) & 0x00300c03);
*dbuf32++ = cpu_to_le32(pix);
}
}
/**
* drm_fb_xrgb8888_to_argb2101010 - Convert XRGB8888 to ARGB2101010 clip buffer
* @dst: Array of ARGB2101010 destination buffers
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
* This function copies parts of a framebuffer to display memory and converts
* the color format during the process. The parameters @dst, @dst_pitch and
* @src refer to arrays. Each array must have at least as many entries as
* there are planes in @fb's format. Each entry stores the value for the
* format's respective color plane at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*
* Drivers can use this function for ARGB2101010 devices that don't support XRGB8888
* natively.
*/
void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
drm_fb_xrgb8888_to_argb2101010_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010);
static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
for (x = 0; x < pixels; x++) {
u32 pix = le32_to_cpu(sbuf32[x]);
u8 r = (pix & 0x00ff0000) >> 16;
u8 g = (pix & 0x0000ff00) >> 8;
u8 b = pix & 0x000000ff;
		/* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B, approximated as (3R + 6G + B) / 10 */
*dbuf8++ = (3 * r + 6 * g + b) / 10;
}
}
/**
* drm_fb_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
* @dst: Array of 8-bit grayscale destination buffers
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
 * This function copies parts of a framebuffer to display memory and converts
 * the color format during the process. The parameters @dst, @dst_pitch and
 * @src refer to arrays. Each array must have at least as many entries as
 * there are planes in @fb's format. Each entry stores the value for the
 * format's respective color plane at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*
 * DRM doesn't have native monochrome or grayscale support. Drivers can use this
 * function for grayscale devices that don't support XRGB8888 natively. Such
 * drivers can announce the commonly supported XR24 format to userspace and use
 * this function to convert to the native format. Monochrome drivers will use the
 * most significant bit, where 1 means foreground color and 0 background color.
 * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
*/
void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
1,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
drm_fb_xrgb8888_to_gray8_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
/**
* drm_fb_blit - Copy parts of a framebuffer to display memory
* @dst: Array of display-memory addresses to copy to
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
* @dst_format: FOURCC code of the display's color format
* @src: The framebuffer memory to copy from
* @fb: The framebuffer to copy from
* @clip: Clip rectangle area to copy
*
* This function copies parts of a framebuffer to display memory. If the
* formats of the display and the framebuffer mismatch, the blit function
* will attempt to convert between them during the process. The parameters @dst,
* @dst_pitch and @src refer to arrays. Each array must have at least as many
* entries as there are planes in @dst_format's format. Each entry stores the
* value for the format's respective color plane at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*
* Returns:
* 0 on success, or
* -EINVAL if the color-format conversion failed, or
* a negative error code otherwise.
*/
int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
uint32_t fb_format = fb->format->format;
if (fb_format == dst_format) {
drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
return 0;
} else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) {
drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
return 0;
} else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) {
drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
return 0;
} else if (fb_format == DRM_FORMAT_XRGB8888) {
if (dst_format == DRM_FORMAT_RGB565) {
drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, false);
return 0;
} else if (dst_format == DRM_FORMAT_XRGB1555) {
drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip);
return 0;
} else if (dst_format == DRM_FORMAT_ARGB1555) {
drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip);
return 0;
} else if (dst_format == DRM_FORMAT_RGBA5551) {
drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip);
return 0;
} else if (dst_format == DRM_FORMAT_RGB888) {
drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip);
return 0;
} else if (dst_format == DRM_FORMAT_ARGB8888) {
drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip);
return 0;
} else if (dst_format == DRM_FORMAT_XBGR8888) {
drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip);
return 0;
} else if (dst_format == DRM_FORMAT_ABGR8888) {
drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip);
return 0;
} else if (dst_format == DRM_FORMAT_XRGB2101010) {
drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip);
return 0;
} else if (dst_format == DRM_FORMAT_ARGB2101010) {
drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip);
return 0;
} else if (dst_format == DRM_FORMAT_BGRX8888) {
drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
return 0;
}
}
drm_warn_once(fb->dev, "No conversion helper from %p4cc to %p4cc found.\n",
&fb_format, &dst_format);
return -EINVAL;
}
EXPORT_SYMBOL(drm_fb_blit);
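/*
 * Example (hedged sketch; mydrv->native_format and the shadow-plane source
 * are hypothetical): a driver can let drm_fb_blit() pick the right
 * conversion helper in its plane-update path:
 *
 *	ret = drm_fb_blit(&dst, NULL, mydrv->native_format,
 *			  &shadow_plane_state->data[0], fb, &damage);
 *	if (ret)
 *		return ret;
 */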
static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
const u8 *sbuf8 = sbuf;
while (pixels) {
unsigned int i, bits = min(pixels, 8U);
u8 byte = 0;
for (i = 0; i < bits; i++, pixels--) {
if (*sbuf8++ >= 128)
byte |= BIT(i);
}
*dbuf8++ = byte;
}
}
/**
* drm_fb_xrgb8888_to_mono - Convert XRGB8888 to monochrome
* @dst: Array of monochrome destination buffers (0=black, 1=white)
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
 * This function copies parts of a framebuffer to display memory and converts
 * the color format during the process. The parameters @dst, @dst_pitch and
 * @src refer to arrays. Each array must have at least as many entries as
 * there are planes in @fb's format. Each entry stores the value for the
 * format's respective color plane at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner). The first pixel (upper left corner of the clip rectangle) will
* be converted and copied to the first bit (LSB) in the first byte of the monochrome
 * destination buffer. If the caller requires that the first pixel in a byte be
 * located at an x-coordinate that is a multiple of 8, then it must itself
 * supply a suitable clip rectangle.
*
* DRM doesn't have native monochrome support. Drivers can use this function for
* monochrome devices that don't support XRGB8888 natively. Such drivers can
* announce the commonly supported XR24 format to userspace and use this function
* to convert to the native format.
*
* This function uses drm_fb_xrgb8888_to_gray8() to convert to grayscale and
* then the result is converted from grayscale to monochrome.
*/
void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
0, 0, 0, 0
};
unsigned int linepixels = drm_rect_width(clip);
unsigned int lines = drm_rect_height(clip);
unsigned int cpp = fb->format->cpp[0];
unsigned int len_src32 = linepixels * cpp;
struct drm_device *dev = fb->dev;
void *vaddr = src[0].vaddr;
unsigned int dst_pitch_0;
unsigned int y;
u8 *mono = dst[0].vaddr, *gray8;
u32 *src32;
if (drm_WARN_ON(dev, fb->format->format != DRM_FORMAT_XRGB8888))
return;
if (!dst_pitch)
dst_pitch = default_dst_pitch;
dst_pitch_0 = dst_pitch[0];
/*
* The mono destination buffer contains 1 bit per pixel
*/
if (!dst_pitch_0)
dst_pitch_0 = DIV_ROUND_UP(linepixels, 8);
/*
* The dma memory is write-combined so reads are uncached.
* Speed up by fetching one line at a time.
*
	 * Also, format conversion from XR24 to monochrome is done
	 * line-by-line, with 8-bit grayscale as an intermediate step.
	 *
	 * Allocate a buffer to be used both for copying from the dma
	 * memory and for storing the intermediate grayscale line pixels.
*/
src32 = kmalloc(len_src32 + linepixels, GFP_KERNEL);
if (!src32)
return;
gray8 = (u8 *)src32 + len_src32;
vaddr += clip_offset(clip, fb->pitches[0], cpp);
for (y = 0; y < lines; y++) {
src32 = memcpy(src32, vaddr, len_src32);
drm_fb_xrgb8888_to_gray8_line(gray8, src32, linepixels);
drm_fb_gray8_to_mono_line(mono, gray8, linepixels);
vaddr += fb->pitches[0];
mono += dst_pitch_0;
}
kfree(src32);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
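/*
 * Example (illustrative only; the 128x64 panel and mydrv->tx_buf are
 * hypothetical): for a 128-pixel-wide monochrome panel the default
 * destination pitch is DIV_ROUND_UP(128, 8) = 16 bytes per line, so a
 * full-screen update could look like:
 *
 *	struct drm_rect clip = DRM_RECT_INIT(0, 0, 128, 64);
 *	struct iosys_map dst = IOSYS_MAP_INIT_VADDR(mydrv->tx_buf);
 *
 *	drm_fb_xrgb8888_to_mono(&dst, NULL, &shadow_plane_state->data[0],
 *				fb, &clip);
 */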
static uint32_t drm_fb_nonalpha_fourcc(uint32_t fourcc)
{
/* only handle formats with depth != 0 and alpha channel */
switch (fourcc) {
case DRM_FORMAT_ARGB1555:
return DRM_FORMAT_XRGB1555;
case DRM_FORMAT_ABGR1555:
return DRM_FORMAT_XBGR1555;
case DRM_FORMAT_RGBA5551:
return DRM_FORMAT_RGBX5551;
case DRM_FORMAT_BGRA5551:
return DRM_FORMAT_BGRX5551;
case DRM_FORMAT_ARGB8888:
return DRM_FORMAT_XRGB8888;
case DRM_FORMAT_ABGR8888:
return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_RGBA8888:
return DRM_FORMAT_RGBX8888;
case DRM_FORMAT_BGRA8888:
return DRM_FORMAT_BGRX8888;
case DRM_FORMAT_ARGB2101010:
return DRM_FORMAT_XRGB2101010;
case DRM_FORMAT_ABGR2101010:
return DRM_FORMAT_XBGR2101010;
case DRM_FORMAT_RGBA1010102:
return DRM_FORMAT_RGBX1010102;
case DRM_FORMAT_BGRA1010102:
return DRM_FORMAT_BGRX1010102;
}
return fourcc;
}
static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t fourcc)
{
const uint32_t *fourccs_end = fourccs + nfourccs;
while (fourccs < fourccs_end) {
if (*fourccs == fourcc)
return true;
++fourccs;
}
return false;
}
/**
* drm_fb_build_fourcc_list - Filters a list of supported color formats against
* the device's native formats
* @dev: DRM device
* @native_fourccs: 4CC codes of natively supported color formats
* @native_nfourccs: The number of entries in @native_fourccs
* @fourccs_out: Returns 4CC codes of supported color formats
* @nfourccs_out: The number of available entries in @fourccs_out
*
 * This function creates a list of supported color formats from natively
 * supported formats and additional emulated formats.
 * Most userspace programs expect at least support for XRGB8888 on the
 * primary plane. Devices that have to emulate the format, and possibly
 * others, can use drm_fb_build_fourcc_list() to create a list of supported
 * color formats. The returned list can be handed over to
 * drm_universal_plane_init() et al. Native formats will go before emulated
 * formats. Native formats with an alpha channel will be replaced by their
 * non-alpha equivalents, as primary planes usually don't support alpha.
 * Other heuristics might be applied to optimize the order. Formats near the
 * beginning of the list are usually preferred over formats near the end.
*
* Returns:
 * The number of color-format 4CC codes returned in @fourccs_out.
*/
size_t drm_fb_build_fourcc_list(struct drm_device *dev,
const u32 *native_fourccs, size_t native_nfourccs,
u32 *fourccs_out, size_t nfourccs_out)
{
/*
* XRGB8888 is the default fallback format for most of userspace
* and it's currently the only format that should be emulated for
* the primary plane. Only if there's ever another default fallback,
* it should be added here.
*/
static const uint32_t extra_fourccs[] = {
DRM_FORMAT_XRGB8888,
};
static const size_t extra_nfourccs = ARRAY_SIZE(extra_fourccs);
u32 *fourccs = fourccs_out;
const u32 *fourccs_end = fourccs_out + nfourccs_out;
size_t i;
/*
* The device's native formats go first.
*/
for (i = 0; i < native_nfourccs; ++i) {
/*
		 * Several DTs, boot loaders and firmware report native
		 * alpha formats where the device actually uses the non-alpha
		 * variants. So replace alpha formats with non-alpha formats.
*/
u32 fourcc = drm_fb_nonalpha_fourcc(native_fourccs[i]);
if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
continue; /* skip duplicate entries */
} else if (fourccs == fourccs_end) {
drm_warn(dev, "Ignoring native format %p4cc\n", &fourcc);
continue; /* end of available output buffer */
}
drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
*fourccs = fourcc;
++fourccs;
}
/*
* The extra formats, emulated by the driver, go second.
*/
for (i = 0; (i < extra_nfourccs) && (fourccs < fourccs_end); ++i) {
u32 fourcc = extra_fourccs[i];
if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
continue; /* skip duplicate and native entries */
} else if (fourccs == fourccs_end) {
drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
continue; /* end of available output buffer */
}
drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
*fourccs = fourcc;
++fourccs;
}
return fourccs - fourccs_out;
}
EXPORT_SYMBOL(drm_fb_build_fourcc_list);
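/*
 * Example (hedged sketch; mydrv_formats, mydrv_plane_funcs and the plane-init
 * parameters are hypothetical): a driver with a single native format can
 * build the final format list and hand it to drm_universal_plane_init():
 *
 *	static const u32 mydrv_formats[] = { DRM_FORMAT_RGB565 };
 *	u32 fourccs[8];
 *	size_t nfourccs;
 *
 *	nfourccs = drm_fb_build_fourcc_list(dev, mydrv_formats,
 *					    ARRAY_SIZE(mydrv_formats),
 *					    fourccs, ARRAY_SIZE(fourccs));
 *	ret = drm_universal_plane_init(dev, plane, 0, &mydrv_plane_funcs,
 *				       fourccs, nfourccs, NULL,
 *				       DRM_PLANE_TYPE_PRIMARY, NULL);
 */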
| linux-master | drivers/gpu/drm/drm_format_helper.c |
/*
* Copyright (C) 2016 Samsung Electronics Co.Ltd
* Authors:
* Marek Szyprowski <[email protected]>
*
* DRM core plane blending related functions
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <drm/drm_atomic.h>
#include <drm/drm_blend.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
/**
* DOC: overview
*
* The basic plane composition model supported by standard plane properties only
* has a source rectangle (in logical pixels within the &drm_framebuffer), with
* sub-pixel accuracy, which is scaled up to a pixel-aligned destination
* rectangle in the visible area of a &drm_crtc. The visible area of a CRTC is
* defined by the horizontal and vertical visible pixels (stored in @hdisplay
* and @vdisplay) of the requested mode (stored in &drm_crtc_state.mode). These
* two rectangles are both stored in the &drm_plane_state.
*
* For the atomic ioctl the following standard (atomic) properties on the plane object
* encode the basic plane composition model:
*
* SRC_X:
* X coordinate offset for the source rectangle within the
* &drm_framebuffer, in 16.16 fixed point. Must be positive.
* SRC_Y:
* Y coordinate offset for the source rectangle within the
* &drm_framebuffer, in 16.16 fixed point. Must be positive.
* SRC_W:
* Width for the source rectangle within the &drm_framebuffer, in 16.16
* fixed point. SRC_X plus SRC_W must be within the width of the source
* framebuffer. Must be positive.
* SRC_H:
* Height for the source rectangle within the &drm_framebuffer, in 16.16
* fixed point. SRC_Y plus SRC_H must be within the height of the source
* framebuffer. Must be positive.
* CRTC_X:
* X coordinate offset for the destination rectangle. Can be negative.
* CRTC_Y:
* Y coordinate offset for the destination rectangle. Can be negative.
* CRTC_W:
* Width for the destination rectangle. CRTC_X plus CRTC_W can extend past
* the currently visible horizontal area of the &drm_crtc.
* CRTC_H:
* Height for the destination rectangle. CRTC_Y plus CRTC_H can extend past
* the currently visible vertical area of the &drm_crtc.
* FB_ID:
* Mode object ID of the &drm_framebuffer this plane should scan out.
* CRTC_ID:
* Mode object ID of the &drm_crtc this plane should be connected to.
*
* Note that the source rectangle must fully lie within the bounds of the
* &drm_framebuffer. The destination rectangle can lie outside of the visible
* area of the current mode of the CRTC. It must be appropriately clipped by the
* driver, which can be done by calling drm_plane_helper_check_update(). Drivers
* are also allowed to round the subpixel sampling positions appropriately, but
* only to the next full pixel. No pixel outside of the source rectangle may
* ever be sampled, which is important when applying more sophisticated
* filtering than just a bilinear one when scaling. The filtering mode when
* scaling is unspecified.
*
* On top of this basic transformation additional properties can be exposed by
* the driver:
*
* alpha:
* Alpha is setup with drm_plane_create_alpha_property(). It controls the
* plane-wide opacity, from transparent (0) to opaque (0xffff). It can be
* combined with pixel alpha.
 * The pixel values in the framebuffers are expected to not be
 * pre-multiplied by the global alpha associated with the plane.
*
* rotation:
* Rotation is set up with drm_plane_create_rotation_property(). It adds a
* rotation and reflection step between the source and destination rectangles.
* Without this property the rectangle is only scaled, but not rotated or
* reflected.
*
 * Possible values:
 *
 * "rotate-<degrees>":
 *	Signals that a drm plane is rotated <degrees> degrees in the
 *	counter-clockwise direction.
 *
 * "reflect-<axis>":
 *	Signals that the contents of a drm plane are reflected along the
 *	<axis> axis, in the same way as mirroring.
*
* reflect-x::
*
* |o | | o|
* | | -> | |
* | v| |v |
*
* reflect-y::
*
* |o | | ^|
* | | -> | |
* | v| |o |
*
* zpos:
* Z position is set up with drm_plane_create_zpos_immutable_property() and
* drm_plane_create_zpos_property(). It controls the visibility of overlapping
* planes. Without this property the primary plane is always below the cursor
* plane, and ordering between all other planes is undefined. The positive
* Z axis points towards the user, i.e. planes with lower Z position values
* are underneath planes with higher Z position values. Two planes with the
* same Z position value have undefined ordering. Note that the Z position
* value can also be immutable, to inform userspace about the hard-coded
* stacking of planes, see drm_plane_create_zpos_immutable_property(). If
* any plane has a zpos property (either mutable or immutable), then all
* planes shall have a zpos property.
*
* pixel blend mode:
* Pixel blend mode is set up with drm_plane_create_blend_mode_property().
* It adds a blend mode for alpha blending equation selection, describing
* how the pixels from the current plane are composited with the
* background.
*
* Three alpha blending equations are defined:
*
* "None":
* Blend formula that ignores the pixel alpha::
*
* out.rgb = plane_alpha * fg.rgb +
* (1 - plane_alpha) * bg.rgb
*
* "Pre-multiplied":
* Blend formula that assumes the pixel color values
* have been already pre-multiplied with the alpha
* channel values::
*
* out.rgb = plane_alpha * fg.rgb +
* (1 - (plane_alpha * fg.alpha)) * bg.rgb
*
* "Coverage":
* Blend formula that assumes the pixel color values have not
* been pre-multiplied and will do so when blending them to the
* background color values::
*
* out.rgb = plane_alpha * fg.alpha * fg.rgb +
* (1 - (plane_alpha * fg.alpha)) * bg.rgb
*
* Using the following symbols:
*
* "fg.rgb":
* Each of the RGB component values from the plane's pixel
* "fg.alpha":
* Alpha component value from the plane's pixel. If the plane's
* pixel format has no alpha component, then this is assumed to be
* 1.0. In these cases, this property has no effect, as all three
* equations become equivalent.
* "bg.rgb":
* Each of the RGB component values from the background
* "plane_alpha":
* Plane alpha value set by the plane "alpha" property. If the
* plane does not expose the "alpha" property, then this is
* assumed to be 1.0
*
* Note that all the property extensions described here apply either to the
* plane or the CRTC (e.g. for the background color, which currently is not
* exposed and assumed to be black).
*
* SCALING_FILTER:
* Indicates scaling filter to be used for plane scaler
*
* The value of this property can be one of the following:
*
* Default:
* Driver's default scaling filter
* Nearest Neighbor:
* Nearest Neighbor scaling filter
*
* Drivers can set up this property for a plane by calling
* drm_plane_create_scaling_filter_property
*/
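/*
 * Worked example for the "Coverage" equation above (illustrative numbers
 * only): with plane_alpha = 0.5, fg.alpha = 0.5 and fg.rgb = 1.0 over a
 * black background (bg.rgb = 0.0):
 *
 *	out.rgb = 0.5 * 0.5 * 1.0 + (1 - 0.5 * 0.5) * 0.0 = 0.25
 */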
/**
* drm_plane_create_alpha_property - create a new alpha property
* @plane: drm plane
*
 * This function creates a generic, mutable alpha property and enables support
* for it in the DRM core. It is attached to @plane.
*
* The alpha property will be allowed to be within the bounds of 0
* (transparent) to 0xffff (opaque).
*
* Returns:
* 0 on success, negative error code on failure.
*/
int drm_plane_create_alpha_property(struct drm_plane *plane)
{
struct drm_property *prop;
prop = drm_property_create_range(plane->dev, 0, "alpha",
0, DRM_BLEND_ALPHA_OPAQUE);
if (!prop)
return -ENOMEM;
drm_object_attach_property(&plane->base, prop, DRM_BLEND_ALPHA_OPAQUE);
plane->alpha_property = prop;
if (plane->state)
plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
return 0;
}
EXPORT_SYMBOL(drm_plane_create_alpha_property);
/**
* drm_plane_create_rotation_property - create a new rotation property
* @plane: drm plane
* @rotation: initial value of the rotation property
* @supported_rotations: bitmask of supported rotations and reflections
*
* This creates a new property with the selected support for transformations.
*
 * Since a rotation by 180 degrees is the same as reflecting along both the x
 * and the y axis, the rotation property is somewhat redundant. Drivers can use
 * drm_rotation_simplify() to normalize values of this property.
*
* The property exposed to userspace is a bitmask property (see
* drm_property_create_bitmask()) called "rotation" and has the following
 * bitmask enumeration values:
*
* DRM_MODE_ROTATE_0:
* "rotate-0"
* DRM_MODE_ROTATE_90:
* "rotate-90"
* DRM_MODE_ROTATE_180:
* "rotate-180"
* DRM_MODE_ROTATE_270:
* "rotate-270"
* DRM_MODE_REFLECT_X:
* "reflect-x"
* DRM_MODE_REFLECT_Y:
* "reflect-y"
*
 * Rotation is the specified amount in degrees in the counter-clockwise
 * direction. The X and Y axes are within the source rectangle, i.e. the X/Y
 * axes before rotation. After reflection, the rotation is applied to the image
 * sampled from the source rectangle, before scaling it to fit the destination
 * rectangle.
*/
int drm_plane_create_rotation_property(struct drm_plane *plane,
unsigned int rotation,
unsigned int supported_rotations)
{
static const struct drm_prop_enum_list props[] = {
{ __builtin_ffs(DRM_MODE_ROTATE_0) - 1, "rotate-0" },
{ __builtin_ffs(DRM_MODE_ROTATE_90) - 1, "rotate-90" },
{ __builtin_ffs(DRM_MODE_ROTATE_180) - 1, "rotate-180" },
{ __builtin_ffs(DRM_MODE_ROTATE_270) - 1, "rotate-270" },
{ __builtin_ffs(DRM_MODE_REFLECT_X) - 1, "reflect-x" },
{ __builtin_ffs(DRM_MODE_REFLECT_Y) - 1, "reflect-y" },
};
struct drm_property *prop;
WARN_ON((supported_rotations & DRM_MODE_ROTATE_MASK) == 0);
WARN_ON(!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK));
WARN_ON(rotation & ~supported_rotations);
prop = drm_property_create_bitmask(plane->dev, 0, "rotation",
props, ARRAY_SIZE(props),
supported_rotations);
if (!prop)
return -ENOMEM;
drm_object_attach_property(&plane->base, prop, rotation);
if (plane->state)
plane->state->rotation = rotation;
plane->rotation_property = prop;
return 0;
}
EXPORT_SYMBOL(drm_plane_create_rotation_property);
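/*
 * Example (illustrative; the supported set is hardware-specific): a plane
 * that can rotate by 0 or 180 degrees and reflect along the X axis could
 * expose:
 *
 *	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
 *					   DRM_MODE_ROTATE_0 |
 *					   DRM_MODE_ROTATE_180 |
 *					   DRM_MODE_REFLECT_X);
 */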
/**
* drm_rotation_simplify() - Try to simplify the rotation
* @rotation: Rotation to be simplified
* @supported_rotations: Supported rotations
*
* Attempt to simplify the rotation to a form that is supported.
 * E.g. if the hardware supports everything except DRM_MODE_REFLECT_X
* one could call this function like this:
*
* drm_rotation_simplify(rotation, DRM_MODE_ROTATE_0 |
* DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
* DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_Y);
*
* to eliminate the DRM_MODE_REFLECT_X flag. Depending on what kind of
* transforms the hardware supports, this function may not
* be able to produce a supported transform, so the caller should
* check the result afterwards.
*/
unsigned int drm_rotation_simplify(unsigned int rotation,
unsigned int supported_rotations)
{
if (rotation & ~supported_rotations) {
rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
rotation = (rotation & DRM_MODE_REFLECT_MASK) |
BIT((ffs(rotation & DRM_MODE_ROTATE_MASK) + 1)
% 4);
}
return rotation;
}
EXPORT_SYMBOL(drm_rotation_simplify);
/**
* drm_plane_create_zpos_property - create mutable zpos property
* @plane: drm plane
* @zpos: initial value of zpos property
* @min: minimal possible value of zpos property
* @max: maximal possible value of zpos property
*
 * This function initializes a generic mutable zpos property and enables
 * support for it in the drm core. Drivers can then attach this property to
 * planes to enable support for a configurable plane arrangement during the
 * blending operation. Drivers that attach a mutable zpos property to any
 * plane should call the drm_atomic_normalize_zpos() helper during their
 * implementation of &drm_mode_config_funcs.atomic_check(), which will update
 * the normalized zpos values and store them in
 * &drm_plane_state.normalized_zpos. Usually min should be set to 0 and max
 * to the maximal number of planes for the given CRTC minus 1.
*
 * If the zpos of some planes cannot be changed (like fixed background or
 * cursor/topmost planes), drivers shall adjust the min/max values and assign
 * those planes immutable zpos properties with lower or higher values (for
 * more information, see the drm_plane_create_zpos_immutable_property()
 * function). In such a case drivers shall also assign proper initial zpos
 * values for all planes in their plane_reset() callbacks, so the planes are
 * always sorted properly.
*
* See also drm_atomic_normalize_zpos().
*
* The property exposed to userspace is called "zpos".
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_plane_create_zpos_property(struct drm_plane *plane,
unsigned int zpos,
unsigned int min, unsigned int max)
{
struct drm_property *prop;
prop = drm_property_create_range(plane->dev, 0, "zpos", min, max);
if (!prop)
return -ENOMEM;
drm_object_attach_property(&plane->base, prop, zpos);
plane->zpos_property = prop;
if (plane->state) {
plane->state->zpos = zpos;
plane->state->normalized_zpos = zpos;
}
return 0;
}
EXPORT_SYMBOL(drm_plane_create_zpos_property);
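/*
 * Example (hedged sketch; the plane count and initial value are made up):
 * a device with 4 freely stackable planes per CRTC could create the
 * property with min = 0 and max = 3, using the plane's index as the
 * initial zpos:
 *
 *	ret = drm_plane_create_zpos_property(plane, plane_index, 0, 3);
 */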
/**
 * drm_plane_create_zpos_immutable_property - create immutable zpos property
* @plane: drm plane
* @zpos: value of zpos property
*
 * This function initializes a generic immutable zpos property and enables
 * support for it in the drm core. Using this property, the driver lets
 * userspace query the arrangement of the planes for the blending operation
 * and notifies it that the hardware (or driver) doesn't support changing the
 * planes' order. For mutable zpos see drm_plane_create_zpos_property().
*
* The property exposed to userspace is called "zpos".
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
unsigned int zpos)
{
struct drm_property *prop;
prop = drm_property_create_range(plane->dev, DRM_MODE_PROP_IMMUTABLE,
"zpos", zpos, zpos);
if (!prop)
return -ENOMEM;
drm_object_attach_property(&plane->base, prop, zpos);
plane->zpos_property = prop;
if (plane->state) {
plane->state->zpos = zpos;
plane->state->normalized_zpos = zpos;
}
return 0;
}
EXPORT_SYMBOL(drm_plane_create_zpos_immutable_property);
static int drm_atomic_state_zpos_cmp(const void *a, const void *b)
{
const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
const struct drm_plane_state *sb = *(struct drm_plane_state **)b;
if (sa->zpos != sb->zpos)
return sa->zpos - sb->zpos;
else
return sa->plane->base.id - sb->plane->base.id;
}
static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
struct drm_atomic_state *state = crtc_state->state;
struct drm_device *dev = crtc->dev;
int total_planes = dev->mode_config.num_total_plane;
struct drm_plane_state **states;
struct drm_plane *plane;
int i, n = 0;
int ret = 0;
drm_dbg_atomic(dev, "[CRTC:%d:%s] calculating normalized zpos values\n",
crtc->base.id, crtc->name);
states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
if (!states)
return -ENOMEM;
/*
	 * The normalization process might create new states for planes whose
	 * normalized_zpos has to be recalculated.
*/
drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto done;
}
states[n++] = plane_state;
drm_dbg_atomic(dev, "[PLANE:%d:%s] processing zpos value %d\n",
plane->base.id, plane->name, plane_state->zpos);
}
sort(states, n, sizeof(*states), drm_atomic_state_zpos_cmp, NULL);
for (i = 0; i < n; i++) {
plane = states[i]->plane;
states[i]->normalized_zpos = i;
drm_dbg_atomic(dev, "[PLANE:%d:%s] normalized zpos value %d\n",
plane->base.id, plane->name, i);
}
crtc_state->zpos_changed = true;
done:
kfree(states);
return ret;
}
/**
* drm_atomic_normalize_zpos - calculate normalized zpos values for all crtcs
* @dev: DRM device
* @state: atomic state of DRM device
*
* This function calculates normalized zpos value for all modified planes in
* the provided atomic state of DRM device.
*
* For every CRTC this function checks new states of all planes assigned to
 * it and calculates normalized zpos values for these planes. Planes are compared
* first by their zpos values, then by plane id (if zpos is equal). The plane
* with lowest zpos value is at the bottom. The &drm_plane_state.normalized_zpos
* is then filled with unique values from 0 to number of active planes in crtc
* minus one.
*
* RETURNS
* Zero for success or -errno
*/
int drm_atomic_normalize_zpos(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
int i, ret = 0;
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
crtc = new_plane_state->crtc;
if (!crtc)
continue;
if (old_plane_state->zpos != new_plane_state->zpos) {
new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
new_crtc_state->zpos_changed = true;
}
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (old_crtc_state->plane_mask != new_crtc_state->plane_mask ||
new_crtc_state->zpos_changed) {
ret = drm_atomic_helper_crtc_normalize_zpos(crtc,
new_crtc_state);
if (ret)
return ret;
}
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_normalize_zpos);
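/*
 * Example (illustrative only; mydrv_atomic_check is hypothetical): drivers
 * with mutable zpos properties typically call the helper from their
 * &drm_mode_config_funcs.atomic_check implementation:
 *
 *	static int mydrv_atomic_check(struct drm_device *dev,
 *				      struct drm_atomic_state *state)
 *	{
 *		int ret = drm_atomic_normalize_zpos(dev, state);
 *
 *		if (ret)
 *			return ret;
 *		return drm_atomic_helper_check(dev, state);
 *	}
 */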
/**
* drm_plane_create_blend_mode_property - create a new blend mode property
* @plane: drm plane
* @supported_modes: bitmask of supported modes, must include
* BIT(DRM_MODE_BLEND_PREMULTI). Current DRM assumption is
* that alpha is premultiplied, and old userspace can break if
* the property defaults to anything else.
*
* This creates a new property describing the blend mode.
*
* The property exposed to userspace is an enumeration property (see
* drm_property_create_enum()) called "pixel blend mode" and has the
* following enumeration values:
*
* "None":
* Blend formula that ignores the pixel alpha.
*
* "Pre-multiplied":
* Blend formula that assumes the pixel color values have been already
* pre-multiplied with the alpha channel values.
*
* "Coverage":
* Blend formula that assumes the pixel color values have not been
* pre-multiplied and will do so when blending them to the background color
* values.
*
* RETURNS:
* Zero for success or -errno
*/
int drm_plane_create_blend_mode_property(struct drm_plane *plane,
unsigned int supported_modes)
{
struct drm_device *dev = plane->dev;
struct drm_property *prop;
static const struct drm_prop_enum_list props[] = {
{ DRM_MODE_BLEND_PIXEL_NONE, "None" },
{ DRM_MODE_BLEND_PREMULTI, "Pre-multiplied" },
{ DRM_MODE_BLEND_COVERAGE, "Coverage" },
};
unsigned int valid_mode_mask = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE);
int i;
if (WARN_ON((supported_modes & ~valid_mode_mask) ||
((supported_modes & BIT(DRM_MODE_BLEND_PREMULTI)) == 0)))
return -EINVAL;
prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
"pixel blend mode",
hweight32(supported_modes));
if (!prop)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(props); i++) {
int ret;
if (!(BIT(props[i].type) & supported_modes))
continue;
ret = drm_property_add_enum(prop, props[i].type,
props[i].name);
if (ret) {
drm_property_destroy(dev, prop);
return ret;
}
}
drm_object_attach_property(&plane->base, prop, DRM_MODE_BLEND_PREMULTI);
plane->blend_mode_property = prop;
return 0;
}
EXPORT_SYMBOL(drm_plane_create_blend_mode_property);
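/*
 * Example (hedged; the supported set is hardware-specific, but it must
 * include "Pre-multiplied"): hardware implementing all three blend
 * equations would pass:
 *
 *	drm_plane_create_blend_mode_property(plane,
 *					     BIT(DRM_MODE_BLEND_PIXEL_NONE) |
 *					     BIT(DRM_MODE_BLEND_PREMULTI) |
 *					     BIT(DRM_MODE_BLEND_COVERAGE));
 */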
| linux-master | drivers/gpu/drm/drm_blend.c |
/*
* Copyright (c) 2016 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <linux/uaccess.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_print.h>
#include <linux/dma-resv.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
int drm_modeset_register_all(struct drm_device *dev)
{
int ret;
ret = drm_plane_register_all(dev);
if (ret)
goto err_plane;
ret = drm_crtc_register_all(dev);
if (ret)
goto err_crtc;
ret = drm_encoder_register_all(dev);
if (ret)
goto err_encoder;
ret = drm_connector_register_all(dev);
if (ret)
goto err_connector;
drm_debugfs_late_register(dev);
return 0;
err_connector:
drm_encoder_unregister_all(dev);
err_encoder:
drm_crtc_unregister_all(dev);
err_crtc:
drm_plane_unregister_all(dev);
err_plane:
return ret;
}
void drm_modeset_unregister_all(struct drm_device *dev)
{
drm_connector_unregister_all(dev);
drm_encoder_unregister_all(dev);
drm_crtc_unregister_all(dev);
drm_plane_unregister_all(dev);
}
/**
* drm_mode_getresources - get graphics configuration
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Construct a set of configuration description structures and return
* them to the user, including CRTC, connector and framebuffer configuration.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_getresources(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_card_res *card_res = data;
struct drm_framebuffer *fb;
struct drm_connector *connector;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
int count, ret = 0;
uint32_t __user *fb_id;
uint32_t __user *crtc_id;
uint32_t __user *connector_id;
uint32_t __user *encoder_id;
struct drm_connector_list_iter conn_iter;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
mutex_lock(&file_priv->fbs_lock);
count = 0;
fb_id = u64_to_user_ptr(card_res->fb_id_ptr);
list_for_each_entry(fb, &file_priv->fbs, filp_head) {
if (count < card_res->count_fbs &&
put_user(fb->base.id, fb_id + count)) {
mutex_unlock(&file_priv->fbs_lock);
return -EFAULT;
}
count++;
}
card_res->count_fbs = count;
mutex_unlock(&file_priv->fbs_lock);
card_res->max_height = dev->mode_config.max_height;
card_res->min_height = dev->mode_config.min_height;
card_res->max_width = dev->mode_config.max_width;
card_res->min_width = dev->mode_config.min_width;
count = 0;
crtc_id = u64_to_user_ptr(card_res->crtc_id_ptr);
drm_for_each_crtc(crtc, dev) {
if (drm_lease_held(file_priv, crtc->base.id)) {
if (count < card_res->count_crtcs &&
put_user(crtc->base.id, crtc_id + count))
return -EFAULT;
count++;
}
}
card_res->count_crtcs = count;
count = 0;
encoder_id = u64_to_user_ptr(card_res->encoder_id_ptr);
drm_for_each_encoder(encoder, dev) {
if (count < card_res->count_encoders &&
put_user(encoder->base.id, encoder_id + count))
return -EFAULT;
count++;
}
card_res->count_encoders = count;
drm_connector_list_iter_begin(dev, &conn_iter);
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm_for_each_connector_iter(connector, &conn_iter) {
/* only expose writeback connectors if userspace understands them */
if (!file_priv->writeback_connectors &&
(connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
continue;
if (drm_lease_held(file_priv, connector->base.id)) {
if (count < card_res->count_connectors &&
put_user(connector->base.id, connector_id + count)) {
drm_connector_list_iter_end(&conn_iter);
return -EFAULT;
}
count++;
}
}
card_res->count_connectors = count;
drm_connector_list_iter_end(&conn_iter);
return ret;
}
/**
* drm_mode_config_reset - call ->reset callbacks
* @dev: drm device
*
* This functions calls all the crtc's, encoder's and connector's ->reset
* callback. Drivers can use this in e.g. their driver load or resume code to
* reset hardware and software state.
*/
void drm_mode_config_reset(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_plane *plane;
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
drm_for_each_plane(plane, dev)
if (plane->funcs->reset)
plane->funcs->reset(plane);
drm_for_each_crtc(crtc, dev)
if (crtc->funcs->reset)
crtc->funcs->reset(crtc);
drm_for_each_encoder(encoder, dev)
if (encoder->funcs && encoder->funcs->reset)
encoder->funcs->reset(encoder);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
if (connector->funcs->reset)
connector->funcs->reset(connector);
drm_connector_list_iter_end(&conn_iter);
}
EXPORT_SYMBOL(drm_mode_config_reset);
/*
* Global properties
*/
static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
{ DRM_PLANE_TYPE_OVERLAY, "Overlay" },
{ DRM_PLANE_TYPE_PRIMARY, "Primary" },
{ DRM_PLANE_TYPE_CURSOR, "Cursor" },
};
static int drm_mode_create_standard_properties(struct drm_device *dev)
{
struct drm_property *prop;
int ret;
ret = drm_connector_create_standard_properties(dev);
if (ret)
return ret;
prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"type", drm_plane_type_enum_list,
ARRAY_SIZE(drm_plane_type_enum_list));
if (!prop)
return -ENOMEM;
dev->mode_config.plane_type_property = prop;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"SRC_X", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_src_x = prop;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"SRC_Y", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_src_y = prop;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"SRC_W", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_src_w = prop;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"SRC_H", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_src_h = prop;
prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_X", INT_MIN, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_x = prop;
prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_Y", INT_MIN, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_y = prop;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_W", 0, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_w = prop;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_H", 0, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_h = prop;
prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
"FB_ID", DRM_MODE_OBJECT_FB);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_fb_id = prop;
prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
"IN_FENCE_FD", -1, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_in_fence_fd = prop;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"OUT_FENCE_PTR", 0, U64_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_out_fence_ptr = prop;
prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_ID", DRM_MODE_OBJECT_CRTC);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_id = prop;
prop = drm_property_create(dev,
DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
"FB_DAMAGE_CLIPS", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_fb_damage_clips = prop;
prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
"ACTIVE");
if (!prop)
return -ENOMEM;
dev->mode_config.prop_active = prop;
prop = drm_property_create(dev,
DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
"MODE_ID", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_mode_id = prop;
prop = drm_property_create_bool(dev, 0,
"VRR_ENABLED");
if (!prop)
return -ENOMEM;
dev->mode_config.prop_vrr_enabled = prop;
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB,
"DEGAMMA_LUT", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.degamma_lut_property = prop;
prop = drm_property_create_range(dev,
DRM_MODE_PROP_IMMUTABLE,
"DEGAMMA_LUT_SIZE", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.degamma_lut_size_property = prop;
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB,
"CTM", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.ctm_property = prop;
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB,
"GAMMA_LUT", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.gamma_lut_property = prop;
prop = drm_property_create_range(dev,
DRM_MODE_PROP_IMMUTABLE,
"GAMMA_LUT_SIZE", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.gamma_lut_size_property = prop;
prop = drm_property_create(dev,
DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
"IN_FORMATS", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.modifiers_property = prop;
return 0;
}
static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
{
drm_mode_config_cleanup(dev);
}
/**
* drmm_mode_config_init - managed DRM mode_configuration structure
* initialization
* @dev: DRM device
*
* Initialize @dev's mode_config structure, used for tracking the graphics
* configuration of @dev.
*
 * Since this initializes the modeset locks, no locking is possible, which is
 * no problem since this should happen single-threaded at init time. It is the
 * driver's job to ensure this guarantee.
*
* Cleanup is automatically handled through registering drm_mode_config_cleanup
* with drmm_add_action().
*
* Returns: 0 on success, negative error value on failure.
*/
int drmm_mode_config_init(struct drm_device *dev)
{
int ret;
mutex_init(&dev->mode_config.mutex);
drm_modeset_lock_init(&dev->mode_config.connection_mutex);
mutex_init(&dev->mode_config.idr_mutex);
mutex_init(&dev->mode_config.fb_lock);
mutex_init(&dev->mode_config.blob_lock);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
INIT_LIST_HEAD(&dev->mode_config.privobj_list);
idr_init_base(&dev->mode_config.object_idr, 1);
idr_init_base(&dev->mode_config.tile_idr, 1);
ida_init(&dev->mode_config.connector_ida);
spin_lock_init(&dev->mode_config.connector_list_lock);
init_llist_head(&dev->mode_config.connector_free_list);
INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
ret = drm_mode_create_standard_properties(dev);
if (ret) {
drm_mode_config_cleanup(dev);
return ret;
}
/* Just to be sure */
dev->mode_config.num_fb = 0;
dev->mode_config.num_connector = 0;
dev->mode_config.num_crtc = 0;
dev->mode_config.num_encoder = 0;
dev->mode_config.num_total_plane = 0;
if (IS_ENABLED(CONFIG_LOCKDEP)) {
struct drm_modeset_acquire_ctx modeset_ctx;
struct ww_acquire_ctx resv_ctx;
struct dma_resv resv;
int ret;
dma_resv_init(&resv);
drm_modeset_acquire_init(&modeset_ctx, 0);
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
&modeset_ctx);
if (ret == -EDEADLK)
ret = drm_modeset_backoff(&modeset_ctx);
ww_acquire_init(&resv_ctx, &reservation_ww_class);
ret = dma_resv_lock(&resv, &resv_ctx);
if (ret == -EDEADLK)
dma_resv_lock_slow(&resv, &resv_ctx);
dma_resv_unlock(&resv);
ww_acquire_fini(&resv_ctx);
drm_modeset_drop_locks(&modeset_ctx);
drm_modeset_acquire_fini(&modeset_ctx);
dma_resv_fini(&resv);
}
return drmm_add_action_or_reset(dev, drm_mode_config_init_release,
NULL);
}
EXPORT_SYMBOL(drmm_mode_config_init);
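/*
 * Example (illustrative sketch; the limits and mydrv_mode_config_funcs are
 * hypothetical): drivers call this early during probe and then fill in the
 * device limits before registering:
 *
 *	ret = drmm_mode_config_init(dev);
 *	if (ret)
 *		return ret;
 *	dev->mode_config.min_width = 0;
 *	dev->mode_config.min_height = 0;
 *	dev->mode_config.max_width = 4096;
 *	dev->mode_config.max_height = 4096;
 *	dev->mode_config.funcs = &mydrv_mode_config_funcs;
 */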
/**
* drm_mode_config_cleanup - free up DRM mode_config info
* @dev: DRM device
*
* Free up all the connectors and CRTCs associated with this DRM device, then
* free up the framebuffers and associated buffer objects.
*
* Note that since this /should/ happen single-threaded at driver/device
* teardown time, no locking is required. It's the driver's job to ensure that
* this guarantee actually holds true.
*
* FIXME: With the managed drmm_mode_config_init() it is no longer necessary for
* drivers to explicitly call this function.
*/
void drm_mode_config_cleanup(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct drm_crtc *crtc, *ct;
struct drm_encoder *encoder, *enct;
struct drm_framebuffer *fb, *fbt;
struct drm_property *property, *pt;
struct drm_property_blob *blob, *bt;
struct drm_plane *plane, *plt;
list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
head) {
encoder->funcs->destroy(encoder);
}
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
		/* drm_connector_list_iter holds a full reference to the
* current connector itself, which means it is inherently safe
* against unreferencing the current connector - but not against
* deleting it right away. */
drm_connector_put(connector);
}
drm_connector_list_iter_end(&conn_iter);
/* connector_iter drops references in a work item. */
flush_work(&dev->mode_config.connector_free_work);
if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
DRM_ERROR("connector %s leaked!\n", connector->name);
drm_connector_list_iter_end(&conn_iter);
}
list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
head) {
drm_property_destroy(dev, property);
}
list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
head) {
plane->funcs->destroy(plane);
}
list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
crtc->funcs->destroy(crtc);
}
list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
head_global) {
drm_property_blob_put(blob);
}
/*
* Single-threaded teardown context, so it's not required to grab the
* fb_lock to protect against concurrent fb_list access. Contrary, it
* would actually deadlock with the drm_framebuffer_cleanup function.
*
* Also, if there are any framebuffers left, that's a driver leak now,
* so politely WARN about this.
*/
WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
struct drm_printer p = drm_debug_printer("[leaked fb]");
drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
drm_framebuffer_print_info(&p, 1, fb);
drm_framebuffer_free(&fb->base.refcount);
}
ida_destroy(&dev->mode_config.connector_ida);
idr_destroy(&dev->mode_config.tile_idr);
idr_destroy(&dev->mode_config.object_idr);
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
static u32 full_encoder_mask(struct drm_device *dev)
{
struct drm_encoder *encoder;
u32 encoder_mask = 0;
drm_for_each_encoder(encoder, dev)
encoder_mask |= drm_encoder_mask(encoder);
return encoder_mask;
}
/*
* For some reason we want the encoder itself included in
* possible_clones. Make life easy for drivers by allowing them
* to leave possible_clones unset if no cloning is possible.
*/
static void fixup_encoder_possible_clones(struct drm_encoder *encoder)
{
if (encoder->possible_clones == 0)
encoder->possible_clones = drm_encoder_mask(encoder);
}
static void validate_encoder_possible_clones(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
u32 encoder_mask = full_encoder_mask(dev);
struct drm_encoder *other;
drm_for_each_encoder(other, dev) {
WARN(!!(encoder->possible_clones & drm_encoder_mask(other)) !=
!!(other->possible_clones & drm_encoder_mask(encoder)),
"possible_clones mismatch: "
"[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x vs. "
"[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x\n",
encoder->base.id, encoder->name,
drm_encoder_mask(encoder), encoder->possible_clones,
other->base.id, other->name,
drm_encoder_mask(other), other->possible_clones);
}
WARN((encoder->possible_clones & drm_encoder_mask(encoder)) == 0 ||
(encoder->possible_clones & ~encoder_mask) != 0,
"Bogus possible_clones: "
"[ENCODER:%d:%s] possible_clones=0x%x (full encoder mask=0x%x)\n",
encoder->base.id, encoder->name,
encoder->possible_clones, encoder_mask);
}
static u32 full_crtc_mask(struct drm_device *dev)
{
struct drm_crtc *crtc;
u32 crtc_mask = 0;
drm_for_each_crtc(crtc, dev)
crtc_mask |= drm_crtc_mask(crtc);
return crtc_mask;
}
static void validate_encoder_possible_crtcs(struct drm_encoder *encoder)
{
u32 crtc_mask = full_crtc_mask(encoder->dev);
WARN((encoder->possible_crtcs & crtc_mask) == 0 ||
(encoder->possible_crtcs & ~crtc_mask) != 0,
"Bogus possible_crtcs: "
"[ENCODER:%d:%s] possible_crtcs=0x%x (full crtc mask=0x%x)\n",
encoder->base.id, encoder->name,
encoder->possible_crtcs, crtc_mask);
}
void drm_mode_config_validate(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct drm_crtc *crtc;
struct drm_plane *plane;
u32 primary_with_crtc = 0, cursor_with_crtc = 0;
unsigned int num_primary = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
drm_for_each_encoder(encoder, dev)
fixup_encoder_possible_clones(encoder);
drm_for_each_encoder(encoder, dev) {
validate_encoder_possible_clones(encoder);
validate_encoder_possible_crtcs(encoder);
}
drm_for_each_crtc(crtc, dev) {
WARN(!crtc->primary, "Missing primary plane on [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
WARN(crtc->cursor && crtc->funcs->cursor_set,
"[CRTC:%d:%s] must not have both a cursor plane and a cursor_set func",
crtc->base.id, crtc->name);
WARN(crtc->cursor && crtc->funcs->cursor_set2,
"[CRTC:%d:%s] must not have both a cursor plane and a cursor_set2 func",
crtc->base.id, crtc->name);
WARN(crtc->cursor && crtc->funcs->cursor_move,
"[CRTC:%d:%s] must not have both a cursor plane and a cursor_move func",
crtc->base.id, crtc->name);
if (crtc->primary) {
WARN(!(crtc->primary->possible_crtcs & drm_crtc_mask(crtc)),
"Bogus primary plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n",
crtc->primary->base.id, crtc->primary->name,
crtc->base.id, crtc->name);
WARN(primary_with_crtc & drm_plane_mask(crtc->primary),
"Primary plane [PLANE:%d:%s] used for multiple CRTCs",
crtc->primary->base.id, crtc->primary->name);
primary_with_crtc |= drm_plane_mask(crtc->primary);
}
if (crtc->cursor) {
WARN(!(crtc->cursor->possible_crtcs & drm_crtc_mask(crtc)),
"Bogus cursor plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n",
crtc->cursor->base.id, crtc->cursor->name,
crtc->base.id, crtc->name);
WARN(cursor_with_crtc & drm_plane_mask(crtc->cursor),
"Cursor plane [PLANE:%d:%s] used for multiple CRTCs",
crtc->cursor->base.id, crtc->cursor->name);
cursor_with_crtc |= drm_plane_mask(crtc->cursor);
}
}
drm_for_each_plane(plane, dev) {
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
num_primary++;
}
WARN(num_primary != dev->mode_config.num_crtc,
"Must have as many primary planes as there are CRTCs, but have %u primary planes and %u CRTCs",
num_primary, dev->mode_config.num_crtc);
}
| linux-master | drivers/gpu/drm/drm_mode_config.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Deepak Rawat <[email protected]>
* Rob Clark <[email protected]>
*
**************************************************************************/
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_framebuffer.h>
static void convert_clip_rect_to_rect(const struct drm_clip_rect *src,
struct drm_mode_rect *dest,
uint32_t num_clips, uint32_t src_inc)
{
while (num_clips > 0) {
dest->x1 = src->x1;
dest->y1 = src->y1;
dest->x2 = src->x2;
dest->y2 = src->y2;
src += src_inc;
dest++;
num_clips--;
}
}
/**
* drm_atomic_helper_check_plane_damage - Verify plane damage on atomic_check.
* @state: The driver state object.
* @plane_state: Plane state for which to verify damage.
*
 * This helper function makes sure that damage from the plane state is
 * discarded for a full modeset. If there are more reasons a driver would want
 * to do a full plane update rather than processing individual damage regions,
 * then those cases should be taken care of here.
 *
 * Note that &drm_plane_state.fb_damage_clips == NULL in plane state means that
 * a full plane update should happen. It also ensures that the helper iterator
 * will return &drm_plane_state.src as damage.
*/
void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
struct drm_plane_state *plane_state)
{
struct drm_crtc_state *crtc_state;
if (plane_state->crtc) {
crtc_state = drm_atomic_get_new_crtc_state(state,
plane_state->crtc);
if (WARN_ON(!crtc_state))
return;
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
drm_property_blob_put(plane_state->fb_damage_clips);
plane_state->fb_damage_clips = NULL;
}
}
}
EXPORT_SYMBOL(drm_atomic_helper_check_plane_damage);
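/*
 * Hedged usage sketch: a driver's plane ->atomic_check hook would typically
 * call the helper above so that damage is discarded whenever the CRTC needs
 * a full modeset. The function name is hypothetical; further driver-specific
 * checks would follow the helper call.
 */
static int example_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	drm_atomic_helper_check_plane_damage(state, new_state);

	return 0;
}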
/**
* drm_atomic_helper_dirtyfb - Helper for dirtyfb.
* @fb: DRM framebuffer.
 * @file_priv: DRM file for the ioctl call.
* @flags: Dirty fb annotate flags.
* @color: Color for annotate fill.
* @clips: Dirty region.
 * @num_clips: Number of clips in @clips.
*
 * A helper to implement &drm_framebuffer_funcs.dirty using the damage
 * interface during plane update. If @num_clips is 0 then this helper will do
 * a full plane update. This is the same behaviour expected by the DIRTYFB
 * IOCTL.
 *
 * Note that this helper is a blocking implementation. This is what current
 * drivers and userspace expect in their DIRTYFB IOCTL implementation, as a
 * way to rate-limit userspace and make sure its rendering doesn't get too far
 * ahead of uploading new data.
*
* Return: Zero on success, negative errno on failure.
*/
int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
struct drm_file *file_priv, unsigned int flags,
unsigned int color, struct drm_clip_rect *clips,
unsigned int num_clips)
{
struct drm_modeset_acquire_ctx ctx;
struct drm_property_blob *damage = NULL;
struct drm_mode_rect *rects = NULL;
struct drm_atomic_state *state;
struct drm_plane *plane;
int ret = 0;
	/*
	 * When called from an ioctl we are interruptible, but not when called
	 * internally (i.e. from the defio worker).
	 */
drm_modeset_acquire_init(&ctx,
file_priv ? DRM_MODESET_ACQUIRE_INTERRUPTIBLE : 0);
state = drm_atomic_state_alloc(fb->dev);
if (!state) {
ret = -ENOMEM;
goto out_drop_locks;
}
state->acquire_ctx = &ctx;
if (clips) {
uint32_t inc = 1;
if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
inc = 2;
num_clips /= 2;
}
rects = kcalloc(num_clips, sizeof(*rects), GFP_KERNEL);
if (!rects) {
ret = -ENOMEM;
goto out;
}
convert_clip_rect_to_rect(clips, rects, num_clips, inc);
damage = drm_property_create_blob(fb->dev,
num_clips * sizeof(*rects),
rects);
if (IS_ERR(damage)) {
ret = PTR_ERR(damage);
damage = NULL;
goto out;
}
}
retry:
drm_for_each_plane(plane, fb->dev) {
struct drm_plane_state *plane_state;
ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
if (ret)
goto out;
if (plane->state->fb != fb) {
drm_modeset_unlock(&plane->mutex);
continue;
}
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto out;
}
drm_property_replace_blob(&plane_state->fb_damage_clips,
damage);
}
ret = drm_atomic_commit(state);
out:
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
ret = drm_modeset_backoff(&ctx);
if (!ret)
goto retry;
}
drm_property_blob_put(damage);
kfree(rects);
drm_atomic_state_put(state);
out_drop_locks:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_dirtyfb);
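/*
 * Hedged usage sketch: drivers usually wire this helper up directly as the
 * &drm_framebuffer_funcs.dirty hook. The GEM helpers referenced here come
 * from <drm/drm_gem_framebuffer_helper.h>, which is assumed to be included.
 */
static const struct drm_framebuffer_funcs example_fb_funcs = {
	.destroy	= drm_gem_fb_destroy,
	.create_handle	= drm_gem_fb_create_handle,
	.dirty		= drm_atomic_helper_dirtyfb,
};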
/**
* drm_atomic_helper_damage_iter_init - Initialize the damage iterator.
* @iter: The iterator to initialize.
* @old_state: Old plane state for validation.
* @state: Plane state from which to iterate the damage clips.
*
 * Initialize an iterator, which clips plane damage
 * &drm_plane_state.fb_damage_clips to plane &drm_plane_state.src. This
 * iterator returns the full plane src when damage is not present, either
 * because user-space didn't send any or because the driver discarded it
 * (i.e. it wants a full plane update). Currently this iterator also returns
 * the full plane src when the plane src changed, but that may change in the
 * future to return only the damage.
 *
 * When the plane is not visible, or the plane update should not happen, the
 * first call to iter_next will return false. Note that this helper uses the
 * clipped &drm_plane_state.src, so drivers calling this helper should have
 * called drm_atomic_helper_check_plane_state() earlier.
*/
void
drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
const struct drm_plane_state *old_state,
const struct drm_plane_state *state)
{
struct drm_rect src;
memset(iter, 0, sizeof(*iter));
if (!state || !state->crtc || !state->fb || !state->visible)
return;
iter->clips = (struct drm_rect *)drm_plane_get_damage_clips(state);
iter->num_clips = drm_plane_get_damage_clips_count(state);
/* Round down for x1/y1 and round up for x2/y2 to catch all pixels */
src = drm_plane_state_src(state);
iter->plane_src.x1 = src.x1 >> 16;
iter->plane_src.y1 = src.y1 >> 16;
iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF);
iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF);
if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
iter->clips = NULL;
iter->num_clips = 0;
iter->full_update = true;
}
}
EXPORT_SYMBOL(drm_atomic_helper_damage_iter_init);
/**
* drm_atomic_helper_damage_iter_next - Advance the damage iterator.
* @iter: The iterator to advance.
* @rect: Return a rectangle in fb coordinate clipped to plane src.
*
 * Since the plane src is in 16.16 fixed point and damage clips are whole
 * numbers, this iterator rounds off clips that intersect with the plane src:
 * round down for x1/y1 and round up for x2/y2 of the intersected coordinates.
 * The same rounding is applied to the full plane src, in case it's returned as
 * damage. This iterator will skip damage clips that lie outside of the plane
 * src.
 *
 * Return: True if the output is valid, false if the end was reached.
 *
 * If the first call to iterator next returns false then there is no need to
 * update the plane.
*/
bool
drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
struct drm_rect *rect)
{
bool ret = false;
if (iter->full_update) {
*rect = iter->plane_src;
iter->full_update = false;
return true;
}
while (iter->curr_clip < iter->num_clips) {
*rect = iter->clips[iter->curr_clip];
iter->curr_clip++;
if (drm_rect_intersect(rect, &iter->plane_src)) {
ret = true;
break;
}
}
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_damage_iter_next);
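/*
 * Hedged usage sketch: walking per-clip damage from a plane update, using the
 * drm_atomic_for_each_plane_damage() convenience macro built on top of the
 * iterator above. The function name is hypothetical.
 */
static void example_plane_flush_damage(struct drm_plane_state *old_state,
				       struct drm_plane_state *new_state)
{
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		/* Upload only the clip.x1/y1..x2/y2 region to the device. */
	}
}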
/**
* drm_atomic_helper_damage_merged - Merged plane damage
* @old_state: Old plane state for validation.
* @state: Plane state from which to iterate the damage clips.
* @rect: Returns the merged damage rectangle
*
* This function merges any valid plane damage clips into one rectangle and
* returns it in @rect.
*
* For details see: drm_atomic_helper_damage_iter_init() and
* drm_atomic_helper_damage_iter_next().
*
* Returns:
* True if there is valid plane damage otherwise false.
*/
bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
struct drm_plane_state *state,
struct drm_rect *rect)
{
struct drm_atomic_helper_damage_iter iter;
struct drm_rect clip;
bool valid = false;
rect->x1 = INT_MAX;
rect->y1 = INT_MAX;
rect->x2 = 0;
rect->y2 = 0;
drm_atomic_helper_damage_iter_init(&iter, old_state, state);
drm_atomic_for_each_plane_damage(&iter, &clip) {
rect->x1 = min(rect->x1, clip.x1);
rect->y1 = min(rect->y1, clip.y1);
rect->x2 = max(rect->x2, clip.x2);
rect->y2 = max(rect->y2, clip.y2);
valid = true;
}
return valid;
}
EXPORT_SYMBOL(drm_atomic_helper_damage_merged);
| linux-master | drivers/gpu/drm/drm_damage_helper.c |
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/err.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
/**
* DOC: overview
*
* &struct drm_bridge represents a device that hangs on to an encoder. These are
* handy when a regular &drm_encoder entity isn't enough to represent the entire
* encoder chain.
*
* A bridge is always attached to a single &drm_encoder at a time, but can be
* either connected to it directly, or through a chain of bridges::
*
* [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
*
 * Here, the output of the encoder feeds bridge A, which in turn feeds
 * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
 * chaining multiple bridges to the output of a bridge, or the same bridge to
 * the output of different bridges, is not supported.
*
* &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
* CRTCs, encoders or connectors and hence are not visible to userspace. They
* just provide additional hooks to get the desired output at the end of the
* encoder chain.
*/
/**
* DOC: display driver integration
*
* Display drivers are responsible for linking encoders with the first bridge
* in the chains. This is done by acquiring the appropriate bridge with
* devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
* encoder with a call to drm_bridge_attach().
*
* Bridges are responsible for linking themselves with the next bridge in the
* chain, if any. This is done the same way as for encoders, with the call to
* drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
*
* Once these links are created, the bridges can participate along with encoder
* functions to perform mode validation and fixup (through
* drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
* setting (through drm_bridge_chain_mode_set()), enable (through
* drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
* and disable (through drm_atomic_bridge_chain_disable() and
* drm_atomic_bridge_chain_post_disable()). Those functions call the
* corresponding operations provided in &drm_bridge_funcs in sequence for all
* bridges in the chain.
*
* For display drivers that use the atomic helpers
* drm_atomic_helper_check_modeset(),
* drm_atomic_helper_commit_modeset_enables() and
* drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
* commit check and commit tail handlers, or through the higher-level
* drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
* drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
* requires no intervention from the driver. For other drivers, the relevant
* DRM bridge chain functions shall be called manually.
*
* Bridges also participate in implementing the &drm_connector at the end of
* the bridge chain. Display drivers may use the drm_bridge_connector_init()
* helper to create the &drm_connector, or implement it manually on top of the
* connector-related operations exposed by the bridge (see the overview
* documentation of bridge operations for more details).
*/
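/*
 * Hedged sketch of the display-driver side described above: acquire the
 * bridge connected to the device's OF node and attach it at the encoder's
 * output, requesting that bridges skip connector creation. The function name
 * and the port/endpoint indices are hypothetical.
 */
static int example_attach_first_bridge(struct device *dev,
				       struct drm_encoder *encoder)
{
	struct drm_bridge *bridge;

	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	return drm_bridge_attach(encoder, bridge, NULL,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}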
/**
* DOC: special care dsi
*
* The interaction between the bridges and other frameworks involved in
* the probing of the upstream driver and the bridge driver can be
 * challenging. Indeed, there are multiple cases that need to be
 * considered:
*
* - The upstream driver doesn't use the component framework and isn't a
* MIPI-DSI host. In this case, the bridge driver will probe at some
* point and the upstream driver should try to probe again by returning
* EPROBE_DEFER as long as the bridge driver hasn't probed.
*
* - The upstream driver doesn't use the component framework, but is a
* MIPI-DSI host. The bridge device uses the MIPI-DCS commands to be
 * controlled. In this case, the bridge device is a child of the
 * display device, and by the time it probes, the display device (and
 * MIPI-DSI host) is guaranteed to be present. The upstream driver will be
* assured that the bridge driver is connected between the
* &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
* Therefore, it must run mipi_dsi_host_register() in its probe
* function, and then run drm_bridge_attach() in its
* &mipi_dsi_host_ops.attach hook.
*
* - The upstream driver uses the component framework and is a MIPI-DSI
* host. The bridge device uses the MIPI-DCS commands to be
 * controlled. This is the same situation as above, and the driver can run
 * mipi_dsi_host_register() in either its probe or bind hooks.
*
* - The upstream driver uses the component framework and is a MIPI-DSI
* host. The bridge device uses a separate bus (such as I2C) to be
* controlled. In this case, there's no correlation between the probe
* of the bridge and upstream drivers, so care must be taken to avoid
* an endless EPROBE_DEFER loop, with each driver waiting for the
* other to probe.
*
* The ideal pattern to cover the last item (and all the others in the
* MIPI-DSI host driver case) is to split the operations like this:
*
* - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
* probe hook. It will make sure that the MIPI-DSI host sticks around,
* and that the driver's bind can be called.
*
* - In its probe hook, the bridge driver must try to find its MIPI-DSI
* host, register as a MIPI-DSI device and attach the MIPI-DSI device
* to its host. The bridge driver is now functional.
*
* - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
* now add its component. Its bind hook will now be called and since
* the bridge driver is attached and registered, we can now look for
* and attach it.
*
* At this point, we're now certain that both the upstream driver and
* the bridge driver are functional and we can't have a deadlock-like
* situation when probing.
*/
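/*
 * Hedged sketch of the split recommended above for a component-based MIPI-DSI
 * host (assumes <linux/component.h> and <drm/drm_mipi_dsi.h> are included;
 * "example_component_ops" is a hypothetical driver-provided bind/unbind
 * table): the host is registered at probe time, and the component is only
 * added once the bridge driver has attached as a DSI device.
 */
static const struct component_ops example_component_ops;

static int example_dsi_host_attach(struct mipi_dsi_host *host,
				   struct mipi_dsi_device *device)
{
	/* The bridge driver has probed and attached; bind can proceed. */
	return component_add(host->dev, &example_component_ops);
}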
/**
* DOC: dsi bridge operations
*
* DSI host interfaces are expected to be implemented as bridges rather than
* encoders, however there are a few aspects of their operation that need to
* be defined in order to provide a consistent interface.
*
* A DSI host should keep the PHY powered down until the pre_enable operation is
 * called. All lanes are in an undefined idle state up to this point, and it
 * must not be assumed that they are in LP-11.
* pre_enable should initialise the PHY, set the data lanes to LP-11, and the
* clock lane to either LP-11 or HS depending on the mode_flag
* %MIPI_DSI_CLOCK_NON_CONTINUOUS.
*
* Ordinarily the downstream bridge DSI peripheral pre_enable will have been
* called before the DSI host. If the DSI peripheral requires LP-11 and/or
* the clock lane to be in HS mode prior to pre_enable, then it can set the
* &pre_enable_prev_first flag to request the pre_enable (and
* post_disable) order to be altered to enable the DSI host first.
*
* Either the CRTC being enabled, or the DSI host enable operation should switch
* the host to actively transmitting video on the data lanes.
*
* The reverse also applies. The DSI host disable operation or stopping the CRTC
* should stop transmitting video, and the data lanes should return to the LP-11
* state. The DSI host &post_disable operation should disable the PHY.
* If the &pre_enable_prev_first flag is set, then the DSI peripheral's
* bridge &post_disable will be called before the DSI host's post_disable.
*
* Whilst it is valid to call &host_transfer prior to pre_enable or after
* post_disable, the exact state of the lanes is undefined at this point. The
* DSI host should initialise the interface, transmit the data, and then disable
* the interface again.
*
* Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
* implemented, it therefore needs to be handled entirely within the DSI Host
* driver.
*/
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
/**
* drm_bridge_add - add the given bridge to the global bridge list
*
* @bridge: bridge control structure
*/
void drm_bridge_add(struct drm_bridge *bridge)
{
mutex_init(&bridge->hpd_mutex);
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
}
EXPORT_SYMBOL(drm_bridge_add);
static void drm_bridge_remove_void(void *bridge)
{
drm_bridge_remove(bridge);
}
/**
* devm_drm_bridge_add - devm managed version of drm_bridge_add()
*
* @dev: device to tie the bridge lifetime to
* @bridge: bridge control structure
*
* This is the managed version of drm_bridge_add() which automatically
* calls drm_bridge_remove() when @dev is unbound.
*
* Return: 0 if no error or negative error code.
*/
int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
{
drm_bridge_add(bridge);
return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
}
EXPORT_SYMBOL(devm_drm_bridge_add);
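/*
 * Hedged usage sketch: an I2C bridge driver registering its bridge with a
 * devm-managed lifetime. "struct example_bridge" and "example_bridge_funcs"
 * are hypothetical, and the relevant headers (<linux/i2c.h>,
 * <linux/device.h>) are assumed to be included.
 */
struct example_bridge {
	struct drm_bridge bridge;
	struct drm_bridge *next_bridge;
};

static const struct drm_bridge_funcs example_bridge_funcs;

static int example_bridge_probe(struct i2c_client *client)
{
	struct example_bridge *priv;

	priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->bridge.funcs = &example_bridge_funcs;
	priv->bridge.of_node = client->dev.of_node;

	return devm_drm_bridge_add(&client->dev, &priv->bridge);
}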
/**
* drm_bridge_remove - remove the given bridge from the global bridge list
*
* @bridge: bridge control structure
*/
void drm_bridge_remove(struct drm_bridge *bridge)
{
mutex_lock(&bridge_lock);
list_del_init(&bridge->list);
mutex_unlock(&bridge_lock);
mutex_destroy(&bridge->hpd_mutex);
}
EXPORT_SYMBOL(drm_bridge_remove);
static struct drm_private_state *
drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
{
struct drm_bridge *bridge = drm_priv_to_bridge(obj);
struct drm_bridge_state *state;
state = bridge->funcs->atomic_duplicate_state(bridge);
return state ? &state->base : NULL;
}
static void
drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
struct drm_private_state *s)
{
struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
struct drm_bridge *bridge = drm_priv_to_bridge(obj);
bridge->funcs->atomic_destroy_state(bridge, state);
}
static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
.atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};
/**
* drm_bridge_attach - attach the bridge to an encoder's chain
*
* @encoder: DRM encoder
* @bridge: bridge to attach
* @previous: previous bridge in the chain (optional)
* @flags: DRM_BRIDGE_ATTACH_* flags
*
* Called by a kms driver to link the bridge to an encoder's chain. The previous
* argument specifies the previous bridge in the chain. If NULL, the bridge is
* linked directly at the encoder's output. Otherwise it is linked at the
* previous bridge's output.
*
* If non-NULL the previous bridge must be already attached by a call to this
* function.
*
* Note that bridges attached to encoders are auto-detached during encoder
* cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
* *not* be balanced with a drm_bridge_detach() in driver code.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
struct drm_bridge *previous,
enum drm_bridge_attach_flags flags)
{
int ret;
if (!encoder || !bridge)
return -EINVAL;
if (previous && (!previous->dev || previous->encoder != encoder))
return -EINVAL;
if (bridge->dev)
return -EBUSY;
bridge->dev = encoder->dev;
bridge->encoder = encoder;
if (previous)
list_add(&bridge->chain_node, &previous->chain_node);
else
list_add(&bridge->chain_node, &encoder->bridge_chain);
if (bridge->funcs->attach) {
ret = bridge->funcs->attach(bridge, flags);
if (ret < 0)
goto err_reset_bridge;
}
if (bridge->funcs->atomic_reset) {
struct drm_bridge_state *state;
state = bridge->funcs->atomic_reset(bridge);
if (IS_ERR(state)) {
ret = PTR_ERR(state);
goto err_detach_bridge;
}
drm_atomic_private_obj_init(bridge->dev, &bridge->base,
&state->base,
&drm_bridge_priv_state_funcs);
}
return 0;
err_detach_bridge:
if (bridge->funcs->detach)
bridge->funcs->detach(bridge);
err_reset_bridge:
bridge->dev = NULL;
bridge->encoder = NULL;
list_del(&bridge->chain_node);
#ifdef CONFIG_OF
DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
bridge->of_node, encoder->name, ret);
#else
DRM_ERROR("failed to attach bridge to encoder %s: %d\n",
encoder->name, ret);
#endif
return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);
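/*
 * Hedged usage sketch: a bridge in the middle of the chain attaching its
 * downstream neighbour from its own &drm_bridge_funcs.attach hook, passing
 * itself as the previous bridge. "next_bridge" would have been looked up
 * earlier, e.g. with devm_drm_of_get_bridge(); the struct is the hypothetical
 * one sketched after devm_drm_bridge_add() above.
 */
static int example_mid_bridge_attach(struct drm_bridge *bridge,
				     enum drm_bridge_attach_flags flags)
{
	struct example_bridge *priv =
		container_of(bridge, struct example_bridge, bridge);

	return drm_bridge_attach(bridge->encoder, priv->next_bridge,
				 bridge, flags);
}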
void drm_bridge_detach(struct drm_bridge *bridge)
{
if (WARN_ON(!bridge))
return;
if (WARN_ON(!bridge->dev))
return;
if (bridge->funcs->atomic_reset)
drm_atomic_private_obj_fini(&bridge->base);
if (bridge->funcs->detach)
bridge->funcs->detach(bridge);
list_del(&bridge->chain_node);
bridge->dev = NULL;
}
/**
* DOC: bridge operations
*
* Bridge drivers expose operations through the &drm_bridge_funcs structure.
* The DRM internals (atomic and CRTC helpers) use the helpers defined in
* drm_bridge.c to call bridge operations. Those operations are divided in
* three big categories to support different parts of the bridge usage.
*
* - The encoder-related operations support control of the bridges in the
* chain, and are roughly counterparts to the &drm_encoder_helper_funcs
* operations. They are used by the legacy CRTC and the atomic modeset
* helpers to perform mode validation, fixup and setting, and enable and
* disable the bridge automatically.
*
* The enable and disable operations are split in
* &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
* &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
* finer-grained control.
*
* Bridge drivers may implement the legacy version of those operations, or
* the atomic version (prefixed with atomic\_), in which case they shall also
* implement the atomic state bookkeeping operations
* (&drm_bridge_funcs.atomic_duplicate_state,
* &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.reset).
* Mixing atomic and non-atomic versions of the operations is not supported.
*
* - The bus format negotiation operations
* &drm_bridge_funcs.atomic_get_output_bus_fmts and
* &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
* negotiate the formats transmitted between bridges in the chain when
* multiple formats are supported. Negotiation for formats is performed
* transparently for display drivers by the atomic modeset helpers. Only
* atomic versions of those operations exist, bridge drivers that need to
* implement them shall thus also implement the atomic version of the
* encoder-related operations. This feature is not supported by the legacy
* CRTC helpers.
*
* - The connector-related operations support implementing a &drm_connector
* based on a chain of bridges. DRM bridges traditionally create a
* &drm_connector for bridges meant to be used at the end of the chain. This
* puts additional burden on bridge drivers, especially for bridges that may
* be used in the middle of a chain or at the end of it. Furthermore, it
* requires all operations of the &drm_connector to be handled by a single
* bridge, which doesn't always match the hardware architecture.
*
* To simplify bridge drivers and make the connector implementation more
* flexible, a new model allows bridges to unconditionally skip creation of
* &drm_connector and instead expose &drm_bridge_funcs operations to support
* an externally-implemented &drm_connector. Those operations are
* &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
* &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify,
* &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
* implemented, display drivers shall create a &drm_connector instance for
* each chain of bridges, and implement those connector instances based on
* the bridge connector operations.
*
* Bridge drivers shall implement the connector-related operations for all
* the features that the bridge hardware support. For instance, if a bridge
* supports reading EDID, the &drm_bridge_funcs.get_edid shall be
* implemented. This however doesn't mean that the DDC lines are wired to the
* bridge on a particular platform, as they could also be connected to an I2C
* controller of the SoC. Support for the connector-related operations on the
* running platform is reported through the &drm_bridge.ops flags. Bridge
* drivers shall detect which operations they can support on the platform
* (usually this information is provided by ACPI or DT), and set the
* &drm_bridge.ops flags for all supported operations. A flag shall only be
* set if the corresponding &drm_bridge_funcs operation is implemented, but
* an implemented operation doesn't necessarily imply that the corresponding
* flag will be set. Display drivers shall use the &drm_bridge.ops flags to
* decide which bridge to delegate a connector operation to. This mechanism
* allows providing a single static const &drm_bridge_funcs instance in
* bridge drivers, improving security by storing function pointers in
* read-only memory.
*
* In order to ease transition, bridge drivers may support both the old and
* new models by making connector creation optional and implementing the
 * connector-related bridge operations. Connector creation is then controlled
* by the flags argument to the drm_bridge_attach() function. Display drivers
* that support the new model and create connectors themselves shall set the
* %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
* connector creation. For intermediate bridges in the chain, the flag shall
* be passed to the drm_bridge_attach() call for the downstream bridge.
* Bridge drivers that implement the new model only shall return an error
* from their &drm_bridge_funcs.attach handler when the
* %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
* should use the new model, and convert the bridge drivers they use if
* needed, in order to gradually transition to the new model.
*/
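/*
 * Hedged sketch of a &drm_bridge_funcs table for the new model described
 * above: the atomic state bookkeeping uses the generic helpers from
 * <drm/drm_atomic_state_helper.h> (included at the top of this file), and
 * connector-related operations would be filled in instead of creating a
 * connector. This completes the hypothetical "example_bridge_funcs" declared
 * earlier.
 */
static const struct drm_bridge_funcs example_bridge_funcs = {
	.attach			= example_mid_bridge_attach,
	.atomic_reset		= drm_atomic_helper_bridge_reset,
	.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
	/* .detect, .get_edid, .hpd_enable, ... would go here. */
};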
/**
* drm_bridge_chain_mode_fixup - fixup proposed mode for all bridges in the
* encoder chain
* @bridge: bridge control structure
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
*
* Calls &drm_bridge_funcs.mode_fixup for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
*
* RETURNS:
* true on success, false on failure
*/
bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_encoder *encoder;
if (!bridge)
return true;
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
if (!bridge->funcs->mode_fixup)
continue;
if (!bridge->funcs->mode_fixup(bridge, mode, adjusted_mode))
return false;
}
return true;
}
EXPORT_SYMBOL(drm_bridge_chain_mode_fixup);
/**
* drm_bridge_chain_mode_valid - validate the mode against all bridges in the
* encoder chain.
* @bridge: bridge control structure
* @info: display info against which the mode shall be validated
* @mode: desired mode to be validated
*
* Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
* chain, starting from the first bridge to the last. If at least one bridge
* does not accept the mode the function returns the error code.
*
* Note: the bridge passed should be the one closest to the encoder.
*
* RETURNS:
 * MODE_OK on success, or a &enum drm_mode_status error code on failure
*/
enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct drm_encoder *encoder;
if (!bridge)
return MODE_OK;
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
enum drm_mode_status ret;
if (!bridge->funcs->mode_valid)
continue;
ret = bridge->funcs->mode_valid(bridge, info, mode);
if (ret != MODE_OK)
return ret;
}
return MODE_OK;
}
EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
/**
* drm_bridge_chain_mode_set - set proposed mode for all bridges in the
* encoder chain
* @bridge: bridge control structure
* @mode: desired mode to be set for the encoder chain
* @adjusted_mode: updated mode that works for this encoder chain
*
* Calls &drm_bridge_funcs.mode_set op for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct drm_encoder *encoder;
if (!bridge)
return;
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
if (bridge->funcs->mode_set)
bridge->funcs->mode_set(bridge, mode, adjusted_mode);
}
}
EXPORT_SYMBOL(drm_bridge_chain_mode_set);
/**
* drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
* @old_state: old atomic state
*
* Calls &drm_bridge_funcs.atomic_disable (falls back on
* &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
* starting from the last bridge to the first. These are called before calling
* &drm_encoder_helper_funcs.atomic_disable
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
struct drm_encoder *encoder;
struct drm_bridge *iter;
if (!bridge)
return;
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
if (iter->funcs->atomic_disable) {
struct drm_bridge_state *old_bridge_state;
old_bridge_state =
drm_atomic_get_old_bridge_state(old_state,
iter);
if (WARN_ON(!old_bridge_state))
return;
iter->funcs->atomic_disable(iter, old_bridge_state);
} else if (iter->funcs->disable) {
iter->funcs->disable(iter);
}
if (iter == bridge)
break;
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
if (old_state && bridge->funcs->atomic_post_disable) {
struct drm_bridge_state *old_bridge_state;
old_bridge_state =
drm_atomic_get_old_bridge_state(old_state,
bridge);
if (WARN_ON(!old_bridge_state))
return;
bridge->funcs->atomic_post_disable(bridge,
old_bridge_state);
} else if (bridge->funcs->post_disable) {
bridge->funcs->post_disable(bridge);
}
}
/**
* drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
* in the encoder chain
* @bridge: bridge control structure
* @old_state: old atomic state
*
* Calls &drm_bridge_funcs.atomic_post_disable (falls back on
* &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
* starting from the first bridge to the last. These are called after completing
* &drm_encoder_helper_funcs.atomic_disable
*
* If a bridge sets @pre_enable_prev_first, then the @post_disable for that
* bridge will be called before the previous one to reverse the @pre_enable
* calling direction.
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
struct drm_encoder *encoder;
struct drm_bridge *next, *limit;
if (!bridge)
return;
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
limit = NULL;
if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
next = list_next_entry(bridge, chain_node);
if (next->pre_enable_prev_first) {
				/* The next bridge requested that the previous
				 * bridge be enabled first, so it must be
				 * disabled last.
				 */
limit = next;
/* Find the next bridge that has NOT requested
* prev to be enabled first / disabled last
*/
list_for_each_entry_from(next, &encoder->bridge_chain,
chain_node) {
if (next->pre_enable_prev_first) {
next = list_prev_entry(next, chain_node);
limit = next;
break;
}
}
/* Call these bridges in reverse order */
list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
chain_node) {
if (next == bridge)
break;
drm_atomic_bridge_call_post_disable(next,
old_state);
}
}
}
drm_atomic_bridge_call_post_disable(bridge, old_state);
if (limit)
/* Jump all bridges that we have already post_disabled */
bridge = limit;
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
if (old_state && bridge->funcs->atomic_pre_enable) {
struct drm_bridge_state *old_bridge_state;
old_bridge_state =
drm_atomic_get_old_bridge_state(old_state,
bridge);
if (WARN_ON(!old_bridge_state))
return;
bridge->funcs->atomic_pre_enable(bridge, old_bridge_state);
} else if (bridge->funcs->pre_enable) {
bridge->funcs->pre_enable(bridge);
}
}
/**
* drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
* the encoder chain
* @bridge: bridge control structure
* @old_state: old atomic state
*
* Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
* &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
* starting from the last bridge to the first. These are called before calling
* &drm_encoder_helper_funcs.atomic_enable
*
* If a bridge sets @pre_enable_prev_first, then the pre_enable for the
* prev bridge will be called before pre_enable of this bridge.
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
struct drm_encoder *encoder;
struct drm_bridge *iter, *next, *limit;
if (!bridge)
return;
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
if (iter->pre_enable_prev_first) {
next = iter;
limit = bridge;
list_for_each_entry_from_reverse(next,
&encoder->bridge_chain,
chain_node) {
if (next == bridge)
break;
if (!next->pre_enable_prev_first) {
/* Found first bridge that does NOT
* request prev to be enabled first
*/
limit = list_prev_entry(next, chain_node);
break;
}
}
list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
			/* Call pre_enable of the bridges that requested
			 * prev-first ordering, in chain order.
			 */
			if (next == iter)
				/* Stop at the first bridge that requested the
				 * previous bridges to be called first.
				 */
break;
drm_atomic_bridge_call_pre_enable(next, old_state);
}
}
drm_atomic_bridge_call_pre_enable(iter, old_state);
if (iter->pre_enable_prev_first)
/* Jump all bridges that we have already pre_enabled */
iter = limit;
if (iter == bridge)
break;
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
/**
* drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
* @bridge: bridge control structure
* @old_state: old atomic state
*
* Calls &drm_bridge_funcs.atomic_enable (falls back on
* &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
* starting from the first bridge to the last. These are called after completing
* &drm_encoder_helper_funcs.atomic_enable
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
struct drm_encoder *encoder;
if (!bridge)
return;
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
if (bridge->funcs->atomic_enable) {
struct drm_bridge_state *old_bridge_state;
old_bridge_state =
drm_atomic_get_old_bridge_state(old_state,
bridge);
if (WARN_ON(!old_bridge_state))
return;
bridge->funcs->atomic_enable(bridge, old_bridge_state);
} else if (bridge->funcs->enable) {
bridge->funcs->enable(bridge);
}
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
static int drm_atomic_bridge_check(struct drm_bridge *bridge,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
if (bridge->funcs->atomic_check) {
struct drm_bridge_state *bridge_state;
int ret;
bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
bridge);
if (WARN_ON(!bridge_state))
return -EINVAL;
ret = bridge->funcs->atomic_check(bridge, bridge_state,
crtc_state, conn_state);
if (ret)
return ret;
} else if (bridge->funcs->mode_fixup) {
if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
&crtc_state->adjusted_mode))
return -EINVAL;
}
return 0;
}
static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
struct drm_bridge *cur_bridge,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
u32 out_bus_fmt)
{
unsigned int i, num_in_bus_fmts = 0;
struct drm_bridge_state *cur_state;
struct drm_bridge *prev_bridge;
u32 *in_bus_fmts;
int ret;
prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
cur_bridge);
/*
* If bus format negotiation is not supported by this bridge, let's
* pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
* hope that it can handle this situation gracefully (by providing
* appropriate default values).
*/
if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
if (cur_bridge != first_bridge) {
ret = select_bus_fmt_recursive(first_bridge,
prev_bridge, crtc_state,
conn_state,
MEDIA_BUS_FMT_FIXED);
if (ret)
return ret;
}
/*
* Driver does not implement the atomic state hooks, but that's
* fine, as long as it does not access the bridge state.
*/
if (cur_state) {
cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
cur_state->output_bus_cfg.format = out_bus_fmt;
}
return 0;
}
/*
* If the driver implements ->atomic_get_input_bus_fmts() it
* should also implement the atomic state hooks.
*/
if (WARN_ON(!cur_state))
return -EINVAL;
in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
cur_state,
crtc_state,
conn_state,
out_bus_fmt,
&num_in_bus_fmts);
if (!num_in_bus_fmts)
return -ENOTSUPP;
else if (!in_bus_fmts)
return -ENOMEM;
if (first_bridge == cur_bridge) {
cur_state->input_bus_cfg.format = in_bus_fmts[0];
cur_state->output_bus_cfg.format = out_bus_fmt;
kfree(in_bus_fmts);
return 0;
}
for (i = 0; i < num_in_bus_fmts; i++) {
ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
crtc_state, conn_state,
in_bus_fmts[i]);
if (ret != -ENOTSUPP)
break;
}
if (!ret) {
cur_state->input_bus_cfg.format = in_bus_fmts[i];
cur_state->output_bus_cfg.format = out_bus_fmt;
}
kfree(in_bus_fmts);
return ret;
}
/*
* This function is called by &drm_atomic_bridge_chain_check() just before
* calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
* It performs bus format negotiation between bridge elements. The negotiation
* happens in reverse order, starting from the last element in the chain up to
* @bridge.
*
* Negotiation starts by retrieving supported output bus formats on the last
* bridge element and testing them one by one. The test is recursive, meaning
* that for each tested output format, the whole chain will be walked backward,
* and each element will have to choose an input bus format that can be
* transcoded to the requested output format. When a bridge element does not
* support transcoding into a specific output format -ENOTSUPP is returned and
* the next bridge element will have to try a different format. If none of the
* combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
*
* This implementation is relying on
* &drm_bridge_funcs.atomic_get_output_bus_fmts() and
* &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
* input/output formats.
*
* When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
* the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
* tries a single format: &drm_connector.display_info.bus_formats[0] if
* available, MEDIA_BUS_FMT_FIXED otherwise.
*
* When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
* &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
* bridge element that lacks this hook and asks the previous element in the
* chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
* to do in that case (fail if they want to enforce bus format negotiation, or
* provide a reasonable default if they need to support pipelines where not
* all elements support bus format negotiation).
*/
static int
drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_connector *conn = conn_state->connector;
struct drm_encoder *encoder = bridge->encoder;
struct drm_bridge_state *last_bridge_state;
unsigned int i, num_out_bus_fmts = 0;
struct drm_bridge *last_bridge;
u32 *out_bus_fmts;
int ret = 0;
last_bridge = list_last_entry(&encoder->bridge_chain,
struct drm_bridge, chain_node);
last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
last_bridge);
if (last_bridge->funcs->atomic_get_output_bus_fmts) {
const struct drm_bridge_funcs *funcs = last_bridge->funcs;
/*
* If the driver implements ->atomic_get_output_bus_fmts() it
* should also implement the atomic state hooks.
*/
if (WARN_ON(!last_bridge_state))
return -EINVAL;
out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
last_bridge_state,
crtc_state,
conn_state,
&num_out_bus_fmts);
if (!num_out_bus_fmts)
return -ENOTSUPP;
else if (!out_bus_fmts)
return -ENOMEM;
} else {
num_out_bus_fmts = 1;
out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
if (!out_bus_fmts)
return -ENOMEM;
if (conn->display_info.num_bus_formats &&
conn->display_info.bus_formats)
out_bus_fmts[0] = conn->display_info.bus_formats[0];
else
out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
}
for (i = 0; i < num_out_bus_fmts; i++) {
ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
conn_state, out_bus_fmts[i]);
if (ret != -ENOTSUPP)
break;
}
kfree(out_bus_fmts);
return ret;
}
static void
drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
struct drm_connector *conn,
struct drm_atomic_state *state)
{
struct drm_bridge_state *bridge_state, *next_bridge_state;
struct drm_bridge *next_bridge;
u32 output_flags = 0;
bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
/* No bridge state attached to this bridge => nothing to propagate. */
if (!bridge_state)
return;
next_bridge = drm_bridge_get_next_bridge(bridge);
/*
* Let's try to apply the most common case here, that is, propagate
* display_info flags for the last bridge, and propagate the input
* flags of the next bridge element to the output end of the current
* bridge when the bridge is not the last one.
* There are exceptions to this rule, like when signal inversion is
* happening at the board level, but that's something drivers can deal
* with from their &drm_bridge_funcs.atomic_check() implementation by
* simply overriding the flags value we've set here.
*/
if (!next_bridge) {
output_flags = conn->display_info.bus_flags;
} else {
next_bridge_state = drm_atomic_get_new_bridge_state(state,
next_bridge);
/*
* No bridge state attached to the next bridge, just leave the
* flags to 0.
*/
if (next_bridge_state)
output_flags = next_bridge_state->input_bus_cfg.flags;
}
bridge_state->output_bus_cfg.flags = output_flags;
/*
* Propagate the output flags to the input end of the bridge. Again, it's
* not necessarily what all bridges want, but that's what most of them
* do, and by doing that by default we avoid forcing drivers to
* duplicate the "dummy propagation" logic.
*/
bridge_state->input_bus_cfg.flags = output_flags;
}
/**
* drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
* @bridge: bridge control structure
* @crtc_state: new CRTC state
* @conn_state: new connector state
*
* First trigger a bus format negotiation before calling
* &drm_bridge_funcs.atomic_check() (falls back on
* &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
* starting from the last bridge to the first. These are called before calling
* &drm_encoder_helper_funcs.atomic_check()
*
* RETURNS:
* 0 on success, a negative error code on failure
*/
int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_connector *conn = conn_state->connector;
struct drm_encoder *encoder;
struct drm_bridge *iter;
int ret;
if (!bridge)
return 0;
ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
conn_state);
if (ret)
return ret;
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
int ret;
/*
* Bus flags are propagated by default. If a bridge needs to
* tweak the input bus flags for any reason, it should happen
* in its &drm_bridge_funcs.atomic_check() implementation such
* that preceding bridges in the chain can propagate the new
* bus flags.
*/
drm_atomic_bridge_propagate_bus_flags(iter, conn,
crtc_state->state);
ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
if (ret)
return ret;
if (iter == bridge)
break;
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
/**
* drm_bridge_detect - check if anything is attached to the bridge output
* @bridge: bridge control structure
*
* If the bridge supports output detection, as reported by the
* DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
* bridge and return the connection status. Otherwise return
* connector_status_unknown.
*
* RETURNS:
* The detection status on success, or connector_status_unknown if the bridge
* doesn't support output detection.
*/
enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
{
if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
return connector_status_unknown;
return bridge->funcs->detect(bridge);
}
EXPORT_SYMBOL_GPL(drm_bridge_detect);
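/*
 * Hedged usage sketch: a display driver implementing &drm_connector_funcs
 * .detect on top of the bridge operation above. "struct example_connector"
 * is hypothetical.
 */
struct example_connector {
	struct drm_connector base;
	struct drm_bridge *bridge;
};

static enum drm_connector_status
example_connector_detect(struct drm_connector *connector, bool force)
{
	struct example_connector *conn =
		container_of(connector, struct example_connector, base);

	return drm_bridge_detect(conn->bridge);
}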
/**
 * drm_bridge_get_modes - fill all modes currently valid for the sink into
 * the @connector
* @bridge: bridge control structure
* @connector: the connector to fill with modes
*
* If the bridge supports output modes retrieval, as reported by the
* DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
* fill the connector with all valid modes and return the number of modes
* added. Otherwise return 0.
*
* RETURNS:
* The number of modes added to the connector.
*/
int drm_bridge_get_modes(struct drm_bridge *bridge,
struct drm_connector *connector)
{
if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
return 0;
return bridge->funcs->get_modes(bridge, connector);
}
EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
/**
* drm_bridge_get_edid - get the EDID data of the connected display
* @bridge: bridge control structure
* @connector: the connector to read EDID for
*
* If the bridge supports output EDID retrieval, as reported by the
* DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
* get the EDID and return it. Otherwise return NULL.
*
* RETURNS:
* The retrieved EDID on success, or NULL otherwise.
*/
struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
return NULL;
return bridge->funcs->get_edid(bridge, connector);
}
EXPORT_SYMBOL_GPL(drm_bridge_get_edid);
/**
* drm_bridge_hpd_enable - enable hot plug detection for the bridge
* @bridge: bridge control structure
* @cb: hot-plug detection callback
* @data: data to be passed to the hot-plug detection callback
*
* Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
* and @data as hot plug notification callback. From now on the @cb will be
* called with @data when an output status change is detected by the bridge,
* until hot plug notification gets disabled with drm_bridge_hpd_disable().
*
* Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
* bridge->ops. This function shall not be called when the flag is not set.
*
* Only one hot plug detection callback can be registered at a time, it is an
* error to call this function when hot plug detection is already enabled for
* the bridge.
*/
void drm_bridge_hpd_enable(struct drm_bridge *bridge,
void (*cb)(void *data,
enum drm_connector_status status),
void *data)
{
if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
return;
mutex_lock(&bridge->hpd_mutex);
if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
goto unlock;
bridge->hpd_cb = cb;
bridge->hpd_data = data;
if (bridge->funcs->hpd_enable)
bridge->funcs->hpd_enable(bridge);
unlock:
mutex_unlock(&bridge->hpd_mutex);
}
EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
/**
* drm_bridge_hpd_disable - disable hot plug detection for the bridge
* @bridge: bridge control structure
*
* Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
* plug detection callback previously registered with drm_bridge_hpd_enable().
* Once this function returns the callback will not be called by the bridge
* when an output status change occurs.
*
* Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
* bridge->ops. This function shall not be called when the flag is not set.
*/
void drm_bridge_hpd_disable(struct drm_bridge *bridge)
{
if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
return;
mutex_lock(&bridge->hpd_mutex);
if (bridge->funcs->hpd_disable)
bridge->funcs->hpd_disable(bridge);
bridge->hpd_cb = NULL;
bridge->hpd_data = NULL;
mutex_unlock(&bridge->hpd_mutex);
}
EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
/**
* drm_bridge_hpd_notify - notify hot plug detection events
* @bridge: bridge control structure
* @status: output connection status
*
* Bridge drivers shall call this function to report hot plug events when they
* detect a change in the output status, when hot plug detection has been
* enabled by drm_bridge_hpd_enable().
*
* This function shall be called in a context that can sleep.
*/
void drm_bridge_hpd_notify(struct drm_bridge *bridge,
enum drm_connector_status status)
{
mutex_lock(&bridge->hpd_mutex);
if (bridge->hpd_cb)
bridge->hpd_cb(bridge->hpd_data, status);
mutex_unlock(&bridge->hpd_mutex);
}
EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
#ifdef CONFIG_OF
/**
* of_drm_find_bridge - find the bridge corresponding to the device node in
* the global bridge list
*
* @np: device node
*
* RETURNS:
* drm_bridge control struct on success, NULL on failure
*/
struct drm_bridge *of_drm_find_bridge(struct device_node *np)
{
struct drm_bridge *bridge;
mutex_lock(&bridge_lock);
list_for_each_entry(bridge, &bridge_list, list) {
if (bridge->of_node == np) {
mutex_unlock(&bridge_lock);
return bridge;
}
}
mutex_unlock(&bridge_lock);
return NULL;
}
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
#ifdef CONFIG_DEBUG_FS
static int drm_bridge_chains_info(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
struct drm_mode_config *config = &dev->mode_config;
struct drm_encoder *encoder;
unsigned int bridge_idx = 0;
list_for_each_entry(encoder, &config->encoder_list, head) {
struct drm_bridge *bridge;
drm_printf(&p, "encoder[%u]\n", encoder->base.id);
drm_for_each_bridge_in_chain(encoder, bridge) {
drm_printf(&p, "\tbridge[%u] type: %u, ops: %#x",
bridge_idx, bridge->type, bridge->ops);
#ifdef CONFIG_OF
if (bridge->of_node)
drm_printf(&p, ", OF: %pOFfc", bridge->of_node);
#endif
drm_printf(&p, "\n");
bridge_idx++;
}
}
return 0;
}
static const struct drm_debugfs_info drm_bridge_debugfs_list[] = {
{ "bridge_chains", drm_bridge_chains_info, 0 },
};
void drm_bridge_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_add_files(minor->dev, drm_bridge_debugfs_list,
ARRAY_SIZE(drm_bridge_debugfs_list));
}
#endif
MODULE_AUTHOR("Ajay Kumar <[email protected]>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
| linux-master | drivers/gpu/drm/drm_bridge.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Intel
*
* Based on drivers/base/devres.c
*/
#include <drm/drm_managed.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
/**
* DOC: managed resources
*
* Inspired by struct &device managed resources, but tied to the lifetime of
* struct &drm_device, which can outlive the underlying physical device, usually
 * because userspace still has open files or other handles to its resources.
*
* Release actions can be added with drmm_add_action(), memory allocations can
* be done directly with drmm_kmalloc() and the related functions. Everything
* will be released on the final drm_dev_put() in reverse order of how the
* release actions have been added and memory has been allocated since driver
* loading started with devm_drm_dev_alloc().
*
* Note that release actions and managed memory can also be added and removed
 * during the lifetime of the driver; all of the functions are safe to call
 * concurrently. But it is recommended to use managed resources only for resources that
* change rarely, if ever, during the lifetime of the &drm_device instance.
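 *
 * A minimal sketch of both mechanisms, assuming a driver_device structure
 * embedding the &drm_device plus hypothetical pclk and table members
 * (TABLE_SIZE is an illustrative constant):
 *
 * .. code-block:: c
 *
 *     static void driver_disable_clk(struct drm_device *dev, void *data)
 *     {
 *             /* runs automatically on the final drm_dev_put() */
 *             clk_disable_unprepare(data);
 *     }
 *
 *     static int driver_setup(struct driver_device *priv)
 *     {
 *             /* freed automatically together with the drm_device */
 *             priv->table = drmm_kzalloc(&priv->drm, TABLE_SIZE, GFP_KERNEL);
 *             if (!priv->table)
 *                     return -ENOMEM;
 *
 *             return drmm_add_action_or_reset(&priv->drm, driver_disable_clk,
 *                                             priv->pclk);
 *     }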
*/
struct drmres_node {
struct list_head entry;
drmres_release_t release;
const char *name;
size_t size;
};
struct drmres {
struct drmres_node node;
/*
* Some archs want to perform DMA into kmalloc caches
* and need a guaranteed alignment larger than
* the alignment of a 64-bit integer.
* Thus we use ARCH_DMA_MINALIGN for data[] which will force the same
* alignment for struct drmres when allocated by kmalloc().
*/
u8 __aligned(ARCH_DMA_MINALIGN) data[];
};
static void free_dr(struct drmres *dr)
{
kfree_const(dr->node.name);
kfree(dr);
}
void drm_managed_release(struct drm_device *dev)
{
struct drmres *dr, *tmp;
drm_dbg_drmres(dev, "drmres release begin\n");
list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
dr, dr->node.name, dr->node.size);
if (dr->node.release)
dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);
list_del(&dr->node.entry);
free_dr(dr);
}
drm_dbg_drmres(dev, "drmres release end\n");
}
/*
* Always inline so that kmalloc_track_caller tracks the actual interesting
* caller outside of drm_managed.c.
*/
static __always_inline struct drmres * alloc_dr(drmres_release_t release,
size_t size, gfp_t gfp, int nid)
{
size_t tot_size;
struct drmres *dr;
/* We must catch any near-SIZE_MAX cases that could overflow. */
if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
return NULL;
dr = kmalloc_node_track_caller(tot_size, gfp, nid);
if (unlikely(!dr))
return NULL;
memset(dr, 0, offsetof(struct drmres, data));
INIT_LIST_HEAD(&dr->node.entry);
dr->node.release = release;
dr->node.size = size;
return dr;
}
static void del_dr(struct drm_device *dev, struct drmres *dr)
{
list_del_init(&dr->node.entry);
drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
dr, dr->node.name, (unsigned long) dr->node.size);
}
static void add_dr(struct drm_device *dev, struct drmres *dr)
{
unsigned long flags;
spin_lock_irqsave(&dev->managed.lock, flags);
list_add(&dr->node.entry, &dev->managed.resources);
spin_unlock_irqrestore(&dev->managed.lock, flags);
drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
dr, dr->node.name, (unsigned long) dr->node.size);
}
void drmm_add_final_kfree(struct drm_device *dev, void *container)
{
WARN_ON(dev->managed.final_kfree);
WARN_ON(dev < (struct drm_device *) container);
WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
dev->managed.final_kfree = container;
}
int __drmm_add_action(struct drm_device *dev,
drmres_release_t action,
void *data, const char *name)
{
struct drmres *dr;
void **void_ptr;
dr = alloc_dr(action, data ? sizeof(void*) : 0,
GFP_KERNEL | __GFP_ZERO,
dev_to_node(dev->dev));
if (!dr) {
drm_dbg_drmres(dev, "failed to add action %s for %p\n",
name, data);
return -ENOMEM;
}
dr->node.name = kstrdup_const(name, GFP_KERNEL);
if (data) {
void_ptr = (void **)&dr->data;
*void_ptr = data;
}
add_dr(dev, dr);
return 0;
}
EXPORT_SYMBOL(__drmm_add_action);
int __drmm_add_action_or_reset(struct drm_device *dev,
drmres_release_t action,
void *data, const char *name)
{
int ret;
ret = __drmm_add_action(dev, action, data, name);
if (ret)
action(dev, data);
return ret;
}
EXPORT_SYMBOL(__drmm_add_action_or_reset);
/**
* drmm_kmalloc - &drm_device managed kmalloc()
* @dev: DRM device
* @size: size of the memory allocation
* @gfp: GFP allocation flags
*
* This is a &drm_device managed version of kmalloc(). The allocated memory is
* automatically freed on the final drm_dev_put(). Memory can also be freed
* before the final drm_dev_put() by calling drmm_kfree().
*/
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
{
struct drmres *dr;
dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
if (!dr) {
drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
size, gfp);
return NULL;
}
dr->node.name = kstrdup_const("kmalloc", gfp);
add_dr(dev, dr);
return dr->data;
}
EXPORT_SYMBOL(drmm_kmalloc);
/**
* drmm_kstrdup - &drm_device managed kstrdup()
* @dev: DRM device
* @s: 0-terminated string to be duplicated
* @gfp: GFP allocation flags
*
* This is a &drm_device managed version of kstrdup(). The allocated memory is
* automatically freed on the final drm_dev_put() and works exactly like a
* memory allocation obtained by drmm_kmalloc().
*/
char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
{
size_t size;
char *buf;
if (!s)
return NULL;
size = strlen(s) + 1;
buf = drmm_kmalloc(dev, size, gfp);
if (buf)
memcpy(buf, s, size);
return buf;
}
EXPORT_SYMBOL_GPL(drmm_kstrdup);
/**
* drmm_kfree - &drm_device managed kfree()
* @dev: DRM device
* @data: memory allocation to be freed
*
* This is a &drm_device managed version of kfree() which can be used to
* release memory allocated through drmm_kmalloc() or any of its related
* functions before the final drm_dev_put() of @dev.
*/
void drmm_kfree(struct drm_device *dev, void *data)
{
struct drmres *dr_match = NULL, *dr;
unsigned long flags;
if (!data)
return;
spin_lock_irqsave(&dev->managed.lock, flags);
list_for_each_entry(dr, &dev->managed.resources, node.entry) {
if (dr->data == data) {
dr_match = dr;
del_dr(dev, dr_match);
break;
}
}
spin_unlock_irqrestore(&dev->managed.lock, flags);
if (WARN_ON(!dr_match))
return;
free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_kfree);
void __drmm_mutex_release(struct drm_device *dev, void *res)
{
struct mutex *lock = res;
mutex_destroy(lock);
}
EXPORT_SYMBOL(__drmm_mutex_release);
| linux-master | drivers/gpu/drm/drm_managed.c |
/*
* \file drm_dma.c
* DMA IOCTL and function support
*
* \author Rickard E. (Rik) Faith <[email protected]>
* \author Gareth Hughes <[email protected]>
*/
/*
* Created: Fri Mar 19 14:30:16 1999 by [email protected]
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
/**
* drm_legacy_dma_setup() - Initialize the DMA data.
*
* @dev: DRM device.
* Return: zero on success or a negative value on failure.
*
* Allocate and initialize a drm_device_dma structure.
*/
int drm_legacy_dma_setup(struct drm_device *dev)
{
int i;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
!drm_core_check_feature(dev, DRIVER_LEGACY))
return 0;
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
if (!dev->dma)
return -ENOMEM;
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
return 0;
}
/**
* drm_legacy_dma_takedown() - Cleanup the DMA resources.
*
* @dev: DRM device.
*
* Free all pages associated with DMA buffers, the buffers and pages lists, and
* finally the drm_device::dma structure itself.
*/
void drm_legacy_dma_takedown(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
drm_dma_handle_t *dmah;
int i, j;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
if (!dma)
return;
/* Clear dma buffers */
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].seg_count) {
DRM_DEBUG("order %d: buf_count = %d,"
" seg_count = %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].seg_count);
for (j = 0; j < dma->bufs[i].seg_count; j++) {
if (dma->bufs[i].seglist[j]) {
dmah = dma->bufs[i].seglist[j];
dma_free_coherent(dev->dev,
dmah->size,
dmah->vaddr,
dmah->busaddr);
kfree(dmah);
}
}
kfree(dma->bufs[i].seglist);
}
if (dma->bufs[i].buf_count) {
for (j = 0; j < dma->bufs[i].buf_count; j++) {
kfree(dma->bufs[i].buflist[j].dev_private);
}
kfree(dma->bufs[i].buflist);
}
}
kfree(dma->buflist);
kfree(dma->pagelist);
kfree(dev->dma);
dev->dma = NULL;
}
/**
* drm_legacy_free_buffer() - Free a buffer.
*
* @dev: DRM device.
* @buf: buffer to free.
*
* Resets the fields of \p buf.
*/
void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
{
if (!buf)
return;
buf->waiting = 0;
buf->pending = 0;
buf->file_priv = NULL;
buf->used = 0;
}
/**
* drm_legacy_reclaim_buffers() - Reclaim the buffers.
*
* @dev: DRM device.
* @file_priv: DRM file private.
*
* Frees each buffer associated with \p file_priv not already on the hardware.
*/
void drm_legacy_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
return;
for (i = 0; i < dma->buf_count; i++) {
if (dma->buflist[i]->file_priv == file_priv) {
switch (dma->buflist[i]->list) {
case DRM_LIST_NONE:
drm_legacy_free_buffer(dev, dma->buflist[i]);
break;
case DRM_LIST_WAIT:
dma->buflist[i]->list = DRM_LIST_RECLAIM;
break;
default:
/* Buffer already on hardware. */
break;
}
}
}
}
| linux-master | drivers/gpu/drm/drm_dma.c |
// SPDX-License-Identifier: MIT
/*
* Copyright (C) 2020 - 2021 Red Hat, Inc.
*
* Authors:
* Hans de Goede <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <drm/drm_privacy_screen_machine.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_privacy_screen_driver.h>
#include "drm_internal.h"
/**
* DOC: overview
*
* This class allows non KMS drivers, from e.g. drivers/platform/x86 to
* register a privacy-screen device, which the KMS drivers can then use
* to implement the standard privacy-screen properties, see
* :ref:`Standard Connector Properties<standard_connector_properties>`.
*
* KMS drivers using a privacy-screen class device are advised to use the
* drm_connector_attach_privacy_screen_provider() and
* drm_connector_update_privacy_screen() helpers for dealing with this.
*/
#define to_drm_privacy_screen(dev) \
container_of(dev, struct drm_privacy_screen, dev)
static DEFINE_MUTEX(drm_privacy_screen_lookup_lock);
static LIST_HEAD(drm_privacy_screen_lookup_list);
static DEFINE_MUTEX(drm_privacy_screen_devs_lock);
static LIST_HEAD(drm_privacy_screen_devs);
/*** drm_privacy_screen_machine.h functions ***/
/**
* drm_privacy_screen_lookup_add - add an entry to the static privacy-screen
* lookup list
* @lookup: lookup list entry to add
*
* Add an entry to the static privacy-screen lookup list. Note the
* &struct list_head which is part of the &struct drm_privacy_screen_lookup
* gets added to a list owned by the privacy-screen core. So the passed in
 * &struct drm_privacy_screen_lookup must not be freed until it is removed
* from the lookup list by calling drm_privacy_screen_lookup_remove().
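 *
 * A minimal sketch of an entry as platform code might register it; the
 * connector and provider names below are illustrative only:
 *
 * .. code-block:: c
 *
 *     static struct drm_privacy_screen_lookup my_lookup = {
 *             .dev_id = NULL,  /* wildcard: matches any consumer device */
 *             .con_id = "eDP-1",
 *             .provider = "privacy_screen-thinkpad_acpi",
 *     };
 *
 *     drm_privacy_screen_lookup_add(&my_lookup);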
*/
void drm_privacy_screen_lookup_add(struct drm_privacy_screen_lookup *lookup)
{
mutex_lock(&drm_privacy_screen_lookup_lock);
list_add(&lookup->list, &drm_privacy_screen_lookup_list);
mutex_unlock(&drm_privacy_screen_lookup_lock);
}
EXPORT_SYMBOL(drm_privacy_screen_lookup_add);
/**
* drm_privacy_screen_lookup_remove - remove an entry to the static
* privacy-screen lookup list
* @lookup: lookup list entry to remove
*
* Remove an entry previously added with drm_privacy_screen_lookup_add()
* from the static privacy-screen lookup list.
*/
void drm_privacy_screen_lookup_remove(struct drm_privacy_screen_lookup *lookup)
{
mutex_lock(&drm_privacy_screen_lookup_lock);
list_del(&lookup->list);
mutex_unlock(&drm_privacy_screen_lookup_lock);
}
EXPORT_SYMBOL(drm_privacy_screen_lookup_remove);
/*** drm_privacy_screen_consumer.h functions ***/
static struct drm_privacy_screen *drm_privacy_screen_get_by_name(
const char *name)
{
struct drm_privacy_screen *priv;
struct device *dev = NULL;
mutex_lock(&drm_privacy_screen_devs_lock);
list_for_each_entry(priv, &drm_privacy_screen_devs, list) {
if (strcmp(dev_name(&priv->dev), name) == 0) {
dev = get_device(&priv->dev);
break;
}
}
mutex_unlock(&drm_privacy_screen_devs_lock);
return dev ? to_drm_privacy_screen(dev) : NULL;
}
/**
* drm_privacy_screen_get - get a privacy-screen provider
* @dev: consumer-device for which to get a privacy-screen provider
* @con_id: (video)connector name for which to get a privacy-screen provider
*
* Get a privacy-screen provider for a privacy-screen attached to the
* display described by the @dev and @con_id parameters.
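 *
 * A minimal consumer sketch, assuming @dev is the consumer's struct device
 * and the connector name is illustrative:
 *
 * .. code-block:: c
 *
 *     struct drm_privacy_screen *priv;
 *
 *     priv = drm_privacy_screen_get(dev, "eDP-1");
 *     if (IS_ERR(priv))
 *             return PTR_ERR(priv);
 *
 *     /* ... use the privacy screen ... */
 *
 *     drm_privacy_screen_put(priv);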
*
* Return:
* * A pointer to a &struct drm_privacy_screen on success.
* * ERR_PTR(-ENODEV) if no matching privacy-screen is found
* * ERR_PTR(-EPROBE_DEFER) if there is a matching privacy-screen,
* but it has not been registered yet.
*/
struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev,
const char *con_id)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
struct drm_privacy_screen_lookup *l;
struct drm_privacy_screen *priv;
const char *provider = NULL;
int match, best = -1;
/*
* For now we only support using a static lookup table, which is
* populated by the drm_privacy_screen_arch_init() call. This should
* be extended with device-tree / fw_node lookup when support is added
 * for devicetree-based hardware with a privacy-screen.
*
* The lookup algorithm was shamelessly taken from the clock
* framework:
*
* We do slightly fuzzy matching here:
* An entry with a NULL ID is assumed to be a wildcard.
* If an entry has a device ID, it must match
* If an entry has a connection ID, it must match
* Then we take the most specific entry - with the following order
* of precedence: dev+con > dev only > con only.
*/
mutex_lock(&drm_privacy_screen_lookup_lock);
list_for_each_entry(l, &drm_privacy_screen_lookup_list, list) {
match = 0;
if (l->dev_id) {
if (!dev_id || strcmp(l->dev_id, dev_id))
continue;
match += 2;
}
if (l->con_id) {
if (!con_id || strcmp(l->con_id, con_id))
continue;
match += 1;
}
if (match > best) {
provider = l->provider;
best = match;
}
}
mutex_unlock(&drm_privacy_screen_lookup_lock);
if (!provider)
return ERR_PTR(-ENODEV);
priv = drm_privacy_screen_get_by_name(provider);
if (!priv)
return ERR_PTR(-EPROBE_DEFER);
return priv;
}
EXPORT_SYMBOL(drm_privacy_screen_get);
/**
* drm_privacy_screen_put - release a privacy-screen reference
* @priv: privacy screen reference to release
*
* Release a privacy-screen provider reference gotten through
* drm_privacy_screen_get(). May be called with a NULL or ERR_PTR,
* in which case it is a no-op.
*/
void drm_privacy_screen_put(struct drm_privacy_screen *priv)
{
if (IS_ERR_OR_NULL(priv))
return;
put_device(&priv->dev);
}
EXPORT_SYMBOL(drm_privacy_screen_put);
/**
* drm_privacy_screen_set_sw_state - set a privacy-screen's sw-state
* @priv: privacy screen to set the sw-state for
* @sw_state: new sw-state value to set
*
* Set the sw-state of a privacy screen. If the privacy-screen is not
* in a locked hw-state, then the actual and hw-state of the privacy-screen
* will be immediately updated to the new value. If the privacy-screen is
* in a locked hw-state, then the new sw-state will be remembered as the
* requested state to put the privacy-screen in when it becomes unlocked.
*
* Return: 0 on success, negative error code on failure.
*/
int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv,
enum drm_privacy_screen_status sw_state)
{
int ret = 0;
mutex_lock(&priv->lock);
if (!priv->ops) {
ret = -ENODEV;
goto out;
}
/*
* As per the DRM connector properties documentation, setting the
* sw_state while the hw_state is locked is allowed. In this case
 * it is a no-op other than storing the new sw_state so that it
* can be honored when the state gets unlocked.
* Also skip the set if the hw already is in the desired state.
*/
if (priv->hw_state >= PRIVACY_SCREEN_DISABLED_LOCKED ||
priv->hw_state == sw_state) {
priv->sw_state = sw_state;
goto out;
}
ret = priv->ops->set_sw_state(priv, sw_state);
out:
mutex_unlock(&priv->lock);
return ret;
}
EXPORT_SYMBOL(drm_privacy_screen_set_sw_state);
/**
* drm_privacy_screen_get_state - get privacy-screen's current state
* @priv: privacy screen to get the state for
 * @sw_state_ret: address where to store the privacy-screen's current sw-state
 * @hw_state_ret: address where to store the privacy-screen's current hw-state
*
* Get the current state of a privacy-screen, both the sw-state and the
* hw-state.
*/
void drm_privacy_screen_get_state(struct drm_privacy_screen *priv,
enum drm_privacy_screen_status *sw_state_ret,
enum drm_privacy_screen_status *hw_state_ret)
{
mutex_lock(&priv->lock);
*sw_state_ret = priv->sw_state;
*hw_state_ret = priv->hw_state;
mutex_unlock(&priv->lock);
}
EXPORT_SYMBOL(drm_privacy_screen_get_state);
/**
* drm_privacy_screen_register_notifier - register a notifier
* @priv: Privacy screen to register the notifier with
* @nb: Notifier-block for the notifier to register
*
* Register a notifier with the privacy-screen to be notified of changes made
* to the privacy-screen state from outside of the privacy-screen class.
* E.g. the state may be changed by the hardware itself in response to a
* hotkey press.
*
* The notifier is called with no locks held. The new hw_state and sw_state
* can be retrieved using the drm_privacy_screen_get_state() function.
* A pointer to the drm_privacy_screen's struct is passed as the ``void *data``
* argument of the notifier_block's notifier_call.
*
* The notifier will NOT be called when changes are made through
* drm_privacy_screen_set_sw_state(). It is only called for external changes.
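 *
 * A minimal notifier sketch (my_notifier is an illustrative name):
 *
 * .. code-block:: c
 *
 *     static int my_notifier(struct notifier_block *nb, unsigned long action,
 *                            void *data)
 *     {
 *             struct drm_privacy_screen *priv = data;
 *             enum drm_privacy_screen_status sw_state, hw_state;
 *
 *             drm_privacy_screen_get_state(priv, &sw_state, &hw_state);
 *             /* react to the new state, e.g. update connector properties */
 *             return NOTIFY_DONE;
 *     }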
*
* Return: 0 on success, negative error code on failure.
*/
int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv,
struct notifier_block *nb)
{
return blocking_notifier_chain_register(&priv->notifier_head, nb);
}
EXPORT_SYMBOL(drm_privacy_screen_register_notifier);
/**
* drm_privacy_screen_unregister_notifier - unregister a notifier
 * @priv: Privacy screen to unregister the notifier from
 * @nb: Notifier-block for the notifier to unregister
*
* Unregister a notifier registered with drm_privacy_screen_register_notifier().
*
* Return: 0 on success, negative error code on failure.
*/
int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv,
struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&priv->notifier_head, nb);
}
EXPORT_SYMBOL(drm_privacy_screen_unregister_notifier);
/*** drm_privacy_screen_driver.h functions ***/
static ssize_t sw_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_privacy_screen *priv = to_drm_privacy_screen(dev);
const char * const sw_state_names[] = {
"Disabled",
"Enabled",
};
ssize_t ret;
mutex_lock(&priv->lock);
if (!priv->ops)
ret = -ENODEV;
else if (WARN_ON(priv->sw_state >= ARRAY_SIZE(sw_state_names)))
ret = -ENXIO;
else
ret = sprintf(buf, "%s\n", sw_state_names[priv->sw_state]);
mutex_unlock(&priv->lock);
return ret;
}
/*
* RO: Do not allow setting the sw_state through sysfs, this MUST be done
* through the drm_properties on the drm_connector.
*/
static DEVICE_ATTR_RO(sw_state);
static ssize_t hw_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_privacy_screen *priv = to_drm_privacy_screen(dev);
const char * const hw_state_names[] = {
"Disabled",
"Enabled",
"Disabled, locked",
"Enabled, locked",
};
ssize_t ret;
mutex_lock(&priv->lock);
if (!priv->ops)
ret = -ENODEV;
else if (WARN_ON(priv->hw_state >= ARRAY_SIZE(hw_state_names)))
ret = -ENXIO;
else
ret = sprintf(buf, "%s\n", hw_state_names[priv->hw_state]);
mutex_unlock(&priv->lock);
return ret;
}
static DEVICE_ATTR_RO(hw_state);
static struct attribute *drm_privacy_screen_attrs[] = {
&dev_attr_sw_state.attr,
&dev_attr_hw_state.attr,
NULL
};
ATTRIBUTE_GROUPS(drm_privacy_screen);
static struct device_type drm_privacy_screen_type = {
.name = "privacy_screen",
.groups = drm_privacy_screen_groups,
};
static void drm_privacy_screen_device_release(struct device *dev)
{
struct drm_privacy_screen *priv = to_drm_privacy_screen(dev);
kfree(priv);
}
/**
* drm_privacy_screen_register - register a privacy-screen
* @parent: parent-device for the privacy-screen
* @ops: &struct drm_privacy_screen_ops pointer with ops for the privacy-screen
* @data: Private data owned by the privacy screen provider
*
* Create and register a privacy-screen.
*
* Return:
* * A pointer to the created privacy-screen on success.
* * An ERR_PTR(errno) on failure.
*/
struct drm_privacy_screen *drm_privacy_screen_register(
struct device *parent, const struct drm_privacy_screen_ops *ops,
void *data)
{
struct drm_privacy_screen *priv;
int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return ERR_PTR(-ENOMEM);
mutex_init(&priv->lock);
BLOCKING_INIT_NOTIFIER_HEAD(&priv->notifier_head);
priv->dev.class = drm_class;
priv->dev.type = &drm_privacy_screen_type;
priv->dev.parent = parent;
priv->dev.release = drm_privacy_screen_device_release;
dev_set_name(&priv->dev, "privacy_screen-%s", dev_name(parent));
priv->drvdata = data;
priv->ops = ops;
priv->ops->get_hw_state(priv);
ret = device_register(&priv->dev);
if (ret) {
put_device(&priv->dev);
return ERR_PTR(ret);
}
mutex_lock(&drm_privacy_screen_devs_lock);
list_add(&priv->list, &drm_privacy_screen_devs);
mutex_unlock(&drm_privacy_screen_devs_lock);
return priv;
}
EXPORT_SYMBOL(drm_privacy_screen_register);
/**
* drm_privacy_screen_unregister - unregister privacy-screen
* @priv: privacy-screen to unregister
*
* Unregister a privacy-screen registered with drm_privacy_screen_register().
* May be called with a NULL or ERR_PTR, in which case it is a no-op.
*/
void drm_privacy_screen_unregister(struct drm_privacy_screen *priv)
{
if (IS_ERR_OR_NULL(priv))
return;
mutex_lock(&drm_privacy_screen_devs_lock);
list_del(&priv->list);
mutex_unlock(&drm_privacy_screen_devs_lock);
mutex_lock(&priv->lock);
priv->drvdata = NULL;
priv->ops = NULL;
mutex_unlock(&priv->lock);
device_unregister(&priv->dev);
}
EXPORT_SYMBOL(drm_privacy_screen_unregister);
/**
* drm_privacy_screen_call_notifier_chain - notify consumers of state change
 * @priv: Privacy screen whose notifier chain to call
*
 * A privacy-screen provider driver can call this function upon external
 * changes to the privacy-screen state. E.g. the state may be changed by the
 * hardware itself in response to a hotkey press.
 * This function must be called without holding the privacy-screen lock.
 * The driver must update sw_state and hw_state to reflect the new state before
 * calling this function.
 * The expected behavior from the driver upon receiving an external state
 * change event is: 1. Take the lock; 2. Update sw_state and hw_state;
 * 3. Release the lock; 4. Call drm_privacy_screen_call_notifier_chain().
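 *
 * Expressed as a sketch of provider-driver code, where new_state stands for
 * whatever state the driver detected:
 *
 * .. code-block:: c
 *
 *     mutex_lock(&priv->lock);
 *     priv->sw_state = new_state;
 *     priv->hw_state = new_state;
 *     mutex_unlock(&priv->lock);
 *     drm_privacy_screen_call_notifier_chain(priv);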
*/
void drm_privacy_screen_call_notifier_chain(struct drm_privacy_screen *priv)
{
blocking_notifier_call_chain(&priv->notifier_head, 0, priv);
}
EXPORT_SYMBOL(drm_privacy_screen_call_notifier_chain);
| linux-master | drivers/gpu/drm/drm_privacy_screen.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2011 Red Hat Inc.
* Copyright 2023 Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/* Algorithm:
*
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU ring
 * progression, what comes after last is the oldest bo we allocated and thus
 * the first one that should no longer be in use by the GPU.
 *
 * If that is not the case, we skip over the bo after last to the closest
 * completed bo, if one exists. If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring.
 * We just wait for any of those fences to complete.
*/
#include <drm/drm_suballoc.h>
#include <drm/drm_print.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/dma-fence.h>
static void drm_suballoc_remove_locked(struct drm_suballoc *sa);
static void drm_suballoc_try_free(struct drm_suballoc_manager *sa_manager);
/**
* drm_suballoc_manager_init() - Initialise the drm_suballoc_manager
* @sa_manager: pointer to the sa_manager
* @size: number of bytes we want to suballocate
* @align: alignment for each suballocated chunk
*
* Prepares the suballocation manager for suballocations.
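 *
 * A minimal sketch, carving suballocations out of a 64 KiB range with
 * 256-byte alignment (both sizes are illustrative):
 *
 * .. code-block:: c
 *
 *     struct drm_suballoc_manager mgr;
 *
 *     drm_suballoc_manager_init(&mgr, SZ_64K, 256);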
*/
void drm_suballoc_manager_init(struct drm_suballoc_manager *sa_manager,
size_t size, size_t align)
{
unsigned int i;
BUILD_BUG_ON(!is_power_of_2(DRM_SUBALLOC_MAX_QUEUES));
if (!align)
align = 1;
/* alignment must be a power of 2 */
if (WARN_ON_ONCE(align & (align - 1)))
align = roundup_pow_of_two(align);
init_waitqueue_head(&sa_manager->wq);
sa_manager->size = size;
sa_manager->align = align;
sa_manager->hole = &sa_manager->olist;
INIT_LIST_HEAD(&sa_manager->olist);
for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
INIT_LIST_HEAD(&sa_manager->flist[i]);
}
EXPORT_SYMBOL(drm_suballoc_manager_init);
/**
* drm_suballoc_manager_fini() - Destroy the drm_suballoc_manager
* @sa_manager: pointer to the sa_manager
*
* Cleans up the suballocation manager after use. All fences added
* with drm_suballoc_free() must be signaled, or we cannot clean up
* the entire manager.
*/
void drm_suballoc_manager_fini(struct drm_suballoc_manager *sa_manager)
{
struct drm_suballoc *sa, *tmp;
if (!sa_manager->size)
return;
if (!list_empty(&sa_manager->olist)) {
sa_manager->hole = &sa_manager->olist;
drm_suballoc_try_free(sa_manager);
if (!list_empty(&sa_manager->olist))
DRM_ERROR("sa_manager is not empty, clearing anyway\n");
}
list_for_each_entry_safe(sa, tmp, &sa_manager->olist, olist) {
drm_suballoc_remove_locked(sa);
}
sa_manager->size = 0;
}
EXPORT_SYMBOL(drm_suballoc_manager_fini);
static void drm_suballoc_remove_locked(struct drm_suballoc *sa)
{
struct drm_suballoc_manager *sa_manager = sa->manager;
if (sa_manager->hole == &sa->olist)
sa_manager->hole = sa->olist.prev;
list_del_init(&sa->olist);
list_del_init(&sa->flist);
dma_fence_put(sa->fence);
kfree(sa);
}
static void drm_suballoc_try_free(struct drm_suballoc_manager *sa_manager)
{
struct drm_suballoc *sa, *tmp;
if (sa_manager->hole->next == &sa_manager->olist)
return;
sa = list_entry(sa_manager->hole->next, struct drm_suballoc, olist);
list_for_each_entry_safe_from(sa, tmp, &sa_manager->olist, olist) {
if (!sa->fence || !dma_fence_is_signaled(sa->fence))
return;
drm_suballoc_remove_locked(sa);
}
}
static size_t drm_suballoc_hole_soffset(struct drm_suballoc_manager *sa_manager)
{
struct list_head *hole = sa_manager->hole;
if (hole != &sa_manager->olist)
return list_entry(hole, struct drm_suballoc, olist)->eoffset;
return 0;
}
static size_t drm_suballoc_hole_eoffset(struct drm_suballoc_manager *sa_manager)
{
struct list_head *hole = sa_manager->hole;
if (hole->next != &sa_manager->olist)
return list_entry(hole->next, struct drm_suballoc, olist)->soffset;
return sa_manager->size;
}
static bool drm_suballoc_try_alloc(struct drm_suballoc_manager *sa_manager,
struct drm_suballoc *sa,
size_t size, size_t align)
{
size_t soffset, eoffset, wasted;
soffset = drm_suballoc_hole_soffset(sa_manager);
eoffset = drm_suballoc_hole_eoffset(sa_manager);
wasted = round_up(soffset, align) - soffset;
if ((eoffset - soffset) >= (size + wasted)) {
soffset += wasted;
sa->manager = sa_manager;
sa->soffset = soffset;
sa->eoffset = soffset + size;
list_add(&sa->olist, sa_manager->hole);
INIT_LIST_HEAD(&sa->flist);
sa_manager->hole = &sa->olist;
return true;
}
return false;
}
static bool __drm_suballoc_event(struct drm_suballoc_manager *sa_manager,
size_t size, size_t align)
{
size_t soffset, eoffset, wasted;
unsigned int i;
for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
if (!list_empty(&sa_manager->flist[i]))
return true;
soffset = drm_suballoc_hole_soffset(sa_manager);
eoffset = drm_suballoc_hole_eoffset(sa_manager);
wasted = round_up(soffset, align) - soffset;
return ((eoffset - soffset) >= (size + wasted));
}
/**
* drm_suballoc_event() - Check if we can stop waiting
* @sa_manager: pointer to the sa_manager
* @size: number of bytes we want to allocate
* @align: alignment we need to match
*
* Return: true if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly,
 * false otherwise.
*/
static bool drm_suballoc_event(struct drm_suballoc_manager *sa_manager,
size_t size, size_t align)
{
bool ret;
spin_lock(&sa_manager->wq.lock);
ret = __drm_suballoc_event(sa_manager, size, align);
spin_unlock(&sa_manager->wq.lock);
return ret;
}
static bool drm_suballoc_next_hole(struct drm_suballoc_manager *sa_manager,
struct dma_fence **fences,
unsigned int *tries)
{
struct drm_suballoc *best_bo = NULL;
unsigned int i, best_idx;
size_t soffset, best, tmp;
/* if hole points to the end of the buffer */
if (sa_manager->hole->next == &sa_manager->olist) {
/* try again with its beginning */
sa_manager->hole = &sa_manager->olist;
return true;
}
soffset = drm_suballoc_hole_soffset(sa_manager);
/* to handle wrap around we add sa_manager->size */
best = sa_manager->size * 2;
	/* go over all fence lists and try to find the sa closest
	 * to the current last
*/
for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i) {
struct drm_suballoc *sa;
fences[i] = NULL;
if (list_empty(&sa_manager->flist[i]))
continue;
sa = list_first_entry(&sa_manager->flist[i],
struct drm_suballoc, flist);
if (!dma_fence_is_signaled(sa->fence)) {
fences[i] = sa->fence;
continue;
}
/* limit the number of tries each freelist gets */
if (tries[i] > 2)
continue;
tmp = sa->soffset;
if (tmp < soffset) {
/* wrap around, pretend it's after */
tmp += sa_manager->size;
}
tmp -= soffset;
if (tmp < best) {
/* this sa bo is the closest one */
best = tmp;
best_idx = i;
best_bo = sa;
}
}
if (best_bo) {
++tries[best_idx];
sa_manager->hole = best_bo->olist.prev;
/*
* We know that this one is signaled,
* so it's safe to remove it.
*/
drm_suballoc_remove_locked(best_bo);
return true;
}
return false;
}
/**
* drm_suballoc_new() - Make a suballocation.
* @sa_manager: pointer to the sa_manager
* @size: number of bytes we want to suballocate.
* @gfp: gfp flags used for memory allocation. Typically GFP_KERNEL but
* the argument is provided for suballocations from reclaim context or
* where the caller wants to avoid pipelining rather than wait for
* reclaim.
 * @intr: Whether to perform waits interruptibly. This should typically
* always be true, unless the caller needs to propagate a
* non-interruptible context from above layers.
* @align: Alignment. Must not exceed the default manager alignment.
* If @align is zero, then the manager alignment is used.
*
* Try to make a suballocation of size @size, which will be rounded
 * up to the alignment specified in drm_suballoc_manager_init().
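 *
 * A minimal sketch pairing an allocation with drm_suballoc_free(); mgr and
 * fence are assumed to exist in the caller:
 *
 * .. code-block:: c
 *
 *     struct drm_suballoc *sa;
 *
 *     sa = drm_suballoc_new(&mgr, 256, GFP_KERNEL, true, 0);
 *     if (IS_ERR(sa))
 *             return PTR_ERR(sa);
 *
 *     /* ... use the range [sa->soffset, sa->eoffset) ... */
 *
 *     drm_suballoc_free(sa, fence);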
*
* Return: a new suballocated bo, or an ERR_PTR.
*/
struct drm_suballoc *
drm_suballoc_new(struct drm_suballoc_manager *sa_manager, size_t size,
gfp_t gfp, bool intr, size_t align)
{
struct dma_fence *fences[DRM_SUBALLOC_MAX_QUEUES];
unsigned int tries[DRM_SUBALLOC_MAX_QUEUES];
unsigned int count;
int i, r;
struct drm_suballoc *sa;
if (WARN_ON_ONCE(align > sa_manager->align))
return ERR_PTR(-EINVAL);
if (WARN_ON_ONCE(size > sa_manager->size || !size))
return ERR_PTR(-EINVAL);
if (!align)
align = sa_manager->align;
sa = kmalloc(sizeof(*sa), gfp);
if (!sa)
return ERR_PTR(-ENOMEM);
sa->manager = sa_manager;
sa->fence = NULL;
INIT_LIST_HEAD(&sa->olist);
INIT_LIST_HEAD(&sa->flist);
spin_lock(&sa_manager->wq.lock);
do {
for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
tries[i] = 0;
do {
drm_suballoc_try_free(sa_manager);
if (drm_suballoc_try_alloc(sa_manager, sa,
size, align)) {
spin_unlock(&sa_manager->wq.lock);
return sa;
}
/* see if we can skip over some allocations */
} while (drm_suballoc_next_hole(sa_manager, fences, tries));
for (i = 0, count = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
if (fences[i])
fences[count++] = dma_fence_get(fences[i]);
if (count) {
long t;
spin_unlock(&sa_manager->wq.lock);
t = dma_fence_wait_any_timeout(fences, count, intr,
MAX_SCHEDULE_TIMEOUT,
NULL);
for (i = 0; i < count; ++i)
dma_fence_put(fences[i]);
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
} else if (intr) {
/* if we have nothing to wait for block */
r = wait_event_interruptible_locked
(sa_manager->wq,
__drm_suballoc_event(sa_manager, size, align));
} else {
spin_unlock(&sa_manager->wq.lock);
wait_event(sa_manager->wq,
drm_suballoc_event(sa_manager, size, align));
r = 0;
spin_lock(&sa_manager->wq.lock);
}
} while (!r);
spin_unlock(&sa_manager->wq.lock);
kfree(sa);
return ERR_PTR(r);
}
EXPORT_SYMBOL(drm_suballoc_new);
/**
* drm_suballoc_free - Free a suballocation
* @suballoc: pointer to the suballocation
* @fence: fence that signals when suballocation is idle
*
* Free the suballocation. The suballocation can be re-used after @fence signals.
*/
void drm_suballoc_free(struct drm_suballoc *suballoc,
struct dma_fence *fence)
{
struct drm_suballoc_manager *sa_manager;
if (!suballoc)
return;
sa_manager = suballoc->manager;
spin_lock(&sa_manager->wq.lock);
if (fence && !dma_fence_is_signaled(fence)) {
u32 idx;
suballoc->fence = dma_fence_get(fence);
idx = fence->context & (DRM_SUBALLOC_MAX_QUEUES - 1);
list_add_tail(&suballoc->flist, &sa_manager->flist[idx]);
} else {
drm_suballoc_remove_locked(suballoc);
}
wake_up_all_locked(&sa_manager->wq);
spin_unlock(&sa_manager->wq.lock);
}
EXPORT_SYMBOL(drm_suballoc_free);
#ifdef CONFIG_DEBUG_FS
void drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager,
struct drm_printer *p,
unsigned long long suballoc_base)
{
struct drm_suballoc *i;
spin_lock(&sa_manager->wq.lock);
list_for_each_entry(i, &sa_manager->olist, olist) {
unsigned long long soffset = i->soffset;
unsigned long long eoffset = i->eoffset;
if (&i->olist == sa_manager->hole)
drm_puts(p, ">");
else
drm_puts(p, " ");
drm_printf(p, "[0x%010llx 0x%010llx] size %8lld",
suballoc_base + soffset, suballoc_base + eoffset,
eoffset - soffset);
if (i->fence)
drm_printf(p, " protected by 0x%016llx on context %llu",
(unsigned long long)i->fence->seqno,
(unsigned long long)i->fence->context);
drm_puts(p, "\n");
}
spin_unlock(&sa_manager->wq.lock);
}
EXPORT_SYMBOL(drm_suballoc_dump_debug_info);
#endif
MODULE_AUTHOR("Multiple");
MODULE_DESCRIPTION("Range suballocator helper");
MODULE_LICENSE("Dual MIT/GPL");
| linux-master | drivers/gpu/drm/drm_suballoc.c |
/*
* Created: Fri Jan 19 10:48:35 2001 by [email protected]
*
* Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Author Rickard E. (Rik) Faith <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <drm/drm_accel.h>
#include <drm/drm_cache.h>
#include <drm/drm_client.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_machine.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"
MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;
/*
* If the drm core fails to init for whatever reason,
* we should prevent any drivers from registering with it.
* It's best to check this at drm_dev_init(), as some drivers
* prefer to embed struct drm_device into their own device
* structure and call drm_dev_init() themselves.
*/
static bool drm_core_init_complete;
static struct dentry *drm_debugfs_root;
DEFINE_STATIC_SRCU(drm_unplug_srcu);
/*
* DRM Minors
* A DRM device can provide several char-dev interfaces on the DRM-Major. Each
* of them is represented by a drm_minor object. Depending on the capabilities
* of the device-driver, different interfaces are registered.
*
* Minors can be accessed via dev->$minor_name. This pointer is either
* NULL or a valid drm_minor pointer and stays valid as long as the device is
* valid. This means, DRM minors have the same life-time as the underlying
* device. However, this doesn't mean that the minor is active. Minors are
* registered and unregistered dynamically according to device-state.
*/
static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
enum drm_minor_type type)
{
switch (type) {
case DRM_MINOR_PRIMARY:
return &dev->primary;
case DRM_MINOR_RENDER:
return &dev->render;
case DRM_MINOR_ACCEL:
return &dev->accel;
default:
BUG();
}
}
static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
struct drm_minor *minor = data;
unsigned long flags;
WARN_ON(dev != minor->dev);
put_device(minor->kdev);
if (minor->type == DRM_MINOR_ACCEL) {
accel_minor_remove(minor->index);
} else {
spin_lock_irqsave(&drm_minor_lock, flags);
idr_remove(&drm_minors_idr, minor->index);
spin_unlock_irqrestore(&drm_minor_lock, flags);
}
}
static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
{
struct drm_minor *minor;
unsigned long flags;
int r;
minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
if (!minor)
return -ENOMEM;
minor->type = type;
minor->dev = dev;
idr_preload(GFP_KERNEL);
if (type == DRM_MINOR_ACCEL) {
r = accel_minor_alloc();
} else {
spin_lock_irqsave(&drm_minor_lock, flags);
r = idr_alloc(&drm_minors_idr,
NULL,
64 * type,
64 * (type + 1),
GFP_NOWAIT);
spin_unlock_irqrestore(&drm_minor_lock, flags);
}
idr_preload_end();
if (r < 0)
return r;
minor->index = r;
r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
if (r)
return r;
minor->kdev = drm_sysfs_minor_alloc(minor);
if (IS_ERR(minor->kdev))
return PTR_ERR(minor->kdev);
*drm_minor_get_slot(dev, type) = minor;
return 0;
}
static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
{
struct drm_minor *minor;
unsigned long flags;
int ret;
DRM_DEBUG("\n");
minor = *drm_minor_get_slot(dev, type);
if (!minor)
return 0;
if (minor->type == DRM_MINOR_ACCEL) {
accel_debugfs_init(minor, minor->index);
} else {
ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
goto err_debugfs;
}
}
ret = device_add(minor->kdev);
if (ret)
goto err_debugfs;
/* replace NULL with @minor so lookups will succeed from now on */
if (minor->type == DRM_MINOR_ACCEL) {
accel_minor_replace(minor, minor->index);
} else {
spin_lock_irqsave(&drm_minor_lock, flags);
idr_replace(&drm_minors_idr, minor, minor->index);
spin_unlock_irqrestore(&drm_minor_lock, flags);
}
DRM_DEBUG("new minor registered %d\n", minor->index);
return 0;
err_debugfs:
drm_debugfs_cleanup(minor);
return ret;
}
static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
{
struct drm_minor *minor;
unsigned long flags;
minor = *drm_minor_get_slot(dev, type);
if (!minor || !device_is_registered(minor->kdev))
return;
/* replace @minor with NULL so lookups will fail from now on */
if (minor->type == DRM_MINOR_ACCEL) {
accel_minor_replace(NULL, minor->index);
} else {
spin_lock_irqsave(&drm_minor_lock, flags);
idr_replace(&drm_minors_idr, NULL, minor->index);
spin_unlock_irqrestore(&drm_minor_lock, flags);
}
device_del(minor->kdev);
dev_set_drvdata(minor->kdev, NULL); /* safety belt */
drm_debugfs_cleanup(minor);
}
/*
* Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference count of the underlying device is increased, so you must release this
* object with drm_minor_release().
*
* As long as you hold this minor, it is guaranteed that the object and the
* minor->dev pointer will stay valid! However, the device may get unplugged and
* unregistered while you hold the minor.
*/
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
struct drm_minor *minor;
unsigned long flags;
spin_lock_irqsave(&drm_minor_lock, flags);
minor = idr_find(&drm_minors_idr, minor_id);
if (minor)
drm_dev_get(minor->dev);
spin_unlock_irqrestore(&drm_minor_lock, flags);
if (!minor) {
return ERR_PTR(-ENODEV);
} else if (drm_dev_is_unplugged(minor->dev)) {
drm_dev_put(minor->dev);
return ERR_PTR(-ENODEV);
}
return minor;
}
void drm_minor_release(struct drm_minor *minor)
{
drm_dev_put(minor->dev);
}
/**
* DOC: driver instance overview
*
* A device instance for a drm driver is represented by &struct drm_device. This
* is allocated and initialized with devm_drm_dev_alloc(), usually from
* bus-specific ->probe() callbacks implemented by the driver. The driver then
* needs to initialize all the various subsystems for the drm device like memory
* management, vblank handling, modesetting support and initial output
* configuration plus obviously initialize all the corresponding hardware bits.
* Finally when everything is up and running and ready for userspace the device
* instance can be published using drm_dev_register().
*
* There is also deprecated support for initializing device instances using
* bus-specific helpers and the &drm_driver.load callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
*
* When cleaning up a device instance everything needs to be done in reverse:
* First unpublish the device instance with drm_dev_unregister(). Then clean up
* any other resources allocated at device initialization and drop the driver's
* reference to &drm_device using drm_dev_put().
*
* Note that any allocation or resource which is visible to userspace must be
* released only when the final drm_dev_put() is called, and not when the
* driver is unbound from the underlying physical struct &device. Best to use
* &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
* related functions.
*
* devres managed resources like devm_kmalloc() can only be used for resources
* directly related to the underlying hardware device, and only used in code
* paths fully protected by drm_dev_enter() and drm_dev_exit().
*
* Display driver example
* ~~~~~~~~~~~~~~~~~~~~~~
*
* The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that are
 * almost always present, and serves as a demonstration of devm_drm_dev_alloc().
*
* .. code-block:: c
*
* struct driver_device {
* struct drm_device drm;
* void *userspace_facing;
* struct clk *pclk;
* };
*
* static const struct drm_driver driver_drm_driver = {
* [...]
* };
*
* static int driver_probe(struct platform_device *pdev)
* {
* struct driver_device *priv;
* struct drm_device *drm;
* int ret;
*
* priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
* struct driver_device, drm);
* if (IS_ERR(priv))
* return PTR_ERR(priv);
* drm = &priv->drm;
*
* ret = drmm_mode_config_init(drm);
* if (ret)
* return ret;
*
* priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
* if (!priv->userspace_facing)
* return -ENOMEM;
*
* priv->pclk = devm_clk_get(dev, "PCLK");
* if (IS_ERR(priv->pclk))
* return PTR_ERR(priv->pclk);
*
* // Further setup, display pipeline etc
*
* platform_set_drvdata(pdev, drm);
*
* drm_mode_config_reset(drm);
*
 *             ret = drm_dev_register(drm, 0);
* if (ret)
* return ret;
*
* drm_fbdev_generic_setup(drm, 32);
*
* return 0;
* }
*
* // This function is called before the devm_ resources are released
* static int driver_remove(struct platform_device *pdev)
* {
* struct drm_device *drm = platform_get_drvdata(pdev);
*
* drm_dev_unregister(drm);
 *             drm_atomic_helper_shutdown(drm);
*
* return 0;
* }
*
* // This function is called on kernel restart and shutdown
* static void driver_shutdown(struct platform_device *pdev)
* {
* drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
* }
*
* static int __maybe_unused driver_pm_suspend(struct device *dev)
* {
* return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
* }
*
* static int __maybe_unused driver_pm_resume(struct device *dev)
* {
* drm_mode_config_helper_resume(dev_get_drvdata(dev));
*
* return 0;
* }
*
* static const struct dev_pm_ops driver_pm_ops = {
* SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
* };
*
* static struct platform_driver driver_driver = {
* .driver = {
* [...]
* .pm = &driver_pm_ops,
* },
* .probe = driver_probe,
* .remove = driver_remove,
* .shutdown = driver_shutdown,
* };
* module_platform_driver(driver_driver);
*
* Drivers that want to support device unplugging (USB, DT overlay unload) should
* use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
 * regions that access device resources to prevent use after they're
 * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
 * shortcoming, however: drm_dev_unplug() marks the drm_device as unplugged before
* drm_atomic_helper_shutdown() is called. This means that if the disable code
* paths are protected, they will not run on regular driver module unload,
* possibly leaving the hardware enabled.
*/
/**
* drm_put_dev - Unregister and release a DRM device
* @dev: DRM device
*
* Called at module unload time or when a PCI device is unplugged.
*
* Cleans up all DRM device, calling drm_lastclose().
*
* Note: Use of this function is deprecated. It will eventually go away
* completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
* instead to make sure that the device isn't userspace accessible any more
* while teardown is in progress, ensuring that userspace can't access an
* inconsistent state.
*/
void drm_put_dev(struct drm_device *dev)
{
DRM_DEBUG("\n");
if (!dev) {
DRM_ERROR("cleanup called no dev\n");
return;
}
drm_dev_unregister(dev);
drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);
/**
* drm_dev_enter - Enter device critical section
* @dev: DRM device
* @idx: Pointer to index that will be passed to the matching drm_dev_exit()
*
* This function marks and protects the beginning of a section that should not
* be entered after the device has been unplugged. The section end is marked
* with drm_dev_exit(). Calls to this function can be nested.
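 *
 * A minimal sketch of the intended pattern:
 *
 * .. code-block:: c
 *
 *     int idx;
 *
 *     if (!drm_dev_enter(drm, &idx))
 *             return -ENODEV;  /* device has been unplugged */
 *
 *     /* ... access device resources ... */
 *
 *     drm_dev_exit(idx);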
*
* Returns:
* True if it is OK to enter the section, false otherwise.
*/
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
*idx = srcu_read_lock(&drm_unplug_srcu);
if (dev->unplugged) {
srcu_read_unlock(&drm_unplug_srcu, *idx);
return false;
}
return true;
}
EXPORT_SYMBOL(drm_dev_enter);
/**
* drm_dev_exit - Exit device critical section
* @idx: index returned from drm_dev_enter()
*
* This function marks the end of a section that should not be entered after
* the device has been unplugged.
*/
void drm_dev_exit(int idx)
{
srcu_read_unlock(&drm_unplug_srcu, idx);
}
EXPORT_SYMBOL(drm_dev_exit);
/**
* drm_dev_unplug - unplug a DRM device
* @dev: DRM device
*
* This unplugs a hotpluggable DRM device, which makes it inaccessible to
* userspace operations. Entry-points can use drm_dev_enter() and
* drm_dev_exit() to protect device resources in a race free manner. This
* essentially unregisters the device like drm_dev_unregister(), but can be
* called while there are still open users of @dev.
*/
void drm_dev_unplug(struct drm_device *dev)
{
/*
	 * After synchronizing, any critical read section is guaranteed to see
* the new value of ->unplugged, and any critical section which might
* still have seen the old value of ->unplugged is guaranteed to have
* finished.
*/
dev->unplugged = true;
synchronize_srcu(&drm_unplug_srcu);
drm_dev_unregister(dev);
/* Clear all CPU mappings pointing to this device */
unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
}
EXPORT_SYMBOL(drm_dev_unplug);
/*
* DRM internal mount
* We want to be able to allocate our own "struct address_space" to control
* memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
* stand-alone address_space objects, so we need an underlying inode. As there
* is no way to allocate an independent inode easily, we need a fake internal
* VFS mount-point.
*
* The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
* frees it again. You are allowed to use iget() and iput() to get references to
* the inode. But each drm_fs_inode_new() call must be paired with exactly one
* drm_fs_inode_free() call (which does not have to be the last iput()).
* We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
* between multiple inode-users. You could, technically, call
* iget() + drm_fs_inode_free() directly after alloc and sometime later do an
* iput(), but this way you'd end up with a new vfsmount for each inode.
*/
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;
static int drm_fs_init_fs_context(struct fs_context *fc)
{
return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
}
static struct file_system_type drm_fs_type = {
.name = "drm",
.owner = THIS_MODULE,
.init_fs_context = drm_fs_init_fs_context,
.kill_sb = kill_anon_super,
};
static struct inode *drm_fs_inode_new(void)
{
struct inode *inode;
int r;
r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
if (r < 0) {
DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
return ERR_PTR(r);
}
inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
if (IS_ERR(inode))
simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
return inode;
}
static void drm_fs_inode_free(struct inode *inode)
{
if (inode) {
iput(inode);
simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
}
}
/**
* DOC: component helper usage recommendations
*
* DRM drivers that drive hardware where a logical device consists of a pile of
* independent hardware blocks are recommended to use the :ref:`component helper
* library<component>`. For consistency and better options for code reuse the
* following guidelines apply:
*
* - The entire device initialization procedure should be run from the
* &component_master_ops.master_bind callback, starting with
* devm_drm_dev_alloc(), then binding all components with
 *   component_bind_all() and finishing with drm_dev_register(); a minimal
 *   sketch follows after this list.
*
* - The opaque pointer passed to all components through component_bind_all()
* should point at &struct drm_device of the device instance, not some driver
* specific private structure.
*
* - The component helper fills the niche where further standardization of
* interfaces is not practical. When there already is, or will be, a
* standardized interface like &drm_bridge or &drm_panel, providing its own
* functions to find such components at driver load time, like
* drm_of_find_panel_or_bridge(), then the component helper should not be
* used.
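 *
 * A minimal sketch of such a &component_master_ops.master_bind callback,
 * reusing the driver_device structure from the display driver example above:
 *
 * .. code-block:: c
 *
 *     static int driver_master_bind(struct device *dev)
 *     {
 *             struct driver_device *priv;
 *             int ret;
 *
 *             priv = devm_drm_dev_alloc(dev, &driver_drm_driver,
 *                                       struct driver_device, drm);
 *             if (IS_ERR(priv))
 *                     return PTR_ERR(priv);
 *
 *             ret = component_bind_all(dev, &priv->drm);
 *             if (ret)
 *                     return ret;
 *
 *             return drm_dev_register(&priv->drm, 0);
 *     }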
*/
static void drm_dev_init_release(struct drm_device *dev, void *res)
{
drm_legacy_ctxbitmap_cleanup(dev);
drm_legacy_remove_map_hash(dev);
drm_fs_inode_free(dev->anon_inode);
put_device(dev->dev);
/* Prevent use-after-free in drm_managed_release when debugging is
* enabled. Slightly awkward, but can't really be helped. */
dev->dev = NULL;
mutex_destroy(&dev->master_mutex);
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
mutex_destroy(&dev->debugfs_mutex);
drm_legacy_destroy_members(dev);
}
static int drm_dev_init(struct drm_device *dev,
const struct drm_driver *driver,
struct device *parent)
{
struct inode *inode;
int ret;
if (!drm_core_init_complete) {
DRM_ERROR("DRM core is not initialized\n");
return -ENODEV;
}
if (WARN_ON(!parent))
return -EINVAL;
kref_init(&dev->ref);
dev->dev = get_device(parent);
dev->driver = driver;
INIT_LIST_HEAD(&dev->managed.resources);
spin_lock_init(&dev->managed.lock);
/* no per-device feature limits by default */
dev->driver_features = ~0u;
if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL) &&
(drm_core_check_feature(dev, DRIVER_RENDER) ||
drm_core_check_feature(dev, DRIVER_MODESET))) {
DRM_ERROR("DRM driver can't be both a compute acceleration and graphics driver\n");
return -EINVAL;
}
drm_legacy_init_members(dev);
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
INIT_LIST_HEAD(&dev->vblank_event_list);
INIT_LIST_HEAD(&dev->debugfs_list);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
mutex_init(&dev->debugfs_mutex);
ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
if (ret)
return ret;
inode = drm_fs_inode_new();
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
goto err;
}
dev->anon_inode = inode;
if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) {
ret = drm_minor_alloc(dev, DRM_MINOR_ACCEL);
if (ret)
goto err;
} else {
if (drm_core_check_feature(dev, DRIVER_RENDER)) {
ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
if (ret)
goto err;
}
ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
if (ret)
goto err;
}
ret = drm_legacy_create_map_hash(dev);
if (ret)
goto err;
drm_legacy_ctxbitmap_init(dev);
if (drm_core_check_feature(dev, DRIVER_GEM)) {
ret = drm_gem_init(dev);
if (ret) {
DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
goto err;
}
}
dev->unique = drmm_kstrdup(dev, dev_name(parent), GFP_KERNEL);
if (!dev->unique) {
ret = -ENOMEM;
goto err;
}
return 0;
err:
drm_managed_release(dev);
return ret;
}
static void devm_drm_dev_init_release(void *data)
{
drm_dev_put(data);
}
static int devm_drm_dev_init(struct device *parent,
struct drm_device *dev,
const struct drm_driver *driver)
{
int ret;
ret = drm_dev_init(dev, driver, parent);
if (ret)
return ret;
return devm_add_action_or_reset(parent,
devm_drm_dev_init_release, dev);
}
void *__devm_drm_dev_alloc(struct device *parent,
const struct drm_driver *driver,
size_t size, size_t offset)
{
void *container;
struct drm_device *drm;
int ret;
container = kzalloc(size, GFP_KERNEL);
if (!container)
return ERR_PTR(-ENOMEM);
drm = container + offset;
ret = devm_drm_dev_init(parent, drm, driver);
if (ret) {
kfree(container);
return ERR_PTR(ret);
}
drmm_add_final_kfree(drm, container);
return container;
}
EXPORT_SYMBOL(__devm_drm_dev_alloc);
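/*
 * Illustrative sketch (not part of the original file): drivers normally
 * reach __devm_drm_dev_alloc() through the devm_drm_dev_alloc() macro,
 * embedding &struct drm_device in their own structure. The example_*()
 * names are assumptions.
 */
#if 0
struct example_device {
	struct drm_device drm;	/* embedded, not a pointer */
	void __iomem *mmio;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_device *edev;

	edev = devm_drm_dev_alloc(&pdev->dev, &example_driver,
				  struct example_device, drm);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* teardown is handled by devres plus drmm actions */
	return drm_dev_register(&edev->drm, 0);
}
#endif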
/**
* drm_dev_alloc - Allocate new DRM device
* @driver: DRM driver to allocate device for
* @parent: Parent device object
*
* This is the deprecated version of devm_drm_dev_alloc(), which does not support
* subclassing through embedding the struct &drm_device in a driver private
* structure, and which does not support automatic cleanup through devres.
*
* RETURNS:
* Pointer to new DRM device, or ERR_PTR on failure.
*/
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
struct device *parent)
{
struct drm_device *dev;
int ret;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
ret = drm_dev_init(dev, driver, parent);
if (ret) {
kfree(dev);
return ERR_PTR(ret);
}
drmm_add_final_kfree(dev, dev);
return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);
static void drm_dev_release(struct kref *ref)
{
struct drm_device *dev = container_of(ref, struct drm_device, ref);
if (dev->driver->release)
dev->driver->release(dev);
drm_managed_release(dev);
kfree(dev->managed.final_kfree);
}
/**
* drm_dev_get - Take reference of a DRM device
* @dev: device to take reference of or NULL
*
* This increases the ref-count of @dev by one. You *must* already own a
* reference when calling this. Use drm_dev_put() to drop this reference
* again.
*
* This function never fails. However, this function does not provide *any*
* guarantee whether the device is alive or running. It only provides a
* reference to the object and the memory associated with it.
*/
void drm_dev_get(struct drm_device *dev)
{
if (dev)
kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_get);
/**
* drm_dev_put - Drop reference of a DRM device
* @dev: device to drop reference of or NULL
*
* This decreases the ref-count of @dev by one. The device is destroyed if the
* ref-count drops to zero.
*/
void drm_dev_put(struct drm_device *dev)
{
if (dev)
kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_put);
static int create_compat_control_link(struct drm_device *dev)
{
struct drm_minor *minor;
char *name;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
if (!minor)
return 0;
/*
	 * Some existing userspace out there uses the existence of the controlD*
* sysfs files to figure out whether it's a modeset driver. It only does
* readdir, hence a symlink is sufficient (and the least confusing
* option). Otherwise controlD* is entirely unused.
*
	 * Old controlD chardevs have been allocated in the range
	 * 64-127.
*/
name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
if (!name)
return -ENOMEM;
ret = sysfs_create_link(minor->kdev->kobj.parent,
&minor->kdev->kobj,
name);
kfree(name);
return ret;
}
static void remove_compat_control_link(struct drm_device *dev)
{
struct drm_minor *minor;
char *name;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
if (!minor)
return;
name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
if (!name)
return;
sysfs_remove_link(minor->kdev->kobj.parent, name);
kfree(name);
}
/**
* drm_dev_register - Register DRM device
* @dev: Device to register
* @flags: Flags passed to the driver's .load() function
*
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must have been initialized via
 * drm_dev_init() beforehand.
*
* Never call this twice on any device!
*
 * NOTE: To ensure backward compatibility with existing drivers, this
 * function calls the &drm_driver.load method after registering the device
 * nodes, creating race conditions. Usage of the &drm_driver.load method is
 * therefore deprecated, drivers must perform all initialization before calling
* drm_dev_register().
*
* RETURNS:
* 0 on success, negative error code on failure.
*/
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
const struct drm_driver *driver = dev->driver;
int ret;
if (!driver->load)
drm_mode_config_validate(dev);
WARN_ON(!dev->managed.final_kfree);
if (drm_dev_needs_global_mutex(dev))
mutex_lock(&drm_global_mutex);
ret = drm_minor_register(dev, DRM_MINOR_RENDER);
if (ret)
goto err_minors;
ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
if (ret)
goto err_minors;
ret = drm_minor_register(dev, DRM_MINOR_ACCEL);
if (ret)
goto err_minors;
ret = create_compat_control_link(dev);
if (ret)
goto err_minors;
dev->registered = true;
if (driver->load) {
ret = driver->load(dev, flags);
if (ret)
goto err_minors;
}
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_register_all(dev);
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor,
driver->patchlevel, driver->date,
dev->dev ? dev_name(dev->dev) : "virtual device",
dev->primary ? dev->primary->index : dev->accel->index);
goto out_unlock;
err_minors:
remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_ACCEL);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
if (drm_dev_needs_global_mutex(dev))
mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_dev_register);
/**
* drm_dev_unregister - Unregister DRM device
* @dev: Device to unregister
*
* Unregister the DRM device from the system. This does the reverse of
* drm_dev_register() but does not deallocate the device. The caller must call
* drm_dev_put() to drop their final reference, unless it is managed with devres
* (as devices allocated with devm_drm_dev_alloc() are), in which case there is
* already an unwind action registered.
*
* A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
* which can be called while there are still open users of @dev.
*
* This should be called first in the device teardown code to make sure
* userspace can't access the device instance any more.
*/
void drm_dev_unregister(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_lastclose(dev);
dev->registered = false;
drm_client_dev_unregister(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_unregister_all(dev);
if (dev->driver->unload)
dev->driver->unload(dev);
drm_legacy_pci_agp_destroy(dev);
drm_legacy_rmmaps(dev);
remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_ACCEL);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
EXPORT_SYMBOL(drm_dev_unregister);
/*
* DRM Core
* The DRM core module initializes all global DRM objects and makes them
* available to drivers. Once setup, drivers can probe their respective
* devices.
* Currently, core management includes:
* - The "DRM-Global" key/value database
* - Global ID management for connectors
* - DRM major number allocation
* - DRM minor management
* - DRM sysfs class
* - DRM debugfs root
*
* Furthermore, the DRM core provides dynamic char-dev lookups. For each
* interface registered on a DRM device, you can request minor numbers from DRM
* core. DRM core takes care of major-number management and char-dev
* registration. A stub ->open() callback forwards any open() requests to the
* registered minor.
*/
static int drm_stub_open(struct inode *inode, struct file *filp)
{
const struct file_operations *new_fops;
struct drm_minor *minor;
int err;
DRM_DEBUG("\n");
minor = drm_minor_acquire(iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
new_fops = fops_get(minor->dev->driver->fops);
if (!new_fops) {
err = -ENODEV;
goto out;
}
replace_fops(filp, new_fops);
if (filp->f_op->open)
err = filp->f_op->open(inode, filp);
else
err = 0;
out:
drm_minor_release(minor);
return err;
}
static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
.open = drm_stub_open,
.llseek = noop_llseek,
};
static void drm_core_exit(void)
{
drm_privacy_screen_lookup_exit();
accel_core_exit();
unregister_chrdev(DRM_MAJOR, "drm");
debugfs_remove(drm_debugfs_root);
drm_sysfs_destroy();
idr_destroy(&drm_minors_idr);
drm_connector_ida_destroy();
}
static int __init drm_core_init(void)
{
int ret;
drm_connector_ida_init();
idr_init(&drm_minors_idr);
drm_memcpy_init_early();
ret = drm_sysfs_init();
if (ret < 0) {
DRM_ERROR("Cannot create DRM class: %d\n", ret);
goto error;
}
drm_debugfs_root = debugfs_create_dir("dri", NULL);
ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
if (ret < 0)
goto error;
ret = accel_core_init();
if (ret < 0)
goto error;
drm_privacy_screen_lookup_init();
drm_core_init_complete = true;
DRM_DEBUG("Initialized\n");
return 0;
error:
drm_core_exit();
return ret;
}
module_init(drm_core_init);
module_exit(drm_core_exit);
| linux-master | drivers/gpu/drm/drm_drv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2018 Noralf Trønnes
*/
#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
MODULE_IMPORT_NS(DMA_BUF);
/**
* DOC: overview
*
* This library provides helpers for GEM objects backed by shmem buffers
* allocated using anonymous pageable memory.
*
* Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
* For GEM callback helpers in struct &drm_gem_object functions, see likewise
* named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
* drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
*/
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.free = drm_gem_shmem_object_free,
.print_info = drm_gem_shmem_object_print_info,
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
.vm_ops = &drm_gem_shmem_vm_ops,
};
static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
struct drm_gem_shmem_object *shmem;
struct drm_gem_object *obj;
int ret = 0;
size = PAGE_ALIGN(size);
if (dev->driver->gem_create_object) {
obj = dev->driver->gem_create_object(dev, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
shmem = to_drm_gem_shmem_obj(obj);
} else {
shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
if (!shmem)
return ERR_PTR(-ENOMEM);
obj = &shmem->base;
}
if (!obj->funcs)
obj->funcs = &drm_gem_shmem_funcs;
if (private) {
drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
} else {
ret = drm_gem_object_init(dev, obj, size);
}
if (ret) {
drm_gem_private_object_fini(obj);
goto err_free;
}
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto err_release;
INIT_LIST_HEAD(&shmem->madv_list);
if (!private) {
/*
* Our buffers are kept pinned, so allocating them
* from the MOVABLE zone is a really bad idea, and
* conflicts with CMA. See comments above new_inode()
* why this is required _and_ expected if you're
* going to pin these pages.
*/
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
__GFP_RETRY_MAYFAIL | __GFP_NOWARN);
}
return shmem;
err_release:
drm_gem_object_release(obj);
err_free:
kfree(obj);
return ERR_PTR(ret);
}
/**
* drm_gem_shmem_create - Allocate an object with the given size
* @dev: DRM device
* @size: Size of the object to allocate
*
* This function creates a shmem GEM object.
*
* Returns:
* A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
* error code on failure.
*/
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
/**
* drm_gem_shmem_free - Free resources associated with a shmem GEM object
* @shmem: shmem GEM object to free
*
* This function cleans up the GEM object state and frees the memory used to
* store the object itself.
*/
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
if (obj->import_attach) {
drm_prime_gem_destroy(obj, shmem->sgt);
} else {
dma_resv_lock(shmem->base.resv, NULL);
drm_WARN_ON(obj->dev, shmem->vmap_use_count);
if (shmem->sgt) {
dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
DMA_BIDIRECTIONAL, 0);
sg_free_table(shmem->sgt);
kfree(shmem->sgt);
}
if (shmem->pages)
drm_gem_shmem_put_pages(shmem);
drm_WARN_ON(obj->dev, shmem->pages_use_count);
dma_resv_unlock(shmem->base.resv);
}
drm_gem_object_release(obj);
kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct page **pages;
dma_resv_assert_held(shmem->base.resv);
if (shmem->pages_use_count++ > 0)
return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
PTR_ERR(pages));
shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
/*
* TODO: Allocating WC pages which are correctly flushed is only
* supported on x86. Ideal solution would be a GFP_WC flag, which also
* ttm_pool.c could use.
*/
#ifdef CONFIG_X86
if (shmem->map_wc)
set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif
shmem->pages = pages;
return 0;
}
/**
* drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
* @shmem: shmem GEM object
*
* This function decreases the use count and puts the backing pages when use drops to zero.
*/
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
dma_resv_assert_held(shmem->base.resv);
if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
return;
if (--shmem->pages_use_count > 0)
return;
#ifdef CONFIG_X86
if (shmem->map_wc)
set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif
drm_gem_put_pages(obj, shmem->pages,
shmem->pages_mark_dirty_on_put,
shmem->pages_mark_accessed_on_put);
shmem->pages = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	return drm_gem_shmem_get_pages(shmem);
}
static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
dma_resv_assert_held(shmem->base.resv);
drm_gem_shmem_put_pages(shmem);
}
/**
* drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
* @shmem: shmem GEM object
*
* This function makes sure the backing pages are pinned in memory while the
* buffer is exported.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
int ret;
drm_WARN_ON(obj->dev, obj->import_attach);
ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
if (ret)
return ret;
ret = drm_gem_shmem_pin_locked(shmem);
dma_resv_unlock(shmem->base.resv);
return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin);
/**
* drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
* @shmem: shmem GEM object
*
* This function removes the requirement that the backing pages are pinned in
* memory.
*/
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
drm_WARN_ON(obj->dev, obj->import_attach);
dma_resv_lock(shmem->base.resv, NULL);
drm_gem_shmem_unpin_locked(shmem);
dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
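/*
 * Illustrative sketch (not part of the original file): pairing
 * drm_gem_shmem_pin() and drm_gem_shmem_unpin() around a phase where the
 * backing pages must stay resident. example_hw_access() is an assumption.
 */
#if 0
static int example_with_pinned_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = drm_gem_shmem_pin(shmem);
	if (ret)
		return ret;

	example_hw_access(shmem);	/* hypothetical hardware access */

	drm_gem_shmem_unpin(shmem);
	return 0;
}
#endif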
/**
* drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing
* store.
*
* This function makes sure that a contiguous kernel virtual address mapping
* exists for the buffer backing the shmem GEM object. It hides the differences
* between dma-buf imported and natively allocated objects.
*
* Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
if (obj->import_attach) {
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
if (drm_WARN_ON(obj->dev, map->is_iomem)) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
return -EIO;
}
}
} else {
pgprot_t prot = PAGE_KERNEL;
dma_resv_assert_held(shmem->base.resv);
if (shmem->vmap_use_count++ > 0) {
iosys_map_set_vaddr(map, shmem->vaddr);
return 0;
}
ret = drm_gem_shmem_get_pages(shmem);
if (ret)
goto err_zero_use;
if (shmem->map_wc)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
if (!shmem->vaddr)
ret = -ENOMEM;
else
iosys_map_set_vaddr(map, shmem->vaddr);
}
if (ret) {
drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
goto err_put_pages;
}
return 0;
err_put_pages:
if (!obj->import_attach)
drm_gem_shmem_put_pages(shmem);
err_zero_use:
shmem->vmap_use_count = 0;
return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
/**
* drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
* This function cleans up a kernel virtual address mapping acquired by
* drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
* zero.
*
* This function hides the differences between dma-buf imported and natively
* allocated objects.
*/
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
if (obj->import_attach) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
dma_resv_assert_held(shmem->base.resv);
if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
return;
if (--shmem->vmap_use_count > 0)
return;
vunmap(shmem->vaddr);
drm_gem_shmem_put_pages(shmem);
}
shmem->vaddr = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
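/*
 * Illustrative sketch (not part of the original file): CPU access to a
 * natively allocated object. The reservation lock must be held around the
 * vmap/vunmap pair; example_cpu_clear() is an assumption.
 */
#if 0
static int example_cpu_clear(struct drm_gem_shmem_object *shmem)
{
	struct iosys_map map;
	int ret;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_vmap(shmem, &map);
	if (!ret) {
		iosys_map_memset(&map, 0, 0, shmem->base.size);
		drm_gem_shmem_vunmap(shmem, &map);
	}
	dma_resv_unlock(shmem->base.resv);
	return ret;
}
#endif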
static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
struct drm_device *dev, size_t size,
uint32_t *handle)
{
struct drm_gem_shmem_object *shmem;
int ret;
shmem = drm_gem_shmem_create(dev, size);
if (IS_ERR(shmem))
return PTR_ERR(shmem);
/*
* Allocate an id of idr table where the obj is registered
* and handle has the id what user can see.
*/
ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(&shmem->base);
return ret;
}
/* Update madvise status, returns true if not purged, else false. */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
dma_resv_assert_held(shmem->base.resv);
if (shmem->madv >= 0)
shmem->madv = madv;
madv = shmem->madv;
return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
dma_resv_assert_held(shmem->base.resv);
drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(shmem->sgt);
kfree(shmem->sgt);
shmem->sgt = NULL;
drm_gem_shmem_put_pages(shmem);
shmem->madv = -1;
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
drm_gem_free_mmap_offset(obj);
	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
* @file: DRM file structure to create the dumb buffer for
* @dev: DRM device
* @args: IOCTL data
*
 * This function computes the pitch of the dumb buffer and rounds it up to a
 * whole number of bytes. Drivers for hardware that doesn't have
* any additional restrictions on the pitch can directly use this function as
* their &drm_driver.dumb_create callback.
*
* For hardware with additional restrictions, drivers can adjust the fields
* set up by userspace before calling into this function.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
if (!args->pitch || !args->size) {
args->pitch = min_pitch;
args->size = PAGE_ALIGN(args->pitch * args->height);
} else {
/* ensure sane minimum values */
if (args->pitch < min_pitch)
args->pitch = min_pitch;
if (args->size < args->pitch * args->height)
args->size = PAGE_ALIGN(args->pitch * args->height);
}
return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
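/*
 * Illustrative sketch (not part of the original file): a driver with an
 * additional pitch restriction adjusting the userspace-supplied fields
 * before calling into this helper. The 64-byte alignment and the
 * example_dumb_create() name are assumptions.
 */
#if 0
static int example_dumb_create(struct drm_file *file, struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	/* assumed hardware requirement: pitch must be a multiple of 64 bytes */
	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	return drm_gem_shmem_dumb_create(file, dev, args);
}
#endif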
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
loff_t num_pages = obj->size >> PAGE_SHIFT;
vm_fault_t ret;
struct page *page;
pgoff_t page_offset;
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
dma_resv_lock(shmem->base.resv, NULL);
if (page_offset >= num_pages ||
drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
shmem->madv < 0) {
ret = VM_FAULT_SIGBUS;
} else {
page = shmem->pages[page_offset];
ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
}
dma_resv_unlock(shmem->base.resv);
return ret;
}
static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
drm_WARN_ON(obj->dev, obj->import_attach);
dma_resv_lock(shmem->base.resv, NULL);
/*
* We should have already pinned the pages when the buffer was first
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
shmem->pages_use_count++;
dma_resv_unlock(shmem->base.resv);
drm_gem_vm_open(vma);
}
static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
dma_resv_lock(shmem->base.resv, NULL);
drm_gem_shmem_put_pages(shmem);
dma_resv_unlock(shmem->base.resv);
drm_gem_vm_close(vma);
}
const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
/**
* drm_gem_shmem_mmap - Memory-map a shmem GEM object
* @shmem: shmem GEM object
* @vma: VMA for the area to be mapped
*
* This function implements an augmented version of the GEM DRM file mmap
* operation for shmem objects.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
struct drm_gem_object *obj = &shmem->base;
int ret;
if (obj->import_attach) {
/* Reset both vm_ops and vm_private_data, so we don't end up with
* vm_ops pointing to our implementation if the dma-buf backend
* doesn't set those fields.
*/
vma->vm_private_data = NULL;
vma->vm_ops = NULL;
ret = dma_buf_mmap(obj->dma_buf, vma, 0);
/* Drop the reference drm_gem_mmap_obj() acquired.*/
if (!ret)
drm_gem_object_put(obj);
return ret;
}
dma_resv_lock(shmem->base.resv, NULL);
ret = drm_gem_shmem_get_pages(shmem);
dma_resv_unlock(shmem->base.resv);
if (ret)
return ret;
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
/**
* drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
* @shmem: shmem GEM object
* @p: DRM printer
* @indent: Tab indentation level
*/
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
struct drm_printer *p, unsigned int indent)
{
if (shmem->base.import_attach)
return;
drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);
/**
* drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
* pages for a shmem GEM object
* @shmem: shmem GEM object
*
* This function exports a scatter/gather table suitable for PRIME usage by
* calling the standard DMA mapping API.
*
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
*
* Returns:
* A pointer to the scatter/gather table of pinned pages or error pointer on failure.
*/
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
drm_WARN_ON(obj->dev, obj->import_attach);
return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
int ret;
struct sg_table *sgt;
if (shmem->sgt)
return shmem->sgt;
drm_WARN_ON(obj->dev, obj->import_attach);
ret = drm_gem_shmem_get_pages(shmem);
if (ret)
return ERR_PTR(ret);
sgt = drm_gem_shmem_get_sg_table(shmem);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto err_put_pages;
}
/* Map the pages for use by the h/w. */
ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret)
goto err_free_sgt;
shmem->sgt = sgt;
return sgt;
err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
err_put_pages:
drm_gem_shmem_put_pages(shmem);
return ERR_PTR(ret);
}
/**
* drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
* scatter/gather table for a shmem GEM object.
* @shmem: shmem GEM object
*
* This function returns a scatter/gather table suitable for driver usage. If
* the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
* table created.
*
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
* drm_gem_shmem_get_sg_table() should not be directly called by drivers.
*
* Returns:
 * A pointer to the scatter/gather table of pinned pages, or an error pointer
 * on failure.
*/
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
int ret;
struct sg_table *sgt;
ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
if (ret)
return ERR_PTR(ret);
sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
dma_resv_unlock(shmem->base.resv);
return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
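/*
 * Illustrative sketch (not part of the original file): walking the
 * dma-mapped scatter/gather table returned by this helper to program a
 * device MMU. example_hw_map_range() is an assumption.
 */
#if 0
static int example_map_to_hw(struct drm_gem_shmem_object *shmem)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int i;

	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	for_each_sgtable_dma_sg(sgt, sg, i)
		example_hw_map_range(sg_dma_address(sg), sg_dma_len(sg));

	return 0;
}
#endif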
/**
* drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
* another driver's scatter/gather table of pinned pages
* @dev: Device to import into
* @attach: DMA-BUF attachment
* @sgt: Scatter/gather table of pinned pages
*
* This function imports a scatter/gather table exported via DMA-BUF by
* another driver. Drivers that use the shmem helpers should set this as their
* &drm_driver.gem_prime_import_sg_table callback.
*
* Returns:
* A pointer to a newly created GEM object or an ERR_PTR-encoded negative
* error code on failure.
*/
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
size_t size = PAGE_ALIGN(attach->dmabuf->size);
struct drm_gem_shmem_object *shmem;
shmem = __drm_gem_shmem_create(dev, size, true);
if (IS_ERR(shmem))
return ERR_CAST(shmem);
shmem->sgt = sgt;
drm_dbg_prime(dev, "size = %zu\n", size);
return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/drm_gem_shmem_helper.c |
/*
* Copyright (c) 2016 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <drm/drm_plane.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_file.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_managed.h>
#include <drm/drm_vblank.h>
#include "drm_crtc_internal.h"
/**
* DOC: overview
*
* A plane represents an image source that can be blended with or overlaid on
* top of a CRTC during the scanout process. Planes take their input data from a
* &drm_framebuffer object. The plane itself specifies the cropping and scaling
* of that image, and where it is placed on the visible area of a display
* pipeline, represented by &drm_crtc. A plane can also have additional
* properties that specify how the pixels are positioned and blended, like
* rotation or Z-position. All these properties are stored in &drm_plane_state.
*
* Unless explicitly specified (via CRTC property or otherwise), the active area
* of a CRTC will be black by default. This means portions of the active area
* which are not covered by a plane will be black, and alpha blending of any
* planes with the CRTC background will blend with black at the lowest zpos.
*
 * To create a plane, a KMS driver allocates and zeroes an instance of
* &struct drm_plane (possibly as part of a larger structure) and registers it
* with a call to drm_universal_plane_init().
*
* Each plane has a type, see enum drm_plane_type. A plane can be compatible
* with multiple CRTCs, see &drm_plane.possible_crtcs.
*
* Each CRTC must have a unique primary plane userspace can attach to enable
* the CRTC. In other words, userspace must be able to attach a different
* primary plane to each CRTC at the same time. Primary planes can still be
* compatible with multiple CRTCs. There must be exactly as many primary planes
* as there are CRTCs.
*
* Legacy uAPI doesn't expose the primary and cursor planes directly. DRM core
* relies on the driver to set the primary and optionally the cursor plane used
* for legacy IOCTLs. This is done by calling drm_crtc_init_with_planes(). All
* drivers must provide one primary plane per CRTC to avoid surprising legacy
* userspace too much.
*/
/**
* DOC: standard plane properties
*
* DRM planes have a few standardized properties:
*
* type:
* Immutable property describing the type of the plane.
*
* For user-space which has enabled the &DRM_CLIENT_CAP_ATOMIC capability,
* the plane type is just a hint and is mostly superseded by atomic
* test-only commits. The type hint can still be used to come up more
* easily with a plane configuration accepted by the driver.
*
* The value of this property can be one of the following:
*
* "Primary":
* To light up a CRTC, attaching a primary plane is the most likely to
* work if it covers the whole CRTC and doesn't have scaling or
* cropping set up.
*
* Drivers may support more features for the primary plane, user-space
* can find out with test-only atomic commits.
*
* Some primary planes are implicitly used by the kernel in the legacy
* IOCTLs &DRM_IOCTL_MODE_SETCRTC and &DRM_IOCTL_MODE_PAGE_FLIP.
* Therefore user-space must not mix explicit usage of any primary
* plane (e.g. through an atomic commit) with these legacy IOCTLs.
*
* "Cursor":
* To enable this plane, using a framebuffer configured without scaling
* or cropping and with the following properties is the most likely to
* work:
*
* - If the driver provides the capabilities &DRM_CAP_CURSOR_WIDTH and
* &DRM_CAP_CURSOR_HEIGHT, create the framebuffer with this size.
* Otherwise, create a framebuffer with the size 64x64.
* - If the driver doesn't support modifiers, create a framebuffer with
* a linear layout. Otherwise, use the IN_FORMATS plane property.
*
* Drivers may support more features for the cursor plane, user-space
* can find out with test-only atomic commits.
*
* Some cursor planes are implicitly used by the kernel in the legacy
* IOCTLs &DRM_IOCTL_MODE_CURSOR and &DRM_IOCTL_MODE_CURSOR2.
* Therefore user-space must not mix explicit usage of any cursor
* plane (e.g. through an atomic commit) with these legacy IOCTLs.
*
* Some drivers may support cursors even if no cursor plane is exposed.
* In this case, the legacy cursor IOCTLs can be used to configure the
* cursor.
*
* "Overlay":
* Neither primary nor cursor.
*
* Overlay planes are the only planes exposed when the
* &DRM_CLIENT_CAP_UNIVERSAL_PLANES capability is disabled.
*
* IN_FORMATS:
* Blob property which contains the set of buffer format and modifier
* pairs supported by this plane. The blob is a struct
* drm_format_modifier_blob. Without this property the plane doesn't
* support buffers with modifiers. Userspace cannot change this property.
*
* Note that userspace can check the &DRM_CAP_ADDFB2_MODIFIERS driver
* capability for general modifier support. If this flag is set then every
* plane will have the IN_FORMATS property, even when it only supports
* DRM_FORMAT_MOD_LINEAR. Before linux kernel release v5.1 there have been
* various bugs in this area with inconsistencies between the capability
* flag and per-plane properties.
*/
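/*
 * Illustrative sketch (not part of the original file): how a consumer could
 * walk an IN_FORMATS blob laid out by create_in_format_blob() below.
 * example_blob_supports() is an assumption; the struct layout is the real
 * uAPI one from <uapi/drm/drm_mode.h>.
 */
#if 0
static bool example_blob_supports(const struct drm_format_modifier_blob *blob,
				  u32 format, u64 modifier)
{
	const u32 *formats = (const void *)blob + blob->formats_offset;
	const struct drm_format_modifier *mods =
		(const void *)blob + blob->modifiers_offset;
	u32 i, j;

	for (i = 0; i < blob->count_modifiers; i++) {
		if (mods[i].modifier != modifier)
			continue;
		for (j = 0; j < blob->count_formats; j++) {
			if (formats[j] != format)
				continue;
			if (j < mods[i].offset || j >= mods[i].offset + 64)
				continue;
			if (mods[i].formats & (1ULL << (j - mods[i].offset)))
				return true;
		}
	}
	return false;
}
#endif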
static unsigned int drm_num_planes(struct drm_device *dev)
{
unsigned int num = 0;
struct drm_plane *tmp;
drm_for_each_plane(tmp, dev) {
num++;
}
return num;
}
static inline u32 *
formats_ptr(struct drm_format_modifier_blob *blob)
{
return (u32 *)(((char *)blob) + blob->formats_offset);
}
static inline struct drm_format_modifier *
modifiers_ptr(struct drm_format_modifier_blob *blob)
{
return (struct drm_format_modifier *)(((char *)blob) + blob->modifiers_offset);
}
static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane)
{
const struct drm_mode_config *config = &dev->mode_config;
struct drm_property_blob *blob;
struct drm_format_modifier *mod;
size_t blob_size, formats_size, modifiers_size;
struct drm_format_modifier_blob *blob_data;
unsigned int i, j;
formats_size = sizeof(__u32) * plane->format_count;
if (WARN_ON(!formats_size)) {
/* 0 formats are never expected */
return 0;
}
modifiers_size =
sizeof(struct drm_format_modifier) * plane->modifier_count;
blob_size = sizeof(struct drm_format_modifier_blob);
/* Modifiers offset is a pointer to a struct with a 64 bit field so it
* should be naturally aligned to 8B.
*/
BUILD_BUG_ON(sizeof(struct drm_format_modifier_blob) % 8);
blob_size += ALIGN(formats_size, 8);
blob_size += modifiers_size;
blob = drm_property_create_blob(dev, blob_size, NULL);
if (IS_ERR(blob))
return -1;
blob_data = blob->data;
blob_data->version = FORMAT_BLOB_CURRENT;
blob_data->count_formats = plane->format_count;
blob_data->formats_offset = sizeof(struct drm_format_modifier_blob);
blob_data->count_modifiers = plane->modifier_count;
blob_data->modifiers_offset =
ALIGN(blob_data->formats_offset + formats_size, 8);
memcpy(formats_ptr(blob_data), plane->format_types, formats_size);
mod = modifiers_ptr(blob_data);
for (i = 0; i < plane->modifier_count; i++) {
for (j = 0; j < plane->format_count; j++) {
if (!plane->funcs->format_mod_supported ||
plane->funcs->format_mod_supported(plane,
plane->format_types[j],
plane->modifiers[i])) {
mod->formats |= 1ULL << j;
}
}
mod->modifier = plane->modifiers[i];
mod->offset = 0;
mod->pad = 0;
mod++;
}
drm_object_attach_property(&plane->base, config->modifiers_property,
blob->base.id);
return 0;
}
__printf(9, 0)
static int __drm_universal_plane_init(struct drm_device *dev,
struct drm_plane *plane,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats,
unsigned int format_count,
const uint64_t *format_modifiers,
enum drm_plane_type type,
const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
static const uint64_t default_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
};
unsigned int format_modifier_count = 0;
int ret;
/* plane index is used with 32bit bitmasks */
if (WARN_ON(config->num_total_plane >= 32))
return -EINVAL;
/*
* First driver to need more than 64 formats needs to fix this. Each
* format is encoded as a bit and the current code only supports a u64.
*/
if (WARN_ON(format_count > 64))
return -EINVAL;
WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
(!funcs->atomic_destroy_state ||
!funcs->atomic_duplicate_state));
ret = drm_mode_object_add(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
return ret;
drm_modeset_lock_init(&plane->mutex);
plane->base.properties = &plane->properties;
plane->dev = dev;
plane->funcs = funcs;
plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
GFP_KERNEL);
if (!plane->format_types) {
DRM_DEBUG_KMS("out of memory when allocating plane\n");
drm_mode_object_unregister(dev, &plane->base);
return -ENOMEM;
}
if (format_modifiers) {
const uint64_t *temp_modifiers = format_modifiers;
while (*temp_modifiers++ != DRM_FORMAT_MOD_INVALID)
format_modifier_count++;
} else {
if (!dev->mode_config.fb_modifiers_not_supported) {
format_modifiers = default_modifiers;
format_modifier_count = ARRAY_SIZE(default_modifiers);
}
}
/* autoset the cap and check for consistency across all planes */
drm_WARN_ON(dev, config->fb_modifiers_not_supported &&
format_modifier_count);
plane->modifier_count = format_modifier_count;
plane->modifiers = kmalloc_array(format_modifier_count,
sizeof(format_modifiers[0]),
GFP_KERNEL);
if (format_modifier_count && !plane->modifiers) {
DRM_DEBUG_KMS("out of memory when allocating plane\n");
kfree(plane->format_types);
drm_mode_object_unregister(dev, &plane->base);
return -ENOMEM;
}
if (name) {
plane->name = kvasprintf(GFP_KERNEL, name, ap);
} else {
plane->name = kasprintf(GFP_KERNEL, "plane-%d",
drm_num_planes(dev));
}
if (!plane->name) {
kfree(plane->format_types);
kfree(plane->modifiers);
drm_mode_object_unregister(dev, &plane->base);
return -ENOMEM;
}
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
plane->format_count = format_count;
memcpy(plane->modifiers, format_modifiers,
format_modifier_count * sizeof(format_modifiers[0]));
plane->possible_crtcs = possible_crtcs;
plane->type = type;
list_add_tail(&plane->head, &config->plane_list);
plane->index = config->num_total_plane++;
drm_object_attach_property(&plane->base,
config->plane_type_property,
plane->type);
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
drm_object_attach_property(&plane->base, config->prop_in_fence_fd, -1);
drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
drm_object_attach_property(&plane->base, config->prop_src_x, 0);
drm_object_attach_property(&plane->base, config->prop_src_y, 0);
drm_object_attach_property(&plane->base, config->prop_src_w, 0);
drm_object_attach_property(&plane->base, config->prop_src_h, 0);
}
if (format_modifier_count)
create_in_format_blob(dev, plane);
return 0;
}
/**
* drm_universal_plane_init - Initialize a new universal plane object
* @dev: DRM device
* @plane: plane object to init
* @possible_crtcs: bitmask of possible CRTCs
* @funcs: callbacks for the new plane
* @formats: array of supported formats (DRM_FORMAT\_\*)
* @format_count: number of elements in @formats
* @format_modifiers: array of struct drm_format modifiers terminated by
* DRM_FORMAT_MOD_INVALID
* @type: type of plane (overlay, primary, cursor)
* @name: printf style format string for the plane name, or NULL for default name
*
* Initializes a plane object of type @type. The &drm_plane_funcs.destroy hook
* should call drm_plane_cleanup() and kfree() the plane structure. The plane
* structure should not be allocated with devm_kzalloc().
*
* Note: consider using drmm_universal_plane_alloc() instead of
* drm_universal_plane_init() to let the DRM managed resource infrastructure
* take care of cleanup and deallocation.
*
 * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier may set
 * @format_modifiers to NULL. The plane will advertise the linear modifier.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
const uint64_t *format_modifiers,
enum drm_plane_type type,
const char *name, ...)
{
va_list ap;
int ret;
WARN_ON(!funcs->destroy);
va_start(ap, name);
ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
formats, format_count, format_modifiers,
type, name, ap);
va_end(ap);
return ret;
}
EXPORT_SYMBOL(drm_universal_plane_init);
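/*
 * Illustrative sketch (not part of the original file): initializing a
 * primary plane with the managed allocator recommended in the note above.
 * The example_*() names, format list and CRTC mask are assumptions; note
 * that &drm_plane_funcs.destroy must be unset for the drmm_ variant.
 */
#if 0
static const u32 example_formats[] = {
	DRM_FORMAT_XRGB8888,
};

struct example_plane {
	struct drm_plane base;
};

static int example_init_primary(struct drm_device *dev)
{
	struct example_plane *ep;

	ep = drmm_universal_plane_alloc(dev, struct example_plane, base,
					BIT(0) /* CRTC 0 */,
					&example_plane_funcs,
					example_formats,
					ARRAY_SIZE(example_formats),
					NULL /* linear only */,
					DRM_PLANE_TYPE_PRIMARY, NULL);
	return PTR_ERR_OR_ZERO(ep);
}
#endif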
static void drmm_universal_plane_alloc_release(struct drm_device *dev, void *ptr)
{
struct drm_plane *plane = ptr;
if (WARN_ON(!plane->dev))
return;
drm_plane_cleanup(plane);
}
void *__drmm_universal_plane_alloc(struct drm_device *dev, size_t size,
size_t offset, uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
const uint64_t *format_modifiers,
enum drm_plane_type type,
const char *name, ...)
{
void *container;
struct drm_plane *plane;
va_list ap;
int ret;
if (WARN_ON(!funcs || funcs->destroy))
return ERR_PTR(-EINVAL);
container = drmm_kzalloc(dev, size, GFP_KERNEL);
if (!container)
return ERR_PTR(-ENOMEM);
plane = container + offset;
va_start(ap, name);
ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
formats, format_count, format_modifiers,
type, name, ap);
va_end(ap);
if (ret)
return ERR_PTR(ret);
ret = drmm_add_action_or_reset(dev, drmm_universal_plane_alloc_release,
plane);
if (ret)
return ERR_PTR(ret);
return container;
}
EXPORT_SYMBOL(__drmm_universal_plane_alloc);
void *__drm_universal_plane_alloc(struct drm_device *dev, size_t size,
size_t offset, uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
const uint64_t *format_modifiers,
enum drm_plane_type type,
const char *name, ...)
{
void *container;
struct drm_plane *plane;
va_list ap;
int ret;
if (drm_WARN_ON(dev, !funcs))
return ERR_PTR(-EINVAL);
container = kzalloc(size, GFP_KERNEL);
if (!container)
return ERR_PTR(-ENOMEM);
plane = container + offset;
va_start(ap, name);
ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
formats, format_count, format_modifiers,
type, name, ap);
va_end(ap);
if (ret)
goto err_kfree;
return container;
err_kfree:
kfree(container);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(__drm_universal_plane_alloc);
int drm_plane_register_all(struct drm_device *dev)
{
unsigned int num_planes = 0;
unsigned int num_zpos = 0;
struct drm_plane *plane;
int ret = 0;
drm_for_each_plane(plane, dev) {
if (plane->funcs->late_register)
ret = plane->funcs->late_register(plane);
if (ret)
return ret;
if (plane->zpos_property)
num_zpos++;
num_planes++;
}
drm_WARN(dev, num_zpos && num_planes != num_zpos,
"Mixing planes with and without zpos property is invalid\n");
return 0;
}
void drm_plane_unregister_all(struct drm_device *dev)
{
struct drm_plane *plane;
drm_for_each_plane(plane, dev) {
if (plane->funcs->early_unregister)
plane->funcs->early_unregister(plane);
}
}
/**
* drm_plane_cleanup - Clean up the core plane usage
* @plane: plane to cleanup
*
* This function cleans up @plane and removes it from the DRM mode setting
* core. Note that the function does *not* free the plane structure itself,
* this is the responsibility of the caller.
*/
void drm_plane_cleanup(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
drm_modeset_lock_fini(&plane->mutex);
kfree(plane->format_types);
kfree(plane->modifiers);
drm_mode_object_unregister(dev, &plane->base);
BUG_ON(list_empty(&plane->head));
/* Note that the plane_list is considered to be static; should we
* remove the drm_plane at runtime we would have to decrement all
* the indices on the drm_plane after us in the plane_list.
*/
list_del(&plane->head);
dev->mode_config.num_total_plane--;
WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
if (plane->state && plane->funcs->atomic_destroy_state)
plane->funcs->atomic_destroy_state(plane, plane->state);
kfree(plane->name);
memset(plane, 0, sizeof(*plane));
}
EXPORT_SYMBOL(drm_plane_cleanup);
/**
* drm_plane_from_index - find the registered plane at an index
* @dev: DRM device
 * @idx: index of the registered plane to find
*
* Given a plane index, return the registered plane from DRM device's
* list of planes with matching index. This is the inverse of drm_plane_index().
*/
struct drm_plane *
drm_plane_from_index(struct drm_device *dev, int idx)
{
struct drm_plane *plane;
drm_for_each_plane(plane, dev)
if (idx == plane->index)
return plane;
return NULL;
}
EXPORT_SYMBOL(drm_plane_from_index);
/**
* drm_plane_force_disable - Forcibly disable a plane
* @plane: plane to disable
*
* Forces the plane to be disabled.
*
* Used when the plane's current framebuffer is destroyed,
* and when restoring fbdev mode.
*
* Note that this function is not suitable for atomic drivers, since it doesn't
* wire through the lock acquisition context properly and hence can't handle
* retries or driver private locks. You probably want to use
* drm_atomic_helper_disable_plane() or
* drm_atomic_helper_disable_planes_on_crtc() instead.
*/
void drm_plane_force_disable(struct drm_plane *plane)
{
int ret;
if (!plane->fb)
return;
WARN_ON(drm_drv_uses_atomic_modeset(plane->dev));
plane->old_fb = plane->fb;
ret = plane->funcs->disable_plane(plane, NULL);
if (ret) {
DRM_ERROR("failed to disable plane with busy fb\n");
plane->old_fb = NULL;
return;
}
/* disconnect the plane from the fb and crtc: */
drm_framebuffer_put(plane->old_fb);
plane->old_fb = NULL;
plane->fb = NULL;
plane->crtc = NULL;
}
EXPORT_SYMBOL(drm_plane_force_disable);
/**
* drm_mode_plane_set_obj_prop - set the value of a property
* @plane: drm plane object to set property value for
* @property: property to set
* @value: value the property should be set to
*
* This functions sets a given property on a given plane object. This function
* calls the driver's ->set_property callback and changes the software state of
* the property if the callback succeeds.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
struct drm_property *property,
uint64_t value)
{
int ret = -EINVAL;
struct drm_mode_object *obj = &plane->base;
if (plane->funcs->set_property)
ret = plane->funcs->set_property(plane, property, value);
if (!ret)
drm_object_property_set_value(obj, property, value);
return ret;
}
EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
int drm_mode_getplane_res(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_plane_res *plane_resp = data;
struct drm_plane *plane;
uint32_t __user *plane_ptr;
int count = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
plane_ptr = u64_to_user_ptr(plane_resp->plane_id_ptr);
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
drm_for_each_plane(plane, dev) {
/*
* Unless userspace set the 'universal planes'
* capability bit, only advertise overlays.
*/
if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
!file_priv->universal_planes)
continue;
if (drm_lease_held(file_priv, plane->base.id)) {
if (count < plane_resp->count_planes &&
put_user(plane->base.id, plane_ptr + count))
return -EFAULT;
count++;
}
}
plane_resp->count_planes = count;
return 0;
}
int drm_mode_getplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_plane *plane_resp = data;
struct drm_plane *plane;
uint32_t __user *format_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
plane = drm_plane_find(dev, file_priv, plane_resp->plane_id);
if (!plane)
return -ENOENT;
drm_modeset_lock(&plane->mutex, NULL);
if (plane->state && plane->state->crtc && drm_lease_held(file_priv, plane->state->crtc->base.id))
plane_resp->crtc_id = plane->state->crtc->base.id;
else if (!plane->state && plane->crtc && drm_lease_held(file_priv, plane->crtc->base.id))
plane_resp->crtc_id = plane->crtc->base.id;
else
plane_resp->crtc_id = 0;
if (plane->state && plane->state->fb)
plane_resp->fb_id = plane->state->fb->base.id;
else if (!plane->state && plane->fb)
plane_resp->fb_id = plane->fb->base.id;
else
plane_resp->fb_id = 0;
drm_modeset_unlock(&plane->mutex);
plane_resp->plane_id = plane->base.id;
plane_resp->possible_crtcs = drm_lease_filter_crtcs(file_priv,
plane->possible_crtcs);
plane_resp->gamma_size = 0;
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
if (plane->format_count &&
(plane_resp->count_format_types >= plane->format_count)) {
		format_ptr = u64_to_user_ptr(plane_resp->format_type_ptr);
if (copy_to_user(format_ptr,
plane->format_types,
sizeof(uint32_t) * plane->format_count)) {
return -EFAULT;
}
}
plane_resp->count_format_types = plane->format_count;
return 0;
}
int drm_plane_check_pixel_format(struct drm_plane *plane,
u32 format, u64 modifier)
{
unsigned int i;
for (i = 0; i < plane->format_count; i++) {
if (format == plane->format_types[i])
break;
}
if (i == plane->format_count)
return -EINVAL;
if (plane->funcs->format_mod_supported) {
if (!plane->funcs->format_mod_supported(plane, format, modifier))
return -EINVAL;
} else {
if (!plane->modifier_count)
return 0;
for (i = 0; i < plane->modifier_count; i++) {
if (modifier == plane->modifiers[i])
break;
}
if (i == plane->modifier_count)
return -EINVAL;
}
return 0;
}
static int __setplane_check(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int32_t crtc_x, int32_t crtc_y,
uint32_t crtc_w, uint32_t crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
int ret;
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
DRM_DEBUG_KMS("Invalid crtc for plane\n");
return -EINVAL;
}
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, fb->format->format,
fb->modifier);
if (ret) {
DRM_DEBUG_KMS("Invalid pixel format %p4cc, modifier 0x%llx\n",
&fb->format->format, fb->modifier);
return ret;
}
/* Give drivers some help against integer overflows */
if (crtc_w > INT_MAX ||
crtc_x > INT_MAX - (int32_t) crtc_w ||
crtc_h > INT_MAX ||
crtc_y > INT_MAX - (int32_t) crtc_h) {
DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
crtc_w, crtc_h, crtc_x, crtc_y);
return -ERANGE;
}
ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);
if (ret)
return ret;
return 0;
}
/**
* drm_any_plane_has_format - Check whether any plane supports this format and modifier combination
* @dev: DRM device
* @format: pixel format (DRM_FORMAT_*)
* @modifier: data layout modifier
*
* Returns:
* Whether at least one plane supports the specified format and modifier combination.
*/
bool drm_any_plane_has_format(struct drm_device *dev,
u32 format, u64 modifier)
{
struct drm_plane *plane;
drm_for_each_plane(plane, dev) {
if (drm_plane_check_pixel_format(plane, format, modifier) == 0)
return true;
}
return false;
}
EXPORT_SYMBOL(drm_any_plane_has_format);
/*
* __setplane_internal - setplane handler for internal callers
*
* This function will take a reference on the new fb for the plane
* on success.
*
* src_{x,y,w,h} are provided in 16.16 fixed point format
*/
static int __setplane_internal(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int32_t crtc_x, int32_t crtc_y,
uint32_t crtc_w, uint32_t crtc_h,
/* src_{x,y,w,h} values are 16.16 fixed point */
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
int ret = 0;
WARN_ON(drm_drv_uses_atomic_modeset(plane->dev));
/* No fb means shut it down */
if (!fb) {
plane->old_fb = plane->fb;
ret = plane->funcs->disable_plane(plane, ctx);
if (!ret) {
plane->crtc = NULL;
plane->fb = NULL;
} else {
plane->old_fb = NULL;
}
goto out;
}
ret = __setplane_check(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
if (ret)
goto out;
plane->old_fb = plane->fb;
ret = plane->funcs->update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, ctx);
if (!ret) {
plane->crtc = crtc;
plane->fb = fb;
drm_framebuffer_get(plane->fb);
} else {
plane->old_fb = NULL;
}
out:
if (plane->old_fb)
drm_framebuffer_put(plane->old_fb);
plane->old_fb = NULL;
return ret;
}
static int __setplane_atomic(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int32_t crtc_x, int32_t crtc_y,
uint32_t crtc_w, uint32_t crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
int ret;
WARN_ON(!drm_drv_uses_atomic_modeset(plane->dev));
/* No fb means shut it down */
if (!fb)
return plane->funcs->disable_plane(plane, ctx);
/*
* FIXME: This is redundant with drm_atomic_plane_check(),
* but the legacy cursor/"async" .update_plane() tricks
* don't call that so we still need this here. Should remove
* this when all .update_plane() implementations have been
* fixed to call drm_atomic_plane_check().
*/
ret = __setplane_check(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
if (ret)
return ret;
return plane->funcs->update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, ctx);
}
static int setplane_internal(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int32_t crtc_x, int32_t crtc_y,
uint32_t crtc_w, uint32_t crtc_h,
/* src_{x,y,w,h} values are 16.16 fixed point */
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct drm_modeset_acquire_ctx ctx;
int ret;
DRM_MODESET_LOCK_ALL_BEGIN(plane->dev, ctx,
DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
if (drm_drv_uses_atomic_modeset(plane->dev))
ret = __setplane_atomic(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, &ctx);
else
ret = __setplane_internal(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, &ctx);
DRM_MODESET_LOCK_ALL_END(plane->dev, ctx, ret);
return ret;
}
int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_set_plane *plane_req = data;
struct drm_plane *plane;
struct drm_crtc *crtc = NULL;
struct drm_framebuffer *fb = NULL;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
/*
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
plane = drm_plane_find(dev, file_priv, plane_req->plane_id);
if (!plane) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
return -ENOENT;
}
if (plane_req->fb_id) {
fb = drm_framebuffer_lookup(dev, file_priv, plane_req->fb_id);
if (!fb) {
DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
plane_req->fb_id);
return -ENOENT;
}
crtc = drm_crtc_find(dev, file_priv, plane_req->crtc_id);
if (!crtc) {
drm_framebuffer_put(fb);
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
return -ENOENT;
}
}
ret = setplane_internal(plane, crtc, fb,
plane_req->crtc_x, plane_req->crtc_y,
plane_req->crtc_w, plane_req->crtc_h,
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
if (fb)
drm_framebuffer_put(fb);
return ret;
}
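/*
* Editor's illustrative userspace sketch: the src_* members of
* struct drm_mode_set_plane are 16.16 fixed point, so scanning out a
* full w x h framebuffer shifts the source size left by 16. fd and the
* object IDs are assumed to exist; drmIoctl() is the libdrm wrapper.
*
*	struct drm_mode_set_plane req = {
*		.plane_id = plane_id,
*		.crtc_id = crtc_id,
*		.fb_id = fb_id,
*		.crtc_w = w, .crtc_h = h,
*		.src_w = w << 16, .src_h = h << 16,
*	};
*
*	ret = drmIoctl(fd, DRM_IOCTL_MODE_SETPLANE, &req);
*/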
static int drm_mode_cursor_universal(struct drm_crtc *crtc,
struct drm_mode_cursor2 *req,
struct drm_file *file_priv,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
struct drm_plane *plane = crtc->cursor;
struct drm_framebuffer *fb = NULL;
struct drm_mode_fb_cmd2 fbreq = {
.width = req->width,
.height = req->height,
.pixel_format = DRM_FORMAT_ARGB8888,
.pitches = { req->width * 4 },
.handles = { req->handle },
};
int32_t crtc_x, crtc_y;
uint32_t crtc_w = 0, crtc_h = 0;
uint32_t src_w = 0, src_h = 0;
int ret = 0;
BUG_ON(!plane);
WARN_ON(plane->crtc != crtc && plane->crtc != NULL);
/*
* Obtain the fb we'll be using (either new or existing) and take an extra
* reference to it if fb != NULL. setplane will take care of dropping
* the reference if the plane update fails.
*/
if (req->flags & DRM_MODE_CURSOR_BO) {
if (req->handle) {
fb = drm_internal_framebuffer_create(dev, &fbreq, file_priv);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
return PTR_ERR(fb);
}
fb->hot_x = req->hot_x;
fb->hot_y = req->hot_y;
} else {
fb = NULL;
}
} else {
if (plane->state)
fb = plane->state->fb;
else
fb = plane->fb;
if (fb)
drm_framebuffer_get(fb);
}
if (req->flags & DRM_MODE_CURSOR_MOVE) {
crtc_x = req->x;
crtc_y = req->y;
} else {
crtc_x = crtc->cursor_x;
crtc_y = crtc->cursor_y;
}
if (fb) {
crtc_w = fb->width;
crtc_h = fb->height;
src_w = fb->width << 16;
src_h = fb->height << 16;
}
if (drm_drv_uses_atomic_modeset(dev))
ret = __setplane_atomic(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
0, 0, src_w, src_h, ctx);
else
ret = __setplane_internal(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
0, 0, src_w, src_h, ctx);
if (fb)
drm_framebuffer_put(fb);
/* Update successful; save new cursor position, if necessary */
if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
crtc->cursor_x = req->x;
crtc->cursor_y = req->y;
}
return ret;
}
static int drm_mode_cursor_common(struct drm_device *dev,
struct drm_mode_cursor2 *req,
struct drm_file *file_priv)
{
struct drm_crtc *crtc;
struct drm_modeset_acquire_ctx ctx;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
crtc = drm_crtc_find(dev, file_priv, req->crtc_id);
if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
return -ENOENT;
}
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
ret = drm_modeset_lock(&crtc->mutex, &ctx);
if (ret)
goto out;
/*
* If this crtc has a universal cursor plane, call that plane's update
* handler rather than using legacy cursor handlers.
*/
if (crtc->cursor) {
ret = drm_modeset_lock(&crtc->cursor->mutex, &ctx);
if (ret)
goto out;
if (!drm_lease_held(file_priv, crtc->cursor->base.id)) {
ret = -EACCES;
goto out;
}
ret = drm_mode_cursor_universal(crtc, req, file_priv, &ctx);
goto out;
}
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
ret = -ENXIO;
goto out;
}
/* Turns off the cursor if handle is 0 */
if (crtc->funcs->cursor_set2)
ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
req->width, req->height, req->hot_x, req->hot_y);
else
ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
req->width, req->height);
}
if (req->flags & DRM_MODE_CURSOR_MOVE) {
if (crtc->funcs->cursor_move) {
ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
} else {
ret = -EFAULT;
goto out;
}
}
out:
if (ret == -EDEADLK) {
ret = drm_modeset_backoff(&ctx);
if (!ret)
goto retry;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
int drm_mode_cursor_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_cursor *req = data;
struct drm_mode_cursor2 new_req;
memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
new_req.hot_x = new_req.hot_y = 0;
return drm_mode_cursor_common(dev, &new_req, file_priv);
}
/*
* Set the cursor configuration based on user request. This implements the 2nd
* version of the cursor ioctl, which allows userspace to additionally specify
* the hotspot of the pointer.
*/
int drm_mode_cursor2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_cursor2 *req = data;
return drm_mode_cursor_common(dev, req, file_priv);
}
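/*
* Editor's illustrative userspace sketch: setting a 64x64 ARGB8888
* cursor buffer with a hotspot via the cursor2 ioctl. bo_handle is
* assumed to be the GEM handle of a previously created buffer.
*
*	struct drm_mode_cursor2 req = {
*		.flags = DRM_MODE_CURSOR_BO,
*		.crtc_id = crtc_id,
*		.width = 64, .height = 64,
*		.handle = bo_handle,
*		.hot_x = 4, .hot_y = 4,
*	};
*
*	ret = drmIoctl(fd, DRM_IOCTL_MODE_CURSOR2, &req);
*/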
int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_page_flip_target *page_flip = data;
struct drm_crtc *crtc;
struct drm_plane *plane;
struct drm_framebuffer *fb = NULL, *old_fb;
struct drm_pending_vblank_event *e = NULL;
u32 target_vblank = page_flip->sequence;
struct drm_modeset_acquire_ctx ctx;
int ret = -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS)
return -EINVAL;
if (page_flip->sequence != 0 && !(page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET))
return -EINVAL;
/* Only one of the DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags
* can be specified
*/
if ((page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) == DRM_MODE_PAGE_FLIP_TARGET)
return -EINVAL;
if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
return -EINVAL;
crtc = drm_crtc_find(dev, file_priv, page_flip->crtc_id);
if (!crtc)
return -ENOENT;
plane = crtc->primary;
if (!drm_lease_held(file_priv, plane->base.id))
return -EACCES;
if (crtc->funcs->page_flip_target) {
u32 current_vblank;
int r;
r = drm_crtc_vblank_get(crtc);
if (r)
return r;
current_vblank = (u32)drm_crtc_vblank_count(crtc);
switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) {
case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE:
if ((int)(target_vblank - current_vblank) > 1) {
DRM_DEBUG("Invalid absolute flip target %u, "
"must be <= %u\n", target_vblank,
current_vblank + 1);
drm_crtc_vblank_put(crtc);
return -EINVAL;
}
break;
case DRM_MODE_PAGE_FLIP_TARGET_RELATIVE:
if (target_vblank != 0 && target_vblank != 1) {
DRM_DEBUG("Invalid relative flip target %u, "
"must be 0 or 1\n", target_vblank);
drm_crtc_vblank_put(crtc);
return -EINVAL;
}
target_vblank += current_vblank;
break;
default:
target_vblank = current_vblank +
!(page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC);
break;
}
} else if (crtc->funcs->page_flip == NULL ||
(page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) {
return -EINVAL;
}
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
ret = drm_modeset_lock(&crtc->mutex, &ctx);
if (ret)
goto out;
ret = drm_modeset_lock(&plane->mutex, &ctx);
if (ret)
goto out;
if (plane->state)
old_fb = plane->state->fb;
else
old_fb = plane->fb;
if (old_fb == NULL) {
/* The framebuffer is currently unbound, presumably
* due to a hotplug event that userspace has not
* yet discovered.
*/
ret = -EBUSY;
goto out;
}
fb = drm_framebuffer_lookup(dev, file_priv, page_flip->fb_id);
if (!fb) {
ret = -ENOENT;
goto out;
}
if (plane->state) {
const struct drm_plane_state *state = plane->state;
ret = drm_framebuffer_check_src_coords(state->src_x,
state->src_y,
state->src_w,
state->src_h,
fb);
} else {
ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y,
&crtc->mode, fb);
}
if (ret)
goto out;
/*
* Only check the FOURCC format code, excluding modifiers. This is
* enough for all legacy drivers. Atomic drivers have their own
* checks in their ->atomic_check implementation, which will
* return -EINVAL if any hw or driver constraint is violated due
* to modifier changes.
*/
if (old_fb->format->format != fb->format->format) {
DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
ret = -EINVAL;
goto out;
}
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
e = kzalloc(sizeof *e, GFP_KERNEL);
if (!e) {
ret = -ENOMEM;
goto out;
}
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof(e->event);
e->event.vbl.user_data = page_flip->user_data;
e->event.vbl.crtc_id = crtc->base.id;
ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
if (ret) {
kfree(e);
e = NULL;
goto out;
}
}
plane->old_fb = plane->fb;
if (crtc->funcs->page_flip_target)
ret = crtc->funcs->page_flip_target(crtc, fb, e,
page_flip->flags,
target_vblank,
&ctx);
else
ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags,
&ctx);
if (ret) {
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
drm_event_cancel_free(dev, &e->base);
/* Keep the old fb, don't unref it. */
plane->old_fb = NULL;
} else {
if (!plane->state) {
plane->fb = fb;
drm_framebuffer_get(fb);
}
}
out:
if (fb)
drm_framebuffer_put(fb);
if (plane->old_fb)
drm_framebuffer_put(plane->old_fb);
plane->old_fb = NULL;
if (ret == -EDEADLK) {
ret = drm_modeset_backoff(&ctx);
if (!ret)
goto retry;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
if (ret && crtc->funcs->page_flip_target)
drm_crtc_vblank_put(crtc);
return ret;
}
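/*
* Editor's illustrative userspace sketch: requesting a flip completion
* event and waiting for it on the DRM fd. Flip events are delivered as
* struct drm_event_vblank with base.type == DRM_EVENT_FLIP_COMPLETE.
*
*	struct drm_mode_crtc_page_flip flip = {
*		.crtc_id = crtc_id,
*		.fb_id = next_fb_id,
*		.flags = DRM_MODE_PAGE_FLIP_EVENT,
*		.user_data = (uintptr_t)ctx,
*	};
*	struct drm_event_vblank ev;
*
*	if (drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip) == 0)
*		read(fd, &ev, sizeof(ev));
*/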
/**
* DOC: damage tracking
*
* FB_DAMAGE_CLIPS is an optional plane property which provides a means to
* specify a list of damage rectangles on a plane in framebuffer coordinates of
* the framebuffer attached to the plane. In this context, damage is the area
* of the plane framebuffer that has changed since the last plane update (also
* called page-flip), irrespective of whether the currently attached
* framebuffer is the same as the framebuffer attached during the last plane
* update.
*
* FB_DAMAGE_CLIPS is a hint to the kernel which can be helpful for some
* drivers to optimize internally, especially for virtual devices where each
* framebuffer change needs to be transmitted over network, USB, etc.
*
* Since FB_DAMAGE_CLIPS is a hint, it is an optional property. User-space can
* ignore the damage clips property, in which case the driver will do a full
* plane update. When damage clips are provided, it is guaranteed that the area
* inside the damage clips will be updated on the plane. For efficiency the
* driver can do a full update or update more than what is specified in the
* damage clips. Since the driver is free to read more, user-space must always
* render the entire visible framebuffer, otherwise there can be corruption.
* Also, damage clips which don't encompass the actual damage to the
* framebuffer (since the last plane update) can result in incorrect rendering.
*
* FB_DAMAGE_CLIPS is a blob property whose blob data is simply an array of
* &drm_mode_rect. Unlike plane &drm_plane_state.src coordinates, damage clips
* are not in 16.16 fixed point. Like the plane src in the framebuffer, damage
* clips cannot be negative. In a damage clip, x1/y1 are inclusive and x2/y2
* are exclusive. While the kernel does not error out on overlapping damage
* clips, they are strongly discouraged.
*
* Drivers that are interested in the damage interface for a plane should
* enable the FB_DAMAGE_CLIPS property by calling
* drm_plane_enable_fb_damage_clips(). Drivers implementing damage can use the
* drm_atomic_helper_damage_iter_init() and
* drm_atomic_helper_damage_iter_next() helper iterator functions to get damage
* rectangles clipped to &drm_plane_state.src.
*/
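/*
* Editor's illustrative userspace sketch: attaching two damage
* rectangles to a plane in an atomic commit, using the libdrm helpers.
* req and fb_damage_clips_prop_id are assumed to have been set up by
* the usual atomic/property discovery code.
*
*	struct drm_mode_rect clips[2] = {
*		{ .x1 = 0,   .y1 = 0,  .x2 = 256, .y2 = 128 },
*		{ .x1 = 512, .y1 = 64, .x2 = 640, .y2 = 480 },
*	};
*	uint32_t blob_id;
*
*	drmModeCreatePropertyBlob(fd, clips, sizeof(clips), &blob_id);
*	drmModeAtomicAddProperty(req, plane_id, fb_damage_clips_prop_id,
*				 blob_id);
*/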
/**
* drm_plane_enable_fb_damage_clips - Enables plane fb damage clips property.
* @plane: Plane on which to enable damage clips property.
*
* This function lets drivers enable the damage clips property on a plane.
*/
void drm_plane_enable_fb_damage_clips(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
drm_object_attach_property(&plane->base, config->prop_fb_damage_clips,
0);
}
EXPORT_SYMBOL(drm_plane_enable_fb_damage_clips);
/**
* drm_plane_get_damage_clips_count - Returns damage clips count.
* @state: Plane state.
*
* Simple helper to get the number of &drm_mode_rect clips set by user-space
* during plane update.
*
* Return: Number of clips in plane fb_damage_clips blob property.
*/
unsigned int
drm_plane_get_damage_clips_count(const struct drm_plane_state *state)
{
return (state && state->fb_damage_clips) ?
state->fb_damage_clips->length/sizeof(struct drm_mode_rect) : 0;
}
EXPORT_SYMBOL(drm_plane_get_damage_clips_count);
struct drm_mode_rect *
__drm_plane_get_damage_clips(const struct drm_plane_state *state)
{
return (struct drm_mode_rect *)((state && state->fb_damage_clips) ?
state->fb_damage_clips->data : NULL);
}
/**
* drm_plane_get_damage_clips - Returns damage clips.
* @state: Plane state.
*
* Note that this function returns uapi type &drm_mode_rect. Drivers might want
* to use the helper functions drm_atomic_helper_damage_iter_init() and
* drm_atomic_helper_damage_iter_next(), or drm_atomic_helper_damage_merged() if
* the driver can handle at most a single damage region.
*
* Return: Damage clips in plane fb_damage_clips blob property.
*/
struct drm_mode_rect *
drm_plane_get_damage_clips(const struct drm_plane_state *state)
{
struct drm_device *dev = state->plane->dev;
struct drm_mode_config *config = &dev->mode_config;
/* check that drm_plane_enable_fb_damage_clips() was called */
if (!drm_mode_obj_find_prop_id(&state->plane->base,
config->prop_fb_damage_clips->base.id))
drm_warn_once(dev, "drm_plane_enable_fb_damage_clips() not called\n");
return __drm_plane_get_damage_clips(state);
}
EXPORT_SYMBOL(drm_plane_get_damage_clips);
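/*
* Editor's hedged driver-side sketch: consuming damage through the
* recommended helper iterator, which hands back rectangles already
* clipped to &drm_plane_state.src. foo_flush_rect() is a made-up
* driver hook.
*
*	struct drm_atomic_helper_damage_iter iter;
*	struct drm_rect clip;
*
*	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
*	drm_atomic_for_each_plane_damage(&iter, &clip)
*		foo_flush_rect(new_state->fb, &clip);
*/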
struct drm_property *
drm_create_scaling_filter_prop(struct drm_device *dev,
unsigned int supported_filters)
{
struct drm_property *prop;
static const struct drm_prop_enum_list props[] = {
{ DRM_SCALING_FILTER_DEFAULT, "Default" },
{ DRM_SCALING_FILTER_NEAREST_NEIGHBOR, "Nearest Neighbor" },
};
unsigned int valid_mode_mask = BIT(DRM_SCALING_FILTER_DEFAULT) |
BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR);
int i;
if (WARN_ON((supported_filters & ~valid_mode_mask) ||
((supported_filters & BIT(DRM_SCALING_FILTER_DEFAULT)) == 0)))
return ERR_PTR(-EINVAL);
prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
"SCALING_FILTER",
hweight32(supported_filters));
if (!prop)
return ERR_PTR(-ENOMEM);
for (i = 0; i < ARRAY_SIZE(props); i++) {
int ret;
if (!(BIT(props[i].type) & supported_filters))
continue;
ret = drm_property_add_enum(prop, props[i].type,
props[i].name);
if (ret) {
drm_property_destroy(dev, prop);
return ERR_PTR(ret);
}
}
return prop;
}
/**
* drm_plane_create_scaling_filter_property - create a new scaling filter
* property
*
* @plane: drm plane
* @supported_filters: bitmask of supported scaling filters, must include
* BIT(DRM_SCALING_FILTER_DEFAULT).
*
* This function lets drivers enable the scaling filter property on a given
* plane.
*
* RETURNS:
* Zero for success or -errno
*/
int drm_plane_create_scaling_filter_property(struct drm_plane *plane,
unsigned int supported_filters)
{
struct drm_property *prop =
drm_create_scaling_filter_prop(plane->dev, supported_filters);
if (IS_ERR(prop))
return PTR_ERR(prop);
drm_object_attach_property(&plane->base, prop,
DRM_SCALING_FILTER_DEFAULT);
plane->scaling_filter_property = prop;
return 0;
}
EXPORT_SYMBOL(drm_plane_create_scaling_filter_property);
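/*
* Editor's hedged driver-side sketch: enabling the property at plane
* init time and honouring it during the plane update; the update-side
* hook foo_set_nearest_scaler() is invented for illustration.
*
*	ret = drm_plane_create_scaling_filter_property(plane,
*			BIT(DRM_SCALING_FILTER_DEFAULT) |
*			BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
*
* and later, in the plane's atomic update:
*
*	if (new_state->scaling_filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR)
*		foo_set_nearest_scaler(plane);
*/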
| linux-master | drivers/gpu/drm/drm_plane.c |
/*
* \file drm_lock.c
* IOCTLs for locking
*
* \author Rickard E. (Rik) Faith <[email protected]>
* \author Gareth Hughes <[email protected]>
*/
/*
* Created: Tue Feb 2 08:37:54 1999 by [email protected]
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
#include "drm_legacy.h"
static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
/*
* Take the heavyweight lock.
*
* \param lock lock pointer.
* \param context locking context.
* \return one if the lock is held, or zero otherwise.
*
* Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/
static
int drm_lock_take(struct drm_lock_data *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
spin_lock_bh(&lock_data->spinlock);
do {
old = *lock;
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
else {
new = context | _DRM_LOCK_HELD |
((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
_DRM_LOCK_CONT : 0);
}
prev = cmpxchg(lock, old, new);
} while (prev != old);
spin_unlock_bh(&lock_data->spinlock);
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
context);
}
return 0;
}
}
if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
/* Have lock */
return 1;
}
return 0;
}
/*
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give the lock to the kernel before calling *_dma_schedule.
*
* \param dev DRM device.
* \param lock lock pointer.
* \param context locking context.
* \return always one.
*
* Resets the lock file pointer.
* Marks the lock as held by the given context, via the \p cmpxchg instruction.
*/
static int drm_lock_transfer(struct drm_lock_data *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
lock_data->file_priv = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
prev = cmpxchg(lock, old, new);
} while (prev != old);
return 1;
}
static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
spin_lock_bh(&lock_data->spinlock);
if (lock_data->kernel_waiters != 0) {
drm_lock_transfer(lock_data, 0);
lock_data->idle_has_lock = 1;
spin_unlock_bh(&lock_data->spinlock);
return 1;
}
spin_unlock_bh(&lock_data->spinlock);
do {
old = *lock;
new = _DRM_LOCKING_CONTEXT(old);
prev = cmpxchg(lock, old, new);
} while (prev != old);
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d\n",
context, _DRM_LOCKING_CONTEXT(old));
return 1;
}
wake_up_interruptible(&lock_data->lock_queue);
return 0;
}
/*
* Lock ioctl.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_lock structure.
* \return zero on success or negative number on failure.
*
* Add the current task to the lock wait queue, and attempt to take the lock.
*/
int drm_legacy_lock(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DECLARE_WAITQUEUE(entry, current);
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
++file_priv->lock_count;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
task_pid_nr(current), lock->context);
return -EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock->context, task_pid_nr(current),
master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
lock->flags);
add_wait_queue(&master->lock.lock_queue, &entry);
spin_lock_bh(&master->lock.spinlock);
master->lock.user_waiters++;
spin_unlock_bh(&master->lock.spinlock);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (!master->lock.hw_lock) {
/* Device has been unregistered */
send_sig(SIGTERM, current, 0);
ret = -EINTR;
break;
}
if (drm_lock_take(&master->lock, lock->context)) {
master->lock.file_priv = file_priv;
master->lock.lock_time = jiffies;
break; /* Got lock */
}
/* Contention */
mutex_unlock(&drm_global_mutex);
schedule();
mutex_lock(&drm_global_mutex);
if (signal_pending(current)) {
ret = -EINTR;
break;
}
}
spin_lock_bh(&master->lock.spinlock);
master->lock.user_waiters--;
spin_unlock_bh(&master->lock.spinlock);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&master->lock.lock_queue, &entry);
DRM_DEBUG("%d %s\n", lock->context,
ret ? "interrupted" : "has lock");
if (ret)
return ret;
/* Don't set the block-all-signals behaviour on the master process for
* now. This is probably not the correct answer, but it lets us debug the
* xkb X server for now. */
if (!drm_is_current_master(file_priv)) {
dev->sigdata.context = lock->context;
dev->sigdata.lock = master->lock.hw_lock;
}
if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
{
if (dev->driver->dma_quiescent(dev)) {
DRM_DEBUG("%d waiting for DMA quiescent\n",
lock->context);
return -EBUSY;
}
}
return 0;
}
/*
* Unlock ioctl.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_lock structure.
* \return zero on success or negative number on failure.
*
* Transfer and free the lock.
*/
int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
task_pid_nr(current), lock->context);
return -EINVAL;
}
if (drm_legacy_lock_free(&master->lock, lock->context)) {
/* FIXME: Should really bail out here. */
}
return 0;
}
/*
* This function returns immediately and takes the hw lock
* with the kernel context if it is free, otherwise it gets the highest priority when and if
* it is eventually released.
*
* This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
* by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
* a deadlock, which is why the "idlelock" was invented).
*
* This should be sufficient to wait for GPU idle without
* having to worry about starvation.
*/
void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{
int ret;
spin_lock_bh(&lock_data->spinlock);
lock_data->kernel_waiters++;
if (!lock_data->idle_has_lock) {
spin_unlock_bh(&lock_data->spinlock);
ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
spin_lock_bh(&lock_data->spinlock);
if (ret == 1)
lock_data->idle_has_lock = 1;
}
spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_legacy_idlelock_take);
void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
{
unsigned int old, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
spin_lock_bh(&lock_data->spinlock);
if (--lock_data->kernel_waiters == 0) {
if (lock_data->idle_has_lock) {
do {
old = *lock;
prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
} while (prev != old);
wake_up_interruptible(&lock_data->lock_queue);
lock_data->idle_has_lock = 0;
}
}
spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_legacy_idlelock_release);
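/*
* Editor's hedged usage sketch: per the description above, a legacy
* driver would bracket a GPU-idle wait with the idlelock rather than
* taking the heavyweight lock itself. foo_wait_ring_idle() is a
* made-up helper.
*
*	drm_legacy_idlelock_take(&master->lock);
*	ret = foo_wait_ring_idle(dev);
*	drm_legacy_idlelock_release(&master->lock);
*/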
static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
return (file_priv->lock_count && master->lock.hw_lock &&
_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
master->lock.file_priv == file_priv);
}
void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
/* if the master has gone away we can't do anything with the lock */
if (!dev->master)
return;
if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
drm_legacy_lock_free(&file_priv->master->lock,
_DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
}
}
void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master)
{
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
/*
* Since the master is disappearing, so is the
* possibility to lock.
*/
mutex_lock(&dev->struct_mutex);
if (master->lock.hw_lock) {
if (dev->sigdata.lock == master->lock.hw_lock)
dev->sigdata.lock = NULL;
master->lock.hw_lock = NULL;
master->lock.file_priv = NULL;
wake_up_interruptible_all(&master->lock.lock_queue);
}
mutex_unlock(&dev->struct_mutex);
}
| linux-master | drivers/gpu/drm/drm_lock.c |
/* SPDX-License-Identifier: MIT */
/*
* drm_panel_orientation_quirks.c -- Quirks for non-normal panel orientation
*
* Copyright (C) 2017 Hans de Goede <[email protected]>
*
* Note the quirks in this file are shared with fbdev/efifb and as such
* must not depend on other drm code.
*/
#include <linux/dmi.h>
#include <linux/module.h>
#include <drm/drm_connector.h>
#include <drm/drm_utils.h>
#ifdef CONFIG_DMI
/*
* Some x86 clamshell design devices use portrait tablet screens and a display
* engine which cannot rotate in hardware, so we need to rotate the fbcon to
* compensate. Unfortunately these (cheap) devices also typically have quite
* generic DMI data, so we match on a combination of DMI data, screen resolution
* and a list of known BIOS dates to avoid false positives.
*/
struct drm_dmi_panel_orientation_data {
int width;
int height;
const char * const *bios_dates;
int orientation;
};
static const struct drm_dmi_panel_orientation_data gpd_micropc = {
.width = 720,
.height = 1280,
.bios_dates = (const char * const []){ "04/26/2019",
NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data gpd_pocket = {
.width = 1200,
.height = 1920,
.bios_dates = (const char * const []){ "05/26/2017", "06/28/2017",
"07/05/2017", "08/07/2017", NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data gpd_pocket2 = {
.width = 1200,
.height = 1920,
.bios_dates = (const char * const []){ "06/28/2018", "08/28/2018",
"12/07/2018", NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data gpd_win = {
.width = 720,
.height = 1280,
.bios_dates = (const char * const []){
"10/25/2016", "11/18/2016", "12/23/2016", "12/26/2016",
"02/21/2017", "03/20/2017", "05/25/2017", NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data gpd_win2 = {
.width = 720,
.height = 1280,
.bios_dates = (const char * const []){
"12/07/2017", "05/24/2018", "06/29/2018", NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
.width = 800,
.height = 1280,
.bios_dates = (const char * const []){ "10/16/2015", NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data onegx1_pro = {
.width = 1200,
.height = 1920,
.bios_dates = (const char * const []){ "12/17/2020", NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
.width = 720,
.height = 1280,
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd800x1280_leftside_up = {
.width = 800,
.height = 1280,
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
.width = 800,
.height = 1280,
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd1080x1920_leftside_up = {
.width = 1080,
.height = 1920,
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.width = 1200,
.height = 1920,
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = {
.width = 1280,
.height = 1920,
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd1600x2560_leftside_up = {
.width = 1600,
.height = 2560,
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd1600x2560_rightside_up = {
.width = 1600,
.height = 2560,
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Acer Switch V 10 (SW5-017) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Anbernic Win600 */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Anbernic"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Win600"),
},
.driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* Asus T100HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
},
.driver_data = (void *)&lcd800x1280_leftside_up,
}, { /* Asus T101HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Asus T103HAF */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* AYA NEO 2021 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* AYA NEO AIR */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
},
.driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYA NEO NEXT */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Chuwi HiBook (CWI514) */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
/* Above matches are too generic, add bios-date match */
DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Chuwi Hi10 Pro (CWI529) */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Dynabook K50 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dynabook Inc."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "dynabook K50/FR"),
},
.driver_data = (void *)&lcd800x1280_leftside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_micropc,
}, { /* GPD MicroPC (later BIOS versions with proper DMI strings) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"),
},
.driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* GPD Win Max */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1619-01"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /*
* GPD Pocket, note that the DMI data is less generic than
* it seems: devices with a board-vendor of "AMI Corporation"
* are quite rare, as are devices which have both board- *and*
* product-id set to "Default string"
*/
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
},
.driver_data = (void *)&gpd_pocket,
}, { /* GPD Pocket 2 (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_pocket2,
}, { /* GPD Win (same note on DMI match as GPD Pocket) */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
},
.driver_data = (void *)&gpd_win,
}, { /* GPD Win 2 (too generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_win2,
}, { /* GPD Win 3 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03")
},
.driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* I.T.Works TW891 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TW891"),
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"),
},
.driver_data = (void *)&itworks_tw891,
}, { /* KD Kurio Smart C15200 2-in-1 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "KD Interactive"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Kurio Smart"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "KDM960BCP"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /*
* Lenovo Ideapad Miix 310 laptop, only some production batches
* have a portrait screen; the resolution check makes the quirk
* apply only to those batches.
*/
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80SG"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Lenovo Ideapad Miix 320 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80XF"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Lenovo Ideapad D330-10IGM (HD) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Lenovo Ideapad D330-10IGM (FHD) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Ideapad D330-10IGL (HD) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Lenovo IdeaPad Duet 3 10IGL5 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Book X90F / X90L */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Book X91F / X91L */
.matches = {
/* Non exact match to match F + L versions */
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Tablet 2 830F / 830L */
.matches = {
/*
* Note this also matches the Lenovo Yoga Tablet 2 1050F/L
* since that uses the same mainboard. The resolution match
* will limit this to only matching on the 830F/L. Neither has
* any external video outputs so those are not a concern.
*/
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
/* Partial match on beginning of BIOS version */
DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Tab 3 X90F */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
.driver_data = (void *)&lcd1600x2560_rightside_up,
}, { /* Nanote UMPC-01 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "UMPC-01"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* OneGX1 Pro */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
},
.driver_data = (void *)&onegx1_pro,
}, { /* OneXPlayer */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
},
.driver_data = (void *)&lcd1600x2560_leftside_up,
}, { /* Samsung GalaxyBook 10.6 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"),
},
.driver_data = (void *)&lcd1280x1920_rightside_up,
}, { /* Valve Steam Deck */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
},
{}
};
/**
* drm_get_panel_orientation_quirk - Check for panel orientation quirks
* @width: width in pixels of the panel
* @height: height in pixels of the panel
*
* This function checks for platform specific (e.g. DMI based) quirks
* providing info on panel_orientation for systems where this cannot be
* probed from the hard-/firm-ware. To avoid false positives this function
* takes the panel resolution as arguments and checks that against the
* resolution expected by the quirk-table entry.
*
* Note this function is also used outside of the drm-subsys, by for example
* the efifb code. Because of this, this function gets compiled into its own
* kernel-module when built as a module.
*
* Returns:
* A DRM_MODE_PANEL_ORIENTATION_* value if there is a quirk for this system,
* or DRM_MODE_PANEL_ORIENTATION_UNKNOWN if there is no quirk.
*/
int drm_get_panel_orientation_quirk(int width, int height)
{
const struct dmi_system_id *match;
const struct drm_dmi_panel_orientation_data *data;
const char *bios_date;
int i;
for (match = dmi_first_match(orientation_data);
match;
match = dmi_first_match(match + 1)) {
data = match->driver_data;
if (data->width != width ||
data->height != height)
continue;
if (!data->bios_dates)
return data->orientation;
bios_date = dmi_get_system_info(DMI_BIOS_DATE);
if (!bios_date)
continue;
i = match_string(data->bios_dates, -1, bios_date);
if (i >= 0)
return data->orientation;
}
return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
EXPORT_SYMBOL(drm_get_panel_orientation_quirk);
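/*
* Editor's hedged usage sketch: a KMS driver typically feeds its native
* mode size into the quirk lookup and, when a quirk matches, attaches
* the result to the connector:
*
*	int orientation = drm_get_panel_orientation_quirk(mode->hdisplay,
*							  mode->vdisplay);
*
*	if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
*		drm_connector_set_panel_orientation(connector, orientation);
*/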
#else
/* There are no quirks for non x86 devices yet */
int drm_get_panel_orientation_quirk(int width, int height)
{
return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
EXPORT_SYMBOL(drm_get_panel_orientation_quirk);
#endif
MODULE_LICENSE("Dual MIT/GPL");
| linux-master | drivers/gpu/drm/drm_panel_orientation_quirks.c |
/*
* Copyright (C) 2014 Red Hat
* Copyright (C) 2014 Intel Corp.
* Copyright (C) 2018 Intel Corp.
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rob Clark <[email protected]>
* Daniel Vetter <[email protected]>
*/
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_drv.h>
#include <drm/drm_writeback.h>
#include <drm/drm_vblank.h>
#include <linux/dma-fence.h>
#include <linux/uaccess.h>
#include <linux/sync_file.h>
#include <linux/file.h>
#include "drm_crtc_internal.h"
/**
* DOC: overview
*
* This file contains the marshalling and demarshalling glue for the atomic UAPI
* in all its forms: The monster ATOMIC IOCTL itself, code for GET_PROPERTY and
* SET_PROPERTY IOCTLs. Plus interface functions for compatibility helpers and
* drivers which have special needs to construct their own atomic updates, e.g.
* for load detect or similar.
*/
/**
* drm_atomic_set_mode_for_crtc - set mode for CRTC
* @state: the CRTC whose incoming state to update
* @mode: kernel-internal mode to use for the CRTC, or NULL to disable
*
* Set a mode (originating from the kernel) on the desired CRTC state and update
* the enable property.
*
* RETURNS:
* Zero on success, error code on failure. Cannot return -EDEADLK.
*/
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
const struct drm_display_mode *mode)
{
struct drm_crtc *crtc = state->crtc;
struct drm_mode_modeinfo umode;
/* Early return for no change. */
if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
return 0;
drm_property_blob_put(state->mode_blob);
state->mode_blob = NULL;
if (mode) {
struct drm_property_blob *blob;
drm_mode_convert_to_umode(&umode, mode);
blob = drm_property_create_blob(crtc->dev,
sizeof(umode), &umode);
if (IS_ERR(blob))
return PTR_ERR(blob);
drm_mode_copy(&state->mode, mode);
state->mode_blob = blob;
state->enable = true;
drm_dbg_atomic(crtc->dev,
"Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
mode->name, crtc->base.id, crtc->name, state);
} else {
memset(&state->mode, 0, sizeof(state->mode));
state->enable = false;
drm_dbg_atomic(crtc->dev,
"Set [NOMODE] for [CRTC:%d:%s] state %p\n",
crtc->base.id, crtc->name, state);
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
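/*
* Editor's hedged usage sketch: kernel-internal callers such as load
* detect code set a mode on a duplicated CRTC state roughly like this
* (lock backoff and error unwinding omitted):
*
*	crtc_state = drm_atomic_get_crtc_state(state, crtc);
*	if (IS_ERR(crtc_state))
*		return PTR_ERR(crtc_state);
*
*	ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
*/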
/**
* drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
* @state: the CRTC whose incoming state to update
* @blob: pointer to blob property to use for mode
*
* Set a mode (originating from a blob property) on the desired CRTC state.
* This function will take a reference on the blob property for the CRTC state,
* and release the reference held on the state's existing mode property, if any
* was set.
*
* RETURNS:
* Zero on success, error code on failure. Cannot return -EDEADLK.
*/
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
struct drm_property_blob *blob)
{
struct drm_crtc *crtc = state->crtc;
if (blob == state->mode_blob)
return 0;
drm_property_blob_put(state->mode_blob);
state->mode_blob = NULL;
memset(&state->mode, 0, sizeof(state->mode));
if (blob) {
int ret;
if (blob->length != sizeof(struct drm_mode_modeinfo)) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] bad mode blob length: %zu\n",
crtc->base.id, crtc->name,
blob->length);
return -EINVAL;
}
ret = drm_mode_convert_umode(crtc->dev,
&state->mode, blob->data);
if (ret) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
crtc->base.id, crtc->name,
ret, drm_get_mode_status_name(state->mode.status));
drm_mode_debug_printmodeline(&state->mode);
return -EINVAL;
}
state->mode_blob = drm_property_blob_get(blob);
state->enable = true;
drm_dbg_atomic(crtc->dev,
"Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
state->mode.name, crtc->base.id, crtc->name,
state);
} else {
state->enable = false;
drm_dbg_atomic(crtc->dev,
"Set [NOMODE] for [CRTC:%d:%s] state %p\n",
crtc->base.id, crtc->name, state);
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
/**
* drm_atomic_set_crtc_for_plane - set CRTC for plane
* @plane_state: the plane whose incoming state to update
* @crtc: CRTC to use for the plane
*
* Changing the assigned CRTC for a plane requires us to grab the lock and state
* for the new CRTC, as needed. This function takes care of all these details
* besides updating the pointer in the state object itself.
*
* Returns:
* 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
* then the w/w mutex code has detected a deadlock and the entire atomic
* sequence must be restarted. All other errors are fatal.
*/
int
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
struct drm_crtc *crtc)
{
struct drm_plane *plane = plane_state->plane;
struct drm_crtc_state *crtc_state;
/* Nothing to do for same crtc */
if (plane_state->crtc == crtc)
return 0;
if (plane_state->crtc) {
crtc_state = drm_atomic_get_crtc_state(plane_state->state,
plane_state->crtc);
if (WARN_ON(IS_ERR(crtc_state)))
return PTR_ERR(crtc_state);
crtc_state->plane_mask &= ~drm_plane_mask(plane);
}
plane_state->crtc = crtc;
if (crtc) {
crtc_state = drm_atomic_get_crtc_state(plane_state->state,
crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
crtc_state->plane_mask |= drm_plane_mask(plane);
}
if (crtc)
drm_dbg_atomic(plane->dev,
"Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
plane->base.id, plane->name, plane_state,
crtc->base.id, crtc->name);
else
drm_dbg_atomic(plane->dev,
"Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
plane->base.id, plane->name, plane_state);
return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
/**
* drm_atomic_set_fb_for_plane - set framebuffer for plane
* @plane_state: atomic state object for the plane
* @fb: fb to use for the plane
*
* Changing the assigned framebuffer for a plane requires us to grab a reference
* to the new fb and drop the reference to the old fb, if there is one. This
* function takes care of all these details besides updating the pointer in the
* state object itself.
*/
void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb)
{
struct drm_plane *plane = plane_state->plane;
if (fb)
drm_dbg_atomic(plane->dev,
"Set [FB:%d] for [PLANE:%d:%s] state %p\n",
fb->base.id, plane->base.id, plane->name,
plane_state);
else
drm_dbg_atomic(plane->dev,
"Set [NOFB] for [PLANE:%d:%s] state %p\n",
plane->base.id, plane->name, plane_state);
drm_framebuffer_assign(&plane_state->fb, fb);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
/**
* drm_atomic_set_crtc_for_connector - set CRTC for connector
* @conn_state: atomic state object for the connector
* @crtc: CRTC to use for the connector
*
* Changing the assigned CRTC for a connector requires us to grab the lock and
* state for the new CRTC, as needed. This function takes care of all these
* details besides updating the pointer in the state object itself.
*
* Returns:
* 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
* then the w/w mutex code has detected a deadlock and the entire atomic
* sequence must be restarted. All other errors are fatal.
*/
int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc)
{
struct drm_connector *connector = conn_state->connector;
struct drm_crtc_state *crtc_state;
if (conn_state->crtc == crtc)
return 0;
if (conn_state->crtc) {
crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
conn_state->crtc);
crtc_state->connector_mask &=
~drm_connector_mask(conn_state->connector);
drm_connector_put(conn_state->connector);
conn_state->crtc = NULL;
}
if (crtc) {
crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
crtc_state->connector_mask |=
drm_connector_mask(conn_state->connector);
drm_connector_get(conn_state->connector);
conn_state->crtc = crtc;
drm_dbg_atomic(connector->dev,
"Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
connector->base.id, connector->name,
conn_state, crtc->base.id, crtc->name);
} else {
drm_dbg_atomic(connector->dev,
"Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
connector->base.id, connector->name,
conn_state);
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
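/*
* Editor's hedged usage sketch: the set_crtc_for_* and set_fb_for_plane
* helpers combine when constructing an update by hand, e.g. in a
* compatibility helper (locking and error unwinding omitted):
*
*	plane_state = drm_atomic_get_plane_state(state, plane);
*	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
*	if (ret)
*		return ret;
*	drm_atomic_set_fb_for_plane(plane_state, fb);
*
*	ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
*/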
static void set_out_fence_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc, s32 __user *fence_ptr)
{
state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}
static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
s32 __user *fence_ptr;
fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
return fence_ptr;
}
static int set_out_fence_for_connector(struct drm_atomic_state *state,
struct drm_connector *connector,
s32 __user *fence_ptr)
{
unsigned int index = drm_connector_index(connector);
if (!fence_ptr)
return 0;
if (put_user(-1, fence_ptr))
return -EFAULT;
state->connectors[index].out_fence_ptr = fence_ptr;
return 0;
}
static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
struct drm_connector *connector)
{
unsigned int index = drm_connector_index(connector);
s32 __user *fence_ptr;
fence_ptr = state->connectors[index].out_fence_ptr;
state->connectors[index].out_fence_ptr = NULL;
return fence_ptr;
}
static int
drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
struct drm_property_blob **blob,
uint64_t blob_id,
ssize_t expected_size,
ssize_t expected_elem_size,
bool *replaced)
{
struct drm_property_blob *new_blob = NULL;
if (blob_id != 0) {
new_blob = drm_property_lookup_blob(dev, blob_id);
if (new_blob == NULL) {
drm_dbg_atomic(dev,
"cannot find blob ID %llu\n", blob_id);
return -EINVAL;
}
if (expected_size > 0 &&
new_blob->length != expected_size) {
drm_dbg_atomic(dev,
"[BLOB:%d] length %zu different from expected %zu\n",
new_blob->base.id, new_blob->length, expected_size);
drm_property_blob_put(new_blob);
return -EINVAL;
}
if (expected_elem_size > 0 &&
new_blob->length % expected_elem_size != 0) {
drm_dbg_atomic(dev,
"[BLOB:%d] length %zu not divisible by element size %zu\n",
new_blob->base.id, new_blob->length, expected_elem_size);
drm_property_blob_put(new_blob);
return -EINVAL;
}
}
*replaced |= drm_property_replace_blob(blob, new_blob);
drm_property_blob_put(new_blob);
return 0;
}
static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *state, struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
bool replaced = false;
int ret;
if (property == config->prop_active)
state->active = val;
else if (property == config->prop_mode_id) {
struct drm_property_blob *mode =
drm_property_lookup_blob(dev, val);
ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
drm_property_blob_put(mode);
return ret;
} else if (property == config->prop_vrr_enabled) {
state->vrr_enabled = val;
} else if (property == config->degamma_lut_property) {
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->degamma_lut,
val,
-1, sizeof(struct drm_color_lut),
&replaced);
state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->ctm_property) {
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->ctm,
val,
sizeof(struct drm_color_ctm), -1,
&replaced);
state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->gamma_lut_property) {
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->gamma_lut,
val,
-1, sizeof(struct drm_color_lut),
&replaced);
state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->prop_out_fence_ptr) {
s32 __user *fence_ptr = u64_to_user_ptr(val);
if (!fence_ptr)
return 0;
if (put_user(-1, fence_ptr))
return -EFAULT;
set_out_fence_for_crtc(state->state, crtc, fence_ptr);
} else if (property == crtc->scaling_filter_property) {
state->scaling_filter = val;
} else if (crtc->funcs->atomic_set_property) {
return crtc->funcs->atomic_set_property(crtc, state, property, val);
} else {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
crtc->base.id, crtc->name,
property->base.id, property->name);
return -EINVAL;
}
return 0;
}
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property, uint64_t *val)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_active)
*val = drm_atomic_crtc_effectively_active(state);
else if (property == config->prop_mode_id)
*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
else if (property == config->prop_vrr_enabled)
*val = state->vrr_enabled;
else if (property == config->degamma_lut_property)
*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
else if (property == config->ctm_property)
*val = (state->ctm) ? state->ctm->base.id : 0;
else if (property == config->gamma_lut_property)
*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
else if (property == config->prop_out_fence_ptr)
*val = 0;
else if (property == crtc->scaling_filter_property)
*val = state->scaling_filter;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
else {
drm_dbg_atomic(dev,
"[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
crtc->base.id, crtc->name,
property->base.id, property->name);
return -EINVAL;
}
return 0;
}
static int drm_atomic_plane_set_property(struct drm_plane *plane,
struct drm_plane_state *state, struct drm_file *file_priv,
struct drm_property *property, uint64_t val)
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
bool replaced = false;
int ret;
if (property == config->prop_fb_id) {
struct drm_framebuffer *fb;
fb = drm_framebuffer_lookup(dev, file_priv, val);
drm_atomic_set_fb_for_plane(state, fb);
if (fb)
drm_framebuffer_put(fb);
} else if (property == config->prop_in_fence_fd) {
if (state->fence)
return -EINVAL;
if (U642I64(val) == -1)
return 0;
state->fence = sync_file_get_fence(val);
if (!state->fence)
return -EINVAL;
} else if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
if (val && !crtc) {
drm_dbg_atomic(dev,
"[PROP:%d:%s] cannot find CRTC with ID %llu\n",
property->base.id, property->name, val);
return -EACCES;
}
return drm_atomic_set_crtc_for_plane(state, crtc);
} else if (property == config->prop_crtc_x) {
state->crtc_x = U642I64(val);
} else if (property == config->prop_crtc_y) {
state->crtc_y = U642I64(val);
} else if (property == config->prop_crtc_w) {
state->crtc_w = val;
} else if (property == config->prop_crtc_h) {
state->crtc_h = val;
} else if (property == config->prop_src_x) {
state->src_x = val;
} else if (property == config->prop_src_y) {
state->src_y = val;
} else if (property == config->prop_src_w) {
state->src_w = val;
} else if (property == config->prop_src_h) {
state->src_h = val;
} else if (property == plane->alpha_property) {
state->alpha = val;
} else if (property == plane->blend_mode_property) {
state->pixel_blend_mode = val;
} else if (property == plane->rotation_property) {
if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
drm_dbg_atomic(plane->dev,
"[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
plane->base.id, plane->name, val);
return -EINVAL;
}
state->rotation = val;
} else if (property == plane->zpos_property) {
state->zpos = val;
} else if (property == plane->color_encoding_property) {
state->color_encoding = val;
} else if (property == plane->color_range_property) {
state->color_range = val;
} else if (property == config->prop_fb_damage_clips) {
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->fb_damage_clips,
val,
-1,
sizeof(struct drm_rect),
&replaced);
return ret;
} else if (property == plane->scaling_filter_property) {
state->scaling_filter = val;
} else if (plane->funcs->atomic_set_property) {
return plane->funcs->atomic_set_property(plane, state,
property, val);
} else {
drm_dbg_atomic(plane->dev,
"[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
plane->base.id, plane->name,
property->base.id, property->name);
return -EINVAL;
}
return 0;
}
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property, uint64_t *val)
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_fb_id) {
*val = (state->fb) ? state->fb->base.id : 0;
} else if (property == config->prop_in_fence_fd) {
*val = -1;
} else if (property == config->prop_crtc_id) {
*val = (state->crtc) ? state->crtc->base.id : 0;
} else if (property == config->prop_crtc_x) {
*val = I642U64(state->crtc_x);
} else if (property == config->prop_crtc_y) {
*val = I642U64(state->crtc_y);
} else if (property == config->prop_crtc_w) {
*val = state->crtc_w;
} else if (property == config->prop_crtc_h) {
*val = state->crtc_h;
} else if (property == config->prop_src_x) {
*val = state->src_x;
} else if (property == config->prop_src_y) {
*val = state->src_y;
} else if (property == config->prop_src_w) {
*val = state->src_w;
} else if (property == config->prop_src_h) {
*val = state->src_h;
} else if (property == plane->alpha_property) {
*val = state->alpha;
} else if (property == plane->blend_mode_property) {
*val = state->pixel_blend_mode;
} else if (property == plane->rotation_property) {
*val = state->rotation;
} else if (property == plane->zpos_property) {
*val = state->zpos;
} else if (property == plane->color_encoding_property) {
*val = state->color_encoding;
} else if (property == plane->color_range_property) {
*val = state->color_range;
} else if (property == config->prop_fb_damage_clips) {
*val = (state->fb_damage_clips) ?
state->fb_damage_clips->base.id : 0;
} else if (property == plane->scaling_filter_property) {
*val = state->scaling_filter;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
} else {
drm_dbg_atomic(dev,
"[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
plane->base.id, plane->name,
property->base.id, property->name);
return -EINVAL;
}
return 0;
}
static int drm_atomic_set_writeback_fb_for_connector(
struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
{
int ret;
struct drm_connector *conn = conn_state->connector;
ret = drm_writeback_set_fb(conn_state, fb);
if (ret < 0)
return ret;
if (fb)
drm_dbg_atomic(conn->dev,
"Set [FB:%d] for connector state %p\n",
fb->base.id, conn_state);
else
drm_dbg_atomic(conn->dev,
"Set [NOFB] for connector state %p\n",
conn_state);
return 0;
}
static int drm_atomic_connector_set_property(struct drm_connector *connector,
struct drm_connector_state *state, struct drm_file *file_priv,
struct drm_property *property, uint64_t val)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *config = &dev->mode_config;
bool replaced = false;
int ret;
if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
if (val && !crtc) {
drm_dbg_atomic(dev,
"[PROP:%d:%s] cannot find CRTC with ID %llu\n",
property->base.id, property->name, val);
return -EACCES;
}
return drm_atomic_set_crtc_for_connector(state, crtc);
} else if (property == config->dpms_property) {
/* Setting the DPMS property requires special handling, which
* is done in the legacy setprop path for us. Disallow (for
* now?) atomic writes to the DPMS property:
*/
drm_dbg_atomic(dev,
"legacy [PROP:%d:%s] can only be set via legacy uAPI\n",
property->base.id, property->name);
return -EINVAL;
} else if (property == config->tv_select_subconnector_property) {
state->tv.select_subconnector = val;
} else if (property == config->tv_subconnector_property) {
state->tv.subconnector = val;
} else if (property == config->tv_left_margin_property) {
state->tv.margins.left = val;
} else if (property == config->tv_right_margin_property) {
state->tv.margins.right = val;
} else if (property == config->tv_top_margin_property) {
state->tv.margins.top = val;
} else if (property == config->tv_bottom_margin_property) {
state->tv.margins.bottom = val;
} else if (property == config->legacy_tv_mode_property) {
state->tv.legacy_mode = val;
} else if (property == config->tv_mode_property) {
state->tv.mode = val;
} else if (property == config->tv_brightness_property) {
state->tv.brightness = val;
} else if (property == config->tv_contrast_property) {
state->tv.contrast = val;
} else if (property == config->tv_flicker_reduction_property) {
state->tv.flicker_reduction = val;
} else if (property == config->tv_overscan_property) {
state->tv.overscan = val;
} else if (property == config->tv_saturation_property) {
state->tv.saturation = val;
} else if (property == config->tv_hue_property) {
state->tv.hue = val;
} else if (property == config->link_status_property) {
/* Never downgrade from GOOD to BAD on userspace's request here;
* only hw issues can do that.
*
* For an atomic property, userspace doesn't need to understand
* all the properties, but it does need to be able to restore
* the state it wants on VT switch. So if userspace tries to
* change the link_status from GOOD to BAD, the driver silently
* rejects it and returns 0. This prevents userspace from
* accidentally breaking the display when it restores the
* state.
*/
if (state->link_status != DRM_LINK_STATUS_GOOD)
state->link_status = val;
} else if (property == config->hdr_output_metadata_property) {
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->hdr_output_metadata,
val,
sizeof(struct hdr_output_metadata), -1,
&replaced);
return ret;
} else if (property == config->aspect_ratio_property) {
state->picture_aspect_ratio = val;
} else if (property == config->content_type_property) {
state->content_type = val;
} else if (property == connector->scaling_mode_property) {
state->scaling_mode = val;
} else if (property == config->content_protection_property) {
if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
drm_dbg_kms(dev, "only drivers can set CP Enabled\n");
return -EINVAL;
}
state->content_protection = val;
} else if (property == config->hdcp_content_type_property) {
state->hdcp_content_type = val;
} else if (property == connector->colorspace_property) {
state->colorspace = val;
} else if (property == config->writeback_fb_id_property) {
struct drm_framebuffer *fb;
int ret;
fb = drm_framebuffer_lookup(dev, file_priv, val);
ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
if (fb)
drm_framebuffer_put(fb);
return ret;
} else if (property == config->writeback_out_fence_ptr_property) {
s32 __user *fence_ptr = u64_to_user_ptr(val);
return set_out_fence_for_connector(state->state, connector,
fence_ptr);
} else if (property == connector->max_bpc_property) {
state->max_requested_bpc = val;
} else if (property == connector->privacy_screen_sw_state_property) {
state->privacy_screen_sw_state = val;
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
} else {
drm_dbg_atomic(connector->dev,
"[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
connector->base.id, connector->name,
property->base.id, property->name);
return -EINVAL;
}
return 0;
}
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property, uint64_t *val)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_crtc_id) {
*val = (state->crtc) ? state->crtc->base.id : 0;
} else if (property == config->dpms_property) {
if (state->crtc && state->crtc->state->self_refresh_active)
*val = DRM_MODE_DPMS_ON;
else
*val = connector->dpms;
} else if (property == config->tv_select_subconnector_property) {
*val = state->tv.select_subconnector;
} else if (property == config->tv_subconnector_property) {
*val = state->tv.subconnector;
} else if (property == config->tv_left_margin_property) {
*val = state->tv.margins.left;
} else if (property == config->tv_right_margin_property) {
*val = state->tv.margins.right;
} else if (property == config->tv_top_margin_property) {
*val = state->tv.margins.top;
} else if (property == config->tv_bottom_margin_property) {
*val = state->tv.margins.bottom;
} else if (property == config->legacy_tv_mode_property) {
*val = state->tv.legacy_mode;
} else if (property == config->tv_mode_property) {
*val = state->tv.mode;
} else if (property == config->tv_brightness_property) {
*val = state->tv.brightness;
} else if (property == config->tv_contrast_property) {
*val = state->tv.contrast;
} else if (property == config->tv_flicker_reduction_property) {
*val = state->tv.flicker_reduction;
} else if (property == config->tv_overscan_property) {
*val = state->tv.overscan;
} else if (property == config->tv_saturation_property) {
*val = state->tv.saturation;
} else if (property == config->tv_hue_property) {
*val = state->tv.hue;
} else if (property == config->link_status_property) {
*val = state->link_status;
} else if (property == config->aspect_ratio_property) {
*val = state->picture_aspect_ratio;
} else if (property == config->content_type_property) {
*val = state->content_type;
} else if (property == connector->colorspace_property) {
*val = state->colorspace;
} else if (property == connector->scaling_mode_property) {
*val = state->scaling_mode;
} else if (property == config->hdr_output_metadata_property) {
*val = state->hdr_output_metadata ?
state->hdr_output_metadata->base.id : 0;
} else if (property == config->content_protection_property) {
*val = state->content_protection;
} else if (property == config->hdcp_content_type_property) {
*val = state->hdcp_content_type;
} else if (property == config->writeback_fb_id_property) {
/* Writeback framebuffer is one-shot, write and forget */
*val = 0;
} else if (property == config->writeback_out_fence_ptr_property) {
*val = 0;
} else if (property == connector->max_bpc_property) {
*val = state->max_requested_bpc;
} else if (property == connector->privacy_screen_sw_state_property) {
*val = state->privacy_screen_sw_state;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
} else {
drm_dbg_atomic(dev,
"[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
connector->base.id, connector->name,
property->base.id, property->name);
return -EINVAL;
}
return 0;
}
int drm_atomic_get_property(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
struct drm_device *dev = property->dev;
int ret;
switch (obj->type) {
case DRM_MODE_OBJECT_CONNECTOR: {
struct drm_connector *connector = obj_to_connector(obj);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
ret = drm_atomic_connector_get_property(connector,
connector->state, property, val);
break;
}
case DRM_MODE_OBJECT_CRTC: {
struct drm_crtc *crtc = obj_to_crtc(obj);
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
ret = drm_atomic_crtc_get_property(crtc,
crtc->state, property, val);
break;
}
case DRM_MODE_OBJECT_PLANE: {
struct drm_plane *plane = obj_to_plane(obj);
WARN_ON(!drm_modeset_is_locked(&plane->mutex));
ret = drm_atomic_plane_get_property(plane,
plane->state, property, val);
break;
}
default:
drm_dbg_atomic(dev, "[OBJECT:%d] has no properties\n", obj->id);
ret = -EINVAL;
break;
}
return ret;
}
/*
* The big monster ioctl
*/
static struct drm_pending_vblank_event *create_vblank_event(
struct drm_crtc *crtc, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
e = kzalloc(sizeof *e, GFP_KERNEL);
if (!e)
return NULL;
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof(e->event);
e->event.vbl.crtc_id = crtc->base.id;
e->event.vbl.user_data = user_data;
return e;
}
int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
struct drm_connector *connector,
int mode)
{
struct drm_connector *tmp_connector;
struct drm_connector_state *new_conn_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i, ret, old_mode = connector->dpms;
bool active = false;
ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
state->acquire_ctx);
if (ret)
return ret;
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
connector->dpms = mode;
crtc = connector->state->crtc;
if (!crtc)
goto out;
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
goto out;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto out;
}
for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
if (new_conn_state->crtc != crtc)
continue;
if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
active = true;
break;
}
}
crtc_state->active = active;
ret = drm_atomic_commit(state);
out:
if (ret != 0)
connector->dpms = old_mode;
return ret;
}
int drm_atomic_set_property(struct drm_atomic_state *state,
struct drm_file *file_priv,
struct drm_mode_object *obj,
struct drm_property *prop,
uint64_t prop_value)
{
struct drm_mode_object *ref;
int ret;
if (!drm_property_change_valid_get(prop, prop_value, &ref))
return -EINVAL;
switch (obj->type) {
case DRM_MODE_OBJECT_CONNECTOR: {
struct drm_connector *connector = obj_to_connector(obj);
struct drm_connector_state *connector_state;
connector_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(connector_state)) {
ret = PTR_ERR(connector_state);
break;
}
ret = drm_atomic_connector_set_property(connector,
connector_state, file_priv,
prop, prop_value);
break;
}
case DRM_MODE_OBJECT_CRTC: {
struct drm_crtc *crtc = obj_to_crtc(obj);
struct drm_crtc_state *crtc_state;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
break;
}
ret = drm_atomic_crtc_set_property(crtc,
crtc_state, prop, prop_value);
break;
}
case DRM_MODE_OBJECT_PLANE: {
struct drm_plane *plane = obj_to_plane(obj);
struct drm_plane_state *plane_state;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
break;
}
ret = drm_atomic_plane_set_property(plane,
plane_state, file_priv,
prop, prop_value);
break;
}
default:
drm_dbg_atomic(prop->dev, "[OBJECT:%d] has no properties\n", obj->id);
ret = -EINVAL;
break;
}
drm_property_change_valid_put(prop, ref);
return ret;
}
/**
* DOC: explicit fencing properties
*
* Explicit fencing allows userspace to control the buffer synchronization
* between devices. A fence or a group of fences is transferred to/from
* userspace using Sync File fds, and there are two DRM properties for that:
* IN_FENCE_FD on each DRM Plane to send fences to the kernel, and
* OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
*
* By contrast, with implicit fencing the kernel keeps track of any
* ongoing rendering, and automatically ensures that the atomic update waits
* for any pending rendering to complete. This is usually tracked in &struct
* dma_resv which can also contain mandatory kernel fences. Implicit syncing
* is how Linux traditionally worked (e.g. DRI2/3 on X.org), whereas explicit
* fencing is what Android wants.
*
* "IN_FENCE_FD”:
* Use this property to pass a fence that DRM should wait on before
* proceeding with the Atomic Commit request and show the framebuffer for
* the plane on the screen. The fence can be either a normal fence or a
* merged one, the sync_file framework will handle both cases and use a
* fence_array if a merged fence is received. Passing -1 here means no
* fences to wait on.
*
* If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag,
* it will only check that the Sync File is a valid one.
*
* On the driver side the fence is stored in the @fence field of
* &struct drm_plane_state. Drivers which also support implicit fencing
* should extract the implicit fence using drm_gem_plane_helper_prepare_fb(),
* to make sure there's consistent behaviour between drivers in precedence
* of implicit vs. explicit fencing.
*
* "OUT_FENCE_PTR”:
* Use this property to pass a file descriptor pointer to DRM. Once the
* Atomic Commit request call returns OUT_FENCE_PTR will be filled with
* the file descriptor number of a Sync File. This Sync File contains the
* CRTC fence that will be signaled when all framebuffers present on the
* Atomic Commit * request for that given CRTC are scanned out on the
* screen.
*
* The Atomic Commit request fails if a invalid pointer is passed. If the
* Atomic Commit request fails for any other reason the out fence fd
* returned will be -1. On a Atomic Commit with the
* DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
*
* Note that out-fences don't have a special interface to drivers and are
* internally represented by a &struct drm_pending_vblank_event in
* &struct drm_crtc_state, which is also used by the nonblocking atomic commit
* helpers and for the DRM event handling for existing userspace.
*/
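/*
 * A minimal userspace sketch of both properties in a single commit, assuming
 * libdrm's atomic helpers and that the object IDs (plane_id, crtc_id) and the
 * numeric property IDs (in_fence_fd_prop, out_fence_ptr_prop) have already
 * been looked up via drmModeObjectGetProperties():
 *
 *	int in_fence_fd = ...;	// produced by the rendering API
 *	int out_fence_fd = -1;
 *	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
 *
 *	drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop, in_fence_fd);
 *	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
 *				 (uint64_t)(uintptr_t)&out_fence_fd);
 *	if (drmModeAtomicCommit(fd, req, 0, NULL) == 0)
 *		;	// out_fence_fd now holds a Sync File fd
 *	drmModeAtomicFree(req);
 */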
struct drm_out_fence_state {
s32 __user *out_fence_ptr;
struct sync_file *sync_file;
int fd;
};
static int setup_out_fence(struct drm_out_fence_state *fence_state,
struct dma_fence *fence)
{
fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
if (fence_state->fd < 0)
return fence_state->fd;
if (put_user(fence_state->fd, fence_state->out_fence_ptr))
return -EFAULT;
fence_state->sync_file = sync_file_create(fence);
if (!fence_state->sync_file)
return -ENOMEM;
return 0;
}
static int prepare_signaling(struct drm_device *dev,
struct drm_atomic_state *state,
struct drm_mode_atomic *arg,
struct drm_file *file_priv,
struct drm_out_fence_state **fence_state,
unsigned int *num_fences)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_connector *conn;
struct drm_connector_state *conn_state;
int i, c = 0, ret;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
return 0;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
s32 __user *fence_ptr;
fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
struct drm_pending_vblank_event *e;
e = create_vblank_event(crtc, arg->user_data);
if (!e)
return -ENOMEM;
crtc_state->event = e;
}
if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
struct drm_pending_vblank_event *e = crtc_state->event;
if (!file_priv)
continue;
ret = drm_event_reserve_init(dev, file_priv, &e->base,
&e->event.base);
if (ret) {
kfree(e);
crtc_state->event = NULL;
return ret;
}
}
if (fence_ptr) {
struct dma_fence *fence;
struct drm_out_fence_state *f;
f = krealloc(*fence_state, sizeof(**fence_state) *
(*num_fences + 1), GFP_KERNEL);
if (!f)
return -ENOMEM;
memset(&f[*num_fences], 0, sizeof(*f));
f[*num_fences].out_fence_ptr = fence_ptr;
*fence_state = f;
fence = drm_crtc_create_fence(crtc);
if (!fence)
return -ENOMEM;
ret = setup_out_fence(&f[(*num_fences)++], fence);
if (ret) {
dma_fence_put(fence);
return ret;
}
crtc_state->event->base.fence = fence;
}
c++;
}
for_each_new_connector_in_state(state, conn, conn_state, i) {
struct drm_writeback_connector *wb_conn;
struct drm_out_fence_state *f;
struct dma_fence *fence;
s32 __user *fence_ptr;
if (!conn_state->writeback_job)
continue;
fence_ptr = get_out_fence_for_connector(state, conn);
if (!fence_ptr)
continue;
f = krealloc(*fence_state, sizeof(**fence_state) *
(*num_fences + 1), GFP_KERNEL);
if (!f)
return -ENOMEM;
memset(&f[*num_fences], 0, sizeof(*f));
f[*num_fences].out_fence_ptr = fence_ptr;
*fence_state = f;
wb_conn = drm_connector_to_writeback(conn);
fence = drm_writeback_get_out_fence(wb_conn);
if (!fence)
return -ENOMEM;
ret = setup_out_fence(&f[(*num_fences)++], fence);
if (ret) {
dma_fence_put(fence);
return ret;
}
conn_state->writeback_job->out_fence = fence;
}
/*
* If this flag is set, userspace is waiting for an event that can never
* arrive, because there is no CRTC available to signal it.
*/
if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) {
drm_dbg_atomic(dev, "need at least one CRTC for DRM_MODE_PAGE_FLIP_EVENT");
return -EINVAL;
}
return 0;
}
static void complete_signaling(struct drm_device *dev,
struct drm_atomic_state *state,
struct drm_out_fence_state *fence_state,
unsigned int num_fences,
bool install_fds)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i;
if (install_fds) {
for (i = 0; i < num_fences; i++)
fd_install(fence_state[i].fd,
fence_state[i].sync_file->file);
kfree(fence_state);
return;
}
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct drm_pending_vblank_event *event = crtc_state->event;
/*
* Free the allocated event. drm_atomic_helper_setup_commit
* can allocate an event too, so only free it if it's ours
* to prevent a double free in drm_atomic_state_clear.
*/
if (event && (event->base.fence || event->base.file_priv)) {
drm_event_cancel_free(dev, &event->base);
crtc_state->event = NULL;
}
}
if (!fence_state)
return;
for (i = 0; i < num_fences; i++) {
if (fence_state[i].sync_file)
fput(fence_state[i].sync_file->file);
if (fence_state[i].fd >= 0)
put_unused_fd(fence_state[i].fd);
/* If this fails, all we can do is log it */
if (fence_state[i].out_fence_ptr &&
put_user(-1, fence_state[i].out_fence_ptr))
drm_dbg_atomic(dev, "Couldn't clear out_fence_ptr\n");
}
kfree(fence_state);
}
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_atomic *arg = data;
uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
unsigned int copied_objs, copied_props;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_out_fence_state *fence_state;
int ret = 0;
unsigned int i, j, num_fences;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
return -EOPNOTSUPP;
/* disallow for userspace that has not enabled atomic cap (even
* though this may be a bit overkill, since legacy userspace
* wouldn't know how to call this ioctl)
*/
if (!file_priv->atomic) {
drm_dbg_atomic(dev,
"commit failed: atomic cap not enabled\n");
return -EINVAL;
}
if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS) {
drm_dbg_atomic(dev, "commit failed: invalid flag\n");
return -EINVAL;
}
if (arg->reserved) {
drm_dbg_atomic(dev, "commit failed: reserved field set\n");
return -EINVAL;
}
if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) {
drm_dbg_atomic(dev,
"commit failed: invalid flag DRM_MODE_PAGE_FLIP_ASYNC\n");
return -EINVAL;
}
/* can't test and expect an event at the same time. */
if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
(arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) {
drm_dbg_atomic(dev,
"commit failed: page-flip event requested with test-only commit\n");
return -EINVAL;
}
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
state->acquire_ctx = &ctx;
state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
retry:
copied_objs = 0;
copied_props = 0;
fence_state = NULL;
num_fences = 0;
for (i = 0; i < arg->count_objs; i++) {
uint32_t obj_id, count_props;
struct drm_mode_object *obj;
if (get_user(obj_id, objs_ptr + copied_objs)) {
ret = -EFAULT;
goto out;
}
obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
if (!obj) {
drm_dbg_atomic(dev, "cannot find object ID %d", obj_id);
ret = -ENOENT;
goto out;
}
if (!obj->properties) {
drm_dbg_atomic(dev, "[OBJECT:%d] has no properties", obj_id);
drm_mode_object_put(obj);
ret = -ENOENT;
goto out;
}
if (get_user(count_props, count_props_ptr + copied_objs)) {
drm_mode_object_put(obj);
ret = -EFAULT;
goto out;
}
copied_objs++;
for (j = 0; j < count_props; j++) {
uint32_t prop_id;
uint64_t prop_value;
struct drm_property *prop;
if (get_user(prop_id, props_ptr + copied_props)) {
drm_mode_object_put(obj);
ret = -EFAULT;
goto out;
}
prop = drm_mode_obj_find_prop_id(obj, prop_id);
if (!prop) {
drm_dbg_atomic(dev,
"[OBJECT:%d] cannot find property ID %d",
obj_id, prop_id);
drm_mode_object_put(obj);
ret = -ENOENT;
goto out;
}
if (copy_from_user(&prop_value,
prop_values_ptr + copied_props,
sizeof(prop_value))) {
drm_mode_object_put(obj);
ret = -EFAULT;
goto out;
}
ret = drm_atomic_set_property(state, file_priv,
obj, prop, prop_value);
if (ret) {
drm_mode_object_put(obj);
goto out;
}
copied_props++;
}
drm_mode_object_put(obj);
}
ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
&num_fences);
if (ret)
goto out;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
ret = drm_atomic_check_only(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_nonblocking_commit(state);
} else {
ret = drm_atomic_commit(state);
}
out:
complete_signaling(dev, state, fence_state, num_fences, !ret);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
ret = drm_modeset_backoff(&ctx);
if (!ret)
goto retry;
}
drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
| linux-master | drivers/gpu/drm/drm_atomic_uapi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MIPI Display Bus Interface (DBI) LCD controller support
*
* Copyright 2016 Noralf Trønnes
*/
#include <linux/backlight.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <video/mipi_display.h>
#define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
#define DCS_POWER_MODE_DISPLAY BIT(2)
#define DCS_POWER_MODE_DISPLAY_NORMAL_MODE BIT(3)
#define DCS_POWER_MODE_SLEEP_MODE BIT(4)
#define DCS_POWER_MODE_PARTIAL_MODE BIT(5)
#define DCS_POWER_MODE_IDLE_MODE BIT(6)
#define DCS_POWER_MODE_RESERVED_MASK (BIT(0) | BIT(1) | BIT(7))
/**
* DOC: overview
*
* This library provides helpers for MIPI Display Bus Interface (DBI)
* compatible display controllers.
*
* Many controllers for tiny LCD displays are MIPI-compliant and can use this
* library. If a controller uses registers 0x2A and 0x2B to set the area to
* update and uses register 0x2C to write to frame memory, it is most likely
* MIPI-compliant.
*
* Only MIPI Type 1 displays are supported since a full frame memory is needed.
*
* There are 3 MIPI DBI implementation types:
*
* A. Motorola 6800 type parallel bus
*
* B. Intel 8080 type parallel bus
*
* C. SPI type with 3 options:
*
* 1. 9-bit with the Data/Command signal as the ninth bit
* 2. Same as above except it's sent as 16 bits
* 3. 8-bit with the Data/Command signal as a separate D/CX pin
*
* Currently mipi_dbi only supports Type C options 1 and 3 with
* mipi_dbi_spi_init().
*/
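/*
 * A minimal probe-time sketch for a Type C display (a sketch only: names like
 * my_pipe_funcs and my_mode are placeholders, and error handling, backlight
 * and regulator setup are omitted):
 *
 *	struct mipi_dbi_dev *dbidev = ...;	// devm_drm_dev_alloc()'d
 *	struct gpio_desc *dc = ...;		// optional D/C gpio
 *	int ret;
 *
 *	ret = mipi_dbi_spi_init(spi, &dbidev->dbi, dc);
 *	if (ret)
 *		return ret;
 *	ret = mipi_dbi_dev_init(dbidev, &my_pipe_funcs, &my_mode, rotation);
 *	if (ret)
 *		return ret;
 *	drm_mode_config_reset(&dbidev->drm);
 */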
#define MIPI_DBI_DEBUG_COMMAND(cmd, data, len) \
({ \
if (!len) \
DRM_DEBUG_DRIVER("cmd=%02x\n", cmd); \
else if (len <= 32) \
DRM_DEBUG_DRIVER("cmd=%02x, par=%*ph\n", cmd, (int)len, data);\
else \
DRM_DEBUG_DRIVER("cmd=%02x, len=%zu\n", cmd, len); \
})
static const u8 mipi_dbi_dcs_read_commands[] = {
MIPI_DCS_GET_DISPLAY_ID,
MIPI_DCS_GET_RED_CHANNEL,
MIPI_DCS_GET_GREEN_CHANNEL,
MIPI_DCS_GET_BLUE_CHANNEL,
MIPI_DCS_GET_DISPLAY_STATUS,
MIPI_DCS_GET_POWER_MODE,
MIPI_DCS_GET_ADDRESS_MODE,
MIPI_DCS_GET_PIXEL_FORMAT,
MIPI_DCS_GET_DISPLAY_MODE,
MIPI_DCS_GET_SIGNAL_MODE,
MIPI_DCS_GET_DIAGNOSTIC_RESULT,
MIPI_DCS_READ_MEMORY_START,
MIPI_DCS_READ_MEMORY_CONTINUE,
MIPI_DCS_GET_SCANLINE,
MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
MIPI_DCS_GET_CONTROL_DISPLAY,
MIPI_DCS_GET_POWER_SAVE,
MIPI_DCS_GET_CABC_MIN_BRIGHTNESS,
MIPI_DCS_READ_DDB_START,
MIPI_DCS_READ_DDB_CONTINUE,
0, /* sentinel */
};
static bool mipi_dbi_command_is_read(struct mipi_dbi *dbi, u8 cmd)
{
unsigned int i;
if (!dbi->read_commands)
return false;
for (i = 0; i < 0xff; i++) {
if (!dbi->read_commands[i])
return false;
if (cmd == dbi->read_commands[i])
return true;
}
return false;
}
/**
* mipi_dbi_command_read - MIPI DCS read command
* @dbi: MIPI DBI structure
* @cmd: Command
* @val: Value read
*
* Send MIPI DCS read command to the controller.
*
* Returns:
* Zero on success, negative error code on failure.
*/
int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val)
{
if (!dbi->read_commands)
return -EACCES;
if (!mipi_dbi_command_is_read(dbi, cmd))
return -EINVAL;
return mipi_dbi_command_buf(dbi, cmd, val, 1);
}
EXPORT_SYMBOL(mipi_dbi_command_read);
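/*
 * Example: reading the single-byte DCS power mode register, as
 * mipi_dbi_display_is_on() below does:
 *
 *	u8 val;
 *
 *	ret = mipi_dbi_command_read(dbi, MIPI_DCS_GET_POWER_MODE, &val);
 */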
/**
* mipi_dbi_command_buf - MIPI DCS command with parameter(s) in an array
* @dbi: MIPI DBI structure
* @cmd: Command
* @data: Parameter buffer
* @len: Buffer length
*
* Returns:
* Zero on success, negative error code on failure.
*/
int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
{
u8 *cmdbuf;
int ret;
/* SPI requires DMA-safe buffers */
cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL);
if (!cmdbuf)
return -ENOMEM;
mutex_lock(&dbi->cmdlock);
ret = dbi->command(dbi, cmdbuf, data, len);
mutex_unlock(&dbi->cmdlock);
kfree(cmdbuf);
return ret;
}
EXPORT_SYMBOL(mipi_dbi_command_buf);
/* This should only be used by mipi_dbi_command() */
int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
size_t len)
{
u8 *buf;
int ret;
buf = kmemdup(data, len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = mipi_dbi_command_buf(dbi, cmd, buf, len);
kfree(buf);
return ret;
}
EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
/**
* mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary
* @dst: The destination buffer
* @src: The source buffer
* @fb: The source framebuffer
* @clip: Clipping rectangle of the area to be copied
* @swap: When true, swap MSB/LSB of 16-bit values
*
* Returns:
* Zero on success, negative error code on failure.
*/
int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swap)
{
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
struct iosys_map dst_map = IOSYS_MAP_INIT_VADDR(dst);
int ret;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach);
else
drm_fb_memcpy(&dst_map, NULL, src, fb, clip);
break;
case DRM_FORMAT_XRGB8888:
drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, swap);
break;
default:
drm_err_once(fb->dev, "Format is not supported: %p4cc\n",
&fb->format->format);
ret = -EINVAL;
}
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
return ret;
}
EXPORT_SYMBOL(mipi_dbi_buf_copy);
static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
unsigned int xs, unsigned int xe,
unsigned int ys, unsigned int ye)
{
struct mipi_dbi *dbi = &dbidev->dbi;
xs += dbidev->left_offset;
xe += dbidev->left_offset;
ys += dbidev->top_offset;
ye += dbidev->top_offset;
mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, (xs >> 8) & 0xff,
xs & 0xff, (xe >> 8) & 0xff, xe & 0xff);
mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, (ys >> 8) & 0xff,
ys & 0xff, (ye >> 8) & 0xff, ye & 0xff);
}
static void mipi_dbi_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
struct drm_rect *rect)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
struct mipi_dbi *dbi = &dbidev->dbi;
bool swap = dbi->swap_bytes;
int ret = 0;
bool full;
void *tr;
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
tr = dbidev->tx_buf;
ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap);
if (ret)
goto err_msg;
} else {
tr = src->vaddr; /* TODO: Use mapping abstraction properly */
}
mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1,
rect->y2 - 1);
ret = mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START, tr,
width * height * 2);
err_msg:
if (ret)
drm_err_once(fb->dev, "Failed to update display %d\n", ret);
}
/**
* mipi_dbi_pipe_mode_valid - MIPI DBI mode-valid helper
* @pipe: Simple display pipe
* @mode: The mode to test
*
* This function validates a given display mode against the MIPI DBI's hardware
* display. Drivers can use this as their &drm_simple_display_pipe_funcs->mode_valid
* callback.
*/
enum drm_mode_status mipi_dbi_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
return drm_crtc_helper_mode_valid_fixed(&pipe->crtc, mode, &dbidev->mode);
}
EXPORT_SYMBOL(mipi_dbi_pipe_mode_valid);
/**
* mipi_dbi_pipe_update - Display pipe update helper
* @pipe: Simple display pipe
* @old_state: Old plane state
*
* This function handles framebuffer flushing and vblank events. Drivers can use
* this as their &drm_simple_display_pipe_funcs->update callback.
*/
void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_rect rect;
int idx;
if (!pipe->crtc.state->active)
return;
if (WARN_ON(!fb))
return;
if (!drm_dev_enter(fb->dev, &idx))
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
drm_dev_exit(idx);
}
EXPORT_SYMBOL(mipi_dbi_pipe_update);
/**
* mipi_dbi_enable_flush - MIPI DBI enable helper
* @dbidev: MIPI DBI device structure
* @crtc_state: CRTC state
* @plane_state: Plane state
*
* Flushes the whole framebuffer and enables the backlight. Drivers can use this
* in their &drm_simple_display_pipe_funcs->enable callback.
*
* Note: Drivers that implement their own framebuffer flushing instead of
* mipi_dbi_pipe_update() can't use this function, since both rely on the
* same flushing code.
*/
void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_rect rect = {
.x1 = 0,
.x2 = fb->width,
.y1 = 0,
.y2 = fb->height,
};
int idx;
if (!drm_dev_enter(&dbidev->drm, &idx))
return;
mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
backlight_enable(dbidev->backlight);
drm_dev_exit(idx);
}
EXPORT_SYMBOL(mipi_dbi_enable_flush);
static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
{
struct drm_device *drm = &dbidev->drm;
u16 height = drm->mode_config.min_height;
u16 width = drm->mode_config.min_width;
struct mipi_dbi *dbi = &dbidev->dbi;
size_t len = width * height * 2;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
memset(dbidev->tx_buf, 0, len);
mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1);
mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
(u8 *)dbidev->tx_buf, len);
drm_dev_exit(idx);
}
/**
* mipi_dbi_pipe_disable - MIPI DBI pipe disable helper
* @pipe: Display pipe
*
* This function disables the backlight if present; if not, the display memory
* is blanked. The regulators are disabled if in use. Drivers can use this as
* their
* &drm_simple_display_pipe_funcs->disable callback.
*/
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
DRM_DEBUG_KMS("\n");
if (dbidev->backlight)
backlight_disable(dbidev->backlight);
else
mipi_dbi_blank(dbidev);
if (dbidev->regulator)
regulator_disable(dbidev->regulator);
if (dbidev->io_regulator)
regulator_disable(dbidev->io_regulator);
}
EXPORT_SYMBOL(mipi_dbi_pipe_disable);
/**
* mipi_dbi_pipe_begin_fb_access - MIPI DBI pipe begin-access helper
* @pipe: Display pipe
* @plane_state: Plane state
*
* This function implements struct &drm_simple_display_pipe_funcs.begin_fb_access.
*
* See drm_gem_begin_shadow_fb_access() for details and mipi_dbi_pipe_cleanup_fb()
* for cleanup.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int mipi_dbi_pipe_begin_fb_access(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(mipi_dbi_pipe_begin_fb_access);
/**
* mipi_dbi_pipe_end_fb_access - MIPI DBI pipe end-access helper
* @pipe: Display pipe
* @plane_state: Plane state
*
* This function implements struct &drm_simple_display_pipe_funcs.end_fb_access.
*
* See mipi_dbi_pipe_begin_fb_access().
*/
void mipi_dbi_pipe_end_fb_access(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
drm_gem_end_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(mipi_dbi_pipe_end_fb_access);
/**
* mipi_dbi_pipe_reset_plane - MIPI DBI plane-reset helper
* @pipe: Display pipe
*
* This function implements struct &drm_simple_display_pipe_funcs.reset_plane
* for MIPI DBI planes.
*/
void mipi_dbi_pipe_reset_plane(struct drm_simple_display_pipe *pipe)
{
drm_gem_reset_shadow_plane(&pipe->plane);
}
EXPORT_SYMBOL(mipi_dbi_pipe_reset_plane);
/**
* mipi_dbi_pipe_duplicate_plane_state - duplicates MIPI DBI plane state
* @pipe: Display pipe
*
* This function implements struct &drm_simple_display_pipe_funcs.duplicate_plane_state
* for MIPI DBI planes.
*
* See drm_gem_duplicate_shadow_plane_state() for additional details.
*
* Returns:
* A pointer to a new plane state on success, or NULL otherwise.
*/
struct drm_plane_state *mipi_dbi_pipe_duplicate_plane_state(struct drm_simple_display_pipe *pipe)
{
return drm_gem_duplicate_shadow_plane_state(&pipe->plane);
}
EXPORT_SYMBOL(mipi_dbi_pipe_duplicate_plane_state);
/**
* mipi_dbi_pipe_destroy_plane_state - cleans up MIPI DBI plane state
* @pipe: Display pipe
* @plane_state: Plane state
*
* This function implements struct &drm_simple_display_pipe_funcs.destroy_plane_state
* for MIPI DBI planes.
*
* See drm_gem_destroy_shadow_plane_state() for additional details.
*/
void mipi_dbi_pipe_destroy_plane_state(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(mipi_dbi_pipe_destroy_plane_state);
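/*
 * Taken together, the helpers above can fill in most of a driver's
 * &drm_simple_display_pipe_funcs; only the enable callback (my_pipe_enable
 * below is a placeholder) is controller specific. A sketch:
 *
 *	static const struct drm_simple_display_pipe_funcs my_pipe_funcs = {
 *		.mode_valid = mipi_dbi_pipe_mode_valid,
 *		.enable = my_pipe_enable,
 *		.disable = mipi_dbi_pipe_disable,
 *		.update = mipi_dbi_pipe_update,
 *		.begin_fb_access = mipi_dbi_pipe_begin_fb_access,
 *		.end_fb_access = mipi_dbi_pipe_end_fb_access,
 *		.reset_plane = mipi_dbi_pipe_reset_plane,
 *		.duplicate_plane_state = mipi_dbi_pipe_duplicate_plane_state,
 *		.destroy_plane_state = mipi_dbi_pipe_destroy_plane_state,
 *	};
 *
 * (drm_mipi_dbi.h also provides a convenience macro along these lines.)
 */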
static int mipi_dbi_connector_get_modes(struct drm_connector *connector)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(connector->dev);
return drm_connector_helper_get_modes_fixed(connector, &dbidev->mode);
}
static const struct drm_connector_helper_funcs mipi_dbi_connector_hfuncs = {
.get_modes = mipi_dbi_connector_get_modes,
};
static const struct drm_connector_funcs mipi_dbi_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int mipi_dbi_rotate_mode(struct drm_display_mode *mode,
unsigned int rotation)
{
if (rotation == 0 || rotation == 180) {
return 0;
} else if (rotation == 90 || rotation == 270) {
swap(mode->hdisplay, mode->vdisplay);
swap(mode->hsync_start, mode->vsync_start);
swap(mode->hsync_end, mode->vsync_end);
swap(mode->htotal, mode->vtotal);
swap(mode->width_mm, mode->height_mm);
return 0;
} else {
return -EINVAL;
}
}
static const struct drm_mode_config_funcs mipi_dbi_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static const uint32_t mipi_dbi_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
/**
* mipi_dbi_dev_init_with_formats - MIPI DBI device initialization with custom formats
* @dbidev: MIPI DBI device structure to initialize
* @funcs: Display pipe functions
* @formats: Array of supported formats (DRM_FORMAT\_\*).
* @format_count: Number of elements in @formats
* @mode: Display mode
* @rotation: Initial rotation in degrees counter-clockwise
* @tx_buf_size: Size of the transmit buffer to allocate.
*
* This function sets up a &drm_simple_display_pipe with a &drm_connector that
* has one fixed &drm_display_mode which is rotated according to @rotation.
* This mode is used to set the mode config min/max width/height properties.
*
* Use mipi_dbi_dev_init() if you don't need custom formats.
*
* Note:
* Some of the helper functions expect RGB565 to be the default format and the
* transmit buffer to be sized to fit that.
*
* Returns:
* Zero on success, negative error code on failure.
*/
int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
const struct drm_simple_display_pipe_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
const struct drm_display_mode *mode,
unsigned int rotation, size_t tx_buf_size)
{
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
struct drm_device *drm = &dbidev->drm;
int ret;
if (!dbidev->dbi.command)
return -EINVAL;
ret = drmm_mode_config_init(drm);
if (ret)
return ret;
dbidev->tx_buf = devm_kmalloc(drm->dev, tx_buf_size, GFP_KERNEL);
if (!dbidev->tx_buf)
return -ENOMEM;
drm_mode_copy(&dbidev->mode, mode);
ret = mipi_dbi_rotate_mode(&dbidev->mode, rotation);
if (ret) {
DRM_ERROR("Illegal rotation value %u\n", rotation);
return -EINVAL;
}
drm_connector_helper_add(&dbidev->connector, &mipi_dbi_connector_hfuncs);
ret = drm_connector_init(drm, &dbidev->connector, &mipi_dbi_connector_funcs,
DRM_MODE_CONNECTOR_SPI);
if (ret)
return ret;
ret = drm_simple_display_pipe_init(drm, &dbidev->pipe, funcs, formats, format_count,
modifiers, &dbidev->connector);
if (ret)
return ret;
drm_plane_enable_fb_damage_clips(&dbidev->pipe.plane);
drm->mode_config.funcs = &mipi_dbi_mode_config_funcs;
drm->mode_config.min_width = dbidev->mode.hdisplay;
drm->mode_config.max_width = dbidev->mode.hdisplay;
drm->mode_config.min_height = dbidev->mode.vdisplay;
drm->mode_config.max_height = dbidev->mode.vdisplay;
dbidev->rotation = rotation;
DRM_DEBUG_KMS("rotation = %u\n", rotation);
return 0;
}
EXPORT_SYMBOL(mipi_dbi_dev_init_with_formats);
/**
* mipi_dbi_dev_init - MIPI DBI device initialization
* @dbidev: MIPI DBI device structure to initialize
* @funcs: Display pipe functions
* @mode: Display mode
* @rotation: Initial rotation in degrees counter-clockwise
*
* This function sets up a &drm_simple_display_pipe with a &drm_connector that
* has one fixed &drm_display_mode which is rotated according to @rotation.
* This mode is used to set the mode config min/max width/height properties.
* Additionally &mipi_dbi.tx_buf is allocated.
*
* Supported formats: Native RGB565 and emulated XRGB8888.
*
* Returns:
* Zero on success, negative error code on failure.
*/
int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
const struct drm_simple_display_pipe_funcs *funcs,
const struct drm_display_mode *mode, unsigned int rotation)
{
size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
dbidev->drm.mode_config.preferred_depth = 16;
return mipi_dbi_dev_init_with_formats(dbidev, funcs, mipi_dbi_formats,
ARRAY_SIZE(mipi_dbi_formats), mode,
rotation, bufsize);
}
EXPORT_SYMBOL(mipi_dbi_dev_init);
/**
* mipi_dbi_hw_reset - Hardware reset of controller
* @dbi: MIPI DBI structure
*
* Reset controller if the &mipi_dbi->reset gpio is set.
*/
void mipi_dbi_hw_reset(struct mipi_dbi *dbi)
{
if (!dbi->reset)
return;
gpiod_set_value_cansleep(dbi->reset, 0);
usleep_range(20, 1000);
gpiod_set_value_cansleep(dbi->reset, 1);
msleep(120);
}
EXPORT_SYMBOL(mipi_dbi_hw_reset);
/**
* mipi_dbi_display_is_on - Check if display is on
* @dbi: MIPI DBI structure
*
* This function checks the Power Mode register (if readable) to see if
* display output is turned on. This can be used to see if the bootloader
* has already turned on the display, avoiding flicker when the pipeline is
* enabled.
*
* Returns:
* true if the display can be verified to be on, false otherwise.
*/
bool mipi_dbi_display_is_on(struct mipi_dbi *dbi)
{
u8 val;
if (mipi_dbi_command_read(dbi, MIPI_DCS_GET_POWER_MODE, &val))
return false;
val &= ~DCS_POWER_MODE_RESERVED_MASK;
/* The poweron/reset value is 08h DCS_POWER_MODE_DISPLAY_NORMAL_MODE */
if (val != (DCS_POWER_MODE_DISPLAY |
DCS_POWER_MODE_DISPLAY_NORMAL_MODE | DCS_POWER_MODE_SLEEP_MODE))
return false;
DRM_DEBUG_DRIVER("Display is ON\n");
return true;
}
EXPORT_SYMBOL(mipi_dbi_display_is_on);
static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi_dev *dbidev, bool cond)
{
struct device *dev = dbidev->drm.dev;
struct mipi_dbi *dbi = &dbidev->dbi;
int ret;
if (dbidev->regulator) {
ret = regulator_enable(dbidev->regulator);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to enable regulator (%d)\n", ret);
return ret;
}
}
if (dbidev->io_regulator) {
ret = regulator_enable(dbidev->io_regulator);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to enable I/O regulator (%d)\n", ret);
if (dbidev->regulator)
regulator_disable(dbidev->regulator);
return ret;
}
}
if (cond && mipi_dbi_display_is_on(dbi))
return 1;
mipi_dbi_hw_reset(dbi);
ret = mipi_dbi_command(dbi, MIPI_DCS_SOFT_RESET);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to send reset command (%d)\n", ret);
if (dbidev->regulator)
regulator_disable(dbidev->regulator);
if (dbidev->io_regulator)
regulator_disable(dbidev->io_regulator);
return ret;
}
/*
* If we did a hw reset, we know the controller is in Sleep mode and,
* per the MIPI DCS spec, we should wait 5 ms after a soft reset. If we
* didn't, assume the worst case and wait 120 ms.
*/
if (dbi->reset)
usleep_range(5000, 20000);
else
msleep(120);
return 0;
}
/**
* mipi_dbi_poweron_reset - MIPI DBI poweron and reset
* @dbidev: MIPI DBI device structure
*
* This function enables the regulator if used and does a hardware and software
* reset.
*
* Returns:
* Zero on success, or a negative error code.
*/
int mipi_dbi_poweron_reset(struct mipi_dbi_dev *dbidev)
{
return mipi_dbi_poweron_reset_conditional(dbidev, false);
}
EXPORT_SYMBOL(mipi_dbi_poweron_reset);
/**
* mipi_dbi_poweron_conditional_reset - MIPI DBI poweron and conditional reset
* @dbidev: MIPI DBI device structure
*
* This function enables the regulator if used and if the display is off, it
* does a hardware and software reset. If mipi_dbi_display_is_on() determines
* that the display is on, no reset is performed.
*
* Returns:
* Zero if the controller was reset, 1 if the display was already on, or a
* negative error code.
*/
int mipi_dbi_poweron_conditional_reset(struct mipi_dbi_dev *dbidev)
{
return mipi_dbi_poweron_reset_conditional(dbidev, true);
}
EXPORT_SYMBOL(mipi_dbi_poweron_conditional_reset);
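/*
 * A typical enable callback built on the helpers above (a sketch;
 * my_display_init_commands() is a placeholder for the controller-specific
 * DCS init sequence):
 *
 *	static void my_pipe_enable(struct drm_simple_display_pipe *pipe,
 *				   struct drm_crtc_state *crtc_state,
 *				   struct drm_plane_state *plane_state)
 *	{
 *		struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
 *		int ret;
 *
 *		ret = mipi_dbi_poweron_conditional_reset(dbidev);
 *		if (ret < 0)
 *			return;
 *		if (ret == 0)
 *			my_display_init_commands(dbidev);	// only after a reset
 *		mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
 *	}
 */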
#if IS_ENABLED(CONFIG_SPI)
/**
* mipi_dbi_spi_cmd_max_speed - get the maximum SPI bus speed
* @spi: SPI device
* @len: The transfer buffer length.
*
* Many controllers have a max speed of 10MHz, but can be pushed way beyond
* that. Increase reliability by running pixel data at max speed and the rest
* at 10MHz, preventing transfer glitches from messing up the init settings.
*
* Returns:
* The SPI speed to use, clamped to 10MHz, for transfers of up to 64 bytes;
* zero (meaning use the device default) for larger ones.
*/
u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len)
{
if (len > 64)
return 0; /* use default */
return min_t(u32, 10000000, spi->max_speed_hz);
}
EXPORT_SYMBOL(mipi_dbi_spi_cmd_max_speed);
static bool mipi_dbi_machine_little_endian(void)
{
#if defined(__LITTLE_ENDIAN)
return true;
#else
return false;
#endif
}
/*
* MIPI DBI Type C Option 1
*
* If the SPI controller doesn't have 9 bits per word support,
* use blocks of 9 bytes to send 8x 9-bit words using an 8-bit SPI transfer.
* Pad partial blocks with MIPI_DCS_NOP (zero).
* This is how the D/C bit (x) is added:
* x7654321
* 0x765432
* 10x76543
* 210x7654
* 3210x765
* 43210x76
* 543210x7
* 6543210x
* 76543210
*/
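/*
 * Worked example: a single data byte 0xA5 with D/C = 1 becomes the 9-bit
 * word 1_1010_0101. Packed MSB-first, the first output byte is
 * BIT(7) | (0xA5 >> 1) = 0xD2, and the remaining low bit of 0xA5 is carried
 * into the top of the next output byte, matching the first two rows of the
 * diagram above.
 */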
static int mipi_dbi_spi1e_transfer(struct mipi_dbi *dbi, int dc,
const void *buf, size_t len,
unsigned int bpw)
{
bool swap_bytes = (bpw == 16 && mipi_dbi_machine_little_endian());
size_t chunk, max_chunk = dbi->tx_buf9_len;
struct spi_device *spi = dbi->spi;
struct spi_transfer tr = {
.tx_buf = dbi->tx_buf9,
.bits_per_word = 8,
};
struct spi_message m;
const u8 *src = buf;
int i, ret;
u8 *dst;
if (drm_debug_enabled(DRM_UT_DRIVER))
pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n",
__func__, dc, max_chunk);
tr.speed_hz = mipi_dbi_spi_cmd_max_speed(spi, len);
spi_message_init_with_transfers(&m, &tr, 1);
if (!dc) {
if (WARN_ON_ONCE(len != 1))
return -EINVAL;
/* Command: pad no-ops (zeroes) at the beginning of the block */
dst = dbi->tx_buf9;
memset(dst, 0, 9);
dst[8] = *src;
tr.len = 9;
return spi_sync(spi, &m);
}
/* max with room for adding one bit per byte */
max_chunk = max_chunk / 9 * 8;
/* but no bigger than len */
max_chunk = min(max_chunk, len);
/* 8 byte blocks */
max_chunk = max_t(size_t, 8, max_chunk & ~0x7);
while (len) {
size_t added = 0;
chunk = min(len, max_chunk);
len -= chunk;
dst = dbi->tx_buf9;
if (chunk < 8) {
u8 val, carry = 0;
/* Data: pad no-ops (zeroes) at the end of the block */
memset(dst, 0, 9);
if (swap_bytes) {
for (i = 1; i < (chunk + 1); i++) {
val = src[1];
*dst++ = carry | BIT(8 - i) | (val >> i);
carry = val << (8 - i);
i++;
val = src[0];
*dst++ = carry | BIT(8 - i) | (val >> i);
carry = val << (8 - i);
src += 2;
}
*dst++ = carry;
} else {
for (i = 1; i < (chunk + 1); i++) {
val = *src++;
*dst++ = carry | BIT(8 - i) | (val >> i);
carry = val << (8 - i);
}
*dst++ = carry;
}
chunk = 8;
added = 1;
} else {
for (i = 0; i < chunk; i += 8) {
if (swap_bytes) {
*dst++ = BIT(7) | (src[1] >> 1);
*dst++ = (src[1] << 7) | BIT(6) | (src[0] >> 2);
*dst++ = (src[0] << 6) | BIT(5) | (src[3] >> 3);
*dst++ = (src[3] << 5) | BIT(4) | (src[2] >> 4);
*dst++ = (src[2] << 4) | BIT(3) | (src[5] >> 5);
*dst++ = (src[5] << 3) | BIT(2) | (src[4] >> 6);
*dst++ = (src[4] << 2) | BIT(1) | (src[7] >> 7);
*dst++ = (src[7] << 1) | BIT(0);
*dst++ = src[6];
} else {
*dst++ = BIT(7) | (src[0] >> 1);
*dst++ = (src[0] << 7) | BIT(6) | (src[1] >> 2);
*dst++ = (src[1] << 6) | BIT(5) | (src[2] >> 3);
*dst++ = (src[2] << 5) | BIT(4) | (src[3] >> 4);
*dst++ = (src[3] << 4) | BIT(3) | (src[4] >> 5);
*dst++ = (src[4] << 3) | BIT(2) | (src[5] >> 6);
*dst++ = (src[5] << 2) | BIT(1) | (src[6] >> 7);
*dst++ = (src[6] << 1) | BIT(0);
*dst++ = src[7];
}
src += 8;
added++;
}
}
tr.len = chunk + added;
ret = spi_sync(spi, &m);
if (ret)
return ret;
}
return 0;
}
static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
const void *buf, size_t len,
unsigned int bpw)
{
struct spi_device *spi = dbi->spi;
struct spi_transfer tr = {
.bits_per_word = 9,
};
const u16 *src16 = buf;
const u8 *src8 = buf;
struct spi_message m;
size_t max_chunk;
u16 *dst16;
int ret;
if (!spi_is_bpw_supported(spi, 9))
return mipi_dbi_spi1e_transfer(dbi, dc, buf, len, bpw);
tr.speed_hz = mipi_dbi_spi_cmd_max_speed(spi, len);
max_chunk = dbi->tx_buf9_len;
dst16 = dbi->tx_buf9;
if (drm_debug_enabled(DRM_UT_DRIVER))
pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n",
__func__, dc, max_chunk);
max_chunk = min(max_chunk / 2, len);
spi_message_init_with_transfers(&m, &tr, 1);
tr.tx_buf = dst16;
while (len) {
size_t chunk = min(len, max_chunk);
unsigned int i;
if (bpw == 16 && mipi_dbi_machine_little_endian()) {
for (i = 0; i < (chunk * 2); i += 2) {
dst16[i] = *src16 >> 8;
dst16[i + 1] = *src16++ & 0xFF;
if (dc) {
dst16[i] |= 0x0100;
dst16[i + 1] |= 0x0100;
}
}
} else {
for (i = 0; i < chunk; i++) {
dst16[i] = *src8++;
if (dc)
dst16[i] |= 0x0100;
}
}
tr.len = chunk * 2;
len -= chunk;
ret = spi_sync(spi, &m);
if (ret)
return ret;
}
return 0;
}
static int mipi_dbi_typec1_command_read(struct mipi_dbi *dbi, u8 *cmd,
u8 *data, size_t len)
{
struct spi_device *spi = dbi->spi;
u32 speed_hz = min_t(u32, MIPI_DBI_MAX_SPI_READ_SPEED,
spi->max_speed_hz / 2);
struct spi_transfer tr[2] = {
{
.speed_hz = speed_hz,
.bits_per_word = 9,
.tx_buf = dbi->tx_buf9,
.len = 2,
}, {
.speed_hz = speed_hz,
.bits_per_word = 8,
.len = len,
.rx_buf = data,
},
};
struct spi_message m;
u16 *dst16;
int ret;
if (!len)
return -EINVAL;
if (!spi_is_bpw_supported(spi, 9)) {
/*
* FIXME: implement something like mipi_dbi_spi1e_transfer() but
* for reads using emulation.
*/
dev_err(&spi->dev,
"reading on hosts that don't support 9 bpw is not yet implemented\n");
return -EOPNOTSUPP;
}
/*
* Turn the 8-bit command into a 16-bit version of the command in the
* buffer. Only 9 bits of this will be used when executing the actual
* transfer.
*/
dst16 = dbi->tx_buf9;
dst16[0] = *cmd;
spi_message_init_with_transfers(&m, tr, ARRAY_SIZE(tr));
ret = spi_sync(spi, &m);
if (!ret)
MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
return ret;
}
static int mipi_dbi_typec1_command(struct mipi_dbi *dbi, u8 *cmd,
u8 *parameters, size_t num)
{
unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
int ret;
if (mipi_dbi_command_is_read(dbi, *cmd))
return mipi_dbi_typec1_command_read(dbi, cmd, parameters, num);
MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
ret = mipi_dbi_spi1_transfer(dbi, 0, cmd, 1, 8);
if (ret || !num)
return ret;
return mipi_dbi_spi1_transfer(dbi, 1, parameters, num, bpw);
}
/* MIPI DBI Type C Option 3 */
static int mipi_dbi_typec3_command_read(struct mipi_dbi *dbi, u8 *cmd,
u8 *data, size_t len)
{
struct spi_device *spi = dbi->spi;
u32 speed_hz = min_t(u32, MIPI_DBI_MAX_SPI_READ_SPEED,
spi->max_speed_hz / 2);
struct spi_transfer tr[2] = {
{
.speed_hz = speed_hz,
.tx_buf = cmd,
.len = 1,
}, {
.speed_hz = speed_hz,
.len = len,
},
};
struct spi_message m;
u8 *buf;
int ret;
if (!len)
return -EINVAL;
/*
* Support non-standard 24-bit and 32-bit Nokia read commands which
* start with a dummy clock, so we need to read an extra byte.
*/
if (*cmd == MIPI_DCS_GET_DISPLAY_ID ||
*cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
if (!(len == 3 || len == 4))
return -EINVAL;
tr[1].len = len + 1;
}
buf = kmalloc(tr[1].len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
tr[1].rx_buf = buf;
spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(dbi->dc, 0);
spi_message_init_with_transfers(&m, tr, ARRAY_SIZE(tr));
ret = spi_sync_locked(spi, &m);
spi_bus_unlock(spi->controller);
if (ret)
goto err_free;
if (tr[1].len == len) {
memcpy(data, buf, len);
} else {
unsigned int i;
for (i = 0; i < len; i++)
data[i] = (buf[i] << 1) | (buf[i + 1] >> 7);
}
MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
err_free:
kfree(buf);
return ret;
}
static int mipi_dbi_typec3_command(struct mipi_dbi *dbi, u8 *cmd,
u8 *par, size_t num)
{
struct spi_device *spi = dbi->spi;
unsigned int bpw = 8;
u32 speed_hz;
int ret;
if (mipi_dbi_command_is_read(dbi, *cmd))
return mipi_dbi_typec3_command_read(dbi, cmd, par, num);
MIPI_DBI_DEBUG_COMMAND(*cmd, par, num);
spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(dbi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, cmd, 1);
spi_bus_unlock(spi->controller);
if (ret || !num)
return ret;
if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !dbi->swap_bytes)
bpw = 16;
spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(dbi->dc, 1);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
spi_bus_unlock(spi->controller);
return ret;
}
/**
* mipi_dbi_spi_init - Initialize MIPI DBI SPI interface
* @spi: SPI device
* @dbi: MIPI DBI structure to initialize
* @dc: D/C gpio (optional)
*
 * This function sets &mipi_dbi->command and enables &mipi_dbi->read_commands
 * for the usual read commands. It should be followed by a call to
 * mipi_dbi_dev_init() or a driver-specific init.
 *
 * If @dc is set, a Type C Option 3 interface is assumed; if not,
 * Type C Option 1.
*
 * If the SPI master driver doesn't support the necessary bits per word,
 * the following transformation is used:
 *
 * - 9-bit: reorder the buffer as 9x 8-bit words, padded with no-op commands.
 * - 16-bit: if big endian, send as 8-bit; if little endian, swap the bytes.
*
* Returns:
* Zero on success, negative error code on failure.
*/
int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi,
struct gpio_desc *dc)
{
struct device *dev = &spi->dev;
int ret;
/*
* Even though it's not the SPI device that does DMA (the master does),
* the dma mask is necessary for the dma_alloc_wc() in the GEM code
* (e.g., drm_gem_dma_create()). The dma_addr returned will be a physical
* address which might be different from the bus address, but this is
* not a problem since the address will not be used.
* The virtual address is used in the transfer and the SPI core
* re-maps it on the SPI master device using the DMA streaming API
* (spi_map_buf()).
*/
if (!dev->coherent_dma_mask) {
ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_warn(dev, "Failed to set dma mask %d\n", ret);
return ret;
}
}
dbi->spi = spi;
dbi->read_commands = mipi_dbi_dcs_read_commands;
if (dc) {
dbi->command = mipi_dbi_typec3_command;
dbi->dc = dc;
if (mipi_dbi_machine_little_endian() && !spi_is_bpw_supported(spi, 16))
dbi->swap_bytes = true;
} else {
dbi->command = mipi_dbi_typec1_command;
dbi->tx_buf9_len = SZ_16K;
dbi->tx_buf9 = devm_kmalloc(dev, dbi->tx_buf9_len, GFP_KERNEL);
if (!dbi->tx_buf9)
return -ENOMEM;
}
mutex_init(&dbi->cmdlock);
DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000);
return 0;
}
EXPORT_SYMBOL(mipi_dbi_spi_init);
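/*
 * Illustrative sketch of how a panel driver might call mipi_dbi_spi_init()
 * from its SPI probe. "mydrm_driver" and the "dc" GPIO name are hypothetical
 * placeholders; only the mipi_dbi_spi_init() call follows the API above.
 */
#if 0 /* illustrative sketch, not built */
static int mydrm_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct mipi_dbi_dev *dbidev;
	struct gpio_desc *dc;
	int ret;

	dbidev = devm_drm_dev_alloc(dev, &mydrm_driver,
				    struct mipi_dbi_dev, drm);
	if (IS_ERR(dbidev))
		return PTR_ERR(dbidev);

	/* Optional D/C gpio; NULL selects Type C Option 1 (9-bit) mode */
	dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
	if (IS_ERR(dc))
		return PTR_ERR(dc);

	ret = mipi_dbi_spi_init(spi, &dbidev->dbi, dc);
	if (ret)
		return ret;

	/* ... followed by mipi_dbi_dev_init() and drm_dev_register() ... */
	return 0;
}
#endif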
/**
* mipi_dbi_spi_transfer - SPI transfer helper
* @spi: SPI device
* @speed_hz: Override speed (optional)
* @bpw: Bits per word
* @buf: Buffer to transfer
* @len: Buffer length
*
* This SPI transfer helper breaks up the transfer of @buf into chunks which
* the SPI controller driver can handle. The SPI bus must be locked when
* calling this.
*
* Returns:
* Zero on success, negative error code on failure.
*/
int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz,
u8 bpw, const void *buf, size_t len)
{
size_t max_chunk = spi_max_transfer_size(spi);
struct spi_transfer tr = {
.bits_per_word = bpw,
.speed_hz = speed_hz,
};
struct spi_message m;
size_t chunk;
int ret;
	/*
	 * __spi_validate() rejects partial transfers (xfer->len % w_size must
	 * be zero). Align max_chunk down to a multiple of 2 bytes (16 bits) so
	 * that no chunk is rejected.
	 */
max_chunk = ALIGN_DOWN(max_chunk, 2);
spi_message_init_with_transfers(&m, &tr, 1);
while (len) {
chunk = min(len, max_chunk);
tr.tx_buf = buf;
tr.len = chunk;
buf += chunk;
len -= chunk;
ret = spi_sync_locked(spi, &m);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL(mipi_dbi_spi_transfer);
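/*
 * Illustrative sketch of a caller honouring the bus-lock requirement above,
 * mirroring mipi_dbi_typec3_command(). A speed_hz of 0 leaves the transfer
 * at the device's default speed; the function name is hypothetical.
 */
#if 0 /* illustrative sketch, not built */
static int example_locked_transfer(struct mipi_dbi *dbi,
				   const void *buf, size_t len)
{
	struct spi_device *spi = dbi->spi;
	int ret;

	spi_bus_lock(spi->controller);
	gpiod_set_value_cansleep(dbi->dc, 1);	/* data, not command */
	ret = mipi_dbi_spi_transfer(spi, 0, 8, buf, len);
	spi_bus_unlock(spi->controller);
	return ret;
}
#endif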
#endif /* CONFIG_SPI */
#ifdef CONFIG_DEBUG_FS
static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct seq_file *m = file->private_data;
struct mipi_dbi_dev *dbidev = m->private;
u8 val, cmd = 0, parameters[64];
char *buf, *pos, *token;
int i, ret, idx;
if (!drm_dev_enter(&dbidev->drm, &idx))
return -ENODEV;
buf = memdup_user_nul(ubuf, count);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto err_exit;
}
/* strip trailing whitespace */
for (i = count - 1; i > 0; i--)
if (isspace(buf[i]))
buf[i] = '\0';
else
break;
i = 0;
pos = buf;
while (pos) {
token = strsep(&pos, " ");
if (!token) {
ret = -EINVAL;
goto err_free;
}
ret = kstrtou8(token, 16, &val);
if (ret < 0)
goto err_free;
if (token == buf)
cmd = val;
else
parameters[i++] = val;
if (i == 64) {
ret = -E2BIG;
goto err_free;
}
}
ret = mipi_dbi_command_buf(&dbidev->dbi, cmd, parameters, i);
err_free:
kfree(buf);
err_exit:
drm_dev_exit(idx);
return ret < 0 ? ret : count;
}
static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
{
struct mipi_dbi_dev *dbidev = m->private;
struct mipi_dbi *dbi = &dbidev->dbi;
u8 cmd, val[4];
int ret, idx;
size_t len;
if (!drm_dev_enter(&dbidev->drm, &idx))
return -ENODEV;
	/* cmd is u8, so iterating with cmd <= 255 would never terminate */
	for (cmd = 0; cmd < 255; cmd++) {
if (!mipi_dbi_command_is_read(dbi, cmd))
continue;
switch (cmd) {
case MIPI_DCS_READ_MEMORY_START:
case MIPI_DCS_READ_MEMORY_CONTINUE:
len = 2;
break;
case MIPI_DCS_GET_DISPLAY_ID:
len = 3;
break;
case MIPI_DCS_GET_DISPLAY_STATUS:
len = 4;
break;
default:
len = 1;
break;
}
seq_printf(m, "%02x: ", cmd);
ret = mipi_dbi_command_buf(dbi, cmd, val, len);
if (ret) {
seq_puts(m, "XX\n");
continue;
}
seq_printf(m, "%*phN\n", (int)len, val);
}
drm_dev_exit(idx);
return 0;
}
static int mipi_dbi_debugfs_command_open(struct inode *inode,
struct file *file)
{
return single_open(file, mipi_dbi_debugfs_command_show,
inode->i_private);
}
static const struct file_operations mipi_dbi_debugfs_command_fops = {
.owner = THIS_MODULE,
.open = mipi_dbi_debugfs_command_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = mipi_dbi_debugfs_command_write,
};
/**
* mipi_dbi_debugfs_init - Create debugfs entries
* @minor: DRM minor
*
* This function creates a 'command' debugfs file for sending commands to the
* controller or getting the read command values.
 * Drivers can use this as their &drm_driver.debugfs_init callback.
 */
void mipi_dbi_debugfs_init(struct drm_minor *minor)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(minor->dev);
umode_t mode = S_IFREG | S_IWUSR;
if (dbidev->dbi.read_commands)
mode |= S_IRUGO;
debugfs_create_file("command", mode, minor->debugfs_root, dbidev,
&mipi_dbi_debugfs_command_fops);
}
EXPORT_SYMBOL(mipi_dbi_debugfs_init);
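/*
 * Usage sketch: the file created above accepts hex bytes, first the command
 * and then its parameters. For example, exiting sleep mode (DCS 0x11) on the
 * first DRM minor could look like this (the minor number is system
 * dependent):
 *
 *   echo 11 > /sys/kernel/debug/dri/0/command
 *
 * Reading the file prints the values of all readable commands.
 */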
#endif
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/drm_mipi_dbi.c |
/*
* Copyright (c) 2016 Laurent Pinchart <[email protected]>
*
* DRM core format related functions
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <drm/drm_device.h>
#include <drm/drm_fourcc.h>
/**
* drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
 * @bpp: bits per pixel
* @depth: bit depth per pixel
*
* Computes a drm fourcc pixel format code for the given @bpp/@depth values.
* Useful in fbdev emulation code, since that deals in those values.
*/
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
{
uint32_t fmt = DRM_FORMAT_INVALID;
switch (bpp) {
case 1:
if (depth == 1)
fmt = DRM_FORMAT_C1;
break;
case 2:
if (depth == 2)
fmt = DRM_FORMAT_C2;
break;
case 4:
if (depth == 4)
fmt = DRM_FORMAT_C4;
break;
case 8:
if (depth == 8)
fmt = DRM_FORMAT_C8;
break;
case 16:
switch (depth) {
case 15:
fmt = DRM_FORMAT_XRGB1555;
break;
case 16:
fmt = DRM_FORMAT_RGB565;
break;
default:
break;
}
break;
case 24:
if (depth == 24)
fmt = DRM_FORMAT_RGB888;
break;
case 32:
switch (depth) {
case 24:
fmt = DRM_FORMAT_XRGB8888;
break;
case 30:
fmt = DRM_FORMAT_XRGB2101010;
break;
case 32:
fmt = DRM_FORMAT_ARGB8888;
break;
default:
break;
}
break;
default:
break;
}
return fmt;
}
EXPORT_SYMBOL(drm_mode_legacy_fb_format);
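/*
 * Illustrative sketch restating a few of the mappings from the switch above
 * as checks; the function is a hypothetical example, not built.
 */
#if 0 /* illustrative sketch, not built */
static void example_legacy_fb_format_mappings(void)
{
	WARN_ON(drm_mode_legacy_fb_format(16, 15) != DRM_FORMAT_XRGB1555);
	WARN_ON(drm_mode_legacy_fb_format(32, 24) != DRM_FORMAT_XRGB8888);
	WARN_ON(drm_mode_legacy_fb_format(32, 32) != DRM_FORMAT_ARGB8888);
	/* Unknown combinations fall through to DRM_FORMAT_INVALID */
	WARN_ON(drm_mode_legacy_fb_format(13, 13) != DRM_FORMAT_INVALID);
}
#endif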
/**
* drm_driver_legacy_fb_format - compute drm fourcc code from legacy description
* @dev: DRM device
 * @bpp: bits per pixel
* @depth: bit depth per pixel
*
* Computes a drm fourcc pixel format code for the given @bpp/@depth values.
 * Unlike drm_mode_legacy_fb_format(), this looks at the driver's mode_config,
* and depending on the &drm_mode_config.quirk_addfb_prefer_host_byte_order flag
* it returns little endian byte order or host byte order framebuffer formats.
*/
uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
uint32_t bpp, uint32_t depth)
{
uint32_t fmt = drm_mode_legacy_fb_format(bpp, depth);
if (dev->mode_config.quirk_addfb_prefer_host_byte_order) {
if (fmt == DRM_FORMAT_XRGB8888)
fmt = DRM_FORMAT_HOST_XRGB8888;
if (fmt == DRM_FORMAT_ARGB8888)
fmt = DRM_FORMAT_HOST_ARGB8888;
if (fmt == DRM_FORMAT_RGB565)
fmt = DRM_FORMAT_HOST_RGB565;
if (fmt == DRM_FORMAT_XRGB1555)
fmt = DRM_FORMAT_HOST_XRGB1555;
}
if (dev->mode_config.quirk_addfb_prefer_xbgr_30bpp &&
fmt == DRM_FORMAT_XRGB2101010)
fmt = DRM_FORMAT_XBGR2101010;
return fmt;
}
EXPORT_SYMBOL(drm_driver_legacy_fb_format);
/*
* Internal function to query information for a given format. See
* drm_format_info() for the public API.
*/
const struct drm_format_info *__drm_format_info(u32 format)
{
static const struct drm_format_info formats[] = {
{ .format = DRM_FORMAT_C1, .depth = 1, .num_planes = 1,
.char_per_block = { 1, }, .block_w = { 8, }, .block_h = { 1, }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
{ .format = DRM_FORMAT_C2, .depth = 2, .num_planes = 1,
.char_per_block = { 1, }, .block_w = { 4, }, .block_h = { 1, }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
{ .format = DRM_FORMAT_C4, .depth = 4, .num_planes = 1,
.char_per_block = { 1, }, .block_w = { 2, }, .block_h = { 1, }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
{ .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
{ .format = DRM_FORMAT_D1, .depth = 1, .num_planes = 1,
.char_per_block = { 1, }, .block_w = { 8, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_D2, .depth = 2, .num_planes = 1,
.char_per_block = { 1, }, .block_w = { 4, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_D4, .depth = 4, .num_planes = 1,
.char_per_block = { 1, }, .block_w = { 2, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_D8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_R1, .depth = 1, .num_planes = 1,
.char_per_block = { 1, }, .block_w = { 8, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_R2, .depth = 2, .num_planes = 1,
.char_per_block = { 1, }, .block_w = { 4, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_R4, .depth = 4, .num_planes = 1,
.char_per_block = { 1, }, .block_w = { 2, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_R8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_R10, .depth = 10, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_R12, .depth = 12, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XBGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ARGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XBGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ARGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
#ifdef __BIG_ENDIAN
{ .format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
#endif
{ .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGB565_A8, .depth = 24, .num_planes = 2, .cpp = { 2, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGR565_A8, .depth = 24, .num_planes = 2, .cpp = { 2, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_AXBXGXRX106106106106, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XBGR16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ARGB16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XBGR8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true },
{ .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true },
{ .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_VUY888, .depth = 0, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
{ .format = DRM_FORMAT_Y210, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_Y212, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_Y216, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_Y410, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
{ .format = DRM_FORMAT_Y412, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
{ .format = DRM_FORMAT_Y416, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
{ .format = DRM_FORMAT_XVYU2101010, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_XVYU12_16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_XVYU16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1,
.char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
.hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
{ .format = DRM_FORMAT_X0L0, .depth = 0, .num_planes = 1,
.char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_Y0L2, .depth = 0, .num_planes = 1,
.char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
.hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
{ .format = DRM_FORMAT_X0L2, .depth = 0, .num_planes = 1,
.char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_P010, .depth = 0, .num_planes = 2,
.char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
{ .format = DRM_FORMAT_P012, .depth = 0, .num_planes = 2,
.char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
{ .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2,
.char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
{ .format = DRM_FORMAT_P210, .depth = 0,
.num_planes = 2, .char_per_block = { 2, 4, 0 },
.block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2,
.vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_VUY101010, .depth = 0,
.num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 1, .vsub = 1,
.is_yuv = true },
{ .format = DRM_FORMAT_YUV420_8BIT, .depth = 0,
.num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
.is_yuv = true },
{ .format = DRM_FORMAT_YUV420_10BIT, .depth = 0,
.num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
.is_yuv = true },
{ .format = DRM_FORMAT_NV15, .depth = 0,
.num_planes = 2, .char_per_block = { 5, 5, 0 },
.block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2,
.vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_Q410, .depth = 0,
.num_planes = 3, .char_per_block = { 2, 2, 2 },
.block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1,
.vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_Q401, .depth = 0,
.num_planes = 3, .char_per_block = { 2, 2, 2 },
.block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1,
.vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2,
.char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
if (formats[i].format == format)
return &formats[i];
}
return NULL;
}
/**
* drm_format_info - query information for a given format
* @format: pixel format (DRM_FORMAT_*)
*
* The caller should only pass a supported pixel format to this function.
* Unsupported pixel formats will generate a warning in the kernel log.
*
* Returns:
* The instance of struct drm_format_info that describes the pixel format, or
* NULL if the format is unsupported.
*/
const struct drm_format_info *drm_format_info(u32 format)
{
const struct drm_format_info *info;
info = __drm_format_info(format);
WARN_ON(!info);
return info;
}
EXPORT_SYMBOL(drm_format_info);
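/*
 * Illustrative sketch of querying the table above for a two-plane YUV
 * format; the checks restate the DRM_FORMAT_NV12 entry. Hypothetical
 * example, not built.
 */
#if 0 /* illustrative sketch, not built */
static void example_format_info_lookup(void)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

	if (!info)
		return;
	WARN_ON(info->num_planes != 2);			/* Y plane + CbCr plane */
	WARN_ON(info->cpp[0] != 1 || info->cpp[1] != 2);
	WARN_ON(info->hsub != 2 || info->vsub != 2);	/* 4:2:0 subsampling */
}
#endif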
/**
* drm_get_format_info - query information for a given framebuffer configuration
* @dev: DRM device
* @mode_cmd: metadata from the userspace fb creation request
*
* Returns:
* The instance of struct drm_format_info that describes the pixel format, or
* NULL if the format is unsupported.
*/
const struct drm_format_info *
drm_get_format_info(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct drm_format_info *info = NULL;
if (dev->mode_config.funcs->get_format_info)
info = dev->mode_config.funcs->get_format_info(mode_cmd);
if (!info)
info = drm_format_info(mode_cmd->pixel_format);
return info;
}
EXPORT_SYMBOL(drm_get_format_info);
/**
* drm_format_info_block_width - width in pixels of block.
* @info: pixel format info
* @plane: plane index
*
* Returns:
* The width in pixels of a block, depending on the plane index.
*/
unsigned int drm_format_info_block_width(const struct drm_format_info *info,
int plane)
{
if (!info || plane < 0 || plane >= info->num_planes)
return 0;
if (!info->block_w[plane])
return 1;
return info->block_w[plane];
}
EXPORT_SYMBOL(drm_format_info_block_width);
/**
* drm_format_info_block_height - height in pixels of a block
* @info: pixel format info
* @plane: plane index
*
* Returns:
* The height in pixels of a block, depending on the plane index.
*/
unsigned int drm_format_info_block_height(const struct drm_format_info *info,
int plane)
{
if (!info || plane < 0 || plane >= info->num_planes)
return 0;
if (!info->block_h[plane])
return 1;
return info->block_h[plane];
}
EXPORT_SYMBOL(drm_format_info_block_height);
/**
* drm_format_info_bpp - number of bits per pixel
* @info: pixel format info
* @plane: plane index
*
* Returns:
* The actual number of bits per pixel, depending on the plane index.
*/
unsigned int drm_format_info_bpp(const struct drm_format_info *info, int plane)
{
if (!info || plane < 0 || plane >= info->num_planes)
return 0;
return info->char_per_block[plane] * 8 /
(drm_format_info_block_width(info, plane) *
drm_format_info_block_height(info, plane));
}
EXPORT_SYMBOL(drm_format_info_bpp);
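/*
 * Worked example: DRM_FORMAT_C1 above has char_per_block = 1 with 8x1 pixel
 * blocks, i.e. one byte covers eight pixels, so the effective rate is
 *
 *   1 * 8 / (8 * 1) = 1 bit per pixel
 */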
/**
* drm_format_info_min_pitch - computes the minimum required pitch in bytes
* @info: pixel format info
* @plane: plane index
* @buffer_width: buffer width in pixels
*
* Returns:
 * The minimum required pitch in bytes for a buffer, taking into account the
 * pixel format information and the buffer width.
*/
uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
int plane, unsigned int buffer_width)
{
if (!info || plane < 0 || plane >= info->num_planes)
return 0;
return DIV_ROUND_UP_ULL((u64)buffer_width * info->char_per_block[plane],
drm_format_info_block_width(info, plane) *
drm_format_info_block_height(info, plane));
}
EXPORT_SYMBOL(drm_format_info_min_pitch);
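/*
 * Worked example for an assumed 1920-pixel-wide NV12 framebuffer. Callers
 * pass the per-plane width, so the subsampled chroma plane uses
 * 1920 / hsub = 960. Hypothetical check, not built.
 */
#if 0 /* illustrative sketch, not built */
static void example_min_pitch_nv12(void)
{
	const struct drm_format_info *nv12 = drm_format_info(DRM_FORMAT_NV12);

	/* Luma plane: 1920 pixels * 1 byte */
	WARN_ON(drm_format_info_min_pitch(nv12, 0, 1920) != 1920);
	/* Chroma plane: 960 CbCr pairs * 2 bytes */
	WARN_ON(drm_format_info_min_pitch(nv12, 1, 960) != 1920);
}
#endif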
| linux-master | drivers/gpu/drm/drm_fourcc.c |
/*
* \author Rickard E. (Rik) Faith <[email protected]>
* \author Daryll Strauss <[email protected]>
* \author Gareth Hughes <[email protected]>
*/
/*
* Created: Mon Jan 4 08:58:31 1999 by [email protected]
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <drm/drm_client.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
bool drm_dev_needs_global_mutex(struct drm_device *dev)
{
/*
* Legacy drivers rely on all kinds of BKL locking semantics, don't
* bother. They also still need BKL locking for their ioctls, so better
* safe than sorry.
*/
if (drm_core_check_feature(dev, DRIVER_LEGACY))
return true;
/*
* The deprecated ->load callback must be called after the driver is
* already registered. This means such drivers rely on the BKL to make
* sure an open can't proceed until the driver is actually fully set up.
* Similar hilarity holds for the unload callback.
*/
if (dev->driver->load || dev->driver->unload)
return true;
/*
* Drivers with the lastclose callback assume that it's synchronized
* against concurrent opens, which again needs the BKL. The proper fix
* is to use the drm_client infrastructure with proper locking for each
* client.
*/
if (dev->driver->lastclose)
return true;
return false;
}
/**
* DOC: file operations
*
* Drivers must define the file operations structure that forms the DRM
* userspace API entry point, even though most of those operations are
* implemented in the DRM core. The resulting &struct file_operations must be
 * stored in the &drm_driver.fops field. The mandatory functions are drm_open(),
 * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
 * Note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n, so there's no
* need to sprinkle #ifdef into the code. Drivers which implement private ioctls
* that require 32/64 bit compatibility support must provide their own
* &file_operations.compat_ioctl handler that processes private ioctls and calls
* drm_compat_ioctl() for core ioctls.
*
* In addition drm_read() and drm_poll() provide support for DRM events. DRM
* events are a generic and extensible means to send asynchronous events to
* userspace through the file descriptor. They are used to send vblank event and
* page flip completions by the KMS API. But drivers can also use it for their
* own needs, e.g. to signal completion of rendering.
*
* For the driver-side event interface see drm_event_reserve_init() and
* drm_send_event() as the main starting points.
*
* The memory mapping implementation will vary depending on how the driver
* manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
* function, modern drivers should use one of the provided memory-manager
* specific implementations. For GEM-based drivers this is drm_gem_mmap().
*
* No other file operations are supported by the DRM userspace API. Overall the
* following is an example &file_operations structure::
*
 *     static const struct file_operations example_drm_fops = {
* .owner = THIS_MODULE,
* .open = drm_open,
* .release = drm_release,
* .unlocked_ioctl = drm_ioctl,
* .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
* .poll = drm_poll,
* .read = drm_read,
* .llseek = no_llseek,
* .mmap = drm_gem_mmap,
* };
*
* For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
* DMA based drivers there is the DEFINE_DRM_GEM_DMA_FOPS() macro to make this
* simpler.
*
* The driver's &file_operations must be stored in &drm_driver.fops.
*
* For driver-private IOCTL handling see the more detailed discussion in
* :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`.
*/
/**
* drm_file_alloc - allocate file context
* @minor: minor to allocate on
*
* This allocates a new DRM file context. It is not linked into any context and
* can be used by the caller freely. Note that the context keeps a pointer to
* @minor, so it must be freed before @minor is.
*
* RETURNS:
* Pointer to newly allocated context, ERR_PTR on failure.
*/
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
static atomic64_t ident = ATOMIC_INIT(0);
struct drm_device *dev = minor->dev;
struct drm_file *file;
int ret;
file = kzalloc(sizeof(*file), GFP_KERNEL);
if (!file)
return ERR_PTR(-ENOMEM);
/* Get a unique identifier for fdinfo: */
file->client_id = atomic64_inc_return(&ident);
file->pid = get_pid(task_tgid(current));
file->minor = minor;
/* for compatibility root is always authenticated */
file->authenticated = capable(CAP_SYS_ADMIN);
INIT_LIST_HEAD(&file->lhead);
INIT_LIST_HEAD(&file->fbs);
mutex_init(&file->fbs_lock);
INIT_LIST_HEAD(&file->blobs);
INIT_LIST_HEAD(&file->pending_event_list);
INIT_LIST_HEAD(&file->event_list);
init_waitqueue_head(&file->event_wait);
file->event_space = 4096; /* set aside 4k for event buffer */
spin_lock_init(&file->master_lookup_lock);
mutex_init(&file->event_read_lock);
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_open(dev, file);
if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
drm_syncobj_open(file);
drm_prime_init_file_private(&file->prime);
if (dev->driver->open) {
ret = dev->driver->open(dev, file);
if (ret < 0)
goto out_prime_destroy;
}
return file;
out_prime_destroy:
drm_prime_destroy_file_private(&file->prime);
if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
drm_syncobj_release(file);
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, file);
put_pid(file->pid);
kfree(file);
return ERR_PTR(ret);
}
static void drm_events_release(struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
struct drm_pending_event *e, *et;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
/* Unlink pending events */
list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
pending_link) {
list_del(&e->pending_link);
e->file_priv = NULL;
}
/* Remove unconsumed events */
list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
list_del(&e->link);
kfree(e);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
/**
* drm_file_free - free file context
* @file: context to free, or NULL
*
* This destroys and deallocates a DRM file context previously allocated via
* drm_file_alloc(). The caller must make sure to unlink it from any contexts
* before calling this.
*
* If NULL is passed, this is a no-op.
*/
void drm_file_free(struct drm_file *file)
{
struct drm_device *dev;
if (!file)
return;
dev = file->minor->dev;
drm_dbg_core(dev, "comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
current->comm, task_pid_nr(current),
(long)old_encode_dev(file->minor->kdev->devt),
atomic_read(&dev->open_count));
#ifdef CONFIG_DRM_LEGACY
if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
dev->driver->preclose)
dev->driver->preclose(dev, file);
#endif
if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_legacy_lock_release(dev, file->filp);
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
drm_legacy_reclaim_buffers(dev, file);
drm_events_release(file);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
drm_fb_release(file);
drm_property_destroy_user_blobs(dev, file);
}
if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
drm_syncobj_release(file);
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, file);
drm_legacy_ctxbitmap_flush(dev, file);
if (drm_is_primary_client(file))
drm_master_release(file);
if (dev->driver->postclose)
dev->driver->postclose(dev, file);
drm_prime_destroy_file_private(&file->prime);
WARN_ON(!list_empty(&file->event_list));
put_pid(file->pid);
kfree(file);
}
static void drm_close_helper(struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
mutex_lock(&dev->filelist_mutex);
list_del(&file_priv->lhead);
mutex_unlock(&dev->filelist_mutex);
drm_file_free(file_priv);
}
/*
* Check whether DRI will run on this CPU.
*
* \return non-zero if the DRI will run on this CPU, or zero otherwise.
*/
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
return 0; /* No cmpxchg before v9 sparc. */
#endif
return 1;
}
/*
* Called whenever a process opens a drm node
*
* \param filp file pointer.
* \param minor acquired minor-object.
* \return zero on success or a negative number on failure.
*
* Creates and initializes a drm_file structure for the file private data in \p
* filp and add it into the double linked list in \p dev.
*/
int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct drm_file *priv;
int ret;
if (filp->f_flags & O_EXCL)
return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid())
return -EINVAL;
if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
return -EINVAL;
drm_dbg_core(dev, "comm=\"%s\", pid=%d, minor=%d\n",
current->comm, task_pid_nr(current), minor->index);
priv = drm_file_alloc(minor);
if (IS_ERR(priv))
return PTR_ERR(priv);
if (drm_is_primary_client(priv)) {
ret = drm_master_open(priv);
if (ret) {
drm_file_free(priv);
return ret;
}
}
filp->private_data = priv;
filp->f_mode |= FMODE_UNSIGNED_OFFSET;
priv->filp = filp;
mutex_lock(&dev->filelist_mutex);
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->filelist_mutex);
#ifdef CONFIG_DRM_LEGACY
#ifdef __alpha__
/*
* Default the hose
*/
if (!dev->hose) {
struct pci_dev *pci_dev;
pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
if (pci_dev) {
dev->hose = pci_dev->sysdata;
pci_dev_put(pci_dev);
}
if (!dev->hose) {
struct pci_bus *b = list_entry(pci_root_buses.next,
struct pci_bus, node);
if (b)
dev->hose = b->sysdata;
}
}
#endif
#endif
return 0;
}
/**
* drm_open - open method for DRM file
* @inode: device inode
* @filp: file pointer.
*
* This function must be used by drivers as their &file_operations.open method.
* It looks up the correct DRM device and instantiates all the per-file
* resources for it. It also calls the &drm_driver.open driver callback.
*
* RETURNS:
*
* 0 on success or negative errno value on failure.
*/
int drm_open(struct inode *inode, struct file *filp)
{
struct drm_device *dev;
struct drm_minor *minor;
int retcode;
int need_setup = 0;
minor = drm_minor_acquire(iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
dev = minor->dev;
if (drm_dev_needs_global_mutex(dev))
mutex_lock(&drm_global_mutex);
if (!atomic_fetch_inc(&dev->open_count))
need_setup = 1;
/* share address_space across all char-devs of a single device */
filp->f_mapping = dev->anon_inode->i_mapping;
retcode = drm_open_helper(filp, minor);
if (retcode)
goto err_undo;
if (need_setup) {
retcode = drm_legacy_setup(dev);
if (retcode) {
drm_close_helper(filp);
goto err_undo;
}
}
if (drm_dev_needs_global_mutex(dev))
mutex_unlock(&drm_global_mutex);
return 0;
err_undo:
atomic_dec(&dev->open_count);
if (drm_dev_needs_global_mutex(dev))
mutex_unlock(&drm_global_mutex);
drm_minor_release(minor);
return retcode;
}
EXPORT_SYMBOL(drm_open);
void drm_lastclose(struct drm_device * dev)
{
drm_dbg_core(dev, "\n");
if (dev->driver->lastclose)
dev->driver->lastclose(dev);
drm_dbg_core(dev, "driver lastclose completed\n");
if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_legacy_dev_reinit(dev);
drm_client_dev_restore(dev);
}
/**
* drm_release - release method for DRM file
* @inode: device inode
* @filp: file pointer.
*
* This function must be used by drivers as their &file_operations.release
* method. It frees any resources associated with the open file, and calls the
* &drm_driver.postclose driver callback. If this is the last open file for the
* DRM device also proceeds to call the &drm_driver.lastclose driver callback.
*
* RETURNS:
*
* Always succeeds and returns 0.
*/
int drm_release(struct inode *inode, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
struct drm_minor *minor = file_priv->minor;
struct drm_device *dev = minor->dev;
if (drm_dev_needs_global_mutex(dev))
mutex_lock(&drm_global_mutex);
drm_dbg_core(dev, "open_count = %d\n", atomic_read(&dev->open_count));
drm_close_helper(filp);
if (atomic_dec_and_test(&dev->open_count))
drm_lastclose(dev);
if (drm_dev_needs_global_mutex(dev))
mutex_unlock(&drm_global_mutex);
drm_minor_release(minor);
return 0;
}
EXPORT_SYMBOL(drm_release);
/**
* drm_release_noglobal - release method for DRM file
* @inode: device inode
* @filp: file pointer.
*
* This function may be used by drivers as their &file_operations.release
* method. It frees any resources associated with the open file prior to taking
* the drm_global_mutex, which then calls the &drm_driver.postclose driver
* callback. If this is the last open file for the DRM device also proceeds to
* call the &drm_driver.lastclose driver callback.
*
* RETURNS:
*
* Always succeeds and returns 0.
*/
int drm_release_noglobal(struct inode *inode, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
struct drm_minor *minor = file_priv->minor;
struct drm_device *dev = minor->dev;
drm_close_helper(filp);
if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
drm_lastclose(dev);
mutex_unlock(&drm_global_mutex);
}
drm_minor_release(minor);
return 0;
}
EXPORT_SYMBOL(drm_release_noglobal);
/**
* drm_read - read method for DRM file
* @filp: file pointer
* @buffer: userspace destination pointer for the read
* @count: count in bytes to read
* @offset: offset to read
*
* This function must be used by drivers as their &file_operations.read
* method if they use DRM events for asynchronous signalling to userspace.
* Since events are used by the KMS API for vblank and page flip completion this
* means all modern display drivers must use it.
*
 * @offset is ignored; DRM events are read like a pipe. Polling support is
* provided by drm_poll().
*
* This function will only ever read a full event. Therefore userspace must
* supply a big enough buffer to fit any event to ensure forward progress. Since
* the maximum event space is currently 4K it's recommended to just use that for
* safety.
*
* RETURNS:
*
* Number of bytes read (always aligned to full events, and can be 0) or a
* negative error code on failure.
*/
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset)
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
ssize_t ret;
ret = mutex_lock_interruptible(&file_priv->event_read_lock);
if (ret)
return ret;
for (;;) {
struct drm_pending_event *e = NULL;
spin_lock_irq(&dev->event_lock);
if (!list_empty(&file_priv->event_list)) {
e = list_first_entry(&file_priv->event_list,
struct drm_pending_event, link);
file_priv->event_space += e->event->length;
list_del(&e->link);
}
spin_unlock_irq(&dev->event_lock);
if (e == NULL) {
if (ret)
break;
if (filp->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
mutex_unlock(&file_priv->event_read_lock);
ret = wait_event_interruptible(file_priv->event_wait,
!list_empty(&file_priv->event_list));
if (ret >= 0)
ret = mutex_lock_interruptible(&file_priv->event_read_lock);
if (ret)
return ret;
} else {
unsigned length = e->event->length;
if (length > count - ret) {
put_back_event:
spin_lock_irq(&dev->event_lock);
file_priv->event_space -= length;
list_add(&e->link, &file_priv->event_list);
spin_unlock_irq(&dev->event_lock);
wake_up_interruptible_poll(&file_priv->event_wait,
EPOLLIN | EPOLLRDNORM);
break;
}
if (copy_to_user(buffer + ret, e->event, length)) {
if (ret == 0)
ret = -EFAULT;
goto put_back_event;
}
ret += length;
kfree(e);
}
}
mutex_unlock(&file_priv->event_read_lock);
return ret;
}
EXPORT_SYMBOL(drm_read);
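/*
 * Userspace-side sketch of a read loop honouring the contract above: a 4k
 * buffer is large enough for any event. This is plain userspace C against
 * the uapi headers, not kernel code; "handle_event" is hypothetical.
 */
#if 0 /* userspace-side sketch, not kernel code */
static void example_read_events(int drm_fd)
{
	char buf[4096];	/* fits any event, see the maximum event space note */
	ssize_t len = read(drm_fd, buf, sizeof(buf));
	ssize_t off = 0;

	while (len > 0 && off < len) {
		struct drm_event *ev = (struct drm_event *)&buf[off];

		handle_event(ev);	/* hypothetical dispatcher */
		off += ev->length;	/* events are packed back to back */
	}
}
#endif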
/**
* drm_poll - poll method for DRM file
* @filp: file pointer
* @wait: poll waiter table
*
 * This function must be used by drivers as their &file_operations.poll method
 * if they use DRM events for asynchronous signalling to userspace. Since
* events are used by the KMS API for vblank and page flip completion this means
* all modern display drivers must use it.
*
* See also drm_read().
*
* RETURNS:
*
* Mask of POLL flags indicating the current status of the file.
*/
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
{
struct drm_file *file_priv = filp->private_data;
__poll_t mask = 0;
poll_wait(filp, &file_priv->event_wait, wait);
if (!list_empty(&file_priv->event_list))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
}
EXPORT_SYMBOL(drm_poll);
/**
* drm_event_reserve_init_locked - init a DRM event and reserve space for it
* @dev: DRM device
* @file_priv: DRM file private data
* @p: tracking structure for the pending event
* @e: actual event data to deliver to userspace
*
* This function prepares the passed in event for eventual delivery. If the event
* doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
* drm_event_cancel_free(). Successfully initialized events should be sent out
* using drm_send_event() or drm_send_event_locked() to signal completion of the
* asynchronous event to userspace.
*
* If callers embedded @p into a larger structure it must be allocated with
* kmalloc and @p must be the first member element.
*
* This is the locked version of drm_event_reserve_init() for callers which
* already hold &drm_device.event_lock.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
int drm_event_reserve_init_locked(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_pending_event *p,
struct drm_event *e)
{
if (file_priv->event_space < e->length)
return -ENOMEM;
file_priv->event_space -= e->length;
p->event = e;
list_add(&p->pending_link, &file_priv->pending_event_list);
p->file_priv = file_priv;
return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);
/**
* drm_event_reserve_init - init a DRM event and reserve space for it
* @dev: DRM device
* @file_priv: DRM file private data
* @p: tracking structure for the pending event
* @e: actual event data to deliver to userspace
*
* This function prepares the passed in event for eventual delivery. If the event
* doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
* drm_event_cancel_free(). Successfully initialized events should be sent out
* using drm_send_event() or drm_send_event_locked() to signal completion of the
* asynchronous event to userspace.
*
* If callers embedded @p into a larger structure it must be allocated with
* kmalloc and @p must be the first member element.
*
* Callers which already hold &drm_device.event_lock should use
* drm_event_reserve_init_locked() instead.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
int drm_event_reserve_init(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_pending_event *p,
struct drm_event *e)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&dev->event_lock, flags);
ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
spin_unlock_irqrestore(&dev->event_lock, flags);
return ret;
}
EXPORT_SYMBOL(drm_event_reserve_init);
/**
* drm_event_cancel_free - free a DRM event and release its space
* @dev: DRM device
* @p: tracking structure for the pending event
*
* This function frees the event @p initialized with drm_event_reserve_init()
* and releases any allocated space. It is used to cancel an event when the
* nonblocking operation could not be submitted and needed to be aborted.
*/
void drm_event_cancel_free(struct drm_device *dev,
struct drm_pending_event *p)
{
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
if (p->file_priv) {
p->file_priv->event_space += p->event->length;
list_del(&p->pending_link);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
if (p->fence)
dma_fence_put(p->fence);
kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);
static void drm_send_event_helper(struct drm_device *dev,
struct drm_pending_event *e, ktime_t timestamp)
{
assert_spin_locked(&dev->event_lock);
if (e->completion) {
complete_all(e->completion);
e->completion_release(e->completion);
e->completion = NULL;
}
if (e->fence) {
if (timestamp)
dma_fence_signal_timestamp(e->fence, timestamp);
else
dma_fence_signal(e->fence);
dma_fence_put(e->fence);
}
if (!e->file_priv) {
kfree(e);
return;
}
list_del(&e->pending_link);
list_add_tail(&e->link,
&e->file_priv->event_list);
wake_up_interruptible_poll(&e->file_priv->event_wait,
EPOLLIN | EPOLLRDNORM);
}
/**
* drm_send_event_timestamp_locked - send DRM event to file descriptor
* @dev: DRM device
* @e: DRM event to deliver
* @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
* time domain
*
* This function sends the event @e, initialized with drm_event_reserve_init(),
* to its associated userspace DRM file. Callers must already hold
* &drm_device.event_lock.
*
* Note that the core will take care of unlinking and disarming events when the
* corresponding DRM file is closed. Drivers need not worry about whether the
* DRM file for this event still exists and can call this function upon
* completion of the asynchronous work unconditionally.
*/
void drm_send_event_timestamp_locked(struct drm_device *dev,
struct drm_pending_event *e, ktime_t timestamp)
{
drm_send_event_helper(dev, e, timestamp);
}
EXPORT_SYMBOL(drm_send_event_timestamp_locked);
/**
* drm_send_event_locked - send DRM event to file descriptor
* @dev: DRM device
* @e: DRM event to deliver
*
* This function sends the event @e, initialized with drm_event_reserve_init(),
* to its associated userspace DRM file. Callers must already hold
* &drm_device.event_lock, see drm_send_event() for the unlocked version.
*
* Note that the core will take care of unlinking and disarming events when the
* corresponding DRM file is closed. Drivers need not worry about whether the
* DRM file for this event still exists and can call this function upon
* completion of the asynchronous work unconditionally.
*/
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
drm_send_event_helper(dev, e, 0);
}
EXPORT_SYMBOL(drm_send_event_locked);
/**
* drm_send_event - send DRM event to file descriptor
* @dev: DRM device
* @e: DRM event to deliver
*
* This function sends the event @e, initialized with drm_event_reserve_init(),
* to its associated userspace DRM file. This function acquires
* &drm_device.event_lock, see drm_send_event_locked() for callers which already
* hold this lock.
*
* Note that the core will take care of unlinking and disarming events when the
* corresponding DRM file is closed. Drivers need not worry about whether the
* DRM file for this event still exists and can call this function upon
* completion of the asynchronous work unconditionally.
*/
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
{
unsigned long irqflags;
spin_lock_irqsave(&dev->event_lock, irqflags);
drm_send_event_helper(dev, e, 0);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);
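/*
 * Illustrative sketch of the usual event life cycle from a nonblocking
 * ioctl, combining the helpers above. "struct my_event", MY_EVENT_TYPE and
 * submit_async_work() are hypothetical.
 */
#if 0 /* illustrative sketch, not built */
#define MY_EVENT_TYPE 0x80000000	/* hypothetical driver-specific type */

struct my_event {
	struct drm_pending_event base;	/* must be the first member */
	struct drm_event event;
};

static int example_queue_event(struct drm_device *dev,
			       struct drm_file *file_priv)
{
	struct my_event *e;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;
	e->event.type = MY_EVENT_TYPE;
	e->event.length = sizeof(e->event);

	ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event);
	if (ret) {
		kfree(e);
		return ret;
	}

	ret = submit_async_work(e);	/* hypothetical async submission */
	if (ret) {
		/* Never submitted: return the space and free the event */
		drm_event_cancel_free(dev, &e->base);
		return ret;
	}
	/* The completion path later calls drm_send_event(dev, &e->base) */
	return 0;
}
#endif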
static void print_size(struct drm_printer *p, const char *stat,
const char *region, u64 sz)
{
const char *units[] = {"", " KiB", " MiB"};
unsigned u;
for (u = 0; u < ARRAY_SIZE(units) - 1; u++) {
if (sz < SZ_1K)
break;
sz = div_u64(sz, SZ_1K);
}
drm_printf(p, "drm-%s-%s:\t%llu%s\n", stat, region, sz, units[u]);
}
/**
* drm_print_memory_stats - A helper to print memory stats
* @p: The printer to print output to
* @stats: The collected memory stats
* @supported_status: Bitmask of optional stats which are available
* @region: The memory region
 */
void drm_print_memory_stats(struct drm_printer *p,
const struct drm_memory_stats *stats,
enum drm_gem_object_status supported_status,
const char *region)
{
print_size(p, "total", region, stats->private + stats->shared);
print_size(p, "shared", region, stats->shared);
print_size(p, "active", region, stats->active);
if (supported_status & DRM_GEM_OBJECT_RESIDENT)
print_size(p, "resident", region, stats->resident);
if (supported_status & DRM_GEM_OBJECT_PURGEABLE)
print_size(p, "purgeable", region, stats->purgeable);
}
EXPORT_SYMBOL(drm_print_memory_stats);
/**
* drm_show_memory_stats - Helper to collect and show standard fdinfo memory stats
* @p: the printer to print output to
* @file: the DRM file
*
* Helper to iterate over GEM objects with a handle allocated in the specified
* file.
*/
void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
{
struct drm_gem_object *obj;
struct drm_memory_stats status = {};
	enum drm_gem_object_status supported_status = 0;
int id;
spin_lock(&file->table_lock);
idr_for_each_entry (&file->object_idr, obj, id) {
enum drm_gem_object_status s = 0;
if (obj->funcs && obj->funcs->status) {
s = obj->funcs->status(obj);
supported_status = DRM_GEM_OBJECT_RESIDENT |
DRM_GEM_OBJECT_PURGEABLE;
}
if (obj->handle_count > 1) {
status.shared += obj->size;
} else {
status.private += obj->size;
}
if (s & DRM_GEM_OBJECT_RESIDENT) {
status.resident += obj->size;
} else {
/* If already purged or not yet backed by pages, don't
* count it as purgeable:
*/
s &= ~DRM_GEM_OBJECT_PURGEABLE;
}
if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) {
status.active += obj->size;
/* If still active, don't count as purgeable: */
s &= ~DRM_GEM_OBJECT_PURGEABLE;
}
if (s & DRM_GEM_OBJECT_PURGEABLE)
status.purgeable += obj->size;
}
spin_unlock(&file->table_lock);
drm_print_memory_stats(p, &status, supported_status, "memory");
}
EXPORT_SYMBOL(drm_show_memory_stats);
/**
* drm_show_fdinfo - helper for drm file fops
* @m: output stream
* @f: the device file instance
*
* Helper to implement fdinfo, for userspace to query usage stats, etc, of a
* process using the GPU. See also &drm_driver.show_fdinfo.
*
* For text output format description please see Documentation/gpu/drm-usage-stats.rst
*/
void drm_show_fdinfo(struct seq_file *m, struct file *f)
{
struct drm_file *file = f->private_data;
struct drm_device *dev = file->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name);
drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id);
if (dev_is_pci(dev->dev)) {
struct pci_dev *pdev = to_pci_dev(dev->dev);
drm_printf(&p, "drm-pdev:\t%04x:%02x:%02x.%d\n",
pci_domain_nr(pdev->bus), pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
}
if (dev->driver->show_fdinfo)
dev->driver->show_fdinfo(&p, file);
}
EXPORT_SYMBOL(drm_show_fdinfo);
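/*
 * Example (editor's sketch): drivers opt into fdinfo stats by pointing
 * their file_operations at this helper alongside the standard DRM fops.
 * The fops instance below is hypothetical boilerplate; the callbacks it
 * references are the usual DRM file helpers.
 */
static const struct file_operations my_fdinfo_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,
	/* surfaces the stats in /proc/<pid>/fdinfo/<fd> */
	.show_fdinfo	= drm_show_fdinfo,
};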
/**
* mock_drm_getfile - Create a new struct file for the drm device
* @minor: drm minor to wrap (e.g. #drm_device.primary)
* @flags: file creation mode (O_RDWR etc)
*
* This creates a new struct file that wraps a DRM file context around a
* DRM minor. This mimics userspace opening e.g. /dev/dri/card0, but without
* invoking userspace. The struct file may be operated on using its f_op
* (the drm_device.driver.fops) to mimic userspace operations, or be supplied
* to userspace facing functions as an internal/anonymous client.
*
* RETURNS:
* Pointer to newly created struct file, ERR_PTR on failure.
*/
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
{
struct drm_device *dev = minor->dev;
struct drm_file *priv;
struct file *file;
priv = drm_file_alloc(minor);
if (IS_ERR(priv))
return ERR_CAST(priv);
file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
if (IS_ERR(file)) {
drm_file_free(priv);
return file;
}
/* Everyone shares a single global address space */
file->f_mapping = dev->anon_inode->i_mapping;
drm_dev_get(dev);
priv->filp = file;
return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
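/*
 * Example (editor's sketch): an in-kernel test using the mock file to
 * drive the device without any userspace. The test function itself is
 * hypothetical; fput() tears down the drm_file and drops the device
 * reference taken above.
 */
static int my_test_open_mock_client(struct drm_device *dev)
{
	struct file *file;

	file = mock_drm_getfile(dev->primary, O_RDWR);
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* ... exercise file->f_op, or use file->private_data as an
	 * anonymous drm_file client ... */

	fput(file);
	return 0;
}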
| linux-master | drivers/gpu/drm/drm_file.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drm gem DMA helper functions
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
*
* Based on Samsung Exynos code
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
*/
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vma_manager.h>
/**
* DOC: dma helpers
*
* The DRM GEM/DMA helpers are a means to provide buffer objects that are
* presented to the device as a contiguous chunk of memory. This is useful
* for devices that do not support scatter-gather DMA (either directly or
* by using an intimately attached IOMMU).
*
* For devices that access the memory bus through an (external) IOMMU, the
* buffer objects are allocated using a traditional page-based allocator
* and may be scattered through physical memory. However, they are
* contiguous in the IOVA space, so they appear contiguous to the devices
* using them.
*
* For other devices, the helpers rely on CMA to provide buffer objects
* that are physically contiguous in memory.
*
* For GEM callback helpers in struct &drm_gem_object functions, see likewise
* named functions with an _object_ infix (e.g., drm_gem_dma_object_vmap() wraps
* drm_gem_dma_vmap()). These helpers perform the necessary type conversion.
*/
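/*
 * Example (editor's sketch): the usual boilerplate for a driver built on
 * these helpers. The "my_*" names are placeholders; DEFINE_DRM_GEM_DMA_FOPS()
 * and DRM_GEM_DMA_DRIVER_OPS come from <drm/drm_gem_dma_helper.h> and wire
 * up the callbacks implemented in this file.
 */
DEFINE_DRM_GEM_DMA_FOPS(my_dma_fops);

static const struct drm_driver my_dma_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,
	.fops			= &my_dma_fops,
	.name			= "my-dma-drv",
	.desc			= "Hypothetical GEM DMA helper driver",
	.date			= "20230101",
	.major			= 1,
	.minor			= 0,
};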
static const struct drm_gem_object_funcs drm_gem_dma_default_funcs = {
.free = drm_gem_dma_object_free,
.print_info = drm_gem_dma_object_print_info,
.get_sg_table = drm_gem_dma_object_get_sg_table,
.vmap = drm_gem_dma_object_vmap,
.mmap = drm_gem_dma_object_mmap,
.vm_ops = &drm_gem_dma_vm_ops,
};
/**
* __drm_gem_dma_create - Create a GEM DMA object without allocating memory
* @drm: DRM device
* @size: size of the object to allocate
* @private: true if used for internal purposes
*
* This function creates and initializes a GEM DMA object of the given size,
* but doesn't allocate any memory to back the object.
*
* Returns:
* A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
* error code on failure.
*/
static struct drm_gem_dma_object *
__drm_gem_dma_create(struct drm_device *drm, size_t size, bool private)
{
struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *gem_obj;
int ret = 0;
if (drm->driver->gem_create_object) {
gem_obj = drm->driver->gem_create_object(drm, size);
if (IS_ERR(gem_obj))
return ERR_CAST(gem_obj);
dma_obj = to_drm_gem_dma_obj(gem_obj);
} else {
dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
if (!dma_obj)
return ERR_PTR(-ENOMEM);
gem_obj = &dma_obj->base;
}
if (!gem_obj->funcs)
gem_obj->funcs = &drm_gem_dma_default_funcs;
if (private) {
drm_gem_private_object_init(drm, gem_obj, size);
/* Always use writecombine for dma-buf mappings */
dma_obj->map_noncoherent = false;
} else {
ret = drm_gem_object_init(drm, gem_obj, size);
}
if (ret)
goto error;
ret = drm_gem_create_mmap_offset(gem_obj);
if (ret) {
drm_gem_object_release(gem_obj);
goto error;
}
return dma_obj;
error:
kfree(dma_obj);
return ERR_PTR(ret);
}
/**
* drm_gem_dma_create - allocate an object with the given size
* @drm: DRM device
* @size: size of the object to allocate
*
* This function creates a DMA GEM object and allocates memory as backing store.
* The allocated memory will occupy a contiguous chunk of bus address space.
*
* For devices that are directly connected to the memory bus, the allocated
* memory will be physically contiguous. For devices that access memory
* through an IOMMU, the allocated memory is not expected to be physically
* contiguous, because contiguous IOVAs are sufficient to meet the device's
* DMA requirements.
*
* Returns:
* A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
* error code on failure.
*/
struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
size_t size)
{
struct drm_gem_dma_object *dma_obj;
int ret;
size = round_up(size, PAGE_SIZE);
dma_obj = __drm_gem_dma_create(drm, size, false);
if (IS_ERR(dma_obj))
return dma_obj;
if (dma_obj->map_noncoherent) {
dma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
&dma_obj->dma_addr,
DMA_TO_DEVICE,
GFP_KERNEL | __GFP_NOWARN);
} else {
dma_obj->vaddr = dma_alloc_wc(drm->dev, size,
&dma_obj->dma_addr,
GFP_KERNEL | __GFP_NOWARN);
}
if (!dma_obj->vaddr) {
drm_dbg(drm, "failed to allocate buffer with size %zu\n",
size);
ret = -ENOMEM;
goto error;
}
return dma_obj;
error:
drm_gem_object_put(&dma_obj->base);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_create);
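/*
 * Example (editor's sketch): allocating a 1 MiB buffer and using its
 * addresses. The register-programming step is hypothetical; dma_addr and
 * vaddr are real fields of struct drm_gem_dma_object.
 */
static int my_alloc_scanout_buffer(struct drm_device *drm)
{
	struct drm_gem_dma_object *dma_obj;

	dma_obj = drm_gem_dma_create(drm, SZ_1M);
	if (IS_ERR(dma_obj))
		return PTR_ERR(dma_obj);

	/* dma_obj->dma_addr: device-visible bus address
	 * dma_obj->vaddr:    kernel CPU mapping */

	/* ... point the hypothetical scanout engine at dma_obj->dma_addr ... */

	drm_gem_object_put(&dma_obj->base); /* drop when no longer needed */
	return 0;
}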
/**
* drm_gem_dma_create_with_handle - allocate an object with the given size and
* return a GEM handle to it
* @file_priv: DRM file-private structure to register the handle for
* @drm: DRM device
* @size: size of the object to allocate
* @handle: return location for the GEM handle
*
* This function creates a DMA GEM object, allocating a chunk of memory as
* backing store. The GEM object is then added to the list of objects associated
* with the given file and a handle to it is returned.
*
* The allocated memory will occupy a contiguous chunk of bus address space.
* See drm_gem_dma_create() for more details.
*
* Returns:
* A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
* error code on failure.
*/
static struct drm_gem_dma_object *
drm_gem_dma_create_with_handle(struct drm_file *file_priv,
struct drm_device *drm, size_t size,
uint32_t *handle)
{
struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *gem_obj;
int ret;
dma_obj = drm_gem_dma_create(drm, size);
if (IS_ERR(dma_obj))
return dma_obj;
gem_obj = &dma_obj->base;
/*
* Allocate an ID in the IDR table where the object is registered.
* The returned handle holds that ID, which is what userspace sees.
*/
ret = drm_gem_handle_create(file_priv, gem_obj, handle);
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(gem_obj);
if (ret)
return ERR_PTR(ret);
return dma_obj;
}
/**
* drm_gem_dma_free - free resources associated with a DMA GEM object
* @dma_obj: DMA GEM object to free
*
* This function frees the backing memory of the DMA GEM object, cleans up the
* GEM object state and frees the memory used to store the object itself.
* If the buffer is imported and the virtual address is set, it is released.
*/
void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
{
struct drm_gem_object *gem_obj = &dma_obj->base;
struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);
if (gem_obj->import_attach) {
if (dma_obj->vaddr)
dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
} else if (dma_obj->vaddr) {
if (dma_obj->map_noncoherent)
dma_free_noncoherent(gem_obj->dev->dev, dma_obj->base.size,
dma_obj->vaddr, dma_obj->dma_addr,
DMA_TO_DEVICE);
else
dma_free_wc(gem_obj->dev->dev, dma_obj->base.size,
dma_obj->vaddr, dma_obj->dma_addr);
}
drm_gem_object_release(gem_obj);
kfree(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_free);
/**
* drm_gem_dma_dumb_create_internal - create a dumb buffer object
* @file_priv: DRM file-private structure to create the dumb buffer for
* @drm: DRM device
* @args: IOCTL data
*
* This aligns the pitch and size arguments to the minimum required. It is
* an internal helper that drivers can wrap to account for hardware with more
* specific alignment requirements. It should not be used directly as the
* &drm_driver.dumb_create callback.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
struct drm_gem_dma_object *dma_obj;
if (args->pitch < min_pitch)
args->pitch = min_pitch;
if (args->size < args->pitch * args->height)
args->size = args->pitch * args->height;
dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
&args->handle);
return PTR_ERR_OR_ZERO(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create_internal);
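/*
 * Example (editor's sketch): a driver whose scanout hardware requires,
 * say, 64-byte aligned pitches can adjust the userspace-supplied fields
 * and then defer to the internal helper. The alignment value and the
 * function name are hypothetical.
 */
static int my_dumb_create(struct drm_file *file_priv, struct drm_device *drm,
			  struct drm_mode_create_dumb *args)
{
	unsigned int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->pitch = ALIGN(pitch, 64);
	args->size = args->pitch * args->height;

	return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}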
/**
* drm_gem_dma_dumb_create - create a dumb buffer object
* @file_priv: DRM file-private structure to create the dumb buffer for
* @drm: DRM device
* @args: IOCTL data
*
* This function computes the pitch of the dumb buffer, rounding the row size
* up to a whole number of bytes. Drivers for hardware that doesn't have
* any additional restrictions on the pitch can directly use this function as
* their &drm_driver.dumb_create callback.
*
* For hardware with additional restrictions, drivers can adjust the fields
* set up by userspace and pass the IOCTL data along to the
* drm_gem_dma_dumb_create_internal() function.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_gem_dma_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
struct drm_gem_dma_object *dma_obj;
args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
args->size = args->pitch * args->height;
dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
&args->handle);
return PTR_ERR_OR_ZERO(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create);
const struct vm_operations_struct drm_gem_dma_vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_dma_vm_ops);
#ifndef CONFIG_MMU
/**
* drm_gem_dma_get_unmapped_area - propose address for mapping in noMMU cases
* @filp: file object
* @addr: memory address
* @len: buffer size
* @pgoff: page offset
* @flags: memory flags
*
* This function is used in noMMU platforms to propose address mapping
* for a given buffer.
* It's intended to be used as a direct handler for the struct
* &file_operations.get_unmapped_area operation.
*
* Returns:
* mapping address on success or a negative error code on failure.
*/
unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
unsigned long flags)
{
struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *obj = NULL;
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_vma_offset_node *node;
if (drm_dev_is_unplugged(dev))
return -ENODEV;
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
pgoff,
len >> PAGE_SHIFT);
if (likely(node)) {
obj = container_of(node, struct drm_gem_object, vma_node);
/*
* When the object is being freed, after it hits 0-refcnt it
* proceeds to tear down the object. In the process it will
* attempt to remove the VMA offset and so acquire this
* mgr->vm_lock. Therefore if we find an object with a 0-refcnt
* that matches our range, we know it is in the process of being
* destroyed and will be freed as soon as we release the lock -
* so we have to check for the 0-refcnted object and treat it as
* invalid.
*/
if (!kref_get_unless_zero(&obj->refcount))
obj = NULL;
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
if (!obj)
return -EINVAL;
if (!drm_vma_node_is_allowed(node, priv)) {
drm_gem_object_put(obj);
return -EACCES;
}
dma_obj = to_drm_gem_dma_obj(obj);
drm_gem_object_put(obj);
return dma_obj->vaddr ? (unsigned long)dma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_get_unmapped_area);
#endif
/**
* drm_gem_dma_print_info() - Print &drm_gem_dma_object info for debugfs
* @dma_obj: DMA GEM object
* @p: DRM printer
* @indent: Tab indentation level
*
* This function prints dma_addr and vaddr for use in e.g. debugfs output.
*/
void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
struct drm_printer *p, unsigned int indent)
{
drm_printf_indent(p, indent, "dma_addr=%pad\n", &dma_obj->dma_addr);
drm_printf_indent(p, indent, "vaddr=%p\n", dma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_dma_print_info);
/**
* drm_gem_dma_get_sg_table - provide a scatter/gather table of pinned
* pages for a DMA GEM object
* @dma_obj: DMA GEM object
*
* This function exports a scatter/gather table by calling the standard
* DMA mapping API.
*
* Returns:
* A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
* negative error code on failure.
*/
struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj)
{
struct drm_gem_object *obj = &dma_obj->base;
struct sg_table *sgt;
int ret;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
return ERR_PTR(-ENOMEM);
ret = dma_get_sgtable(obj->dev->dev, sgt, dma_obj->vaddr,
dma_obj->dma_addr, obj->size);
if (ret < 0)
goto out;
return sgt;
out:
kfree(sgt);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_get_sg_table);
/**
* drm_gem_dma_prime_import_sg_table - produce a DMA GEM object from another
* driver's scatter/gather table of pinned pages
* @dev: device to import into
* @attach: DMA-BUF attachment
* @sgt: scatter/gather table of pinned pages
*
* This function imports a scatter/gather table exported via DMA-BUF by
* another driver. Imported buffers must be physically contiguous in memory
* (i.e. the scatter/gather table must contain a single entry). Drivers that
* use the DMA helpers should set this as their
* &drm_driver.gem_prime_import_sg_table callback.
*
* Returns:
* A pointer to a newly created GEM object or an ERR_PTR-encoded negative
* error code on failure.
*/
struct drm_gem_object *
drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
struct drm_gem_dma_object *dma_obj;
/* check if the entries in the sg_table are contiguous */
if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
return ERR_PTR(-EINVAL);
/* Create a DMA GEM buffer. */
dma_obj = __drm_gem_dma_create(dev, attach->dmabuf->size, true);
if (IS_ERR(dma_obj))
return ERR_CAST(dma_obj);
dma_obj->dma_addr = sg_dma_address(sgt->sgl);
dma_obj->sgt = sgt;
drm_dbg_prime(dev, "dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
attach->dmabuf->size);
return &dma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_prime_import_sg_table);
/**
* drm_gem_dma_vmap - map a DMA GEM object into the kernel's virtual
* address space
* @dma_obj: DMA GEM object
* @map: Returns the kernel virtual address of the DMA GEM object's backing
* store.
*
* This function maps a buffer into the kernel's virtual address space.
* Since the DMA buffers are already mapped into the kernel virtual address
* space this simply returns the cached virtual address.
*
* Returns:
* 0 on success, or a negative error code otherwise.
*/
int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
struct iosys_map *map)
{
iosys_map_set_vaddr(map, dma_obj->vaddr);
return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_vmap);
/**
* drm_gem_dma_mmap - memory-map an exported DMA GEM object
* @dma_obj: DMA GEM object
* @vma: VMA for the area to be mapped
*
* This function maps a buffer into a userspace process's address space.
* In addition to the usual GEM VMA setup it immediately faults in the entire
* object instead of using on-demand faulting.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma)
{
struct drm_gem_object *obj = &dma_obj->base;
int ret;
/*
* Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
* vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
* the whole buffer.
*/
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vm_flags_mod(vma, VM_DONTEXPAND, VM_PFNMAP);
if (dma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
ret = dma_mmap_pages(dma_obj->base.dev->dev,
vma, vma->vm_end - vma->vm_start,
virt_to_page(dma_obj->vaddr));
} else {
ret = dma_mmap_wc(dma_obj->base.dev->dev, vma, dma_obj->vaddr,
dma_obj->dma_addr,
vma->vm_end - vma->vm_start);
}
if (ret)
drm_gem_vm_close(vma);
return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_mmap);
/**
* drm_gem_dma_prime_import_sg_table_vmap - PRIME import another driver's
* scatter/gather table and get the virtual address of the buffer
* @dev: DRM device
* @attach: DMA-BUF attachment
* @sgt: Scatter/gather table of pinned pages
*
* This function imports a scatter/gather table using
* drm_gem_dma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
* virtual address. This ensures that a DMA GEM object always has its virtual
* address set. This address is released when the object is freed.
*
* This function can be used as the &drm_driver.gem_prime_import_sg_table
* callback. The &DRM_GEM_DMA_DRIVER_OPS_VMAP macro provides a shortcut to set
* the necessary DRM driver operations.
*
* Returns:
* A pointer to a newly created GEM object or an ERR_PTR-encoded negative
* error code on failure.
*/
struct drm_gem_object *
drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *obj;
struct iosys_map map;
int ret;
ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
if (ret) {
DRM_ERROR("Failed to vmap PRIME buffer\n");
return ERR_PTR(ret);
}
obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) {
dma_buf_vunmap_unlocked(attach->dmabuf, &map);
return obj;
}
dma_obj = to_drm_gem_dma_obj(obj);
dma_obj->vaddr = map.vaddr;
return obj;
}
EXPORT_SYMBOL(drm_gem_dma_prime_import_sg_table_vmap);
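/*
 * Editor's note: drivers that want this vmap-on-import behaviour can use
 * the DRM_GEM_DMA_DRIVER_OPS_VMAP convenience macro instead of spelling
 * out each callback (a sketch, field list abridged):
 *
 *	static const struct drm_driver my_vmap_driver = {
 *		...
 *		DRM_GEM_DMA_DRIVER_OPS_VMAP,
 *	};
 */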
MODULE_DESCRIPTION("DRM DMA memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/drm_gem_dma_helper.c |
// SPDX-License-Identifier: MIT
#include <linux/fb.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_fbdev_dma.h>
/*
* struct fb_ops
*/
static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
/* No need to take a ref for fbcon because it unbinds on unregister */
if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
return -ENODEV;
return 0;
}
static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
if (user)
module_put(fb_helper->dev->driver->fops->owner);
return 0;
}
static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
if (!fb_helper->dev)
return;
drm_fb_helper_fini(fb_helper);
drm_client_buffer_vunmap(fb_helper->buffer);
drm_client_framebuffer_delete(fb_helper->buffer);
drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *fb_helper = info->par;
return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
}
static const struct fb_ops drm_fbdev_dma_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_dma_fb_open,
.fb_release = drm_fbdev_dma_fb_release,
__FB_DEFAULT_DMAMEM_OPS_RDWR,
DRM_FB_HELPER_DEFAULT_OPS,
__FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_mmap = drm_fbdev_dma_fb_mmap,
.fb_destroy = drm_fbdev_dma_fb_destroy,
};
/*
* struct drm_fb_helper
*/
static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct drm_client_buffer *buffer;
struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb;
struct fb_info *info;
u32 format;
struct iosys_map map;
int ret;
drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
sizes->surface_width, sizes->surface_height,
sizes->surface_bpp);
format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
buffer = drm_client_framebuffer_create(client, sizes->surface_width,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
dma_obj = to_drm_gem_dma_obj(buffer->gem);
fb = buffer->fb;
if (drm_WARN_ON(dev, fb->funcs->dirty)) {
ret = -ENODEV; /* damage handling not supported; use generic emulation */
goto err_drm_client_buffer_delete;
}
ret = drm_client_buffer_vmap(buffer, &map);
if (ret) {
goto err_drm_client_buffer_delete;
} else if (drm_WARN_ON(dev, map.is_iomem)) {
ret = -ENODEV; /* I/O memory not supported; use generic emulation */
goto err_drm_client_buffer_delete;
}
fb_helper->buffer = buffer;
fb_helper->fb = buffer->fb;
info = drm_fb_helper_alloc_info(fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_drm_client_buffer_vunmap;
}
drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fbops = &drm_fbdev_dma_fb_ops;
/* screen */
info->flags |= FBINFO_VIRTFB; /* system memory */
if (dma_obj->map_noncoherent)
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_size = sizes->surface_height * fb->pitches[0];
info->screen_buffer = map.vaddr;
info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
info->fix.smem_len = info->screen_size;
return 0;
err_drm_client_buffer_vunmap:
fb_helper->fb = NULL;
fb_helper->buffer = NULL;
drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
drm_client_framebuffer_delete(buffer);
return ret;
}
static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
.fb_probe = drm_fbdev_dma_helper_fb_probe,
};
/*
* struct drm_client_funcs
*/
static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
if (fb_helper->info) {
drm_fb_helper_unregister_info(fb_helper);
} else {
drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
}
static int drm_fbdev_dma_client_restore(struct drm_client_dev *client)
{
drm_fb_helper_lastclose(client->dev);
return 0;
}
static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
struct drm_device *dev = client->dev;
int ret;
if (dev->fb_helper)
return drm_fb_helper_hotplug_event(dev->fb_helper);
ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto err_drm_err;
if (!drm_drv_uses_atomic_modeset(dev))
drm_helper_disable_unused_functions(dev);
ret = drm_fb_helper_initial_config(fb_helper);
if (ret)
goto err_drm_fb_helper_fini;
return 0;
err_drm_fb_helper_fini:
drm_fb_helper_fini(fb_helper);
err_drm_err:
drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret);
return ret;
}
static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
.owner = THIS_MODULE,
.unregister = drm_fbdev_dma_client_unregister,
.restore = drm_fbdev_dma_client_restore,
.hotplug = drm_fbdev_dma_client_hotplug,
};
/**
* drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device.
* 32 is used if this is zero.
*
* This function sets up fbdev emulation for GEM DMA drivers that support
* dumb buffers with a virtual address and that can be mmap'ed.
* drm_fbdev_dma_setup() shall be called after the DRM driver has registered
* the new DRM device with drm_dev_register().
*
* Restore, hotplug events and teardown are all taken care of. Drivers that do
* suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
* Simple drivers might use drm_mode_config_helper_suspend().
*
* This function is safe to call even when there are no connectors present.
* Setup will be retried on the next hotplug event.
*
* The fbdev is destroyed by drm_dev_unregister().
*/
void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
{
struct drm_fb_helper *fb_helper;
int ret;
drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
if (!fb_helper)
return;
drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs);
ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs);
if (ret) {
drm_err(dev, "Failed to register client: %d\n", ret);
goto err_drm_client_init;
}
drm_client_register(&fb_helper->client);
return;
err_drm_client_init:
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
EXPORT_SYMBOL(drm_fbdev_dma_setup);
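/*
 * Example (editor's sketch): enabling fbdev emulation right after device
 * registration. The function name is hypothetical; "32" requests a
 * 32-bpp console.
 */
static int my_enable_fbdev(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	drm_fbdev_dma_setup(drm, 32);

	return 0;
}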
| linux-master | drivers/gpu/drm/drm_fbdev_dma.c |
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Rafael Antognolli <[email protected]>
*
*/
#include <linux/module.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include "drm_crtc_helper_internal.h"
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
#if IS_ENABLED(CONFIG_DRM_LOAD_EDID_FIRMWARE)
/* Backward compatibility for drm_kms_helper.edid_firmware */
static int edid_firmware_set(const char *val, const struct kernel_param *kp)
{
DRM_NOTE("drm_kms_helper.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
return __drm_set_edid_firmware_path(val);
}
static int edid_firmware_get(char *buffer, const struct kernel_param *kp)
{
return __drm_get_edid_firmware_path(buffer, PAGE_SIZE);
}
static const struct kernel_param_ops edid_firmware_ops = {
.set = edid_firmware_set,
.get = edid_firmware_get,
};
module_param_cb(edid_firmware, &edid_firmware_ops, NULL, 0644);
__MODULE_PARM_TYPE(edid_firmware, "charp");
MODULE_PARM_DESC(edid_firmware,
"DEPRECATED. Use drm.edid_firmware module parameter instead.");
#endif
| linux-master | drivers/gpu/drm/drm_kms_helper_common.c |
/*
* Legacy: Generic DRM Buffer Management
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Author: Rickard E. (Rik) Faith <[email protected]>
* Author: Gareth Hughes <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/shmparam.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
struct drm_local_map *map)
{
struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
/*
* Because the kernel-userspace ABI is fixed at a 32-bit offset
* while PCI resources may live above that, we only compare the
* lower 32 bits of the map offset for maps of type
* _DRM_FRAMEBUFFER or _DRM_REGISTERS.
* It is assumed that if a driver has more than one resource
* of each type, the lower 32 bits are different.
*/
if (!entry->map ||
map->type != entry->map->type ||
entry->master != dev->master)
continue;
switch (map->type) {
case _DRM_SHM:
if (map->flags != _DRM_CONTAINS_LOCK)
break;
return entry;
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
if ((entry->map->offset & 0xffffffff) ==
(map->offset & 0xffffffff))
return entry;
break;
default: /* Make gcc happy */
break;
}
if (entry->map->offset == map->offset)
return entry;
}
return NULL;
}
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
unsigned long user_token, int hashed_handle, int shm)
{
int use_hashed_handle, shift;
unsigned long add;
#if (BITS_PER_LONG == 64)
use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif
if (!use_hashed_handle) {
int ret;
hash->key = user_token >> PAGE_SHIFT;
ret = drm_ht_insert_item(&dev->map_hash, hash);
if (ret != -EINVAL)
return ret;
}
shift = 0;
add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
if (shm && (SHMLBA > PAGE_SIZE)) {
int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
/* For shared memory, we have to preserve the SHMLBA
* bits of the eventual vma->vm_pgoff value during
* mmap(). Otherwise we run into cache aliasing problems
* on some platforms. On these platforms, the pgoff of
* a mmap() request is used to pick a suitable virtual
* address for the mmap() region such that it will not
* cause cache aliasing problems.
*
* Therefore, make sure the SHMLBA relevant bits of the
* hash value we use are equal to those in the original
* kernel virtual address.
*/
shift = bits;
add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
}
return drm_ht_just_insert_please(&dev->map_hash, hash,
user_token, 32 - PAGE_SHIFT - 3,
shift, add);
}
/*
* Core function to create a range of memory available for mapping by a
* non-root process.
*
* Adjusts the memory offset to its absolute value according to the mapping
* type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
* applicable and if supported by the kernel.
*/
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags,
struct drm_map_list **maplist)
{
struct drm_local_map *map;
struct drm_map_list *list;
unsigned long user_token;
int ret;
map = kmalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return -ENOMEM;
map->offset = offset;
map->size = size;
map->flags = flags;
map->type = type;
/* Only allow shared memory to be removable since we only keep enough
* bookkeeping information about shared memory to allow for removal
* when processes fork.
*/
if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
kfree(map);
return -EINVAL;
}
DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
(unsigned long long)map->offset, map->size, map->type);
/* page-align _DRM_SHM maps. They are allocated here so there is no security
* hole created by that and it works around various broken drivers that use
* a non-aligned quantity to map the SAREA. --BenH
*/
if (map->type == _DRM_SHM)
map->size = PAGE_ALIGN(map->size);
if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
kfree(map);
return -EINVAL;
}
map->mtrr = -1;
map->handle = NULL;
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
if (map->offset + (map->size-1) < map->offset ||
map->offset < virt_to_phys(high_memory)) {
kfree(map);
return -EINVAL;
}
#endif
/* Some drivers preinitialize some maps, without the X Server
* needing to be aware of it. Therefore, we just return success
* when the server tries to create a duplicate map.
*/
list = drm_find_matching_map(dev, map);
if (list != NULL) {
if (list->map->size != map->size) {
DRM_DEBUG("Matching maps of type %d with "
"mismatched sizes, (%ld vs %ld)\n",
map->type, map->size,
list->map->size);
list->map->size = map->size;
}
kfree(map);
*maplist = list;
return 0;
}
if (map->type == _DRM_FRAME_BUFFER ||
(map->flags & _DRM_WRITE_COMBINING)) {
map->mtrr =
arch_phys_wc_add(map->offset, map->size);
}
if (map->type == _DRM_REGISTERS) {
if (map->flags & _DRM_WRITE_COMBINING)
map->handle = ioremap_wc(map->offset,
map->size);
else
map->handle = ioremap(map->offset, map->size);
if (!map->handle) {
kfree(map);
return -ENOMEM;
}
}
break;
case _DRM_SHM:
list = drm_find_matching_map(dev, map);
if (list != NULL) {
if (list->map->size != map->size) {
DRM_DEBUG("Matching maps of type %d with "
"mismatched sizes, (%ld vs %ld)\n",
map->type, map->size, list->map->size);
list->map->size = map->size;
}
kfree(map);
*maplist = list;
return 0;
}
map->handle = vmalloc_user(map->size);
DRM_DEBUG("%lu %d %p\n",
map->size, order_base_2(map->size), map->handle);
if (!map->handle) {
kfree(map);
return -ENOMEM;
}
map->offset = (unsigned long)map->handle;
if (map->flags & _DRM_CONTAINS_LOCK) {
/* Prevent a 2nd X Server from creating a 2nd lock */
if (dev->master->lock.hw_lock != NULL) {
vfree(map->handle);
kfree(map);
return -EBUSY;
}
dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */
}
break;
case _DRM_AGP: {
struct drm_agp_mem *entry;
int valid = 0;
if (!dev->agp) {
kfree(map);
return -EINVAL;
}
#ifdef __alpha__
map->offset += dev->hose->mem_space->start;
#endif
/* In some cases (i810 driver), user space may have already
* added the AGP base itself, because dev->agp->base previously
* only got set during AGP enable. So, only add the base
* address if the map's offset isn't already within the
* aperture.
*/
if (map->offset < dev->agp->base ||
map->offset > dev->agp->base +
dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
map->offset += dev->agp->base;
}
map->mtrr = dev->agp->agp_mtrr; /* for getmap */
/* This assumes the DRM is in total control of AGP space.
* It's not always the case as AGP can be in the control
* of user space (i.e. i810 driver). So this loop will get
* skipped and we double check that dev->agp->memory is
* actually set as well as being invalid before EPERM'ing
*/
list_for_each_entry(entry, &dev->agp->memory, head) {
if ((map->offset >= entry->bound) &&
(map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
if (!list_empty(&dev->agp->memory) && !valid) {
kfree(map);
return -EPERM;
}
DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
(unsigned long long)map->offset, map->size);
break;
}
case _DRM_SCATTER_GATHER:
if (!dev->sg) {
kfree(map);
return -EINVAL;
}
map->offset += (unsigned long)dev->sg->virtual;
break;
case _DRM_CONSISTENT:
/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first.
*/
map->handle = dma_alloc_coherent(dev->dev,
map->size,
&map->offset,
GFP_KERNEL);
if (!map->handle) {
kfree(map);
return -ENOMEM;
}
break;
default:
kfree(map);
return -EINVAL;
}
list = kzalloc(sizeof(*list), GFP_KERNEL);
if (!list) {
if (map->type == _DRM_REGISTERS)
iounmap(map->handle);
kfree(map);
return -ENOMEM; /* allocation failure, not an invalid argument */
}
list->map = map;
mutex_lock(&dev->struct_mutex);
list_add(&list->head, &dev->maplist);
/* Assign a 32-bit handle */
/* We do it here so that dev->struct_mutex protects the increment */
user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
map->offset;
ret = drm_map_handle(dev, &list->hash, user_token, 0,
(map->type == _DRM_SHM));
if (ret) {
if (map->type == _DRM_REGISTERS)
iounmap(map->handle);
kfree(map);
kfree(list);
mutex_unlock(&dev->struct_mutex);
return ret;
}
list->user_token = list->hash.key << PAGE_SHIFT;
mutex_unlock(&dev->struct_mutex);
if (!(map->flags & _DRM_DRIVER))
list->master = dev->master;
*maplist = list;
return 0;
}
int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
struct drm_map_list *list;
int rc;
rc = drm_addmap_core(dev, offset, size, type, flags, &list);
if (!rc)
*map_ptr = list->map;
return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);
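/*
 * Example (editor's sketch): how a legacy (pre-GEM) PCI driver might have
 * mapped its register BAR at load time. Purely illustrative; new drivers
 * must not use the legacy map API.
 */
static int my_legacy_map_registers(struct drm_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct drm_local_map *regs;
	int ret;

	ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0), _DRM_REGISTERS,
				_DRM_READ_ONLY, &regs);
	if (ret)
		return ret;

	/* regs->handle now holds the ioremap()ed registers */
	return 0;
}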
struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
unsigned int token)
{
struct drm_map_list *_entry;
list_for_each_entry(_entry, &dev->maplist, head)
if (_entry->user_token == token)
return _entry->map;
return NULL;
}
EXPORT_SYMBOL(drm_legacy_findmap);
/*
* Ioctl to specify a range of memory that is available for mapping by a
* non-root process.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_map structure.
* \return zero on success or a negative value on error.
*
*/
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_map *map = data;
struct drm_map_list *maplist;
int err;
if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
return -EPERM;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
err = drm_addmap_core(dev, map->offset, map->size, map->type,
map->flags, &maplist);
if (err)
return err;
/* avoid a warning on 64-bit; this casting isn't very nice, but the API was fixed long ago, so it's too late to change */
map->handle = (void *)(unsigned long)maplist->user_token;
/*
* It appears that there are no users of this value whatsoever --
* drmAddMap just discards it. Let's not encourage its use.
* (Keeping drm_addmap_core's returned mtrr value would be wrong --
* it's not a real mtrr index anymore.)
*/
map->mtrr = -1;
return 0;
}
/*
* Get a mapping information.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_map structure.
*
* \return zero on success or a negative number on failure.
*
* Searches for the mapping with the specified offset and copies its information
* into userspace
*/
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_map *map = data;
struct drm_map_list *r_list = NULL;
struct list_head *list;
int idx;
int i;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
idx = map->offset;
if (idx < 0)
return -EINVAL;
i = 0;
mutex_lock(&dev->struct_mutex);
list_for_each(list, &dev->maplist) {
if (i == idx) {
r_list = list_entry(list, struct drm_map_list, head);
break;
}
i++;
}
if (!r_list || !r_list->map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
map->offset = r_list->map->offset;
map->size = r_list->map->size;
map->type = r_list->map->type;
map->flags = r_list->map->flags;
map->handle = (void *)(unsigned long) r_list->user_token;
map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
mutex_unlock(&dev->struct_mutex);
return 0;
}
/*
* Remove a map private from list and deallocate resources if the mapping
* isn't in use.
*
* Searches the map on drm_device::maplist, removes it from the list, see if
* it's being used, and free any associated resource (such as MTRR's) if it's not
* being on use.
*
* \sa drm_legacy_addmap
*/
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
struct drm_map_list *r_list = NULL, *list_t;
int found = 0;
struct drm_master *master;
/* Find the list entry for the map and remove it */
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (r_list->map == map) {
master = r_list->master;
list_del(&r_list->head);
drm_ht_remove_key(&dev->map_hash,
r_list->user_token >> PAGE_SHIFT);
kfree(r_list);
found = 1;
break;
}
}
if (!found)
return -EINVAL;
switch (map->type) {
case _DRM_REGISTERS:
iounmap(map->handle);
fallthrough;
case _DRM_FRAME_BUFFER:
arch_phys_wc_del(map->mtrr);
break;
case _DRM_SHM:
vfree(map->handle);
if (master) {
if (dev->sigdata.lock == master->lock.hw_lock)
dev->sigdata.lock = NULL;
master->lock.hw_lock = NULL; /* SHM removed */
master->lock.file_priv = NULL;
wake_up_interruptible_all(&master->lock.lock_queue);
}
break;
case _DRM_AGP:
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
dma_free_coherent(dev->dev,
map->size,
map->handle,
map->offset);
break;
}
kfree(map);
return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);
void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
drm_legacy_rmmap_locked(dev, map);
mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);
void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
struct drm_map_list *r_list, *list_temp;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
if (r_list->master == master) {
drm_legacy_rmmap_locked(dev, r_list->map);
r_list = NULL;
}
}
mutex_unlock(&dev->struct_mutex);
}
void drm_legacy_rmmaps(struct drm_device *dev)
{
struct drm_map_list *r_list, *list_temp;
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_legacy_rmmap(dev, r_list->map);
}
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
* the last close of the device, and this is necessary for cleanup when things
* exit uncleanly. Therefore, having userland manually remove mappings seems
* like a pointless exercise since they're going away anyway.
*
* One use case might be after addmap is allowed for normal users for SHM and
* gets used by drivers that the server doesn't need to care about. This seems
* unlikely.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a struct drm_map structure.
* \return zero on success or a negative value on error.
*/
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_map *request = data;
struct drm_local_map *map = NULL;
struct drm_map_list *r_list;
int ret;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->user_token == (unsigned long)request->handle &&
r_list->map->flags & _DRM_REMOVABLE) {
map = r_list->map;
break;
}
}
/* List has wrapped around to the head pointer, or the list is empty
* and we didn't find anything.
*/
if (list_empty(&dev->maplist) || !map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
/* Register and framebuffer maps are permanent */
if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
ret = drm_legacy_rmmap_locked(dev, map);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/*
* Cleanup after an error on one of the addbufs() functions.
*
* \param dev DRM device.
* \param entry buffer entry where the error occurred.
*
* Frees any pages and buffers associated with the given entry.
*/
static void drm_cleanup_buf_error(struct drm_device *dev,
struct drm_buf_entry *entry)
{
drm_dma_handle_t *dmah;
int i;
if (entry->seg_count) {
for (i = 0; i < entry->seg_count; i++) {
if (entry->seglist[i]) {
dmah = entry->seglist[i];
dma_free_coherent(dev->dev,
dmah->size,
dmah->vaddr,
dmah->busaddr);
kfree(dmah);
}
}
kfree(entry->seglist);
entry->seg_count = 0;
}
if (entry->buf_count) {
for (i = 0; i < entry->buf_count; i++) {
kfree(entry->buflist[i].dev_private);
}
kfree(entry->buflist);
entry->buf_count = 0;
}
}
#if IS_ENABLED(CONFIG_AGP)
/*
* Add AGP buffers for DMA transfers.
*
* \param dev struct drm_device to which the buffers are to be added.
* \param request pointer to a struct drm_buf_desc describing the request.
* \return zero on success or a negative number on failure.
*
* After some sanity checks creates a drm_buf structure for each buffer and
* reallocates the buffer list of the same size order to accommodate the new
* buffers.
*/
int drm_legacy_addbufs_agp(struct drm_device *dev,
struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_entry *entry;
struct drm_agp_mem *agp_entry;
struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i, valid;
struct drm_buf **temp_buflist;
if (!dma)
return -EINVAL;
count = request->count;
order = order_base_2(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = dev->agp->base + request->agp_start;
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %lx\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
/* Make sure buffers are located in AGP memory that we own */
valid = 0;
list_for_each_entry(agp_entry, &dev->agp->memory, head) {
if ((agp_offset >= agp_entry->bound) &&
(agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
if (!list_empty(&dev->agp->memory) && !valid) {
DRM_DEBUG("zone invalid\n");
return -EINVAL;
}
spin_lock(&dev->buf_lock);
if (dev->buf_use) {
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
if (entry->buf_count) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
if (count < 0 || count > 4096) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -EINVAL;
}
entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
while (entry->buf_count < count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + offset);
buf->bus_address = agp_offset + offset;
buf->address = (void *)(agp_offset + offset);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
DRM_DEBUG("byte_count: %d\n", byte_count);
temp_buflist = krealloc(dma->buflist,
(dma->buf_count + entry->buf_count) *
sizeof(*dma->buflist), GFP_KERNEL);
if (!temp_buflist) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
dma->buflist = temp_buflist;
for (i = 0; i < entry->buf_count; i++) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += byte_count >> PAGE_SHIFT;
dma->byte_count += byte_count;
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
mutex_unlock(&dev->struct_mutex);
request->count = entry->buf_count;
request->size = size;
dma->flags = _DRM_DMA_USE_AGP;
atomic_dec(&dev->buf_alloc);
return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */
int drm_legacy_addbufs_pci(struct drm_device *dev,
struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
int count;
int order;
int size;
int total;
int page_order;
struct drm_buf_entry *entry;
drm_dma_handle_t *dmah;
struct drm_buf *buf;
int alignment;
unsigned long offset;
int i;
int byte_count;
int page_count;
unsigned long *temp_pagelist;
struct drm_buf **temp_buflist;
if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
count = request->count;
order = order_base_2(request->size);
size = 1 << order;
DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
request->count, request->size, size, order);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
alignment = (request->flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
spin_lock(&dev->buf_lock);
if (dev->buf_use) {
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
if (entry->buf_count) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
if (count < 0 || count > 4096) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -EINVAL;
}
entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
if (!entry->seglist) {
kfree(entry->buflist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
/* Keep the original pagelist until we know all the allocations
* have succeeded
*/
temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
sizeof(*dma->pagelist),
GFP_KERNEL);
if (!temp_pagelist) {
kfree(entry->buflist);
kfree(entry->seglist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
memcpy(temp_pagelist,
dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
DRM_DEBUG("pagelist: %d entries\n",
dma->page_count + (count << page_order));
entry->buf_size = size;
entry->page_order = page_order;
byte_count = 0;
page_count = 0;
while (entry->buf_count < count) {
dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
if (!dmah) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
entry->seg_count = count;
drm_cleanup_buf_error(dev, entry);
kfree(temp_pagelist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
dmah->size = total;
dmah->vaddr = dma_alloc_coherent(dev->dev,
dmah->size,
&dmah->busaddr,
GFP_KERNEL);
if (!dmah->vaddr) {
kfree(dmah);
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
entry->seg_count = count;
drm_cleanup_buf_error(dev, entry);
kfree(temp_pagelist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
entry->seglist[entry->seg_count++] = dmah;
for (i = 0; i < (1 << page_order); i++) {
DRM_DEBUG("page %d @ 0x%08lx\n",
dma->page_count + page_count,
(unsigned long)dmah->vaddr + PAGE_SIZE * i);
temp_pagelist[dma->page_count + page_count++]
= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
}
for (offset = 0;
offset + size <= total && entry->buf_count < count;
offset += alignment, ++entry->buf_count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + byte_count + offset);
buf->address = (void *)(dmah->vaddr + offset);
buf->bus_address = dmah->busaddr + offset;
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
buf->dev_private = kzalloc(buf->dev_priv_size,
GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
entry->seg_count = count;
drm_cleanup_buf_error(dev, entry);
kfree(temp_pagelist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
byte_count += PAGE_SIZE << page_order;
}
temp_buflist = krealloc(dma->buflist,
(dma->buf_count + entry->buf_count) *
sizeof(*dma->buflist), GFP_KERNEL);
if (!temp_buflist) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
kfree(temp_pagelist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
dma->buflist = temp_buflist;
for (i = 0; i < entry->buf_count; i++) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
/* No allocations failed, so now we can replace the original pagelist
* with the new one.
*/
if (dma->page_count) {
kfree(dma->pagelist);
}
dma->pagelist = temp_pagelist;
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += entry->seg_count << page_order;
dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
mutex_unlock(&dev->struct_mutex);
request->count = entry->buf_count;
request->size = size;
if (request->flags & _DRM_PCI_BUFFER_RO)
dma->flags = _DRM_DMA_USE_PCI_RO;
atomic_dec(&dev->buf_alloc);
return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);
static int drm_legacy_addbufs_sg(struct drm_device *dev,
struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_entry *entry;
struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
struct drm_buf **temp_buflist;
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
count = request->count;
order = order_base_2(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = request->agp_start;
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %lu\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
spin_lock(&dev->buf_lock);
if (dev->buf_use) {
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
if (entry->buf_count) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
if (count < 0 || count > 4096) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -EINVAL;
}
entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
while (entry->buf_count < count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + offset);
buf->bus_address = agp_offset + offset;
buf->address = (void *)(agp_offset + offset
+ (unsigned long)dev->sg->virtual);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
DRM_DEBUG("byte_count: %d\n", byte_count);
temp_buflist = krealloc(dma->buflist,
(dma->buf_count + entry->buf_count) *
sizeof(*dma->buflist), GFP_KERNEL);
if (!temp_buflist) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
dma->buflist = temp_buflist;
for (i = 0; i < entry->buf_count; i++) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += byte_count >> PAGE_SHIFT;
dma->byte_count += byte_count;
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
mutex_unlock(&dev->struct_mutex);
request->count = entry->buf_count;
request->size = size;
dma->flags = _DRM_DMA_USE_SG;
atomic_dec(&dev->buf_alloc);
return 0;
}
/*
* Add buffers for DMA transfers (ioctl).
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a struct drm_buf_desc request.
* \return zero on success or a negative number on failure.
*
 * According to the memory type specified in drm_buf_desc::flags and the
* build options, it dispatches the call either to addbufs_agp(),
* addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
* PCI memory respectively.
*/
int drm_legacy_addbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_buf_desc *request = data;
int ret;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EOPNOTSUPP;
#if IS_ENABLED(CONFIG_AGP)
if (request->flags & _DRM_AGP_BUFFER)
ret = drm_legacy_addbufs_agp(dev, request);
else
#endif
if (request->flags & _DRM_SG_BUFFER)
ret = drm_legacy_addbufs_sg(dev, request);
else if (request->flags & _DRM_FB_BUFFER)
ret = -EINVAL;
else
ret = drm_legacy_addbufs_pci(dev, request);
return ret;
}
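/*
 * Illustrative sketch (not part of this file): legacy userspace reaches
 * drm_legacy_addbufs() through the DRM_IOCTL_ADD_BUFS ioctl. "fd" is
 * assumed to be an open DRM device node; the sizes are only examples.
 *
 *	struct drm_buf_desc desc = {
 *		.count = 16,
 *		.size = 4096,
 *		.flags = _DRM_PAGE_ALIGN,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) == 0)
 *		printf("got %d buffers of %d bytes\n", desc.count, desc.size);
 *
 * On success the kernel writes back the count and size actually allocated.
 */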
/*
* Get information about the buffer mappings.
*
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_buf_info structure.
* \return zero on success or a negative number on failure.
*
* Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing further buffer allocation after this call. Information
* about each requested buffer is then copied into user space.
*/
int __drm_legacy_infobufs(struct drm_device *dev,
void *data, int *p,
int (*f)(void *, int, struct drm_buf_entry *))
{
struct drm_device_dma *dma = dev->dma;
int i;
int count;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
spin_lock(&dev->buf_lock);
if (atomic_read(&dev->buf_alloc)) {
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->buf_lock);
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
if (dma->bufs[i].buf_count)
++count;
}
DRM_DEBUG("count = %d\n", count);
if (*p >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
struct drm_buf_entry *from = &dma->bufs[i];
if (from->buf_count) {
if (f(data, count, from) < 0)
return -EFAULT;
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].buf_size,
dma->bufs[i].low_mark,
dma->bufs[i].high_mark);
++count;
}
}
}
*p = count;
return 0;
}
static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
struct drm_buf_info *request = data;
struct drm_buf_desc __user *to = &request->list[count];
struct drm_buf_desc v = {.count = from->buf_count,
.size = from->buf_size,
.low_mark = from->low_mark,
.high_mark = from->high_mark};
if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
return -EFAULT;
return 0;
}
int drm_legacy_infobufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_buf_info *request = data;
return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}
/*
* Specifies a low and high water mark for buffer allocation
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg a pointer to a drm_buf_desc structure.
* \return zero on success or a negative number on failure.
*
* Verifies that the size order is bounded between the admissible orders and
* updates the respective drm_device_dma::bufs entry low and high water mark.
*
 * \note This ioctl is deprecated and essentially unused.
*/
int drm_legacy_markbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_desc *request = data;
int order;
struct drm_buf_entry *entry;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
DRM_DEBUG("%d, %d, %d\n",
request->size, request->low_mark, request->high_mark);
order = order_base_2(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
entry = &dma->bufs[order];
if (request->low_mark < 0 || request->low_mark > entry->buf_count)
return -EINVAL;
if (request->high_mark < 0 || request->high_mark > entry->buf_count)
return -EINVAL;
entry->low_mark = request->low_mark;
entry->high_mark = request->high_mark;
return 0;
}
/*
* Unreserve the buffers in list, previously reserved using drmDMA.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_buf_free structure.
* \return zero on success or a negative number on failure.
*
* Calls free_buffer() for each used buffer.
* This function is primarily used for debugging.
*/
int drm_legacy_freebufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_free *request = data;
int i;
int idx;
struct drm_buf *buf;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
DRM_DEBUG("%d\n", request->count);
for (i = 0; i < request->count; i++) {
if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
return -EFAULT;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
idx, dma->buf_count - 1);
return -EINVAL;
}
idx = array_index_nospec(idx, dma->buf_count);
buf = dma->buflist[idx];
if (buf->file_priv != file_priv) {
DRM_ERROR("Process %d freeing buffer not owned\n",
task_pid_nr(current));
return -EINVAL;
}
drm_legacy_free_buffer(dev, buf);
}
return 0;
}
/*
* Maps all of the DMA buffers into client-virtual space (ioctl).
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_buf_map structure.
* \return zero on success or a negative number on failure.
*
* Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
* about each buffer into user space. For PCI buffers, it calls vm_mmap() with
* offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
* drm_mmap_dma().
*/
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
void __user **v,
int (*f)(void *, int, unsigned long,
struct drm_buf *),
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int retcode = 0;
unsigned long virtual;
int i;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
spin_lock(&dev->buf_lock);
if (atomic_read(&dev->buf_alloc)) {
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
dev->buf_use++; /* Can't allocate more after this call */
spin_unlock(&dev->buf_lock);
if (*p >= dma->buf_count) {
if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
|| (drm_core_check_feature(dev, DRIVER_SG)
&& (dma->flags & _DRM_DMA_USE_SG))) {
struct drm_local_map *map = dev->agp_buffer_map;
unsigned long token = dev->agp_buffer_token;
if (!map) {
retcode = -EINVAL;
goto done;
}
virtual = vm_mmap(file_priv->filp, 0, map->size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
token);
} else {
virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
PROT_READ | PROT_WRITE,
MAP_SHARED, 0);
}
if (virtual > -1024UL) {
/* Real error */
retcode = (signed long)virtual;
goto done;
}
*v = (void __user *)virtual;
for (i = 0; i < dma->buf_count; i++) {
if (f(data, i, virtual, dma->buflist[i]) < 0) {
retcode = -EFAULT;
goto done;
}
}
}
done:
*p = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);
return retcode;
}
static int map_one_buf(void *data, int idx, unsigned long virtual,
struct drm_buf *buf)
{
struct drm_buf_map *request = data;
unsigned long address = virtual + buf->offset; /* *** */
if (copy_to_user(&request->list[idx].idx, &buf->idx,
sizeof(request->list[0].idx)))
return -EFAULT;
if (copy_to_user(&request->list[idx].total, &buf->total,
sizeof(request->list[0].total)))
return -EFAULT;
if (clear_user(&request->list[idx].used, sizeof(int)))
return -EFAULT;
if (copy_to_user(&request->list[idx].address, &address,
sizeof(address)))
return -EFAULT;
return 0;
}
int drm_legacy_mapbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_buf_map *request = data;
return __drm_legacy_mapbufs(dev, data, &request->count,
&request->virtual, map_one_buf,
file_priv);
}
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (dev->driver->dma_ioctl)
return dev->driver->dma_ioctl(dev, data, file_priv);
else
return -EINVAL;
}
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && entry->map->type == _DRM_SHM &&
(entry->map->flags & _DRM_CONTAINS_LOCK)) {
return entry->map;
}
}
return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);
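/*
 * Illustrative sketch (not part of this file): a legacy driver would
 * typically look up the SAREA once at init time; "dev_priv" and its
 * "sarea" field are hypothetical driver-private names.
 *
 *	dev_priv->sarea = drm_legacy_getsarea(dev);
 *	if (!dev_priv->sarea)
 *		return -EINVAL;
 */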
| linux-master | drivers/gpu/drm/drm_bufs.c |
/*
* Copyright (C) 2014 Red Hat
* Copyright (C) 2014 Intel Corp.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rob Clark <[email protected]>
* Daniel Vetter <[email protected]>
*/
#include <linux/dma-fence.h>
#include <linux/ktime.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
#include "drm_crtc_helper_internal.h"
#include "drm_crtc_internal.h"
/**
* DOC: overview
*
* This helper library provides implementations of check and commit functions on
* top of the CRTC modeset helper callbacks and the plane helper callbacks. It
* also provides convenience implementations for the atomic state handling
* callbacks for drivers which don't need to subclass the drm core structures to
* add their own additional internal state.
*
* This library also provides default implementations for the check callback in
* drm_atomic_helper_check() and for the commit callback with
* drm_atomic_helper_commit(). But the individual stages and callbacks are
* exposed to allow drivers to mix and match and e.g. use the plane helpers only
* together with a driver private modeset implementation.
*
* This library also provides implementations for all the legacy driver
* interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
* drm_atomic_helper_disable_plane(), and the various functions to implement
* set_property callbacks. New drivers must not implement these functions
* themselves but must use the provided helpers.
*
* The atomic helper uses the same function table structures as all other
* modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
* struct &drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
* also shares the &struct drm_plane_helper_funcs function table with the plane
* helpers.
*/
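/*
 * Illustrative sketch (not part of this file), with hypothetical foo_*
 * driver callbacks: the helpers are hooked up by registering per-object
 * helper function tables, e.g. for a CRTC:
 *
 *	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
 *		.atomic_check = foo_crtc_atomic_check,
 *		.atomic_enable = foo_crtc_atomic_enable,
 *		.atomic_disable = foo_crtc_atomic_disable,
 *	};
 *
 *	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
 *
 * Equivalent tables exist for planes, encoders and connectors, see
 * &struct drm_plane_helper_funcs and friends.
 */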
static void
drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
struct drm_plane_state *old_plane_state,
struct drm_plane_state *plane_state,
struct drm_plane *plane)
{
struct drm_crtc_state *crtc_state;
if (old_plane_state->crtc) {
crtc_state = drm_atomic_get_new_crtc_state(state,
old_plane_state->crtc);
if (WARN_ON(!crtc_state))
return;
crtc_state->planes_changed = true;
}
if (plane_state->crtc) {
crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
if (WARN_ON(!crtc_state))
return;
crtc_state->planes_changed = true;
}
}
static int handle_conflicting_encoders(struct drm_atomic_state *state,
bool disable_conflicting_encoders)
{
struct drm_connector_state *new_conn_state;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct drm_encoder *encoder;
unsigned int encoder_mask = 0;
int i, ret = 0;
/*
* First loop, find all newly assigned encoders from the connectors
* part of the state. If the same encoder is assigned to multiple
* connectors bail out.
*/
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_connector_helper_funcs *funcs = connector->helper_private;
struct drm_encoder *new_encoder;
if (!new_conn_state->crtc)
continue;
if (funcs->atomic_best_encoder)
new_encoder = funcs->atomic_best_encoder(connector,
state);
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
new_encoder = drm_connector_get_single_encoder(connector);
if (new_encoder) {
if (encoder_mask & drm_encoder_mask(new_encoder)) {
drm_dbg_atomic(connector->dev,
"[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
new_encoder->base.id, new_encoder->name,
connector->base.id, connector->name);
return -EINVAL;
}
encoder_mask |= drm_encoder_mask(new_encoder);
}
}
if (!encoder_mask)
return 0;
/*
* Second loop, iterate over all connectors not part of the state.
*
* If a conflicting encoder is found and disable_conflicting_encoders
* is not set, an error is returned. Userspace can provide a solution
* through the atomic ioctl.
*
* If the flag is set conflicting connectors are removed from the CRTC
* and the CRTC is disabled if no encoder is left. This preserves
* compatibility with the legacy set_config behavior.
*/
drm_connector_list_iter_begin(state->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct drm_crtc_state *crtc_state;
if (drm_atomic_get_new_connector_state(state, connector))
continue;
encoder = connector->state->best_encoder;
if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
continue;
if (!disable_conflicting_encoders) {
drm_dbg_atomic(connector->dev,
"[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
encoder->base.id, encoder->name,
connector->state->crtc->base.id,
connector->state->crtc->name,
connector->base.id, connector->name);
ret = -EINVAL;
goto out;
}
new_conn_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(new_conn_state)) {
ret = PTR_ERR(new_conn_state);
goto out;
}
drm_dbg_atomic(connector->dev,
"[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
encoder->base.id, encoder->name,
new_conn_state->crtc->base.id, new_conn_state->crtc->name,
connector->base.id, connector->name);
crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
if (ret)
goto out;
if (!crtc_state->connector_mask) {
ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
NULL);
if (ret < 0)
goto out;
crtc_state->active = false;
}
}
out:
drm_connector_list_iter_end(&conn_iter);
return ret;
}
static void
set_best_encoder(struct drm_atomic_state *state,
struct drm_connector_state *conn_state,
struct drm_encoder *encoder)
{
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
if (conn_state->best_encoder) {
/* Unset the encoder_mask in the old crtc state. */
crtc = conn_state->connector->state->crtc;
/* A NULL crtc is an error here because we should have
* duplicated a NULL best_encoder when crtc was NULL.
	 * As an exception, restoring duplicated atomic state
	 * during resume is allowed, so don't warn when
	 * best_encoder is equal to the encoder we intend to set.
*/
WARN_ON(!crtc && encoder != conn_state->best_encoder);
if (crtc) {
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
crtc_state->encoder_mask &=
~drm_encoder_mask(conn_state->best_encoder);
}
}
if (encoder) {
crtc = conn_state->crtc;
WARN_ON(!crtc);
if (crtc) {
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
crtc_state->encoder_mask |=
drm_encoder_mask(encoder);
}
}
conn_state->best_encoder = encoder;
}
static void
steal_encoder(struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *old_connector_state, *new_connector_state;
int i;
for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
struct drm_crtc *encoder_crtc;
if (new_connector_state->best_encoder != encoder)
continue;
encoder_crtc = old_connector_state->crtc;
drm_dbg_atomic(encoder->dev,
"[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
encoder->base.id, encoder->name,
encoder_crtc->base.id, encoder_crtc->name);
set_best_encoder(state, new_connector_state, NULL);
crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
crtc_state->connectors_changed = true;
return;
}
}
static int
update_connector_routing(struct drm_atomic_state *state,
struct drm_connector *connector,
struct drm_connector_state *old_connector_state,
struct drm_connector_state *new_connector_state)
{
const struct drm_connector_helper_funcs *funcs;
struct drm_encoder *new_encoder;
struct drm_crtc_state *crtc_state;
drm_dbg_atomic(connector->dev, "Updating routing for [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (old_connector_state->crtc != new_connector_state->crtc) {
if (old_connector_state->crtc) {
crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
crtc_state->connectors_changed = true;
}
if (new_connector_state->crtc) {
crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
crtc_state->connectors_changed = true;
}
}
if (!new_connector_state->crtc) {
drm_dbg_atomic(connector->dev, "Disabling [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
set_best_encoder(state, new_connector_state, NULL);
return 0;
}
crtc_state = drm_atomic_get_new_crtc_state(state,
new_connector_state->crtc);
/*
* For compatibility with legacy users, we want to make sure that
* we allow DPMS On->Off modesets on unregistered connectors. Modesets
* which would result in anything else must be considered invalid, to
* avoid turning on new displays on dead connectors.
*
* Since the connector can be unregistered at any point during an
* atomic check or commit, this is racy. But that's OK: all we care
* about is ensuring that userspace can't do anything but shut off the
* display on a connector that was destroyed after it's been notified,
* not before.
*
* Additionally, we also want to ignore connector registration when
* we're trying to restore an atomic state during system resume since
* there's a chance the connector may have been destroyed during the
	 * process, but it's better to ignore that than to cause
* drm_atomic_helper_resume() to fail.
*/
if (!state->duplicated && drm_connector_is_unregistered(connector) &&
crtc_state->active) {
drm_dbg_atomic(connector->dev,
"[CONNECTOR:%d:%s] is not registered\n",
connector->base.id, connector->name);
return -EINVAL;
}
funcs = connector->helper_private;
if (funcs->atomic_best_encoder)
new_encoder = funcs->atomic_best_encoder(connector, state);
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
new_encoder = drm_connector_get_single_encoder(connector);
if (!new_encoder) {
drm_dbg_atomic(connector->dev,
"No suitable encoder found for [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
return -EINVAL;
}
if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
drm_dbg_atomic(connector->dev,
"[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
new_encoder->base.id,
new_encoder->name,
new_connector_state->crtc->base.id,
new_connector_state->crtc->name);
return -EINVAL;
}
if (new_encoder == new_connector_state->best_encoder) {
set_best_encoder(state, new_connector_state, new_encoder);
drm_dbg_atomic(connector->dev,
"[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
connector->base.id,
connector->name,
new_encoder->base.id,
new_encoder->name,
new_connector_state->crtc->base.id,
new_connector_state->crtc->name);
return 0;
}
steal_encoder(state, new_encoder);
set_best_encoder(state, new_connector_state, new_encoder);
crtc_state->connectors_changed = true;
drm_dbg_atomic(connector->dev,
"[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
connector->base.id,
connector->name,
new_encoder->base.id,
new_encoder->name,
new_connector_state->crtc->base.id,
new_connector_state->crtc->name);
return 0;
}
static int
mode_fixup(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
int ret;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (!new_crtc_state->mode_changed &&
!new_crtc_state->connectors_changed)
continue;
drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
}
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
if (!new_conn_state->crtc || !new_conn_state->best_encoder)
continue;
new_crtc_state =
drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call ->mode_fixup twice.
*/
encoder = new_conn_state->best_encoder;
funcs = encoder->helper_private;
bridge = drm_bridge_chain_get_first_bridge(encoder);
ret = drm_atomic_bridge_chain_check(bridge,
new_crtc_state,
new_conn_state);
if (ret) {
drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
return ret;
}
if (funcs && funcs->atomic_check) {
ret = funcs->atomic_check(encoder, new_crtc_state,
new_conn_state);
if (ret) {
drm_dbg_atomic(encoder->dev,
"[ENCODER:%d:%s] check failed\n",
encoder->base.id, encoder->name);
return ret;
}
} else if (funcs && funcs->mode_fixup) {
ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
&new_crtc_state->adjusted_mode);
if (!ret) {
drm_dbg_atomic(encoder->dev,
"[ENCODER:%d:%s] fixup failed\n",
encoder->base.id, encoder->name);
return -EINVAL;
}
}
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
if (!new_crtc_state->enable)
continue;
if (!new_crtc_state->mode_changed &&
!new_crtc_state->connectors_changed)
continue;
funcs = crtc->helper_private;
if (!funcs || !funcs->mode_fixup)
continue;
ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
&new_crtc_state->adjusted_mode);
if (!ret) {
drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] fixup failed\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
}
return 0;
}
static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
struct drm_encoder *encoder,
struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct drm_bridge *bridge;
enum drm_mode_status ret;
ret = drm_encoder_mode_valid(encoder, mode);
if (ret != MODE_OK) {
drm_dbg_atomic(encoder->dev,
"[ENCODER:%d:%s] mode_valid() failed\n",
encoder->base.id, encoder->name);
return ret;
}
bridge = drm_bridge_chain_get_first_bridge(encoder);
ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
mode);
if (ret != MODE_OK) {
drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n");
return ret;
}
ret = drm_crtc_mode_valid(crtc, mode);
if (ret != MODE_OK) {
drm_dbg_atomic(encoder->dev, "[CRTC:%d:%s] mode_valid() failed\n",
crtc->base.id, crtc->name);
return ret;
}
return ret;
}
static int
mode_valid(struct drm_atomic_state *state)
{
struct drm_connector_state *conn_state;
struct drm_connector *connector;
int i;
for_each_new_connector_in_state(state, connector, conn_state, i) {
struct drm_encoder *encoder = conn_state->best_encoder;
struct drm_crtc *crtc = conn_state->crtc;
struct drm_crtc_state *crtc_state;
enum drm_mode_status mode_status;
const struct drm_display_mode *mode;
if (!crtc || !encoder)
continue;
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state)
continue;
if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
continue;
mode = &crtc_state->mode;
mode_status = mode_valid_path(connector, encoder, crtc, mode);
if (mode_status != MODE_OK)
return -EINVAL;
}
return 0;
}
/**
* drm_atomic_helper_check_modeset - validate state object for modeset changes
* @dev: DRM device
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
* This does all the CRTC and connector related computations for an atomic
* update and adds any additional connectors needed for full modesets. It calls
 * the various per-object callbacks in the following order:
*
* 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
* 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
* 3. If it's determined a modeset is needed then all connectors on the affected
* CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
* 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
* &drm_crtc_helper_funcs.mode_valid are called on the affected components.
* 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
* 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
 * This function is only called when the encoder will be part of a configured CRTC;
* it must not be used for implementing connector property validation.
* If this function is NULL, &drm_atomic_encoder_helper_funcs.mode_fixup is called
* instead.
* 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
*
* &drm_crtc_state.mode_changed is set when the input mode is changed.
* &drm_crtc_state.connectors_changed is set when a connector is added or
* removed from the CRTC. &drm_crtc_state.active_changed is set when
* &drm_crtc_state.active changes, which is used for DPMS.
* &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
* See also: drm_atomic_crtc_needs_modeset()
*
* IMPORTANT:
*
* Drivers which set &drm_crtc_state.mode_changed (e.g. in their
* &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
* without a full modeset) _must_ call this function after that change. It is
* permitted to call this function multiple times for the same update, e.g.
* when the &drm_crtc_helper_funcs.atomic_check functions depend upon the
* adjusted dotclock for fifo space allocation and watermark computation.
*
* RETURNS:
* Zero for success or -errno
*/
int
drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_connector *connector;
struct drm_connector_state *old_connector_state, *new_connector_state;
int i, ret;
unsigned int connectors_mask = 0;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
bool has_connectors =
!!new_crtc_state->connector_mask;
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
drm_dbg_atomic(dev, "[CRTC:%d:%s] mode changed\n",
crtc->base.id, crtc->name);
new_crtc_state->mode_changed = true;
}
if (old_crtc_state->enable != new_crtc_state->enable) {
drm_dbg_atomic(dev, "[CRTC:%d:%s] enable changed\n",
crtc->base.id, crtc->name);
/*
* For clarity this assignment is done here, but
* enable == 0 is only true when there are no
* connectors and a NULL mode.
*
* The other way around is true as well. enable != 0
* implies that connectors are attached and a mode is set.
*/
new_crtc_state->mode_changed = true;
new_crtc_state->connectors_changed = true;
}
if (old_crtc_state->active != new_crtc_state->active) {
drm_dbg_atomic(dev, "[CRTC:%d:%s] active changed\n",
crtc->base.id, crtc->name);
new_crtc_state->active_changed = true;
}
if (new_crtc_state->enable != has_connectors) {
drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
if (drm_dev_has_vblank(dev))
new_crtc_state->no_vblank = false;
else
new_crtc_state->no_vblank = true;
}
ret = handle_conflicting_encoders(state, false);
if (ret)
return ret;
for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
const struct drm_connector_helper_funcs *funcs = connector->helper_private;
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
/*
* This only sets crtc->connectors_changed for routing changes,
* drivers must set crtc->connectors_changed themselves when
* connector properties need to be updated.
*/
ret = update_connector_routing(state, connector,
old_connector_state,
new_connector_state);
if (ret)
return ret;
if (old_connector_state->crtc) {
new_crtc_state = drm_atomic_get_new_crtc_state(state,
old_connector_state->crtc);
if (old_connector_state->link_status !=
new_connector_state->link_status)
new_crtc_state->connectors_changed = true;
if (old_connector_state->max_requested_bpc !=
new_connector_state->max_requested_bpc)
new_crtc_state->connectors_changed = true;
}
if (funcs->atomic_check)
ret = funcs->atomic_check(connector, state);
if (ret) {
drm_dbg_atomic(dev,
"[CONNECTOR:%d:%s] driver check failed\n",
connector->base.id, connector->name);
return ret;
}
connectors_mask |= BIT(i);
}
/*
* After all the routing has been prepared we need to add in any
* connector which is itself unchanged, but whose CRTC changes its
* configuration. This must be done before calling mode_fixup in case a
* crtc only changed its mode but has the same set of connectors.
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
drm_dbg_atomic(dev,
"[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
crtc->base.id, crtc->name,
new_crtc_state->enable ? 'y' : 'n',
new_crtc_state->active ? 'y' : 'n');
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret != 0)
return ret;
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret != 0)
return ret;
}
/*
* Iterate over all connectors again, to make sure atomic_check()
* has been called on them when a modeset is forced.
*/
for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
const struct drm_connector_helper_funcs *funcs = connector->helper_private;
if (connectors_mask & BIT(i))
continue;
if (funcs->atomic_check)
ret = funcs->atomic_check(connector, state);
if (ret) {
drm_dbg_atomic(dev,
"[CONNECTOR:%d:%s] driver check failed\n",
connector->base.id, connector->name);
return ret;
}
}
/*
* Iterate over all connectors again, and add all affected bridges to
* the state.
*/
for_each_oldnew_connector_in_state(state, connector,
old_connector_state,
new_connector_state, i) {
struct drm_encoder *encoder;
encoder = old_connector_state->best_encoder;
ret = drm_atomic_add_encoder_bridges(state, encoder);
if (ret)
return ret;
encoder = new_connector_state->best_encoder;
ret = drm_atomic_add_encoder_bridges(state, encoder);
if (ret)
return ret;
}
ret = mode_valid(state);
if (ret)
return ret;
return mode_fixup(state);
}
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
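/*
 * Illustrative sketch (not part of this file): a driver with extra global
 * constraints typically calls this first from its own
 * &drm_mode_config_funcs.atomic_check implementation; foo_check_global()
 * is a hypothetical driver-specific check.
 *
 *	static int foo_atomic_check(struct drm_device *dev,
 *				    struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check_modeset(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_check_global(state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_atomic_helper_check_planes(dev, state);
 *	}
 */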
/**
* drm_atomic_helper_check_wb_encoder_state() - Check writeback encoder state
* @encoder: encoder state to check
* @conn_state: connector state to check
*
* Checks if the writeback connector state is valid, and returns an error if it
* isn't.
*
* RETURNS:
* Zero for success or -errno
*/
int
drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_writeback_job *wb_job = conn_state->writeback_job;
struct drm_property_blob *pixel_format_blob;
struct drm_framebuffer *fb;
size_t i, nformats;
u32 *formats;
if (!wb_job || !wb_job->fb)
return 0;
pixel_format_blob = wb_job->connector->pixel_formats_blob_ptr;
nformats = pixel_format_blob->length / sizeof(u32);
formats = pixel_format_blob->data;
fb = wb_job->fb;
for (i = 0; i < nformats; i++)
if (fb->format->format == formats[i])
return 0;
drm_dbg_kms(encoder->dev, "Invalid pixel format %p4cc\n", &fb->format->format);
return -EINVAL;
}
EXPORT_SYMBOL(drm_atomic_helper_check_wb_encoder_state);
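/*
 * Illustrative sketch (not part of this file): a writeback driver would
 * typically call this from the encoder's
 * &drm_encoder_helper_funcs.atomic_check hook:
 *
 *	static int foo_wb_encoder_atomic_check(struct drm_encoder *encoder,
 *					       struct drm_crtc_state *crtc_state,
 *					       struct drm_connector_state *conn_state)
 *	{
 *		return drm_atomic_helper_check_wb_encoder_state(encoder, conn_state);
 *	}
 */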
/**
* drm_atomic_helper_check_plane_state() - Check plane state for validity
* @plane_state: plane state to check
* @crtc_state: CRTC state to check
* @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
* @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
* @can_position: is it legal to position the plane such that it
* doesn't cover the entire CRTC? This will generally
* only be false for primary planes.
* @can_update_disabled: can the plane be updated while the CRTC
* is disabled?
*
* Checks that a desired plane update is valid, and updates various
* bits of derived state (clipped coordinates etc.). Drivers that provide
* their own plane handling rather than helper-provided implementations may
* still wish to call this function to avoid duplication of error checking
* code.
*
* RETURNS:
* Zero if update appears valid, error code on failure
*/
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
const struct drm_crtc_state *crtc_state,
int min_scale,
int max_scale,
bool can_position,
bool can_update_disabled)
{
struct drm_framebuffer *fb = plane_state->fb;
struct drm_rect *src = &plane_state->src;
struct drm_rect *dst = &plane_state->dst;
unsigned int rotation = plane_state->rotation;
struct drm_rect clip = {};
int hscale, vscale;
WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
*src = drm_plane_state_src(plane_state);
*dst = drm_plane_state_dest(plane_state);
if (!fb) {
plane_state->visible = false;
return 0;
}
/* crtc should only be NULL when disabling (i.e., !fb) */
if (WARN_ON(!plane_state->crtc)) {
plane_state->visible = false;
return 0;
}
if (!crtc_state->enable && !can_update_disabled) {
drm_dbg_kms(plane_state->plane->dev,
"Cannot update plane of a disabled CRTC.\n");
return -EINVAL;
}
drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
/* Check scaling */
hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
if (hscale < 0 || vscale < 0) {
drm_dbg_kms(plane_state->plane->dev,
"Invalid scaling of plane\n");
drm_rect_debug_print("src: ", &plane_state->src, true);
drm_rect_debug_print("dst: ", &plane_state->dst, false);
return -ERANGE;
}
if (crtc_state->enable)
drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
if (!plane_state->visible)
/*
* Plane isn't visible; some drivers can handle this
* so we just return success here. Drivers that can't
* (including those that use the primary plane helper's
* update function) will return an error from their
* update_plane handler.
*/
return 0;
if (!can_position && !drm_rect_equals(dst, &clip)) {
drm_dbg_kms(plane_state->plane->dev,
"Plane must cover entire CRTC\n");
drm_rect_debug_print("dst: ", dst, false);
drm_rect_debug_print("clip: ", &clip, false);
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
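/*
 * Illustrative sketch (not part of this file): a typical plane
 * &drm_plane_helper_funcs.atomic_check for hardware without scaling
 * support; the scaling limits and the can_position/can_update_disabled
 * flags are driver policy.
 *
 *	static int foo_plane_atomic_check(struct drm_plane *plane,
 *					  struct drm_atomic_state *state)
 *	{
 *		struct drm_plane_state *new_state =
 *			drm_atomic_get_new_plane_state(state, plane);
 *		struct drm_crtc_state *crtc_state;
 *
 *		if (!new_state->crtc)
 *			return 0;
 *
 *		crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
 *		if (WARN_ON(!crtc_state))
 *			return -EINVAL;
 *
 *		return drm_atomic_helper_check_plane_state(new_state, crtc_state,
 *							   DRM_PLANE_NO_SCALING,
 *							   DRM_PLANE_NO_SCALING,
 *							   false, true);
 *	}
 */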
/**
* drm_atomic_helper_check_crtc_primary_plane() - Check CRTC state for primary plane
* @crtc_state: CRTC state to check
*
* Checks that a CRTC has at least one primary plane attached to it, which is
* a requirement on some hardware. Note that this only involves the CRTC side
* of the test. To test if the primary plane is visible or if it can be updated
* without the CRTC being enabled, use drm_atomic_helper_check_plane_state() in
* the plane's atomic check.
*
* RETURNS:
* 0 if a primary plane is attached to the CRTC, or an error code otherwise
*/
int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_plane *plane;
/* needs at least one primary plane to be enabled */
drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
return 0;
}
drm_dbg_atomic(dev, "[CRTC:%d:%s] primary plane missing\n", crtc->base.id, crtc->name);
return -EINVAL;
}
EXPORT_SYMBOL(drm_atomic_helper_check_crtc_primary_plane);
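/*
 * Illustrative sketch (not part of this file): hardware that cannot scan
 * out without a primary plane can enforce this from the CRTC's
 * &drm_crtc_helper_funcs.atomic_check; skipping disabled CRTCs here is a
 * driver policy choice, not something this helper mandates.
 *
 *	static int foo_crtc_atomic_check(struct drm_crtc *crtc,
 *					 struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc_state *crtc_state =
 *			drm_atomic_get_new_crtc_state(state, crtc);
 *
 *		if (!crtc_state->enable)
 *			return 0;
 *
 *		return drm_atomic_helper_check_crtc_primary_plane(crtc_state);
 *	}
 */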
/**
* drm_atomic_helper_check_planes - validate state object for planes changes
* @dev: DRM device
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
 * This does all the plane update related checks by calling into the
* &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
* hooks provided by the driver.
*
* It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
* updated planes.
*
* RETURNS:
* Zero for success or -errno
*/
int
drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state, *old_plane_state;
int i, ret = 0;
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
WARN_ON(!drm_modeset_is_locked(&plane->mutex));
funcs = plane->helper_private;
drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
drm_atomic_helper_check_plane_damage(state, new_plane_state);
if (!funcs || !funcs->atomic_check)
continue;
ret = funcs->atomic_check(plane, state);
if (ret) {
drm_dbg_atomic(plane->dev,
"[PLANE:%d:%s] atomic driver check failed\n",
plane->base.id, plane->name);
return ret;
}
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
if (!funcs || !funcs->atomic_check)
continue;
ret = funcs->atomic_check(crtc, state);
if (ret) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] atomic driver check failed\n",
crtc->base.id, crtc->name);
return ret;
}
}
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check_planes);
/**
* drm_atomic_helper_check - validate state object
* @dev: DRM device
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
* Only CRTCs and planes have check callbacks, so for any additional (global)
* checking that a driver needs it can simply wrap that around this function.
* Drivers without such needs can directly use this as their
* &drm_mode_config_funcs.atomic_check callback.
*
* This just wraps the two parts of the state checking for planes and modeset
* state in the default order: First it calls drm_atomic_helper_check_modeset()
* and then drm_atomic_helper_check_planes(). The assumption is that the
 * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
* functions depend upon an updated adjusted_mode.clock to e.g. properly compute
* watermarks.
*
 * Note that zpos normalization will add all enabled planes to the state, which
 * might not be desired for some drivers.
 * For example, enabling or disabling a cursor plane with a fixed zpos value
 * would force all other enabled planes into the state change.
*
* RETURNS:
* Zero for success or -errno
*/
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ret;
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
if (dev->mode_config.normalize_zpos) {
ret = drm_atomic_normalize_zpos(dev, state);
if (ret)
return ret;
}
ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
return ret;
if (state->legacy_cursor_update)
state->async_update = !drm_atomic_helper_async_check(dev, state);
drm_self_refresh_helper_alter_state(state);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);
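/*
 * Illustrative sketch (not part of this file): drivers without additional
 * global checks can use the helper directly as their
 * &drm_mode_config_funcs.atomic_check callback:
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 */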
static bool
crtc_needs_disable(struct drm_crtc_state *old_state,
struct drm_crtc_state *new_state)
{
/*
	 * No new_state means the CRTC is off, so the only criterion is whether
* it's currently active or in self refresh mode.
*/
if (!new_state)
return drm_atomic_crtc_effectively_active(old_state);
/*
* We need to disable bridge(s) and CRTC if we're transitioning out of
* self-refresh and changing CRTCs at the same time, because the
* bridge tracks self-refresh status via CRTC state.
*/
if (old_state->self_refresh_active &&
old_state->crtc != new_state->crtc)
return true;
/*
* We also need to run through the crtc_funcs->disable() function if
* the CRTC is currently on, if it's transitioning to self refresh
* mode, or if it's in self refresh mode and needs to be fully
* disabled.
*/
return old_state->active ||
(old_state->self_refresh_active && !new_state->active) ||
new_state->self_refresh_active;
}
static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
/*
* Shut down everything that's in the changeset and currently
* still on. So need to check the old, saved state.
*/
if (!old_conn_state->crtc)
continue;
old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
if (new_conn_state->crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(
old_state,
new_conn_state->crtc);
else
new_crtc_state = NULL;
if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
!drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
continue;
encoder = old_conn_state->best_encoder;
/* We shouldn't get this far if we didn't previously have
		 * an encoder... but WARN_ON() rather than explode.
*/
if (WARN_ON(!encoder))
continue;
funcs = encoder->helper_private;
drm_dbg_atomic(dev, "disabling [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call disable hooks twice.
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_atomic_bridge_chain_disable(bridge, old_state);
/* Right function depends upon target state. */
if (funcs) {
if (funcs->atomic_disable)
funcs->atomic_disable(encoder, old_state);
else if (new_conn_state->crtc && funcs->prepare)
funcs->prepare(encoder);
else if (funcs->disable)
funcs->disable(encoder);
else if (funcs->dpms)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
drm_atomic_bridge_chain_post_disable(bridge, old_state);
}
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
int ret;
/* Shut down everything that needs a full modeset. */
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
continue;
funcs = crtc->helper_private;
drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
/* Right function depends upon target state. */
if (new_crtc_state->enable && funcs->prepare)
funcs->prepare(crtc);
else if (funcs->atomic_disable)
funcs->atomic_disable(crtc, old_state);
else if (funcs->disable)
funcs->disable(crtc);
else if (funcs->dpms)
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (!drm_dev_has_vblank(dev))
continue;
ret = drm_crtc_vblank_get(crtc);
/*
* Self-refresh is not a true "disable"; ensure vblank remains
* enabled.
*/
if (new_crtc_state->self_refresh_active)
WARN_ONCE(ret != 0,
"driver disabled vblank in self-refresh\n");
else
WARN_ONCE(ret != -EINVAL,
"driver forgot to call drm_crtc_vblank_off()\n");
if (ret == 0)
drm_crtc_vblank_put(crtc);
}
}
/**
* drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
* This function updates all the various legacy modeset state pointers in
* connectors, encoders and CRTCs.
*
* Drivers can use this for building their own atomic commit if they don't have
* a pure helper-based modeset implementation.
*
 * Since these updates are not synchronized with any locking, only code paths
 * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
 * legacy state filled out by this helper. De facto this means this helper and
* the legacy state pointers are only really useful for transitioning an
* existing driver to the atomic world.
*/
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
int i;
/* clear out existing links and update dpms */
for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
if (connector->encoder) {
WARN_ON(!connector->encoder->crtc);
connector->encoder->crtc = NULL;
connector->encoder = NULL;
}
crtc = new_conn_state->crtc;
if ((!crtc && old_conn_state->crtc) ||
(crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
int mode = DRM_MODE_DPMS_OFF;
if (crtc && crtc->state->active)
mode = DRM_MODE_DPMS_ON;
connector->dpms = mode;
}
}
/* set new links */
for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
if (!new_conn_state->crtc)
continue;
if (WARN_ON(!new_conn_state->best_encoder))
continue;
connector->encoder = new_conn_state->best_encoder;
connector->encoder->crtc = new_conn_state->crtc;
}
/* set legacy state in the crtc structure */
for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
struct drm_plane *primary = crtc->primary;
struct drm_plane_state *new_plane_state;
crtc->mode = new_crtc_state->mode;
crtc->enabled = new_crtc_state->enable;
new_plane_state =
drm_atomic_get_new_plane_state(old_state, primary);
if (new_plane_state && new_plane_state->crtc == crtc) {
crtc->x = new_plane_state->src_x >> 16;
crtc->y = new_plane_state->src_y >> 16;
}
}
}
EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
/**
* drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
* @state: atomic state object
*
* Updates the timestamping constants used for precise vblank timestamps
* by calling drm_calc_timestamping_constants() for all enabled crtcs in @state.
*/
void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
{
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int i;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->enable)
drm_calc_timestamping_constants(crtc,
&new_crtc_state->adjusted_mode);
}
}
EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
static void
crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
if (!new_crtc_state->mode_changed)
continue;
funcs = crtc->helper_private;
if (new_crtc_state->enable && funcs->mode_set_nofb) {
drm_dbg_atomic(dev, "modeset on [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
funcs->mode_set_nofb(crtc);
}
}
for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_display_mode *mode, *adjusted_mode;
struct drm_bridge *bridge;
if (!new_conn_state->best_encoder)
continue;
encoder = new_conn_state->best_encoder;
funcs = encoder->helper_private;
new_crtc_state = new_conn_state->crtc->state;
mode = &new_crtc_state->mode;
adjusted_mode = &new_crtc_state->adjusted_mode;
if (!new_crtc_state->mode_changed)
continue;
drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call mode_set hooks twice.
*/
if (funcs && funcs->atomic_mode_set) {
funcs->atomic_mode_set(encoder, new_crtc_state,
new_conn_state);
} else if (funcs && funcs->mode_set) {
funcs->mode_set(encoder, mode, adjusted_mode);
}
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
}
}
/**
* drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
* This function shuts down all the outputs that need to be shut down and
* prepares them (if required) with the new mode.
*
* For compatibility with legacy CRTC helpers this should be called before
* drm_atomic_helper_commit_planes(), which is what the default commit function
* does. But drivers with different needs can group the modeset commits together
* and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
*/
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
disable_outputs(dev, old_state);
drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
drm_atomic_helper_calc_timestamping_constants(old_state);
crtc_set_mode(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
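/*
 * Illustrative sketch (not part of this file): the default commit ordering
 * that a custom &drm_mode_config_helper_funcs.atomic_commit_tail would
 * reproduce, with disables before plane updates and enables after:
 *
 *	static void foo_atomic_commit_tail(struct drm_atomic_state *old_state)
 *	{
 *		struct drm_device *dev = old_state->dev;
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *		drm_atomic_helper_commit_planes(dev, old_state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *		drm_atomic_helper_commit_hw_done(old_state);
 *		drm_atomic_helper_wait_for_vblanks(dev, old_state);
 *		drm_atomic_helper_cleanup_planes(dev, old_state);
 *	}
 */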
static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
const struct drm_connector_helper_funcs *funcs;
funcs = connector->helper_private;
if (!funcs->atomic_commit)
continue;
if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
funcs->atomic_commit(connector, old_state);
}
}
}
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
* This function enables all the outputs with the new configuration which had to
* be turned off for the update.
*
* For compatibility with legacy CRTC helpers this should be called after
* drm_atomic_helper_commit_planes(), which is what the default commit function
* does. But drivers with different needs can group the modeset commits together
* and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
*/
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
/* Need to filter out CRTCs where only planes change. */
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
if (!new_crtc_state->active)
continue;
funcs = crtc->helper_private;
if (new_crtc_state->enable) {
drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
if (funcs->atomic_enable)
funcs->atomic_enable(crtc, old_state);
else if (funcs->commit)
funcs->commit(crtc);
}
}
for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
if (!new_conn_state->best_encoder)
continue;
if (!new_conn_state->crtc->state->active ||
!drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
continue;
encoder = new_conn_state->best_encoder;
funcs = encoder->helper_private;
drm_dbg_atomic(dev, "enabling [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call enable hooks twice.
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_atomic_bridge_chain_pre_enable(bridge, old_state);
if (funcs) {
if (funcs->atomic_enable)
funcs->atomic_enable(encoder, old_state);
else if (funcs->enable)
funcs->enable(encoder);
else if (funcs->commit)
funcs->commit(encoder);
}
drm_atomic_bridge_chain_enable(bridge, old_state);
}
drm_atomic_helper_commit_writebacks(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
/*
* For atomic updates which touch just a single CRTC, calculate the time of the
* next vblank, and inform all the fences of the deadline.
*/
static void set_fence_deadline(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
ktime_t vbltime = 0;
int i;
for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
ktime_t v;
if (drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
if (!new_crtc_state->active)
continue;
if (drm_crtc_next_vblank_start(crtc, &v))
continue;
if (!vbltime || ktime_before(v, vbltime))
vbltime = v;
}
/* If no CRTCs updated, then nothing to do: */
if (!vbltime)
return;
for_each_new_plane_in_state (state, plane, new_plane_state, i) {
if (!new_plane_state->fence)
continue;
dma_fence_set_deadline(new_plane_state->fence, vbltime);
}
}
/**
* drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
* @dev: DRM device
* @state: atomic state object with old state structures
* @pre_swap: If true, do an interruptible wait, and @state is the new state.
* Otherwise @state is the old state.
*
* For implicit sync, drivers should fish the exclusive fence out of the
* incoming fbs and stash it in the drm_plane_state. This is called after
* drm_atomic_helper_swap_state() so it uses the current plane state (and
* just uses the atomic state to find the changed planes).
*
* Note that @pre_swap is needed since the point where we block for fences moves
* around depending upon whether an atomic commit is blocking or
* non-blocking. For non-blocking commit all waiting needs to happen after
* drm_atomic_helper_swap_state() is called, but for blocking commits we want
* to wait **before** we do anything that can't be easily rolled back. That is
* before we call drm_atomic_helper_swap_state().
*
* Returns zero on success or < 0 if dma_fence_wait() fails.
*/
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
struct drm_atomic_state *state,
bool pre_swap)
{
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
int i, ret;
set_fence_deadline(dev, state);
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
if (!new_plane_state->fence)
continue;
WARN_ON(!new_plane_state->fb);
/*
* If waiting for fences pre-swap (i.e. in a blocking commit, where the
* wait still runs in the ioctl caller's process context), userspace can
* still interrupt the operation. Instead of blocking until the timer
* expires, make the wait interruptible.
*/
ret = dma_fence_wait(new_plane_state->fence, pre_swap);
if (ret)
return ret;
dma_fence_put(new_plane_state->fence);
new_plane_state->fence = NULL;
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
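/*
* Example: where the fences waited on above come from. Illustrative sketch
* with a hypothetical xyz_* name. For GEM-backed framebuffers, a plane's
* prepare_fb hook can delegate the fence fishing to
* drm_gem_plane_helper_prepare_fb():
*
*	static int xyz_plane_prepare_fb(struct drm_plane *plane,
*					struct drm_plane_state *new_state)
*	{
*		if (!new_state->fb)
*			return 0;
*
*		// pulls the implicit fences out of the fb's GEM objects
*		// and stashes them in new_state->fence
*		return drm_gem_plane_helper_prepare_fb(plane, new_state);
*	}
*/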
/**
* drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
* Helper to, after atomic commit, wait for vblanks on all affected
* CRTCs (ie. before cleaning up old framebuffers using
* drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
* framebuffers have actually changed to optimize for the legacy cursor and
* plane update use-case.
*
* Drivers using the nonblocking commit tracking support initialized by calling
* drm_atomic_helper_setup_commit() should look at
* drm_atomic_helper_wait_for_flip_done() as an alternative.
*/
void
drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i, ret;
unsigned int crtc_mask = 0;
/*
* Legacy cursor ioctls are completely unsynced, and userspace
* relies on that (by doing tons of cursor updates).
*/
if (old_state->legacy_cursor_update)
return;
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
if (!new_crtc_state->active)
continue;
ret = drm_crtc_vblank_get(crtc);
if (ret != 0)
continue;
crtc_mask |= drm_crtc_mask(crtc);
old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
}
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
if (!(crtc_mask & drm_crtc_mask(crtc)))
continue;
ret = wait_event_timeout(dev->vblank[i].queue,
old_state->crtcs[i].last_vblank_count !=
drm_crtc_vblank_count(crtc),
msecs_to_jiffies(100));
WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
crtc->base.id, crtc->name);
drm_crtc_vblank_put(crtc);
}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
/**
* drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
* Helper to, after atomic commit, wait for page flips on all affected
* crtcs (ie. before cleaning up old framebuffers using
* drm_atomic_helper_cleanup_planes()). Compared to
* drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
* CRTCs, assuming that cursor-only updates are signalling their completion
* immediately (or using a different path).
*
* This requires that drivers use the nonblocking commit tracking support
* initialized using drm_atomic_helper_setup_commit().
*/
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
int i;
for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
int ret;
crtc = old_state->crtcs[i].ptr;
if (!crtc || !commit)
continue;
ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
if (ret == 0)
drm_err(dev, "[CRTC:%d:%s] flip_done timed out\n",
crtc->base.id, crtc->name);
}
if (old_state->fake_commit)
complete_all(&old_state->fake_commit->flip_done);
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
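/*
* Example: a custom commit tail that waits for flip completion instead of
* vblanks. Illustrative sketch with a hypothetical xyz_* name; this mirrors
* drm_atomic_helper_commit_tail() below, but requires the tracking set up
* by drm_atomic_helper_setup_commit():
*
*	static void xyz_atomic_commit_tail(struct drm_atomic_state *old_state)
*	{
*		struct drm_device *dev = old_state->dev;
*
*		drm_atomic_helper_commit_modeset_disables(dev, old_state);
*		drm_atomic_helper_commit_planes(dev, old_state, 0);
*		drm_atomic_helper_commit_modeset_enables(dev, old_state);
*		drm_atomic_helper_commit_hw_done(old_state);
*		drm_atomic_helper_wait_for_flip_done(dev, old_state);
*		drm_atomic_helper_cleanup_planes(dev, old_state);
*	}
*/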
/**
* drm_atomic_helper_commit_tail - commit atomic update to hardware
* @old_state: atomic state object with old state structures
*
* This is the default implementation for the
* &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
* that do not support runtime_pm or do not need the CRTC to be
* enabled to perform a commit. Otherwise, see
* drm_atomic_helper_commit_tail_rpm().
*
* Note that the default ordering of how the various stages are called is
* chosen to match the legacy modeset helper library as closely as possible.
*/
void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
drm_atomic_helper_commit_modeset_disables(dev, old_state);
drm_atomic_helper_commit_planes(dev, old_state, 0);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
drm_atomic_helper_fake_vblank(old_state);
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
drm_atomic_helper_cleanup_planes(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
/**
* drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
* @old_state: atomic state object with old state structures
*
* This is an alternative implementation for the
* &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
* that support runtime_pm or need the CRTC to be enabled to perform a
* commit. Otherwise, one should use the default implementation
* drm_atomic_helper_commit_tail().
*/
void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
drm_atomic_helper_commit_modeset_disables(dev, old_state);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
drm_atomic_helper_commit_planes(dev, old_state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_fake_vblank(old_state);
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
drm_atomic_helper_cleanup_planes(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
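/*
* Example: selecting a commit-tail implementation. Illustrative sketch with
* a hypothetical xyz_* name; the helper-private pointer is consulted by
* commit_tail() below:
*
*	static const struct drm_mode_config_helper_funcs xyz_mode_config_helpers = {
*		.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
*	};
*
*	// at driver init time:
*	dev->mode_config.helper_private = &xyz_mode_config_helpers;
*/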
static void commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
const struct drm_mode_config_helper_funcs *funcs;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
ktime_t start;
s64 commit_time_ms;
unsigned int i, new_self_refresh_mask = 0;
funcs = dev->mode_config.helper_private;
/*
* We're measuring the _entire_ commit, so the time will vary depending
* on how many fences and objects are involved. For the purposes of self
* refresh, this is desirable since it'll give us an idea of how
* congested things are. This will inform our decision on how often we
* should enter self refresh after idle.
*
* These times will be averaged out in the self refresh helpers to avoid
* overreacting to one outlier frame.
*/
start = ktime_get();
drm_atomic_helper_wait_for_fences(dev, old_state, false);
drm_atomic_helper_wait_for_dependencies(old_state);
/*
* We cannot safely access new_crtc_state after
* drm_atomic_helper_commit_hw_done() so figure out which crtc's have
* self-refresh active beforehand:
*/
for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
if (new_crtc_state->self_refresh_active)
new_self_refresh_mask |= BIT(i);
if (funcs && funcs->atomic_commit_tail)
funcs->atomic_commit_tail(old_state);
else
drm_atomic_helper_commit_tail(old_state);
commit_time_ms = ktime_ms_delta(ktime_get(), start);
if (commit_time_ms > 0)
drm_self_refresh_helper_update_avg_times(old_state,
(unsigned long)commit_time_ms,
new_self_refresh_mask);
drm_atomic_helper_commit_cleanup_done(old_state);
drm_atomic_state_put(old_state);
}
static void commit_work(struct work_struct *work)
{
struct drm_atomic_state *state = container_of(work,
struct drm_atomic_state,
commit_work);
commit_tail(state);
}
/**
* drm_atomic_helper_async_check - check if state can be committed asynchronously
* @dev: DRM device
* @state: the driver state object
*
* This helper will check if it is possible to commit the state asynchronously.
* Async commits are not supposed to swap the states like normal sync commits
* but just do in-place changes on the current state.
*
* It will return 0 if the commit can happen in an asynchronous fashion or error
* if not. Note that an error just means it can't be committed asynchronously;
* if it fails, the commit should be treated like a normal synchronous commit.
*/
int drm_atomic_helper_async_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane = NULL;
struct drm_plane_state *old_plane_state = NULL;
struct drm_plane_state *new_plane_state = NULL;
const struct drm_plane_helper_funcs *funcs;
int i, ret, n_planes = 0;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state))
return -EINVAL;
}
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
n_planes++;
/* FIXME: we support only single plane updates for now */
if (n_planes != 1) {
drm_dbg_atomic(dev,
"only single plane async updates are supported\n");
return -EINVAL;
}
if (!new_plane_state->crtc ||
old_plane_state->crtc != new_plane_state->crtc) {
drm_dbg_atomic(dev,
"[PLANE:%d:%s] async update cannot change CRTC\n",
plane->base.id, plane->name);
return -EINVAL;
}
funcs = plane->helper_private;
if (!funcs->atomic_async_update) {
drm_dbg_atomic(dev,
"[PLANE:%d:%s] driver does not support async updates\n",
plane->base.id, plane->name);
return -EINVAL;
}
if (new_plane_state->fence) {
drm_dbg_atomic(dev,
"[PLANE:%d:%s] async update with a fence is not supported\n",
plane->base.id, plane->name);
return -EINVAL;
}
/*
* Don't do an async update if there is an outstanding commit modifying
* the plane. This prevents our async update's changes from getting
* overridden by a previous synchronous update's state.
*/
if (old_plane_state->commit &&
!try_wait_for_completion(&old_plane_state->commit->hw_done)) {
drm_dbg_atomic(dev,
"[PLANE:%d:%s] inflight previous commit preventing async commit\n",
plane->base.id, plane->name);
return -EBUSY;
}
ret = funcs->atomic_async_check(plane, state);
if (ret != 0)
drm_dbg_atomic(dev,
"[PLANE:%d:%s] driver async check failed\n",
plane->base.id, plane->name);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_async_check);
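/*
* Example: plane hooks that make the check above succeed. Illustrative
* sketch with hypothetical xyz_* names and an arbitrary cursor-size limit.
* Note that atomic_async_update must update plane->state in place,
* including swapping the FB pointers (see the WARN_ON_ONCE checks in
* drm_atomic_helper_async_commit() below):
*
*	static int xyz_plane_atomic_async_check(struct drm_plane *plane,
*						struct drm_atomic_state *state)
*	{
*		struct drm_plane_state *new_state =
*			drm_atomic_get_new_plane_state(state, plane);
*
*		// e.g. only allow async updates of cursor-sized buffers
*		if (new_state->crtc_w > 64 || new_state->crtc_h > 64)
*			return -EINVAL;
*
*		return 0;
*	}
*
*	static void xyz_plane_atomic_async_update(struct drm_plane *plane,
*						  struct drm_atomic_state *state)
*	{
*		struct drm_plane_state *new_state =
*			drm_atomic_get_new_plane_state(state, plane);
*
*		swap(plane->state->fb, new_state->fb);
*		plane->state->crtc_x = new_state->crtc_x;
*		plane->state->crtc_y = new_state->crtc_y;
*		plane->state->src_x = new_state->src_x;
*		plane->state->src_y = new_state->src_y;
*
*		// then program the hardware with the new buffer
*	}
*/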
/**
* drm_atomic_helper_async_commit - commit state asynchronously
* @dev: DRM device
* @state: the driver state object
*
* This function commits a state asynchronously, i.e., not vblank
* synchronized. It should be used on a state only when
* drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap
* the states like normal sync commits, but just do in-place changes on the
* current state.
*
* TODO: Implement full swap instead of doing in-place changes.
*/
void drm_atomic_helper_async_commit(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_plane *plane;
struct drm_plane_state *plane_state;
const struct drm_plane_helper_funcs *funcs;
int i;
for_each_new_plane_in_state(state, plane, plane_state, i) {
struct drm_framebuffer *new_fb = plane_state->fb;
struct drm_framebuffer *old_fb = plane->state->fb;
funcs = plane->helper_private;
funcs->atomic_async_update(plane, state);
/*
* ->atomic_async_update() is supposed to update the
* plane->state in place; make sure at least the common
* properties have been properly updated.
*/
WARN_ON_ONCE(plane->state->fb != new_fb);
WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
/*
* Make sure the FBs have been swapped so that the cleanup run on
* new_state actually cleans up the old FB.
*/
WARN_ON_ONCE(plane_state->fb != old_fb);
}
}
EXPORT_SYMBOL(drm_atomic_helper_async_commit);
/**
* drm_atomic_helper_commit - commit validated state object
* @dev: DRM device
* @state: the driver state object
* @nonblock: whether nonblocking behavior is requested.
*
* This function commits a state object that has been pre-validated with
* drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
* reservation fails. This
* function implements nonblocking commits, using
* drm_atomic_helper_setup_commit() and related functions.
*
* Committing the actual hardware state is done through the
* &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
* implementation drm_atomic_helper_commit_tail().
*
* RETURNS:
* Zero for success or -errno.
*/
int drm_atomic_helper_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock)
{
int ret;
if (state->async_update) {
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
drm_atomic_helper_async_commit(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
return 0;
}
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
INIT_WORK(&state->commit_work, commit_work);
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
if (ret)
goto err;
}
/*
* This is the point of no return - everything below never fails except
* when the hw goes bonghits. Which means we can commit the new state on
* the software side now.
*/
ret = drm_atomic_helper_swap_state(state, true);
if (ret)
goto err;
/*
* Everything below can be run asynchronously without the need to grab
* any modeset locks at all under one condition: It must be guaranteed
* that the asynchronous work has either been cancelled (if the driver
* supports it, which at least requires that the framebuffers get
* cleaned up with drm_atomic_helper_cleanup_planes()) or completed
* before the new state gets committed on the software side with
* drm_atomic_helper_swap_state().
*
* This scheme allows new atomic state updates to be prepared and
* checked in parallel to the asynchronous completion of the previous
* update. Which is important since compositors need to figure out the
* composition of the next frame right after having submitted the
* current layout.
*
* NOTE: Commit work has multiple phases, first hardware commit, then
* cleanup. We want them to overlap, hence need system_unbound_wq to
* make sure work items don't artificially stall on each other.
*/
drm_atomic_state_get(state);
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
else
commit_tail(state);
return 0;
err:
drm_atomic_helper_cleanup_planes(dev, state);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit);
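/*
* Example: most atomic drivers use this function directly as their
* &drm_mode_config_funcs.atomic_commit hook. Illustrative sketch with a
* hypothetical xyz_* name:
*
*	static const struct drm_mode_config_funcs xyz_mode_config_funcs = {
*		.fb_create = drm_gem_fb_create,
*		.atomic_check = drm_atomic_helper_check,
*		.atomic_commit = drm_atomic_helper_commit,
*	};
*/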
/**
* DOC: implementing nonblocking commit
*
* Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
* different operations against each other. Locks, especially struct
* &drm_modeset_lock, should not be held in worker threads or any other
* asynchronous context used to commit the hardware state.
*
* drm_atomic_helper_commit() implements the recommended sequence for
* nonblocking commits, using drm_atomic_helper_setup_commit() internally:
*
* 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
* need to propagate out of memory/VRAM errors to userspace, it must be called
* synchronously.
*
* 2. Synchronize with any outstanding nonblocking commit worker threads which
* might be affected by the new state update. This is handled by
* drm_atomic_helper_setup_commit().
*
* Asynchronous workers need to have sufficient parallelism to be able to run
* different atomic commits on different CRTCs in parallel. The simplest way to
* achieve this is by running them on the &system_unbound_wq work queue. Note
* that drivers are not required to split up atomic commits and run an
* individual commit in parallel - userspace is supposed to do that if it cares.
* But it might be beneficial to do that for modesets, since those necessarily
* must be done as one global operation, and enabling or disabling a CRTC can
* take a long time. But even that is not required.
*
* IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
* against all CRTCs therein. Therefore for atomic state updates which only flip
* planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
* in its atomic check code: This would prevent committing of atomic updates to
* multiple CRTCs in parallel. In general, adding additional state structures
* should be avoided as much as possible, because this reduces parallelism in
* (nonblocking) commits, both due to locking and due to commit sequencing
* requirements.
*
* 3. The software state is updated synchronously with
* drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
* locks means concurrent callers never see inconsistent state. Note that commit
* workers do not hold any locks; their access is only coordinated through
* ordering. If workers would access state only through the pointers in the
* free-standing state objects (currently not the case for any driver) then even
* multiple pending commits could be in-flight at the same time.
*
* 4. Schedule a work item to do all subsequent steps, using the split-out
* commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
* then cleaning up the framebuffers after the old framebuffer is no longer
* being displayed. The scheduled work should synchronize against other workers
* using the &drm_crtc_commit infrastructure as needed. See
* drm_atomic_helper_setup_commit() for more details.
*/
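/*
* Expressed as code, the worker scheduled in step 4 boils down to the
* sequence implemented by commit_tail() above (illustrative outline only):
*
*	drm_atomic_helper_wait_for_dependencies(old_state);
*	funcs->atomic_commit_tail(old_state);	// or drm_atomic_helper_commit_tail()
*	drm_atomic_helper_commit_cleanup_done(old_state);
*	drm_atomic_state_put(old_state);
*/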
static int stall_checks(struct drm_crtc *crtc, bool nonblock)
{
struct drm_crtc_commit *commit, *stall_commit = NULL;
bool completed = true;
int i;
long ret = 0;
spin_lock(&crtc->commit_lock);
i = 0;
list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
if (i == 0) {
completed = try_wait_for_completion(&commit->flip_done);
/*
* Userspace is not allowed to get ahead of the previous
* commit with nonblocking ones.
*/
if (!completed && nonblock) {
spin_unlock(&crtc->commit_lock);
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] busy with a previous commit\n",
crtc->base.id, crtc->name);
return -EBUSY;
}
} else if (i == 1) {
stall_commit = drm_crtc_commit_get(commit);
break;
}
i++;
}
spin_unlock(&crtc->commit_lock);
if (!stall_commit)
return 0;
/* We don't want to let commits get ahead of cleanup work too much,
* stalling on 2nd previous commit means triple-buffer won't ever stall.
*/
ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
10*HZ);
if (ret == 0)
drm_err(crtc->dev, "[CRTC:%d:%s] cleanup_done timed out\n",
crtc->base.id, crtc->name);
drm_crtc_commit_put(stall_commit);
return ret < 0 ? ret : 0;
}
static void release_crtc_commit(struct completion *completion)
{
struct drm_crtc_commit *commit = container_of(completion,
typeof(*commit),
flip_done);
drm_crtc_commit_put(commit);
}
static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
{
init_completion(&commit->flip_done);
init_completion(&commit->hw_done);
init_completion(&commit->cleanup_done);
INIT_LIST_HEAD(&commit->commit_entry);
kref_init(&commit->ref);
commit->crtc = crtc;
}
static struct drm_crtc_commit *
crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
if (crtc) {
struct drm_crtc_state *new_crtc_state;
new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
return new_crtc_state->commit;
}
if (!state->fake_commit) {
state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
if (!state->fake_commit)
return NULL;
init_commit(state->fake_commit, NULL);
}
return state->fake_commit;
}
/**
* drm_atomic_helper_setup_commit - setup possibly nonblocking commit
* @state: new modeset state to be committed
* @nonblock: whether nonblocking behavior is requested.
*
* This function prepares @state to be used by the atomic helper's support for
* nonblocking commits. Drivers using the nonblocking commit infrastructure
* should always call this function from their
* &drm_mode_config_funcs.atomic_commit hook.
*
* Drivers that need to extend the commit setup to private objects can use the
* &drm_mode_config_helper_funcs.atomic_commit_setup hook.
*
* To be able to use this support drivers need to use a few more helper
* functions. drm_atomic_helper_wait_for_dependencies() must be called before
* actually committing the hardware state, and for nonblocking commits this call
* must be placed in the async worker. See also drm_atomic_helper_swap_state()
* and its stall parameter, for when a driver's commit hooks look at the
* &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
*
* Completion of the hardware commit step must be signalled using
* drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
* to read or change any permanent software or hardware modeset state. The only
* exception is state protected by other means than &drm_modeset_lock locks.
* Only the free-standing @state with pointers to the old state structures can
* be inspected, e.g. to clean up old buffers using
* drm_atomic_helper_cleanup_planes().
*
* At the very end, before cleaning up @state drivers must call
* drm_atomic_helper_commit_cleanup_done().
*
* This is all implemented in drm_atomic_helper_commit(), giving drivers a
* complete and easy-to-use default implementation of the atomic_commit() hook.
*
* The tracking of asynchronously executed and still pending commits is done
* using the core structure &drm_crtc_commit.
*
* By default there's no need to clean up resources allocated by this function
* explicitly: drm_atomic_state_default_clear() will take care of that
* automatically.
*
* Returns:
*
* 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
* -ENOMEM on allocation failures and -EINTR when a signal is pending.
*/
int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
bool nonblock)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_connector *conn;
struct drm_connector_state *old_conn_state, *new_conn_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_crtc_commit *commit;
const struct drm_mode_config_helper_funcs *funcs;
int i, ret;
funcs = state->dev->mode_config.helper_private;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
commit = kzalloc(sizeof(*commit), GFP_KERNEL);
if (!commit)
return -ENOMEM;
init_commit(commit, crtc);
new_crtc_state->commit = commit;
ret = stall_checks(crtc, nonblock);
if (ret)
return ret;
/*
* Drivers only send out events when at least one of the current
* and new CRTC states is active. Complete right away if
* everything stays off.
*/
if (!old_crtc_state->active && !new_crtc_state->active) {
complete_all(&commit->flip_done);
continue;
}
/* Legacy cursor updates are fully unsynced. */
if (state->legacy_cursor_update) {
complete_all(&commit->flip_done);
continue;
}
if (!new_crtc_state->event) {
commit->event = kzalloc(sizeof(*commit->event),
GFP_KERNEL);
if (!commit->event)
return -ENOMEM;
new_crtc_state->event = commit->event;
}
new_crtc_state->event->base.completion = &commit->flip_done;
new_crtc_state->event->base.completion_release = release_crtc_commit;
drm_crtc_commit_get(commit);
commit->abort_completion = true;
state->crtcs[i].commit = commit;
drm_crtc_commit_get(commit);
}
for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
/*
* Userspace is not allowed to get ahead of the previous
* commit with nonblocking ones.
*/
if (nonblock && old_conn_state->commit &&
!try_wait_for_completion(&old_conn_state->commit->flip_done)) {
drm_dbg_atomic(conn->dev,
"[CONNECTOR:%d:%s] busy with a previous commit\n",
conn->base.id, conn->name);
return -EBUSY;
}
/* Always track connectors explicitly for e.g. link retraining. */
commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
if (!commit)
return -ENOMEM;
new_conn_state->commit = drm_crtc_commit_get(commit);
}
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
/*
* Userspace is not allowed to get ahead of the previous
* commit with nonblocking ones.
*/
if (nonblock && old_plane_state->commit &&
!try_wait_for_completion(&old_plane_state->commit->flip_done)) {
drm_dbg_atomic(plane->dev,
"[PLANE:%d:%s] busy with a previous commit\n",
plane->base.id, plane->name);
return -EBUSY;
}
/* Always track planes explicitly for async pageflip support. */
commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
if (!commit)
return -ENOMEM;
new_plane_state->commit = drm_crtc_commit_get(commit);
}
if (funcs && funcs->atomic_commit_setup)
return funcs->atomic_commit_setup(state);
return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
/**
* drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
* @old_state: atomic state object with old state structures
*
* This function waits for all preceding commits that touch the same CRTC as
* @old_state to both be committed to the hardware (as signalled by
* drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
* by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state;
struct drm_connector *conn;
struct drm_connector_state *old_conn_state;
int i;
long ret;
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
ret = drm_crtc_commit_wait(old_crtc_state->commit);
if (ret)
drm_err(crtc->dev,
"[CRTC:%d:%s] commit wait timed out\n",
crtc->base.id, crtc->name);
}
for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
ret = drm_crtc_commit_wait(old_conn_state->commit);
if (ret)
drm_err(conn->dev,
"[CONNECTOR:%d:%s] commit wait timed out\n",
conn->base.id, conn->name);
}
for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
ret = drm_crtc_commit_wait(old_plane_state->commit);
if (ret)
drm_err(plane->dev,
"[PLANE:%d:%s] commit wait timed out\n",
plane->base.id, plane->name);
}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
/**
* drm_atomic_helper_fake_vblank - fake VBLANK events if needed
* @old_state: atomic state object with old state structures
*
* This function walks all CRTCs and fakes VBLANK events on those with
* &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
* The primary use of this function is writeback connectors working in oneshot
* mode and faking VBLANK events. In this case they only fake the VBLANK event
* when a job is queued; any change to the pipeline that does not touch the
* connector would otherwise lead to timeouts when calling
* drm_atomic_helper_wait_for_vblanks() or
* drm_atomic_helper_wait_for_flip_done(). In addition to writeback
* connectors, this function can also fake VBLANK events for CRTCs without
* VBLANK interrupt.
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
{
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int i;
for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
unsigned long flags;
if (!new_crtc_state->no_vblank)
continue;
spin_lock_irqsave(&old_state->dev->event_lock, flags);
if (new_crtc_state->event) {
drm_crtc_send_vblank_event(crtc,
new_crtc_state->event);
new_crtc_state->event = NULL;
}
spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
}
}
EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
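/*
* Example: opting into faked VBLANK events. Illustrative sketch with a
* hypothetical xyz_* name; one common pattern (an assumption here, see e.g.
* the simple display helpers) is to set no_vblank from the CRTC
* atomic_check hook when the device has no vblank support:
*
*	static int xyz_crtc_atomic_check(struct drm_crtc *crtc,
*					 struct drm_atomic_state *state)
*	{
*		struct drm_crtc_state *crtc_state =
*			drm_atomic_get_new_crtc_state(state, crtc);
*
*		crtc_state->no_vblank = !drm_dev_has_vblank(crtc->dev);
*
*		return 0;
*	}
*/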
/**
* drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
* @old_state: atomic state object with old state structures
*
* This function is used to signal completion of the hardware commit step. After
* this step the driver is not allowed to read or change any permanent software
* or hardware modeset state. The only exception is state protected by other
* means than &drm_modeset_lock locks.
*
* Drivers should try to postpone any expensive or delayed cleanup work until
* after this function is called.
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc_commit *commit;
int i;
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
commit = new_crtc_state->commit;
if (!commit)
continue;
/*
* copy new_crtc_state->commit to old_crtc_state->commit,
* it's unsafe to touch new_crtc_state after hw_done,
* but we still need to do so in cleanup_done().
*/
if (old_crtc_state->commit)
drm_crtc_commit_put(old_crtc_state->commit);
old_crtc_state->commit = drm_crtc_commit_get(commit);
/* backend must have consumed any event by now */
WARN_ON(new_crtc_state->event);
complete_all(&commit->hw_done);
}
if (old_state->fake_commit) {
complete_all(&old_state->fake_commit->hw_done);
complete_all(&old_state->fake_commit->flip_done);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
/**
* drm_atomic_helper_commit_cleanup_done - signal completion of commit
* @old_state: atomic state object with old state structures
*
* This signals completion of the atomic update @old_state, including any
* cleanup work. If used, it must be called right before calling
* drm_atomic_state_put().
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_commit *commit;
int i;
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
commit = old_crtc_state->commit;
if (WARN_ON(!commit))
continue;
complete_all(&commit->cleanup_done);
WARN_ON(!try_wait_for_completion(&commit->hw_done));
spin_lock(&crtc->commit_lock);
list_del(&commit->commit_entry);
spin_unlock(&crtc->commit_lock);
}
if (old_state->fake_commit) {
complete_all(&old_state->fake_commit->cleanup_done);
WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
/**
* drm_atomic_helper_prepare_planes - prepare plane resources before commit
* @dev: DRM device
* @state: atomic state object with new state structures
*
* This function prepares plane state, specifically framebuffers, for the new
* configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
* is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
* any already successfully prepared framebuffer.
*
* Returns:
* 0 on success, negative error code on failure.
*/
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
int ret, i, j;
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
if (!new_conn_state->writeback_job)
continue;
ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
if (ret < 0)
return ret;
}
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
funcs = plane->helper_private;
if (funcs->prepare_fb) {
ret = funcs->prepare_fb(plane, new_plane_state);
if (ret)
goto fail_prepare_fb;
} else {
WARN_ON_ONCE(funcs->cleanup_fb);
if (!drm_core_check_feature(dev, DRIVER_GEM))
continue;
ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state);
if (ret)
goto fail_prepare_fb;
}
}
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->begin_fb_access) {
ret = funcs->begin_fb_access(plane, new_plane_state);
if (ret)
goto fail_begin_fb_access;
}
}
return 0;
fail_begin_fb_access:
for_each_new_plane_in_state(state, plane, new_plane_state, j) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (j >= i)
continue;
if (funcs->end_fb_access)
funcs->end_fb_access(plane, new_plane_state);
}
i = j; /* set i to upper limit to clean up all planes */
fail_prepare_fb:
for_each_new_plane_in_state(state, plane, new_plane_state, j) {
const struct drm_plane_helper_funcs *funcs;
if (j >= i)
continue;
funcs = plane->helper_private;
if (funcs->cleanup_fb)
funcs->cleanup_fb(plane, new_plane_state);
}
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
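/*
* Example: the hooks called above are supplied via &drm_plane_helper_funcs.
* Illustrative sketch with hypothetical xyz_* names; GEM-based drivers can
* leave prepare_fb unset and rely on the drm_gem_plane_helper_prepare_fb()
* fallback used above:
*
*	static const struct drm_plane_helper_funcs xyz_plane_helper_funcs = {
*		.atomic_check = xyz_plane_atomic_check,
*		.atomic_update = xyz_plane_atomic_update,
*		// .prepare_fb unset: the GEM fallback stashes implicit fences
*	};
*
*	drm_plane_helper_add(plane, &xyz_plane_helper_funcs);
*/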
static bool plane_crtc_active(const struct drm_plane_state *state)
{
return state->crtc && state->crtc->state->active;
}
/**
* drm_atomic_helper_commit_planes - commit plane state
* @dev: DRM device
* @old_state: atomic state object with old state structures
* @flags: flags for committing plane state
*
* This function commits the new plane state using the plane and atomic helper
* functions for planes and CRTCs. It assumes that the atomic state has already
* been pushed into the relevant object state pointers, since this step can no
* longer fail.
*
* It still requires the global state object @old_state to know which planes and
* CRTCs need to be updated, though.
*
* Note that this function does all plane updates across all CRTCs in one step.
* If the hardware can't support this approach look at
* drm_atomic_helper_commit_planes_on_crtc() instead.
*
* Plane parameters can be updated by applications while the associated CRTC is
* disabled. The DRM/KMS core will store the parameters in the plane state,
* which will be available to the driver when the CRTC is turned on. As a result
* most drivers don't need to be immediately notified of plane updates for a
* disabled CRTC.
*
* Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
* @flags in order not to receive plane update notifications related to a
* disabled CRTC. This avoids the need to manually ignore plane updates in
* driver code when the driver and/or hardware can't or just don't need to deal
* with updates on disabled CRTCs, for example when supporting runtime PM.
*
* Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
* display controllers require disabling a CRTC's planes when the CRTC is
* disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable
* call for a plane if the CRTC of the old plane state needs a modesetting
* operation. Of course, the drivers need to disable the planes in their CRTC
* disable callbacks since no one else would do that.
*
* The drm_atomic_helper_commit() default implementation doesn't set the
* ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
* This should not be copied blindly by drivers.
*/
void drm_atomic_helper_commit_planes(struct drm_device *dev,
struct drm_atomic_state *old_state,
uint32_t flags)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
int i;
bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
if (!funcs || !funcs->atomic_begin)
continue;
if (active_only && !new_crtc_state->active)
continue;
funcs->atomic_begin(crtc, old_state);
}
for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
bool disabling;
funcs = plane->helper_private;
if (!funcs)
continue;
disabling = drm_atomic_plane_disabling(old_plane_state,
new_plane_state);
if (active_only) {
/*
* Skip planes related to inactive CRTCs. If the plane
* is enabled use the state of the current CRTC. If the
* plane is being disabled use the state of the old
* CRTC to avoid skipping planes being disabled on an
* active CRTC.
*/
if (!disabling && !plane_crtc_active(new_plane_state))
continue;
if (disabling && !plane_crtc_active(old_plane_state))
continue;
}
/*
* Special-case disabling the plane if drivers support it.
*/
if (disabling && funcs->atomic_disable) {
struct drm_crtc_state *crtc_state;
crtc_state = old_plane_state->crtc->state;
if (drm_atomic_crtc_needs_modeset(crtc_state) &&
no_disable)
continue;
funcs->atomic_disable(plane, old_state);
} else if (new_plane_state->crtc || disabling) {
funcs->atomic_update(plane, old_state);
if (!disabling && funcs->atomic_enable) {
if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
funcs->atomic_enable(plane, old_state);
}
}
}
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
if (!funcs || !funcs->atomic_flush)
continue;
if (active_only && !new_crtc_state->active)
continue;
funcs->atomic_flush(crtc, old_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
/**
* drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
* @old_crtc_state: atomic state object with the old CRTC state
*
* This function commits the new plane state using the plane and atomic helper
* functions for planes on the specific CRTC. It assumes that the atomic state
* has already been pushed into the relevant object state pointers, since this
* step can no longer fail.
*
* This function is useful when plane updates should be done CRTC-by-CRTC
* instead of one global step like drm_atomic_helper_commit_planes() does.
*
* This function can only be safely used when planes are not allowed to move
* between different CRTCs, because it doesn't handle inter-CRTC
* dependencies. Callers need to ensure that either no such dependencies
* exist, or resolve them through ordering of commit calls or some other means.
*/
void
drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
{
const struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_crtc *crtc = old_crtc_state->crtc;
struct drm_atomic_state *old_state = old_crtc_state->state;
struct drm_crtc_state *new_crtc_state =
drm_atomic_get_new_crtc_state(old_state, crtc);
struct drm_plane *plane;
unsigned int plane_mask;
plane_mask = old_crtc_state->plane_mask;
plane_mask |= new_crtc_state->plane_mask;
crtc_funcs = crtc->helper_private;
if (crtc_funcs && crtc_funcs->atomic_begin)
crtc_funcs->atomic_begin(crtc, old_state);
drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
struct drm_plane_state *old_plane_state =
drm_atomic_get_old_plane_state(old_state, plane);
struct drm_plane_state *new_plane_state =
drm_atomic_get_new_plane_state(old_state, plane);
const struct drm_plane_helper_funcs *plane_funcs;
bool disabling;
plane_funcs = plane->helper_private;
if (!old_plane_state || !plane_funcs)
continue;
WARN_ON(new_plane_state->crtc &&
new_plane_state->crtc != crtc);
disabling = drm_atomic_plane_disabling(old_plane_state, new_plane_state);
if (disabling && plane_funcs->atomic_disable) {
plane_funcs->atomic_disable(plane, old_state);
} else if (new_plane_state->crtc || disabling) {
plane_funcs->atomic_update(plane, old_state);
if (!disabling && plane_funcs->atomic_enable) {
if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
plane_funcs->atomic_enable(plane, old_state);
}
}
}
if (crtc_funcs && crtc_funcs->atomic_flush)
crtc_funcs->atomic_flush(crtc, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
/**
* drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
* @old_crtc_state: atomic state object with the old CRTC state
* @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
*
* Disables all planes associated with the given CRTC. This can be
* used for instance in the CRTC helper atomic_disable callback to disable
* all planes.
*
* If the @atomic parameter is set the function calls the CRTC's
* atomic_begin hook before and atomic_flush hook after disabling the
* planes.
*
* It is a bug to call this function without having implemented the
* &drm_plane_helper_funcs.atomic_disable plane hook.
*/
void
drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
bool atomic)
{
struct drm_crtc *crtc = old_crtc_state->crtc;
const struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
struct drm_plane *plane;
if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
crtc_funcs->atomic_begin(crtc, NULL);
drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
const struct drm_plane_helper_funcs *plane_funcs =
plane->helper_private;
if (!plane_funcs)
continue;
WARN_ON(!plane_funcs->atomic_disable);
if (plane_funcs->atomic_disable)
plane_funcs->atomic_disable(plane, NULL);
}
if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
crtc_funcs->atomic_flush(crtc, NULL);
}
EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
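/*
* Example: typical use from a CRTC atomic_disable hook. Illustrative sketch
* with a hypothetical xyz_* name:
*
*	static void xyz_crtc_atomic_disable(struct drm_crtc *crtc,
*					    struct drm_atomic_state *state)
*	{
*		struct drm_crtc_state *old_crtc_state =
*			drm_atomic_get_old_crtc_state(state, crtc);
*
*		drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
*
*		// then stop scanout and power down the CRTC
*	}
*/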
/**
* drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
* This function cleans up plane state, specifically framebuffers, from the old
* configuration. Hence the old configuration must be preserved in @old_state to
* be able to call this function.
*
* This function must also be called on the new state when the atomic update
* fails at any point after calling drm_atomic_helper_prepare_planes().
*/
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
int i;
for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->end_fb_access)
funcs->end_fb_access(plane, new_plane_state);
}
for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
struct drm_plane_state *plane_state;
/*
* This might be called before swapping when commit is aborted,
* in which case we have to cleanup the new state.
*/
if (old_plane_state == plane->state)
plane_state = new_plane_state;
else
plane_state = old_plane_state;
funcs = plane->helper_private;
if (funcs->cleanup_fb)
funcs->cleanup_fb(plane, plane_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
/**
* drm_atomic_helper_swap_state - store atomic state into current sw state
* @state: atomic state
* @stall: stall for preceding commits
*
* This function stores the atomic state into the current state pointers in all
* driver objects. It should be called after all steps that could fail have
* been done and succeeded, but before the actual hardware state is committed.
*
* For cleanup and error recovery the current state for all changed objects will
* be swapped into @state.
*
* With that sequence it fits perfectly into the plane prepare/cleanup sequence:
*
* 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
*
* 2. Do any other steps that might fail.
*
* 3. Put the staged state into the current state pointers with this function.
*
* 4. Actually commit the hardware state.
*
* 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
* contains the old state. Also do any other cleanup required with that state.
*
* @stall must be set when nonblocking commits for this driver directly access
* the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
* the current atomic helpers this is almost always the case, since the helpers
* don't pass the right state structures to the callbacks.
*
* Returns:
*
* Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
* waiting for the previous commits has been interrupted.
*/
int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
bool stall)
{
int i, ret;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_crtc_commit *commit;
struct drm_private_obj *obj;
struct drm_private_state *old_obj_state, *new_obj_state;
if (stall) {
/*
* We have to stall for hw_done here before
* drm_atomic_helper_wait_for_dependencies() because flip
* depth > 1 is not yet supported by all drivers. As long as
* obj->state is directly dereferenced anywhere in the drivers
* atomic_commit_tail function, then it's unsafe to swap state
* before drm_atomic_helper_commit_hw_done() is called.
*/
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
commit = old_crtc_state->commit;
if (!commit)
continue;
ret = wait_for_completion_interruptible(&commit->hw_done);
if (ret)
return ret;
}
for_each_old_connector_in_state(state, connector, old_conn_state, i) {
commit = old_conn_state->commit;
if (!commit)
continue;
ret = wait_for_completion_interruptible(&commit->hw_done);
if (ret)
return ret;
}
for_each_old_plane_in_state(state, plane, old_plane_state, i) {
commit = old_plane_state->commit;
if (!commit)
continue;
ret = wait_for_completion_interruptible(&commit->hw_done);
if (ret)
return ret;
}
}
for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
WARN_ON(connector->state != old_conn_state);
old_conn_state->state = state;
new_conn_state->state = NULL;
state->connectors[i].state = old_conn_state;
connector->state = new_conn_state;
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
WARN_ON(crtc->state != old_crtc_state);
old_crtc_state->state = state;
new_crtc_state->state = NULL;
state->crtcs[i].state = old_crtc_state;
crtc->state = new_crtc_state;
if (new_crtc_state->commit) {
spin_lock(&crtc->commit_lock);
list_add(&new_crtc_state->commit->commit_entry,
&crtc->commit_list);
spin_unlock(&crtc->commit_lock);
new_crtc_state->commit->event = NULL;
}
}
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
WARN_ON(plane->state != old_plane_state);
old_plane_state->state = state;
new_plane_state->state = NULL;
state->planes[i].state = old_plane_state;
plane->state = new_plane_state;
}
for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
WARN_ON(obj->state != old_obj_state);
old_obj_state->state = state;
new_obj_state->state = NULL;
state->private_objs[i].state = old_obj_state;
obj->state = new_obj_state;
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_swap_state);
/**
* drm_atomic_helper_update_plane - Helper for primary plane update using atomic
* @plane: plane object to update
* @crtc: CRTC the plane will be displayed on
* @fb: framebuffer to flip onto plane
* @crtc_x: x offset of primary plane on @crtc
* @crtc_y: y offset of primary plane on @crtc
* @crtc_w: width of primary plane rectangle on @crtc
* @crtc_h: height of primary plane rectangle on @crtc
* @src_x: x offset of @fb for panning
* @src_y: y offset of @fb for panning
* @src_w: width of source rectangle in @fb
* @src_h: height of source rectangle in @fb
* @ctx: lock acquire context
*
* Provides a default plane update handler using the atomic driver interface.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_atomic_helper_update_plane(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_atomic_state *state;
struct drm_plane_state *plane_state;
int ret = 0;
state = drm_atomic_state_alloc(plane->dev);
if (!state)
return -ENOMEM;
state->acquire_ctx = ctx;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto fail;
}
ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(plane_state, fb);
plane_state->crtc_x = crtc_x;
plane_state->crtc_y = crtc_y;
plane_state->crtc_w = crtc_w;
plane_state->crtc_h = crtc_h;
plane_state->src_x = src_x;
plane_state->src_y = src_y;
plane_state->src_w = src_w;
plane_state->src_h = src_h;
if (plane == crtc->cursor)
state->legacy_cursor_update = true;
ret = drm_atomic_commit(state);
fail:
drm_atomic_state_put(state);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_update_plane);
/**
* drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
* @plane: plane to disable
* @ctx: lock acquire context
*
* Provides a default plane disable handler using the atomic driver interface.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_atomic_helper_disable_plane(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_atomic_state *state;
struct drm_plane_state *plane_state;
int ret = 0;
state = drm_atomic_state_alloc(plane->dev);
if (!state)
return -ENOMEM;
state->acquire_ctx = ctx;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto fail;
}
if (plane_state->crtc && plane_state->crtc->cursor == plane)
plane_state->state->legacy_cursor_update = true;
ret = __drm_atomic_helper_disable_plane(plane, plane_state);
if (ret != 0)
goto fail;
ret = drm_atomic_commit(state);
fail:
drm_atomic_state_put(state);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
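/*
* Example: the two helpers above serve as the legacy plane entry points for
* atomic drivers. Illustrative sketch with a hypothetical xyz_* name:
*
*	static const struct drm_plane_funcs xyz_plane_funcs = {
*		.update_plane = drm_atomic_helper_update_plane,
*		.disable_plane = drm_atomic_helper_disable_plane,
*		.destroy = drm_plane_cleanup,
*		.reset = drm_atomic_helper_plane_reset,
*		.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
*		.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
*	};
*/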
/**
* drm_atomic_helper_set_config - set a new config from userspace
* @set: mode set configuration
* @ctx: lock acquisition context
*
* Provides a default CRTC set_config handler using the atomic driver interface.
*
* NOTE: For backwards compatibility with old userspace this automatically
* resets the "link-status" property to GOOD, to force any link
* re-training. The SETCRTC ioctl does not define whether an update needs
* a full modeset or just a plane update, hence we're allowed to do
* that. See also drm_connector_set_link_status_property().
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
*/
int drm_atomic_helper_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_atomic_state *state;
struct drm_crtc *crtc = set->crtc;
int ret = 0;
state = drm_atomic_state_alloc(crtc->dev);
if (!state)
return -ENOMEM;
state->acquire_ctx = ctx;
ret = __drm_atomic_helper_set_config(set, state);
if (ret != 0)
goto fail;
ret = handle_conflicting_encoders(state, true);
if (ret)
goto fail;
ret = drm_atomic_commit(state);
fail:
drm_atomic_state_put(state);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_set_config);
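/*
* Example: wired into &drm_crtc_funcs alongside the other atomic helpers.
* Illustrative sketch with a hypothetical xyz_* name:
*
*	static const struct drm_crtc_funcs xyz_crtc_funcs = {
*		.set_config = drm_atomic_helper_set_config,
*		.page_flip = drm_atomic_helper_page_flip,
*		.reset = drm_atomic_helper_crtc_reset,
*		.destroy = drm_crtc_cleanup,
*		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
*		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
*	};
*/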
/**
* drm_atomic_helper_disable_all - disable all currently active outputs
* @dev: DRM device
* @ctx: lock acquisition context
*
* Loops through all connectors, finding those that aren't turned off and then
* turns them off by setting their DPMS mode to OFF and deactivating the CRTC
* that they are connected to.
*
* This is used for example in suspend/resume to disable all currently active
* functions when suspending. If you just want to shut down everything at e.g.
* driver unload, look at drm_atomic_helper_shutdown().
*
* Note that if callers haven't already acquired all modeset locks this might
* return -EDEADLK, which must be handled by calling drm_modeset_backoff().
*
* Returns:
* 0 on success or a negative error code on failure.
*
* See also:
* drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
* drm_atomic_helper_shutdown().
*/
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_atomic_state *state;
struct drm_connector_state *conn_state;
struct drm_connector *conn;
struct drm_plane_state *plane_state;
struct drm_plane *plane;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int ret, i;
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
state->acquire_ctx = ctx;
drm_for_each_crtc(crtc, dev) {
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto free;
}
crtc_state->active = false;
ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
if (ret < 0)
goto free;
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret < 0)
goto free;
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret < 0)
goto free;
}
for_each_new_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
if (ret < 0)
goto free;
}
for_each_new_plane_in_state(state, plane, plane_state, i) {
ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
if (ret < 0)
goto free;
drm_atomic_set_fb_for_plane(plane_state, NULL);
}
ret = drm_atomic_commit(state);
free:
drm_atomic_state_put(state);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
/**
* drm_atomic_helper_shutdown - shut down all CRTCs
* @dev: DRM device
*
* This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
* suspend should instead be handled with drm_atomic_helper_suspend(), since
* that also takes a snapshot of the modeset state to be restored on resume.
*
* This is just a convenience wrapper around drm_atomic_helper_disable_all(),
* and it is the atomic version of drm_helper_force_disable_all().
*/
void drm_atomic_helper_shutdown(struct drm_device *dev)
{
struct drm_modeset_acquire_ctx ctx;
int ret;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
ret = drm_atomic_helper_disable_all(dev, &ctx);
if (ret)
drm_err(dev,
"Disabling all crtc's during unload failed with %i\n",
ret);
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
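/*
* Example: typical use from a platform driver's remove and shutdown
* callbacks. Illustrative sketch with hypothetical xyz_* names:
*
*	static void xyz_platform_remove(struct platform_device *pdev)
*	{
*		struct drm_device *drm = platform_get_drvdata(pdev);
*
*		drm_dev_unregister(drm);
*		drm_atomic_helper_shutdown(drm);
*	}
*
*	static void xyz_platform_shutdown(struct platform_device *pdev)
*	{
*		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
*	}
*/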
/**
* drm_atomic_helper_duplicate_state - duplicate an atomic state object
* @dev: DRM device
* @ctx: lock acquisition context
*
* Makes a copy of the current atomic state by looping over all objects and
* duplicating their respective states. This is used for example by suspend/
* resume support code to save the state prior to suspend such that it can
* be restored upon resume.
*
* Note that this treats atomic state as persistent between save and restore.
* Drivers must make sure that this is possible and won't result in confusion
* or erroneous behaviour.
*
* Note that if callers haven't already acquired all modeset locks this might
* return -EDEADLK, which must be handled by calling drm_modeset_backoff().
*
* Returns:
* A pointer to the copy of the atomic state object on success or an
* ERR_PTR()-encoded error code on failure.
*
* See also:
* drm_atomic_helper_suspend(), drm_atomic_helper_resume()
*/
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_atomic_state *state;
struct drm_connector *conn;
struct drm_connector_list_iter conn_iter;
struct drm_plane *plane;
struct drm_crtc *crtc;
int err = 0;
state = drm_atomic_state_alloc(dev);
if (!state)
return ERR_PTR(-ENOMEM);
state->acquire_ctx = ctx;
state->duplicated = true;
drm_for_each_crtc(crtc, dev) {
struct drm_crtc_state *crtc_state;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
err = PTR_ERR(crtc_state);
goto free;
}
}
drm_for_each_plane(plane, dev) {
struct drm_plane_state *plane_state;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
err = PTR_ERR(plane_state);
goto free;
}
}
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(conn, &conn_iter) {
struct drm_connector_state *conn_state;
conn_state = drm_atomic_get_connector_state(state, conn);
if (IS_ERR(conn_state)) {
err = PTR_ERR(conn_state);
drm_connector_list_iter_end(&conn_iter);
goto free;
}
}
drm_connector_list_iter_end(&conn_iter);
/* clear the acquire context so that it isn't accidentally reused */
state->acquire_ctx = NULL;
free:
if (err < 0) {
drm_atomic_state_put(state);
state = ERR_PTR(err);
}
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
/**
* drm_atomic_helper_suspend - subsystem-level suspend helper
* @dev: DRM device
*
* Duplicates the current atomic state, disables all active outputs and then
* returns a pointer to the original atomic state to the caller. Drivers can
* pass this pointer to the drm_atomic_helper_resume() helper upon resume to
* restore the output configuration that was active at the time the system
* entered suspend.
*
* Note that it is potentially unsafe to use this. The atomic state object
* returned by this function is assumed to be persistent. Drivers must ensure
* that this holds true. Before calling this function, drivers must make sure
* to suspend fbdev emulation so that nothing can be using the device.
*
* Returns:
* A pointer to a copy of the state before suspend on success or an ERR_PTR()-
* encoded error code on failure. Drivers should store the returned atomic
* state object and pass it to the drm_atomic_helper_resume() helper upon
* resume.
*
* See also:
* drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
* drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
*/
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
{
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
int err;
/* This can never be returned, but it makes the compiler happy */
state = ERR_PTR(-EINVAL);
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
state = drm_atomic_helper_duplicate_state(dev, &ctx);
if (IS_ERR(state))
goto unlock;
err = drm_atomic_helper_disable_all(dev, &ctx);
if (err < 0) {
drm_atomic_state_put(state);
state = ERR_PTR(err);
goto unlock;
}
unlock:
DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
if (err)
return ERR_PTR(err);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);
/**
* drm_atomic_helper_commit_duplicated_state - commit duplicated state
* @state: duplicated atomic state to commit
* @ctx: pointer to acquire_ctx to use for commit.
*
* The state returned by drm_atomic_helper_duplicate_state() and
* drm_atomic_helper_suspend() is partially invalid, and needs to
* be fixed up before commit.
*
* Returns:
* 0 on success or a negative error code on failure.
*
* See also:
* drm_atomic_helper_suspend()
*/
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
int i, ret;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
state->acquire_ctx = ctx;
for_each_new_plane_in_state(state, plane, new_plane_state, i)
state->planes[i].old_state = plane->state;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
state->crtcs[i].old_state = crtc->state;
for_each_new_connector_in_state(state, connector, new_conn_state, i)
state->connectors[i].old_state = connector->state;
ret = drm_atomic_commit(state);
state->acquire_ctx = NULL;
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
/**
* drm_atomic_helper_resume - subsystem-level resume helper
* @dev: DRM device
* @state: atomic state to resume to
*
* Calls drm_mode_config_reset() to synchronize hardware and software states,
* grabs all modeset locks and commits the atomic state object. This can be
* used in conjunction with the drm_atomic_helper_suspend() helper to
* implement suspend/resume for drivers that support atomic mode-setting.
*
* Returns:
* 0 on success or a negative error code on failure.
*
* See also:
* drm_atomic_helper_suspend()
*/
int drm_atomic_helper_resume(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_modeset_acquire_ctx ctx;
int err;
drm_mode_config_reset(dev);
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
drm_atomic_state_put(state);
return err;
}
EXPORT_SYMBOL(drm_atomic_helper_resume);
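/*
 * Illustrative sketch (not part of this file): pairing the suspend and
 * resume helpers in system PM callbacks. The "example_priv" structure and
 * its fields are hypothetical; real drivers also suspend fbdev emulation
 * around this, as noted in the drm_atomic_helper_suspend() documentation.
 *
 *	static int example_pm_suspend(struct device *dev)
 *	{
 *		struct example_priv *priv = dev_get_drvdata(dev);
 *		struct drm_atomic_state *state;
 *
 *		state = drm_atomic_helper_suspend(priv->drm);
 *		if (IS_ERR(state))
 *			return PTR_ERR(state);
 *		priv->suspend_state = state;
 *		return 0;
 *	}
 *
 *	static int example_pm_resume(struct device *dev)
 *	{
 *		struct example_priv *priv = dev_get_drvdata(dev);
 *
 *		return drm_atomic_helper_resume(priv->drm, priv->suspend_state);
 *	}
 */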
static int page_flip_common(struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags)
{
struct drm_plane *plane = crtc->primary;
struct drm_plane_state *plane_state;
struct drm_crtc_state *crtc_state;
int ret = 0;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
crtc_state->event = event;
crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
if (ret != 0)
return ret;
drm_atomic_set_fb_for_plane(plane_state, fb);
/* Make sure we don't accidentally do a full modeset. */
state->allow_modeset = false;
if (!crtc_state->active) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] disabled, rejecting legacy flip\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
return ret;
}
/**
* drm_atomic_helper_page_flip - execute a legacy page flip
* @crtc: DRM CRTC
* @fb: DRM framebuffer
* @event: optional DRM event to signal upon completion
* @flags: flip flags for non-vblank sync'ed updates
* @ctx: lock acquisition context
*
* Provides a default &drm_crtc_funcs.page_flip implementation
* using the atomic driver interface.
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
*
* See also:
* drm_atomic_helper_page_flip_target()
*/
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_plane *plane = crtc->primary;
struct drm_atomic_state *state;
int ret = 0;
state = drm_atomic_state_alloc(plane->dev);
if (!state)
return -ENOMEM;
state->acquire_ctx = ctx;
ret = page_flip_common(state, crtc, fb, event, flags);
if (ret != 0)
goto fail;
ret = drm_atomic_nonblocking_commit(state);
fail:
drm_atomic_state_put(state);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip);
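/*
 * Illustrative sketch (not part of this file): atomic drivers typically
 * plug this helper straight into their CRTC funcs table ("example_*" is a
 * hypothetical name):
 *
 *	static const struct drm_crtc_funcs example_crtc_funcs = {
 *		.set_config = drm_atomic_helper_set_config,
 *		.page_flip = drm_atomic_helper_page_flip,
 *		.reset = drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 *	};
 */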
/**
 * drm_atomic_helper_page_flip_target - do a page flip on a target vblank period
* @crtc: DRM CRTC
* @fb: DRM framebuffer
* @event: optional DRM event to signal upon completion
* @flags: flip flags for non-vblank sync'ed updates
 * @target: the target vblank period when the flip should take effect
* @ctx: lock acquisition context
*
* Provides a default &drm_crtc_funcs.page_flip_target implementation.
 * Similar to drm_atomic_helper_page_flip(), but with an extra parameter to
 * specify the target vblank period during which the flip should take effect.
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
*/
int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags,
uint32_t target,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_plane *plane = crtc->primary;
struct drm_atomic_state *state;
struct drm_crtc_state *crtc_state;
int ret = 0;
state = drm_atomic_state_alloc(plane->dev);
if (!state)
return -ENOMEM;
state->acquire_ctx = ctx;
ret = page_flip_common(state, crtc, fb, event, flags);
if (ret != 0)
goto fail;
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(!crtc_state)) {
ret = -EINVAL;
goto fail;
}
crtc_state->target_vblank = target;
ret = drm_atomic_nonblocking_commit(state);
fail:
drm_atomic_state_put(state);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
/**
* drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
* the input end of a bridge
* @bridge: bridge control structure
* @bridge_state: new bridge state
* @crtc_state: new CRTC state
* @conn_state: new connector state
* @output_fmt: tested output bus format
* @num_input_fmts: will contain the size of the returned array
*
* This helper is a pluggable implementation of the
* &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
* modify the bus configuration between their input and their output. It
* returns an array of input formats with a single element set to @output_fmt.
*
 * Returns:
 * A valid format array of size @num_input_fmts, or NULL if the allocation
 * failed.
*/
u32 *
drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
u32 output_fmt,
unsigned int *num_input_fmts)
{
u32 *input_fmts;
input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
if (!input_fmts) {
*num_input_fmts = 0;
return NULL;
}
*num_input_fmts = 1;
input_fmts[0] = output_fmt;
return input_fmts;
}
EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
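/*
 * Illustrative sketch (not part of this file): a bridge that doesn't touch
 * the bus format can use this helper directly in its funcs table
 * ("example_*" is a hypothetical name):
 *
 *	static const struct drm_bridge_funcs example_bridge_funcs = {
 *		.atomic_reset = drm_atomic_helper_bridge_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
 *		.atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
 *	};
 */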
| linux-master | drivers/gpu/drm/drm_atomic_helper.c |
/*
* Copyright (c) 2016 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <linux/export.h>
#include <linux/uaccess.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_property.h>
#include "drm_crtc_internal.h"
/**
* DOC: overview
*
* Properties as represented by &drm_property are used to extend the modeset
 * interface exposed to userspace. For the atomic modeset IOCTL, properties are
* even the only way to transport metadata about the desired new modeset
* configuration from userspace to the kernel. Properties have a well-defined
* value range, which is enforced by the drm core. See the documentation of the
* flags member of &struct drm_property for an overview of the different
* property types and ranges.
*
* Properties don't store the current value directly, but need to be
* instantiated by attaching them to a &drm_mode_object with
* drm_object_attach_property().
*
* Property values are only 64bit. To support bigger piles of data (like gamma
* tables, color correction matrices or large structures) a property can instead
* point at a &drm_property_blob with that additional data.
*
 * Properties are defined by their symbolic name; userspace must keep a
 * per-object mapping from those names to the property ID used in the atomic
 * IOCTL and in the get/set property IOCTLs.
*/
static bool drm_property_flags_valid(u32 flags)
{
u32 legacy_type = flags & DRM_MODE_PROP_LEGACY_TYPE;
u32 ext_type = flags & DRM_MODE_PROP_EXTENDED_TYPE;
/* Reject undefined/deprecated flags */
if (flags & ~(DRM_MODE_PROP_LEGACY_TYPE |
DRM_MODE_PROP_EXTENDED_TYPE |
DRM_MODE_PROP_IMMUTABLE |
DRM_MODE_PROP_ATOMIC))
return false;
/* We want either a legacy type or an extended type, but not both */
if (!legacy_type == !ext_type)
return false;
/* Only one legacy type at a time please */
if (legacy_type && !is_power_of_2(legacy_type))
return false;
return true;
}
/**
* drm_property_create - create a new property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
* @num_values: number of pre-defined values
*
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property(). The returned property object must
* be freed with drm_property_destroy(), which is done automatically when
* calling drm_mode_config_cleanup().
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create(struct drm_device *dev,
u32 flags, const char *name,
int num_values)
{
struct drm_property *property = NULL;
int ret;
if (WARN_ON(!drm_property_flags_valid(flags)))
return NULL;
if (WARN_ON(strlen(name) >= DRM_PROP_NAME_LEN))
return NULL;
property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
if (!property)
return NULL;
property->dev = dev;
if (num_values) {
property->values = kcalloc(num_values, sizeof(uint64_t),
GFP_KERNEL);
if (!property->values)
goto fail;
}
ret = drm_mode_object_add(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
if (ret)
goto fail;
property->flags = flags;
property->num_values = num_values;
INIT_LIST_HEAD(&property->enum_list);
strscpy_pad(property->name, name, DRM_PROP_NAME_LEN);
list_add_tail(&property->head, &dev->mode_config.property_list);
return property;
fail:
kfree(property->values);
kfree(property);
return NULL;
}
EXPORT_SYMBOL(drm_property_create);
/**
* drm_property_create_enum - create a new enumeration property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
* @props: enumeration lists with property values
* @num_values: number of pre-defined values
*
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property(). The returned property object must
* be freed with drm_property_destroy(), which is done automatically when
* calling drm_mode_config_cleanup().
*
* Userspace is only allowed to set one of the predefined values for enumeration
* properties.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_enum(struct drm_device *dev,
u32 flags, const char *name,
const struct drm_prop_enum_list *props,
int num_values)
{
struct drm_property *property;
int i, ret;
flags |= DRM_MODE_PROP_ENUM;
property = drm_property_create(dev, flags, name, num_values);
if (!property)
return NULL;
for (i = 0; i < num_values; i++) {
ret = drm_property_add_enum(property,
props[i].type,
props[i].name);
if (ret) {
drm_property_destroy(dev, property);
return NULL;
}
}
return property;
}
EXPORT_SYMBOL(drm_property_create_enum);
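/*
 * Illustrative sketch (not part of this file): creating an enumeration
 * property and attaching it to a plane. The "EXAMPLE_MODE" name and its
 * values are made up for illustration.
 *
 *	static const struct drm_prop_enum_list example_mode_list[] = {
 *		{ 0, "off" },
 *		{ 1, "adaptive" },
 *		{ 2, "forced" },
 *	};
 *
 *	prop = drm_property_create_enum(dev, 0, "EXAMPLE_MODE",
 *					example_mode_list,
 *					ARRAY_SIZE(example_mode_list));
 *	if (!prop)
 *		return -ENOMEM;
 *	drm_object_attach_property(&plane->base, prop, 0);
 */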
/**
* drm_property_create_bitmask - create a new bitmask property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
* @props: enumeration lists with property bitflags
* @num_props: size of the @props array
* @supported_bits: bitmask of all supported enumeration values
*
* This creates a new bitmask drm property which can then be attached to a drm
* object with drm_object_attach_property(). The returned property object must
* be freed with drm_property_destroy(), which is done automatically when
* calling drm_mode_config_cleanup().
*
 * Compared to plain enumeration properties, userspace is allowed to set any
 * or'ed-together combination of the predefined property bitflag values.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
u32 flags, const char *name,
const struct drm_prop_enum_list *props,
int num_props,
uint64_t supported_bits)
{
struct drm_property *property;
int i, ret;
int num_values = hweight64(supported_bits);
flags |= DRM_MODE_PROP_BITMASK;
property = drm_property_create(dev, flags, name, num_values);
if (!property)
return NULL;
for (i = 0; i < num_props; i++) {
if (!(supported_bits & (1ULL << props[i].type)))
continue;
ret = drm_property_add_enum(property,
props[i].type,
props[i].name);
if (ret) {
drm_property_destroy(dev, property);
return NULL;
}
}
return property;
}
EXPORT_SYMBOL(drm_property_create_bitmask);
static struct drm_property *property_create_range(struct drm_device *dev,
u32 flags, const char *name,
uint64_t min, uint64_t max)
{
struct drm_property *property;
property = drm_property_create(dev, flags, name, 2);
if (!property)
return NULL;
property->values[0] = min;
property->values[1] = max;
return property;
}
/**
* drm_property_create_range - create a new unsigned ranged property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
* @min: minimum value of the property
* @max: maximum value of the property
*
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property(). The returned property object must
* be freed with drm_property_destroy(), which is done automatically when
* calling drm_mode_config_cleanup().
*
 * Userspace is allowed to set any unsigned integer value in the [min, max]
 * range, inclusive.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_range(struct drm_device *dev,
u32 flags, const char *name,
uint64_t min, uint64_t max)
{
return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
name, min, max);
}
EXPORT_SYMBOL(drm_property_create_range);
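/*
 * Illustrative sketch (not part of this file): a range property for a
 * hypothetical 0-255 "EXAMPLE_BRIGHTNESS" control on a connector, with an
 * initial value of 128:
 *
 *	prop = drm_property_create_range(dev, 0, "EXAMPLE_BRIGHTNESS", 0, 255);
 *	if (!prop)
 *		return -ENOMEM;
 *	drm_object_attach_property(&connector->base, prop, 128);
 */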
/**
* drm_property_create_signed_range - create a new signed ranged property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
* @min: minimum value of the property
* @max: maximum value of the property
*
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property(). The returned property object must
* be freed with drm_property_destroy(), which is done automatically when
* calling drm_mode_config_cleanup().
*
 * Userspace is allowed to set any signed integer value in the [min, max]
 * range, inclusive.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
u32 flags, const char *name,
int64_t min, int64_t max)
{
return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
name, I642U64(min), I642U64(max));
}
EXPORT_SYMBOL(drm_property_create_signed_range);
/**
* drm_property_create_object - create a new object property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
* @type: object type from DRM_MODE_OBJECT_* defines
*
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property(). The returned property object must
* be freed with drm_property_destroy(), which is done automatically when
* calling drm_mode_config_cleanup().
*
 * Userspace is only allowed to set this to the object ID of a mode object
 * with the given @type. Only useful for atomic properties, which is enforced.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_object(struct drm_device *dev,
u32 flags, const char *name,
uint32_t type)
{
struct drm_property *property;
flags |= DRM_MODE_PROP_OBJECT;
if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC)))
return NULL;
property = drm_property_create(dev, flags, name, 1);
if (!property)
return NULL;
property->values[0] = type;
return property;
}
EXPORT_SYMBOL(drm_property_create_object);
/**
* drm_property_create_bool - create a new boolean property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
*
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property(). The returned property object must
* be freed with drm_property_destroy(), which is done automatically when
* calling drm_mode_config_cleanup().
*
* This is implemented as a ranged property with only {0, 1} as valid values.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_bool(struct drm_device *dev,
u32 flags, const char *name)
{
return drm_property_create_range(dev, flags, name, 0, 1);
}
EXPORT_SYMBOL(drm_property_create_bool);
/**
* drm_property_add_enum - add a possible value to an enumeration property
* @property: enumeration property to change
* @value: value of the new enumeration
* @name: symbolic name of the new enumeration
*
 * This function adds an enumeration entry to a property.
 *
 * Its use is deprecated; drivers should use one of the more specific helpers
 * to directly create the property with all enumerations already attached.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_property_add_enum(struct drm_property *property,
uint64_t value, const char *name)
{
struct drm_property_enum *prop_enum;
int index = 0;
if (WARN_ON(strlen(name) >= DRM_PROP_NAME_LEN))
return -EINVAL;
if (WARN_ON(!drm_property_type_is(property, DRM_MODE_PROP_ENUM) &&
!drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
return -EINVAL;
/*
* Bitmask enum properties have the additional constraint of values
* from 0 to 63
*/
if (WARN_ON(drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
value > 63))
return -EINVAL;
list_for_each_entry(prop_enum, &property->enum_list, head) {
if (WARN_ON(prop_enum->value == value))
return -EINVAL;
index++;
}
if (WARN_ON(index >= property->num_values))
return -EINVAL;
prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
if (!prop_enum)
return -ENOMEM;
strscpy_pad(prop_enum->name, name, DRM_PROP_NAME_LEN);
prop_enum->value = value;
property->values[index] = value;
list_add_tail(&prop_enum->head, &property->enum_list);
return 0;
}
EXPORT_SYMBOL(drm_property_add_enum);
/**
* drm_property_destroy - destroy a drm property
* @dev: drm device
* @property: property to destroy
*
* This function frees a property including any attached resources like
* enumeration values.
*/
void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
{
struct drm_property_enum *prop_enum, *pt;
list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
list_del(&prop_enum->head);
kfree(prop_enum);
}
if (property->num_values)
kfree(property->values);
drm_mode_object_unregister(dev, &property->base);
list_del(&property->head);
kfree(property);
}
EXPORT_SYMBOL(drm_property_destroy);
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_get_property *out_resp = data;
struct drm_property *property;
int enum_count = 0;
int value_count = 0;
int i, copied;
struct drm_property_enum *prop_enum;
struct drm_mode_property_enum __user *enum_ptr;
uint64_t __user *values_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
property = drm_property_find(dev, file_priv, out_resp->prop_id);
if (!property)
return -ENOENT;
strscpy_pad(out_resp->name, property->name, DRM_PROP_NAME_LEN);
out_resp->flags = property->flags;
value_count = property->num_values;
values_ptr = u64_to_user_ptr(out_resp->values_ptr);
for (i = 0; i < value_count; i++) {
if (i < out_resp->count_values &&
put_user(property->values[i], values_ptr + i)) {
return -EFAULT;
}
}
out_resp->count_values = value_count;
copied = 0;
enum_ptr = u64_to_user_ptr(out_resp->enum_blob_ptr);
if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_list, head) {
enum_count++;
if (out_resp->count_enum_blobs < enum_count)
continue;
if (copy_to_user(&enum_ptr[copied].value,
&prop_enum->value, sizeof(uint64_t)))
return -EFAULT;
if (copy_to_user(&enum_ptr[copied].name,
&prop_enum->name, DRM_PROP_NAME_LEN))
return -EFAULT;
copied++;
}
out_resp->count_enum_blobs = enum_count;
}
/*
* NOTE: The idea seems to have been to use this to read all the blob
* property values. But nothing ever added them to the corresponding
* list, userspace always used the special-purpose get_blob ioctl to
* read the value for a blob property. It also doesn't make a lot of
* sense to return values here when everything else is just metadata for
* the property itself.
*/
if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
out_resp->count_enum_blobs = 0;
return 0;
}
static void drm_property_free_blob(struct kref *kref)
{
struct drm_property_blob *blob =
container_of(kref, struct drm_property_blob, base.refcount);
mutex_lock(&blob->dev->mode_config.blob_lock);
list_del(&blob->head_global);
mutex_unlock(&blob->dev->mode_config.blob_lock);
drm_mode_object_unregister(blob->dev, &blob->base);
kvfree(blob);
}
/**
* drm_property_create_blob - Create new blob property
* @dev: DRM device to create property for
* @length: Length to allocate for blob data
* @data: If specified, copies data into blob
*
* Creates a new blob property for a specified DRM device, optionally
* copying data. Note that blob properties are meant to be invariant, hence the
* data must be filled out before the blob is used as the value of any property.
*
* Returns:
* New blob property with a single reference on success, or an ERR_PTR
* value on failure.
*/
struct drm_property_blob *
drm_property_create_blob(struct drm_device *dev, size_t length,
const void *data)
{
struct drm_property_blob *blob;
int ret;
if (!length || length > INT_MAX - sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
if (!blob)
return ERR_PTR(-ENOMEM);
/* This must be explicitly initialised, so we can safely call list_del
* on it in the removal handler, even if it isn't in a file list. */
INIT_LIST_HEAD(&blob->head_file);
blob->data = (void *)blob + sizeof(*blob);
blob->length = length;
blob->dev = dev;
if (data)
memcpy(blob->data, data, length);
ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
true, drm_property_free_blob);
if (ret) {
kvfree(blob);
return ERR_PTR(-EINVAL);
}
mutex_lock(&dev->mode_config.blob_lock);
list_add_tail(&blob->head_global,
&dev->mode_config.property_blob_list);
mutex_unlock(&dev->mode_config.blob_lock);
return blob;
}
EXPORT_SYMBOL(drm_property_create_blob);
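/*
 * Illustrative sketch (not part of this file): wrapping a gamma LUT in a
 * blob. Because blobs are invariant, the data is supplied at creation time;
 * "lut" and "lut_size" are hypothetical.
 *
 *	blob = drm_property_create_blob(dev,
 *					lut_size * sizeof(struct drm_color_lut),
 *					lut);
 *	if (IS_ERR(blob))
 *		return PTR_ERR(blob);
 *	... expose blob->base.id as the value of a blob property ...
 *	drm_property_blob_put(blob);
 */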
/**
* drm_property_blob_put - release a blob property reference
* @blob: DRM blob property
*
* Releases a reference to a blob property. May free the object.
*/
void drm_property_blob_put(struct drm_property_blob *blob)
{
if (!blob)
return;
drm_mode_object_put(&blob->base);
}
EXPORT_SYMBOL(drm_property_blob_put);
void drm_property_destroy_user_blobs(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_property_blob *blob, *bt;
/*
* When the file gets released that means no one else can access the
 * blob list any more, so no need to grab dev->mode_config.blob_lock.
*/
list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) {
list_del_init(&blob->head_file);
drm_property_blob_put(blob);
}
}
/**
* drm_property_blob_get - acquire blob property reference
* @blob: DRM blob property
*
* Acquires a reference to an existing blob property. Returns @blob, which
* allows this to be used as a shorthand in assignments.
*/
struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob)
{
drm_mode_object_get(&blob->base);
return blob;
}
EXPORT_SYMBOL(drm_property_blob_get);
/**
* drm_property_lookup_blob - look up a blob property and take a reference
* @dev: drm device
* @id: id of the blob property
*
* If successful, this takes an additional reference to the blob property.
 * Callers need to make sure to eventually unreference the returned property
 * again, using drm_property_blob_put().
*
* Return:
* NULL on failure, pointer to the blob on success.
*/
struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
uint32_t id)
{
struct drm_mode_object *obj;
struct drm_property_blob *blob = NULL;
obj = __drm_mode_object_find(dev, NULL, id, DRM_MODE_OBJECT_BLOB);
if (obj)
blob = obj_to_blob(obj);
return blob;
}
EXPORT_SYMBOL(drm_property_lookup_blob);
/**
* drm_property_replace_global_blob - replace existing blob property
* @dev: drm device
* @replace: location of blob property pointer to be replaced
* @length: length of data for new blob, or 0 for no data
* @data: content for new blob, or NULL for no data
* @obj_holds_id: optional object for property holding blob ID
* @prop_holds_id: optional property holding blob ID
*
* This function will replace a global property in the blob list, optionally
* updating a property which holds the ID of that property.
*
* If length is 0 or data is NULL, no new blob will be created, and the holding
* property, if specified, will be set to 0.
*
* Access to the replace pointer is assumed to be protected by the caller, e.g.
* by holding the relevant modesetting object lock for its parent.
*
* For example, a drm_connector has a 'PATH' property, which contains the ID
* of a blob property with the value of the MST path information. Calling this
* function with replace pointing to the connector's path_blob_ptr, length and
* data set for the new path information, obj_holds_id set to the connector's
* base object, and prop_holds_id set to the path property name, will perform
* a completely atomic update. The access to path_blob_ptr is protected by the
 * caller holding a lock on the connector.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
*/
int drm_property_replace_global_blob(struct drm_device *dev,
struct drm_property_blob **replace,
size_t length,
const void *data,
struct drm_mode_object *obj_holds_id,
struct drm_property *prop_holds_id)
{
struct drm_property_blob *new_blob = NULL;
struct drm_property_blob *old_blob = NULL;
int ret;
WARN_ON(replace == NULL);
old_blob = *replace;
if (length && data) {
new_blob = drm_property_create_blob(dev, length, data);
if (IS_ERR(new_blob))
return PTR_ERR(new_blob);
}
if (obj_holds_id) {
ret = drm_object_property_set_value(obj_holds_id,
prop_holds_id,
new_blob ?
new_blob->base.id : 0);
if (ret != 0)
goto err_created;
}
drm_property_blob_put(old_blob);
*replace = new_blob;
return 0;
err_created:
drm_property_blob_put(new_blob);
return ret;
}
EXPORT_SYMBOL(drm_property_replace_global_blob);
/**
* drm_property_replace_blob - replace a blob property
* @blob: a pointer to the member blob to be replaced
* @new_blob: the new blob to replace with
*
* Return: true if the blob was in fact replaced.
*/
bool drm_property_replace_blob(struct drm_property_blob **blob,
struct drm_property_blob *new_blob)
{
struct drm_property_blob *old_blob = *blob;
if (old_blob == new_blob)
return false;
drm_property_blob_put(old_blob);
if (new_blob)
drm_property_blob_get(new_blob);
*blob = new_blob;
return true;
}
EXPORT_SYMBOL(drm_property_replace_blob);
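/*
 * Illustrative sketch (not part of this file): swapping the blob referenced
 * from a driver-private state, e.g. in an atomic set_property hook. The
 * state layout and the "lut_changed" flag are hypothetical.
 *
 *	if (drm_property_replace_blob(&example_state->lut_blob, new_blob))
 *		example_state->lut_changed = true;
 */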
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
blob = drm_property_lookup_blob(dev, out_resp->blob_id);
if (!blob)
return -ENOENT;
if (out_resp->length == blob->length) {
if (copy_to_user(u64_to_user_ptr(out_resp->data),
blob->data,
blob->length)) {
ret = -EFAULT;
goto unref;
}
}
out_resp->length = blob->length;
unref:
drm_property_blob_put(blob);
return ret;
}
int drm_mode_createblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_create_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
blob = drm_property_create_blob(dev, out_resp->length, NULL);
if (IS_ERR(blob))
return PTR_ERR(blob);
if (copy_from_user(blob->data,
u64_to_user_ptr(out_resp->data),
out_resp->length)) {
ret = -EFAULT;
goto out_blob;
}
/* Dropping the lock between create_blob and our access here is safe
* as only the same file_priv can remove the blob; at this point, it is
* not associated with any file_priv. */
mutex_lock(&dev->mode_config.blob_lock);
out_resp->blob_id = blob->base.id;
list_add_tail(&blob->head_file, &file_priv->blobs);
mutex_unlock(&dev->mode_config.blob_lock);
return 0;
out_blob:
drm_property_blob_put(blob);
return ret;
}
int drm_mode_destroyblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_destroy_blob *out_resp = data;
struct drm_property_blob *blob = NULL, *bt;
bool found = false;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
blob = drm_property_lookup_blob(dev, out_resp->blob_id);
if (!blob)
return -ENOENT;
mutex_lock(&dev->mode_config.blob_lock);
/* Ensure the property was actually created by this user. */
list_for_each_entry(bt, &file_priv->blobs, head_file) {
if (bt == blob) {
found = true;
break;
}
}
if (!found) {
ret = -EPERM;
goto err;
}
/* We must drop head_file here, because we may not be the last
* reference on the blob. */
list_del_init(&blob->head_file);
mutex_unlock(&dev->mode_config.blob_lock);
/* One reference from lookup, and one from the filp. */
drm_property_blob_put(blob);
drm_property_blob_put(blob);
return 0;
err:
mutex_unlock(&dev->mode_config.blob_lock);
drm_property_blob_put(blob);
return ret;
}
/* Some properties could refer to dynamic refcnt'd objects, or things that
* need special locking to handle lifetime issues (ie. to ensure the prop
* value doesn't become invalid part way through the property update due to
* race). The value returned by reference via 'obj' should be passed back
* to drm_property_change_valid_put() after the property is set (and the
* object to which the property is attached has a chance to take its own
* reference).
*/
bool drm_property_change_valid_get(struct drm_property *property,
uint64_t value, struct drm_mode_object **ref)
{
int i;
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
return false;
*ref = NULL;
if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
if (value < property->values[0] || value > property->values[1])
return false;
return true;
} else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
int64_t svalue = U642I64(value);
if (svalue < U642I64(property->values[0]) ||
svalue > U642I64(property->values[1]))
return false;
return true;
} else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
uint64_t valid_mask = 0;
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
struct drm_property_blob *blob;
if (value == 0)
return true;
blob = drm_property_lookup_blob(property->dev, value);
if (blob) {
*ref = &blob->base;
return true;
} else {
return false;
}
} else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
/* a zero value for an object property translates to null: */
if (value == 0)
return true;
*ref = __drm_mode_object_find(property->dev, NULL, value,
property->values[0]);
return *ref != NULL;
}
for (i = 0; i < property->num_values; i++)
if (property->values[i] == value)
return true;
return false;
}
void drm_property_change_valid_put(struct drm_property *property,
struct drm_mode_object *ref)
{
if (!ref)
return;
if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
drm_mode_object_put(ref);
} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
drm_property_blob_put(obj_to_blob(ref));
}
| linux-master | drivers/gpu/drm/drm_property.c |
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <linux/cc_platform.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/ioport.h>
#include <linux/iosys-map.h>
#include <xen/xen.h>
#include <drm/drm_cache.h>
/* A small bounce buffer that fits on the stack. */
#define MEMCPY_BOUNCE_SIZE 128
#if defined(CONFIG_X86)
#include <asm/smp.h>
/*
* clflushopt is an unordered instruction which needs fencing with mfence or
* sfence to avoid ordering issues. For drm_clflush_page this fencing happens
* in the caller.
*/
static void
drm_clflush_page(struct page *page)
{
uint8_t *page_virtual;
unsigned int i;
const int size = boot_cpu_data.x86_clflush_size;
if (unlikely(page == NULL))
return;
page_virtual = kmap_atomic(page);
for (i = 0; i < PAGE_SIZE; i += size)
clflushopt(page_virtual + i);
kunmap_atomic(page_virtual);
}
static void drm_cache_flush_clflush(struct page *pages[],
unsigned long num_pages)
{
unsigned long i;
mb(); /*Full memory barrier used before so that CLFLUSH is ordered*/
for (i = 0; i < num_pages; i++)
drm_clflush_page(*pages++);
mb(); /*Also used after CLFLUSH so that all cache is flushed*/
}
#endif
/**
* drm_clflush_pages - Flush dcache lines of a set of pages.
* @pages: List of pages to be flushed.
* @num_pages: Number of pages in the array.
*
* Flush every data cache line entry that points to an address belonging
* to a page in the array.
*/
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{
#if defined(CONFIG_X86)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
drm_cache_flush_clflush(pages, num_pages);
return;
}
if (wbinvd_on_all_cpus())
pr_err("Timed out waiting for cache flush\n");
#elif defined(__powerpc__)
unsigned long i;
for (i = 0; i < num_pages; i++) {
struct page *page = pages[i];
void *page_virtual;
if (unlikely(page == NULL))
continue;
page_virtual = kmap_atomic(page);
flush_dcache_range((unsigned long)page_virtual,
(unsigned long)page_virtual + PAGE_SIZE);
kunmap_atomic(page_virtual);
}
#else
WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
/**
 * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather table.
* @st: struct sg_table.
*
* Flush every data cache line entry that points to an address in the
* sg.
*/
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
struct sg_page_iter sg_iter;
mb(); /*CLFLUSH is ordered only by using memory barriers*/
for_each_sgtable_page(st, &sg_iter, 0)
drm_clflush_page(sg_page_iter_page(&sg_iter));
mb(); /*Make sure that all cache line entry is flushed*/
return;
}
if (wbinvd_on_all_cpus())
pr_err("Timed out waiting for cache flush\n");
#else
WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);
/**
* drm_clflush_virt_range - Flush dcache lines of a region
* @addr: Initial kernel memory address.
* @length: Region size.
*
* Flush every data cache line entry that points to an address in the
* region requested.
*/
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
const int size = boot_cpu_data.x86_clflush_size;
void *end = addr + length;
addr = (void *)(((unsigned long)addr) & -size);
mb(); /*CLFLUSH is only ordered with a full memory barrier*/
for (; addr < end; addr += size)
clflushopt(addr);
clflushopt(end - 1); /* force serialisation */
mb(); /*Ensure that every data cache line entry is flushed*/
return;
}
if (wbinvd_on_all_cpus())
pr_err("Timed out waiting for cache flush\n");
#else
WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
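/*
 * Illustrative sketch (not part of this file): flushing a CPU-written
 * kernel mapping before the device reads it; "vaddr", "data" and "size"
 * are hypothetical.
 *
 *	memcpy(vaddr, data, size);
 *	drm_clflush_virt_range(vaddr, size);
 */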
bool drm_need_swiotlb(int dma_bits)
{
struct resource *tmp;
resource_size_t max_iomem = 0;
/*
* Xen paravirtual hosts require swiotlb regardless of requested dma
* transfer size.
*
* NOTE: Really, what it requires is use of the dma_alloc_coherent
* allocator used in ttm_dma_populate() instead of
* ttm_populate_and_map_pages(), which bounce buffers so much in
* Xen it leads to swiotlb buffer exhaustion.
*/
if (xen_pv_domain())
return true;
/*
* Enforce dma_alloc_coherent when memory encryption is active as well
* for the same reasons as for Xen paravirtual hosts.
*/
if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return true;
for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
max_iomem = max(max_iomem, tmp->end);
return max_iomem > ((u64)1 << dma_bits);
}
EXPORT_SYMBOL(drm_need_swiotlb);
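/*
 * Illustrative sketch (not part of this file): a TTM-based driver with a
 * hypothetical 44-bit DMA mask might consult this at init time to decide
 * whether its page pools need dma_alloc_coherent() backing ("priv",
 * "example_funcs", "mapping" and "vma_manager" are hypothetical):
 *
 *	bool use_dma_alloc = drm_need_swiotlb(44);
 *
 *	ret = ttm_device_init(&priv->bdev, &example_funcs, drm->dev, mapping,
 *			      vma_manager, use_dma_alloc, false);
 */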
static void memcpy_fallback(struct iosys_map *dst,
const struct iosys_map *src,
unsigned long len)
{
if (!dst->is_iomem && !src->is_iomem) {
memcpy(dst->vaddr, src->vaddr, len);
} else if (!src->is_iomem) {
iosys_map_memcpy_to(dst, 0, src->vaddr, len);
} else if (!dst->is_iomem) {
memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
} else {
/*
* Bounce size is not performance tuned, but using a
* bounce buffer like this is significantly faster than
* resorting to ioreadxx() + iowritexx().
*/
char bounce[MEMCPY_BOUNCE_SIZE];
void __iomem *_src = src->vaddr_iomem;
void __iomem *_dst = dst->vaddr_iomem;
while (len >= MEMCPY_BOUNCE_SIZE) {
memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE);
memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE);
_src += MEMCPY_BOUNCE_SIZE;
_dst += MEMCPY_BOUNCE_SIZE;
len -= MEMCPY_BOUNCE_SIZE;
}
		if (len) {
			/*
			 * Tail: copy only the remaining bytes to avoid
			 * reading or writing past the requested range.
			 */
			memcpy_fromio(bounce, _src, len);
			memcpy_toio(_dst, bounce, len);
}
}
}
#ifdef CONFIG_X86
static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
{
kernel_fpu_begin();
while (len >= 4) {
asm("movntdqa (%0), %%xmm0\n"
"movntdqa 16(%0), %%xmm1\n"
"movntdqa 32(%0), %%xmm2\n"
"movntdqa 48(%0), %%xmm3\n"
"movaps %%xmm0, (%1)\n"
"movaps %%xmm1, 16(%1)\n"
"movaps %%xmm2, 32(%1)\n"
"movaps %%xmm3, 48(%1)\n"
:: "r" (src), "r" (dst) : "memory");
src += 64;
dst += 64;
len -= 4;
}
while (len--) {
asm("movntdqa (%0), %%xmm0\n"
"movaps %%xmm0, (%1)\n"
:: "r" (src), "r" (dst) : "memory");
src += 16;
dst += 16;
}
kernel_fpu_end();
}
/*
 * __drm_memcpy_from_wc copies @len bytes from @src to @dst using
 * non-temporal instructions where available. If @src, @dst or @len is not
 * aligned to 16 bytes, it falls back to a plain memcpy().
 */
static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len)
{
if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
memcpy(dst, src, len);
else if (likely(len))
__memcpy_ntdqa(dst, src, len >> 4);
}
/**
* drm_memcpy_from_wc - Perform the fastest available memcpy from a source
* that may be WC.
* @dst: The destination pointer
* @src: The source pointer
 * @len: The size of the area to transfer in bytes
*
 * Tries an arch-optimized, prefetching memcpy for reading out of a WC region,
 * and if no such beast is available, falls back to a normal memcpy.
*/
void drm_memcpy_from_wc(struct iosys_map *dst,
const struct iosys_map *src,
unsigned long len)
{
if (WARN_ON(in_interrupt())) {
memcpy_fallback(dst, src, len);
return;
}
if (static_branch_likely(&has_movntdqa)) {
__drm_memcpy_from_wc(dst->is_iomem ?
(void __force *)dst->vaddr_iomem :
dst->vaddr,
src->is_iomem ?
(void const __force *)src->vaddr_iomem :
src->vaddr,
len);
return;
}
memcpy_fallback(dst, src, len);
}
EXPORT_SYMBOL(drm_memcpy_from_wc);
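/*
 * Illustrative sketch (not part of this file): copying from a
 * write-combined VRAM mapping into system memory; "vram_iomem",
 * "sysmem_vaddr" and "size" are hypothetical.
 *
 *	struct iosys_map src, dst;
 *
 *	iosys_map_set_vaddr_iomem(&src, vram_iomem);
 *	iosys_map_set_vaddr(&dst, sysmem_vaddr);
 *	drm_memcpy_from_wc(&dst, &src, size);
 */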
/*
* drm_memcpy_init_early - One time initialization of the WC memcpy code
*/
void drm_memcpy_init_early(void)
{
/*
	 * Some hypervisors (e.g. KVM) don't support emulation of VEX-prefixed
	 * instructions. So don't enable movntdqa in hypervisor guests.
*/
if (static_cpu_has(X86_FEATURE_XMM4_1) &&
!boot_cpu_has(X86_FEATURE_HYPERVISOR))
static_branch_enable(&has_movntdqa);
}
#else
void drm_memcpy_from_wc(struct iosys_map *dst,
const struct iosys_map *src,
unsigned long len)
{
WARN_ON(in_interrupt());
memcpy_fallback(dst, src, len);
}
EXPORT_SYMBOL(drm_memcpy_from_wc);
void drm_memcpy_init_early(void)
{
}
#endif /* CONFIG_X86 */
| linux-master | drivers/gpu/drm/drm_cache.c |
/*
* \file drm_vm.c
* Memory mapping for DRM
*
* \author Rickard E. (Rik) Faith <[email protected]>
* \author Gareth Hughes <[email protected]>
*/
/*
* Created: Mon Jan 4 08:58:31 1999 by [email protected]
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
#include "drm_legacy.h"
struct drm_vma_entry {
struct list_head head;
struct vm_area_struct *vma;
pid_t pid;
};
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static pgprot_t drm_io_prot(struct drm_local_map *map,
struct vm_area_struct *vma)
{
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
defined(__mips__) || defined(__loongarch__)
if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
tmp = pgprot_noncached(tmp);
else
tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
if (efi_range_is_wc(vma->vm_start, vma->vm_end -
vma->vm_start))
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
tmp = pgprot_noncached(tmp);
#endif
return tmp;
}
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
tmp = pgprot_noncached_wc(tmp);
#endif
return tmp;
}
/*
* \c fault method for AGP virtual memory.
*
* \param vma virtual memory area.
* \param address access address.
* \return pointer to the page structure.
*
* Find the right map and if it's AGP memory find the real physical page to
* map, get the page, increment the use count and return it.
*/
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_local_map *map = NULL;
struct drm_map_list *r_list;
struct drm_hash_item *hash;
/*
* Find the right map
*/
if (!dev->agp)
goto vm_fault_error;
	if (!dev->agp->cant_use_aperture)
goto vm_fault_error;
if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
goto vm_fault_error;
r_list = drm_hash_entry(hash, struct drm_map_list, hash);
map = r_list->map;
if (map && map->type == _DRM_AGP) {
/*
* Using vm_pgoff as a selector forces us to use this unusual
* addressing scheme.
*/
resource_size_t offset = vmf->address - vma->vm_start;
resource_size_t baddr = map->offset + offset;
struct drm_agp_mem *agpmem;
struct page *page;
#ifdef __alpha__
/*
* Adjust to a bus-relative address
*/
baddr -= dev->hose->mem_space->start;
#endif
/*
* It's AGP memory - find the real physical page to map
*/
list_for_each_entry(agpmem, &dev->agp->memory, head) {
if (agpmem->bound <= baddr &&
agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
break;
}
if (&agpmem->head == &dev->agp->memory)
goto vm_fault_error;
/*
* Get the page, inc the use count, and return it
*/
offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
page = agpmem->memory->pages[offset];
get_page(page);
vmf->page = page;
DRM_DEBUG
("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
(unsigned long long)baddr,
agpmem->memory->pages[offset],
(unsigned long long)offset,
page_count(page));
return 0;
}
vm_fault_error:
return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
#endif
/*
* \c nopage method for shared virtual memory.
*
* \param vma virtual memory area.
* \param address access address.
* \return pointer to the page structure.
*
* Get the mapping, find the real physical page to map, get the page, and
* return it.
*/
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
unsigned long offset;
unsigned long i;
struct page *page;
if (!map)
return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = vmf->address - vma->vm_start;
i = (unsigned long)map->handle + offset;
page = vmalloc_to_page((void *)i);
if (!page)
return VM_FAULT_SIGBUS;
get_page(page);
vmf->page = page;
DRM_DEBUG("shm_fault 0x%lx\n", offset);
return 0;
}
/*
* \c close method for shared virtual memory.
*
* \param vma virtual memory area.
*
* Deletes map information if we are the last
* person to close a mapping and it's not in the global maplist.
*/
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *pt, *temp;
struct drm_local_map *map;
struct drm_map_list *r_list;
int found_maps = 0;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
map = vma->vm_private_data;
mutex_lock(&dev->struct_mutex);
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma->vm_private_data == map)
found_maps++;
if (pt->vma == vma) {
list_del(&pt->head);
kfree(pt);
}
}
/* We were the only map that was found */
if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */
found_maps = 0;
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map == map)
found_maps++;
}
if (!found_maps) {
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
arch_phys_wc_del(map->mtrr);
iounmap(map->handle);
break;
case _DRM_SHM:
vfree(map->handle);
break;
case _DRM_AGP:
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
dma_free_coherent(dev->dev,
map->size,
map->handle,
map->offset);
break;
}
kfree(map);
}
}
mutex_unlock(&dev->struct_mutex);
}
/*
* \c fault method for DMA virtual memory.
*
* \param address access address.
* \return pointer to the page structure.
*
* Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_device_dma *dma = dev->dma;
unsigned long offset;
unsigned long page_nr;
struct page *page;
if (!dma)
return VM_FAULT_SIGBUS; /* Error */
if (!dma->pagelist)
return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = vmf->address - vma->vm_start;
/* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
page = virt_to_page((void *)dma->pagelist[page_nr]);
get_page(page);
vmf->page = page;
DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
return 0;
}
/*
* \c fault method for scatter-gather virtual memory.
*
* \param address access address.
* \return pointer to the page structure.
*
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_sg_mem *entry = dev->sg;
unsigned long offset;
unsigned long map_offset;
unsigned long page_offset;
struct page *page;
if (!entry)
return VM_FAULT_SIGBUS; /* Error */
if (!entry->pagelist)
return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = vmf->address - vma->vm_start;
map_offset = map->offset - (unsigned long)dev->sg->virtual;
page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
page = entry->pagelist[page_offset];
get_page(page);
vmf->page = page;
return 0;
}
/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
.fault = drm_vm_fault,
.open = drm_vm_open,
.close = drm_vm_close,
};
/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
.fault = drm_vm_shm_fault,
.open = drm_vm_open,
.close = drm_vm_shm_close,
};
/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
.fault = drm_vm_dma_fault,
.open = drm_vm_open,
.close = drm_vm_close,
};
/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
.fault = drm_vm_sg_fault,
.open = drm_vm_open,
.close = drm_vm_close,
};
static void drm_vm_open_locked(struct drm_device *dev,
struct vm_area_struct *vma)
{
struct drm_vma_entry *vma_entry;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
if (vma_entry) {
vma_entry->vma = vma;
vma_entry->pid = current->pid;
list_add(&vma_entry->head, &dev->vmalist);
}
}
static void drm_vm_open(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
drm_vm_open_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
static void drm_vm_close_locked(struct drm_device *dev,
struct vm_area_struct *vma)
{
struct drm_vma_entry *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma == vma) {
list_del(&pt->head);
kfree(pt);
break;
}
}
}
/*
* \c close method for all virtual memory types.
*
* \param vma virtual memory area.
*
* Search the \p vma private data entry in drm_device::vmalist, unlink it, and
* free it.
*/
static void drm_vm_close(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
drm_vm_close_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
/*
* mmap DMA memory.
*
* \param file_priv DRM file private.
* \param vma virtual memory area.
* \return zero on success or a negative number on failure.
*
* Sets the virtual memory area operations structure to vm_dma_ops, the file
* pointer, and calls vm_open().
*/
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev;
struct drm_device_dma *dma;
unsigned long length = vma->vm_end - vma->vm_start;
dev = priv->minor->dev;
dma = dev->dma;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff);
/* Length must match exact page count */
if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
return -EINVAL;
}
if (!capable(CAP_SYS_ADMIN) &&
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
/* Ye gads this is ugly. With more thought
we could move this up higher and use
`protection_map' instead. */
vma->vm_page_prot =
__pgprot(pte_val
(pte_wrprotect
(__pte(pgprot_val(vma->vm_page_prot)))));
#endif
}
vma->vm_ops = &drm_vm_dma_ops;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;
}
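/*
* A minimal user-space sketch (an illustration, not part of this driver) of
* the contract enforced above: the mmap() length must cover the device's DMA
* buffers exactly, or the call fails with EINVAL. The device node path is an
* assumption and the total size would come from the buffer-allocation
* ioctls; offset 0 selects the DMA path (see drm_mmap_locked() below).
*/
#include <fcntl.h>
#include <sys/mman.h>
static void *map_legacy_dma(size_t total_dma_size)
{
int fd = open("/dev/dri/card0", O_RDWR); /* assumed node */
if (fd < 0)
return MAP_FAILED;
/* Any length other than the exact page count is rejected. */
return mmap(NULL, total_dma_size, PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0);
}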
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
return dev->hose->dense_mem_base;
#else
return 0;
#endif
}
/*
* mmap a DRM map.
*
* \param filp file pointer.
* \param vma virtual memory area.
* \return zero on success or a negative number on failure.
*
* If the virtual memory area has no offset associated with it then it's a DMA
* area, so drm_mmap_dma() is called. Otherwise the map is looked up in
* drm_device::map_hash, the restricted flag is checked, the virtual memory
* operations are set according to the mapping type and the pages are
* remapped. Finally the mapping is registered via drm_vm_open_locked().
*/
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_local_map *map = NULL;
resource_size_t offset = 0;
struct drm_hash_item *hash;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff);
if (!priv->authenticated)
return -EACCES;
/* We check for "dma". On Apple's UniNorth, it's valid to have
* the AGP mapped at physical address 0
* --BenH.
*/
if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
&& (!dev->agp
|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
)
return drm_mmap_dma(filp, vma);
if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
DRM_ERROR("Could not find map\n");
return -EINVAL;
}
map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
/* Check for valid size. */
if (map->size < vma->vm_end - vma->vm_start)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
/* Ye gads this is ugly. With more thought
we could move this up higher and use
`protection_map' instead. */
vma->vm_page_prot =
__pgprot(pte_val
(pte_wrprotect
(__pte(pgprot_val(vma->vm_page_prot)))));
#endif
}
switch (map->type) {
#if !defined(__arm__)
case _DRM_AGP:
if (dev->agp && dev->agp->cant_use_aperture) {
/*
* On some platforms the CPU can't access bus DMA addresses directly,
* so for memory of type DRM_AGP we sort out the real physical pages
* and mappings in fault()
*/
#if defined(__powerpc__)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
vma->vm_ops = &drm_vm_ops;
break;
}
fallthrough; /* to _DRM_FRAME_BUFFER... */
#endif
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
offset = drm_core_get_reg_ofs(dev);
vma->vm_page_prot = drm_io_prot(map, vma);
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
" offset = 0x%llx\n",
map->type,
vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
vma->vm_ops = &drm_vm_ops;
break;
case _DRM_CONSISTENT:
/* Consistent memory is really like shared memory, but it's allocated
* differently, so we avoid the fault handler. */
if (remap_pfn_range(vma, vma->vm_start,
page_to_pfn(virt_to_page(map->handle)),
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
vma->vm_page_prot = drm_dma_prot(map->type, vma);
fallthrough; /* to _DRM_SHM */
case _DRM_SHM:
vma->vm_ops = &drm_vm_shm_ops;
vma->vm_private_data = (void *)map;
break;
case _DRM_SCATTER_GATHER:
vma->vm_ops = &drm_vm_sg_ops;
vma->vm_private_data = (void *)map;
vma->vm_page_prot = drm_dma_prot(map->type, vma);
break;
default:
return -EINVAL; /* This should never happen. */
}
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;
}
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
int ret;
if (drm_dev_is_unplugged(dev))
return -ENODEV;
mutex_lock(&dev->struct_mutex);
ret = drm_mmap_locked(filp, vma);
mutex_unlock(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);
#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
struct drm_vma_entry *vma, *vma_temp;
/* Clear vma list (only needed for legacy drivers) */
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
list_del(&vma->head);
kfree(vma);
}
}
#endif
| linux-master | drivers/gpu/drm/drm_vm.c |
/*
* \file drm_legacy_misc.c
* Misc legacy support functions.
*
* \author Rickard E. (Rik) Faith <[email protected]>
* \author Gareth Hughes <[email protected]>
*/
/*
* Created: Tue Feb 2 08:37:54 1999 by [email protected]
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
#include "drm_legacy.h"
void drm_legacy_init_members(struct drm_device *dev)
{
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
spin_lock_init(&dev->buf_lock);
mutex_init(&dev->ctxlist_mutex);
}
void drm_legacy_destroy_members(struct drm_device *dev)
{
mutex_destroy(&dev->ctxlist_mutex);
}
int drm_legacy_setup(struct drm_device *dev)
{
int ret;
if (dev->driver->firstopen &&
drm_core_check_feature(dev, DRIVER_LEGACY)) {
ret = dev->driver->firstopen(dev);
if (ret != 0)
return ret;
}
ret = drm_legacy_dma_setup(dev);
if (ret < 0)
return ret;
DRM_DEBUG("\n");
return 0;
}
void drm_legacy_dev_reinit(struct drm_device *dev)
{
if (dev->irq_enabled)
drm_legacy_irq_uninstall(dev);
mutex_lock(&dev->struct_mutex);
drm_legacy_agp_clear(dev);
drm_legacy_sg_cleanup(dev);
drm_legacy_vma_flush(dev);
drm_legacy_dma_takedown(dev);
mutex_unlock(&dev->struct_mutex);
dev->sigdata.lock = NULL;
dev->context_flag = 0;
dev->last_context = 0;
dev->if_version = 0;
DRM_DEBUG("lastclose completed\n");
}
void drm_master_legacy_init(struct drm_master *master)
{
spin_lock_init(&master->lock.spinlock);
init_waitqueue_head(&master->lock.lock_queue);
}
| linux-master | drivers/gpu/drm/drm_legacy_misc.c |
/*
* Copyright (C) 2009 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/module.h>
#include <drm/drm_encoder_slave.h>
/**
* drm_i2c_encoder_init - Initialize an I2C slave encoder
* @dev: DRM device.
* @encoder: Encoder to be attached to the I2C device. You aren't
* required to have called drm_encoder_init() before.
* @adap: I2C adapter that will be used to communicate with
* the device.
* @info: Information that will be used to create the I2C device.
* Required fields are @addr and @type.
*
* Create an I2C device on the specified bus (the module containing its
* driver is transparently loaded) and attach it to the specified
* &drm_encoder_slave. The @slave_funcs field will be initialized with
* the hooks provided by the slave driver.
*
* If @info.platform_data is non-NULL it will be used as the initial
* slave config.
*
* Returns 0 on success or a negative errno on failure; in particular,
* -ENODEV is returned when no matching driver is found.
*/
int drm_i2c_encoder_init(struct drm_device *dev,
struct drm_encoder_slave *encoder,
struct i2c_adapter *adap,
const struct i2c_board_info *info)
{
struct module *module = NULL;
struct i2c_client *client;
struct drm_i2c_encoder_driver *encoder_drv;
int err = 0;
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
client = i2c_new_client_device(adap, info);
if (!i2c_client_has_driver(client)) {
err = -ENODEV;
goto fail_unregister;
}
module = client->dev.driver->owner;
if (!try_module_get(module)) {
err = -ENODEV;
goto fail_unregister;
}
encoder->bus_priv = client;
encoder_drv = to_drm_i2c_encoder_driver(to_i2c_driver(client->dev.driver));
err = encoder_drv->encoder_init(client, dev, encoder);
if (err)
goto fail_module_put;
if (info->platform_data)
encoder->slave_funcs->set_config(&encoder->base,
info->platform_data);
return 0;
fail_module_put:
module_put(module);
fail_unregister:
i2c_unregister_device(client);
return err;
}
EXPORT_SYMBOL(drm_i2c_encoder_init);
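/*
* A hedged kernel-side usage sketch (hypothetical driver code, not part of
* this file): attaching a slave encoder chip. The chip name "tda998x", the
* adapter number and the address 0x70 are illustrative assumptions.
*/
static int example_attach_slave_encoder(struct drm_device *dev,
struct drm_encoder_slave *slave)
{
struct i2c_adapter *adap = i2c_get_adapter(1); /* assumed bus */
struct i2c_board_info info = {
I2C_BOARD_INFO("tda998x", 0x70), /* assumed chip */
};
int ret;
if (!adap)
return -ENODEV;
/* Loads the driver module, creates the client, fills slave_funcs. */
ret = drm_i2c_encoder_init(dev, slave, adap, &info);
i2c_put_adapter(adap);
return ret;
}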
/**
* drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder
* @drm_encoder: Encoder to be unregistered.
*
* This should be called from the @destroy method of an I2C slave
* encoder driver once I2C access is no longer needed.
*/
void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
{
struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
struct module *module = client->dev.driver->owner;
i2c_unregister_device(client);
encoder->bus_priv = NULL;
module_put(module);
}
EXPORT_SYMBOL(drm_i2c_encoder_destroy);
/*
* Wrapper functions which can be plugged into drm_encoder_helper_funcs:
*/
static inline const struct drm_encoder_slave_funcs *
get_slave_funcs(struct drm_encoder *enc)
{
return to_encoder_slave(enc)->slave_funcs;
}
void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode)
{
get_slave_funcs(encoder)->dpms(encoder, mode);
}
EXPORT_SYMBOL(drm_i2c_encoder_dpms);
bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (!get_slave_funcs(encoder)->mode_fixup)
return true;
return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
}
EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup);
void drm_i2c_encoder_prepare(struct drm_encoder *encoder)
{
drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
}
EXPORT_SYMBOL(drm_i2c_encoder_prepare);
void drm_i2c_encoder_commit(struct drm_encoder *encoder)
{
drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
}
EXPORT_SYMBOL(drm_i2c_encoder_commit);
void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
}
EXPORT_SYMBOL(drm_i2c_encoder_mode_set);
enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
return get_slave_funcs(encoder)->detect(encoder, connector);
}
EXPORT_SYMBOL(drm_i2c_encoder_detect);
void drm_i2c_encoder_save(struct drm_encoder *encoder)
{
get_slave_funcs(encoder)->save(encoder);
}
EXPORT_SYMBOL(drm_i2c_encoder_save);
void drm_i2c_encoder_restore(struct drm_encoder *encoder)
{
get_slave_funcs(encoder)->restore(encoder);
}
EXPORT_SYMBOL(drm_i2c_encoder_restore);
| linux-master | drivers/gpu/drm/drm_encoder_slave.c |
/*
* \file drm_agpsupport.c
* DRM support for AGP/GART backend
*
* \author Rickard E. (Rik) Faith <[email protected]>
* \author Gareth Hughes <[email protected]>
*/
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
#if IS_ENABLED(CONFIG_AGP)
/*
* Get AGP information.
*
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been initialized and acquired and fills in the
* drm_agp_info structure with the information in drm_agp_head::agp_info.
*/
int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info)
{
struct agp_kern_info *kern;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
kern = &dev->agp->agp_info;
info->agp_version_major = kern->version.major;
info->agp_version_minor = kern->version.minor;
info->mode = kern->mode;
info->aperture_base = kern->aper_base;
info->aperture_size = kern->aper_size * 1024 * 1024;
info->memory_allowed = kern->max_memory << PAGE_SHIFT;
info->memory_used = kern->current_memory << PAGE_SHIFT;
info->id_vendor = kern->device->vendor;
info->id_device = kern->device->device;
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_info);
int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_info *info = data;
int err;
err = drm_legacy_agp_info(dev, info);
if (err)
return err;
return 0;
}
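/*
* A hedged user-space sketch of the ioctl path that ends up here: the AGP
* device must be acquired before DRM_IOCTL_AGP_INFO succeeds. The device
* node path is an assumption; minimal error handling only.
*/
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
int main(void)
{
struct drm_agp_info info = { 0 };
int fd = open("/dev/dri/card0", O_RDWR); /* assumed node */
if (fd < 0)
return 1;
if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE) || /* acquire first */
ioctl(fd, DRM_IOCTL_AGP_INFO, &info))
return 1;
printf("AGP %d.%d, aperture %lu MiB\n",
info.agp_version_major, info.agp_version_minor,
info.aperture_size >> 20);
return 0;
}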
/*
* Acquire the AGP device.
*
* \param dev DRM device that is to acquire AGP.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
int drm_legacy_agp_acquire(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (!dev->agp)
return -ENODEV;
if (dev->agp->acquired)
return -EBUSY;
dev->agp->bridge = agp_backend_acquire(pdev);
if (!dev->agp->bridge)
return -ENODEV;
dev->agp->acquired = 1;
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_acquire);
/*
* Acquire the AGP device (ioctl).
*
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return drm_legacy_agp_acquire(file_priv->minor->dev);
}
/*
* Release the AGP device.
*
* \param dev DRM device that is to release AGP.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been acquired and calls \c agp_backend_release.
*/
int drm_legacy_agp_release(struct drm_device *dev)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
agp_backend_release(dev->agp->bridge);
dev->agp->acquired = 0;
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_release);
int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return drm_legacy_agp_release(dev);
}
/*
* Enable the AGP bus.
*
* \param dev DRM device that has previously acquired AGP.
* \param mode Requested AGP mode.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been acquired but not enabled, and calls
* \c agp_enable.
*/
int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
dev->agp->mode = mode.mode;
agp_enable(dev->agp->bridge, mode.mode);
dev->agp->enabled = 1;
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_enable);
int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_mode *mode = data;
return drm_legacy_agp_enable(dev, *mode);
}
/*
* Allocate AGP memory.
*
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired, allocates the
* memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
*/
int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
struct drm_agp_mem *entry;
struct agp_memory *memory;
unsigned long pages;
u32 type;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
pages = DIV_ROUND_UP(request->size, PAGE_SIZE);
type = (u32) request->type;
memory = agp_allocate_memory(dev->agp->bridge, pages, type);
if (!memory) {
kfree(entry);
return -ENOMEM;
}
entry->handle = (unsigned long)memory->key + 1;
entry->memory = memory;
entry->bound = 0;
entry->pages = pages;
list_add(&entry->head, &dev->agp->memory);
request->handle = entry->handle;
request->physical = memory->physical;
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_alloc);
int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_buffer *request = data;
return drm_legacy_agp_alloc(dev, request);
}
/*
* Search for the AGP memory entry associated with a handle.
*
* \param dev DRM device structure.
* \param handle AGP memory handle.
* \return pointer to the drm_agp_mem structure associated with \p handle.
*
* Walks through drm_agp_head::memory until finding a matching handle.
*/
static struct drm_agp_mem *drm_legacy_agp_lookup_entry(struct drm_device *dev,
unsigned long handle)
{
struct drm_agp_mem *entry;
list_for_each_entry(entry, &dev->agp->memory, head) {
if (entry->handle == handle)
return entry;
}
return NULL;
}
/*
* Unbind AGP memory from the GATT (ioctl).
*
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and acquired, looks-up the AGP memory
* entry and passes it to the unbind_agp() function.
*/
int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
{
struct drm_agp_mem *entry;
int ret;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
entry = drm_legacy_agp_lookup_entry(dev, request->handle);
if (!entry || !entry->bound)
return -EINVAL;
ret = agp_unbind_memory(entry->memory);
if (ret == 0)
entry->bound = 0;
return ret;
}
EXPORT_SYMBOL(drm_legacy_agp_unbind);
int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_binding *request = data;
return drm_legacy_agp_unbind(dev, request);
}
/*
* Bind AGP memory into the GATT (ioctl)
*
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and that no memory
* is currently bound into the GATT. Looks-up the AGP memory entry and passes
* it to bind_agp() function.
*/
int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
{
struct drm_agp_mem *entry;
int retcode;
int page;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
entry = drm_legacy_agp_lookup_entry(dev, request->handle);
if (!entry || entry->bound)
return -EINVAL;
page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
retcode = agp_bind_memory(entry->memory, page);
if (retcode)
return retcode;
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
dev->agp->base, entry->bound);
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_bind);
int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_binding *request = data;
return drm_legacy_agp_bind(dev, request);
}
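/*
* Worked example (illustrative numbers, 4 KiB pages): a bind request with
* offset 0x3000 gives page = DIV_ROUND_UP(0x3000, 0x1000) = 3, so the
* memory lands at GATT page 3 and entry->bound records the linear address
* dev->agp->base + (3 << PAGE_SHIFT).
*/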
/*
* Free AGP memory (ioctl).
*
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and looks up the
* AGP memory entry. If the memory is currently bound, unbind it via
* unbind_agp(). Frees it via free_agp() as well as the entry itself
* and unlinks from the doubly linked list it's inserted in.
*/
int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
{
struct drm_agp_mem *entry;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
entry = drm_legacy_agp_lookup_entry(dev, request->handle);
if (!entry)
return -EINVAL;
if (entry->bound)
agp_unbind_memory(entry->memory);
list_del(&entry->head);
agp_free_memory(entry->memory);
kfree(entry);
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_free);
int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_buffer *request = data;
return drm_legacy_agp_free(dev, request);
}
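/*
* A hedged user-space sketch of the buffer lifecycle served by the ioctls
* above (acquire/enable steps omitted; all values illustrative). It assumes
* <sys/ioctl.h> and the UAPI <drm/drm.h>, which provide struct
* drm_agp_buffer and struct drm_agp_binding.
*/
static int agp_buffer_roundtrip(int fd)
{
struct drm_agp_buffer buf = { .size = 1 << 20 }; /* 1 MiB, type 0 */
struct drm_agp_binding bind = { .offset = 0 }; /* start of aperture */
if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &buf))
return -1;
bind.handle = buf.handle;
if (ioctl(fd, DRM_IOCTL_AGP_BIND, &bind))
goto out_free;
ioctl(fd, DRM_IOCTL_AGP_UNBIND, &bind);
out_free:
ioctl(fd, DRM_IOCTL_AGP_FREE, &buf); /* also unbinds if still bound */
return 0;
}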
/*
* Initialize the AGP resources.
*
* \return pointer to a drm_agp_head structure.
*
* Gets the drm_agp_t structure which is made available by the agpgart module
* via the inter_module_* functions. Creates and initializes a drm_agp_head
* structure.
*
* Note that final cleanup of the kmalloced structure is directly done in
* drm_pci_agp_destroy.
*/
struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_agp_head *head = NULL;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
return NULL;
head->bridge = agp_find_bridge(pdev);
if (!head->bridge) {
head->bridge = agp_backend_acquire(pdev);
if (!head->bridge) {
kfree(head);
return NULL;
}
agp_copy_info(head->bridge, &head->agp_info);
agp_backend_release(head->bridge);
} else {
agp_copy_info(head->bridge, &head->agp_info);
}
if (head->agp_info.chipset == NOT_SUPPORTED) {
kfree(head);
return NULL;
}
INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
head->base = head->agp_info.aper_base;
return head;
}
/* Only exported for i810.ko */
EXPORT_SYMBOL(drm_legacy_agp_init);
/**
* drm_legacy_agp_clear - Clear AGP resource list
* @dev: DRM device
*
* Iterate over all AGP resources and remove them, but keep the AGP head
* intact so it can still be used. It is safe to call this even if AGP is
* disabled or was already removed.
*
* Cleanup is only done for drivers who have DRIVER_LEGACY set.
*/
void drm_legacy_agp_clear(struct drm_device *dev)
{
struct drm_agp_mem *entry, *tempe;
if (!dev->agp)
return;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
if (entry->bound)
agp_unbind_memory(entry->memory);
agp_free_memory(entry->memory);
kfree(entry);
}
INIT_LIST_HEAD(&dev->agp->memory);
if (dev->agp->acquired)
drm_legacy_agp_release(dev);
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
#endif
| linux-master | drivers/gpu/drm/drm_agpsupport.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Samsung Electronics Co.Ltd
* Authors:
* Hyungwon Hwang <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_print.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
/* Sysreg registers for MIC */
#define DSD_CFG_MUX 0x1004
#define MIC0_RGB_MUX (1 << 0)
#define MIC0_I80_MUX (1 << 1)
#define MIC0_ON_MUX (1 << 5)
/* MIC registers */
#define MIC_OP 0x0
#define MIC_IP_VER 0x0004
#define MIC_V_TIMING_0 0x0008
#define MIC_V_TIMING_1 0x000C
#define MIC_IMG_SIZE 0x0010
#define MIC_INPUT_TIMING_0 0x0014
#define MIC_INPUT_TIMING_1 0x0018
#define MIC_2D_OUTPUT_TIMING_0 0x001C
#define MIC_2D_OUTPUT_TIMING_1 0x0020
#define MIC_2D_OUTPUT_TIMING_2 0x0024
#define MIC_3D_OUTPUT_TIMING_0 0x0028
#define MIC_3D_OUTPUT_TIMING_1 0x002C
#define MIC_3D_OUTPUT_TIMING_2 0x0030
#define MIC_CORE_PARA_0 0x0034
#define MIC_CORE_PARA_1 0x0038
#define MIC_CTC_CTRL 0x0040
#define MIC_RD_DATA 0x0044
#define MIC_UPD_REG (1 << 31)
#define MIC_ON_REG (1 << 30)
#define MIC_TD_ON_REG (1 << 29)
#define MIC_BS_CHG_OUT (1 << 16)
#define MIC_VIDEO_TYPE(x) (((x) & 0xf) << 12)
#define MIC_PSR_EN (1 << 5)
#define MIC_SW_RST (1 << 4)
#define MIC_ALL_RST (1 << 3)
#define MIC_CORE_VER_CONTROL (1 << 2)
#define MIC_MODE_SEL_COMMAND_MODE (1 << 1)
#define MIC_MODE_SEL_MASK (1 << 1)
#define MIC_CORE_EN (1 << 0)
#define MIC_V_PULSE_WIDTH(x) (((x) & 0x3fff) << 16)
#define MIC_V_PERIOD_LINE(x) ((x) & 0x3fff)
#define MIC_VBP_SIZE(x) (((x) & 0x3fff) << 16)
#define MIC_VFP_SIZE(x) ((x) & 0x3fff)
#define MIC_IMG_V_SIZE(x) (((x) & 0x3fff) << 16)
#define MIC_IMG_H_SIZE(x) ((x) & 0x3fff)
#define MIC_H_PULSE_WIDTH_IN(x) (((x) & 0x3fff) << 16)
#define MIC_H_PERIOD_PIXEL_IN(x) ((x) & 0x3fff)
#define MIC_HBP_SIZE_IN(x) (((x) & 0x3fff) << 16)
#define MIC_HFP_SIZE_IN(x) ((x) & 0x3fff)
#define MIC_H_PULSE_WIDTH_2D(x) (((x) & 0x3fff) << 16)
#define MIC_H_PERIOD_PIXEL_2D(x) ((x) & 0x3fff)
#define MIC_HBP_SIZE_2D(x) (((x) & 0x3fff) << 16)
#define MIC_HFP_SIZE_2D(x) ((x) & 0x3fff)
#define MIC_BS_SIZE_2D(x) ((x) & 0x3fff)
static const char *const clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
#define NUM_CLKS ARRAY_SIZE(clk_names)
static DEFINE_MUTEX(mic_mutex);
struct exynos_mic {
struct device *dev;
void __iomem *reg;
struct regmap *sysreg;
struct clk *clks[NUM_CLKS];
bool i80_mode;
struct videomode vm;
struct drm_bridge bridge;
bool enabled;
};
static void mic_set_path(struct exynos_mic *mic, bool enable)
{
int ret;
unsigned int val;
ret = regmap_read(mic->sysreg, DSD_CFG_MUX, &val);
if (ret) {
DRM_DEV_ERROR(mic->dev,
"mic: Failed to read system register\n");
return;
}
if (enable) {
if (mic->i80_mode)
val |= MIC0_I80_MUX;
else
val |= MIC0_RGB_MUX;
val |= MIC0_ON_MUX;
} else {
val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX);
}
ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
if (ret)
DRM_DEV_ERROR(mic->dev,
"mic: Failed to write system register\n");
}
static int mic_sw_reset(struct exynos_mic *mic)
{
unsigned int retry = 100;
int ret;
writel(MIC_SW_RST, mic->reg + MIC_OP);
while (retry-- > 0) {
ret = readl(mic->reg + MIC_OP);
if (!(ret & MIC_SW_RST))
return 0;
udelay(10);
}
return -ETIMEDOUT;
}
static void mic_set_porch_timing(struct exynos_mic *mic)
{
struct videomode vm = mic->vm;
u32 reg;
reg = MIC_V_PULSE_WIDTH(vm.vsync_len) +
MIC_V_PERIOD_LINE(vm.vsync_len + vm.vactive +
vm.vback_porch + vm.vfront_porch);
writel(reg, mic->reg + MIC_V_TIMING_0);
reg = MIC_VBP_SIZE(vm.vback_porch) +
MIC_VFP_SIZE(vm.vfront_porch);
writel(reg, mic->reg + MIC_V_TIMING_1);
reg = MIC_V_PULSE_WIDTH(vm.hsync_len) +
MIC_V_PERIOD_LINE(vm.hsync_len + vm.hactive +
vm.hback_porch + vm.hfront_porch);
writel(reg, mic->reg + MIC_INPUT_TIMING_0);
reg = MIC_VBP_SIZE(vm.hback_porch) +
MIC_VFP_SIZE(vm.hfront_porch);
writel(reg, mic->reg + MIC_INPUT_TIMING_1);
}
static void mic_set_img_size(struct exynos_mic *mic)
{
struct videomode *vm = &mic->vm;
u32 reg;
reg = MIC_IMG_H_SIZE(vm->hactive) +
MIC_IMG_V_SIZE(vm->vactive);
writel(reg, mic->reg + MIC_IMG_SIZE);
}
static void mic_set_output_timing(struct exynos_mic *mic)
{
struct videomode vm = mic->vm;
u32 reg, bs_size_2d;
DRM_DEV_DEBUG(mic->dev, "w: %u, h: %u\n", vm.hactive, vm.vactive);
bs_size_2d = ((vm.hactive >> 2) << 1) + (vm.vactive % 4);
reg = MIC_BS_SIZE_2D(bs_size_2d);
writel(reg, mic->reg + MIC_2D_OUTPUT_TIMING_2);
if (!mic->i80_mode) {
reg = MIC_H_PULSE_WIDTH_2D(vm.hsync_len) +
MIC_H_PERIOD_PIXEL_2D(vm.hsync_len + bs_size_2d +
vm.hback_porch + vm.hfront_porch);
writel(reg, mic->reg + MIC_2D_OUTPUT_TIMING_0);
reg = MIC_HBP_SIZE_2D(vm.hback_porch) +
MIC_H_PERIOD_PIXEL_2D(vm.hfront_porch);
writel(reg, mic->reg + MIC_2D_OUTPUT_TIMING_1);
}
}
static void mic_set_reg_on(struct exynos_mic *mic, bool enable)
{
u32 reg = readl(mic->reg + MIC_OP);
if (enable) {
reg &= ~(MIC_MODE_SEL_MASK | MIC_CORE_VER_CONTROL | MIC_PSR_EN);
reg |= (MIC_CORE_EN | MIC_BS_CHG_OUT | MIC_ON_REG);
reg &= ~MIC_MODE_SEL_COMMAND_MODE;
if (mic->i80_mode)
reg |= MIC_MODE_SEL_COMMAND_MODE;
} else {
reg &= ~MIC_CORE_EN;
}
reg |= MIC_UPD_REG;
writel(reg, mic->reg + MIC_OP);
}
static void mic_post_disable(struct drm_bridge *bridge)
{
struct exynos_mic *mic = bridge->driver_private;
mutex_lock(&mic_mutex);
if (!mic->enabled)
goto already_disabled;
mic_set_path(mic, 0);
pm_runtime_put(mic->dev);
mic->enabled = 0;
already_disabled:
mutex_unlock(&mic_mutex);
}
static void mic_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct exynos_mic *mic = bridge->driver_private;
mutex_lock(&mic_mutex);
drm_display_mode_to_videomode(mode, &mic->vm);
mic->i80_mode = to_exynos_crtc(bridge->encoder->crtc)->i80_mode;
mutex_unlock(&mic_mutex);
}
static void mic_pre_enable(struct drm_bridge *bridge)
{
struct exynos_mic *mic = bridge->driver_private;
int ret;
mutex_lock(&mic_mutex);
if (mic->enabled)
goto unlock;
ret = pm_runtime_resume_and_get(mic->dev);
if (ret < 0)
goto unlock;
mic_set_path(mic, 1);
ret = mic_sw_reset(mic);
if (ret) {
DRM_DEV_ERROR(mic->dev, "Failed to reset\n");
goto turn_off;
}
if (!mic->i80_mode)
mic_set_porch_timing(mic);
mic_set_img_size(mic);
mic_set_output_timing(mic);
mic_set_reg_on(mic, 1);
mic->enabled = 1;
mutex_unlock(&mic_mutex);
return;
turn_off:
pm_runtime_put(mic->dev);
unlock:
mutex_unlock(&mic_mutex);
}
static const struct drm_bridge_funcs mic_bridge_funcs = {
.post_disable = mic_post_disable,
.mode_set = mic_mode_set,
.pre_enable = mic_pre_enable,
};
static int exynos_mic_bind(struct device *dev, struct device *master,
void *data)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_crtc *crtc = exynos_drm_crtc_get_by_type(drm_dev,
EXYNOS_DISPLAY_TYPE_LCD);
struct drm_encoder *e, *encoder = NULL;
drm_for_each_encoder(e, drm_dev)
if (e->possible_crtcs == drm_crtc_mask(&crtc->base))
encoder = e;
if (!encoder)
return -ENODEV;
mic->bridge.driver_private = mic;
return drm_bridge_attach(encoder, &mic->bridge, NULL, 0);
}
static void exynos_mic_unbind(struct device *dev, struct device *master,
void *data)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
mutex_lock(&mic_mutex);
if (!mic->enabled)
goto already_disabled;
pm_runtime_put(mic->dev);
already_disabled:
mutex_unlock(&mic_mutex);
}
static const struct component_ops exynos_mic_component_ops = {
.bind = exynos_mic_bind,
.unbind = exynos_mic_unbind,
};
static int exynos_mic_suspend(struct device *dev)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
int i;
for (i = NUM_CLKS - 1; i > -1; i--)
clk_disable_unprepare(mic->clks[i]);
return 0;
}
static int exynos_mic_resume(struct device *dev)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
int ret, i;
for (i = 0; i < NUM_CLKS; i++) {
ret = clk_prepare_enable(mic->clks[i]);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to enable clock (%s)\n",
clk_names[i]);
while (--i > -1)
clk_disable_unprepare(mic->clks[i]);
return ret;
}
}
return 0;
}
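/*
* Design note: the enable loop above rolls back already-enabled clocks by
* hand. A sketch of the same resume path with the clk_bulk API (assuming a
* struct clk_bulk_data array filled from clk_names[] at probe time):
*/
static int exynos_mic_resume_bulk(struct device *dev,
struct clk_bulk_data *clks)
{
/* Enables all clocks; on error the helper undoes the earlier ones. */
return clk_bulk_prepare_enable(NUM_CLKS, clks);
}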
static DEFINE_RUNTIME_DEV_PM_OPS(exynos_mic_pm_ops, exynos_mic_suspend,
exynos_mic_resume, NULL);
static int exynos_mic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_mic *mic;
struct resource res;
int ret, i;
mic = devm_kzalloc(dev, sizeof(*mic), GFP_KERNEL);
if (!mic) {
DRM_DEV_ERROR(dev,
"mic: Failed to allocate memory for MIC object\n");
ret = -ENOMEM;
goto err;
}
mic->dev = dev;
ret = of_address_to_resource(dev->of_node, 0, &res);
if (ret) {
DRM_DEV_ERROR(dev, "mic: Failed to get mem region for MIC\n");
goto err;
}
mic->reg = devm_ioremap(dev, res.start, resource_size(&res));
if (!mic->reg) {
DRM_DEV_ERROR(dev, "mic: Failed to remap for MIC\n");
ret = -ENOMEM;
goto err;
}
mic->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,disp-syscon");
if (IS_ERR(mic->sysreg)) {
DRM_DEV_ERROR(dev, "mic: Failed to get system register.\n");
ret = PTR_ERR(mic->sysreg);
goto err;
}
for (i = 0; i < NUM_CLKS; i++) {
mic->clks[i] = devm_clk_get(dev, clk_names[i]);
if (IS_ERR(mic->clks[i])) {
DRM_DEV_ERROR(dev, "mic: Failed to get clock (%s)\n",
clk_names[i]);
ret = PTR_ERR(mic->clks[i]);
goto err;
}
}
platform_set_drvdata(pdev, mic);
mic->bridge.funcs = &mic_bridge_funcs;
mic->bridge.of_node = dev->of_node;
drm_bridge_add(&mic->bridge);
pm_runtime_enable(dev);
ret = component_add(dev, &exynos_mic_component_ops);
if (ret)
goto err_pm;
DRM_DEV_DEBUG_KMS(dev, "MIC has been probed\n");
return 0;
err_pm:
pm_runtime_disable(dev);
err:
return ret;
}
static int exynos_mic_remove(struct platform_device *pdev)
{
struct exynos_mic *mic = platform_get_drvdata(pdev);
component_del(&pdev->dev, &exynos_mic_component_ops);
pm_runtime_disable(&pdev->dev);
drm_bridge_remove(&mic->bridge);
return 0;
}
static const struct of_device_id exynos_mic_of_match[] = {
{ .compatible = "samsung,exynos5433-mic" },
{ }
};
MODULE_DEVICE_TABLE(of, exynos_mic_of_match);
struct platform_driver mic_driver = {
.probe = exynos_mic_probe,
.remove = exynos_mic_remove,
.driver = {
.name = "exynos-mic",
.pm = pm_ptr(&exynos_mic_pm_ops),
.owner = THIS_MODULE,
.of_match_table = exynos_mic_of_match,
},
};
| linux-master | drivers/gpu/drm/exynos/exynos_drm_mic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012 Samsung Electronics Co.Ltd
* Authors:
* Eunchul Kim <[email protected]>
* Jinyoung Jeon <[email protected]>
* Sangmin Lee <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_ipp.h"
#include "regs-gsc.h"
/*
* GSC stands for General SCaler. It supports image scaling and rotation
* together with input/output DMA operations: the input DMA reads image
* data from memory and the output DMA writes the result back to memory.
* GSC also provides image effect functions.
*/
#define GSC_MAX_CLOCKS 8
#define GSC_MAX_SRC 4
#define GSC_MAX_DST 16
#define GSC_RESET_TIMEOUT 50
#define GSC_BUF_STOP 1
#define GSC_BUF_START 2
#define GSC_REG_SZ 16
#define GSC_WIDTH_ITU_709 1280
#define GSC_SC_UP_MAX_RATIO 65536
#define GSC_SC_DOWN_RATIO_7_8 74898
#define GSC_SC_DOWN_RATIO_6_8 87381
#define GSC_SC_DOWN_RATIO_5_8 104857
#define GSC_SC_DOWN_RATIO_4_8 131072
#define GSC_SC_DOWN_RATIO_3_8 174762
#define GSC_SC_DOWN_RATIO_2_8 262144
#define GSC_CROP_MAX 8192
#define GSC_CROP_MIN 32
#define GSC_SCALE_MAX 4224
#define GSC_SCALE_MIN 32
#define GSC_COEF_RATIO 7
#define GSC_COEF_PHASE 9
#define GSC_COEF_ATTR 16
#define GSC_COEF_H_8T 8
#define GSC_COEF_V_4T 4
#define GSC_COEF_DEPTH 3
#define GSC_AUTOSUSPEND_DELAY 2000
#define get_gsc_context(dev) dev_get_drvdata(dev)
#define gsc_read(offset) readl(ctx->regs + (offset))
#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
/*
* A structure of scaler.
*
* @range: color range: false selects narrow, true selects wide.
* @pre_shfactor: pre-scaler shift factor.
* @pre_hratio: horizontal ratio of the prescaler.
* @pre_vratio: vertical ratio of the prescaler.
* @main_hratio: the main scaler's horizontal ratio.
* @main_vratio: the main scaler's vertical ratio.
*/
struct gsc_scaler {
bool range;
u32 pre_shfactor;
u32 pre_hratio;
u32 pre_vratio;
unsigned long main_hratio;
unsigned long main_vratio;
};
/*
* A structure of gsc context.
*
* @regs: memory mapped io registers.
* @gsc_clk: gsc gate clock.
* @sc: scaler information.
* @id: gsc id.
* @irq: irq number.
* @rotation: true when the source is rotated by 90/270 degrees (width and height swapped).
*/
struct gsc_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
void *dma_priv;
struct device *dev;
struct exynos_drm_ipp_task *task;
struct exynos_drm_ipp_formats *formats;
unsigned int num_formats;
void __iomem *regs;
const char **clk_names;
struct clk *clocks[GSC_MAX_CLOCKS];
int num_clocks;
struct gsc_scaler sc;
int id;
int irq;
bool rotation;
};
/**
* struct gsc_driverdata - per device type driver data for init time.
*
* @limits: picture size limits array
* @num_limits: number of items in the aforementioned array
* @clk_names: names of clocks needed by this variant
* @num_clocks: the number of clocks needed by this variant
*/
struct gsc_driverdata {
const struct drm_exynos_ipp_limit *limits;
int num_limits;
const char *clk_names[GSC_MAX_CLOCKS];
int num_clocks;
};
/* 8-tap Filter Coefficient */
static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
{ /* Ratio <= 65536 (~8:8) */
{ 0, 0, 0, 128, 0, 0, 0, 0 },
{ -1, 2, -6, 127, 7, -2, 1, 0 },
{ -1, 4, -12, 125, 16, -5, 1, 0 },
{ -1, 5, -15, 120, 25, -8, 2, 0 },
{ -1, 6, -18, 114, 35, -10, 3, -1 },
{ -1, 6, -20, 107, 46, -13, 4, -1 },
{ -2, 7, -21, 99, 57, -16, 5, -1 },
{ -1, 6, -20, 89, 68, -18, 5, -1 },
{ -1, 6, -20, 79, 79, -20, 6, -1 },
{ -1, 5, -18, 68, 89, -20, 6, -1 },
{ -1, 5, -16, 57, 99, -21, 7, -2 },
{ -1, 4, -13, 46, 107, -20, 6, -1 },
{ -1, 3, -10, 35, 114, -18, 6, -1 },
{ 0, 2, -8, 25, 120, -15, 5, -1 },
{ 0, 1, -5, 16, 125, -12, 4, -1 },
{ 0, 1, -2, 7, 127, -6, 2, -1 }
}, { /* 65536 < Ratio <= 74898 (~8:7) */
{ 3, -8, 14, 111, 13, -8, 3, 0 },
{ 2, -6, 7, 112, 21, -10, 3, -1 },
{ 2, -4, 1, 110, 28, -12, 4, -1 },
{ 1, -2, -3, 106, 36, -13, 4, -1 },
{ 1, -1, -7, 103, 44, -15, 4, -1 },
{ 1, 1, -11, 97, 53, -16, 4, -1 },
{ 0, 2, -13, 91, 61, -16, 4, -1 },
{ 0, 3, -15, 85, 69, -17, 4, -1 },
{ 0, 3, -16, 77, 77, -16, 3, 0 },
{ -1, 4, -17, 69, 85, -15, 3, 0 },
{ -1, 4, -16, 61, 91, -13, 2, 0 },
{ -1, 4, -16, 53, 97, -11, 1, 1 },
{ -1, 4, -15, 44, 103, -7, -1, 1 },
{ -1, 4, -13, 36, 106, -3, -2, 1 },
{ -1, 4, -12, 28, 110, 1, -4, 2 },
{ -1, 3, -10, 21, 112, 7, -6, 2 }
}, { /* 74898 < Ratio <= 87381 (~8:6) */
{ 2, -11, 25, 96, 25, -11, 2, 0 },
{ 2, -10, 19, 96, 31, -12, 2, 0 },
{ 2, -9, 14, 94, 37, -12, 2, 0 },
{ 2, -8, 10, 92, 43, -12, 1, 0 },
{ 2, -7, 5, 90, 49, -12, 1, 0 },
{ 2, -5, 1, 86, 55, -12, 0, 1 },
{ 2, -4, -2, 82, 61, -11, -1, 1 },
{ 1, -3, -5, 77, 67, -9, -1, 1 },
{ 1, -2, -7, 72, 72, -7, -2, 1 },
{ 1, -1, -9, 67, 77, -5, -3, 1 },
{ 1, -1, -11, 61, 82, -2, -4, 2 },
{ 1, 0, -12, 55, 86, 1, -5, 2 },
{ 0, 1, -12, 49, 90, 5, -7, 2 },
{ 0, 1, -12, 43, 92, 10, -8, 2 },
{ 0, 2, -12, 37, 94, 14, -9, 2 },
{ 0, 2, -12, 31, 96, 19, -10, 2 }
}, { /* 87381 < Ratio <= 104857 (~8:5) */
{ -1, -8, 33, 80, 33, -8, -1, 0 },
{ -1, -8, 28, 80, 37, -7, -2, 1 },
{ 0, -8, 24, 79, 41, -7, -2, 1 },
{ 0, -8, 20, 78, 46, -6, -3, 1 },
{ 0, -8, 16, 76, 50, -4, -3, 1 },
{ 0, -7, 13, 74, 54, -3, -4, 1 },
{ 1, -7, 10, 71, 58, -1, -5, 1 },
{ 1, -6, 6, 68, 62, 1, -5, 1 },
{ 1, -6, 4, 65, 65, 4, -6, 1 },
{ 1, -5, 1, 62, 68, 6, -6, 1 },
{ 1, -5, -1, 58, 71, 10, -7, 1 },
{ 1, -4, -3, 54, 74, 13, -7, 0 },
{ 1, -3, -4, 50, 76, 16, -8, 0 },
{ 1, -3, -6, 46, 78, 20, -8, 0 },
{ 1, -2, -7, 41, 79, 24, -8, 0 },
{ 1, -2, -7, 37, 80, 28, -8, -1 }
}, { /* 104857 < Ratio <= 131072 (~8:4) */
{ -3, 0, 35, 64, 35, 0, -3, 0 },
{ -3, -1, 32, 64, 38, 1, -3, 0 },
{ -2, -2, 29, 63, 41, 2, -3, 0 },
{ -2, -3, 27, 63, 43, 4, -4, 0 },
{ -2, -3, 24, 61, 46, 6, -4, 0 },
{ -2, -3, 21, 60, 49, 7, -4, 0 },
{ -1, -4, 19, 59, 51, 9, -4, -1 },
{ -1, -4, 16, 57, 53, 12, -4, -1 },
{ -1, -4, 14, 55, 55, 14, -4, -1 },
{ -1, -4, 12, 53, 57, 16, -4, -1 },
{ -1, -4, 9, 51, 59, 19, -4, -1 },
{ 0, -4, 7, 49, 60, 21, -3, -2 },
{ 0, -4, 6, 46, 61, 24, -3, -2 },
{ 0, -4, 4, 43, 63, 27, -3, -2 },
{ 0, -3, 2, 41, 63, 29, -2, -2 },
{ 0, -3, 1, 38, 64, 32, -1, -3 }
}, { /* 131072 < Ratio <= 174762 (~8:3) */
{ -1, 8, 33, 48, 33, 8, -1, 0 },
{ -1, 7, 31, 49, 35, 9, -1, -1 },
{ -1, 6, 30, 49, 36, 10, -1, -1 },
{ -1, 5, 28, 48, 38, 12, -1, -1 },
{ -1, 4, 26, 48, 39, 13, 0, -1 },
{ -1, 3, 24, 47, 41, 15, 0, -1 },
{ -1, 2, 23, 47, 42, 16, 0, -1 },
{ -1, 2, 21, 45, 43, 18, 1, -1 },
{ -1, 1, 19, 45, 45, 19, 1, -1 },
{ -1, 1, 18, 43, 45, 21, 2, -1 },
{ -1, 0, 16, 42, 47, 23, 2, -1 },
{ -1, 0, 15, 41, 47, 24, 3, -1 },
{ -1, 0, 13, 39, 48, 26, 4, -1 },
{ -1, -1, 12, 38, 48, 28, 5, -1 },
{ -1, -1, 10, 36, 49, 30, 6, -1 },
{ -1, -1, 9, 35, 49, 31, 7, -1 }
}, { /* 174762 < Ratio <= 262144 (~8:2) */
{ 2, 13, 30, 38, 30, 13, 2, 0 },
{ 2, 12, 29, 38, 30, 14, 3, 0 },
{ 2, 11, 28, 38, 31, 15, 3, 0 },
{ 2, 10, 26, 38, 32, 16, 4, 0 },
{ 1, 10, 26, 37, 33, 17, 4, 0 },
{ 1, 9, 24, 37, 34, 18, 5, 0 },
{ 1, 8, 24, 37, 34, 19, 5, 0 },
{ 1, 7, 22, 36, 35, 20, 6, 1 },
{ 1, 6, 21, 36, 36, 21, 6, 1 },
{ 1, 6, 20, 35, 36, 22, 7, 1 },
{ 0, 5, 19, 34, 37, 24, 8, 1 },
{ 0, 5, 18, 34, 37, 24, 9, 1 },
{ 0, 4, 17, 33, 37, 26, 10, 1 },
{ 0, 4, 16, 32, 38, 26, 10, 2 },
{ 0, 3, 15, 31, 38, 28, 11, 2 },
{ 0, 3, 14, 30, 38, 29, 12, 2 }
}
};
/* 4-tap Filter Coefficient */
static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
{ /* Ratio <= 65536 (~8:8) */
{ 0, 128, 0, 0 },
{ -4, 127, 5, 0 },
{ -6, 124, 11, -1 },
{ -8, 118, 19, -1 },
{ -8, 111, 27, -2 },
{ -8, 102, 37, -3 },
{ -8, 92, 48, -4 },
{ -7, 81, 59, -5 },
{ -6, 70, 70, -6 },
{ -5, 59, 81, -7 },
{ -4, 48, 92, -8 },
{ -3, 37, 102, -8 },
{ -2, 27, 111, -8 },
{ -1, 19, 118, -8 },
{ -1, 11, 124, -6 },
{ 0, 5, 127, -4 }
}, { /* 65536 < Ratio <= 74898 (~8:7) */
{ 8, 112, 8, 0 },
{ 4, 111, 14, -1 },
{ 1, 109, 20, -2 },
{ -2, 105, 27, -2 },
{ -3, 100, 34, -3 },
{ -5, 93, 43, -3 },
{ -5, 86, 51, -4 },
{ -5, 77, 60, -4 },
{ -5, 69, 69, -5 },
{ -4, 60, 77, -5 },
{ -4, 51, 86, -5 },
{ -3, 43, 93, -5 },
{ -3, 34, 100, -3 },
{ -2, 27, 105, -2 },
{ -2, 20, 109, 1 },
{ -1, 14, 111, 4 }
}, { /* 74898 < Ratio <= 87381 (~8:6) */
{ 16, 96, 16, 0 },
{ 12, 97, 21, -2 },
{ 8, 96, 26, -2 },
{ 5, 93, 32, -2 },
{ 2, 89, 39, -2 },
{ 0, 84, 46, -2 },
{ -1, 79, 53, -3 },
{ -2, 73, 59, -2 },
{ -2, 66, 66, -2 },
{ -2, 59, 73, -2 },
{ -3, 53, 79, -1 },
{ -2, 46, 84, 0 },
{ -2, 39, 89, 2 },
{ -2, 32, 93, 5 },
{ -2, 26, 96, 8 },
{ -2, 21, 97, 12 }
}, { /* 87381 < Ratio <= 104857 (~8:5) */
{ 22, 84, 22, 0 },
{ 18, 85, 26, -1 },
{ 14, 84, 31, -1 },
{ 11, 82, 36, -1 },
{ 8, 79, 42, -1 },
{ 6, 76, 47, -1 },
{ 4, 72, 52, 0 },
{ 2, 68, 58, 0 },
{ 1, 63, 63, 1 },
{ 0, 58, 68, 2 },
{ 0, 52, 72, 4 },
{ -1, 47, 76, 6 },
{ -1, 42, 79, 8 },
{ -1, 36, 82, 11 },
{ -1, 31, 84, 14 },
{ -1, 26, 85, 18 }
}, { /* 104857 < Ratio <= 131072 (~8:4) */
{ 26, 76, 26, 0 },
{ 22, 76, 30, 0 },
{ 19, 75, 34, 0 },
{ 16, 73, 38, 1 },
{ 13, 71, 43, 1 },
{ 10, 69, 47, 2 },
{ 8, 66, 51, 3 },
{ 6, 63, 55, 4 },
{ 5, 59, 59, 5 },
{ 4, 55, 63, 6 },
{ 3, 51, 66, 8 },
{ 2, 47, 69, 10 },
{ 1, 43, 71, 13 },
{ 1, 38, 73, 16 },
{ 0, 34, 75, 19 },
{ 0, 30, 76, 22 }
}, { /* 131072 < Ratio <= 174762 (~8:3) */
{ 29, 70, 29, 0 },
{ 26, 68, 32, 2 },
{ 23, 67, 36, 2 },
{ 20, 66, 39, 3 },
{ 17, 65, 43, 3 },
{ 15, 63, 46, 4 },
{ 12, 61, 50, 5 },
{ 10, 58, 53, 7 },
{ 8, 56, 56, 8 },
{ 7, 53, 58, 10 },
{ 5, 50, 61, 12 },
{ 4, 46, 63, 15 },
{ 3, 43, 65, 17 },
{ 3, 39, 66, 20 },
{ 2, 36, 67, 23 },
{ 2, 32, 68, 26 }
}, { /* 174762 < Ratio <= 262144 (~8:2) */
{ 32, 64, 32, 0 },
{ 28, 63, 34, 3 },
{ 25, 62, 37, 4 },
{ 22, 62, 40, 4 },
{ 19, 61, 43, 5 },
{ 17, 59, 46, 6 },
{ 15, 58, 48, 7 },
{ 13, 55, 51, 9 },
{ 11, 53, 53, 11 },
{ 9, 51, 55, 13 },
{ 7, 48, 58, 15 },
{ 6, 46, 59, 17 },
{ 5, 43, 61, 19 },
{ 4, 40, 62, 22 },
{ 4, 37, 62, 25 },
{ 3, 34, 63, 28 }
}
};
static int gsc_sw_reset(struct gsc_context *ctx)
{
u32 cfg;
int count = GSC_RESET_TIMEOUT;
/* s/w reset */
cfg = (GSC_SW_RESET_SRESET);
gsc_write(cfg, GSC_SW_RESET);
/* wait for the s/w reset to complete */
while (count--) {
cfg = gsc_read(GSC_SW_RESET);
if (!cfg)
break;
usleep_range(1000, 2000);
}
if (cfg) {
DRM_DEV_ERROR(ctx->dev, "failed to reset gsc h/w.\n");
return -EBUSY;
}
/* after reset, mask all input/output base-address ping-pong slots */
cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
cfg |= (GSC_IN_BASE_ADDR_MASK |
GSC_IN_BASE_ADDR_PINGPONG(0));
gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
cfg |= (GSC_OUT_BASE_ADDR_MASK |
GSC_OUT_BASE_ADDR_PINGPONG(0));
gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
return 0;
}
static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
bool overflow, bool done)
{
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]overflow[%d]level[%d]\n",
enable, overflow, done);
cfg = gsc_read(GSC_IRQ);
cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
if (enable)
cfg |= GSC_IRQ_ENABLE;
else
cfg &= ~GSC_IRQ_ENABLE;
if (overflow)
cfg &= ~GSC_IRQ_OR_MASK;
else
cfg |= GSC_IRQ_OR_MASK;
if (done)
cfg &= ~GSC_IRQ_FRMDONE_MASK;
else
cfg |= GSC_IRQ_FRMDONE_MASK;
gsc_write(cfg, GSC_IRQ);
}
static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = gsc_read(GSC_IN_CON);
cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
switch (fmt) {
case DRM_FORMAT_RGB565:
cfg |= GSC_IN_RGB565;
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
cfg |= GSC_IN_XRGB8888;
break;
case DRM_FORMAT_BGRX8888:
cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
break;
case DRM_FORMAT_YUYV:
cfg |= (GSC_IN_YUV422_1P |
GSC_IN_YUV422_1P_ORDER_LSB_Y |
GSC_IN_CHROMA_ORDER_CBCR);
break;
case DRM_FORMAT_YVYU:
cfg |= (GSC_IN_YUV422_1P |
GSC_IN_YUV422_1P_ORDER_LSB_Y |
GSC_IN_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_UYVY:
cfg |= (GSC_IN_YUV422_1P |
GSC_IN_YUV422_1P_OEDER_LSB_C |
GSC_IN_CHROMA_ORDER_CBCR);
break;
case DRM_FORMAT_VYUY:
cfg |= (GSC_IN_YUV422_1P |
GSC_IN_YUV422_1P_OEDER_LSB_C |
GSC_IN_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
break;
case DRM_FORMAT_NV61:
cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
break;
case DRM_FORMAT_YUV422:
cfg |= GSC_IN_YUV422_3P;
break;
case DRM_FORMAT_YUV420:
cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
break;
case DRM_FORMAT_YVU420:
cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
break;
case DRM_FORMAT_NV12:
cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
break;
case DRM_FORMAT_NV16:
cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
break;
}
if (tiled)
cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
gsc_write(cfg, GSC_IN_CON);
}
static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation)
{
unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
u32 cfg;
cfg = gsc_read(GSC_IN_CON);
cfg &= ~GSC_IN_ROT_MASK;
switch (degree) {
case DRM_MODE_ROTATE_0:
if (rotation & DRM_MODE_REFLECT_X)
cfg |= GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_Y)
cfg |= GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_90:
cfg |= GSC_IN_ROT_90;
if (rotation & DRM_MODE_REFLECT_X)
cfg |= GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_Y)
cfg |= GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_180:
cfg |= GSC_IN_ROT_180;
if (rotation & DRM_MODE_REFLECT_X)
cfg &= ~GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_Y)
cfg &= ~GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_270:
cfg |= GSC_IN_ROT_270;
if (rotation & DRM_MODE_REFLECT_X)
cfg &= ~GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_Y)
cfg &= ~GSC_IN_ROT_YFLIP;
break;
}
gsc_write(cfg, GSC_IN_CON);
ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
}
static void gsc_src_set_size(struct gsc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
struct gsc_scaler *sc = &ctx->sc;
u32 cfg;
/* pixel offset */
cfg = (GSC_SRCIMG_OFFSET_X(buf->rect.x) |
GSC_SRCIMG_OFFSET_Y(buf->rect.y));
gsc_write(cfg, GSC_SRCIMG_OFFSET);
/* cropped size */
cfg = (GSC_CROPPED_WIDTH(buf->rect.w) |
GSC_CROPPED_HEIGHT(buf->rect.h));
gsc_write(cfg, GSC_CROPPED_SIZE);
/* original size */
cfg = gsc_read(GSC_SRCIMG_SIZE);
cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
GSC_SRCIMG_WIDTH_MASK);
cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
GSC_SRCIMG_HEIGHT(buf->buf.height));
gsc_write(cfg, GSC_SRCIMG_SIZE);
cfg = gsc_read(GSC_IN_CON);
cfg &= ~GSC_IN_RGB_TYPE_MASK;
if (buf->rect.w >= GSC_WIDTH_ITU_709) {
if (sc->range)
cfg |= GSC_IN_RGB_HD_WIDE;
else
cfg |= GSC_IN_RGB_HD_NARROW;
} else {
if (sc->range)
cfg |= GSC_IN_RGB_SD_WIDE;
else
cfg |= GSC_IN_RGB_SD_NARROW;
}
gsc_write(cfg, GSC_IN_CON);
}
static void gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
bool enqueue)
{
bool masked = !enqueue;
u32 cfg;
u32 mask = 0x00000001 << buf_id;
/* mask register set */
cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
/* sequence id */
cfg &= ~mask;
cfg |= masked << buf_id;
gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
}
static void gsc_src_set_addr(struct gsc_context *ctx, u32 buf_id,
struct exynos_drm_ipp_buffer *buf)
{
/* address register set */
gsc_write(buf->dma_addr[0], GSC_IN_BASE_ADDR_Y(buf_id));
gsc_write(buf->dma_addr[1], GSC_IN_BASE_ADDR_CB(buf_id));
gsc_write(buf->dma_addr[2], GSC_IN_BASE_ADDR_CR(buf_id));
gsc_src_set_buf_seq(ctx, buf_id, true);
}
static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = gsc_read(GSC_OUT_CON);
cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
GSC_OUT_GLOBAL_ALPHA_MASK);
switch (fmt) {
case DRM_FORMAT_RGB565:
cfg |= GSC_OUT_RGB565;
break;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_GLOBAL_ALPHA(0xff));
break;
case DRM_FORMAT_BGRX8888:
cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
break;
case DRM_FORMAT_YUYV:
cfg |= (GSC_OUT_YUV422_1P |
GSC_OUT_YUV422_1P_ORDER_LSB_Y |
GSC_OUT_CHROMA_ORDER_CBCR);
break;
case DRM_FORMAT_YVYU:
cfg |= (GSC_OUT_YUV422_1P |
GSC_OUT_YUV422_1P_ORDER_LSB_Y |
GSC_OUT_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_UYVY:
cfg |= (GSC_OUT_YUV422_1P |
GSC_OUT_YUV422_1P_OEDER_LSB_C |
GSC_OUT_CHROMA_ORDER_CBCR);
break;
case DRM_FORMAT_VYUY:
cfg |= (GSC_OUT_YUV422_1P |
GSC_OUT_YUV422_1P_OEDER_LSB_C |
GSC_OUT_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
break;
case DRM_FORMAT_NV61:
cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
break;
case DRM_FORMAT_YUV422:
cfg |= GSC_OUT_YUV422_3P;
break;
case DRM_FORMAT_YUV420:
cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
break;
case DRM_FORMAT_YVU420:
cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
break;
case DRM_FORMAT_NV12:
cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
break;
case DRM_FORMAT_NV16:
cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
break;
}
if (tiled)
cfg |= (GSC_IN_TILE_C_16x8 | GSC_OUT_TILE_MODE);
gsc_write(cfg, GSC_OUT_CON);
}
static int gsc_get_ratio_shift(struct gsc_context *ctx, u32 src, u32 dst,
u32 *ratio)
{
DRM_DEV_DEBUG_KMS(ctx->dev, "src[%d]dst[%d]\n", src, dst);
if (src >= dst * 8) {
DRM_DEV_ERROR(ctx->dev, "failed to compute ratio and shift.\n");
return -EINVAL;
} else if (src >= dst * 4) {
*ratio = 4;
} else if (src >= dst * 2) {
*ratio = 2;
} else {
*ratio = 1;
}
return 0;
}
static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
{
if (hratio == 4 && vratio == 4)
*shfactor = 4;
else if ((hratio == 4 && vratio == 2) ||
(hratio == 2 && vratio == 4))
*shfactor = 3;
else if ((hratio == 4 && vratio == 1) ||
(hratio == 1 && vratio == 4) ||
(hratio == 2 && vratio == 2))
*shfactor = 2;
else if (hratio == 1 && vratio == 1)
*shfactor = 0;
else
*shfactor = 1;
}
static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
struct drm_exynos_ipp_task_rect *src,
struct drm_exynos_ipp_task_rect *dst)
{
u32 cfg;
u32 src_w, src_h, dst_w, dst_h;
int ret = 0;
src_w = src->w;
src_h = src->h;
if (ctx->rotation) {
dst_w = dst->h;
dst_h = dst->w;
} else {
dst_w = dst->w;
dst_h = dst->h;
}
ret = gsc_get_ratio_shift(ctx, src_w, dst_w, &sc->pre_hratio);
if (ret) {
DRM_DEV_ERROR(ctx->dev, "failed to get ratio horizontal.\n");
return ret;
}
ret = gsc_get_ratio_shift(ctx, src_h, dst_h, &sc->pre_vratio);
if (ret) {
DRM_DEV_ERROR(ctx->dev, "failed to get ratio vertical.\n");
return ret;
}
DRM_DEV_DEBUG_KMS(ctx->dev, "pre_hratio[%d]pre_vratio[%d]\n",
sc->pre_hratio, sc->pre_vratio);
sc->main_hratio = (src_w << 16) / dst_w;
sc->main_vratio = (src_h << 16) / dst_h;
DRM_DEV_DEBUG_KMS(ctx->dev, "main_hratio[%ld]main_vratio[%ld]\n",
sc->main_hratio, sc->main_vratio);
gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
&sc->pre_shfactor);
DRM_DEV_DEBUG_KMS(ctx->dev, "pre_shfactor[%d]\n", sc->pre_shfactor);
cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
GSC_PRESC_H_RATIO(sc->pre_hratio) |
GSC_PRESC_V_RATIO(sc->pre_vratio));
gsc_write(cfg, GSC_PRE_SCALE_RATIO);
return ret;
}
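/*
* Worked example (illustrative numbers): scaling 1920 pixels down to 480.
* gsc_get_ratio_shift() picks pre_hratio = 4 (1920 >= 480 * 4 but less
* than 480 * 8), and main_hratio = (1920 << 16) / 480 = 262144, i.e.
* GSC_SC_DOWN_RATIO_2_8, which selects the steepest coefficient bank in
* gsc_set_h_coef() below.
*/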
static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
{
int i, j, k, sc_ratio;
if (main_hratio <= GSC_SC_UP_MAX_RATIO)
sc_ratio = 0;
else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
sc_ratio = 1;
else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
sc_ratio = 2;
else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
sc_ratio = 3;
else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
sc_ratio = 4;
else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
sc_ratio = 5;
else
sc_ratio = 6;
for (i = 0; i < GSC_COEF_PHASE; i++)
for (j = 0; j < GSC_COEF_H_8T; j++)
for (k = 0; k < GSC_COEF_DEPTH; k++)
gsc_write(h_coef_8t[sc_ratio][i][j],
GSC_HCOEF(i, j, k));
}
static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
{
int i, j, k, sc_ratio;
if (main_vratio <= GSC_SC_UP_MAX_RATIO)
sc_ratio = 0;
else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
sc_ratio = 1;
else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
sc_ratio = 2;
else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
sc_ratio = 3;
else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
sc_ratio = 4;
else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
sc_ratio = 5;
else
sc_ratio = 6;
for (i = 0; i < GSC_COEF_PHASE; i++)
for (j = 0; j < GSC_COEF_V_4T; j++)
for (k = 0; k < GSC_COEF_DEPTH; k++)
gsc_write(v_coef_4t[sc_ratio][i][j],
GSC_VCOEF(i, j, k));
}
static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
{
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "main_hratio[%ld]main_vratio[%ld]\n",
sc->main_hratio, sc->main_vratio);
gsc_set_h_coef(ctx, sc->main_hratio);
cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
gsc_write(cfg, GSC_MAIN_H_RATIO);
gsc_set_v_coef(ctx, sc->main_vratio);
cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
gsc_write(cfg, GSC_MAIN_V_RATIO);
}
static void gsc_dst_set_size(struct gsc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
struct gsc_scaler *sc = &ctx->sc;
u32 cfg;
/* pixel offset */
cfg = (GSC_DSTIMG_OFFSET_X(buf->rect.x) |
GSC_DSTIMG_OFFSET_Y(buf->rect.y));
gsc_write(cfg, GSC_DSTIMG_OFFSET);
/* scaled size */
if (ctx->rotation)
cfg = (GSC_SCALED_WIDTH(buf->rect.h) |
GSC_SCALED_HEIGHT(buf->rect.w));
else
cfg = (GSC_SCALED_WIDTH(buf->rect.w) |
GSC_SCALED_HEIGHT(buf->rect.h));
gsc_write(cfg, GSC_SCALED_SIZE);
/* original size */
cfg = gsc_read(GSC_DSTIMG_SIZE);
cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK);
cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
GSC_DSTIMG_HEIGHT(buf->buf.height);
gsc_write(cfg, GSC_DSTIMG_SIZE);
cfg = gsc_read(GSC_OUT_CON);
cfg &= ~GSC_OUT_RGB_TYPE_MASK;
if (buf->rect.w >= GSC_WIDTH_ITU_709)
if (sc->range)
cfg |= GSC_OUT_RGB_HD_WIDE;
else
cfg |= GSC_OUT_RGB_HD_NARROW;
else
if (sc->range)
cfg |= GSC_OUT_RGB_SD_WIDE;
else
cfg |= GSC_OUT_RGB_SD_NARROW;
gsc_write(cfg, GSC_OUT_CON);
}
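/*
 * Each set bit in GSC_OUT_BASE_ADDR_Y_MASK masks out (disables) one
 * destination buffer slot, so the number of enqueued buffers equals the
 * number of cleared bits.
 */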
static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
{
u32 cfg, i, buf_num = GSC_REG_SZ;
u32 mask = 0x00000001;
cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
for (i = 0; i < GSC_REG_SZ; i++)
if (cfg & (mask << i))
buf_num--;
DRM_DEV_DEBUG_KMS(ctx->dev, "buf_num[%d]\n", buf_num);
return buf_num;
}
static void gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
bool enqueue)
{
bool masked = !enqueue;
u32 cfg;
u32 mask = 0x00000001 << buf_id;
/* mask register set */
cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
/* sequence id */
cfg &= ~mask;
cfg |= masked << buf_id;
gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
/* interrupt enable */
if (enqueue && gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
gsc_handle_irq(ctx, true, false, true);
/* interrupt disable */
if (!enqueue && gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
gsc_handle_irq(ctx, false, false, true);
}
static void gsc_dst_set_addr(struct gsc_context *ctx,
u32 buf_id, struct exynos_drm_ipp_buffer *buf)
{
/* address register set */
gsc_write(buf->dma_addr[0], GSC_OUT_BASE_ADDR_Y(buf_id));
gsc_write(buf->dma_addr[1], GSC_OUT_BASE_ADDR_CB(buf_id));
gsc_write(buf->dma_addr[2], GSC_OUT_BASE_ADDR_CR(buf_id));
gsc_dst_set_buf_seq(ctx, buf_id, true);
}
static int gsc_get_src_buf_index(struct gsc_context *ctx)
{
u32 cfg, curr_index, i;
u32 buf_id = GSC_MAX_SRC;
DRM_DEV_DEBUG_KMS(ctx->dev, "gsc id[%d]\n", ctx->id);
cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
curr_index = GSC_IN_CURR_GET_INDEX(cfg);
for (i = curr_index; i < GSC_MAX_SRC; i++) {
if (!((cfg >> i) & 0x1)) {
buf_id = i;
break;
}
}
DRM_DEV_DEBUG_KMS(ctx->dev, "cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
curr_index, buf_id);
if (buf_id == GSC_MAX_SRC) {
DRM_DEV_ERROR(ctx->dev, "failed to get in buffer index.\n");
return -EINVAL;
}
gsc_src_set_buf_seq(ctx, buf_id, false);
return buf_id;
}
static int gsc_get_dst_buf_index(struct gsc_context *ctx)
{
u32 cfg, curr_index, i;
u32 buf_id = GSC_MAX_DST;
DRM_DEV_DEBUG_KMS(ctx->dev, "gsc id[%d]\n", ctx->id);
cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
for (i = curr_index; i < GSC_MAX_DST; i++) {
if (!((cfg >> i) & 0x1)) {
buf_id = i;
break;
}
}
if (buf_id == GSC_MAX_DST) {
DRM_DEV_ERROR(ctx->dev, "failed to get out buffer index.\n");
return -EINVAL;
}
gsc_dst_set_buf_seq(ctx, buf_id, false);
DRM_DEV_DEBUG_KMS(ctx->dev, "cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
curr_index, buf_id);
return buf_id;
}
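/*
 * Frame-done interrupt: retire the source and destination buffers and
 * complete the IPP task queued by gsc_commit().
 */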
static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
{
struct gsc_context *ctx = dev_id;
u32 status;
int err = 0;
DRM_DEV_DEBUG_KMS(ctx->dev, "gsc id[%d]\n", ctx->id);
status = gsc_read(GSC_IRQ);
if (status & GSC_IRQ_STATUS_OR_IRQ) {
dev_err(ctx->dev, "occurred overflow at %d, status 0x%x.\n",
ctx->id, status);
err = -EINVAL;
}
if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
int src_buf_id, dst_buf_id;
dev_dbg(ctx->dev, "occurred frame done at %d, status 0x%x.\n",
ctx->id, status);
src_buf_id = gsc_get_src_buf_index(ctx);
dst_buf_id = gsc_get_dst_buf_index(ctx);
DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id_src[%d]buf_id_dst[%d]\n",
src_buf_id, dst_buf_id);
if (src_buf_id < 0 || dst_buf_id < 0)
err = -EINVAL;
}
if (ctx->task) {
struct exynos_drm_ipp_task *task = ctx->task;
ctx->task = NULL;
pm_runtime_mark_last_busy(ctx->dev);
pm_runtime_put_autosuspend(ctx->dev);
exynos_drm_ipp_task_done(task, err);
}
return IRQ_HANDLED;
}
static int gsc_reset(struct gsc_context *ctx)
{
struct gsc_scaler *sc = &ctx->sc;
int ret;
/* reset h/w block */
ret = gsc_sw_reset(ctx);
if (ret < 0) {
dev_err(ctx->dev, "failed to reset hardware.\n");
return ret;
}
/* scaler setting */
memset(&ctx->sc, 0x0, sizeof(ctx->sc));
sc->range = true;
return 0;
}
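/*
 * Program a single memory-to-memory pass in one-shot mode; the run
 * completes with the frame-done interrupt handled above.
 */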
static void gsc_start(struct gsc_context *ctx)
{
u32 cfg;
gsc_handle_irq(ctx, true, false, true);
/* enable one shot */
cfg = gsc_read(GSC_ENABLE);
cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
GSC_ENABLE_CLK_GATE_MODE_MASK);
cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
gsc_write(cfg, GSC_ENABLE);
/* src dma memory */
cfg = gsc_read(GSC_IN_CON);
cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
cfg |= GSC_IN_PATH_MEMORY;
gsc_write(cfg, GSC_IN_CON);
/* dst dma memory */
cfg = gsc_read(GSC_OUT_CON);
cfg |= GSC_OUT_PATH_MEMORY;
gsc_write(cfg, GSC_OUT_CON);
gsc_set_scaler(ctx, &ctx->sc);
cfg = gsc_read(GSC_ENABLE);
cfg |= GSC_ENABLE_ON;
gsc_write(cfg, GSC_ENABLE);
}
static int gsc_commit(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
struct gsc_context *ctx = container_of(ipp, struct gsc_context, ipp);
int ret;
ret = pm_runtime_resume_and_get(ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "failed to enable GScaler device.\n");
return ret;
}
ctx->task = task;
ret = gsc_reset(ctx);
if (ret) {
pm_runtime_put_autosuspend(ctx->dev);
ctx->task = NULL;
return ret;
}
gsc_src_set_fmt(ctx, task->src.buf.fourcc, task->src.buf.modifier);
gsc_src_set_transf(ctx, task->transform.rotation);
gsc_src_set_size(ctx, &task->src);
gsc_src_set_addr(ctx, 0, &task->src);
gsc_dst_set_fmt(ctx, task->dst.buf.fourcc, task->dst.buf.modifier);
gsc_dst_set_size(ctx, &task->dst);
gsc_dst_set_addr(ctx, 0, &task->dst);
gsc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect);
gsc_start(ctx);
return 0;
}
static void gsc_abort(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
struct gsc_context *ctx =
container_of(ipp, struct gsc_context, ipp);
gsc_reset(ctx);
if (ctx->task) {
struct exynos_drm_ipp_task *task = ctx->task;
ctx->task = NULL;
pm_runtime_mark_last_busy(ctx->dev);
pm_runtime_put_autosuspend(ctx->dev);
exynos_drm_ipp_task_done(task, -EIO);
}
}
static struct exynos_drm_ipp_funcs ipp_funcs = {
.commit = gsc_commit,
.abort = gsc_abort,
};
static int gsc_bind(struct device *dev, struct device *master, void *data)
{
struct gsc_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &ctx->ipp;
	ctx->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
ctx->formats, ctx->num_formats, "gsc");
dev_info(dev, "The exynos gscaler has been probed successfully\n");
return 0;
}
static void gsc_unbind(struct device *dev, struct device *master,
void *data)
{
struct gsc_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
}
static const struct component_ops gsc_component_ops = {
.bind = gsc_bind,
.unbind = gsc_unbind,
};
static const unsigned int gsc_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_BGRX8888,
DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_NV21, DRM_FORMAT_NV61,
DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU,
DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, DRM_FORMAT_YUV422,
};
static const unsigned int gsc_tiled_formats[] = {
DRM_FORMAT_NV12, DRM_FORMAT_NV21,
};
static int gsc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gsc_driverdata *driver_data;
struct exynos_drm_ipp_formats *formats;
struct gsc_context *ctx;
int num_formats, ret, i, j;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev);
ctx->dev = dev;
ctx->num_clocks = driver_data->num_clocks;
ctx->clk_names = driver_data->clk_names;
/* construct formats/limits array */
num_formats = ARRAY_SIZE(gsc_formats) + ARRAY_SIZE(gsc_tiled_formats);
formats = devm_kcalloc(dev, num_formats, sizeof(*formats), GFP_KERNEL);
if (!formats)
return -ENOMEM;
/* linear formats */
for (i = 0; i < ARRAY_SIZE(gsc_formats); i++) {
formats[i].fourcc = gsc_formats[i];
formats[i].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
DRM_EXYNOS_IPP_FORMAT_DESTINATION;
formats[i].limits = driver_data->limits;
formats[i].num_limits = driver_data->num_limits;
}
/* tiled formats */
for (j = i, i = 0; i < ARRAY_SIZE(gsc_tiled_formats); j++, i++) {
formats[j].fourcc = gsc_tiled_formats[i];
formats[j].modifier = DRM_FORMAT_MOD_SAMSUNG_16_16_TILE;
formats[j].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
DRM_EXYNOS_IPP_FORMAT_DESTINATION;
formats[j].limits = driver_data->limits;
formats[j].num_limits = driver_data->num_limits;
}
ctx->formats = formats;
ctx->num_formats = num_formats;
/* clock control */
for (i = 0; i < ctx->num_clocks; i++) {
ctx->clocks[i] = devm_clk_get(dev, ctx->clk_names[i]);
if (IS_ERR(ctx->clocks[i])) {
dev_err(dev, "failed to get clock: %s\n",
ctx->clk_names[i]);
return PTR_ERR(ctx->clocks[i]);
}
}
ctx->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
/* resource irq */
ctx->irq = platform_get_irq(pdev, 0);
if (ctx->irq < 0)
return ctx->irq;
ret = devm_request_irq(dev, ctx->irq, gsc_irq_handler, 0,
dev_name(dev), ctx);
if (ret < 0) {
dev_err(dev, "failed to request irq.\n");
return ret;
}
	/* context initialization */
ctx->id = pdev->id;
platform_set_drvdata(pdev, ctx);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, GSC_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
ret = component_add(dev, &gsc_component_ops);
if (ret)
goto err_pm_dis;
dev_info(dev, "drm gsc registered successfully.\n");
return 0;
err_pm_dis:
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
return ret;
}
static int gsc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &gsc_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
return 0;
}
static int __maybe_unused gsc_runtime_suspend(struct device *dev)
{
struct gsc_context *ctx = get_gsc_context(dev);
int i;
DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
for (i = ctx->num_clocks - 1; i >= 0; i--)
clk_disable_unprepare(ctx->clocks[i]);
return 0;
}
static int __maybe_unused gsc_runtime_resume(struct device *dev)
{
struct gsc_context *ctx = get_gsc_context(dev);
int i, ret;
DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
for (i = 0; i < ctx->num_clocks; i++) {
ret = clk_prepare_enable(ctx->clocks[i]);
if (ret) {
			while (--i >= 0)
clk_disable_unprepare(ctx->clocks[i]);
return ret;
}
}
return 0;
}
static const struct dev_pm_ops gsc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
};
static const struct drm_exynos_ipp_limit gsc_5250_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, 4800, 8 }, .v = { 16, 3344, 8 }) },
{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 2 }, .v = { 8, 3344, 2 }) },
{ IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2048 }, .v = { 16, 2048 }) },
{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
.v = { (1 << 16) / 16, (1 << 16) * 8 }) },
};
static const struct drm_exynos_ipp_limit gsc_5420_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, 4800, 8 }, .v = { 16, 3344, 8 }) },
{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 2 }, .v = { 8, 3344, 2 }) },
{ IPP_SIZE_LIMIT(ROTATED, .h = { 16, 2016 }, .v = { 8, 2016 }) },
{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
.v = { (1 << 16) / 16, (1 << 16) * 8 }) },
};
static const struct drm_exynos_ipp_limit gsc_5433_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) },
{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) },
{ IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) },
{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
.v = { (1 << 16) / 16, (1 << 16) * 8 }) },
};
static struct gsc_driverdata gsc_exynos5250_drvdata = {
.clk_names = {"gscl"},
.num_clocks = 1,
.limits = gsc_5250_limits,
.num_limits = ARRAY_SIZE(gsc_5250_limits),
};
static struct gsc_driverdata gsc_exynos5420_drvdata = {
.clk_names = {"gscl"},
.num_clocks = 1,
.limits = gsc_5420_limits,
.num_limits = ARRAY_SIZE(gsc_5420_limits),
};
static struct gsc_driverdata gsc_exynos5433_drvdata = {
.clk_names = {"pclk", "aclk", "aclk_xiu", "aclk_gsclbend"},
.num_clocks = 4,
.limits = gsc_5433_limits,
.num_limits = ARRAY_SIZE(gsc_5433_limits),
};
static const struct of_device_id exynos_drm_gsc_of_match[] = {
{
.compatible = "samsung,exynos5-gsc",
.data = &gsc_exynos5250_drvdata,
}, {
.compatible = "samsung,exynos5250-gsc",
.data = &gsc_exynos5250_drvdata,
}, {
.compatible = "samsung,exynos5420-gsc",
.data = &gsc_exynos5420_drvdata,
}, {
.compatible = "samsung,exynos5433-gsc",
.data = &gsc_exynos5433_drvdata,
}, {
},
};
MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match);
struct platform_driver gsc_driver = {
.probe = gsc_probe,
.remove = gsc_remove,
.driver = {
.name = "exynos-drm-gsc",
.owner = THIS_MODULE,
.pm = &gsc_pm_ops,
.of_match_table = exynos_drm_gsc_of_match,
},
};
| linux-master | drivers/gpu/drm/exynos/exynos_drm_gsc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Authors:
* Seung-Woo Kim <[email protected]>
* Inki Dae <[email protected]>
* Joonyoung Shim <[email protected]>
*
* Based on drivers/media/video/s5p-tv/hdmi_drv.c
*/
#include <drm/exynos_drm.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/wait.h>
#include <sound/hdmi-codec.h>
#include <media/cec-notifier.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "exynos_drm_crtc.h"
#include "regs-hdmi.h"
#define HOTPLUG_DEBOUNCE_MS 1100
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
HDMI_TYPE_COUNT
};
#define HDMI_MAPPED_BASE 0xffff0000
enum hdmi_mapped_regs {
HDMI_PHY_STATUS = HDMI_MAPPED_BASE,
HDMI_PHY_RSTOUT,
HDMI_ACR_CON,
HDMI_ACR_MCTS0,
HDMI_ACR_CTS0,
HDMI_ACR_N0
};
static const u32 hdmi_reg_map[][HDMI_TYPE_COUNT] = {
{ HDMI_V13_PHY_STATUS, HDMI_PHY_STATUS_0 },
{ HDMI_V13_PHY_RSTOUT, HDMI_V14_PHY_RSTOUT },
{ HDMI_V13_ACR_CON, HDMI_V14_ACR_CON },
{ HDMI_V13_ACR_MCTS0, HDMI_V14_ACR_MCTS0 },
{ HDMI_V13_ACR_CTS0, HDMI_V14_ACR_CTS0 },
{ HDMI_V13_ACR_N0, HDMI_V14_ACR_N0 },
};
static const char * const supply[] = {
"vdd",
"vdd_osc",
"vdd_pll",
};
struct hdmiphy_config {
int pixel_clock;
u8 conf[32];
};
struct hdmiphy_configs {
int count;
const struct hdmiphy_config *data;
};
struct string_array_spec {
int count;
const char * const *data;
};
#define INIT_ARRAY_SPEC(a) { .count = ARRAY_SIZE(a), .data = a }
struct hdmi_driver_data {
unsigned int type;
unsigned int is_apb_phy:1;
unsigned int has_sysreg:1;
struct hdmiphy_configs phy_confs;
struct string_array_spec clk_gates;
/*
* Array of triplets (p_off, p_on, clock), where p_off and p_on are
* required parents of clock when HDMI-PHY is respectively off or on.
*/
struct string_array_spec clk_muxes;
};
struct hdmi_audio {
struct platform_device *pdev;
struct hdmi_audio_infoframe infoframe;
struct hdmi_codec_params params;
bool mute;
};
struct hdmi_context {
struct drm_encoder encoder;
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
bool dvi_mode;
struct delayed_work hotplug_work;
struct cec_notifier *notifier;
const struct hdmi_driver_data *drv_data;
void __iomem *regs;
void __iomem *regs_hdmiphy;
struct i2c_client *hdmiphy_port;
struct i2c_adapter *ddc_adpt;
struct gpio_desc *hpd_gpio;
int irq;
struct regmap *pmureg;
struct regmap *sysreg;
struct clk **clk_gates;
struct clk **clk_muxes;
struct regulator_bulk_data regul_bulk[ARRAY_SIZE(supply)];
struct regulator *reg_hdmi_en;
struct exynos_drm_clk phy_clk;
struct drm_bridge *bridge;
/* mutex protecting subsequent fields below */
struct mutex mutex;
struct hdmi_audio audio;
bool powered;
};
static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
{
return container_of(e, struct hdmi_context, encoder);
}
static inline struct hdmi_context *connector_to_hdmi(struct drm_connector *c)
{
return container_of(c, struct hdmi_context, connector);
}
static const struct hdmiphy_config hdmiphy_v13_configs[] = {
{
.pixel_clock = 27000000,
.conf = {
0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x80,
},
},
{
.pixel_clock = 27027000,
.conf = {
0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x80,
},
},
{
.pixel_clock = 74176000,
.conf = {
0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x80,
},
},
{
.pixel_clock = 74250000,
.conf = {
0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x80,
},
},
{
.pixel_clock = 148500000,
.conf = {
0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x80,
},
},
};
static const struct hdmiphy_config hdmiphy_v14_configs[] = {
{
.pixel_clock = 25200000,
.conf = {
0x01, 0x51, 0x2A, 0x75, 0x40, 0x01, 0x00, 0x08,
0x82, 0x80, 0xfc, 0xd8, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xf4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 27000000,
.conf = {
0x01, 0xd1, 0x22, 0x51, 0x40, 0x08, 0xfc, 0x20,
0x98, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xe4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 27027000,
.conf = {
0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 36000000,
.conf = {
0x01, 0x51, 0x2d, 0x55, 0x40, 0x01, 0x00, 0x08,
0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xab, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 40000000,
.conf = {
0x01, 0x51, 0x32, 0x55, 0x40, 0x01, 0x00, 0x08,
0x82, 0x80, 0x2c, 0xd9, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0x9a, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 65000000,
.conf = {
0x01, 0xd1, 0x36, 0x34, 0x40, 0x1e, 0x0a, 0x08,
0x82, 0xa0, 0x45, 0xd9, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xbd, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 71000000,
.conf = {
0x01, 0xd1, 0x3b, 0x35, 0x40, 0x0c, 0x04, 0x08,
0x85, 0xa0, 0x63, 0xd9, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xad, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 73250000,
.conf = {
0x01, 0xd1, 0x3d, 0x35, 0x40, 0x18, 0x02, 0x08,
0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xa8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 74176000,
.conf = {
0x01, 0xd1, 0x3e, 0x35, 0x40, 0x5b, 0xde, 0x08,
0x82, 0xa0, 0x73, 0xd9, 0x45, 0xa0, 0xac, 0x80,
0x56, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 74250000,
.conf = {
0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 83500000,
.conf = {
0x01, 0xd1, 0x23, 0x11, 0x40, 0x0c, 0xfb, 0x08,
0x85, 0xa0, 0xd1, 0xd8, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0x93, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 85500000,
.conf = {
0x01, 0xd1, 0x24, 0x11, 0x40, 0x40, 0xd0, 0x08,
0x84, 0xa0, 0xd6, 0xd8, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0x90, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 106500000,
.conf = {
0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08,
0x84, 0xa0, 0x0a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 108000000,
.conf = {
0x01, 0x51, 0x2d, 0x15, 0x40, 0x01, 0x00, 0x08,
0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xc7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 115500000,
.conf = {
0x01, 0xd1, 0x30, 0x12, 0x40, 0x40, 0x10, 0x08,
0x80, 0x80, 0x21, 0xd9, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xaa, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 119000000,
.conf = {
0x01, 0xd1, 0x32, 0x1a, 0x40, 0x30, 0xd8, 0x08,
0x04, 0xa0, 0x2a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0x9d, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 146250000,
.conf = {
0x01, 0xd1, 0x3d, 0x15, 0x40, 0x18, 0xfd, 0x08,
0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0x50, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 148500000,
.conf = {
0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
},
},
};
static const struct hdmiphy_config hdmiphy_5420_configs[] = {
{
.pixel_clock = 25200000,
.conf = {
0x01, 0x52, 0x3F, 0x55, 0x40, 0x01, 0x00, 0xC8,
0x82, 0xC8, 0xBD, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
0x06, 0x80, 0x01, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0xF4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 27000000,
.conf = {
0x01, 0xD1, 0x22, 0x51, 0x40, 0x08, 0xFC, 0xE0,
0x98, 0xE8, 0xCB, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
0x06, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0xE4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 27027000,
.conf = {
0x01, 0xD1, 0x2D, 0x72, 0x40, 0x64, 0x12, 0xC8,
0x43, 0xE8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
0x26, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 36000000,
.conf = {
0x01, 0x51, 0x2D, 0x55, 0x40, 0x40, 0x00, 0xC8,
0x02, 0xC8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0xAB, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 40000000,
.conf = {
0x01, 0xD1, 0x21, 0x31, 0x40, 0x3C, 0x28, 0xC8,
0x87, 0xE8, 0xC8, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0x9A, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 65000000,
.conf = {
0x01, 0xD1, 0x36, 0x34, 0x40, 0x0C, 0x04, 0xC8,
0x82, 0xE8, 0x45, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0xBD, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 71000000,
.conf = {
0x01, 0xD1, 0x3B, 0x35, 0x40, 0x0C, 0x04, 0xC8,
0x85, 0xE8, 0x63, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0x57, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 73250000,
.conf = {
0x01, 0xD1, 0x1F, 0x10, 0x40, 0x78, 0x8D, 0xC8,
0x81, 0xE8, 0xB7, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
0x56, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0xA8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 74176000,
.conf = {
0x01, 0xD1, 0x1F, 0x10, 0x40, 0x5B, 0xEF, 0xC8,
0x81, 0xE8, 0xB9, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
0x56, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0xA6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 74250000,
.conf = {
0x01, 0xD1, 0x1F, 0x10, 0x40, 0x40, 0xF8, 0x08,
0x81, 0xE8, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
0x26, 0x80, 0x09, 0x84, 0x05, 0x22, 0x24, 0x66,
0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 83500000,
.conf = {
0x01, 0xD1, 0x23, 0x11, 0x40, 0x0C, 0xFB, 0xC8,
0x85, 0xE8, 0xD1, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0x4A, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 88750000,
.conf = {
0x01, 0xD1, 0x25, 0x11, 0x40, 0x18, 0xFF, 0xC8,
0x83, 0xE8, 0xDE, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0x45, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 106500000,
.conf = {
0x01, 0xD1, 0x2C, 0x12, 0x40, 0x0C, 0x09, 0xC8,
0x84, 0xE8, 0x0A, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 108000000,
.conf = {
0x01, 0x51, 0x2D, 0x15, 0x40, 0x01, 0x00, 0xC8,
0x82, 0xC8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0xC7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 115500000,
.conf = {
0x01, 0xD1, 0x30, 0x14, 0x40, 0x0C, 0x03, 0xC8,
0x88, 0xE8, 0x21, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0x6A, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 146250000,
.conf = {
0x01, 0xD1, 0x3D, 0x15, 0x40, 0x18, 0xFD, 0xC8,
0x83, 0xE8, 0x6E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
0x54, 0x54, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 148500000,
.conf = {
0x01, 0xD1, 0x1F, 0x00, 0x40, 0x40, 0xF8, 0x08,
0x81, 0xE8, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
0x26, 0x80, 0x09, 0x84, 0x05, 0x22, 0x24, 0x66,
0x54, 0x4B, 0x25, 0x03, 0x00, 0x80, 0x01, 0x80,
},
},
{
.pixel_clock = 154000000,
.conf = {
0x01, 0xD1, 0x20, 0x01, 0x40, 0x30, 0x08, 0xCC,
0x8C, 0xE8, 0xC1, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x86,
0x54, 0x3F, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
},
},
};
static const struct hdmiphy_config hdmiphy_5433_configs[] = {
{
.pixel_clock = 27000000,
.conf = {
0x01, 0x51, 0x2d, 0x75, 0x01, 0x00, 0x88, 0x02,
0x72, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac,
0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
},
},
{
.pixel_clock = 27027000,
.conf = {
0x01, 0x51, 0x2d, 0x72, 0x64, 0x09, 0x88, 0xc3,
0x71, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac,
0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
},
},
{
.pixel_clock = 40000000,
.conf = {
0x01, 0x51, 0x32, 0x55, 0x01, 0x00, 0x88, 0x02,
0x4d, 0x50, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
},
},
{
.pixel_clock = 50000000,
.conf = {
0x01, 0x51, 0x34, 0x40, 0x64, 0x09, 0x88, 0xc3,
0x3d, 0x50, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
},
},
{
.pixel_clock = 65000000,
.conf = {
0x01, 0x51, 0x36, 0x31, 0x40, 0x10, 0x04, 0xc6,
0x2e, 0xe8, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
},
},
{
.pixel_clock = 74176000,
.conf = {
0x01, 0x51, 0x3E, 0x35, 0x5B, 0xDE, 0x88, 0x42,
0x53, 0x51, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
},
},
{
.pixel_clock = 74250000,
.conf = {
0x01, 0x51, 0x3E, 0x35, 0x40, 0xF0, 0x88, 0xC2,
0x52, 0x51, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
},
},
{
.pixel_clock = 108000000,
.conf = {
0x01, 0x51, 0x2d, 0x15, 0x01, 0x00, 0x88, 0x02,
0x72, 0x52, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
},
},
{
.pixel_clock = 148500000,
.conf = {
0x01, 0x51, 0x1f, 0x00, 0x40, 0xf8, 0x88, 0xc1,
0x52, 0x52, 0x24, 0x0c, 0x24, 0x0f, 0x7c, 0xa5,
0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
0x08, 0x10, 0x01, 0x01, 0x48, 0x4a, 0x00, 0x40,
},
},
{
.pixel_clock = 297000000,
.conf = {
0x01, 0x51, 0x3E, 0x05, 0x40, 0xF0, 0x88, 0xC2,
0x52, 0x53, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
},
},
};
static const char * const hdmi_clk_gates4[] = {
"hdmi", "sclk_hdmi"
};
static const char * const hdmi_clk_muxes4[] = {
"sclk_pixel", "sclk_hdmiphy", "mout_hdmi"
};
static const char * const hdmi_clk_gates5433[] = {
"hdmi_pclk", "hdmi_i_pclk", "i_tmds_clk", "i_pixel_clk", "i_spdif_clk"
};
static const char * const hdmi_clk_muxes5433[] = {
"oscclk", "tmds_clko", "tmds_clko_user",
"oscclk", "pixel_clko", "pixel_clko_user"
};
static const struct hdmi_driver_data exynos4210_hdmi_driver_data = {
.type = HDMI_TYPE13,
.phy_confs = INIT_ARRAY_SPEC(hdmiphy_v13_configs),
.clk_gates = INIT_ARRAY_SPEC(hdmi_clk_gates4),
.clk_muxes = INIT_ARRAY_SPEC(hdmi_clk_muxes4),
};
static const struct hdmi_driver_data exynos4212_hdmi_driver_data = {
.type = HDMI_TYPE14,
.phy_confs = INIT_ARRAY_SPEC(hdmiphy_v14_configs),
.clk_gates = INIT_ARRAY_SPEC(hdmi_clk_gates4),
.clk_muxes = INIT_ARRAY_SPEC(hdmi_clk_muxes4),
};
static const struct hdmi_driver_data exynos5420_hdmi_driver_data = {
.type = HDMI_TYPE14,
.is_apb_phy = 1,
.phy_confs = INIT_ARRAY_SPEC(hdmiphy_5420_configs),
.clk_gates = INIT_ARRAY_SPEC(hdmi_clk_gates4),
.clk_muxes = INIT_ARRAY_SPEC(hdmi_clk_muxes4),
};
static const struct hdmi_driver_data exynos5433_hdmi_driver_data = {
.type = HDMI_TYPE14,
.is_apb_phy = 1,
.has_sysreg = 1,
.phy_confs = INIT_ARRAY_SPEC(hdmiphy_5433_configs),
.clk_gates = INIT_ARRAY_SPEC(hdmi_clk_gates5433),
.clk_muxes = INIT_ARRAY_SPEC(hdmi_clk_muxes5433),
};
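/*
 * Some registers moved between the v1.3 and v1.4 HDMI IP revisions.
 * Register IDs in the HDMI_MAPPED_BASE range are translated to the
 * per-revision offset through hdmi_reg_map[] before every access.
 */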
static inline u32 hdmi_map_reg(struct hdmi_context *hdata, u32 reg_id)
{
if ((reg_id & 0xffff0000) == HDMI_MAPPED_BASE)
return hdmi_reg_map[reg_id & 0xffff][hdata->drv_data->type];
return reg_id;
}
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
return readl(hdata->regs + hdmi_map_reg(hdata, reg_id));
}
static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
u32 reg_id, u8 value)
{
writel(value, hdata->regs + hdmi_map_reg(hdata, reg_id));
}
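/*
 * Multi-byte values are spread over consecutive byte-wide registers on
 * 32-bit strides; write the least significant byte first.
 */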
static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
int bytes, u32 val)
{
reg_id = hdmi_map_reg(hdata, reg_id);
while (--bytes >= 0) {
writel(val & 0xff, hdata->regs + reg_id);
val >>= 8;
reg_id += 4;
}
}
static inline void hdmi_reg_write_buf(struct hdmi_context *hdata, u32 reg_id,
u8 *buf, int size)
{
for (reg_id = hdmi_map_reg(hdata, reg_id); size; --size, reg_id += 4)
writel(*buf++, hdata->regs + reg_id);
}
static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
u32 reg_id, u32 value, u32 mask)
{
u32 old;
reg_id = hdmi_map_reg(hdata, reg_id);
old = readl(hdata->regs + reg_id);
value = (value & mask) | (old & ~mask);
writel(value, hdata->regs + reg_id);
}
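/*
 * The HDMI PHY is programmed either over I2C or, on SoCs with an
 * APB-mapped PHY, over MMIO with one configuration byte per 32-bit word.
 */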
static int hdmiphy_reg_write_buf(struct hdmi_context *hdata,
u32 reg_offset, const u8 *buf, u32 len)
{
if ((reg_offset + len) > 32)
return -EINVAL;
if (hdata->hdmiphy_port) {
int ret;
ret = i2c_master_send(hdata->hdmiphy_port, buf, len);
if (ret == len)
return 0;
return ret;
} else {
int i;
for (i = 0; i < len; i++)
writel(buf[i], hdata->regs_hdmiphy +
((reg_offset + i)<<2));
return 0;
}
}
static int hdmi_clk_enable_gates(struct hdmi_context *hdata)
{
int i, ret;
for (i = 0; i < hdata->drv_data->clk_gates.count; ++i) {
ret = clk_prepare_enable(hdata->clk_gates[i]);
if (!ret)
continue;
dev_err(hdata->dev, "Cannot enable clock '%s', %d\n",
hdata->drv_data->clk_gates.data[i], ret);
while (i--)
clk_disable_unprepare(hdata->clk_gates[i]);
return ret;
}
return 0;
}
static void hdmi_clk_disable_gates(struct hdmi_context *hdata)
{
int i = hdata->drv_data->clk_gates.count;
while (i--)
clk_disable_unprepare(hdata->clk_gates[i]);
}
static int hdmi_clk_set_parents(struct hdmi_context *hdata, bool to_phy)
{
struct device *dev = hdata->dev;
int ret = 0;
int i;
for (i = 0; i < hdata->drv_data->clk_muxes.count; i += 3) {
struct clk **c = &hdata->clk_muxes[i];
ret = clk_set_parent(c[2], c[to_phy]);
if (!ret)
continue;
dev_err(dev, "Cannot set clock parent of '%s' to '%s', %d\n",
hdata->drv_data->clk_muxes.data[i + 2],
hdata->drv_data->clk_muxes.data[i + to_phy], ret);
}
return ret;
}
static int hdmi_audio_infoframe_apply(struct hdmi_context *hdata)
{
struct hdmi_audio_infoframe *infoframe = &hdata->audio.infoframe;
u8 buf[HDMI_INFOFRAME_SIZE(AUDIO)];
int len;
len = hdmi_audio_infoframe_pack(infoframe, buf, sizeof(buf));
if (len < 0)
return len;
hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_EVERY_VSYNC);
hdmi_reg_write_buf(hdata, HDMI_AUI_HEADER0, buf, len);
return 0;
}
static void hdmi_reg_infoframes(struct hdmi_context *hdata)
{
struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
union hdmi_infoframe frm;
u8 buf[25];
int ret;
if (hdata->dvi_mode) {
hdmi_reg_writeb(hdata, HDMI_AVI_CON,
HDMI_AVI_CON_DO_NOT_TRANSMIT);
hdmi_reg_writeb(hdata, HDMI_VSI_CON,
HDMI_VSI_CON_DO_NOT_TRANSMIT);
hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
return;
}
ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
&hdata->connector, m);
if (!ret)
ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf));
if (ret > 0) {
hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
hdmi_reg_write_buf(hdata, HDMI_AVI_HEADER0, buf, ret);
} else {
DRM_INFO("%s: invalid AVI infoframe (%d)\n", __func__, ret);
}
ret = drm_hdmi_vendor_infoframe_from_display_mode(&frm.vendor.hdmi,
&hdata->connector, m);
if (!ret)
ret = hdmi_vendor_infoframe_pack(&frm.vendor.hdmi, buf,
sizeof(buf));
if (ret > 0) {
hdmi_reg_writeb(hdata, HDMI_VSI_CON, HDMI_VSI_CON_EVERY_VSYNC);
hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, 3);
hdmi_reg_write_buf(hdata, HDMI_VSI_DATA(0), buf + 3, ret - 3);
}
hdmi_audio_infoframe_apply(hdata);
}
static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
bool force)
{
struct hdmi_context *hdata = connector_to_hdmi(connector);
if (gpiod_get_value(hdata->hpd_gpio))
return connector_status_connected;
cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
return connector_status_disconnected;
}
static void hdmi_connector_destroy(struct drm_connector *connector)
{
struct hdmi_context *hdata = connector_to_hdmi(connector);
cec_notifier_conn_unregister(hdata->notifier);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
static const struct drm_connector_funcs hdmi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = hdmi_detect,
.destroy = hdmi_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
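/*
 * DVI vs. HDMI operation is decided from the sink's EDID; the CEC
 * physical address is refreshed from the same read.
 */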
static int hdmi_get_modes(struct drm_connector *connector)
{
struct hdmi_context *hdata = connector_to_hdmi(connector);
struct edid *edid;
int ret;
if (!hdata->ddc_adpt)
return -ENODEV;
edid = drm_get_edid(connector, hdata->ddc_adpt);
if (!edid)
return -ENODEV;
hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
edid->width_cm, edid->height_cm);
drm_connector_update_edid_property(connector, edid);
cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
return ret;
}
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
{
const struct hdmiphy_configs *confs = &hdata->drv_data->phy_confs;
int i;
for (i = 0; i < confs->count; i++)
if (confs->data[i].pixel_clock == pixel_clock)
return i;
DRM_DEV_DEBUG_KMS(hdata->dev, "Could not find phy config for %d\n",
pixel_clock);
return -EINVAL;
}
static enum drm_mode_status hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct hdmi_context *hdata = connector_to_hdmi(connector);
int ret;
DRM_DEV_DEBUG_KMS(hdata->dev,
"xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
mode->hdisplay, mode->vdisplay,
drm_mode_vrefresh(mode),
(mode->flags & DRM_MODE_FLAG_INTERLACE) ? true :
false, mode->clock * 1000);
ret = hdmi_find_phy_conf(hdata, mode->clock * 1000);
if (ret < 0)
return MODE_BAD;
return MODE_OK;
}
static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
.get_modes = hdmi_get_modes,
.mode_valid = hdmi_mode_valid,
};
static int hdmi_create_connector(struct drm_encoder *encoder)
{
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
struct drm_connector *connector = &hdata->connector;
struct cec_connector_info conn_info;
int ret;
connector->interlace_allowed = true;
connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init_with_ddc(hdata->drm_dev, connector,
&hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA,
hdata->ddc_adpt);
if (ret) {
DRM_DEV_ERROR(hdata->dev,
"Failed to initialize connector with drm\n");
return ret;
}
drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);
if (hdata->bridge)
ret = drm_bridge_attach(encoder, hdata->bridge, NULL, 0);
cec_fill_conn_info_from_drm(&conn_info, connector);
hdata->notifier = cec_notifier_conn_register(hdata->dev, NULL,
&conn_info);
if (!hdata->notifier) {
ret = -ENOMEM;
DRM_DEV_ERROR(hdata->dev, "Failed to allocate CEC notifier\n");
}
return ret;
}
static bool hdmi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct drm_display_mode *m;
struct drm_connector_list_iter conn_iter;
int mode_ok;
drm_mode_set_crtcinfo(adjusted_mode, 0);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->encoder == encoder)
break;
}
if (connector)
drm_connector_get(connector);
drm_connector_list_iter_end(&conn_iter);
if (!connector)
return true;
mode_ok = hdmi_mode_valid(connector, adjusted_mode);
if (mode_ok == MODE_OK)
goto cleanup;
/*
* Find the most suitable mode and copy it to adjusted_mode.
*/
list_for_each_entry(m, &connector->modes, head) {
mode_ok = hdmi_mode_valid(connector, m);
if (mode_ok == MODE_OK) {
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
DRM_DEV_DEBUG_KMS(dev->dev,
"Adjusted Mode: [%d]x[%d] [%d]Hz\n",
m->hdisplay, m->vdisplay,
drm_mode_vrefresh(m));
drm_mode_copy(adjusted_mode, m);
break;
}
}
cleanup:
drm_connector_put(connector);
return true;
}
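/*
 * Audio Clock Regeneration, computed against the fixed 27 MHz reference
 * used here: sample rates divisible by 9 (the 44.1 kHz family) use
 * CTS = 30000, the rest use 27000, which yields the spec N values
 * (4096 for 32 kHz, 6272 for 44.1 kHz, 6144 for 48 kHz).
 */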
static void hdmi_reg_acr(struct hdmi_context *hdata, u32 freq)
{
u32 n, cts;
cts = (freq % 9) ? 27000 : 30000;
n = 128 * freq / (27000000 / cts);
hdmi_reg_writev(hdata, HDMI_ACR_N0, 3, n);
hdmi_reg_writev(hdata, HDMI_ACR_MCTS0, 3, cts);
hdmi_reg_writev(hdata, HDMI_ACR_CTS0, 3, cts);
hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
}
static void hdmi_audio_config(struct hdmi_context *hdata)
{
u32 bit_ch = 1;
u32 data_num, val;
int i;
switch (hdata->audio.params.sample_width) {
case 20:
data_num = 2;
break;
case 24:
data_num = 3;
break;
default:
data_num = 1;
bit_ch = 0;
break;
}
hdmi_reg_acr(hdata, hdata->audio.params.sample_rate);
hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE
| HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE
| HDMI_I2S_MUX_ENABLE);
hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CH, HDMI_I2S_CH0_EN
| HDMI_I2S_CH1_EN | HDMI_I2S_CH2_EN);
hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CUV, HDMI_I2S_CUV_RL_EN);
hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_DIS);
hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_EN);
val = hdmi_reg_read(hdata, HDMI_I2S_DSD_CON) | 0x01;
hdmi_reg_writeb(hdata, HDMI_I2S_DSD_CON, val);
/* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */
hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5)
| HDMI_I2S_SEL_LRCK(6));
hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(3)
| HDMI_I2S_SEL_SDATA0(4));
hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1)
| HDMI_I2S_SEL_SDATA2(2));
hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0));
/* I2S_CON_1 & 2 */
hdmi_reg_writeb(hdata, HDMI_I2S_CON_1, HDMI_I2S_SCLK_FALLING_EDGE
| HDMI_I2S_L_CH_LOW_POL);
hdmi_reg_writeb(hdata, HDMI_I2S_CON_2, HDMI_I2S_MSB_FIRST_MODE
| HDMI_I2S_SET_BIT_CH(bit_ch)
| HDMI_I2S_SET_SDATA_BIT(data_num)
| HDMI_I2S_BASIC_FORMAT);
/* Configuration of the audio channel status registers */
for (i = 0; i < HDMI_I2S_CH_ST_MAXNUM; i++)
hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST(i),
hdata->audio.params.iec.status[i]);
hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_CON, HDMI_I2S_CH_STATUS_RELOAD);
}
static void hdmi_audio_control(struct hdmi_context *hdata)
{
bool enable = !hdata->audio.mute;
if (hdata->dvi_mode)
return;
hdmi_reg_writeb(hdata, HDMI_AUI_CON, enable ?
HDMI_AVI_CON_EVERY_VSYNC : HDMI_AUI_CON_NO_TRAN);
hdmi_reg_writemask(hdata, HDMI_CON_0, enable ?
HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK);
}
static void hdmi_start(struct hdmi_context *hdata, bool start)
{
struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
u32 val = start ? HDMI_TG_EN : 0;
if (m->flags & DRM_MODE_FLAG_INTERLACE)
val |= HDMI_FIELD_EN;
hdmi_reg_writemask(hdata, HDMI_CON_0, val, HDMI_EN);
hdmi_reg_writemask(hdata, HDMI_TG_CMD, val, HDMI_TG_EN | HDMI_FIELD_EN);
}
static void hdmi_conf_init(struct hdmi_context *hdata)
{
/* disable HPD interrupts from HDMI IP block, use GPIO instead */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
/* choose HDMI mode */
hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
/* apply video pre-amble and guard band in HDMI mode only */
hdmi_reg_writeb(hdata, HDMI_CON_2, 0);
/* disable bluescreen */
hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
if (hdata->dvi_mode) {
hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
hdmi_reg_writeb(hdata, HDMI_CON_2,
HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
}
if (hdata->drv_data->type == HDMI_TYPE13) {
		/* choose the bluescreen color */
hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34);
hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_2, 0x56);
/* enable AVI packet every vsync, fixes purple line problem */
hdmi_reg_writeb(hdata, HDMI_V13_AVI_CON, 0x02);
/* force RGB, look to CEA-861-D, table 7 for more detail */
hdmi_reg_writeb(hdata, HDMI_V13_AVI_BYTE(0), 0 << 5);
hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5);
hdmi_reg_writeb(hdata, HDMI_V13_SPD_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
hdmi_reg_infoframes(hdata);
/* enable AVI packet every vsync, fixes purple line problem */
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
}
}
static void hdmiphy_wait_for_pll(struct hdmi_context *hdata)
{
int tries;
for (tries = 0; tries < 10; ++tries) {
u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS);
if (val & HDMI_PHY_STATUS_READY) {
DRM_DEV_DEBUG_KMS(hdata->dev,
"PLL stabilized after %d tries\n",
tries);
return;
}
usleep_range(10, 20);
}
DRM_DEV_ERROR(hdata->dev, "PLL could not reach steady state\n");
}
static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
{
struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
unsigned int val;
hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
hdmi_reg_writev(hdata, HDMI_V13_H_V_LINE_0, 3,
(m->htotal << 12) | m->vtotal);
val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1, val);
val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1, val);
val = (m->hsync_start - m->hdisplay - 2);
val |= ((m->hsync_end - m->hdisplay - 2) << 10);
val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
hdmi_reg_writev(hdata, HDMI_V13_H_SYNC_GEN_0, 3, val);
/*
* Quirk requirement for exynos HDMI IP design,
* 2 pixels less than the actual calculation for hsync_start
* and end.
*/
/* Following values & calculations differ for different type of modes */
if (m->flags & DRM_MODE_FLAG_INTERLACE) {
val = ((m->vsync_end - m->vdisplay) / 2);
val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
val = m->vtotal / 2;
val |= ((m->vtotal - m->vdisplay) / 2) << 11;
hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
val = (m->vtotal +
((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
val |= m->vtotal << 11;
hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, val);
val = ((m->vtotal / 2) + 7);
val |= ((m->vtotal / 2) + 2) << 12;
hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, val);
val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
val |= ((m->htotal / 2) +
(m->hsync_start - m->hdisplay)) << 12;
hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, val);
hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
(m->vtotal - m->vdisplay) / 2);
hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x249);
} else {
val = m->vtotal;
val |= (m->vtotal - m->vdisplay) << 11;
hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, 0);
val = (m->vsync_end - m->vdisplay);
val |= ((m->vsync_start - m->vdisplay) << 12);
hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, 0x1001);
hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, 0x1001);
hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
m->vtotal - m->vdisplay);
hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
}
hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
}
static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
{
struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
struct drm_display_mode *am =
&hdata->encoder.crtc->state->adjusted_mode;
int hquirk = 0;
/*
* In case video mode coming from CRTC differs from requested one HDMI
* sometimes is able to almost properly perform conversion - only
* first line is distorted.
*/
if ((m->vdisplay != am->vdisplay) &&
(m->hdisplay == 1280 || m->hdisplay == 1024 || m->hdisplay == 1366))
hquirk = 258;
hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal);
hdmi_reg_writev(hdata, HDMI_H_LINE_0, 2, m->htotal);
hdmi_reg_writev(hdata, HDMI_HSYNC_POL, 1,
(m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1,
(m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1,
(m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
/*
* Quirk requirement for exynos 5 HDMI IP design,
* 2 pixels less than the actual calculation for hsync_start
* and end.
*/
/* Following values & calculations differ for different type of modes */
if (m->flags & DRM_MODE_FLAG_INTERLACE) {
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
(m->vsync_end - m->vdisplay) / 2);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
(m->vsync_start - m->vdisplay) / 2);
hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal / 2);
hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
(m->vtotal - m->vdisplay) / 2);
hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2,
m->vtotal - m->vdisplay / 2);
hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, m->vtotal);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2,
(m->vtotal / 2) + 7);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2,
(m->vtotal / 2) + 2);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2,
(m->htotal / 2) + (m->hsync_start - m->hdisplay));
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2,
(m->htotal / 2) + (m->hsync_start - m->hdisplay));
hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
(m->vtotal - m->vdisplay) / 2);
hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2,
m->vtotal - m->vdisplay / 2);
hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2,
(m->vtotal / 2) + 1);
hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2,
(m->vtotal / 2) + 1);
hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2,
(m->vtotal / 2) + 1);
hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x0);
hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x0);
} else {
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
m->vsync_end - m->vdisplay);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
m->vsync_start - m->vdisplay);
hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal);
hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
m->vtotal - m->vdisplay);
hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
m->vtotal - m->vdisplay);
hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
}
hdmi_reg_writev(hdata, HDMI_H_SYNC_START_0, 2,
m->hsync_start - m->hdisplay - 2);
hdmi_reg_writev(hdata, HDMI_H_SYNC_END_0, 2,
m->hsync_end - m->hdisplay - 2);
hdmi_reg_writev(hdata, HDMI_VACT_SPACE_1_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_VACT_SPACE_2_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_VACT_SPACE_3_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_VACT_SPACE_4_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_VACT_SPACE_5_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_VACT_SPACE_6_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_BLANK_F2_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_BLANK_F3_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_BLANK_F4_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_BLANK_F5_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_3_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_4_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_5_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_6_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2,
m->htotal - m->hdisplay - hquirk);
hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay + hquirk);
hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
if (hdata->drv_data == &exynos5433_hdmi_driver_data)
hdmi_reg_writeb(hdata, HDMI_TG_DECON_EN, 1);
}
static void hdmi_mode_apply(struct hdmi_context *hdata)
{
if (hdata->drv_data->type == HDMI_TYPE13)
hdmi_v13_mode_apply(hdata);
else
hdmi_v14_mode_apply(hdata);
hdmi_start(hdata, true);
}
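/*
 * Pulse the core and PHY software resets, giving each edge ~10 ms to
 * settle before the PHY is reprogrammed.
 */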
static void hdmiphy_conf_reset(struct hdmi_context *hdata)
{
hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, 0, 1);
usleep_range(10000, 12000);
hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, ~0, 1);
usleep_range(10000, 12000);
hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
usleep_range(10000, 12000);
hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
usleep_range(10000, 12000);
}
static void hdmiphy_enable_mode_set(struct hdmi_context *hdata, bool enable)
{
u8 v = enable ? HDMI_PHY_ENABLE_MODE_SET : HDMI_PHY_DISABLE_MODE_SET;
if (hdata->drv_data == &exynos5433_hdmi_driver_data)
writel(v, hdata->regs_hdmiphy + HDMIPHY5433_MODE_SET_DONE);
}
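/*
 * Full PHY reconfiguration: park the clock muxes away from the PHY,
 * reset it, write the 32-byte configuration for the current pixel clock,
 * switch the muxes back and wait for the PLL to lock.
 */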
static void hdmiphy_conf_apply(struct hdmi_context *hdata)
{
struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
int ret;
const u8 *phy_conf;
ret = hdmi_find_phy_conf(hdata, m->clock * 1000);
if (ret < 0) {
DRM_DEV_ERROR(hdata->dev, "failed to find hdmiphy conf\n");
return;
}
phy_conf = hdata->drv_data->phy_confs.data[ret].conf;
hdmi_clk_set_parents(hdata, false);
hdmiphy_conf_reset(hdata);
hdmiphy_enable_mode_set(hdata, true);
ret = hdmiphy_reg_write_buf(hdata, 0, phy_conf, 32);
if (ret) {
DRM_DEV_ERROR(hdata->dev, "failed to configure hdmiphy\n");
return;
}
hdmiphy_enable_mode_set(hdata, false);
hdmi_clk_set_parents(hdata, true);
usleep_range(10000, 12000);
hdmiphy_wait_for_pll(hdata);
}
/* Should be called with hdata->mutex mutex held */
static void hdmi_conf_apply(struct hdmi_context *hdata)
{
hdmi_start(hdata, false);
hdmi_conf_init(hdata);
hdmi_audio_config(hdata);
hdmi_mode_apply(hdata);
hdmi_audio_control(hdata);
}
static void hdmi_set_refclk(struct hdmi_context *hdata, bool on)
{
if (!hdata->sysreg)
return;
regmap_update_bits(hdata->sysreg, EXYNOS5433_SYSREG_DISP_HDMI_PHY,
SYSREG_HDMI_REFCLK_INT_CLK, on ? ~0 : 0);
}
/* Should be called with hdata->mutex mutex held. */
static void hdmiphy_enable(struct hdmi_context *hdata)
{
int ret;
if (hdata->powered)
return;
ret = pm_runtime_resume_and_get(hdata->dev);
if (ret < 0) {
dev_err(hdata->dev, "failed to enable HDMIPHY device.\n");
return;
}
if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk))
DRM_DEV_DEBUG_KMS(hdata->dev,
"failed to enable regulator bulk\n");
regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
PMU_HDMI_PHY_ENABLE_BIT, 1);
hdmi_set_refclk(hdata, true);
hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0, HDMI_PHY_POWER_OFF_EN);
hdmiphy_conf_apply(hdata);
hdata->powered = true;
}
/* Should be called with hdata->mutex mutex held. */
static void hdmiphy_disable(struct hdmi_context *hdata)
{
if (!hdata->powered)
return;
hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0, HDMI_PHY_POWER_OFF_EN);
hdmi_set_refclk(hdata, false);
regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
PMU_HDMI_PHY_ENABLE_BIT, 0);
regulator_bulk_disable(ARRAY_SIZE(supply), hdata->regul_bulk);
pm_runtime_put_sync(hdata->dev);
hdata->powered = false;
}
static void hdmi_enable(struct drm_encoder *encoder)
{
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
mutex_lock(&hdata->mutex);
hdmiphy_enable(hdata);
hdmi_conf_apply(hdata);
mutex_unlock(&hdata->mutex);
}
static void hdmi_disable(struct drm_encoder *encoder)
{
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
mutex_lock(&hdata->mutex);
if (hdata->powered) {
/*
* The SFRs of VP and Mixer are updated by Vertical Sync of
* Timing generator which is a part of HDMI so the sequence
* to disable TV Subsystem should be as following,
* VP -> Mixer -> HDMI
*
* To achieve such sequence HDMI is disabled together with
* HDMI PHY, via pipe clock callback.
*/
mutex_unlock(&hdata->mutex);
cancel_delayed_work(&hdata->hotplug_work);
if (hdata->notifier)
cec_notifier_phys_addr_invalidate(hdata->notifier);
return;
}
mutex_unlock(&hdata->mutex);
}
static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
.mode_fixup = hdmi_mode_fixup,
.enable = hdmi_enable,
.disable = hdmi_disable,
};
static void hdmi_audio_shutdown(struct device *dev, void *data)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
mutex_lock(&hdata->mutex);
hdata->audio.mute = true;
if (hdata->powered)
hdmi_audio_control(hdata);
mutex_unlock(&hdata->mutex);
}
static int hdmi_audio_hw_params(struct device *dev, void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
if (daifmt->fmt != HDMI_I2S || daifmt->bit_clk_inv ||
daifmt->frame_clk_inv || daifmt->bit_clk_provider ||
daifmt->frame_clk_provider) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
daifmt->bit_clk_provider,
daifmt->frame_clk_provider);
return -EINVAL;
}
mutex_lock(&hdata->mutex);
hdata->audio.params = *params;
if (hdata->powered) {
hdmi_audio_config(hdata);
hdmi_audio_infoframe_apply(hdata);
}
mutex_unlock(&hdata->mutex);
return 0;
}
static int hdmi_audio_mute(struct device *dev, void *data,
bool mute, int direction)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
mutex_lock(&hdata->mutex);
hdata->audio.mute = mute;
if (hdata->powered)
hdmi_audio_control(hdata);
mutex_unlock(&hdata->mutex);
return 0;
}
static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
size_t len)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
struct drm_connector *connector = &hdata->connector;
memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
return 0;
}
static const struct hdmi_codec_ops audio_codec_ops = {
.hw_params = hdmi_audio_hw_params,
.audio_shutdown = hdmi_audio_shutdown,
.mute_stream = hdmi_audio_mute,
.get_eld = hdmi_audio_get_eld,
.no_capture_mute = 1,
};
static int hdmi_register_audio_device(struct hdmi_context *hdata)
{
struct hdmi_codec_pdata codec_data = {
.ops = &audio_codec_ops,
.max_i2s_channels = 6,
.i2s = 1,
};
hdata->audio.pdev = platform_device_register_data(
hdata->dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
&codec_data, sizeof(codec_data));
return PTR_ERR_OR_ZERO(hdata->audio.pdev);
}
static void hdmi_hotplug_work_func(struct work_struct *work)
{
struct hdmi_context *hdata;
hdata = container_of(work, struct hdmi_context, hotplug_work.work);
if (hdata->drm_dev)
drm_helper_hpd_irq_event(hdata->drm_dev);
}
static irqreturn_t hdmi_irq_thread(int irq, void *arg)
{
struct hdmi_context *hdata = arg;
mod_delayed_work(system_wq, &hdata->hotplug_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
return IRQ_HANDLED;
}
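/*
* Note (added for clarity): the threaded IRQ handler above does not
* report hotplug directly; it (re)arms the delayed work, so a burst of
* HPD edges results in a single drm_helper_hpd_irq_event() call
* HOTPLUG_DEBOUNCE_MS after the line has settled.
*/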
static int hdmi_clks_get(struct hdmi_context *hdata,
const struct string_array_spec *names,
struct clk **clks)
{
struct device *dev = hdata->dev;
int i;
for (i = 0; i < names->count; ++i) {
struct clk *clk = devm_clk_get(dev, names->data[i]);
if (IS_ERR(clk)) {
int ret = PTR_ERR(clk);
dev_err(dev, "Cannot get clock %s, %d\n",
names->data[i], ret);
return ret;
}
clks[i] = clk;
}
return 0;
}
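/*
* Note (added for clarity): hdmi_clk_init() below performs a single
* devm_kcalloc() and splits the array in two: the first
* drv_data->clk_gates.count slots back hdata->clk_gates, the remaining
* drv_data->clk_muxes.count slots back hdata->clk_muxes.
*/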
static int hdmi_clk_init(struct hdmi_context *hdata)
{
const struct hdmi_driver_data *drv_data = hdata->drv_data;
int count = drv_data->clk_gates.count + drv_data->clk_muxes.count;
struct device *dev = hdata->dev;
struct clk **clks;
int ret;
if (!count)
return 0;
clks = devm_kcalloc(dev, count, sizeof(*clks), GFP_KERNEL);
if (!clks)
return -ENOMEM;
hdata->clk_gates = clks;
hdata->clk_muxes = clks + drv_data->clk_gates.count;
ret = hdmi_clks_get(hdata, &drv_data->clk_gates, hdata->clk_gates);
if (ret)
return ret;
return hdmi_clks_get(hdata, &drv_data->clk_muxes, hdata->clk_muxes);
}
static void hdmiphy_clk_enable(struct exynos_drm_clk *clk, bool enable)
{
struct hdmi_context *hdata = container_of(clk, struct hdmi_context,
phy_clk);
mutex_lock(&hdata->mutex);
if (enable)
hdmiphy_enable(hdata);
else
hdmiphy_disable(hdata);
mutex_unlock(&hdata->mutex);
}
static int hdmi_bridge_init(struct hdmi_context *hdata)
{
struct device *dev = hdata->dev;
struct device_node *ep, *np;
ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1);
if (!ep)
return 0;
np = of_graph_get_remote_port_parent(ep);
of_node_put(ep);
if (!np) {
DRM_DEV_ERROR(dev, "failed to get remote port parent");
return -EINVAL;
}
hdata->bridge = of_drm_find_bridge(np);
of_node_put(np);
if (!hdata->bridge)
return -EPROBE_DEFER;
return 0;
}
static int hdmi_resources_init(struct hdmi_context *hdata)
{
struct device *dev = hdata->dev;
int i, ret;
DRM_DEV_DEBUG_KMS(dev, "HDMI resource init\n");
hdata->hpd_gpio = devm_gpiod_get(dev, "hpd", GPIOD_IN);
if (IS_ERR(hdata->hpd_gpio)) {
DRM_DEV_ERROR(dev, "cannot get hpd gpio property\n");
return PTR_ERR(hdata->hpd_gpio);
}
hdata->irq = gpiod_to_irq(hdata->hpd_gpio);
if (hdata->irq < 0) {
DRM_DEV_ERROR(dev, "failed to get GPIO irq\n");
return hdata->irq;
}
ret = hdmi_clk_init(hdata);
if (ret)
return ret;
ret = hdmi_clk_set_parents(hdata, false);
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(supply); ++i)
hdata->regul_bulk[i].supply = supply[i];
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk);
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
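/*
* Note (added for clarity): the "hdmi-en" regulator is optional.
* devm_regulator_get_optional() returns ERR_PTR(-ENODEV) when the
* supply is not described, so only other errors abort the probe; the
* ERR_PTR sentinel is kept and re-checked with IS_ERR() before every
* use of reg_hdmi_en.
*/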
hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");
if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV)
if (IS_ERR(hdata->reg_hdmi_en))
return PTR_ERR(hdata->reg_hdmi_en);
return hdmi_bridge_init(hdata);
}
static const struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos4210-hdmi",
.data = &exynos4210_hdmi_driver_data,
}, {
.compatible = "samsung,exynos4212-hdmi",
.data = &exynos4212_hdmi_driver_data,
}, {
.compatible = "samsung,exynos5420-hdmi",
.data = &exynos5420_hdmi_driver_data,
}, {
.compatible = "samsung,exynos5433-hdmi",
.data = &exynos5433_hdmi_driver_data,
}, {
/* end node */
}
};
MODULE_DEVICE_TABLE(of, hdmi_match_types);
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm_dev = data;
struct hdmi_context *hdata = dev_get_drvdata(dev);
struct drm_encoder *encoder = &hdata->encoder;
struct exynos_drm_crtc *crtc;
int ret;
hdata->drm_dev = drm_dev;
hdata->phy_clk.enable = hdmiphy_clk_enable;
drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_HDMI);
if (ret < 0)
return ret;
crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
crtc->pipe_clk = &hdata->phy_clk;
ret = hdmi_create_connector(encoder);
if (ret) {
DRM_DEV_ERROR(dev, "failed to create connector ret = %d\n",
ret);
drm_encoder_cleanup(encoder);
return ret;
}
return 0;
}
static void hdmi_unbind(struct device *dev, struct device *master, void *data)
{
}
static const struct component_ops hdmi_component_ops = {
.bind = hdmi_bind,
.unbind = hdmi_unbind,
};
static int hdmi_get_ddc_adapter(struct hdmi_context *hdata)
{
const char *compatible_str = "samsung,exynos4210-hdmiddc";
struct device_node *np;
struct i2c_adapter *adpt;
np = of_find_compatible_node(NULL, NULL, compatible_str);
if (np)
np = of_get_next_parent(np);
else
np = of_parse_phandle(hdata->dev->of_node, "ddc", 0);
if (!np) {
DRM_DEV_ERROR(hdata->dev,
"Failed to find ddc node in device tree\n");
return -ENODEV;
}
adpt = of_find_i2c_adapter_by_node(np);
of_node_put(np);
if (!adpt) {
DRM_INFO("Failed to get ddc i2c adapter by node\n");
return -EPROBE_DEFER;
}
hdata->ddc_adpt = adpt;
return 0;
}
static int hdmi_get_phy_io(struct hdmi_context *hdata)
{
const char *compatible_str = "samsung,exynos4212-hdmiphy";
struct device_node *np;
int ret = 0;
np = of_find_compatible_node(NULL, NULL, compatible_str);
if (!np) {
np = of_parse_phandle(hdata->dev->of_node, "phy", 0);
if (!np) {
DRM_DEV_ERROR(hdata->dev,
"Failed to find hdmiphy node in device tree\n");
return -ENODEV;
}
}
if (hdata->drv_data->is_apb_phy) {
hdata->regs_hdmiphy = of_iomap(np, 0);
if (!hdata->regs_hdmiphy) {
DRM_DEV_ERROR(hdata->dev,
"failed to ioremap hdmi phy\n");
ret = -ENOMEM;
goto out;
}
} else {
hdata->hdmiphy_port = of_find_i2c_device_by_node(np);
if (!hdata->hdmiphy_port) {
DRM_INFO("Failed to get hdmi phy i2c client\n");
ret = -EPROBE_DEFER;
goto out;
}
}
out:
of_node_put(np);
return ret;
}
static int hdmi_probe(struct platform_device *pdev)
{
struct hdmi_audio_infoframe *audio_infoframe;
struct device *dev = &pdev->dev;
struct hdmi_context *hdata;
int ret;
hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
if (!hdata)
return -ENOMEM;
hdata->drv_data = of_device_get_match_data(dev);
platform_set_drvdata(pdev, hdata);
hdata->dev = dev;
mutex_init(&hdata->mutex);
ret = hdmi_resources_init(hdata);
if (ret) {
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev, "hdmi_resources_init failed\n");
return ret;
}
hdata->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdata->regs))
return PTR_ERR(hdata->regs);
ret = hdmi_get_ddc_adapter(hdata);
if (ret)
return ret;
ret = hdmi_get_phy_io(hdata);
if (ret)
goto err_ddc;
INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func);
ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
hdmi_irq_thread, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"hdmi", hdata);
if (ret) {
DRM_DEV_ERROR(dev, "failed to register hdmi interrupt\n");
goto err_hdmiphy;
}
hdata->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,syscon-phandle");
if (IS_ERR(hdata->pmureg)) {
DRM_DEV_ERROR(dev, "syscon regmap lookup failed.\n");
ret = -EPROBE_DEFER;
goto err_hdmiphy;
}
if (hdata->drv_data->has_sysreg) {
hdata->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,sysreg-phandle");
if (IS_ERR(hdata->sysreg)) {
DRM_DEV_ERROR(dev, "sysreg regmap lookup failed.\n");
ret = -EPROBE_DEFER;
goto err_hdmiphy;
}
}
if (!IS_ERR(hdata->reg_hdmi_en)) {
ret = regulator_enable(hdata->reg_hdmi_en);
if (ret) {
DRM_DEV_ERROR(dev,
"failed to enable hdmi-en regulator\n");
goto err_hdmiphy;
}
}
pm_runtime_enable(dev);
audio_infoframe = &hdata->audio.infoframe;
hdmi_audio_infoframe_init(audio_infoframe);
audio_infoframe->coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
audio_infoframe->sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
audio_infoframe->sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
audio_infoframe->channels = 2;
ret = hdmi_register_audio_device(hdata);
if (ret)
goto err_rpm_disable;
ret = component_add(&pdev->dev, &hdmi_component_ops);
if (ret)
goto err_unregister_audio;
return ret;
err_unregister_audio:
platform_device_unregister(hdata->audio.pdev);
err_rpm_disable:
pm_runtime_disable(dev);
if (!IS_ERR(hdata->reg_hdmi_en))
regulator_disable(hdata->reg_hdmi_en);
err_hdmiphy:
if (hdata->hdmiphy_port)
put_device(&hdata->hdmiphy_port->dev);
if (hdata->regs_hdmiphy)
iounmap(hdata->regs_hdmiphy);
err_ddc:
put_device(&hdata->ddc_adpt->dev);
return ret;
}
static int hdmi_remove(struct platform_device *pdev)
{
struct hdmi_context *hdata = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&hdata->hotplug_work);
component_del(&pdev->dev, &hdmi_component_ops);
platform_device_unregister(hdata->audio.pdev);
pm_runtime_disable(&pdev->dev);
if (!IS_ERR(hdata->reg_hdmi_en))
regulator_disable(hdata->reg_hdmi_en);
if (hdata->hdmiphy_port)
put_device(&hdata->hdmiphy_port->dev);
if (hdata->regs_hdmiphy)
iounmap(hdata->regs_hdmiphy);
put_device(&hdata->ddc_adpt->dev);
mutex_destroy(&hdata->mutex);
return 0;
}
static int __maybe_unused exynos_hdmi_suspend(struct device *dev)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
hdmi_clk_disable_gates(hdata);
return 0;
}
static int __maybe_unused exynos_hdmi_resume(struct device *dev)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
int ret;
ret = hdmi_clk_enable_gates(hdata);
if (ret < 0)
return ret;
return 0;
}
static const struct dev_pm_ops exynos_hdmi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
.remove = hdmi_remove,
.driver = {
.name = "exynos-hdmi",
.owner = THIS_MODULE,
.pm = &exynos_hdmi_pm_ops,
.of_match_table = hdmi_match_types,
},
};
| linux-master | drivers/gpu/drm/exynos/exynos_hdmi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Samsung Electronics Co.Ltd
* Authors: Joonyoung Shim <[email protected]>
*/
#include <linux/refcount.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <drm/drm_file.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_gem.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
/* valid register range set from user: 0x0104 ~ 0x0880 */
#define G2D_VALID_START 0x0104
#define G2D_VALID_END 0x0880
/* general registers */
#define G2D_SOFT_RESET 0x0000
#define G2D_INTEN 0x0004
#define G2D_INTC_PEND 0x000C
#define G2D_DMA_SFR_BASE_ADDR 0x0080
#define G2D_DMA_COMMAND 0x0084
#define G2D_DMA_STATUS 0x008C
#define G2D_DMA_HOLD_CMD 0x0090
/* command registers */
#define G2D_BITBLT_START 0x0100
/* registers for base address */
#define G2D_SRC_BASE_ADDR 0x0304
#define G2D_SRC_STRIDE 0x0308
#define G2D_SRC_COLOR_MODE 0x030C
#define G2D_SRC_LEFT_TOP 0x0310
#define G2D_SRC_RIGHT_BOTTOM 0x0314
#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
#define G2D_DST_BASE_ADDR 0x0404
#define G2D_DST_STRIDE 0x0408
#define G2D_DST_COLOR_MODE 0x040C
#define G2D_DST_LEFT_TOP 0x0410
#define G2D_DST_RIGHT_BOTTOM 0x0414
#define G2D_DST_PLANE2_BASE_ADDR 0x0418
#define G2D_PAT_BASE_ADDR 0x0500
#define G2D_MSK_BASE_ADDR 0x0520
/* G2D_SOFT_RESET */
#define G2D_SFRCLEAR (1 << 1)
#define G2D_R (1 << 0)
/* G2D_INTEN */
#define G2D_INTEN_ACF (1 << 3)
#define G2D_INTEN_UCF (1 << 2)
#define G2D_INTEN_GCF (1 << 1)
#define G2D_INTEN_SCF (1 << 0)
/* G2D_INTC_PEND */
#define G2D_INTP_ACMD_FIN (1 << 3)
#define G2D_INTP_UCMD_FIN (1 << 2)
#define G2D_INTP_GCMD_FIN (1 << 1)
#define G2D_INTP_SCMD_FIN (1 << 0)
/* G2D_DMA_COMMAND */
#define G2D_DMA_HALT (1 << 2)
#define G2D_DMA_CONTINUE (1 << 1)
#define G2D_DMA_START (1 << 0)
/* G2D_DMA_STATUS */
#define G2D_DMA_LIST_DONE_COUNT (0xFF << 17)
#define G2D_DMA_BITBLT_DONE_COUNT (0xFFFF << 1)
#define G2D_DMA_DONE (1 << 0)
#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
/* G2D_DMA_HOLD_CMD */
#define G2D_USER_HOLD (1 << 2)
#define G2D_LIST_HOLD (1 << 1)
#define G2D_BITBLT_HOLD (1 << 0)
/* G2D_BITBLT_START */
#define G2D_START_CASESEL (1 << 2)
#define G2D_START_NHOLT (1 << 1)
#define G2D_START_BITBLT (1 << 0)
/* buffer color format */
#define G2D_FMT_XRGB8888 0
#define G2D_FMT_ARGB8888 1
#define G2D_FMT_RGB565 2
#define G2D_FMT_XRGB1555 3
#define G2D_FMT_ARGB1555 4
#define G2D_FMT_XRGB4444 5
#define G2D_FMT_ARGB4444 6
#define G2D_FMT_PACKED_RGB888 7
#define G2D_FMT_A8 11
#define G2D_FMT_L8 12
/* buffer valid length */
#define G2D_LEN_MIN 1
#define G2D_LEN_MAX 8000
#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
#define G2D_CMDLIST_NUM 64
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
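/*
* Note (added for clarity): assuming 4 KiB pages and a 32-bit
* unsigned long, G2D_CMDLIST_SIZE is 1024 bytes, which holds 256
* 32-bit words; two of them are reserved for the head/last
* bookkeeping fields of struct g2d_cmdlist, leaving 254 data slots.
*/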
/* default maximum size of the userptr buffer pool: 64 MiB */
#define MAX_POOL (64 * 1024 * 1024)
enum {
BUF_TYPE_GEM = 1,
BUF_TYPE_USERPTR,
};
enum g2d_reg_type {
REG_TYPE_NONE = -1,
REG_TYPE_SRC,
REG_TYPE_SRC_PLANE2,
REG_TYPE_DST,
REG_TYPE_DST_PLANE2,
REG_TYPE_PAT,
REG_TYPE_MSK,
MAX_REG_TYPE_NR
};
enum g2d_flag_bits {
/*
* If set, suspends the runqueue worker after the currently
* processed node is finished.
*/
G2D_BIT_SUSPEND_RUNQUEUE,
/*
* If set, indicates that the engine is currently busy.
*/
G2D_BIT_ENGINE_BUSY,
};
/* cmdlist data structure */
struct g2d_cmdlist {
u32 head;
unsigned long data[G2D_CMDLIST_DATA_NUM];
u32 last; /* last data offset */
};
/*
* A structure of buffer description
*
* @format: color format
* @stride: buffer stride/pitch in bytes
* @left_x: the x coordinate of the top-left corner
* @top_y: the y coordinate of the top-left corner
* @right_x: the x coordinate of the bottom-right corner
* @bottom_y: the y coordinate of the bottom-right corner
*
*/
struct g2d_buf_desc {
unsigned int format;
unsigned int stride;
unsigned int left_x;
unsigned int top_y;
unsigned int right_x;
unsigned int bottom_y;
};
/*
* A structure of buffer information
*
* @map_nr: manages the number of mapped buffers
* @reg_types: stores the register type in the order of the requested commands
* @handles: stores buffer handle in its reg_type position
* @types: stores buffer type in its reg_type position
* @descs: stores buffer description in its reg_type position
*
*/
struct g2d_buf_info {
unsigned int map_nr;
enum g2d_reg_type reg_types[MAX_REG_TYPE_NR];
void *obj[MAX_REG_TYPE_NR];
unsigned int types[MAX_REG_TYPE_NR];
struct g2d_buf_desc descs[MAX_REG_TYPE_NR];
};
struct drm_exynos_pending_g2d_event {
struct drm_pending_event base;
struct drm_exynos_g2d_event event;
};
struct g2d_cmdlist_userptr {
struct list_head list;
dma_addr_t dma_addr;
unsigned long userptr;
unsigned long size;
struct page **pages;
unsigned int npages;
struct sg_table *sgt;
refcount_t refcount;
bool in_pool;
bool out_of_list;
};
struct g2d_cmdlist_node {
struct list_head list;
struct g2d_cmdlist *cmdlist;
dma_addr_t dma_addr;
struct g2d_buf_info buf_info;
struct drm_exynos_pending_g2d_event *event;
};
struct g2d_runqueue_node {
struct list_head list;
struct list_head run_cmdlist;
struct list_head event_list;
struct drm_file *filp;
pid_t pid;
struct completion complete;
int async;
};
struct g2d_data {
struct device *dev;
void *dma_priv;
struct clk *gate_clk;
void __iomem *regs;
int irq;
struct workqueue_struct *g2d_workq;
struct work_struct runqueue_work;
struct drm_device *drm_dev;
unsigned long flags;
/* cmdlist */
struct g2d_cmdlist_node *cmdlist_node;
struct list_head free_cmdlist;
struct mutex cmdlist_mutex;
dma_addr_t cmdlist_pool;
void *cmdlist_pool_virt;
unsigned long cmdlist_dma_attrs;
/* runqueue */
struct g2d_runqueue_node *runqueue_node;
struct list_head runqueue;
struct mutex runqueue_mutex;
struct kmem_cache *runqueue_slab;
unsigned long current_pool;
unsigned long max_pool;
};
static inline void g2d_hw_reset(struct g2d_data *g2d)
{
writel(G2D_R | G2D_SFRCLEAR, g2d->regs + G2D_SOFT_RESET);
clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
}
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node;
int nr;
int ret;
struct g2d_buf_info *buf_info;
g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;
g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(g2d->drm_dev),
G2D_CMDLIST_POOL_SIZE,
&g2d->cmdlist_pool, GFP_KERNEL,
g2d->cmdlist_dma_attrs);
if (!g2d->cmdlist_pool_virt) {
dev_err(dev, "failed to allocate dma memory\n");
return -ENOMEM;
}
node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
if (!node) {
ret = -ENOMEM;
goto err;
}
for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
unsigned int i;
node[nr].cmdlist =
g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
node[nr].dma_addr =
g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
buf_info = &node[nr].buf_info;
for (i = 0; i < MAX_REG_TYPE_NR; i++)
buf_info->reg_types[i] = REG_TYPE_NONE;
list_add_tail(&node[nr].list, &g2d->free_cmdlist);
}
return 0;
err:
dma_free_attrs(to_dma_dev(g2d->drm_dev), G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
return ret;
}
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
kfree(g2d->cmdlist_node);
if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
dma_free_attrs(to_dma_dev(g2d->drm_dev),
G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
}
}
static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node;
mutex_lock(&g2d->cmdlist_mutex);
if (list_empty(&g2d->free_cmdlist)) {
dev_err(dev, "there is no free cmdlist\n");
mutex_unlock(&g2d->cmdlist_mutex);
return NULL;
}
node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
list);
list_del_init(&node->list);
mutex_unlock(&g2d->cmdlist_mutex);
return node;
}
static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
{
mutex_lock(&g2d->cmdlist_mutex);
list_move_tail(&node->list, &g2d->free_cmdlist);
mutex_unlock(&g2d->cmdlist_mutex);
}
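/*
* Note (added for clarity): in-use cmdlists form a singly linked list
* in DMA memory. g2d_add_cmdlist_to_inuse() stores the DMA address of
* the new cmdlist in the trailing data slot of the previous one, which
* is how the engine fetches list after list.
*/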
static void g2d_add_cmdlist_to_inuse(struct drm_exynos_file_private *file_priv,
struct g2d_cmdlist_node *node)
{
struct g2d_cmdlist_node *lnode;
if (list_empty(&file_priv->inuse_cmdlist))
goto add_to_list;
/* link the previous cmdlist to the base address of the new one */
lnode = list_entry(file_priv->inuse_cmdlist.prev,
struct g2d_cmdlist_node, list);
lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
add_to_list:
list_add_tail(&node->list, &file_priv->inuse_cmdlist);
if (node->event)
list_add_tail(&node->event->base.link, &file_priv->event_list);
}
static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
void *obj,
bool force)
{
struct g2d_cmdlist_userptr *g2d_userptr = obj;
if (!obj)
return;
if (force)
goto out;
refcount_dec(&g2d_userptr->refcount);
if (refcount_read(&g2d_userptr->refcount) > 0)
return;
if (g2d_userptr->in_pool)
return;
out:
dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
DMA_BIDIRECTIONAL, 0);
unpin_user_pages_dirty_lock(g2d_userptr->pages, g2d_userptr->npages,
true);
kvfree(g2d_userptr->pages);
if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list);
sg_free_table(g2d_userptr->sgt);
kfree(g2d_userptr->sgt);
kfree(g2d_userptr);
}
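/*
* Note (added for clarity): userptr mappings are cached per DRM file
* in file_priv->userptr_list. While the accumulated size stays below
* g2d->max_pool, mappings are flagged in_pool and stay pinned until
* the file is closed; out_of_list marks stale entries that must only
* be torn down once any in-flight DMA has completed.
*/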
static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
unsigned long userptr,
unsigned long size,
struct drm_file *filp,
void **obj)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
struct g2d_cmdlist_userptr *g2d_userptr;
struct sg_table *sgt;
unsigned long start, end;
unsigned int npages, offset;
int ret;
if (!size) {
DRM_DEV_ERROR(g2d->dev, "invalid userptr size.\n");
return ERR_PTR(-EINVAL);
}
/* check if userptr already exists in userptr_list. */
list_for_each_entry(g2d_userptr, &file_priv->userptr_list, list) {
if (g2d_userptr->userptr == userptr) {
/*
* Also check the size, because the same address could
* be used with a different size.
*/
if (g2d_userptr->size == size) {
refcount_inc(&g2d_userptr->refcount);
*obj = g2d_userptr;
return &g2d_userptr->dma_addr;
}
/*
* At this point the G2D DMA engine may still be accessing this
* g2d_userptr memory region. Remove the g2d_userptr object from
* userptr_list so it is not looked up again, and take it out of
* the userptr pool so it is released only after the DMA access
* has completed.
*/
g2d_userptr->out_of_list = true;
g2d_userptr->in_pool = false;
list_del_init(&g2d_userptr->list);
break;
}
}
g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
if (!g2d_userptr)
return ERR_PTR(-ENOMEM);
refcount_set(&g2d_userptr->refcount, 1);
g2d_userptr->size = size;
start = userptr & PAGE_MASK;
offset = userptr & ~PAGE_MASK;
end = PAGE_ALIGN(userptr + size);
npages = (end - start) >> PAGE_SHIFT;
g2d_userptr->pages = kvmalloc_array(npages, sizeof(*g2d_userptr->pages),
GFP_KERNEL);
if (!g2d_userptr->pages) {
ret = -ENOMEM;
goto err_free;
}
ret = pin_user_pages_fast(start, npages,
FOLL_WRITE | FOLL_LONGTERM,
g2d_userptr->pages);
if (ret != npages) {
DRM_DEV_ERROR(g2d->dev,
"failed to get user pages from userptr.\n");
if (ret < 0)
goto err_destroy_pages;
npages = ret;
ret = -EFAULT;
goto err_unpin_pages;
}
g2d_userptr->npages = npages;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
ret = -ENOMEM;
goto err_unpin_pages;
}
ret = sg_alloc_table_from_pages(sgt,
g2d_userptr->pages,
npages, offset, size, GFP_KERNEL);
if (ret < 0) {
DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n");
goto err_free_sgt;
}
g2d_userptr->sgt = sgt;
ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt,
DMA_BIDIRECTIONAL, 0);
if (ret) {
DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
goto err_sg_free_table;
}
g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
g2d_userptr->userptr = userptr;
list_add_tail(&g2d_userptr->list, &file_priv->userptr_list);
if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
g2d->current_pool += npages << PAGE_SHIFT;
g2d_userptr->in_pool = true;
}
*obj = g2d_userptr;
return &g2d_userptr->dma_addr;
err_sg_free_table:
sg_free_table(sgt);
err_free_sgt:
kfree(sgt);
err_unpin_pages:
unpin_user_pages(g2d_userptr->pages, npages);
err_destroy_pages:
kvfree(g2d_userptr->pages);
err_free:
kfree(g2d_userptr);
return ERR_PTR(ret);
}
static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
struct g2d_cmdlist_userptr *g2d_userptr, *n;
list_for_each_entry_safe(g2d_userptr, n, &file_priv->userptr_list, list)
if (g2d_userptr->in_pool)
g2d_userptr_put_dma_addr(g2d, g2d_userptr, true);
g2d->current_pool = 0;
}
static enum g2d_reg_type g2d_get_reg_type(struct g2d_data *g2d, int reg_offset)
{
enum g2d_reg_type reg_type;
switch (reg_offset) {
case G2D_SRC_BASE_ADDR:
case G2D_SRC_STRIDE:
case G2D_SRC_COLOR_MODE:
case G2D_SRC_LEFT_TOP:
case G2D_SRC_RIGHT_BOTTOM:
reg_type = REG_TYPE_SRC;
break;
case G2D_SRC_PLANE2_BASE_ADDR:
reg_type = REG_TYPE_SRC_PLANE2;
break;
case G2D_DST_BASE_ADDR:
case G2D_DST_STRIDE:
case G2D_DST_COLOR_MODE:
case G2D_DST_LEFT_TOP:
case G2D_DST_RIGHT_BOTTOM:
reg_type = REG_TYPE_DST;
break;
case G2D_DST_PLANE2_BASE_ADDR:
reg_type = REG_TYPE_DST_PLANE2;
break;
case G2D_PAT_BASE_ADDR:
reg_type = REG_TYPE_PAT;
break;
case G2D_MSK_BASE_ADDR:
reg_type = REG_TYPE_MSK;
break;
default:
reg_type = REG_TYPE_NONE;
DRM_DEV_ERROR(g2d->dev, "Unknown register offset![%d]\n",
reg_offset);
break;
}
return reg_type;
}
static unsigned long g2d_get_buf_bpp(unsigned int format)
{
unsigned long bpp;
switch (format) {
case G2D_FMT_XRGB8888:
case G2D_FMT_ARGB8888:
bpp = 4;
break;
case G2D_FMT_RGB565:
case G2D_FMT_XRGB1555:
case G2D_FMT_ARGB1555:
case G2D_FMT_XRGB4444:
case G2D_FMT_ARGB4444:
bpp = 2;
break;
case G2D_FMT_PACKED_RGB888:
bpp = 3;
break;
default:
bpp = 1;
break;
}
return bpp;
}
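/*
* Worked example (illustrative) for the bounds check below: an
* ARGB8888 destination with stride 4096, right_x 1024 and bottom_y 768
* makes the engine's last access
* (768 - 1) * 4096 + 1024 * 4 - 1 = 3145727,
* so the backing buffer must be at least 3145728 bytes (3 MiB).
*/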
static bool g2d_check_buf_desc_is_valid(struct g2d_data *g2d,
struct g2d_buf_desc *buf_desc,
enum g2d_reg_type reg_type,
unsigned long size)
{
int width, height;
unsigned long bpp, last_pos;
/*
* Only source and destination buffers are checked here;
* all other buffer types are considered valid.
*/
if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
return true;
/* This check also makes sure that right_x > left_x. */
width = (int)buf_desc->right_x - (int)buf_desc->left_x;
if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
DRM_DEV_ERROR(g2d->dev, "width[%d] is out of range!\n", width);
return false;
}
/* This check also makes sure that bottom_y > top_y. */
height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
DRM_DEV_ERROR(g2d->dev,
"height[%d] is out of range!\n", height);
return false;
}
bpp = g2d_get_buf_bpp(buf_desc->format);
/* Compute the position of the last byte that the engine accesses. */
last_pos = ((unsigned long)buf_desc->bottom_y - 1) *
(unsigned long)buf_desc->stride +
(unsigned long)buf_desc->right_x * bpp - 1;
/*
* Since right_x > left_x and bottom_y > top_y we already know
* that first_pos < last_pos (first_pos being the position of
* the first byte the engine accesses); it only remains to
* check that last_pos is smaller than the buffer size.
*/
if (last_pos >= size) {
DRM_DEV_ERROR(g2d->dev, "last engine access position [%lu] "
"is out of range [%lu]!\n", last_pos, size);
return false;
}
return true;
}
static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
struct drm_device *drm_dev,
struct drm_file *file)
{
struct g2d_cmdlist *cmdlist = node->cmdlist;
struct g2d_buf_info *buf_info = &node->buf_info;
int offset;
int ret;
int i;
for (i = 0; i < buf_info->map_nr; i++) {
struct g2d_buf_desc *buf_desc;
enum g2d_reg_type reg_type;
int reg_pos;
unsigned long handle;
dma_addr_t *addr;
reg_pos = cmdlist->last - 2 * (i + 1);
offset = cmdlist->data[reg_pos];
handle = cmdlist->data[reg_pos + 1];
reg_type = g2d_get_reg_type(g2d, offset);
if (reg_type == REG_TYPE_NONE) {
ret = -EFAULT;
goto err;
}
buf_desc = &buf_info->descs[reg_type];
if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
struct exynos_drm_gem *exynos_gem;
exynos_gem = exynos_drm_gem_get(file, handle);
if (!exynos_gem) {
ret = -EFAULT;
goto err;
}
if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
reg_type, exynos_gem->size)) {
exynos_drm_gem_put(exynos_gem);
ret = -EFAULT;
goto err;
}
addr = &exynos_gem->dma_addr;
buf_info->obj[reg_type] = exynos_gem;
} else {
struct drm_exynos_g2d_userptr g2d_userptr;
if (copy_from_user(&g2d_userptr, (void __user *)handle,
sizeof(struct drm_exynos_g2d_userptr))) {
ret = -EFAULT;
goto err;
}
if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
reg_type,
g2d_userptr.size)) {
ret = -EFAULT;
goto err;
}
addr = g2d_userptr_get_dma_addr(g2d,
g2d_userptr.userptr,
g2d_userptr.size,
file,
&buf_info->obj[reg_type]);
if (IS_ERR(addr)) {
ret = -EFAULT;
goto err;
}
}
cmdlist->data[reg_pos + 1] = *addr;
buf_info->reg_types[i] = reg_type;
}
return 0;
err:
buf_info->map_nr = i;
return ret;
}
static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
struct drm_file *filp)
{
struct g2d_buf_info *buf_info = &node->buf_info;
int i;
for (i = 0; i < buf_info->map_nr; i++) {
struct g2d_buf_desc *buf_desc;
enum g2d_reg_type reg_type;
void *obj;
reg_type = buf_info->reg_types[i];
buf_desc = &buf_info->descs[reg_type];
obj = buf_info->obj[reg_type];
if (buf_info->types[reg_type] == BUF_TYPE_GEM)
exynos_drm_gem_put(obj);
else
g2d_userptr_put_dma_addr(g2d, obj, false);
buf_info->reg_types[i] = REG_TYPE_NONE;
buf_info->obj[reg_type] = NULL;
buf_info->types[reg_type] = 0;
memset(buf_desc, 0x00, sizeof(*buf_desc));
}
buf_info->map_nr = 0;
}
static void g2d_dma_start(struct g2d_data *g2d,
struct g2d_runqueue_node *runqueue_node)
{
struct g2d_cmdlist_node *node =
list_first_entry(&runqueue_node->run_cmdlist,
struct g2d_cmdlist_node, list);
set_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
}
static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
{
struct g2d_runqueue_node *runqueue_node;
if (list_empty(&g2d->runqueue))
return NULL;
runqueue_node = list_first_entry(&g2d->runqueue,
struct g2d_runqueue_node, list);
list_del_init(&runqueue_node->list);
return runqueue_node;
}
static void g2d_free_runqueue_node(struct g2d_data *g2d,
struct g2d_runqueue_node *runqueue_node)
{
struct g2d_cmdlist_node *node;
mutex_lock(&g2d->cmdlist_mutex);
/*
* commands in run_cmdlist have been completed so unmap all gem
* objects in each command node so that they are unreferenced.
*/
list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
mutex_unlock(&g2d->cmdlist_mutex);
kmem_cache_free(g2d->runqueue_slab, runqueue_node);
}
/**
* g2d_remove_runqueue_nodes - remove items from the list of runqueue nodes
* @g2d: G2D state object
* @file: if not NULL, only remove items owned by this DRM file
*
* Has to be called under runqueue lock.
*/
static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file *file)
{
struct g2d_runqueue_node *node, *n;
if (list_empty(&g2d->runqueue))
return;
list_for_each_entry_safe(node, n, &g2d->runqueue, list) {
if (file && node->filp != file)
continue;
list_del_init(&node->list);
g2d_free_runqueue_node(g2d, node);
}
}
static void g2d_runqueue_worker(struct work_struct *work)
{
struct g2d_data *g2d = container_of(work, struct g2d_data,
runqueue_work);
struct g2d_runqueue_node *runqueue_node;
/*
* The engine is busy and the completion of the current node is going
* to poke the runqueue worker, so nothing to do here.
*/
if (test_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags))
return;
mutex_lock(&g2d->runqueue_mutex);
runqueue_node = g2d->runqueue_node;
g2d->runqueue_node = NULL;
if (runqueue_node) {
pm_runtime_mark_last_busy(g2d->dev);
pm_runtime_put_autosuspend(g2d->dev);
complete(&runqueue_node->complete);
if (runqueue_node->async)
g2d_free_runqueue_node(g2d, runqueue_node);
}
if (!test_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags)) {
g2d->runqueue_node = g2d_get_runqueue_node(g2d);
if (g2d->runqueue_node) {
int ret;
ret = pm_runtime_resume_and_get(g2d->dev);
if (ret < 0) {
dev_err(g2d->dev, "failed to enable G2D device.\n");
goto out;
}
g2d_dma_start(g2d, g2d->runqueue_node);
}
}
out:
mutex_unlock(&g2d->runqueue_mutex);
}
static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
{
struct drm_device *drm_dev = g2d->drm_dev;
struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
struct drm_exynos_pending_g2d_event *e;
struct timespec64 now;
if (list_empty(&runqueue_node->event_list))
return;
e = list_first_entry(&runqueue_node->event_list,
struct drm_exynos_pending_g2d_event, base.link);
ktime_get_ts64(&now);
e->event.tv_sec = now.tv_sec;
e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
e->event.cmdlist_no = cmdlist_no;
drm_send_event(drm_dev, &e->base);
}
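/*
* Note (added for clarity): G2D_INTP_GCMD_FIN fires per cmdlist when a
* LIST_HOLD pauses the DMA engine; the handler below delivers the
* completion event, clears the hold and issues G2D_DMA_CONTINUE unless
* G2D_INTP_ACMD_FIN is also pending. G2D_INTP_ACMD_FIN fires once all
* lists are done, clears the busy bit and pokes the runqueue worker.
*/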
static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
{
struct g2d_data *g2d = dev_id;
u32 pending;
pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
if (pending)
writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);
if (pending & G2D_INTP_GCMD_FIN) {
u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);
cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
G2D_DMA_LIST_DONE_COUNT_OFFSET;
g2d_finish_event(g2d, cmdlist_no);
writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
if (!(pending & G2D_INTP_ACMD_FIN)) {
writel_relaxed(G2D_DMA_CONTINUE,
g2d->regs + G2D_DMA_COMMAND);
}
}
if (pending & G2D_INTP_ACMD_FIN) {
clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
queue_work(g2d->g2d_workq, &g2d->runqueue_work);
}
return IRQ_HANDLED;
}
/**
* g2d_wait_finish - wait for the G2D engine to finish the current runqueue node
* @g2d: G2D state object
* @file: if not NULL, only wait if the current runqueue node belongs
* to this DRM file
*
* Should the engine not become idle after a 100ms timeout, a hardware
* reset is issued.
*/
static void g2d_wait_finish(struct g2d_data *g2d, struct drm_file *file)
{
struct device *dev = g2d->dev;
struct g2d_runqueue_node *runqueue_node = NULL;
unsigned int tries = 10;
mutex_lock(&g2d->runqueue_mutex);
/* If no node is currently processed, we have nothing to do. */
if (!g2d->runqueue_node)
goto out;
runqueue_node = g2d->runqueue_node;
/* Check if the currently processed item belongs to us. */
if (file && runqueue_node->filp != file)
goto out;
mutex_unlock(&g2d->runqueue_mutex);
/* Wait for the G2D engine to finish. */
while (tries-- && (g2d->runqueue_node == runqueue_node))
mdelay(10);
mutex_lock(&g2d->runqueue_mutex);
if (g2d->runqueue_node != runqueue_node)
goto out;
dev_err(dev, "wait timed out, resetting engine...\n");
g2d_hw_reset(g2d);
/*
* After the hardware reset of the engine we are going to lose
* the IRQ which normally triggers the PM runtime put(), so do
* it manually here.
*/
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
complete(&runqueue_node->complete);
if (runqueue_node->async)
g2d_free_runqueue_node(g2d, runqueue_node);
out:
mutex_unlock(&g2d->runqueue_mutex);
}
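/*
* Note (added for clarity): each submitted command is a
* (register offset, value) pair. The checker below rejects offsets
* outside the user-writable window (G2D_VALID_START..G2D_VALID_END)
* or not 4-byte aligned, and for base-address registers records
* whether the value is a userptr descriptor (bit 31 set) or a GEM
* handle, for the later mapping pass.
*/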
static int g2d_check_reg_offset(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
int nr, bool for_addr)
{
struct g2d_cmdlist *cmdlist = node->cmdlist;
int reg_offset;
int index;
int i;
for (i = 0; i < nr; i++) {
struct g2d_buf_info *buf_info = &node->buf_info;
struct g2d_buf_desc *buf_desc;
enum g2d_reg_type reg_type;
unsigned long value;
index = cmdlist->last - 2 * (i + 1);
reg_offset = cmdlist->data[index] & ~0xfffff000;
if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
goto err;
if (reg_offset % 4)
goto err;
switch (reg_offset) {
case G2D_SRC_BASE_ADDR:
case G2D_SRC_PLANE2_BASE_ADDR:
case G2D_DST_BASE_ADDR:
case G2D_DST_PLANE2_BASE_ADDR:
case G2D_PAT_BASE_ADDR:
case G2D_MSK_BASE_ADDR:
if (!for_addr)
goto err;
reg_type = g2d_get_reg_type(g2d, reg_offset);
/* check userptr buffer type. */
if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
buf_info->types[reg_type] = BUF_TYPE_USERPTR;
cmdlist->data[index] &= ~G2D_BUF_USERPTR;
} else {
buf_info->types[reg_type] = BUF_TYPE_GEM;
}
break;
case G2D_SRC_STRIDE:
case G2D_DST_STRIDE:
if (for_addr)
goto err;
reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
buf_desc->stride = cmdlist->data[index + 1];
break;
case G2D_SRC_COLOR_MODE:
case G2D_DST_COLOR_MODE:
if (for_addr)
goto err;
reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
buf_desc->format = value & 0xf;
break;
case G2D_SRC_LEFT_TOP:
case G2D_DST_LEFT_TOP:
if (for_addr)
goto err;
reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
buf_desc->left_x = value & 0x1fff;
buf_desc->top_y = (value & 0x1fff0000) >> 16;
break;
case G2D_SRC_RIGHT_BOTTOM:
case G2D_DST_RIGHT_BOTTOM:
if (for_addr)
goto err;
reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
buf_desc->right_x = value & 0x1fff;
buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
break;
default:
if (for_addr)
goto err;
break;
}
}
return 0;
err:
dev_err(g2d->dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
return -EINVAL;
}
/* ioctl functions */
int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_g2d_get_ver *ver = data;
ver->major = G2D_HW_MAJOR_VER;
ver->minor = G2D_HW_MINOR_VER;
return 0;
}
int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_private *priv = drm_dev->dev_private;
struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
struct drm_exynos_g2d_set_cmdlist *req = data;
struct drm_exynos_g2d_cmd *cmd;
struct drm_exynos_pending_g2d_event *e;
struct g2d_cmdlist_node *node;
struct g2d_cmdlist *cmdlist;
int size;
int ret;
node = g2d_get_cmdlist(g2d);
if (!node)
return -ENOMEM;
/*
* To avoid an integer overflow for the later size computations, we
* enforce a maximum number of submitted commands here. This limit is
* sufficient for all conceivable use cases of the G2D.
*/
if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
dev_err(g2d->dev, "number of submitted G2D commands exceeds limit\n");
return -EINVAL;
}
node->event = NULL;
if (req->event_type != G2D_EVENT_NOT) {
e = kzalloc(sizeof(*node->event), GFP_KERNEL);
if (!e) {
ret = -ENOMEM;
goto err;
}
e->event.base.type = DRM_EXYNOS_G2D_EVENT;
e->event.base.length = sizeof(e->event);
e->event.user_data = req->user_data;
ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base);
if (ret) {
kfree(e);
goto err;
}
node->event = e;
}
cmdlist = node->cmdlist;
cmdlist->last = 0;
/*
* If the SFR registers are not cleared, the cmdlist is affected by
* register values left over from the previous cmdlist. The G2D
* hardware executes the SFR clear command and the following command
* at the same time, so that following command would be ignored and
* execution would effectively resume one command later; a dummy
* command is therefore placed right after the SFR clear command.
*/
cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
cmdlist->data[cmdlist->last++] = 0;
/*
* The 'LIST_HOLD' command should be set in the DMA_HOLD_CMD
* register and the GCF bit should be set in the INTEN register
* if the user wants a G2D interrupt event once the current
* command list execution has finished.
* Otherwise only the ACF bit should be set in the INTEN
* register, so that a single interrupt occurs after all command
* lists have been completed.
*/
if (node->event) {
cmdlist->data[cmdlist->last++] = G2D_INTEN;
cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
} else {
cmdlist->data[cmdlist->last++] = G2D_INTEN;
cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
}
/*
* Check the size of cmdlist. The 2 that is added last comes from
* the implicit G2D_BITBLT_START that is appended once we have
* checked all the submitted commands.
*/
size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
dev_err(g2d->dev, "cmdlist size is too big\n");
ret = -EINVAL;
goto err_free_event;
}
cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
if (copy_from_user(cmdlist->data + cmdlist->last,
(void __user *)cmd,
sizeof(*cmd) * req->cmd_nr)) {
ret = -EFAULT;
goto err_free_event;
}
cmdlist->last += req->cmd_nr * 2;
ret = g2d_check_reg_offset(g2d, node, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
node->buf_info.map_nr = req->cmd_buf_nr;
if (req->cmd_buf_nr) {
struct drm_exynos_g2d_cmd *cmd_buf;
cmd_buf = (struct drm_exynos_g2d_cmd *)
(unsigned long)req->cmd_buf;
if (copy_from_user(cmdlist->data + cmdlist->last,
(void __user *)cmd_buf,
sizeof(*cmd_buf) * req->cmd_buf_nr)) {
ret = -EFAULT;
goto err_free_event;
}
cmdlist->last += req->cmd_buf_nr * 2;
ret = g2d_check_reg_offset(g2d, node, req->cmd_buf_nr, true);
if (ret < 0)
goto err_free_event;
ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
if (ret < 0)
goto err_unmap;
}
cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;
/* head */
cmdlist->head = cmdlist->last / 2;
/* tail */
cmdlist->data[cmdlist->last] = 0;
g2d_add_cmdlist_to_inuse(file_priv, node);
return 0;
err_unmap:
g2d_unmap_cmdlist_gem(g2d, node, file);
err_free_event:
if (node->event)
drm_event_cancel_free(drm_dev, &node->event->base);
err:
g2d_put_cmdlist(g2d, node);
return ret;
}
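/*
* Note (added for clarity): for asynchronous submissions the runqueue
* node is freed by the runqueue worker (or by g2d_wait_finish() after
* an engine reset) once the completion fires; only synchronous callers
* wait for the completion and free the node in the ioctl below.
*/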
int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_private *priv = drm_dev->dev_private;
struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
struct drm_exynos_g2d_exec *req = data;
struct g2d_runqueue_node *runqueue_node;
struct list_head *run_cmdlist;
struct list_head *event_list;
runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
if (!runqueue_node)
return -ENOMEM;
run_cmdlist = &runqueue_node->run_cmdlist;
event_list = &runqueue_node->event_list;
INIT_LIST_HEAD(run_cmdlist);
INIT_LIST_HEAD(event_list);
init_completion(&runqueue_node->complete);
runqueue_node->async = req->async;
list_splice_init(&file_priv->inuse_cmdlist, run_cmdlist);
list_splice_init(&file_priv->event_list, event_list);
if (list_empty(run_cmdlist)) {
dev_err(g2d->dev, "there is no inuse cmdlist\n");
kmem_cache_free(g2d->runqueue_slab, runqueue_node);
return -EPERM;
}
mutex_lock(&g2d->runqueue_mutex);
runqueue_node->pid = current->pid;
runqueue_node->filp = file;
list_add_tail(&runqueue_node->list, &g2d->runqueue);
mutex_unlock(&g2d->runqueue_mutex);
/* Let the runqueue know that there is work to do. */
queue_work(g2d->g2d_workq, &g2d->runqueue_work);
if (req->async)
goto out;
wait_for_completion(&runqueue_node->complete);
g2d_free_runqueue_node(g2d, runqueue_node);
out:
return 0;
}
int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
INIT_LIST_HEAD(&file_priv->inuse_cmdlist);
INIT_LIST_HEAD(&file_priv->event_list);
INIT_LIST_HEAD(&file_priv->userptr_list);
return 0;
}
void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_private *priv = drm_dev->dev_private;
struct g2d_data *g2d;
struct g2d_cmdlist_node *node, *n;
if (!priv->g2d_dev)
return;
g2d = dev_get_drvdata(priv->g2d_dev);
/* Remove the runqueue nodes that belong to us. */
mutex_lock(&g2d->runqueue_mutex);
g2d_remove_runqueue_nodes(g2d, file);
mutex_unlock(&g2d->runqueue_mutex);
/*
* Wait for the runqueue worker to finish its current node.
* After this the engine should no longer be accessing any
* memory belonging to us.
*/
g2d_wait_finish(g2d, file);
/*
* Even after the engine is idle, there might still be stale cmdlists
* (i.e. cmdlists which we submitted but never executed) around, with
* their corresponding GEM/userptr buffers.
* Properly unmap these buffers here.
*/
mutex_lock(&g2d->cmdlist_mutex);
list_for_each_entry_safe(node, n, &file_priv->inuse_cmdlist, list) {
g2d_unmap_cmdlist_gem(g2d, node, file);
list_move_tail(&node->list, &g2d->free_cmdlist);
}
mutex_unlock(&g2d->cmdlist_mutex);
/* release all g2d_userptr in pool. */
g2d_userptr_free_all(g2d, file);
}
static int g2d_bind(struct device *dev, struct device *master, void *data)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_private *priv = drm_dev->dev_private;
int ret;
g2d->drm_dev = drm_dev;
/* allocate dma-aware cmdlist buffer. */
ret = g2d_init_cmdlist(g2d);
if (ret < 0) {
dev_err(dev, "cmdlist init failed\n");
return ret;
}
ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
g2d_fini_cmdlist(g2d);
return ret;
}
priv->g2d_dev = dev;
dev_info(dev, "The Exynos G2D (ver %d.%d) successfully registered.\n",
G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
return 0;
}
static void g2d_unbind(struct device *dev, struct device *master, void *data)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_private *priv = drm_dev->dev_private;
/* Suspend operation and wait for engine idle. */
set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
g2d_wait_finish(g2d, NULL);
priv->g2d_dev = NULL;
cancel_work_sync(&g2d->runqueue_work);
exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
}
static const struct component_ops g2d_component_ops = {
.bind = g2d_bind,
.unbind = g2d_unbind,
};
static int g2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct g2d_data *g2d;
int ret;
g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
if (!g2d)
return -ENOMEM;
g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
sizeof(struct g2d_runqueue_node), 0, 0, NULL);
if (!g2d->runqueue_slab)
return -ENOMEM;
g2d->dev = dev;
g2d->g2d_workq = create_singlethread_workqueue("g2d");
if (!g2d->g2d_workq) {
dev_err(dev, "failed to create workqueue\n");
ret = -EINVAL;
goto err_destroy_slab;
}
INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
INIT_LIST_HEAD(&g2d->free_cmdlist);
INIT_LIST_HEAD(&g2d->runqueue);
mutex_init(&g2d->cmdlist_mutex);
mutex_init(&g2d->runqueue_mutex);
g2d->gate_clk = devm_clk_get(dev, "fimg2d");
if (IS_ERR(g2d->gate_clk)) {
dev_err(dev, "failed to get gate clock\n");
ret = PTR_ERR(g2d->gate_clk);
goto err_destroy_workqueue;
}
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 2000);
pm_runtime_enable(dev);
clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
g2d->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(g2d->regs)) {
ret = PTR_ERR(g2d->regs);
goto err_put_clk;
}
g2d->irq = platform_get_irq(pdev, 0);
if (g2d->irq < 0) {
ret = g2d->irq;
goto err_put_clk;
}
ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0,
"drm_g2d", g2d);
if (ret < 0) {
dev_err(dev, "irq request failed\n");
goto err_put_clk;
}
g2d->max_pool = MAX_POOL;
platform_set_drvdata(pdev, g2d);
ret = component_add(dev, &g2d_component_ops);
if (ret < 0) {
dev_err(dev, "failed to register drm g2d device\n");
goto err_put_clk;
}
return 0;
err_put_clk:
pm_runtime_disable(dev);
err_destroy_workqueue:
destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
kmem_cache_destroy(g2d->runqueue_slab);
return ret;
}
static int g2d_remove(struct platform_device *pdev)
{
struct g2d_data *g2d = platform_get_drvdata(pdev);
component_del(&pdev->dev, &g2d_component_ops);
/* There should be no locking needed here. */
g2d_remove_runqueue_nodes(g2d, NULL);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
g2d_fini_cmdlist(g2d);
destroy_workqueue(g2d->g2d_workq);
kmem_cache_destroy(g2d->runqueue_slab);
return 0;
}
static int g2d_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
/*
* Suspend the runqueue worker operation and wait until the G2D
* engine is idle.
*/
set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
g2d_wait_finish(g2d, NULL);
flush_work(&g2d->runqueue_work);
return 0;
}
static int g2d_resume(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
queue_work(g2d->g2d_workq, &g2d->runqueue_work);
return 0;
}
static int g2d_runtime_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
clk_disable_unprepare(g2d->gate_clk);
return 0;
}
static int g2d_runtime_resume(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(g2d->gate_clk);
if (ret < 0)
dev_warn(dev, "failed to enable clock.\n");
return ret;
}
static const struct dev_pm_ops g2d_pm_ops = {
SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
};
static const struct of_device_id exynos_g2d_match[] = {
{ .compatible = "samsung,exynos5250-g2d" },
{ .compatible = "samsung,exynos4212-g2d" },
{},
};
MODULE_DEVICE_TABLE(of, exynos_g2d_match);
struct platform_driver g2d_driver = {
.probe = g2d_probe,
.remove = g2d_remove,
.driver = {
.name = "exynos-drm-g2d",
.owner = THIS_MODULE,
.pm = pm_ptr(&g2d_pm_ops),
.of_match_table = exynos_g2d_match,
},
};
| linux-master | drivers/gpu/drm/exynos/exynos_drm_g2d.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_fb.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Authors:
* Inki Dae <[email protected]>
* Joonyoung Shim <[email protected]>
* Seung-Woo Kim <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
static int check_fb_gem_memory_type(struct drm_device *drm_dev,
struct exynos_drm_gem *exynos_gem)
{
unsigned int flags;
/*
* If the exynos drm driver supports IOMMU then the framebuffer
* can use all buffer types.
*/
if (is_drm_iommu_supported(drm_dev))
return 0;
flags = exynos_gem->flags;
/*
* Physically non-contiguous memory type for framebuffer is not
* supported without IOMMU.
*/
if (IS_NONCONTIG_BUFFER(flags)) {
DRM_DEV_ERROR(drm_dev->dev,
"Non-contiguous GEM memory is not supported.\n");
return -EINVAL;
}
return 0;
}
static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
struct drm_framebuffer *
exynos_drm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct exynos_drm_gem **exynos_gem,
int count)
{
struct drm_framebuffer *fb;
int i;
int ret;
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (!fb)
return ERR_PTR(-ENOMEM);
for (i = 0; i < count; i++) {
ret = check_fb_gem_memory_type(dev, exynos_gem[i]);
if (ret < 0)
goto err;
fb->obj[i] = &exynos_gem[i]->base;
}
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
"failed to initialize framebuffer\n");
goto err;
}
return fb;
err:
kfree(fb);
return ERR_PTR(ret);
}
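/*
* Note (added for clarity): the per-plane size check below computes
* height * pitch + offset for every plane, where non-primary planes of
* subsampled formats use DIV_ROUND_UP(height, vsub); e.g. for a 2x2
* subsampled format such as NV12 at 1920x1080, plane 0 uses height
* 1080 and plane 1 uses height 540.
*/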
static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
struct drm_framebuffer *fb;
int i;
int ret;
for (i = 0; i < info->num_planes; i++) {
unsigned int height = (i == 0) ? mode_cmd->height :
DIV_ROUND_UP(mode_cmd->height, info->vsub);
unsigned long size = height * mode_cmd->pitches[i] +
mode_cmd->offsets[i];
exynos_gem[i] = exynos_drm_gem_get(file_priv,
mode_cmd->handles[i]);
if (!exynos_gem[i]) {
DRM_DEV_ERROR(dev->dev,
"failed to lookup gem object\n");
ret = -ENOENT;
goto err;
}
if (size > exynos_gem[i]->size) {
i++;
ret = -EINVAL;
goto err;
}
}
fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto err;
}
return fb;
err:
while (i--)
exynos_drm_gem_put(exynos_gem[i]);
return ERR_PTR(ret);
}
dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
{
struct exynos_drm_gem *exynos_gem;
if (WARN_ON_ONCE(index >= MAX_FB_BUFFER))
return 0;
exynos_gem = to_exynos_gem(fb->obj[index]);
return exynos_gem->dma_addr + fb->offsets[index];
}
static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
void exynos_drm_mode_config_init(struct drm_device *dev)
{
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
/*
* Set the max width and height to the default value (4096x4096).
* These values are used to check the framebuffer size limitation
* in drm_mode_addfb().
*/
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
dev->mode_config.funcs = &exynos_drm_mode_config_funcs;
dev->mode_config.helper_private = &exynos_drm_mode_config_helpers;
dev->mode_config.normalize_zpos = true;
}
| linux-master | drivers/gpu/drm/exynos/exynos_drm_fb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Samsung MIPI DSIM glue for Exynos SoCs.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd
*
* Contacts: Tomasz Figa <[email protected]>
*/
#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <drm/bridge/samsung-dsim.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
struct exynos_dsi {
struct drm_encoder encoder;
};
static irqreturn_t exynos_dsi_te_irq_handler(struct samsung_dsim *dsim)
{
struct exynos_dsi *dsi = dsim->priv;
struct drm_encoder *encoder = &dsi->encoder;
if (dsim->state & DSIM_STATE_VIDOUT_AVAILABLE)
exynos_drm_crtc_te_handler(encoder->crtc);
return IRQ_HANDLED;
}
static int exynos_dsi_host_attach(struct samsung_dsim *dsim,
struct mipi_dsi_device *device)
{
struct exynos_dsi *dsi = dsim->priv;
struct drm_encoder *encoder = &dsi->encoder;
struct drm_device *drm = encoder->dev;
drm_bridge_attach(encoder, &dsim->bridge,
list_first_entry_or_null(&encoder->bridge_chain,
struct drm_bridge,
chain_node), 0);
mutex_lock(&drm->mode_config.mutex);
dsim->lanes = device->lanes;
dsim->format = device->format;
dsim->mode_flags = device->mode_flags;
exynos_drm_crtc_get_by_type(drm, EXYNOS_DISPLAY_TYPE_LCD)->i80_mode =
!(dsim->mode_flags & MIPI_DSI_MODE_VIDEO);
mutex_unlock(&drm->mode_config.mutex);
if (drm->mode_config.poll_enabled)
drm_kms_helper_hotplug_event(drm);
return 0;
}
static void exynos_dsi_host_detach(struct samsung_dsim *dsim,
struct mipi_dsi_device *device)
{
struct exynos_dsi *dsi = dsim->priv;
struct drm_device *drm = dsi->encoder.dev;
if (drm->mode_config.poll_enabled)
drm_kms_helper_hotplug_event(drm);
}
static int exynos_dsi_bind(struct device *dev, struct device *master, void *data)
{
struct samsung_dsim *dsim = dev_get_drvdata(dev);
struct exynos_dsi *dsi = dsim->priv;
struct drm_encoder *encoder = &dsi->encoder;
struct drm_device *drm_dev = data;
int ret;
drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
if (ret < 0)
return ret;
return mipi_dsi_host_register(&dsim->dsi_host);
}
static void exynos_dsi_unbind(struct device *dev, struct device *master, void *data)
{
struct samsung_dsim *dsim = dev_get_drvdata(dev);
dsim->bridge.funcs->atomic_disable(&dsim->bridge, NULL);
mipi_dsi_host_unregister(&dsim->dsi_host);
}
static const struct component_ops exynos_dsi_component_ops = {
.bind = exynos_dsi_bind,
.unbind = exynos_dsi_unbind,
};
static int exynos_dsi_register_host(struct samsung_dsim *dsim)
{
struct exynos_dsi *dsi;
dsi = devm_kzalloc(dsim->dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
dsim->priv = dsi;
dsim->bridge.pre_enable_prev_first = true;
return component_add(dsim->dev, &exynos_dsi_component_ops);
}
static void exynos_dsi_unregister_host(struct samsung_dsim *dsim)
{
component_del(dsim->dev, &exynos_dsi_component_ops);
}
static const struct samsung_dsim_host_ops exynos_dsi_exynos_host_ops = {
.register_host = exynos_dsi_register_host,
.unregister_host = exynos_dsi_unregister_host,
.attach = exynos_dsi_host_attach,
.detach = exynos_dsi_host_detach,
.te_irq_handler = exynos_dsi_te_irq_handler,
};
static const struct samsung_dsim_plat_data exynos3250_dsi_pdata = {
.hw_type = DSIM_TYPE_EXYNOS3250,
.host_ops = &exynos_dsi_exynos_host_ops,
};
static const struct samsung_dsim_plat_data exynos4210_dsi_pdata = {
.hw_type = DSIM_TYPE_EXYNOS4210,
.host_ops = &exynos_dsi_exynos_host_ops,
};
static const struct samsung_dsim_plat_data exynos5410_dsi_pdata = {
.hw_type = DSIM_TYPE_EXYNOS5410,
.host_ops = &exynos_dsi_exynos_host_ops,
};
static const struct samsung_dsim_plat_data exynos5422_dsi_pdata = {
.hw_type = DSIM_TYPE_EXYNOS5422,
.host_ops = &exynos_dsi_exynos_host_ops,
};
static const struct samsung_dsim_plat_data exynos5433_dsi_pdata = {
.hw_type = DSIM_TYPE_EXYNOS5433,
.host_ops = &exynos_dsi_exynos_host_ops,
};
static const struct of_device_id exynos_dsi_of_match[] = {
{
.compatible = "samsung,exynos3250-mipi-dsi",
.data = &exynos3250_dsi_pdata,
},
{
.compatible = "samsung,exynos4210-mipi-dsi",
.data = &exynos4210_dsi_pdata,
},
{
.compatible = "samsung,exynos5410-mipi-dsi",
.data = &exynos5410_dsi_pdata,
},
{
.compatible = "samsung,exynos5422-mipi-dsi",
.data = &exynos5422_dsi_pdata,
},
{
.compatible = "samsung,exynos5433-mipi-dsi",
.data = &exynos5433_dsi_pdata,
},
{ /* sentinel. */ }
};
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
struct platform_driver dsi_driver = {
.probe = samsung_dsim_probe,
.remove = samsung_dsim_remove,
.driver = {
.name = "exynos-dsi",
.owner = THIS_MODULE,
.pm = &samsung_dsim_pm_ops,
.of_match_table = exynos_dsi_of_match,
},
};
MODULE_AUTHOR("Tomasz Figa <[email protected]>");
MODULE_AUTHOR("Andrzej Hajda <[email protected]>");
MODULE_DESCRIPTION("Samsung SoC MIPI DSI Master");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/exynos/exynos_drm_dsi.c |
// SPDX-License-Identifier: GPL-2.0-only
/* drivers/gpu/drm/exynos5433_drm_decon.c
*
* Copyright (C) 2015 Samsung Electronics Co.Ltd
* Authors:
* Joonyoung Shim <[email protected]>
* Hyungwon Hwang <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
#include "regs-decon5433.h"
#define DSD_CFG_MUX 0x1004
#define DSD_CFG_MUX_TE_UNMASK_GLOBAL BIT(13)
#define WINDOWS_NR 5
#define PRIMARY_WIN 2
#define CURSOR_WIN 4
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
#define I80_HW_TRG (1 << 0)
#define IFTYPE_HDMI (1 << 1)
static const char * const decon_clks_name[] = {
"pclk",
"aclk_decon",
"aclk_smmu_decon0x",
"aclk_xiu_decon0x",
"pclk_smmu_decon0x",
"aclk_smmu_decon1x",
"aclk_xiu_decon1x",
"pclk_smmu_decon1x",
"sclk_decon_vclk",
"sclk_decon_eclk",
};
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
void __iomem *addr;
struct regmap *sysreg;
struct clk *clks[ARRAY_SIZE(decon_clks_name)];
unsigned int irq;
unsigned int irq_vsync;
unsigned int irq_lcd_sys;
unsigned int te_irq;
unsigned long out_type;
int first_win;
spinlock_t vblank_lock;
u32 frame_id;
};
static const uint32_t decon_formats[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
[PRIMARY_WIN] = DRM_PLANE_TYPE_PRIMARY,
[CURSOR_WIN] = DRM_PLANE_TYPE_CURSOR,
};
static const unsigned int capabilities[WINDOWS_NR] = {
0,
EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
};
static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask,
u32 val)
{
val = (val & mask) | (readl(ctx->addr + reg) & ~mask);
writel(val, ctx->addr + reg);
}
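/*
 * Illustrative usage of the helper above (values taken from callers
 * further down): decon_set_bits() is a read-modify-write helper, so
 * val = ~0 sets exactly the bits in @mask and val = 0 clears them:
 *
 *	decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
 *	decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
 *
 * enable and disable a window without disturbing the other WINCONx bits.
 */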
static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
u32 val;
val = VIDINTCON0_INTEN;
if (crtc->i80_mode)
val |= VIDINTCON0_FRAMEDONE;
else
val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
writel(val, ctx->addr + DECON_VIDINTCON0);
enable_irq(ctx->irq);
if (!(ctx->out_type & I80_HW_TRG))
enable_irq(ctx->te_irq);
return 0;
}
static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
if (!(ctx->out_type & I80_HW_TRG))
disable_irq_nosync(ctx->te_irq);
disable_irq_nosync(ctx->irq);
writel(0, ctx->addr + DECON_VIDINTCON0);
}
/* return number of starts/ends of frame transmissions since reset */
static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
{
u32 frm, pfrm, status, cnt = 2;
/* To get a consistent result, repeat the read until the frame id is
 * stable. Usually the loop executes once; in rare cases, when it runs
 * at frame-change time, a second pass is needed.
 */
frm = readl(ctx->addr + DECON_CRFMID);
do {
status = readl(ctx->addr + DECON_VIDCON1);
pfrm = frm;
frm = readl(ctx->addr + DECON_CRFMID);
} while (frm != pfrm && --cnt);
/* CRFMID is incremented on BPORCH in case of I80 and on VSYNC in case
 * of RGB; this has to be taken into account below.
 */
if (!frm)
return 0;
switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) {
case VIDCON1_VSTATUS_VS:
if (!(ctx->crtc->i80_mode))
--frm;
break;
case VIDCON1_VSTATUS_BP:
--frm;
break;
case VIDCON1_I80_ACTIVE:
case VIDCON1_VSTATUS_AC:
if (end)
--frm;
break;
default:
break;
}
return frm;
}
static void decon_setup_trigger(struct decon_context *ctx)
{
if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
return;
if (!(ctx->out_type & I80_HW_TRG)) {
writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
ctx->addr + DECON_TRIGCON);
return;
}
writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | TRIGCON_HWTRIGMASK
| TRIGCON_HWTRIGEN, ctx->addr + DECON_TRIGCON);
if (regmap_update_bits(ctx->sysreg, DSD_CFG_MUX,
DSD_CFG_MUX_TE_UNMASK_GLOBAL, ~0))
DRM_DEV_ERROR(ctx->dev, "Cannot update sysreg.\n");
}
static void decon_commit(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
struct drm_display_mode *m = &crtc->base.mode;
bool interlaced = false;
u32 val;
if (ctx->out_type & IFTYPE_HDMI) {
m->crtc_hsync_start = m->crtc_hdisplay + 10;
m->crtc_hsync_end = m->crtc_htotal - 92;
m->crtc_vsync_start = m->crtc_vdisplay + 1;
m->crtc_vsync_end = m->crtc_vsync_start + 1;
if (m->flags & DRM_MODE_FLAG_INTERLACE)
interlaced = true;
}
decon_setup_trigger(ctx);
/* lcd on; select command or video (RGB) interface */
val = VIDOUT_LCD_ON;
if (interlaced)
val |= VIDOUT_INTERLACE_EN_F;
if (crtc->i80_mode) {
val |= VIDOUT_COMMAND_IF;
} else {
val |= VIDOUT_RGB_IF;
}
writel(val, ctx->addr + DECON_VIDOUTCON0);
if (interlaced)
val = VIDTCON2_LINEVAL(m->vdisplay / 2 - 1) |
VIDTCON2_HOZVAL(m->hdisplay - 1);
else
val = VIDTCON2_LINEVAL(m->vdisplay - 1) |
VIDTCON2_HOZVAL(m->hdisplay - 1);
writel(val, ctx->addr + DECON_VIDTCON2);
if (!crtc->i80_mode) {
int vbp = m->crtc_vtotal - m->crtc_vsync_end;
int vfp = m->crtc_vsync_start - m->crtc_vdisplay;
if (interlaced)
vbp = vbp / 2 - 1;
val = VIDTCON00_VBPD_F(vbp - 1) | VIDTCON00_VFPD_F(vfp - 1);
writel(val, ctx->addr + DECON_VIDTCON00);
val = VIDTCON01_VSPW_F(
m->crtc_vsync_end - m->crtc_vsync_start - 1);
writel(val, ctx->addr + DECON_VIDTCON01);
val = VIDTCON10_HBPD_F(
m->crtc_htotal - m->crtc_hsync_end - 1) |
VIDTCON10_HFPD_F(
m->crtc_hsync_start - m->crtc_hdisplay - 1);
writel(val, ctx->addr + DECON_VIDTCON10);
val = VIDTCON11_HSPW_F(
m->crtc_hsync_end - m->crtc_hsync_start - 1);
writel(val, ctx->addr + DECON_VIDTCON11);
}
/* enable output and display signal */
decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID | VIDCON0_ENVID_F, ~0);
decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
}
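/*
 * Worked example for the porch math above, assuming a hypothetical
 * standard 1920x1080 mode (vdisplay 1080, vsync_start 1084,
 * vsync_end 1089, vtotal 1125):
 *
 *	vfp = 1084 - 1080 = 4;	vbp = 1125 - 1089 = 36;
 *
 * so VIDTCON00 gets VIDTCON00_VBPD_F(35) | VIDTCON00_VFPD_F(3), the
 * hardware fields holding the porch lengths minus one.
 */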
static void decon_win_set_bldeq(struct decon_context *ctx, unsigned int win,
unsigned int alpha, unsigned int pixel_alpha)
{
u32 mask = BLENDERQ_A_FUNC_F(0xf) | BLENDERQ_B_FUNC_F(0xf);
u32 val = 0;
switch (pixel_alpha) {
case DRM_MODE_BLEND_PIXEL_NONE:
case DRM_MODE_BLEND_COVERAGE:
val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA_A);
val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
break;
case DRM_MODE_BLEND_PREMULTI:
default:
if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA0);
val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
} else {
val |= BLENDERQ_A_FUNC_F(BLENDERQ_ONE);
val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
}
break;
}
decon_set_bits(ctx, DECON_BLENDERQx(win), mask, val);
}
static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
unsigned int alpha, unsigned int pixel_alpha)
{
u32 win_alpha = alpha >> 8;
u32 val = 0;
switch (pixel_alpha) {
case DRM_MODE_BLEND_PIXEL_NONE:
break;
case DRM_MODE_BLEND_COVERAGE:
case DRM_MODE_BLEND_PREMULTI:
default:
val |= WINCONx_ALPHA_SEL_F;
val |= WINCONx_BLD_PIX_F;
val |= WINCONx_ALPHA_MUL_F;
break;
}
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_BLEND_MODE_MASK, val);
if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
val = VIDOSD_Wx_ALPHA_R_F(win_alpha) |
VIDOSD_Wx_ALPHA_G_F(win_alpha) |
VIDOSD_Wx_ALPHA_B_F(win_alpha);
decon_set_bits(ctx, DECON_VIDOSDxC(win),
VIDOSDxC_ALPHA0_RGB_MASK, val);
decon_set_bits(ctx, DECON_BLENDCON, BLEND_NEW, BLEND_NEW);
}
}
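/*
 * Note on the alpha scaling above: DRM plane alpha is 16 bits wide
 * (DRM_BLEND_ALPHA_OPAQUE == 0xffff) while the hardware fields are
 * 8 bits, hence win_alpha = alpha >> 8; e.g. a plane alpha of 0x8000
 * (~50%) becomes 0x80 in each of the VIDOSDxC R/G/B alpha fields.
 */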
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
struct drm_framebuffer *fb)
{
struct exynos_drm_plane plane = ctx->planes[win];
struct exynos_drm_plane_state *state =
to_exynos_plane_state(plane.base.state);
unsigned int alpha = state->base.alpha;
unsigned int pixel_alpha;
unsigned long val;
if (fb->format->has_alpha)
pixel_alpha = state->base.pixel_blend_mode;
else
pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
val = readl(ctx->addr + DECON_WINCONx(win));
val &= WINCONx_ENWIN_F;
switch (fb->format->format) {
case DRM_FORMAT_XRGB1555:
val |= WINCONx_BPPMODE_16BPP_I1555;
val |= WINCONx_HAWSWP_F;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_RGB565:
val |= WINCONx_BPPMODE_16BPP_565;
val |= WINCONx_HAWSWP_F;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_XRGB8888:
val |= WINCONx_BPPMODE_24BPP_888;
val |= WINCONx_WSWP_F;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ARGB8888:
default:
val |= WINCONx_BPPMODE_32BPP_A8888;
val |= WINCONx_WSWP_F;
val |= WINCONx_BURSTLEN_16WORD;
break;
}
DRM_DEV_DEBUG_KMS(ctx->dev, "cpp = %u\n", fb->format->cpp[0]);
/*
 * On Exynos, setting the DMA burst to 16 words causes permanent
 * tearing for very small buffers, e.g. the cursor buffer. Switching
 * the burst mode based on plane size is not recommended either, as
 * the plane size varies a lot towards the end of the screen and
 * rapid movement causes unstable DMA, which results in IOMMU faults
 * and tearing.
 */
if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
decon_set_bits(ctx, DECON_WINCONx(win), ~WINCONx_BLEND_MODE_MASK, val);
if (win > 0) {
decon_win_set_bldmod(ctx, win, alpha, pixel_alpha);
decon_win_set_bldeq(ctx, win, alpha, pixel_alpha);
}
}
static void decon_shadow_protect(struct decon_context *ctx, bool protect)
{
decon_set_bits(ctx, DECON_SHADOWCON, SHADOWCON_PROTECT_MASK,
protect ? ~0 : 0);
}
static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
decon_shadow_protect(ctx, true);
}
#define BIT_VAL(x, e, s) (((x) & ((1 << ((e) - (s) + 1)) - 1)) << (s))
#define COORDINATE_X(x) BIT_VAL((x), 23, 12)
#define COORDINATE_Y(x) BIT_VAL((x), 11, 0)
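/*
 * Worked example for the helpers above: BIT_VAL(x, e, s) masks x to
 * (e - s + 1) bits and shifts it to bit position s, so
 *
 *	COORDINATE_X(5) | COORDINATE_Y(9) == (5 << 12) | 9 == 0x5009
 *
 * matching the VIDOSDxA/B layout of a 12-bit Y field in bits [11:0]
 * and a 12-bit X field in bits [23:12].
 */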
static void decon_update_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
struct exynos_drm_plane_state *state =
to_exynos_plane_state(plane->base.state);
struct decon_context *ctx = crtc->ctx;
struct drm_framebuffer *fb = state->base.fb;
unsigned int win = plane->index;
unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
dma_addr_t dma_addr = exynos_drm_fb_dma_addr(fb, 0);
u32 val;
if (crtc->base.mode.flags & DRM_MODE_FLAG_INTERLACE) {
val = COORDINATE_X(state->crtc.x) |
COORDINATE_Y(state->crtc.y / 2);
writel(val, ctx->addr + DECON_VIDOSDxA(win));
val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
COORDINATE_Y((state->crtc.y + state->crtc.h) / 2 - 1);
writel(val, ctx->addr + DECON_VIDOSDxB(win));
} else {
val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y);
writel(val, ctx->addr + DECON_VIDOSDxA(win));
val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
writel(val, ctx->addr + DECON_VIDOSDxB(win));
}
val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
VIDOSD_Wx_ALPHA_B_F(0xff);
writel(val, ctx->addr + DECON_VIDOSDxC(win));
val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
VIDOSD_Wx_ALPHA_B_F(0x0);
writel(val, ctx->addr + DECON_VIDOSDxD(win));
writel(dma_addr, ctx->addr + DECON_VIDW0xADD0B0(win));
val = dma_addr + pitch * state->src.h;
writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
if (!(ctx->out_type & IFTYPE_HDMI))
val = BIT_VAL(pitch - state->crtc.w * cpp, 27, 14)
| BIT_VAL(state->crtc.w * cpp, 13, 0);
else
val = BIT_VAL(pitch - state->crtc.w * cpp, 29, 15)
| BIT_VAL(state->crtc.w * cpp, 14, 0);
writel(val, ctx->addr + DECON_VIDW0xADD2(win));
decon_win_set_pixfmt(ctx, win, fb);
/* window enable */
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
}
static void decon_disable_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
struct decon_context *ctx = crtc->ctx;
unsigned int win = plane->index;
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
}
static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
unsigned long flags;
spin_lock_irqsave(&ctx->vblank_lock, flags);
decon_shadow_protect(ctx, false);
decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
ctx->frame_id = decon_get_frame_count(ctx, true);
exynos_crtc_handle_event(crtc);
spin_unlock_irqrestore(&ctx->vblank_lock, flags);
}
static void decon_swreset(struct decon_context *ctx)
{
unsigned long flags;
u32 val;
int ret;
writel(0, ctx->addr + DECON_VIDCON0);
readl_poll_timeout(ctx->addr + DECON_VIDCON0, val,
~val & VIDCON0_STOP_STATUS, 12, 20000);
writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0);
ret = readl_poll_timeout(ctx->addr + DECON_VIDCON0, val,
~val & VIDCON0_SWRESET, 12, 20000);
WARN(ret < 0, "failed to software reset DECON\n");
spin_lock_irqsave(&ctx->vblank_lock, flags);
ctx->frame_id = 0;
spin_unlock_irqrestore(&ctx->vblank_lock, flags);
if (!(ctx->out_type & IFTYPE_HDMI))
return;
writel(VIDCON0_CLKVALUP | VIDCON0_VLCKFREE, ctx->addr + DECON_VIDCON0);
decon_set_bits(ctx, DECON_CMU,
CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F, ~0);
writel(VIDCON1_VCLK_RUN_VDEN_DISABLE, ctx->addr + DECON_VIDCON1);
writel(CRCCTRL_CRCEN | CRCCTRL_CRCSTART_F | CRCCTRL_CRCCLKEN,
ctx->addr + DECON_CRCCTRL);
}
static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int ret;
ret = pm_runtime_resume_and_get(ctx->dev);
if (ret < 0) {
DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
return;
}
exynos_drm_pipe_clk_enable(crtc, true);
decon_swreset(ctx);
decon_commit(ctx->crtc);
}
static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int i;
if (!(ctx->out_type & I80_HW_TRG))
synchronize_irq(ctx->te_irq);
synchronize_irq(ctx->irq);
/*
* We need to make sure that all windows are disabled before we
* suspend that connector. Otherwise we might try to scan from
* a destroyed buffer later.
*/
for (i = ctx->first_win; i < WINDOWS_NR; i++)
decon_disable_plane(crtc, &ctx->planes[i]);
decon_swreset(ctx);
exynos_drm_pipe_clk_enable(crtc, false);
pm_runtime_put_sync(ctx->dev);
}
static irqreturn_t decon_te_irq_handler(int irq, void *dev_id)
{
struct decon_context *ctx = dev_id;
decon_set_bits(ctx, DECON_TRIGCON, TRIGCON_SWTRIGCMD, ~0);
return IRQ_HANDLED;
}
static void decon_clear_channels(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int win, i, ret;
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
ret = clk_prepare_enable(ctx->clks[i]);
if (ret < 0)
goto err;
}
decon_shadow_protect(ctx, true);
for (win = 0; win < WINDOWS_NR; win++)
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
decon_shadow_protect(ctx, false);
decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
/* TODO: wait for possible vsync */
msleep(50);
err:
while (--i >= 0)
clk_disable_unprepare(ctx->clks[i]);
}
static enum drm_mode_status decon_mode_valid(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct decon_context *ctx = crtc->ctx;
ctx->irq = crtc->i80_mode ? ctx->irq_lcd_sys : ctx->irq_vsync;
if (ctx->irq)
return MODE_OK;
dev_info(ctx->dev, "Sink requires %s mode, but appropriate interrupt is not provided.\n",
crtc->i80_mode ? "command" : "video");
return MODE_BAD;
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.atomic_enable = decon_atomic_enable,
.atomic_disable = decon_atomic_disable,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.atomic_begin = decon_atomic_begin,
.update_plane = decon_update_plane,
.disable_plane = decon_disable_plane,
.mode_valid = decon_mode_valid,
.atomic_flush = decon_atomic_flush,
};
static int decon_bind(struct device *dev, struct device *master, void *data)
{
struct decon_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_plane *exynos_plane;
enum exynos_drm_output_type out_type;
unsigned int win;
int ret;
ctx->drm_dev = drm_dev;
for (win = ctx->first_win; win < WINDOWS_NR; win++) {
ctx->configs[win].pixel_formats = decon_formats;
ctx->configs[win].num_pixel_formats = ARRAY_SIZE(decon_formats);
ctx->configs[win].zpos = win - ctx->first_win;
ctx->configs[win].type = decon_win_types[win];
ctx->configs[win].capabilities = capabilities[win];
ret = exynos_plane_init(drm_dev, &ctx->planes[win], win,
&ctx->configs[win]);
if (ret)
return ret;
}
exynos_plane = &ctx->planes[PRIMARY_WIN];
out_type = (ctx->out_type & IFTYPE_HDMI) ? EXYNOS_DISPLAY_TYPE_HDMI
: EXYNOS_DISPLAY_TYPE_LCD;
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
out_type, &decon_crtc_ops, ctx);
if (IS_ERR(ctx->crtc))
return PTR_ERR(ctx->crtc);
decon_clear_channels(ctx->crtc);
return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}
static void decon_unbind(struct device *dev, struct device *master, void *data)
{
struct decon_context *ctx = dev_get_drvdata(dev);
decon_atomic_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}
static const struct component_ops decon_component_ops = {
.bind = decon_bind,
.unbind = decon_unbind,
};
static void decon_handle_vblank(struct decon_context *ctx)
{
u32 frm;
spin_lock(&ctx->vblank_lock);
frm = decon_get_frame_count(ctx, true);
if (frm != ctx->frame_id) {
/* handle only if incremented, take care of wrap-around */
if ((s32)(frm - ctx->frame_id) > 0)
drm_crtc_handle_vblank(&ctx->crtc->base);
ctx->frame_id = frm;
}
spin_unlock(&ctx->vblank_lock);
}
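/*
 * The signed (s32) cast above makes the "incremented" test safe across
 * a 32-bit wrap of the frame counter: e.g. with frame_id == 0xffffffff
 * and frm == 1, (s32)(frm - ctx->frame_id) == 2 > 0, so the vblank is
 * still delivered.
 */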
static irqreturn_t decon_irq_handler(int irq, void *dev_id)
{
struct decon_context *ctx = dev_id;
u32 val;
val = readl(ctx->addr + DECON_VIDINTCON1);
val &= VIDINTCON1_INTFRMDONEPEND | VIDINTCON1_INTFRMPEND;
if (val) {
writel(val, ctx->addr + DECON_VIDINTCON1);
if (ctx->out_type & IFTYPE_HDMI) {
val = readl(ctx->addr + DECON_VIDOUTCON0);
val &= VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F;
if (val ==
(VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F))
return IRQ_HANDLED;
}
decon_handle_vblank(ctx);
}
return IRQ_HANDLED;
}
static int exynos5433_decon_suspend(struct device *dev)
{
struct decon_context *ctx = dev_get_drvdata(dev);
int i = ARRAY_SIZE(decon_clks_name);
while (--i >= 0)
clk_disable_unprepare(ctx->clks[i]);
return 0;
}
static int exynos5433_decon_resume(struct device *dev)
{
struct decon_context *ctx = dev_get_drvdata(dev);
int i, ret;
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
ret = clk_prepare_enable(ctx->clks[i]);
if (ret < 0)
goto err;
}
return 0;
err:
while (--i >= 0)
clk_disable_unprepare(ctx->clks[i]);
return ret;
}
static DEFINE_RUNTIME_DEV_PM_OPS(exynos5433_decon_pm_ops,
exynos5433_decon_suspend,
exynos5433_decon_resume, NULL);
static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
{
.compatible = "samsung,exynos5433-decon",
.data = (void *)I80_HW_TRG
},
{
.compatible = "samsung,exynos5433-decon-tv",
.data = (void *)(I80_HW_TRG | IFTYPE_HDMI)
},
{},
};
MODULE_DEVICE_TABLE(of, exynos5433_decon_driver_dt_match);
static int decon_conf_irq(struct decon_context *ctx, const char *name,
irq_handler_t handler, unsigned long int flags)
{
struct platform_device *pdev = to_platform_device(ctx->dev);
int ret, irq = platform_get_irq_byname(pdev, name);
if (irq < 0) {
switch (irq) {
case -EPROBE_DEFER:
return irq;
case -ENODATA:
case -ENXIO:
return 0;
default:
dev_err(ctx->dev, "IRQ %s get failed, %d\n", name, irq);
return irq;
}
}
ret = devm_request_irq(ctx->dev, irq, handler,
flags | IRQF_NO_AUTOEN, "drm_decon", ctx);
if (ret < 0) {
dev_err(ctx->dev, "IRQ %s request failed\n", name);
return ret;
}
return irq;
}
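/*
 * decon_conf_irq() thus returns a negative errno on hard failure, 0 when
 * the named IRQ is simply absent (the IRQs are optional), or the IRQ
 * number on success. A caller sketch mirroring the probe code below:
 *
 *	ret = decon_conf_irq(ctx, "te", decon_te_irq_handler,
 *			     IRQF_TRIGGER_RISING);
 *	if (ret < 0)
 *		return ret;		// hard failure, e.g. -EPROBE_DEFER
 *	if (ret) {			// TE pin present: fall back to SW trigger
 *		ctx->te_irq = ret;
 *		ctx->out_type &= ~I80_HW_TRG;
 *	}
 */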
static int exynos5433_decon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct decon_context *ctx;
int ret;
int i;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->dev = dev;
ctx->out_type = (unsigned long)of_device_get_match_data(dev);
spin_lock_init(&ctx->vblank_lock);
if (ctx->out_type & IFTYPE_HDMI)
ctx->first_win = 1;
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
struct clk *clk;
clk = devm_clk_get(ctx->dev, decon_clks_name[i]);
if (IS_ERR(clk))
return PTR_ERR(clk);
ctx->clks[i] = clk;
}
ctx->addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->addr))
return PTR_ERR(ctx->addr);
ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0);
if (ret < 0)
return ret;
ctx->irq_vsync = ret;
ret = decon_conf_irq(ctx, "lcd_sys", decon_irq_handler, 0);
if (ret < 0)
return ret;
ctx->irq_lcd_sys = ret;
ret = decon_conf_irq(ctx, "te", decon_te_irq_handler,
IRQF_TRIGGER_RISING);
if (ret < 0)
return ret;
if (ret) {
ctx->te_irq = ret;
ctx->out_type &= ~I80_HW_TRG;
}
if (ctx->out_type & I80_HW_TRG) {
ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,disp-sysreg");
if (IS_ERR(ctx->sysreg)) {
dev_err(dev, "failed to get system register\n");
return PTR_ERR(ctx->sysreg);
}
}
platform_set_drvdata(pdev, ctx);
pm_runtime_enable(dev);
ret = component_add(dev, &decon_component_ops);
if (ret)
goto err_disable_pm_runtime;
return 0;
err_disable_pm_runtime:
pm_runtime_disable(dev);
return ret;
}
static int exynos5433_decon_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &decon_component_ops);
return 0;
}
struct platform_driver exynos5433_decon_driver = {
.probe = exynos5433_decon_probe,
.remove = exynos5433_decon_remove,
.driver = {
.name = "exynos5433-decon",
.pm = pm_ptr(&exynos5433_decon_pm_ops),
.of_match_table = exynos5433_decon_driver_dt_match,
},
};
| linux-master | drivers/gpu/drm/exynos/exynos5433_drm_decon.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Authors:
* Seung-Woo Kim <[email protected]>
* Inki Dae <[email protected]>
* Joonyoung Shim <[email protected]>
*
* Based on drivers/media/video/s5p-tv/mixer_reg.c
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <drm/drm_blend.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
#include "regs-mixer.h"
#include "regs-vp.h"
#define MIXER_WIN_NR 3
#define VP_DEFAULT_WIN 2
/*
* Mixer color space conversion coefficient triplet.
* Used for CSC from RGB to YCbCr.
* Each coefficient is a 10-bit fixed point number with
* sign and no integer part, i.e.
* [0:8] = fractional part (representing a value y = x / 2^9)
* [9] = sign
* Negative values are encoded with two's complement.
*/
#define MXR_CSC_C(x) ((int)((x) * 512.0) & 0x3ff)
#define MXR_CSC_CT(a0, a1, a2) \
((MXR_CSC_C(a0) << 20) | (MXR_CSC_C(a1) << 10) | (MXR_CSC_C(a2) << 0))
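/*
 * Worked example of the fixed-point encoding above: MXR_CSC_C(0.184)
 * is (int)(0.184 * 512) & 0x3ff = 94 = 0x05e, while the negative
 * coefficient MXR_CSC_C(-0.102) is (int)(-52.224) & 0x3ff = 0x3cc,
 * i.e. -52 in 10-bit two's complement.
 */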
/* YCbCr value, used for mixer background color configuration. */
#define MXR_YCBCR_VAL(y, cb, cr) (((y) << 16) | ((cb) << 8) | ((cr) << 0))
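/*
 * For instance, MXR_YCBCR_VAL(0, 128, 128) == 0x008080, the YCbCr
 * encoding of black used below to initialize the background color
 * registers.
 */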
/* The pixelformats that are natively supported by the mixer. */
#define MXR_FORMAT_RGB565 4
#define MXR_FORMAT_ARGB1555 5
#define MXR_FORMAT_ARGB4444 6
#define MXR_FORMAT_ARGB8888 7
enum mixer_version_id {
MXR_VER_0_0_0_16,
MXR_VER_16_0_33_0,
MXR_VER_128_0_0_184,
};
enum mixer_flag_bits {
MXR_BIT_POWERED,
MXR_BIT_VSYNC,
MXR_BIT_INTERLACE,
MXR_BIT_VP_ENABLED,
MXR_BIT_HAS_SCLK,
};
static const uint32_t mixer_formats[] = {
DRM_FORMAT_XRGB4444,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
static const uint32_t vp_formats[] = {
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
};
struct mixer_context {
struct platform_device *pdev;
struct device *dev;
struct drm_device *drm_dev;
void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[MIXER_WIN_NR];
unsigned long flags;
int irq;
void __iomem *mixer_regs;
void __iomem *vp_regs;
spinlock_t reg_slock;
struct clk *mixer;
struct clk *vp;
struct clk *hdmi;
struct clk *sclk_mixer;
struct clk *sclk_hdmi;
struct clk *mout_mixer;
enum mixer_version_id mxr_ver;
int scan_value;
};
struct mixer_drv_data {
enum mixer_version_id version;
bool is_vp_enabled;
bool has_sclk;
};
static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = {
{
.zpos = 0,
.type = DRM_PLANE_TYPE_PRIMARY,
.pixel_formats = mixer_formats,
.num_pixel_formats = ARRAY_SIZE(mixer_formats),
.capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE |
EXYNOS_DRM_PLANE_CAP_ZPOS |
EXYNOS_DRM_PLANE_CAP_PIX_BLEND |
EXYNOS_DRM_PLANE_CAP_WIN_BLEND,
}, {
.zpos = 1,
.type = DRM_PLANE_TYPE_CURSOR,
.pixel_formats = mixer_formats,
.num_pixel_formats = ARRAY_SIZE(mixer_formats),
.capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE |
EXYNOS_DRM_PLANE_CAP_ZPOS |
EXYNOS_DRM_PLANE_CAP_PIX_BLEND |
EXYNOS_DRM_PLANE_CAP_WIN_BLEND,
}, {
.zpos = 2,
.type = DRM_PLANE_TYPE_OVERLAY,
.pixel_formats = vp_formats,
.num_pixel_formats = ARRAY_SIZE(vp_formats),
.capabilities = EXYNOS_DRM_PLANE_CAP_SCALE |
EXYNOS_DRM_PLANE_CAP_ZPOS |
EXYNOS_DRM_PLANE_CAP_TILE |
EXYNOS_DRM_PLANE_CAP_WIN_BLEND,
},
};
static const u8 filter_y_horiz_tap8[] = {
0, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, 0, 0, 0,
0, 2, 4, 5, 6, 6, 6, 6,
6, 5, 5, 4, 3, 2, 1, 1,
0, -6, -12, -16, -18, -20, -21, -20,
-20, -18, -16, -13, -10, -8, -5, -2,
127, 126, 125, 121, 114, 107, 99, 89,
79, 68, 57, 46, 35, 25, 16, 8,
};
static const u8 filter_y_vert_tap4[] = {
0, -3, -6, -8, -8, -8, -8, -7,
-6, -5, -4, -3, -2, -1, -1, 0,
127, 126, 124, 118, 111, 102, 92, 81,
70, 59, 48, 37, 27, 19, 11, 5,
0, 5, 11, 19, 27, 37, 48, 59,
70, 81, 92, 102, 111, 118, 124, 126,
0, 0, -1, -1, -2, -3, -4, -5,
-6, -7, -8, -8, -8, -8, -6, -3,
};
static const u8 filter_cr_horiz_tap4[] = {
0, -3, -6, -8, -8, -8, -8, -7,
-6, -5, -4, -3, -2, -1, -1, 0,
127, 126, 124, 118, 111, 102, 92, 81,
70, 59, 48, 37, 27, 19, 11, 5,
};
static inline u32 vp_reg_read(struct mixer_context *ctx, u32 reg_id)
{
return readl(ctx->vp_regs + reg_id);
}
static inline void vp_reg_write(struct mixer_context *ctx, u32 reg_id,
u32 val)
{
writel(val, ctx->vp_regs + reg_id);
}
static inline void vp_reg_writemask(struct mixer_context *ctx, u32 reg_id,
u32 val, u32 mask)
{
u32 old = vp_reg_read(ctx, reg_id);
val = (val & mask) | (old & ~mask);
writel(val, ctx->vp_regs + reg_id);
}
static inline u32 mixer_reg_read(struct mixer_context *ctx, u32 reg_id)
{
return readl(ctx->mixer_regs + reg_id);
}
static inline void mixer_reg_write(struct mixer_context *ctx, u32 reg_id,
u32 val)
{
writel(val, ctx->mixer_regs + reg_id);
}
static inline void mixer_reg_writemask(struct mixer_context *ctx,
u32 reg_id, u32 val, u32 mask)
{
u32 old = mixer_reg_read(ctx, reg_id);
val = (val & mask) | (old & ~mask);
writel(val, ctx->mixer_regs + reg_id);
}
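/*
 * As with the masked-write helpers elsewhere in this driver, callers
 * pass val = ~0 to set the masked bits and val = 0 to clear them, e.g.
 * (from the sync handling below):
 *
 *	mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
 *	mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
 */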
static void mixer_regs_dump(struct mixer_context *ctx)
{
#define DUMPREG(reg_id) \
do { \
DRM_DEV_DEBUG_KMS(ctx->dev, #reg_id " = %08x\n", \
(u32)readl(ctx->mixer_regs + reg_id)); \
} while (0)
DUMPREG(MXR_STATUS);
DUMPREG(MXR_CFG);
DUMPREG(MXR_INT_EN);
DUMPREG(MXR_INT_STATUS);
DUMPREG(MXR_LAYER_CFG);
DUMPREG(MXR_VIDEO_CFG);
DUMPREG(MXR_GRAPHIC0_CFG);
DUMPREG(MXR_GRAPHIC0_BASE);
DUMPREG(MXR_GRAPHIC0_SPAN);
DUMPREG(MXR_GRAPHIC0_WH);
DUMPREG(MXR_GRAPHIC0_SXY);
DUMPREG(MXR_GRAPHIC0_DXY);
DUMPREG(MXR_GRAPHIC1_CFG);
DUMPREG(MXR_GRAPHIC1_BASE);
DUMPREG(MXR_GRAPHIC1_SPAN);
DUMPREG(MXR_GRAPHIC1_WH);
DUMPREG(MXR_GRAPHIC1_SXY);
DUMPREG(MXR_GRAPHIC1_DXY);
#undef DUMPREG
}
static void vp_regs_dump(struct mixer_context *ctx)
{
#define DUMPREG(reg_id) \
do { \
DRM_DEV_DEBUG_KMS(ctx->dev, #reg_id " = %08x\n", \
(u32) readl(ctx->vp_regs + reg_id)); \
} while (0)
DUMPREG(VP_ENABLE);
DUMPREG(VP_SRESET);
DUMPREG(VP_SHADOW_UPDATE);
DUMPREG(VP_FIELD_ID);
DUMPREG(VP_MODE);
DUMPREG(VP_IMG_SIZE_Y);
DUMPREG(VP_IMG_SIZE_C);
DUMPREG(VP_PER_RATE_CTRL);
DUMPREG(VP_TOP_Y_PTR);
DUMPREG(VP_BOT_Y_PTR);
DUMPREG(VP_TOP_C_PTR);
DUMPREG(VP_BOT_C_PTR);
DUMPREG(VP_ENDIAN_MODE);
DUMPREG(VP_SRC_H_POSITION);
DUMPREG(VP_SRC_V_POSITION);
DUMPREG(VP_SRC_WIDTH);
DUMPREG(VP_SRC_HEIGHT);
DUMPREG(VP_DST_H_POSITION);
DUMPREG(VP_DST_V_POSITION);
DUMPREG(VP_DST_WIDTH);
DUMPREG(VP_DST_HEIGHT);
DUMPREG(VP_H_RATIO);
DUMPREG(VP_V_RATIO);
#undef DUMPREG
}
static inline void vp_filter_set(struct mixer_context *ctx,
int reg_id, const u8 *data, unsigned int size)
{
/* ensure 4-byte alignment */
BUG_ON(size & 3);
for (; size; size -= 4, reg_id += 4, data += 4) {
u32 val = (data[0] << 24) | (data[1] << 16) |
(data[2] << 8) | data[3];
vp_reg_write(ctx, reg_id, val);
}
}
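/*
 * Packing example for vp_filter_set(): the four u8 coefficients
 * { 0, -1, -1, -1 } (i.e. 0x00, 0xff, 0xff, 0xff) from the first row
 * of filter_y_horiz_tap8 above are written as the single big-endian
 * word 0x00ffffff to the first filter register.
 */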
static void vp_default_filter(struct mixer_context *ctx)
{
vp_filter_set(ctx, VP_POLY8_Y0_LL,
filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
vp_filter_set(ctx, VP_POLY4_Y0_LL,
filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
vp_filter_set(ctx, VP_POLY4_C0_LL,
filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
}
static void mixer_cfg_gfx_blend(struct mixer_context *ctx, unsigned int win,
unsigned int pixel_alpha, unsigned int alpha)
{
u32 win_alpha = alpha >> 8;
u32 val;
val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
switch (pixel_alpha) {
case DRM_MODE_BLEND_PIXEL_NONE:
break;
case DRM_MODE_BLEND_COVERAGE:
val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
break;
case DRM_MODE_BLEND_PREMULTI:
default:
val |= MXR_GRP_CFG_BLEND_PRE_MUL;
val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
break;
}
if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
val |= MXR_GRP_CFG_WIN_BLEND_EN;
val |= win_alpha;
}
mixer_reg_writemask(ctx, MXR_GRAPHIC_CFG(win),
val, MXR_GRP_CFG_MISC_MASK);
}
static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha)
{
u32 win_alpha = alpha >> 8;
u32 val = 0;
if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
val |= MXR_VID_CFG_BLEND_EN;
val |= win_alpha;
}
mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
}
static bool mixer_is_synced(struct mixer_context *ctx)
{
u32 base, shadow;
if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
ctx->mxr_ver == MXR_VER_128_0_0_184)
return !(mixer_reg_read(ctx, MXR_CFG) &
MXR_CFG_LAYER_UPDATE_COUNT_MASK);
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
vp_reg_read(ctx, VP_SHADOW_UPDATE))
return false;
base = mixer_reg_read(ctx, MXR_CFG);
shadow = mixer_reg_read(ctx, MXR_CFG_S);
if (base != shadow)
return false;
base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
if (base != shadow)
return false;
base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
if (base != shadow)
return false;
return true;
}
static int mixer_wait_for_sync(struct mixer_context *ctx)
{
ktime_t timeout = ktime_add_us(ktime_get(), 100000);
while (!mixer_is_synced(ctx)) {
usleep_range(1000, 2000);
if (ktime_compare(ktime_get(), timeout) > 0)
return -ETIMEDOUT;
}
return 0;
}
static void mixer_disable_sync(struct mixer_context *ctx)
{
mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
}
static void mixer_enable_sync(struct mixer_context *ctx)
{
if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
ctx->mxr_ver == MXR_VER_128_0_0_184)
mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE);
}
static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
{
u32 val;
/* choose between interlaced and progressive scan mode */
val = test_bit(MXR_BIT_INTERLACE, &ctx->flags) ?
MXR_CFG_SCAN_INTERLACE : MXR_CFG_SCAN_PROGRESSIVE;
if (ctx->mxr_ver == MXR_VER_128_0_0_184)
mixer_reg_write(ctx, MXR_RESOLUTION,
MXR_MXR_RES_HEIGHT(height) | MXR_MXR_RES_WIDTH(width));
else
val |= ctx->scan_value;
mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_SCAN_MASK);
}
static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, struct drm_display_mode *mode)
{
enum hdmi_quantization_range range = drm_default_rgb_quant_range(mode);
u32 val;
if (mode->vdisplay < 720) {
val = MXR_CFG_RGB601;
} else {
val = MXR_CFG_RGB709;
/* Configure the BT.709 CSC matrix for full range RGB. */
mixer_reg_write(ctx, MXR_CM_COEFF_Y,
MXR_CSC_CT( 0.184, 0.614, 0.063) |
MXR_CM_COEFF_RGB_FULL);
mixer_reg_write(ctx, MXR_CM_COEFF_CB,
MXR_CSC_CT(-0.102, -0.338, 0.440));
mixer_reg_write(ctx, MXR_CM_COEFF_CR,
MXR_CSC_CT( 0.440, -0.399, -0.040));
}
if (range == HDMI_QUANTIZATION_RANGE_FULL)
val |= MXR_CFG_QUANT_RANGE_FULL;
else
val |= MXR_CFG_QUANT_RANGE_LIMITED;
mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
}
static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
unsigned int priority, bool enable)
{
u32 val = enable ? ~0 : 0;
switch (win) {
case 0:
mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
mixer_reg_writemask(ctx, MXR_LAYER_CFG,
MXR_LAYER_CFG_GRP0_VAL(priority),
MXR_LAYER_CFG_GRP0_MASK);
break;
case 1:
mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
mixer_reg_writemask(ctx, MXR_LAYER_CFG,
MXR_LAYER_CFG_GRP1_VAL(priority),
MXR_LAYER_CFG_GRP1_MASK);
break;
case VP_DEFAULT_WIN:
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
vp_reg_writemask(ctx, VP_ENABLE, val, VP_ENABLE_ON);
mixer_reg_writemask(ctx, MXR_CFG, val,
MXR_CFG_VP_ENABLE);
mixer_reg_writemask(ctx, MXR_LAYER_CFG,
MXR_LAYER_CFG_VP_VAL(priority),
MXR_LAYER_CFG_VP_MASK);
}
break;
}
}
static void mixer_run(struct mixer_context *ctx)
{
mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
}
static void mixer_stop(struct mixer_context *ctx)
{
int timeout = 20;
mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
while (!(mixer_reg_read(ctx, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
--timeout)
usleep_range(10000, 12000);
}
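/*
 * With up to 19 sleeps of 10-12 ms each, mixer_stop() waits at most
 * roughly 200-230 ms for MXR_STATUS_REG_IDLE before giving up silently.
 */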
static void mixer_commit(struct mixer_context *ctx)
{
struct drm_display_mode *mode = &ctx->crtc->base.state->adjusted_mode;
mixer_cfg_scan(ctx, mode->hdisplay, mode->vdisplay);
mixer_cfg_rgb_fmt(ctx, mode);
mixer_run(ctx);
}
static void vp_video_buffer(struct mixer_context *ctx,
struct exynos_drm_plane *plane)
{
struct exynos_drm_plane_state *state =
to_exynos_plane_state(plane->base.state);
struct drm_framebuffer *fb = state->base.fb;
unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
dma_addr_t luma_addr[2], chroma_addr[2];
bool is_tiled, is_nv21;
u32 val;
is_nv21 = (fb->format->format == DRM_FORMAT_NV21);
is_tiled = (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE);
luma_addr[0] = exynos_drm_fb_dma_addr(fb, 0);
chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1);
if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
if (is_tiled) {
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
luma_addr[1] = luma_addr[0] + fb->pitches[0];
chroma_addr[1] = chroma_addr[0] + fb->pitches[1];
}
} else {
luma_addr[1] = 0;
chroma_addr[1] = 0;
}
spin_lock_irqsave(&ctx->reg_slock, flags);
/* interlace or progressive scan mode */
val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
/* setup format */
val = (is_nv21 ? VP_MODE_NV21 : VP_MODE_NV12);
val |= (is_tiled ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height));
/* chroma plane for NV12/NV21 is half the height of the luma plane */
vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[1]) |
VP_IMG_VSIZE(fb->height / 2));
vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w);
vp_reg_write(ctx, VP_SRC_H_POSITION,
VP_SRC_H_POSITION_VAL(state->src.x));
vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w);
vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x);
if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h / 2);
vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y / 2);
vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2);
vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2);
} else {
vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h);
vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y);
vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h);
vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y);
}
vp_reg_write(ctx, VP_H_RATIO, state->h_ratio);
vp_reg_write(ctx, VP_V_RATIO, state->v_ratio);
vp_reg_write(ctx, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
/* set buffer address to vp */
vp_reg_write(ctx, VP_TOP_Y_PTR, luma_addr[0]);
vp_reg_write(ctx, VP_BOT_Y_PTR, luma_addr[1]);
vp_reg_write(ctx, VP_TOP_C_PTR, chroma_addr[0]);
vp_reg_write(ctx, VP_BOT_C_PTR, chroma_addr[1]);
mixer_cfg_layer(ctx, plane->index, priority, true);
mixer_cfg_vp_blend(ctx, state->base.alpha);
spin_unlock_irqrestore(&ctx->reg_slock, flags);
mixer_regs_dump(ctx);
vp_regs_dump(ctx);
}
static void mixer_graph_buffer(struct mixer_context *ctx,
struct exynos_drm_plane *plane)
{
struct exynos_drm_plane_state *state =
to_exynos_plane_state(plane->base.state);
struct drm_framebuffer *fb = state->base.fb;
unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
unsigned int win = plane->index;
unsigned int x_ratio = 0, y_ratio = 0;
unsigned int dst_x_offset, dst_y_offset;
unsigned int pixel_alpha;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
if (fb->format->has_alpha)
pixel_alpha = state->base.pixel_blend_mode;
else
pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
switch (fb->format->format) {
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
fmt = MXR_FORMAT_ARGB4444;
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
fmt = MXR_FORMAT_ARGB1555;
break;
case DRM_FORMAT_RGB565:
fmt = MXR_FORMAT_RGB565;
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
default:
fmt = MXR_FORMAT_ARGB8888;
break;
}
/* ratio is already checked by common plane code */
x_ratio = state->h_ratio == (1 << 15);
y_ratio = state->v_ratio == (1 << 15);
dst_x_offset = state->crtc.x;
dst_y_offset = state->crtc.y;
/* translate the dma base address so that the source image offset is zero */
dma_addr = exynos_drm_fb_dma_addr(fb, 0)
+ (state->src.x * fb->format->cpp[0])
+ (state->src.y * fb->pitches[0]);
spin_lock_irqsave(&ctx->reg_slock, flags);
/* setup format */
mixer_reg_writemask(ctx, MXR_GRAPHIC_CFG(win),
MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
/* setup geometry */
mixer_reg_write(ctx, MXR_GRAPHIC_SPAN(win),
fb->pitches[0] / fb->format->cpp[0]);
val = MXR_GRP_WH_WIDTH(state->src.w);
val |= MXR_GRP_WH_HEIGHT(state->src.h);
val |= MXR_GRP_WH_H_SCALE(x_ratio);
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(ctx, MXR_GRAPHIC_WH(win), val);
/* setup offsets in display image */
val = MXR_GRP_DXY_DX(dst_x_offset);
val |= MXR_GRP_DXY_DY(dst_y_offset);
mixer_reg_write(ctx, MXR_GRAPHIC_DXY(win), val);
/* set buffer address to mixer */
mixer_reg_write(ctx, MXR_GRAPHIC_BASE(win), dma_addr);
mixer_cfg_layer(ctx, win, priority, true);
mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha);
spin_unlock_irqrestore(&ctx->reg_slock, flags);
mixer_regs_dump(ctx);
}
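/*
 * Worked example (hypothetical numbers) for the address translation
 * above: with src.x = 16, src.y = 8, cpp = 4 and pitches[0] = 7680,
 * the scanout base advances by 16 * 4 + 8 * 7680 = 61504 bytes, so the
 * hardware starts fetching at pixel (16, 8) of the framebuffer.
 */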
static void vp_win_reset(struct mixer_context *ctx)
{
unsigned int tries = 100;
vp_reg_write(ctx, VP_SRESET, VP_SRESET_PROCESSING);
while (--tries) {
/* wait until VP_SRESET_PROCESSING reads as 0 */
if (~vp_reg_read(ctx, VP_SRESET) & VP_SRESET_PROCESSING)
break;
mdelay(10);
}
WARN(tries == 0, "failed to reset Video Processor\n");
}
static void mixer_win_reset(struct mixer_context *ctx)
{
unsigned long flags;
spin_lock_irqsave(&ctx->reg_slock, flags);
mixer_reg_writemask(ctx, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
/* set output in RGB888 mode */
mixer_reg_writemask(ctx, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
/* 16 beat burst in DMA */
mixer_reg_writemask(ctx, MXR_STATUS, MXR_STATUS_16_BURST,
MXR_STATUS_BURST_MASK);
/* reset default layer priority */
mixer_reg_write(ctx, MXR_LAYER_CFG, 0);
/* set all background colors to RGB (0,0,0) */
mixer_reg_write(ctx, MXR_BG_COLOR0, MXR_YCBCR_VAL(0, 128, 128));
mixer_reg_write(ctx, MXR_BG_COLOR1, MXR_YCBCR_VAL(0, 128, 128));
mixer_reg_write(ctx, MXR_BG_COLOR2, MXR_YCBCR_VAL(0, 128, 128));
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
/* configuration of Video Processor Registers */
vp_win_reset(ctx);
vp_default_filter(ctx);
}
/* disable all layers */
mixer_reg_writemask(ctx, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
mixer_reg_writemask(ctx, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
mixer_reg_writemask(ctx, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
/* set all source image offsets to zero */
mixer_reg_write(ctx, MXR_GRAPHIC_SXY(0), 0);
mixer_reg_write(ctx, MXR_GRAPHIC_SXY(1), 0);
spin_unlock_irqrestore(&ctx->reg_slock, flags);
}
static irqreturn_t mixer_irq_handler(int irq, void *arg)
{
struct mixer_context *ctx = arg;
u32 val;
spin_lock(&ctx->reg_slock);
/* read interrupt status for handling and clearing flags for VSYNC */
val = mixer_reg_read(ctx, MXR_INT_STATUS);
/* handling VSYNC */
if (val & MXR_INT_STATUS_VSYNC) {
/* the vsync interrupt uses different bits for read and clear */
val |= MXR_INT_CLEAR_VSYNC;
val &= ~MXR_INT_STATUS_VSYNC;
/* interlaced scan needs to check the shadow register */
if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)
&& !mixer_is_synced(ctx))
goto out;
drm_crtc_handle_vblank(&ctx->crtc->base);
}
out:
/* clear interrupts */
mixer_reg_write(ctx, MXR_INT_STATUS, val);
spin_unlock(&ctx->reg_slock);
return IRQ_HANDLED;
}
static int mixer_resources_init(struct mixer_context *mixer_ctx)
{
struct device *dev = &mixer_ctx->pdev->dev;
struct resource *res;
int ret;
spin_lock_init(&mixer_ctx->reg_slock);
mixer_ctx->mixer = devm_clk_get(dev, "mixer");
if (IS_ERR(mixer_ctx->mixer)) {
dev_err(dev, "failed to get clock 'mixer'\n");
return -ENODEV;
}
mixer_ctx->hdmi = devm_clk_get(dev, "hdmi");
if (IS_ERR(mixer_ctx->hdmi)) {
dev_err(dev, "failed to get clock 'hdmi'\n");
return PTR_ERR(mixer_ctx->hdmi);
}
mixer_ctx->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR(mixer_ctx->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
return -ENODEV;
}
res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
return -ENXIO;
}
mixer_ctx->mixer_regs = devm_ioremap(dev, res->start,
resource_size(res));
if (mixer_ctx->mixer_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
return -ENXIO;
}
ret = platform_get_irq(mixer_ctx->pdev, 0);
if (ret < 0)
return ret;
mixer_ctx->irq = ret;
ret = devm_request_irq(dev, mixer_ctx->irq, mixer_irq_handler,
0, "drm_mixer", mixer_ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
return ret;
}
return 0;
}
static int vp_resources_init(struct mixer_context *mixer_ctx)
{
struct device *dev = &mixer_ctx->pdev->dev;
struct resource *res;
mixer_ctx->vp = devm_clk_get(dev, "vp");
if (IS_ERR(mixer_ctx->vp)) {
dev_err(dev, "failed to get clock 'vp'\n");
return -ENODEV;
}
if (test_bit(MXR_BIT_HAS_SCLK, &mixer_ctx->flags)) {
mixer_ctx->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
if (IS_ERR(mixer_ctx->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
return -ENODEV;
}
mixer_ctx->mout_mixer = devm_clk_get(dev, "mout_mixer");
if (IS_ERR(mixer_ctx->mout_mixer)) {
dev_err(dev, "failed to get clock 'mout_mixer'\n");
return -ENODEV;
}
if (mixer_ctx->sclk_hdmi && mixer_ctx->mout_mixer)
clk_set_parent(mixer_ctx->mout_mixer,
mixer_ctx->sclk_hdmi);
}
res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
return -ENXIO;
}
mixer_ctx->vp_regs = devm_ioremap(dev, res->start,
resource_size(res));
if (mixer_ctx->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
return -ENXIO;
}
return 0;
}
static int mixer_initialize(struct mixer_context *mixer_ctx,
struct drm_device *drm_dev)
{
int ret;
mixer_ctx->drm_dev = drm_dev;
/* acquire resources: regs, irqs, clocks */
ret = mixer_resources_init(mixer_ctx);
if (ret) {
DRM_DEV_ERROR(mixer_ctx->dev,
"mixer_resources_init failed ret=%d\n", ret);
return ret;
}
if (test_bit(MXR_BIT_VP_ENABLED, &mixer_ctx->flags)) {
/* acquire vp resources: regs, irqs, clocks */
ret = vp_resources_init(mixer_ctx);
if (ret) {
DRM_DEV_ERROR(mixer_ctx->dev,
"vp_resources_init failed ret=%d\n", ret);
return ret;
}
}
return exynos_drm_register_dma(drm_dev, mixer_ctx->dev,
&mixer_ctx->dma_priv);
}
static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
{
exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev,
&mixer_ctx->dma_priv);
}
static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct mixer_context *mixer_ctx = crtc->ctx;
__set_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return 0;
/* enable vsync interrupt */
mixer_reg_writemask(mixer_ctx, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
mixer_reg_writemask(mixer_ctx, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
return 0;
}
static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
{
struct mixer_context *mixer_ctx = crtc->ctx;
__clear_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
/* disable vsync interrupt */
mixer_reg_writemask(mixer_ctx, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
mixer_reg_writemask(mixer_ctx, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
return;
if (mixer_wait_for_sync(ctx))
dev_err(ctx->dev, "timeout waiting for VSYNC\n");
mixer_disable_sync(ctx);
}
static void mixer_update_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
struct mixer_context *mixer_ctx = crtc->ctx;
DRM_DEV_DEBUG_KMS(mixer_ctx->dev, "win: %d\n", plane->index);
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
if (plane->index == VP_DEFAULT_WIN)
vp_video_buffer(mixer_ctx, plane);
else
mixer_graph_buffer(mixer_ctx, plane);
}
static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
struct mixer_context *mixer_ctx = crtc->ctx;
unsigned long flags;
DRM_DEV_DEBUG_KMS(mixer_ctx->dev, "win: %d\n", plane->index);
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
spin_lock_irqsave(&mixer_ctx->reg_slock, flags);
mixer_cfg_layer(mixer_ctx, plane->index, 0, false);
spin_unlock_irqrestore(&mixer_ctx->reg_slock, flags);
}
static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
{
struct mixer_context *mixer_ctx = crtc->ctx;
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
mixer_enable_sync(mixer_ctx);
exynos_crtc_handle_event(crtc);
}
static void mixer_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
int ret;
if (test_bit(MXR_BIT_POWERED, &ctx->flags))
return;
ret = pm_runtime_resume_and_get(ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "failed to enable MIXER device.\n");
return;
}
exynos_drm_pipe_clk_enable(crtc, true);
mixer_disable_sync(ctx);
mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) {
mixer_reg_writemask(ctx, MXR_INT_STATUS, ~0,
MXR_INT_CLEAR_VSYNC);
mixer_reg_writemask(ctx, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
}
mixer_win_reset(ctx);
mixer_commit(ctx);
mixer_enable_sync(ctx);
set_bit(MXR_BIT_POWERED, &ctx->flags);
}
static void mixer_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
int i;
if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
return;
mixer_stop(ctx);
mixer_regs_dump(ctx);
for (i = 0; i < MIXER_WIN_NR; i++)
mixer_disable_plane(crtc, &ctx->planes[i]);
exynos_drm_pipe_clk_enable(crtc, false);
pm_runtime_put(ctx->dev);
clear_bit(MXR_BIT_POWERED, &ctx->flags);
}
static enum drm_mode_status mixer_mode_valid(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct mixer_context *ctx = crtc->ctx;
u32 w = mode->hdisplay, h = mode->vdisplay;
DRM_DEV_DEBUG_KMS(ctx->dev, "xres=%d, yres=%d, refresh=%d, intl=%d\n",
w, h, drm_mode_vrefresh(mode),
!!(mode->flags & DRM_MODE_FLAG_INTERLACE));
if (ctx->mxr_ver == MXR_VER_128_0_0_184)
return MODE_OK;
if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
(w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
(w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
return MODE_OK;
if ((w == 1024 && h == 768) ||
(w == 1366 && h == 768) ||
(w == 1280 && h == 1024))
return MODE_OK;
return MODE_BAD;
}
static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct mixer_context *ctx = crtc->ctx;
int width = mode->hdisplay, height = mode->vdisplay, i;
static const struct {
int hdisplay, vdisplay, htotal, vtotal, scan_val;
} modes[] = {
{ 720, 480, 858, 525, MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD },
{ 720, 576, 864, 625, MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD },
{ 1280, 720, 1650, 750, MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD },
{ 1920, 1080, 2200, 1125, MXR_CFG_SCAN_HD_1080 |
MXR_CFG_SCAN_HD }
};
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
__set_bit(MXR_BIT_INTERLACE, &ctx->flags);
else
__clear_bit(MXR_BIT_INTERLACE, &ctx->flags);
if (ctx->mxr_ver == MXR_VER_128_0_0_184)
return true;
for (i = 0; i < ARRAY_SIZE(modes); ++i)
if (width <= modes[i].hdisplay && height <= modes[i].vdisplay) {
ctx->scan_value = modes[i].scan_val;
if (width < modes[i].hdisplay ||
height < modes[i].vdisplay) {
adjusted_mode->hdisplay = modes[i].hdisplay;
adjusted_mode->hsync_start = modes[i].hdisplay;
adjusted_mode->hsync_end = modes[i].htotal;
adjusted_mode->htotal = modes[i].htotal;
adjusted_mode->vdisplay = modes[i].vdisplay;
adjusted_mode->vsync_start = modes[i].vdisplay;
adjusted_mode->vsync_end = modes[i].vtotal;
adjusted_mode->vtotal = modes[i].vtotal;
}
return true;
}
return false;
}
static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
.atomic_enable = mixer_atomic_enable,
.atomic_disable = mixer_atomic_disable,
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
.atomic_begin = mixer_atomic_begin,
.update_plane = mixer_update_plane,
.disable_plane = mixer_disable_plane,
.atomic_flush = mixer_atomic_flush,
.mode_valid = mixer_mode_valid,
.mode_fixup = mixer_mode_fixup,
};
static const struct mixer_drv_data exynos5420_mxr_drv_data = {
.version = MXR_VER_128_0_0_184,
.is_vp_enabled = 0,
};
static const struct mixer_drv_data exynos5250_mxr_drv_data = {
.version = MXR_VER_16_0_33_0,
.is_vp_enabled = 0,
};
static const struct mixer_drv_data exynos4212_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
};
static const struct mixer_drv_data exynos4210_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
.has_sclk = 1,
};
static const struct of_device_id mixer_match_types[] = {
{
.compatible = "samsung,exynos4210-mixer",
.data = &exynos4210_mxr_drv_data,
}, {
.compatible = "samsung,exynos4212-mixer",
.data = &exynos4212_mxr_drv_data,
}, {
.compatible = "samsung,exynos5-mixer",
.data = &exynos5250_mxr_drv_data,
}, {
.compatible = "samsung,exynos5250-mixer",
.data = &exynos5250_mxr_drv_data,
}, {
.compatible = "samsung,exynos5420-mixer",
.data = &exynos5420_mxr_drv_data,
}, {
/* end node */
}
};
MODULE_DEVICE_TABLE(of, mixer_match_types);
static int mixer_bind(struct device *dev, struct device *manager, void *data)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_plane *exynos_plane;
unsigned int i;
int ret;
ret = mixer_initialize(ctx, drm_dev);
if (ret)
return ret;
for (i = 0; i < MIXER_WIN_NR; i++) {
if (i == VP_DEFAULT_WIN && !test_bit(MXR_BIT_VP_ENABLED,
&ctx->flags))
continue;
ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
&plane_configs[i]);
if (ret)
return ret;
}
exynos_plane = &ctx->planes[DEFAULT_WIN];
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
EXYNOS_DISPLAY_TYPE_HDMI, &mixer_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
mixer_ctx_remove(ctx);
ret = PTR_ERR(ctx->crtc);
goto free_ctx;
}
return 0;
free_ctx:
devm_kfree(dev, ctx);
return ret;
}
static void mixer_unbind(struct device *dev, struct device *master, void *data)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
mixer_ctx_remove(ctx);
}
static const struct component_ops mixer_component_ops = {
.bind = mixer_bind,
.unbind = mixer_unbind,
};
static int mixer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct mixer_drv_data *drv;
struct mixer_context *ctx;
int ret;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
DRM_DEV_ERROR(dev, "failed to alloc mixer context.\n");
return -ENOMEM;
}
drv = of_device_get_match_data(dev);
ctx->pdev = pdev;
ctx->dev = dev;
ctx->mxr_ver = drv->version;
if (drv->is_vp_enabled)
__set_bit(MXR_BIT_VP_ENABLED, &ctx->flags);
if (drv->has_sclk)
__set_bit(MXR_BIT_HAS_SCLK, &ctx->flags);
platform_set_drvdata(pdev, ctx);
pm_runtime_enable(dev);
ret = component_add(&pdev->dev, &mixer_component_ops);
if (ret)
pm_runtime_disable(dev);
return ret;
}
static int mixer_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &mixer_component_ops);
return 0;
}
static int __maybe_unused exynos_mixer_suspend(struct device *dev)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
clk_disable_unprepare(ctx->hdmi);
clk_disable_unprepare(ctx->mixer);
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
clk_disable_unprepare(ctx->vp);
if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags))
clk_disable_unprepare(ctx->sclk_mixer);
}
return 0;
}
static int __maybe_unused exynos_mixer_resume(struct device *dev)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(ctx->mixer);
if (ret < 0) {
DRM_DEV_ERROR(ctx->dev,
"Failed to prepare_enable the mixer clk [%d]\n",
ret);
return ret;
}
ret = clk_prepare_enable(ctx->hdmi);
if (ret < 0) {
DRM_DEV_ERROR(dev,
"Failed to prepare_enable the hdmi clk [%d]\n",
ret);
return ret;
}
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
ret = clk_prepare_enable(ctx->vp);
if (ret < 0) {
DRM_DEV_ERROR(dev,
"Failed to prepare_enable the vp clk [%d]\n",
ret);
return ret;
}
if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) {
ret = clk_prepare_enable(ctx->sclk_mixer);
if (ret < 0) {
DRM_DEV_ERROR(dev,
"Failed to prepare_enable the " \
"sclk_mixer clk [%d]\n",
ret);
return ret;
}
}
}
return 0;
}
static const struct dev_pm_ops exynos_mixer_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
struct platform_driver mixer_driver = {
.driver = {
.name = "exynos-mixer",
.owner = THIS_MODULE,
.pm = &exynos_mixer_pm_ops,
.of_match_table = mixer_match_types,
},
.probe = mixer_probe,
.remove = mixer_remove,
};
| linux-master | drivers/gpu/drm/exynos/exynos_mixer.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_vidi.c
*
* Copyright (C) 2012 Samsung Electronics Co.Ltd
* Authors:
* Inki Dae <[email protected]>
*/
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
/* VIDI uses fixed refresh rate of 50Hz */
#define VIDI_REFRESH_TIME (1000 / 50)
/* vidi has a total of three virtual windows. */
#define WINDOWS_NR 3
#define ctx_from_connector(c) container_of(c, struct vidi_context, \
connector)
struct vidi_context {
struct drm_encoder encoder;
struct drm_device *drm_dev;
struct device *dev;
struct exynos_drm_crtc *crtc;
struct drm_connector connector;
struct exynos_drm_plane planes[WINDOWS_NR];
struct edid *raw_edid;
unsigned int clkdiv;
unsigned int connected;
bool suspended;
struct timer_list timer;
struct mutex lock;
};
static inline struct vidi_context *encoder_to_vidi(struct drm_encoder *e)
{
return container_of(e, struct vidi_context, encoder);
}
static const char fake_edid_info[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05,
0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78,
0x0a, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0xbd,
0xee, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x66, 0x21, 0x50, 0xb0, 0x51, 0x00,
0x1b, 0x30, 0x40, 0x70, 0x36, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e,
0x01, 0x1d, 0x00, 0x72, 0x51, 0xd0, 0x1e, 0x20, 0x6e, 0x28, 0x55, 0x00,
0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
0x4b, 0x1a, 0x44, 0x17, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0xfc, 0x00, 0x53, 0x41, 0x4d, 0x53, 0x55, 0x4e, 0x47,
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0xbc, 0x02, 0x03, 0x1e, 0xf1,
0x46, 0x84, 0x05, 0x03, 0x10, 0x20, 0x22, 0x23, 0x09, 0x07, 0x07, 0x83,
0x01, 0x00, 0x00, 0xe2, 0x00, 0x0f, 0x67, 0x03, 0x0c, 0x00, 0x10, 0x00,
0xb8, 0x2d, 0x01, 0x1d, 0x80, 0x18, 0x71, 0x1c, 0x16, 0x20, 0x58, 0x2c,
0x25, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x9e, 0x8c, 0x0a, 0xd0, 0x8a,
0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00, 0xa0, 0x5a, 0x00, 0x00,
0x00, 0x18, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
0x45, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x06
};
static const uint32_t formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_NV12,
};
static const enum drm_plane_type vidi_win_types[WINDOWS_NR] = {
DRM_PLANE_TYPE_PRIMARY,
DRM_PLANE_TYPE_OVERLAY,
DRM_PLANE_TYPE_CURSOR,
};
static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
if (ctx->suspended)
return -EPERM;
mod_timer(&ctx->timer,
jiffies + msecs_to_jiffies(VIDI_REFRESH_TIME) - 1);
return 0;
}
static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
{
}
static void vidi_update_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
struct drm_plane_state *state = plane->base.state;
struct vidi_context *ctx = crtc->ctx;
dma_addr_t addr;
if (ctx->suspended)
return;
addr = exynos_drm_fb_dma_addr(state->fb, 0);
DRM_DEV_DEBUG_KMS(ctx->dev, "dma_addr = %pad\n", &addr);
}
static void vidi_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
mutex_lock(&ctx->lock);
ctx->suspended = false;
mutex_unlock(&ctx->lock);
drm_crtc_vblank_on(&crtc->base);
}
static void vidi_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
drm_crtc_vblank_off(&crtc->base);
mutex_lock(&ctx->lock);
ctx->suspended = true;
mutex_unlock(&ctx->lock);
}
static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
.atomic_enable = vidi_atomic_enable,
.atomic_disable = vidi_atomic_disable,
.enable_vblank = vidi_enable_vblank,
.disable_vblank = vidi_disable_vblank,
.update_plane = vidi_update_plane,
.atomic_flush = exynos_crtc_handle_event,
};
static void vidi_fake_vblank_timer(struct timer_list *t)
{
struct vidi_context *ctx = from_timer(ctx, t, timer);
if (drm_crtc_handle_vblank(&ctx->crtc->base))
mod_timer(&ctx->timer,
jiffies + msecs_to_jiffies(VIDI_REFRESH_TIME) - 1);
}
static ssize_t vidi_show_connection(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
int rc;
mutex_lock(&ctx->lock);
rc = sprintf(buf, "%d\n", ctx->connected);
mutex_unlock(&ctx->lock);
return rc;
}
static ssize_t vidi_store_connection(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
int ret;
ret = kstrtoint(buf, 0, &ctx->connected);
if (ret)
return ret;
if (ctx->connected > 1)
return -EINVAL;
/* use fake edid data for testing. */
if (!ctx->raw_edid)
ctx->raw_edid = (struct edid *)fake_edid_info;
/* if raw_edid is anything other than the fake data, it cannot be tested. */
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
DRM_DEV_DEBUG_KMS(dev, "edid data is not fake data.\n");
return -EINVAL;
}
DRM_DEV_DEBUG_KMS(dev, "requested connection.\n");
drm_helper_hpd_irq_event(ctx->drm_dev);
return len;
}
static DEVICE_ATTR(connection, 0644, vidi_show_connection,
vidi_store_connection);
static struct attribute *vidi_attrs[] = {
&dev_attr_connection.attr,
NULL,
};
ATTRIBUTE_GROUPS(vidi);
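/*
 * Usage sketch (the sysfs path is hypothetical; it depends on how the
 * platform device is instantiated):
 *   echo 1 > /sys/devices/platform/exynos-drm-vidi/connection
 * fakes a hotplug using the built-in fake EDID; writing 0 disconnects
 * again. Only the values 0 and 1 are accepted.
 */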
int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file_priv)
{
struct vidi_context *ctx = dev_get_drvdata(drm_dev->dev);
struct drm_exynos_vidi_connection *vidi = data;
if (!vidi) {
DRM_DEV_DEBUG_KMS(ctx->dev,
"user data for vidi is null.\n");
return -EINVAL;
}
if (vidi->connection > 1) {
DRM_DEV_DEBUG_KMS(ctx->dev,
"connection should be 0 or 1.\n");
return -EINVAL;
}
if (ctx->connected == vidi->connection) {
DRM_DEV_DEBUG_KMS(ctx->dev,
"same connection request.\n");
return -EINVAL;
}
if (vidi->connection) {
struct edid *raw_edid;
raw_edid = (struct edid *)(unsigned long)vidi->edid;
if (!drm_edid_is_valid(raw_edid)) {
DRM_DEV_DEBUG_KMS(ctx->dev,
"edid data is invalid.\n");
return -EINVAL;
}
ctx->raw_edid = drm_edid_duplicate(raw_edid);
if (!ctx->raw_edid) {
DRM_DEV_DEBUG_KMS(ctx->dev,
"failed to allocate raw_edid.\n");
return -ENOMEM;
}
} else {
/*
 * with connection = 0, free raw_edid,
 * but only if it is not the fake data.
 */
if (ctx->raw_edid && ctx->raw_edid !=
(struct edid *)fake_edid_info) {
kfree(ctx->raw_edid);
ctx->raw_edid = NULL;
}
}
ctx->connected = vidi->connection;
drm_helper_hpd_irq_event(ctx->drm_dev);
return 0;
}
static enum drm_connector_status vidi_detect(struct drm_connector *connector,
bool force)
{
struct vidi_context *ctx = ctx_from_connector(connector);
/*
 * the connection request comes from userspace,
 * which performs hotplug through a dedicated ioctl.
 */
return ctx->connected ? connector_status_connected :
connector_status_disconnected;
}
static void vidi_connector_destroy(struct drm_connector *connector)
{
}
static const struct drm_connector_funcs vidi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = vidi_detect,
.destroy = vidi_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int vidi_get_modes(struct drm_connector *connector)
{
struct vidi_context *ctx = ctx_from_connector(connector);
struct edid *edid;
int edid_len;
/*
 * the edid data comes from userspace and is set in
 * ctx->raw_edid through a dedicated ioctl.
 */
if (!ctx->raw_edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
return -EFAULT;
}
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
if (!edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
return -ENOMEM;
}
drm_connector_update_edid_property(connector, edid);
return drm_add_edid_modes(connector, edid);
}
static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
.get_modes = vidi_get_modes,
};
static int vidi_create_connector(struct drm_encoder *encoder)
{
struct vidi_context *ctx = encoder_to_vidi(encoder);
struct drm_connector *connector = &ctx->connector;
int ret;
connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(ctx->drm_dev, connector,
&vidi_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) {
DRM_DEV_ERROR(ctx->dev,
"Failed to initialize connector with drm\n");
return ret;
}
drm_connector_helper_add(connector, &vidi_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);
return 0;
}
static void exynos_vidi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
}
static void exynos_vidi_enable(struct drm_encoder *encoder)
{
}
static void exynos_vidi_disable(struct drm_encoder *encoder)
{
}
static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
.mode_set = exynos_vidi_mode_set,
.enable = exynos_vidi_enable,
.disable = exynos_vidi_disable,
};
static int vidi_bind(struct device *dev, struct device *master, void *data)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct drm_encoder *encoder = &ctx->encoder;
struct exynos_drm_plane *exynos_plane;
struct exynos_drm_plane_config plane_config = { 0 };
unsigned int i;
int ret;
ctx->drm_dev = drm_dev;
plane_config.pixel_formats = formats;
plane_config.num_pixel_formats = ARRAY_SIZE(formats);
for (i = 0; i < WINDOWS_NR; i++) {
plane_config.zpos = i;
plane_config.type = vidi_win_types[i];
ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
&plane_config);
if (ret)
return ret;
}
exynos_plane = &ctx->planes[DEFAULT_WIN];
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
EXYNOS_DISPLAY_TYPE_VIDI, &vidi_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
DRM_DEV_ERROR(dev, "failed to create crtc.\n");
return PTR_ERR(ctx->crtc);
}
drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_VIDI);
if (ret < 0)
return ret;
ret = vidi_create_connector(encoder);
if (ret) {
DRM_DEV_ERROR(dev, "failed to create connector ret = %d\n",
ret);
drm_encoder_cleanup(encoder);
return ret;
}
return 0;
}
static void vidi_unbind(struct device *dev, struct device *master, void *data)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
del_timer_sync(&ctx->timer);
}
static const struct component_ops vidi_component_ops = {
.bind = vidi_bind,
.unbind = vidi_unbind,
};
static int vidi_probe(struct platform_device *pdev)
{
struct vidi_context *ctx;
struct device *dev = &pdev->dev;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->dev = dev;
timer_setup(&ctx->timer, vidi_fake_vblank_timer, 0);
mutex_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
return component_add(dev, &vidi_component_ops);
}
static int vidi_remove(struct platform_device *pdev)
{
struct vidi_context *ctx = platform_get_drvdata(pdev);
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
kfree(ctx->raw_edid);
ctx->raw_edid = NULL;
}
component_del(&pdev->dev, &vidi_component_ops);
return 0;
}
struct platform_driver vidi_driver = {
.probe = vidi_probe,
.remove = vidi_remove,
.driver = {
.name = "exynos-drm-vidi",
.owner = THIS_MODULE,
.dev_groups = vidi_groups,
},
};
| linux-master | drivers/gpu/drm/exynos/exynos_drm_vidi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012 Samsung Electronics Co.Ltd
* Authors:
* Eunchul Kim <[email protected]>
* Jinyoung Jeon <[email protected]>
* Sangmin Lee <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_ipp.h"
#include "regs-fimc.h"
/*
 * FIMC stands for Fully Interactive Mobile Camera and
 * supports an image scaler/rotator and input/output DMA operations.
 * The input DMA reads image data from memory and the output DMA
 * writes image data back to memory.
 * FIMC also supports image rotation and image effect functions.
 */
#define FIMC_MAX_DEVS 4
#define FIMC_MAX_SRC 2
#define FIMC_MAX_DST 32
#define FIMC_SHFACTOR 10
#define FIMC_BUF_STOP 1
#define FIMC_BUF_START 2
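/*
 * The frame-end interrupt is unmasked once at least FIMC_BUF_START
 * buffers are queued and masked again when no more than FIMC_BUF_STOP
 * remain; see fimc_dst_set_buf_seq().
 */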
#define FIMC_WIDTH_ITU_709 1280
#define FIMC_AUTOSUSPEND_DELAY 2000
static unsigned int fimc_mask = 0xc;
module_param_named(fimc_devs, fimc_mask, uint, 0644);
MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM");
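/*
 * With the default fimc_mask of 0xc (BIT(2) | BIT(3)), only the devices
 * with the "fimc2" and "fimc3" DT aliases are bound to Exynos DRM; see
 * exynos_drm_check_fimc_device() below.
 */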
#define get_fimc_context(dev) dev_get_drvdata(dev)
enum {
FIMC_CLK_LCLK,
FIMC_CLK_GATE,
FIMC_CLK_WB_A,
FIMC_CLK_WB_B,
FIMC_CLKS_MAX
};
static const char * const fimc_clock_names[] = {
[FIMC_CLK_LCLK] = "sclk_fimc",
[FIMC_CLK_GATE] = "fimc",
[FIMC_CLK_WB_A] = "pxl_async0",
[FIMC_CLK_WB_B] = "pxl_async1",
};
/*
 * A structure describing the scaler configuration.
 *
 * @range: color range (narrow or wide).
 * @bypass: whether the scaler path is bypassed.
 * @up_h: horizontal scale up.
 * @up_v: vertical scale up.
 * @hratio: horizontal ratio.
 * @vratio: vertical ratio.
 */
struct fimc_scaler {
bool range;
bool bypass;
bool up_h;
bool up_v;
u32 hratio;
u32 vratio;
};
/*
 * A structure describing the fimc context.
 *
 * @regs: memory mapped io registers.
 * @lock: locking of operations.
 * @clocks: fimc clocks.
 * @sc: scaler information.
 * @id: fimc id.
 * @irq: irq number.
 */
struct fimc_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
void *dma_priv;
struct device *dev;
struct exynos_drm_ipp_task *task;
struct exynos_drm_ipp_formats *formats;
unsigned int num_formats;
void __iomem *regs;
spinlock_t lock;
struct clk *clocks[FIMC_CLKS_MAX];
struct fimc_scaler sc;
int id;
int irq;
};
static u32 fimc_read(struct fimc_context *ctx, u32 reg)
{
return readl(ctx->regs + reg);
}
static void fimc_write(struct fimc_context *ctx, u32 val, u32 reg)
{
writel(val, ctx->regs + reg);
}
static void fimc_set_bits(struct fimc_context *ctx, u32 reg, u32 bits)
{
void __iomem *r = ctx->regs + reg;
writel(readl(r) | bits, r);
}
static void fimc_clear_bits(struct fimc_context *ctx, u32 reg, u32 bits)
{
void __iomem *r = ctx->regs + reg;
writel(readl(r) & ~bits, r);
}
static void fimc_sw_reset(struct fimc_context *ctx)
{
u32 cfg;
/* stop dma operation */
cfg = fimc_read(ctx, EXYNOS_CISTATUS);
if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg))
fimc_clear_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
fimc_set_bits(ctx, EXYNOS_CISRCFMT, EXYNOS_CISRCFMT_ITU601_8BIT);
/* disable image capture */
fimc_clear_bits(ctx, EXYNOS_CIIMGCPT,
EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
/* s/w reset */
fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST);
/* s/w reset complete */
fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST);
/* reset sequence */
fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ);
}
static void fimc_set_type_ctrl(struct fimc_context *ctx)
{
u32 cfg;
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
EXYNOS_CIGCTRL_SELWRITEBACK_A |
EXYNOS_CIGCTRL_SELCAM_MIPI_A |
EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
}
static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
{
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]\n", enable);
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
if (enable)
cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
else
cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
}
static void fimc_mask_irq(struct fimc_context *ctx, bool enable)
{
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]\n", enable);
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
if (enable) {
cfg &= ~EXYNOS_CIGCTRL_IRQ_OVFEN;
cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE | EXYNOS_CIGCTRL_IRQ_LEVEL;
} else
cfg &= ~EXYNOS_CIGCTRL_IRQ_ENABLE;
fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
}
static void fimc_clear_irq(struct fimc_context *ctx)
{
fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_CLR);
}
static bool fimc_check_ovf(struct fimc_context *ctx)
{
u32 status, flag;
status = fimc_read(ctx, EXYNOS_CISTATUS);
flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
EXYNOS_CISTATUS_OVFICR;
DRM_DEV_DEBUG_KMS(ctx->dev, "flag[0x%x]\n", flag);
if (status & flag) {
fimc_set_bits(ctx, EXYNOS_CIWDOFST,
EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
EXYNOS_CIWDOFST_CLROVFICR);
DRM_DEV_ERROR(ctx->dev,
"overflow occurred at %d, status 0x%x.\n",
ctx->id, status);
return true;
}
return false;
}
static bool fimc_check_frame_end(struct fimc_context *ctx)
{
u32 cfg;
cfg = fimc_read(ctx, EXYNOS_CISTATUS);
DRM_DEV_DEBUG_KMS(ctx->dev, "cfg[0x%x]\n", cfg);
if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
return false;
cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
fimc_write(ctx, cfg, EXYNOS_CISTATUS);
return true;
}
static int fimc_get_buf_id(struct fimc_context *ctx)
{
u32 cfg;
int frame_cnt, buf_id;
cfg = fimc_read(ctx, EXYNOS_CISTATUS2);
frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
if (frame_cnt == 0)
frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
DRM_DEV_DEBUG_KMS(ctx->dev, "present[%d]before[%d]\n",
EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
if (frame_cnt == 0) {
DRM_DEV_ERROR(ctx->dev, "failed to get frame count.\n");
return -EIO;
}
buf_id = frame_cnt - 1;
DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]\n", buf_id);
return buf_id;
}
static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
{
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]\n", enable);
cfg = fimc_read(ctx, EXYNOS_CIOCTRL);
if (enable)
cfg |= EXYNOS_CIOCTRL_LASTENDEN;
else
cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
}
static void fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
{
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
/* RGB */
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
switch (fmt) {
case DRM_FORMAT_RGB565:
cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
return;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
return;
default:
/* bypass */
break;
}
/* YUV */
cfg = fimc_read(ctx, EXYNOS_MSCTRL);
cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
EXYNOS_MSCTRL_C_INT_IN_2PLANE |
EXYNOS_MSCTRL_ORDER422_YCBYCR);
switch (fmt) {
case DRM_FORMAT_YUYV:
cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
break;
case DRM_FORMAT_YVYU:
cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
break;
case DRM_FORMAT_UYVY:
cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
break;
case DRM_FORMAT_VYUY:
case DRM_FORMAT_YUV444:
cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
break;
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV61:
cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
EXYNOS_MSCTRL_C_INT_IN_2PLANE);
break;
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
break;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
EXYNOS_MSCTRL_C_INT_IN_2PLANE);
break;
}
fimc_write(ctx, cfg, EXYNOS_MSCTRL);
}
static void fimc_src_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = fimc_read(ctx, EXYNOS_MSCTRL);
cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
switch (fmt) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
break;
case DRM_FORMAT_YUV444:
cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
break;
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_YUV422:
cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
break;
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
break;
}
fimc_write(ctx, cfg, EXYNOS_MSCTRL);
cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
if (tiled)
cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
else
cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
fimc_src_set_fmt_order(ctx, fmt);
}
static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
{
unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
u32 cfg1, cfg2;
DRM_DEV_DEBUG_KMS(ctx->dev, "rotation[%x]\n", rotation);
cfg1 = fimc_read(ctx, EXYNOS_MSCTRL);
cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
EXYNOS_MSCTRL_FLIP_Y_MIRROR);
cfg2 = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
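/*
 * DRM_MODE_ROTATE_180 is realized as X plus Y mirroring, while
 * ROTATE_90/270 additionally set the INROT90 bit; a user-supplied
 * DRM_MODE_REFLECT_X/Y then toggles the corresponding mirror bit.
 */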
switch (degree) {
case DRM_MODE_ROTATE_0:
if (rotation & DRM_MODE_REFLECT_X)
cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
if (rotation & DRM_MODE_REFLECT_Y)
cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
break;
case DRM_MODE_ROTATE_90:
cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
if (rotation & DRM_MODE_REFLECT_X)
cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
if (rotation & DRM_MODE_REFLECT_Y)
cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
break;
case DRM_MODE_ROTATE_180:
cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
EXYNOS_MSCTRL_FLIP_Y_MIRROR);
if (rotation & DRM_MODE_REFLECT_X)
cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
if (rotation & DRM_MODE_REFLECT_Y)
cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
break;
case DRM_MODE_ROTATE_270:
cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
EXYNOS_MSCTRL_FLIP_Y_MIRROR);
cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
if (rotation & DRM_MODE_REFLECT_X)
cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
if (rotation & DRM_MODE_REFLECT_Y)
cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
break;
}
fimc_write(ctx, cfg1, EXYNOS_MSCTRL);
fimc_write(ctx, cfg2, EXYNOS_CITRGFMT);
}
static void fimc_set_window(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg, h1, h2, v1, v2;
/* cropped image */
h1 = buf->rect.x;
h2 = real_width - buf->rect.w - buf->rect.x;
v1 = buf->rect.y;
v2 = buf->buf.height - buf->rect.h - buf->rect.y;
DRM_DEV_DEBUG_KMS(ctx->dev, "x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
real_width, buf->buf.height);
DRM_DEV_DEBUG_KMS(ctx->dev, "h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1,
v2);
/*
 * set window offset 1 and 2 sizes;
 * see figure 43-21 in the user manual
 */
cfg = fimc_read(ctx, EXYNOS_CIWDOFST);
cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
EXYNOS_CIWDOFST_WINVEROFST_MASK);
cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
EXYNOS_CIWDOFST_WINVEROFST(v1));
cfg |= EXYNOS_CIWDOFST_WINOFSEN;
fimc_write(ctx, cfg, EXYNOS_CIWDOFST);
cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
EXYNOS_CIWDOFST2_WINVEROFST2(v2));
fimc_write(ctx, cfg, EXYNOS_CIWDOFST2);
}
static void fimc_src_set_size(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "hsize[%d]vsize[%d]\n", real_width,
buf->buf.height);
/* original size */
cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) |
EXYNOS_ORGISIZE_VERTICAL(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
DRM_DEV_DEBUG_KMS(ctx->dev, "x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x,
buf->rect.y, buf->rect.w, buf->rect.h);
/* set input DMA image size */
cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE);
cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(buf->rect.w) |
EXYNOS_CIREAL_ISIZE_HEIGHT(buf->rect.h));
fimc_write(ctx, cfg, EXYNOS_CIREAL_ISIZE);
/*
* set input FIFO image size
* for now, we support only ITU601 8 bit mode
*/
cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) |
EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
/* offset Y(RGB), Cb, Cr */
cfg = (EXYNOS_CIIYOFF_HORIZONTAL(buf->rect.x) |
EXYNOS_CIIYOFF_VERTICAL(buf->rect.y));
fimc_write(ctx, cfg, EXYNOS_CIIYOFF);
cfg = (EXYNOS_CIICBOFF_HORIZONTAL(buf->rect.x) |
EXYNOS_CIICBOFF_VERTICAL(buf->rect.y));
fimc_write(ctx, cfg, EXYNOS_CIICBOFF);
cfg = (EXYNOS_CIICROFF_HORIZONTAL(buf->rect.x) |
EXYNOS_CIICROFF_VERTICAL(buf->rect.y));
fimc_write(ctx, cfg, EXYNOS_CIICROFF);
fimc_set_window(ctx, buf);
}
static void fimc_src_set_addr(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
fimc_write(ctx, buf->dma_addr[0], EXYNOS_CIIYSA(0));
fimc_write(ctx, buf->dma_addr[1], EXYNOS_CIICBSA(0));
fimc_write(ctx, buf->dma_addr[2], EXYNOS_CIICRSA(0));
}
static void fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
{
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
/* RGB */
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
switch (fmt) {
case DRM_FORMAT_RGB565:
cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
return;
case DRM_FORMAT_RGB888:
cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
return;
case DRM_FORMAT_XRGB8888:
cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
break;
default:
/* bypass */
break;
}
/* YUV */
cfg = fimc_read(ctx, EXYNOS_CIOCTRL);
cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
EXYNOS_CIOCTRL_ORDER422_MASK |
EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
switch (fmt) {
case DRM_FORMAT_XRGB8888:
cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
break;
case DRM_FORMAT_YUYV:
cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
break;
case DRM_FORMAT_YVYU:
cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
break;
case DRM_FORMAT_UYVY:
cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
break;
case DRM_FORMAT_VYUY:
cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
break;
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV61:
cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
break;
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
break;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
break;
}
fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
}
static void fimc_dst_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = fimc_read(ctx, EXYNOS_CIEXTEN);
if (fmt == DRM_FORMAT_AYUV) {
cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
fimc_write(ctx, cfg, EXYNOS_CIEXTEN);
} else {
cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
fimc_write(ctx, cfg, EXYNOS_CIEXTEN);
cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
switch (fmt) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
break;
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_YUV422:
cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
break;
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
break;
}
fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
}
cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
if (tiled)
cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
else
cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
fimc_dst_set_fmt_order(ctx, fmt);
}
static void fimc_dst_set_transf(struct fimc_context *ctx, unsigned int rotation)
{
unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "rotation[0x%x]\n", rotation);
cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
switch (degree) {
case DRM_MODE_ROTATE_0:
if (rotation & DRM_MODE_REFLECT_X)
cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
if (rotation & DRM_MODE_REFLECT_Y)
cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
break;
case DRM_MODE_ROTATE_90:
cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
if (rotation & DRM_MODE_REFLECT_X)
cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
if (rotation & DRM_MODE_REFLECT_Y)
cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
break;
case DRM_MODE_ROTATE_180:
cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
if (rotation & DRM_MODE_REFLECT_X)
cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
if (rotation & DRM_MODE_REFLECT_Y)
cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
break;
case DRM_MODE_ROTATE_270:
cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
EXYNOS_CITRGFMT_FLIP_X_MIRROR |
EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
if (rotation & DRM_MODE_REFLECT_X)
cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
if (rotation & DRM_MODE_REFLECT_Y)
cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
break;
}
fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
}
static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
struct drm_exynos_ipp_task_rect *src,
struct drm_exynos_ipp_task_rect *dst)
{
u32 cfg, cfg_ext, shfactor;
u32 pre_dst_width, pre_dst_height;
u32 hfactor, vfactor;
int ret = 0;
u32 src_w, src_h, dst_w, dst_h;
cfg_ext = fimc_read(ctx, EXYNOS_CITRGFMT);
if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
src_w = src->h;
src_h = src->w;
} else {
src_w = src->w;
src_h = src->h;
}
if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
dst_w = dst->h;
dst_h = dst->w;
} else {
dst_w = dst->w;
dst_h = dst->h;
}
/* fimc_ippdrv_check_property ensures that the divisors are non-zero */
hfactor = fls(src_w / dst_w / 2);
if (hfactor > FIMC_SHFACTOR / 2) {
dev_err(ctx->dev, "failed to get ratio horizontal.\n");
return -EINVAL;
}
vfactor = fls(src_h / dst_h / 2);
if (vfactor > FIMC_SHFACTOR / 2) {
dev_err(ctx->dev, "failed to get ratio vertical.\n");
return -EINVAL;
}
pre_dst_width = src_w >> hfactor;
pre_dst_height = src_h >> vfactor;
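/*
 * Worked example (hypothetical sizes): scaling 1920x1080 down to
 * 480x270 gives src_w / dst_w = 4, so hfactor = vfactor = fls(4 / 2) = 2;
 * the prescaler outputs 1920 >> 2 = 480 by 1080 >> 2 = 270 and the main
 * scaler ratios computed below, e.g. (1920 << 14) / (480 << 2), evaluate
 * to exactly 1 << 14, i.e. a 1:1 pass-through.
 */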
DRM_DEV_DEBUG_KMS(ctx->dev, "pre_dst_width[%d]pre_dst_height[%d]\n",
pre_dst_width, pre_dst_height);
DRM_DEV_DEBUG_KMS(ctx->dev, "hfactor[%d]vfactor[%d]\n", hfactor,
vfactor);
sc->hratio = (src_w << 14) / (dst_w << hfactor);
sc->vratio = (src_h << 14) / (dst_h << vfactor);
sc->up_h = (dst_w >= src_w);
sc->up_v = (dst_h >= src_h);
DRM_DEV_DEBUG_KMS(ctx->dev, "hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
sc->hratio, sc->vratio, sc->up_h, sc->up_v);
shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
DRM_DEV_DEBUG_KMS(ctx->dev, "shfactor[%d]\n", shfactor);
cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
EXYNOS_CISCPRERATIO_PREHORRATIO(1 << hfactor) |
EXYNOS_CISCPRERATIO_PREVERRATIO(1 << vfactor));
fimc_write(ctx, cfg, EXYNOS_CISCPRERATIO);
cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
fimc_write(ctx, cfg, EXYNOS_CISCPREDST);
return ret;
}
static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
{
u32 cfg, cfg_ext;
DRM_DEV_DEBUG_KMS(ctx->dev, "range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
sc->range, sc->bypass, sc->up_h, sc->up_v);
DRM_DEV_DEBUG_KMS(ctx->dev, "hratio[%d]vratio[%d]\n",
sc->hratio, sc->vratio);
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
EXYNOS_CISCCTRL_CSCR2Y_WIDE |
EXYNOS_CISCCTRL_CSCY2R_WIDE);
if (sc->range)
cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
EXYNOS_CISCCTRL_CSCY2R_WIDE);
if (sc->bypass)
cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
if (sc->up_h)
cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
if (sc->up_v)
cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
cfg_ext = fimc_read(ctx, EXYNOS_CIEXTEN);
cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
fimc_write(ctx, cfg_ext, EXYNOS_CIEXTEN);
}
static void fimc_dst_set_size(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg, cfg_ext;
DRM_DEV_DEBUG_KMS(ctx->dev, "hsize[%d]vsize[%d]\n", real_width,
buf->buf.height);
/* original size */
cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) |
EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
DRM_DEV_DEBUG_KMS(ctx->dev, "x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x,
buf->rect.y,
buf->rect.w, buf->rect.h);
/* CSC ITU */
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
if (buf->buf.width >= FIMC_WIDTH_ITU_709)
cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
else
cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
cfg_ext = fimc_read(ctx, EXYNOS_CITRGFMT);
/* target image size */
cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
EXYNOS_CITRGFMT_TARGETV_MASK);
if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE)
cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(buf->rect.h) |
EXYNOS_CITRGFMT_TARGETVSIZE(buf->rect.w));
else
cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(buf->rect.w) |
EXYNOS_CITRGFMT_TARGETVSIZE(buf->rect.h));
fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
/* target area */
cfg = EXYNOS_CITAREA_TARGET_AREA(buf->rect.w * buf->rect.h);
fimc_write(ctx, cfg, EXYNOS_CITAREA);
/* offset Y(RGB), Cb, Cr */
cfg = (EXYNOS_CIOYOFF_HORIZONTAL(buf->rect.x) |
EXYNOS_CIOYOFF_VERTICAL(buf->rect.y));
fimc_write(ctx, cfg, EXYNOS_CIOYOFF);
cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(buf->rect.x) |
EXYNOS_CIOCBOFF_VERTICAL(buf->rect.y));
fimc_write(ctx, cfg, EXYNOS_CIOCBOFF);
cfg = (EXYNOS_CIOCROFF_HORIZONTAL(buf->rect.x) |
EXYNOS_CIOCROFF_VERTICAL(buf->rect.y));
fimc_write(ctx, cfg, EXYNOS_CIOCROFF);
}
static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
bool enqueue)
{
unsigned long flags;
u32 buf_num;
u32 cfg;
DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]enqueu[%d]\n", buf_id, enqueue);
spin_lock_irqsave(&ctx->lock, flags);
cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
if (enqueue)
cfg |= (1 << buf_id);
else
cfg &= ~(1 << buf_id);
fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);
buf_num = hweight32(cfg);
if (enqueue && buf_num >= FIMC_BUF_START)
fimc_mask_irq(ctx, true);
else if (!enqueue && buf_num <= FIMC_BUF_STOP)
fimc_mask_irq(ctx, false);
spin_unlock_irqrestore(&ctx->lock, flags);
}
static void fimc_dst_set_addr(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
fimc_write(ctx, buf->dma_addr[0], EXYNOS_CIOYSA(0));
fimc_write(ctx, buf->dma_addr[1], EXYNOS_CIOCBSA(0));
fimc_write(ctx, buf->dma_addr[2], EXYNOS_CIOCRSA(0));
fimc_dst_set_buf_seq(ctx, 0, true);
}
static void fimc_stop(struct fimc_context *ctx);
static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
{
struct fimc_context *ctx = dev_id;
int buf_id;
DRM_DEV_DEBUG_KMS(ctx->dev, "fimc id[%d]\n", ctx->id);
fimc_clear_irq(ctx);
if (fimc_check_ovf(ctx))
return IRQ_NONE;
if (!fimc_check_frame_end(ctx))
return IRQ_NONE;
buf_id = fimc_get_buf_id(ctx);
if (buf_id < 0)
return IRQ_HANDLED;
DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]\n", buf_id);
if (ctx->task) {
struct exynos_drm_ipp_task *task = ctx->task;
ctx->task = NULL;
pm_runtime_mark_last_busy(ctx->dev);
pm_runtime_put_autosuspend(ctx->dev);
exynos_drm_ipp_task_done(task, 0);
}
fimc_dst_set_buf_seq(ctx, buf_id, false);
fimc_stop(ctx);
return IRQ_HANDLED;
}
static void fimc_clear_addr(struct fimc_context *ctx)
{
int i;
for (i = 0; i < FIMC_MAX_SRC; i++) {
fimc_write(ctx, 0, EXYNOS_CIIYSA(i));
fimc_write(ctx, 0, EXYNOS_CIICBSA(i));
fimc_write(ctx, 0, EXYNOS_CIICRSA(i));
}
for (i = 0; i < FIMC_MAX_DST; i++) {
fimc_write(ctx, 0, EXYNOS_CIOYSA(i));
fimc_write(ctx, 0, EXYNOS_CIOCBSA(i));
fimc_write(ctx, 0, EXYNOS_CIOCRSA(i));
}
}
static void fimc_reset(struct fimc_context *ctx)
{
/* reset h/w block */
fimc_sw_reset(ctx);
/* reset scaler capability */
memset(&ctx->sc, 0x0, sizeof(ctx->sc));
fimc_clear_addr(ctx);
}
static void fimc_start(struct fimc_context *ctx)
{
u32 cfg0, cfg1;
fimc_mask_irq(ctx, true);
/* JPEG capture mode is not used here, so keep it disabled */
fimc_handle_jpeg(ctx, false);
fimc_set_scaler(ctx, &ctx->sc);
fimc_set_type_ctrl(ctx);
fimc_handle_lastend(ctx, false);
/* setup dma */
cfg0 = fimc_read(ctx, EXYNOS_MSCTRL);
cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
fimc_write(ctx, cfg0, EXYNOS_MSCTRL);
/* Reset status */
fimc_write(ctx, 0x0, EXYNOS_CISTATUS);
cfg0 = fimc_read(ctx, EXYNOS_CIIMGCPT);
cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
/* Scaler */
cfg1 = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
EXYNOS_CISCCTRL_SCALERSTART);
fimc_write(ctx, cfg1, EXYNOS_CISCCTRL);
/* Enable image capture */
cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
fimc_write(ctx, cfg0, EXYNOS_CIIMGCPT);
/* Enable frame end irq by clearing its disable bit */
fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK);
fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
}
static void fimc_stop(struct fimc_context *ctx)
{
u32 cfg;
/* Source clear */
cfg = fimc_read(ctx, EXYNOS_MSCTRL);
cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
cfg &= ~EXYNOS_MSCTRL_ENVID;
fimc_write(ctx, cfg, EXYNOS_MSCTRL);
fimc_mask_irq(ctx, false);
/* reset sequence */
fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ);
/* Scaler disable */
fimc_clear_bits(ctx, EXYNOS_CISCCTRL, EXYNOS_CISCCTRL_SCALERSTART);
/* Disable image capture */
fimc_clear_bits(ctx, EXYNOS_CIIMGCPT,
EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
/* Disable frame end irq */
fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
}
static int fimc_commit(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
struct fimc_context *ctx =
container_of(ipp, struct fimc_context, ipp);
int ret;
ret = pm_runtime_resume_and_get(ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "failed to enable FIMC device.\n");
return ret;
}
ctx->task = task;
fimc_src_set_fmt(ctx, task->src.buf.fourcc, task->src.buf.modifier);
fimc_src_set_size(ctx, &task->src);
fimc_src_set_transf(ctx, DRM_MODE_ROTATE_0);
fimc_src_set_addr(ctx, &task->src);
fimc_dst_set_fmt(ctx, task->dst.buf.fourcc, task->dst.buf.modifier);
fimc_dst_set_transf(ctx, task->transform.rotation);
fimc_dst_set_size(ctx, &task->dst);
fimc_dst_set_addr(ctx, &task->dst);
fimc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect);
fimc_start(ctx);
return 0;
}
static void fimc_abort(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
struct fimc_context *ctx =
container_of(ipp, struct fimc_context, ipp);
fimc_reset(ctx);
if (ctx->task) {
struct exynos_drm_ipp_task *task = ctx->task;
ctx->task = NULL;
pm_runtime_mark_last_busy(ctx->dev);
pm_runtime_put_autosuspend(ctx->dev);
exynos_drm_ipp_task_done(task, -EIO);
}
}
static struct exynos_drm_ipp_funcs ipp_funcs = {
.commit = fimc_commit,
.abort = fimc_abort,
};
static int fimc_bind(struct device *dev, struct device *master, void *data)
{
struct fimc_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
ctx->formats, ctx->num_formats, "fimc");
dev_info(dev, "The exynos fimc has been probed successfully\n");
return 0;
}
static void fimc_unbind(struct device *dev, struct device *master,
void *data)
{
struct fimc_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
}
static const struct component_ops fimc_component_ops = {
.bind = fimc_bind,
.unbind = fimc_unbind,
};
static void fimc_put_clocks(struct fimc_context *ctx)
{
int i;
for (i = 0; i < FIMC_CLKS_MAX; i++) {
if (IS_ERR(ctx->clocks[i]))
continue;
clk_put(ctx->clocks[i]);
ctx->clocks[i] = ERR_PTR(-EINVAL);
}
}
static int fimc_setup_clocks(struct fimc_context *ctx)
{
struct device *fimc_dev = ctx->dev;
struct device *dev;
int ret, i;
for (i = 0; i < FIMC_CLKS_MAX; i++)
ctx->clocks[i] = ERR_PTR(-EINVAL);
for (i = 0; i < FIMC_CLKS_MAX; i++) {
if (i == FIMC_CLK_WB_A || i == FIMC_CLK_WB_B)
dev = fimc_dev->parent;
else
dev = fimc_dev;
ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]);
if (IS_ERR(ctx->clocks[i])) {
ret = PTR_ERR(ctx->clocks[i]);
dev_err(fimc_dev, "failed to get clock: %s\n",
fimc_clock_names[i]);
goto e_clk_free;
}
}
ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]);
if (!ret)
return ret;
e_clk_free:
fimc_put_clocks(ctx);
return ret;
}
int exynos_drm_check_fimc_device(struct device *dev)
{
int id = of_alias_get_id(dev->of_node, "fimc");
if (id >= 0 && (BIT(id) & fimc_mask))
return 0;
return -ENODEV;
}
static const unsigned int fimc_formats[] = {
DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565,
DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_NV21, DRM_FORMAT_NV61,
DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU,
DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, DRM_FORMAT_YUV422,
DRM_FORMAT_YUV444,
};
static const unsigned int fimc_tiled_formats[] = {
DRM_FORMAT_NV12, DRM_FORMAT_NV21,
};
static const struct drm_exynos_ipp_limit fimc_4210_limits_v1[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, 8192, 8 }, .v = { 16, 8192, 2 }) },
{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4224, 2 }, .v = { 16, 0, 2 }) },
{ IPP_SIZE_LIMIT(ROTATED, .h = { 128, 1920 }, .v = { 128, 0 }) },
{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 },
.v = { (1 << 16) / 64, (1 << 16) * 64 }) },
};
static const struct drm_exynos_ipp_limit fimc_4210_limits_v2[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, 8192, 8 }, .v = { 16, 8192, 2 }) },
{ IPP_SIZE_LIMIT(AREA, .h = { 16, 1920, 2 }, .v = { 16, 0, 2 }) },
{ IPP_SIZE_LIMIT(ROTATED, .h = { 128, 1366 }, .v = { 128, 0 }) },
{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 },
.v = { (1 << 16) / 64, (1 << 16) * 64 }) },
};
static const struct drm_exynos_ipp_limit fimc_4210_limits_tiled_v1[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 128, 1920, 128 }, .v = { 32, 1920, 32 }) },
{ IPP_SIZE_LIMIT(AREA, .h = { 128, 1920, 2 }, .v = { 128, 0, 2 }) },
{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 },
.v = { (1 << 16) / 64, (1 << 16) * 64 }) },
};
static const struct drm_exynos_ipp_limit fimc_4210_limits_tiled_v2[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 128, 1920, 128 }, .v = { 32, 1920, 32 }) },
{ IPP_SIZE_LIMIT(AREA, .h = { 128, 1366, 2 }, .v = { 128, 0, 2 }) },
{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 },
.v = { (1 << 16) / 64, (1 << 16) * 64 }) },
};
static int fimc_probe(struct platform_device *pdev)
{
const struct drm_exynos_ipp_limit *limits;
struct exynos_drm_ipp_formats *formats;
struct device *dev = &pdev->dev;
struct fimc_context *ctx;
int ret;
int i, j, num_limits, num_formats;
if (exynos_drm_check_fimc_device(dev) != 0)
return -ENODEV;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->dev = dev;
ctx->id = of_alias_get_id(dev->of_node, "fimc");
/* construct formats/limits array */
num_formats = ARRAY_SIZE(fimc_formats) + ARRAY_SIZE(fimc_tiled_formats);
formats = devm_kcalloc(dev, num_formats, sizeof(*formats),
GFP_KERNEL);
if (!formats)
return -ENOMEM;
/* linear formats */
if (ctx->id < 3) {
limits = fimc_4210_limits_v1;
num_limits = ARRAY_SIZE(fimc_4210_limits_v1);
} else {
limits = fimc_4210_limits_v2;
num_limits = ARRAY_SIZE(fimc_4210_limits_v2);
}
for (i = 0; i < ARRAY_SIZE(fimc_formats); i++) {
formats[i].fourcc = fimc_formats[i];
formats[i].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
DRM_EXYNOS_IPP_FORMAT_DESTINATION;
formats[i].limits = limits;
formats[i].num_limits = num_limits;
}
/* tiled formats */
if (ctx->id < 3) {
limits = fimc_4210_limits_tiled_v1;
num_limits = ARRAY_SIZE(fimc_4210_limits_tiled_v1);
} else {
limits = fimc_4210_limits_tiled_v2;
num_limits = ARRAY_SIZE(fimc_4210_limits_tiled_v2);
}
for (j = i, i = 0; i < ARRAY_SIZE(fimc_tiled_formats); j++, i++) {
formats[j].fourcc = fimc_tiled_formats[i];
formats[j].modifier = DRM_FORMAT_MOD_SAMSUNG_64_32_TILE;
formats[j].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
DRM_EXYNOS_IPP_FORMAT_DESTINATION;
formats[j].limits = limits;
formats[j].num_limits = num_limits;
}
ctx->formats = formats;
ctx->num_formats = num_formats;
/* resource memory */
ctx->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
/* resource irq */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
ret = devm_request_irq(dev, ret, fimc_irq_handler,
0, dev_name(dev), ctx);
if (ret < 0) {
dev_err(dev, "failed to request irq.\n");
return ret;
}
ret = fimc_setup_clocks(ctx);
if (ret < 0)
return ret;
spin_lock_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, FIMC_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
ret = component_add(dev, &fimc_component_ops);
if (ret)
goto err_pm_dis;
dev_info(dev, "drm fimc registered successfully.\n");
return 0;
err_pm_dis:
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
fimc_put_clocks(ctx);
return ret;
}
static int fimc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimc_context *ctx = get_fimc_context(dev);
component_del(dev, &fimc_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
fimc_put_clocks(ctx);
return 0;
}
static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
return 0;
}
static int fimc_runtime_resume(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
return clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
}
static DEFINE_RUNTIME_DEV_PM_OPS(fimc_pm_ops, fimc_runtime_suspend,
fimc_runtime_resume, NULL);
static const struct of_device_id fimc_of_match[] = {
{ .compatible = "samsung,exynos4210-fimc" },
{ .compatible = "samsung,exynos4212-fimc" },
{ },
};
MODULE_DEVICE_TABLE(of, fimc_of_match);
struct platform_driver fimc_driver = {
.probe = fimc_probe,
.remove = fimc_remove,
.driver = {
.of_match_table = fimc_of_match,
.name = "exynos-drm-fimc",
.owner = THIS_MODULE,
.pm = pm_ptr(&fimc_pm_ops),
},
};
| linux-master | drivers/gpu/drm/exynos/exynos_drm_fimc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_crtc.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Authors:
* Inki Dae <[email protected]>
* Joonyoung Shim <[email protected]>
* Seung-Woo Kim <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_plane.h"
static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
if (exynos_crtc->ops->atomic_enable)
exynos_crtc->ops->atomic_enable(exynos_crtc);
drm_crtc_vblank_on(crtc);
}
static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
drm_crtc_vblank_off(crtc);
if (exynos_crtc->ops->atomic_disable)
exynos_crtc->ops->atomic_disable(exynos_crtc);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event && !crtc->state->active) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
}
static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
if (!crtc_state->enable)
return 0;
if (exynos_crtc->ops->atomic_check)
return exynos_crtc->ops->atomic_check(exynos_crtc, crtc_state);
return 0;
}
static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
if (exynos_crtc->ops->atomic_begin)
exynos_crtc->ops->atomic_begin(exynos_crtc);
}
static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
if (exynos_crtc->ops->atomic_flush)
exynos_crtc->ops->atomic_flush(exynos_crtc);
}
static enum drm_mode_status exynos_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
if (exynos_crtc->ops->mode_valid)
return exynos_crtc->ops->mode_valid(exynos_crtc, mode);
return MODE_OK;
}
static bool exynos_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
if (exynos_crtc->ops->mode_fixup)
return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
adjusted_mode);
return true;
}
static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.mode_valid = exynos_crtc_mode_valid,
.mode_fixup = exynos_crtc_mode_fixup,
.atomic_check = exynos_crtc_atomic_check,
.atomic_begin = exynos_crtc_atomic_begin,
.atomic_flush = exynos_crtc_atomic_flush,
.atomic_enable = exynos_drm_crtc_atomic_enable,
.atomic_disable = exynos_drm_crtc_atomic_disable,
};
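/*
 * Arm the pending page-flip event so it is delivered on the next vblank.
 * CRTC backends use this as their atomic_flush hook (see e.g. vidi_crtc_ops).
 */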
void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc)
{
struct drm_crtc *crtc = &exynos_crtc->base;
struct drm_pending_vblank_event *event = crtc->state->event;
unsigned long flags;
if (!event)
return;
crtc->state->event = NULL;
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_arm_vblank_event(crtc, event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
drm_crtc_cleanup(crtc);
kfree(exynos_crtc);
}
static int exynos_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
if (exynos_crtc->ops->enable_vblank)
return exynos_crtc->ops->enable_vblank(exynos_crtc);
return 0;
}
static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
if (exynos_crtc->ops->disable_vblank)
exynos_crtc->ops->disable_vblank(exynos_crtc);
}
static const struct drm_crtc_funcs exynos_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.destroy = exynos_drm_crtc_destroy,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
};
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
struct drm_plane *plane,
enum exynos_drm_output_type type,
const struct exynos_drm_crtc_ops *ops,
void *ctx)
{
struct exynos_drm_crtc *exynos_crtc;
struct drm_crtc *crtc;
int ret;
exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
if (!exynos_crtc)
return ERR_PTR(-ENOMEM);
exynos_crtc->type = type;
exynos_crtc->ops = ops;
exynos_crtc->ctx = ctx;
crtc = &exynos_crtc->base;
ret = drm_crtc_init_with_planes(drm_dev, crtc, plane, NULL,
&exynos_crtc_funcs, NULL);
if (ret < 0)
goto err_crtc;
drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
return exynos_crtc;
err_crtc:
plane->funcs->destroy(plane);
kfree(exynos_crtc);
return ERR_PTR(ret);
}
struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev,
enum exynos_drm_output_type out_type)
{
struct drm_crtc *crtc;
drm_for_each_crtc(crtc, drm_dev)
if (to_exynos_crtc(crtc)->type == out_type)
return to_exynos_crtc(crtc);
return ERR_PTR(-ENODEV);
}
int exynos_drm_set_possible_crtcs(struct drm_encoder *encoder,
enum exynos_drm_output_type out_type)
{
struct exynos_drm_crtc *crtc = exynos_drm_crtc_get_by_type(encoder->dev,
out_type);
if (IS_ERR(crtc))
return PTR_ERR(crtc);
encoder->possible_crtcs = drm_crtc_mask(&crtc->base);
return 0;
}
void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
if (exynos_crtc->ops->te_handler)
exynos_crtc->ops->te_handler(exynos_crtc);
}
| linux-master | drivers/gpu/drm/exynos/exynos_drm_crtc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_gem.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Author: Inki Dae <[email protected]>
*/
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>
#include <linux/module.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
MODULE_IMPORT_NS(DMA_BUF);
static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
struct drm_device *dev = exynos_gem->base.dev;
unsigned long attr = 0;
if (exynos_gem->dma_addr) {
DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
return 0;
}
/*
 * If EXYNOS_BO_CONTIG is set, a fully physically contiguous memory
 * region is allocated; otherwise the allocation is made as
 * physically contiguous as possible.
 */
if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
attr |= DMA_ATTR_FORCE_CONTIGUOUS;
/*
 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
 * writecombine mapping; otherwise use a cacheable mapping.
 */
if (exynos_gem->flags & EXYNOS_BO_WC ||
!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
attr |= DMA_ATTR_WRITE_COMBINE;
/* FBDev emulation requires kernel mapping */
if (!kvmap)
attr |= DMA_ATTR_NO_KERNEL_MAPPING;
exynos_gem->dma_attrs = attr;
exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
&exynos_gem->dma_addr, GFP_KERNEL,
exynos_gem->dma_attrs);
if (!exynos_gem->cookie) {
DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
return -ENOMEM;
}
if (kvmap)
exynos_gem->kvaddr = exynos_gem->cookie;
DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
return 0;
}
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
struct drm_device *dev = exynos_gem->base.dev;
if (!exynos_gem->dma_addr) {
DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
return;
}
DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
(dma_addr_t)exynos_gem->dma_addr,
exynos_gem->dma_attrs);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
struct drm_file *file_priv,
unsigned int *handle)
{
int ret;
/*
 * allocate an id in the idr table where the obj is registered;
 * the handle holds that id, which is what userspace sees.
 */
ret = drm_gem_handle_create(file_priv, obj, handle);
if (ret)
return ret;
DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(obj);
return 0;
}
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
struct drm_gem_object *obj = &exynos_gem->base;
DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
obj->handle_count);
/*
 * do not release a memory region obtained from an exporter.
 *
 * the region will be released by the exporter
 * once the dmabuf's refcount drops to zero.
 */
if (obj->import_attach)
drm_prime_gem_destroy(obj, exynos_gem->sgt);
else
exynos_drm_free_buf(exynos_gem);
/* release file pointer to gem object. */
drm_gem_object_release(obj);
kfree(exynos_gem);
}
static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
.free = exynos_drm_gem_free_object,
.get_sg_table = exynos_drm_gem_prime_get_sg_table,
.mmap = exynos_drm_gem_mmap,
.vm_ops = &exynos_drm_gem_vm_ops,
};
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem *exynos_gem;
struct drm_gem_object *obj;
int ret;
exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
if (!exynos_gem)
return ERR_PTR(-ENOMEM);
exynos_gem->size = size;
obj = &exynos_gem->base;
obj->funcs = &exynos_drm_gem_object_funcs;
ret = drm_gem_object_init(dev, obj, size);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
kfree(exynos_gem);
return ERR_PTR(ret);
}
ret = drm_gem_create_mmap_offset(obj);
if (ret < 0) {
drm_gem_object_release(obj);
kfree(exynos_gem);
return ERR_PTR(ret);
}
DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);
return exynos_gem;
}
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
unsigned long size,
bool kvmap)
{
struct exynos_drm_gem *exynos_gem;
int ret;
if (flags & ~(EXYNOS_BO_MASK)) {
DRM_DEV_ERROR(dev->dev,
"invalid GEM buffer flags: %u\n", flags);
return ERR_PTR(-EINVAL);
}
if (!size) {
DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
return ERR_PTR(-EINVAL);
}
size = roundup(size, PAGE_SIZE);
exynos_gem = exynos_drm_gem_init(dev, size);
if (IS_ERR(exynos_gem))
return exynos_gem;
if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
/*
 * when no IOMMU is available, all allocated buffers are
 * contiguous anyway, so drop the EXYNOS_BO_NONCONTIG flag
 */
flags &= ~EXYNOS_BO_NONCONTIG;
DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
}
/* set memory type and cache attribute requested by userspace. */
exynos_gem->flags = flags;
ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
if (ret < 0) {
drm_gem_object_release(&exynos_gem->base);
kfree(exynos_gem);
return ERR_PTR(ret);
}
return exynos_gem;
}
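/*
 * Illustrative sketch (not part of the original driver): how an in-kernel
 * caller could allocate a write-combined GEM buffer with the helper above.
 * The function name and the 4 MiB size are hypothetical; the NONCONTIG
 * flag is silently dropped when no IOMMU is available, as handled above.
 */
static struct exynos_drm_gem * __maybe_unused
exynos_drm_gem_create_example(struct drm_device *dev)
{
	/* scattered pages with a writecombine CPU mapping, no kernel map */
	return exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
				     4 << 20, false);
}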
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_gem_create *args = data;
struct exynos_drm_gem *exynos_gem;
int ret;
exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
if (IS_ERR(exynos_gem))
return PTR_ERR(exynos_gem);
ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
&args->handle);
if (ret) {
exynos_drm_gem_destroy(exynos_gem);
return ret;
}
return 0;
}
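/*
* Illustrative userspace flow for the create and map ioctls (a sketch
* only, not part of the driver; drm_fd and len are hypothetical names):
*
*	struct drm_exynos_gem_create create = {
*		.size = len, .flags = EXYNOS_BO_NONCONTIG,
*	};
*	ioctl(drm_fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &create);
*
*	struct drm_exynos_gem_map map = { .handle = create.handle };
*	ioctl(drm_fd, DRM_IOCTL_EXYNOS_GEM_MAP, &map);
*
*	void *ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
*			 drm_fd, map.offset);
*
* The map ioctl (exynos_drm_gem_map_ioctl() below) translates the handle
* into the fake mmap offset consumed by the final mmap() call.
*/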
int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_gem_map *args = data;
return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
&args->offset);
}
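/*
* drm_gem_object_lookup() takes a reference on the returned object, so
* callers of exynos_drm_gem_get() must drop it again with
* drm_gem_object_put() once they are done with the buffer.
*/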
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
unsigned int gem_handle)
{
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(filp, gem_handle);
if (!obj)
return NULL;
return to_exynos_gem(obj);
}
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
struct vm_area_struct *vma)
{
struct drm_device *drm_dev = exynos_gem->base.dev;
unsigned long vm_size;
int ret;
vm_flags_clear(vma, VM_PFNMAP);
vma->vm_pgoff = 0;
vm_size = vma->vm_end - vma->vm_start;
/* check if user-requested size is valid. */
if (vm_size > exynos_gem->size)
return -EINVAL;
ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
exynos_gem->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
}
return 0;
}
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct exynos_drm_gem *exynos_gem;
struct drm_exynos_gem_info *args = data;
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj) {
DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
return -EINVAL;
}
exynos_gem = to_exynos_gem(obj);
args->flags = exynos_gem->flags;
args->size = exynos_gem->size;
drm_gem_object_put(obj);
return 0;
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
exynos_drm_gem_destroy(to_exynos_gem(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct exynos_drm_gem *exynos_gem;
unsigned int flags;
int ret;
/*
* Allocate memory to be used for the framebuffer.
* This callback is invoked when a user application issues the
* DRM_IOCTL_MODE_CREATE_DUMB ioctl.
*/
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
if (is_drm_iommu_supported(dev))
flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
else
flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
if (IS_ERR(exynos_gem)) {
dev_warn(dev->dev, "FB allocation failed.\n");
return PTR_ERR(exynos_gem);
}
ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
&args->handle);
if (ret) {
exynos_drm_gem_destroy(exynos_gem);
return ret;
}
return 0;
}
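/*
* mmap of an imported buffer is delegated to the exporter via
* dma_buf_mmap(); for native buffers the page protection follows the
* allocation flags: cached, write-combined, or non-cached (the default).
*/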
static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
int ret;
if (obj->import_attach)
return dma_buf_mmap(obj->dma_buf, vma, 0);
vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
exynos_gem->flags);
/* non-cacheable by default. */
if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
else if (exynos_gem->flags & EXYNOS_BO_WC)
vma->vm_page_prot =
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else
vma->vm_page_prot =
pgprot_noncached(vm_get_page_prot(vma->vm_flags));
ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
if (ret)
goto err_close_vm;
return ret;
err_close_vm:
drm_gem_vm_close(vma);
return ret;
}
/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}
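/*
* The sg_table allocated in the helper below is handed over to the DRM
* PRIME core, which is expected to sg_free_table() and kfree() it when
* the dma-buf attachment is unmapped.
*/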
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
struct drm_device *drm_dev = obj->dev;
struct sg_table *sgt;
int ret;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
return ERR_PTR(-ENOMEM);
ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
exynos_gem->dma_attrs);
if (ret) {
DRM_ERROR("failed to get sgtable, %d\n", ret);
kfree(sgt);
return ERR_PTR(ret);
}
return sgt;
}
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
struct exynos_drm_gem *exynos_gem;
/* check if the entries in the sg_table are contiguous */
if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
DRM_ERROR("buffer chunks must be mapped contiguously");
return ERR_PTR(-EINVAL);
}
exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
if (IS_ERR(exynos_gem))
return ERR_CAST(exynos_gem);
/*
* The buffer has been mapped as contiguous into the DMA address space,
* but if an IOMMU is present it can be either CONTIG or NONCONTIG.
* Apply the simplified heuristic below:
*/
if (is_drm_iommu_supported(dev))
exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
else
exynos_gem->flags |= EXYNOS_BO_CONTIG;
exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
exynos_gem->sgt = sgt;
return &exynos_gem->base;
}
| linux-master | drivers/gpu/drm/exynos/exynos_drm_gem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Samsung Electronics Co.Ltd
* Author:
* Andrzej Pietrasiewicz <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_ipp.h"
#include "regs-scaler.h"
#define scaler_read(offset) readl(scaler->regs + (offset))
#define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset))
#define SCALER_MAX_CLK 4
#define SCALER_AUTOSUSPEND_DELAY 2000
#define SCALER_RESET_WAIT_RETRIES 100
struct scaler_data {
const char *clk_name[SCALER_MAX_CLK];
unsigned int num_clk;
const struct exynos_drm_ipp_formats *formats;
unsigned int num_formats;
};
struct scaler_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
void *dma_priv;
struct device *dev;
void __iomem *regs;
struct clk *clock[SCALER_MAX_CLK];
struct exynos_drm_ipp_task *task;
const struct scaler_data *scaler_data;
};
struct scaler_format {
u32 drm_fmt;
u32 internal_fmt;
u32 chroma_tile_w;
u32 chroma_tile_h;
};
static const struct scaler_format scaler_formats[] = {
{ DRM_FORMAT_NV12, SCALER_YUV420_2P_UV, 8, 8 },
{ DRM_FORMAT_NV21, SCALER_YUV420_2P_VU, 8, 8 },
{ DRM_FORMAT_YUV420, SCALER_YUV420_3P, 8, 8 },
{ DRM_FORMAT_YUYV, SCALER_YUV422_1P_YUYV, 16, 16 },
{ DRM_FORMAT_UYVY, SCALER_YUV422_1P_UYVY, 16, 16 },
{ DRM_FORMAT_YVYU, SCALER_YUV422_1P_YVYU, 16, 16 },
{ DRM_FORMAT_NV16, SCALER_YUV422_2P_UV, 8, 16 },
{ DRM_FORMAT_NV61, SCALER_YUV422_2P_VU, 8, 16 },
{ DRM_FORMAT_YUV422, SCALER_YUV422_3P, 8, 16 },
{ DRM_FORMAT_NV24, SCALER_YUV444_2P_UV, 16, 16 },
{ DRM_FORMAT_NV42, SCALER_YUV444_2P_VU, 16, 16 },
{ DRM_FORMAT_YUV444, SCALER_YUV444_3P, 16, 16 },
{ DRM_FORMAT_RGB565, SCALER_RGB_565, 0, 0 },
{ DRM_FORMAT_XRGB1555, SCALER_ARGB1555, 0, 0 },
{ DRM_FORMAT_ARGB1555, SCALER_ARGB1555, 0, 0 },
{ DRM_FORMAT_XRGB4444, SCALER_ARGB4444, 0, 0 },
{ DRM_FORMAT_ARGB4444, SCALER_ARGB4444, 0, 0 },
{ DRM_FORMAT_XRGB8888, SCALER_ARGB8888, 0, 0 },
{ DRM_FORMAT_ARGB8888, SCALER_ARGB8888, 0, 0 },
{ DRM_FORMAT_RGBX8888, SCALER_RGBA8888, 0, 0 },
{ DRM_FORMAT_RGBA8888, SCALER_RGBA8888, 0, 0 },
};
static const struct scaler_format *scaler_get_format(u32 drm_fmt)
{
int i;
for (i = 0; i < ARRAY_SIZE(scaler_formats); i++)
if (scaler_formats[i].drm_fmt == drm_fmt)
return &scaler_formats[i];
return NULL;
}
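/*
* Reset handshake: request a soft reset, poll until the hardware clears
* SCALER_CFG_SOFT_RESET, then confirm that register access works again
* by repeatedly writing SCALER_INT_EN and reading the value back. One
* shared retry budget bounds both loops.
*/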
static inline int scaler_reset(struct scaler_context *scaler)
{
int retry = SCALER_RESET_WAIT_RETRIES;
scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
do {
cpu_relax();
} while (--retry > 1 &&
scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
do {
cpu_relax();
scaler_write(1, SCALER_INT_EN);
} while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
return retry ? 0 : -EIO;
}
static inline void scaler_enable_int(struct scaler_context *scaler)
{
u32 val;
val = SCALER_INT_EN_TIMEOUT |
SCALER_INT_EN_ILLEGAL_BLEND |
SCALER_INT_EN_ILLEGAL_RATIO |
SCALER_INT_EN_ILLEGAL_DST_HEIGHT |
SCALER_INT_EN_ILLEGAL_DST_WIDTH |
SCALER_INT_EN_ILLEGAL_DST_V_POS |
SCALER_INT_EN_ILLEGAL_DST_H_POS |
SCALER_INT_EN_ILLEGAL_DST_C_SPAN |
SCALER_INT_EN_ILLEGAL_DST_Y_SPAN |
SCALER_INT_EN_ILLEGAL_DST_CR_BASE |
SCALER_INT_EN_ILLEGAL_DST_CB_BASE |
SCALER_INT_EN_ILLEGAL_DST_Y_BASE |
SCALER_INT_EN_ILLEGAL_DST_COLOR |
SCALER_INT_EN_ILLEGAL_SRC_HEIGHT |
SCALER_INT_EN_ILLEGAL_SRC_WIDTH |
SCALER_INT_EN_ILLEGAL_SRC_CV_POS |
SCALER_INT_EN_ILLEGAL_SRC_CH_POS |
SCALER_INT_EN_ILLEGAL_SRC_YV_POS |
SCALER_INT_EN_ILLEGAL_SRC_YH_POS |
SCALER_INT_EN_ILLEGAL_DST_SPAN |
SCALER_INT_EN_ILLEGAL_SRC_Y_SPAN |
SCALER_INT_EN_ILLEGAL_SRC_CR_BASE |
SCALER_INT_EN_ILLEGAL_SRC_CB_BASE |
SCALER_INT_EN_ILLEGAL_SRC_Y_BASE |
SCALER_INT_EN_ILLEGAL_SRC_COLOR |
SCALER_INT_EN_FRAME_END;
scaler_write(val, SCALER_INT_EN);
}
static inline void scaler_set_src_fmt(struct scaler_context *scaler,
u32 src_fmt, u32 tile)
{
u32 val;
val = SCALER_SRC_CFG_SET_COLOR_FORMAT(src_fmt) | (tile << 10);
scaler_write(val, SCALER_SRC_CFG);
}
static inline void scaler_set_src_base(struct scaler_context *scaler,
struct exynos_drm_ipp_buffer *src_buf)
{
static unsigned int bases[] = {
SCALER_SRC_Y_BASE,
SCALER_SRC_CB_BASE,
SCALER_SRC_CR_BASE,
};
int i;
for (i = 0; i < src_buf->format->num_planes; ++i)
scaler_write(src_buf->dma_addr[i], bases[i]);
}
static inline void scaler_set_src_span(struct scaler_context *scaler,
struct exynos_drm_ipp_buffer *src_buf)
{
u32 val;
val = SCALER_SRC_SPAN_SET_Y_SPAN(src_buf->buf.pitch[0] /
src_buf->format->cpp[0]);
if (src_buf->format->num_planes > 1)
val |= SCALER_SRC_SPAN_SET_C_SPAN(src_buf->buf.pitch[1]);
scaler_write(val, SCALER_SRC_SPAN);
}
static inline void scaler_set_src_luma_chroma_pos(struct scaler_context *scaler,
struct drm_exynos_ipp_task_rect *src_pos,
const struct scaler_format *fmt)
{
u32 val;
val = SCALER_SRC_Y_POS_SET_YH_POS(src_pos->x << 2);
val |= SCALER_SRC_Y_POS_SET_YV_POS(src_pos->y << 2);
scaler_write(val, SCALER_SRC_Y_POS);
val = SCALER_SRC_C_POS_SET_CH_POS(
(src_pos->x * fmt->chroma_tile_w / 16) << 2);
val |= SCALER_SRC_C_POS_SET_CV_POS(
(src_pos->y * fmt->chroma_tile_h / 16) << 2);
scaler_write(val, SCALER_SRC_C_POS);
}
static inline void scaler_set_src_wh(struct scaler_context *scaler,
struct drm_exynos_ipp_task_rect *src_pos)
{
u32 val;
val = SCALER_SRC_WH_SET_WIDTH(src_pos->w);
val |= SCALER_SRC_WH_SET_HEIGHT(src_pos->h);
scaler_write(val, SCALER_SRC_WH);
}
static inline void scaler_set_dst_fmt(struct scaler_context *scaler,
u32 dst_fmt)
{
u32 val;
val = SCALER_DST_CFG_SET_COLOR_FORMAT(dst_fmt);
scaler_write(val, SCALER_DST_CFG);
}
static inline void scaler_set_dst_base(struct scaler_context *scaler,
struct exynos_drm_ipp_buffer *dst_buf)
{
static unsigned int bases[] = {
SCALER_DST_Y_BASE,
SCALER_DST_CB_BASE,
SCALER_DST_CR_BASE,
};
int i;
for (i = 0; i < dst_buf->format->num_planes; ++i)
scaler_write(dst_buf->dma_addr[i], bases[i]);
}
static inline void scaler_set_dst_span(struct scaler_context *scaler,
struct exynos_drm_ipp_buffer *dst_buf)
{
u32 val;
val = SCALER_DST_SPAN_SET_Y_SPAN(dst_buf->buf.pitch[0] /
dst_buf->format->cpp[0]);
if (dst_buf->format->num_planes > 1)
val |= SCALER_DST_SPAN_SET_C_SPAN(dst_buf->buf.pitch[1]);
scaler_write(val, SCALER_DST_SPAN);
}
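/*
* Note: the names of the two helpers below are swapped relative to the
* registers they program: scaler_set_dst_luma_pos() writes SCALER_DST_WH
* and scaler_set_dst_wh() writes SCALER_DST_POS. Both are always called
* together from scaler_commit(), so the resulting configuration is
* nevertheless correct.
*/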
static inline void scaler_set_dst_luma_pos(struct scaler_context *scaler,
struct drm_exynos_ipp_task_rect *dst_pos)
{
u32 val;
val = SCALER_DST_WH_SET_WIDTH(dst_pos->w);
val |= SCALER_DST_WH_SET_HEIGHT(dst_pos->h);
scaler_write(val, SCALER_DST_WH);
}
static inline void scaler_set_dst_wh(struct scaler_context *scaler,
struct drm_exynos_ipp_task_rect *dst_pos)
{
u32 val;
val = SCALER_DST_POS_SET_H_POS(dst_pos->x);
val |= SCALER_DST_POS_SET_V_POS(dst_pos->y);
scaler_write(val, SCALER_DST_POS);
}
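/*
* Scaling ratios are programmed in 16.16 fixed point: ratio =
* (src << 16) / dst, so 65536 means 1:1. For example, scaling 1920
* source pixels down to 1280 destination pixels gives h_ratio =
* (1920 << 16) / 1280 = 98304, i.e. 1.5. For 90/270 degree rotation
* the source axes are swapped first.
*/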
static inline void scaler_set_hv_ratio(struct scaler_context *scaler,
unsigned int rotation,
struct drm_exynos_ipp_task_rect *src_pos,
struct drm_exynos_ipp_task_rect *dst_pos)
{
u32 val, h_ratio, v_ratio;
if (drm_rotation_90_or_270(rotation)) {
h_ratio = (src_pos->h << 16) / dst_pos->w;
v_ratio = (src_pos->w << 16) / dst_pos->h;
} else {
h_ratio = (src_pos->w << 16) / dst_pos->w;
v_ratio = (src_pos->h << 16) / dst_pos->h;
}
val = SCALER_H_RATIO_SET(h_ratio);
scaler_write(val, SCALER_H_RATIO);
val = SCALER_V_RATIO_SET(v_ratio);
scaler_write(val, SCALER_V_RATIO);
}
static inline void scaler_set_rotation(struct scaler_context *scaler,
unsigned int rotation)
{
u32 val = 0;
if (rotation & DRM_MODE_ROTATE_90)
val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_90);
else if (rotation & DRM_MODE_ROTATE_180)
val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_180);
else if (rotation & DRM_MODE_ROTATE_270)
val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_270);
if (rotation & DRM_MODE_REFLECT_X)
val |= SCALER_ROT_CFG_FLIP_X_EN;
if (rotation & DRM_MODE_REFLECT_Y)
val |= SCALER_ROT_CFG_FLIP_Y_EN;
scaler_write(val, SCALER_ROT_CFG);
}
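/*
* The coefficients below appear to be BT.601 limited-range constants in
* a fixed-point format where 0x200 (512) represents 1.0, e.g. 0x254 =
* 596 ~= 1.164 * 512 and 0x331 = 817 ~= 1.596 * 512; values such as
* 0xf38 are negative in two's complement. This is inferred from the
* values themselves, not taken from hardware documentation.
*/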
static inline void scaler_set_csc(struct scaler_context *scaler,
const struct drm_format_info *fmt)
{
static const u32 csc_mtx[2][3][3] = {
{ /* YCbCr to RGB */
{0x254, 0x000, 0x331},
{0x254, 0xf38, 0xe60},
{0x254, 0x409, 0x000},
},
{ /* RGB to YCbCr */
{0x084, 0x102, 0x032},
{0xfb4, 0xf6b, 0x0e1},
{0x0e1, 0xf44, 0xfdc},
},
};
int i, j, dir;
switch (fmt->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_RGBA8888:
dir = 1;
break;
default:
dir = 0;
}
for (i = 0; i < 3; i++)
for (j = 0; j < 3; j++)
scaler_write(csc_mtx[dir][i][j], SCALER_CSC_COEF(j, i));
}
static inline void scaler_set_timer(struct scaler_context *scaler,
unsigned int timer, unsigned int divider)
{
u32 val;
val = SCALER_TIMEOUT_CTRL_TIMER_ENABLE;
val |= SCALER_TIMEOUT_CTRL_SET_TIMER_VALUE(timer);
val |= SCALER_TIMEOUT_CTRL_SET_TIMER_DIV(divider);
scaler_write(val, SCALER_TIMEOUT_CTRL);
}
static inline void scaler_start_hw(struct scaler_context *scaler)
{
scaler_write(SCALER_CFG_START_CMD, SCALER_CFG);
}
static int scaler_commit(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
struct scaler_context *scaler =
container_of(ipp, struct scaler_context, ipp);
struct drm_exynos_ipp_task_rect *src_pos = &task->src.rect;
struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
const struct scaler_format *src_fmt, *dst_fmt;
int ret = 0;
src_fmt = scaler_get_format(task->src.buf.fourcc);
dst_fmt = scaler_get_format(task->dst.buf.fourcc);
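/*
* Source and destination formats are expected to have been validated by
* the IPP core before the task is committed, so scaler_get_format()
* should not return NULL here.
*/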
ret = pm_runtime_resume_and_get(scaler->dev);
if (ret < 0)
return ret;
if (scaler_reset(scaler)) {
/* drop the runtime PM reference taken above before bailing out */
pm_runtime_put(scaler->dev);
return -EIO;
}
scaler->task = task;
scaler_set_src_fmt(
scaler, src_fmt->internal_fmt, task->src.buf.modifier != 0);
scaler_set_src_base(scaler, &task->src);
scaler_set_src_span(scaler, &task->src);
scaler_set_src_luma_chroma_pos(scaler, src_pos, src_fmt);
scaler_set_src_wh(scaler, src_pos);
scaler_set_dst_fmt(scaler, dst_fmt->internal_fmt);
scaler_set_dst_base(scaler, &task->dst);
scaler_set_dst_span(scaler, &task->dst);
scaler_set_dst_luma_pos(scaler, dst_pos);
scaler_set_dst_wh(scaler, dst_pos);
scaler_set_hv_ratio(scaler, task->transform.rotation, src_pos, dst_pos);
scaler_set_rotation(scaler, task->transform.rotation);
scaler_set_csc(scaler, task->src.format);
scaler_set_timer(scaler, 0xffff, 0xf);
scaler_enable_int(scaler);
scaler_start_hw(scaler);
return 0;
}
static struct exynos_drm_ipp_funcs ipp_funcs = {
.commit = scaler_commit,
};
static inline void scaler_disable_int(struct scaler_context *scaler)
{
scaler_write(0, SCALER_INT_EN);
}
static inline u32 scaler_get_int_status(struct scaler_context *scaler)
{
u32 val = scaler_read(SCALER_INT_STATUS);
scaler_write(val, SCALER_INT_STATUS);
return val;
}
static inline int scaler_task_done(u32 val)
{
return val & SCALER_INT_STATUS_FRAME_END ? 0 : -EINVAL;
}
static irqreturn_t scaler_irq_handler(int irq, void *arg)
{
struct scaler_context *scaler = arg;
u32 val = scaler_get_int_status(scaler);
scaler_disable_int(scaler);
if (scaler->task) {
struct exynos_drm_ipp_task *task = scaler->task;
scaler->task = NULL;
pm_runtime_mark_last_busy(scaler->dev);
pm_runtime_put_autosuspend(scaler->dev);
exynos_drm_ipp_task_done(task, scaler_task_done(val));
}
return IRQ_HANDLED;
}
static int scaler_bind(struct device *dev, struct device *master, void *data)
{
struct scaler_context *scaler = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &scaler->ipp;
scaler->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
scaler->scaler_data->formats,
scaler->scaler_data->num_formats, "scaler");
dev_info(dev, "The exynos scaler has been probed successfully\n");
return 0;
}
static void scaler_unbind(struct device *dev, struct device *master,
void *data)
{
struct scaler_context *scaler = dev_get_drvdata(dev);
struct exynos_drm_ipp *ipp = &scaler->ipp;
exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev,
&scaler->dma_priv);
}
static const struct component_ops scaler_component_ops = {
.bind = scaler_bind,
.unbind = scaler_unbind,
};
static int scaler_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct scaler_context *scaler;
int irq;
int ret, i;
scaler = devm_kzalloc(dev, sizeof(*scaler), GFP_KERNEL);
if (!scaler)
return -ENOMEM;
scaler->scaler_data =
(struct scaler_data *)of_device_get_match_data(dev);
scaler->dev = dev;
scaler->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(scaler->regs))
return PTR_ERR(scaler->regs);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_threaded_irq(dev, irq, NULL, scaler_irq_handler,
IRQF_ONESHOT, "drm_scaler", scaler);
if (ret < 0) {
dev_err(dev, "failed to request irq\n");
return ret;
}
for (i = 0; i < scaler->scaler_data->num_clk; ++i) {
scaler->clock[i] = devm_clk_get(dev,
scaler->scaler_data->clk_name[i]);
if (IS_ERR(scaler->clock[i])) {
dev_err(dev, "failed to get clock\n");
return PTR_ERR(scaler->clock[i]);
}
}
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, SCALER_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
platform_set_drvdata(pdev, scaler);
ret = component_add(dev, &scaler_component_ops);
if (ret)
goto err_ippdrv_register;
return 0;
err_ippdrv_register:
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
return ret;
}
static int scaler_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &scaler_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
return 0;
}
static int clk_disable_unprepare_wrapper(struct clk *clk)
{
clk_disable_unprepare(clk);
return 0;
}
static int scaler_clk_ctrl(struct scaler_context *scaler, bool enable)
{
int (*clk_fun)(struct clk *clk), i;
clk_fun = enable ? clk_prepare_enable : clk_disable_unprepare_wrapper;
for (i = 0; i < scaler->scaler_data->num_clk; ++i)
clk_fun(scaler->clock[i]);
return 0;
}
static int scaler_runtime_suspend(struct device *dev)
{
struct scaler_context *scaler = dev_get_drvdata(dev);
return scaler_clk_ctrl(scaler, false);
}
static int scaler_runtime_resume(struct device *dev)
{
struct scaler_context *scaler = dev_get_drvdata(dev);
return scaler_clk_ctrl(scaler, true);
}
static DEFINE_RUNTIME_DEV_PM_OPS(scaler_pm_ops, scaler_runtime_suspend,
scaler_runtime_resume, NULL);
static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_hv_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
{ IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) },
{ IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 },
.v = { 65536 * 1 / 4, 65536 * 16 }) },
};
static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_h_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
{ IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 1) },
{ IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 },
.v = { 65536 * 1 / 4, 65536 * 16 }) },
};
static const struct drm_exynos_ipp_limit scaler_5420_one_pixel_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
{ IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 },
.v = { 65536 * 1 / 4, 65536 * 16 }) },
};
static const struct drm_exynos_ipp_limit scaler_5420_tile_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K })},
{ IPP_SIZE_LIMIT(AREA, .h.align = 16, .v.align = 16) },
{ IPP_SCALE_LIMIT(.h = {1, 1}, .v = {1, 1})},
{ }
};
#define IPP_SRCDST_TILE_FORMAT(f, l) \
IPP_SRCDST_MFORMAT(f, DRM_FORMAT_MOD_SAMSUNG_16_16_TILE, (l))
static const struct exynos_drm_ipp_formats exynos5420_formats[] = {
/* SCALER_YUV420_2P_VU */
{ IPP_SRCDST_FORMAT(NV21, scaler_5420_two_pixel_hv_limits) },
/* SCALER_YUV420_2P_UV */
{ IPP_SRCDST_FORMAT(NV12, scaler_5420_two_pixel_hv_limits) },
/* SCALER_YUV420_3P */
{ IPP_SRCDST_FORMAT(YUV420, scaler_5420_two_pixel_hv_limits) },
/* SCALER_YUV422_1P_YUYV */
{ IPP_SRCDST_FORMAT(YUYV, scaler_5420_two_pixel_h_limits) },
/* SCALER_YUV422_1P_UYVY */
{ IPP_SRCDST_FORMAT(UYVY, scaler_5420_two_pixel_h_limits) },
/* SCALER_YUV422_1P_YVYU */
{ IPP_SRCDST_FORMAT(YVYU, scaler_5420_two_pixel_h_limits) },
/* SCALER_YUV422_2P_VU */
{ IPP_SRCDST_FORMAT(NV61, scaler_5420_two_pixel_h_limits) },
/* SCALER_YUV422_2P_UV */
{ IPP_SRCDST_FORMAT(NV16, scaler_5420_two_pixel_h_limits) },
/* SCALER_YUV422_3P */
{ IPP_SRCDST_FORMAT(YUV422, scaler_5420_two_pixel_h_limits) },
/* SCALER_YUV444_2P_VU */
{ IPP_SRCDST_FORMAT(NV42, scaler_5420_one_pixel_limits) },
/* SCALER_YUV444_2P_UV */
{ IPP_SRCDST_FORMAT(NV24, scaler_5420_one_pixel_limits) },
/* SCALER_YUV444_3P */
{ IPP_SRCDST_FORMAT(YUV444, scaler_5420_one_pixel_limits) },
/* SCALER_RGB_565 */
{ IPP_SRCDST_FORMAT(RGB565, scaler_5420_one_pixel_limits) },
/* SCALER_ARGB1555 */
{ IPP_SRCDST_FORMAT(XRGB1555, scaler_5420_one_pixel_limits) },
/* SCALER_ARGB1555 */
{ IPP_SRCDST_FORMAT(ARGB1555, scaler_5420_one_pixel_limits) },
/* SCALER_ARGB4444 */
{ IPP_SRCDST_FORMAT(XRGB4444, scaler_5420_one_pixel_limits) },
/* SCALER_ARGB4444 */
{ IPP_SRCDST_FORMAT(ARGB4444, scaler_5420_one_pixel_limits) },
/* SCALER_ARGB8888 */
{ IPP_SRCDST_FORMAT(XRGB8888, scaler_5420_one_pixel_limits) },
/* SCALER_ARGB8888 */
{ IPP_SRCDST_FORMAT(ARGB8888, scaler_5420_one_pixel_limits) },
/* SCALER_RGBA8888 */
{ IPP_SRCDST_FORMAT(RGBX8888, scaler_5420_one_pixel_limits) },
/* SCALER_RGBA8888 */
{ IPP_SRCDST_FORMAT(RGBA8888, scaler_5420_one_pixel_limits) },
/* SCALER_YUV420_2P_UV TILE */
{ IPP_SRCDST_TILE_FORMAT(NV21, scaler_5420_tile_limits) },
/* SCALER_YUV420_2P_VU TILE */
{ IPP_SRCDST_TILE_FORMAT(NV12, scaler_5420_tile_limits) },
/* SCALER_YUV420_3P TILE */
{ IPP_SRCDST_TILE_FORMAT(YUV420, scaler_5420_tile_limits) },
/* SCALER_YUV422_1P_YUYV TILE */
{ IPP_SRCDST_TILE_FORMAT(YUYV, scaler_5420_tile_limits) },
};
static const struct scaler_data exynos5420_data = {
.clk_name = {"mscl"},
.num_clk = 1,
.formats = exynos5420_formats,
.num_formats = ARRAY_SIZE(exynos5420_formats),
};
static const struct scaler_data exynos5433_data = {
.clk_name = {"pclk", "aclk", "aclk_xiu"},
.num_clk = 3,
.formats = exynos5420_formats, /* intentionally shared with Exynos5420 */
.num_formats = ARRAY_SIZE(exynos5420_formats),
};
static const struct of_device_id exynos_scaler_match[] = {
{
.compatible = "samsung,exynos5420-scaler",
.data = &exynos5420_data,
}, {
.compatible = "samsung,exynos5433-scaler",
.data = &exynos5433_data,
}, {
},
};
MODULE_DEVICE_TABLE(of, exynos_scaler_match);
struct platform_driver scaler_driver = {
.probe = scaler_probe,
.remove = scaler_remove,
.driver = {
.name = "exynos-scaler",
.owner = THIS_MODULE,
.pm = pm_ptr(&scaler_pm_ops),
.of_match_table = exynos_scaler_match,
},
};
| linux-master | drivers/gpu/drm/exynos/exynos_drm_scaler.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Authors:
* Inki Dae <[email protected]>
* Joonyoung Shim <[email protected]>
* Seung-Woo Kim <[email protected]>
*/
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
#define DRIVER_DATE "20180330"
/*
* Interface history:
*
* 1.0 - Original version
* 1.1 - Upgrade IPP driver to version 2.0
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv;
int ret;
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
return -ENOMEM;
file->driver_priv = file_priv;
ret = g2d_open(dev, file);
if (ret)
goto err_file_priv_free;
return ret;
err_file_priv_free:
kfree(file_priv);
file->driver_priv = NULL;
return ret;
}
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
g2d_close(dev, file);
kfree(file->driver_priv);
file->driver_priv = NULL;
}
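/*
* Exynos-specific ioctls. DRM_RENDER_ALLOW marks an ioctl as callable on
* render nodes in addition to the primary node.
*/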
static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP, exynos_drm_gem_map_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_RESOURCES,
exynos_drm_ipp_get_res_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_CAPS, exynos_drm_ipp_get_caps_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_LIMITS,
exynos_drm_ipp_get_limits_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_COMMIT, exynos_drm_ipp_commit_ioctl,
DRM_RENDER_ALLOW),
};
DEFINE_DRM_GEM_FOPS(exynos_drm_driver_fops);
static const struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM
| DRIVER_ATOMIC | DRIVER_RENDER,
.open = exynos_drm_open,
.postclose = exynos_drm_postclose,
.dumb_create = exynos_drm_gem_dumb_create,
.gem_prime_import = exynos_drm_gem_prime_import,
.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
.ioctls = exynos_ioctls,
.num_ioctls = ARRAY_SIZE(exynos_ioctls),
.fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
static int exynos_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(drm_dev);
}
static void exynos_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
drm_mode_config_helper_resume(drm_dev);
}
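/*
* The KMS suspend/resume helpers are wired to .prepare/.complete,
* presumably so the display state is saved before any component device
* suspends and restored only after every component has resumed.
*/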
static const struct dev_pm_ops exynos_drm_pm_ops = {
.prepare = exynos_drm_suspend,
.complete = exynos_drm_resume,
};
/* forward declaration */
static struct platform_driver exynos_drm_platform_driver;
struct exynos_drm_driver_info {
struct platform_driver *driver;
unsigned int flags;
};
#define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */
#define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */
#define DRM_FIMC_DEVICE BIT(2) /* devices shared with V4L2 subsystem */
#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
/*
* Connector drivers should not be placed before their associated crtc
* drivers, because a connector requires the pipe number of its crtc
* during initialization.
*/
static struct exynos_drm_driver_info exynos_drm_drivers[] = {
{
DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(vidi_driver, CONFIG_DRM_EXYNOS_VIDI),
DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
}, {
DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
DRM_COMPONENT_DRIVER | DRM_FIMC_DEVICE,
}, {
DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(scaler_driver, CONFIG_DRM_EXYNOS_SCALER),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
DRM_COMPONENT_DRIVER
}, {
&exynos_drm_platform_driver,
DRM_VIRTUAL_DEVICE
}
};
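/*
* Walk all registered Exynos component drivers and add every platform
* device currently bound to one of them to the component match list.
* Devices also usable by the V4L2 subsystem (DRM_FIMC_DEVICE) are
* checked first so only those assigned to DRM are added.
*/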
static struct component_match *exynos_drm_match_add(struct device *dev)
{
struct component_match *match = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
struct device *p = NULL, *d;
if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER))
continue;
while ((d = platform_find_device_by_driver(p, &info->driver->driver))) {
put_device(p);
if (!(info->flags & DRM_FIMC_DEVICE) ||
exynos_drm_check_fimc_device(d) == 0)
component_match_add(dev, &match, component_compare_dev, d);
p = d;
}
put_device(p);
}
return match ?: ERR_PTR(-ENODEV);
}
static int exynos_drm_bind(struct device *dev)
{
struct exynos_drm_private *private;
struct drm_encoder *encoder;
struct drm_device *drm;
unsigned int clone_mask;
int ret;
drm = drm_dev_alloc(&exynos_drm_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
if (!private) {
ret = -ENOMEM;
goto err_free_drm;
}
init_waitqueue_head(&private->wait);
spin_lock_init(&private->lock);
dev_set_drvdata(dev, drm);
drm->dev_private = (void *)private;
drm_mode_config_init(drm);
exynos_drm_mode_config_init(drm);
/* setup possible_clones. */
clone_mask = 0;
list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
clone_mask |= drm_encoder_mask(encoder);
list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
encoder->possible_clones = clone_mask;
/* Try to bind all sub drivers. */
ret = component_bind_all(drm->dev, drm);
if (ret)
goto err_mode_config_cleanup;
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret)
goto err_unbind_all;
drm_mode_config_reset(drm);
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm);
/* register the DRM device */
ret = drm_dev_register(drm, 0);
if (ret < 0)
goto err_cleanup_poll;
exynos_drm_fbdev_setup(drm);
return 0;
err_cleanup_poll:
drm_kms_helper_poll_fini(drm);
err_unbind_all:
component_unbind_all(drm->dev, drm);
err_mode_config_cleanup:
drm_mode_config_cleanup(drm);
exynos_drm_cleanup_dma(drm);
kfree(private);
err_free_drm:
drm_dev_put(drm);
return ret;
}
static void exynos_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
exynos_drm_cleanup_dma(drm);
kfree(drm->dev_private);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
drm_dev_put(drm);
}
static const struct component_master_ops exynos_drm_ops = {
.bind = exynos_drm_bind,
.unbind = exynos_drm_unbind,
};
static int exynos_drm_platform_probe(struct platform_device *pdev)
{
struct component_match *match;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
match = exynos_drm_match_add(&pdev->dev);
if (IS_ERR(match))
return PTR_ERR(match);
return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
match);
}
static int exynos_drm_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &exynos_drm_ops);
return 0;
}
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
.remove = exynos_drm_platform_remove,
.driver = {
.name = "exynos-drm",
.pm = &exynos_drm_pm_ops,
},
};
static void exynos_drm_unregister_devices(void)
{
int i;
for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
struct device *dev;
if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
continue;
while ((dev = platform_find_device_by_driver(NULL,
&info->driver->driver))) {
put_device(dev);
platform_device_unregister(to_platform_device(dev));
}
}
}
static int exynos_drm_register_devices(void)
{
struct platform_device *pdev;
int i;
for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
continue;
pdev = platform_device_register_simple(
info->driver->driver.name, -1, NULL, 0);
if (IS_ERR(pdev))
goto fail;
}
return 0;
fail:
exynos_drm_unregister_devices();
return PTR_ERR(pdev);
}
static void exynos_drm_unregister_drivers(void)
{
int i;
for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
if (!info->driver)
continue;
platform_driver_unregister(info->driver);
}
}
static int exynos_drm_register_drivers(void)
{
int i, ret;
for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
if (!info->driver)
continue;
ret = platform_driver_register(info->driver);
if (ret)
goto fail;
}
return 0;
fail:
exynos_drm_unregister_drivers();
return ret;
}
static int exynos_drm_init(void)
{
int ret;
if (drm_firmware_drivers_only())
return -ENODEV;
ret = exynos_drm_register_devices();
if (ret)
return ret;
ret = exynos_drm_register_drivers();
if (ret)
goto err_unregister_pdevs;
return 0;
err_unregister_pdevs:
exynos_drm_unregister_devices();
return ret;
}
static void exynos_drm_exit(void)
{
exynos_drm_unregister_drivers();
exynos_drm_unregister_devices();
}
module_init(exynos_drm_init);
module_exit(exynos_drm_exit);
MODULE_AUTHOR("Inki Dae <[email protected]>");
MODULE_AUTHOR("Joonyoung Shim <[email protected]>");
MODULE_AUTHOR("Seung-Woo Kim <[email protected]>");
MODULE_DESCRIPTION("Samsung SoC DRM Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/exynos/exynos_drm_drv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/gpu/drm/exynos/exynos7_drm_decon.c
*
* Copyright (C) 2014 Samsung Electronics Co.Ltd
* Authors:
* Akshu Agarwal <[email protected]>
* Ajay Kumar <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
#include "regs-decon7.h"
/*
* DECON stands for Display and Enhancement controller.
*/
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
#define WINDOWS_NR 2
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
struct clk *pclk;
struct clk *aclk;
struct clk *eclk;
struct clk *vclk;
void __iomem *regs;
unsigned long irq_flags;
bool i80_if;
bool suspended;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
struct drm_encoder *encoder;
};
static const struct of_device_id decon_driver_dt_match[] = {
{.compatible = "samsung,exynos7-decon"},
{},
};
MODULE_DEVICE_TABLE(of, decon_driver_dt_match);
static const uint32_t decon_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_BGRA8888,
};
static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
DRM_PLANE_TYPE_PRIMARY,
DRM_PLANE_TYPE_CURSOR,
};
static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
if (ctx->suspended)
return;
atomic_set(&ctx->wait_vsync_event, 1);
/*
* Wait for DECON to signal the VSYNC interrupt, or return after a
* 50ms timeout (i.e. one frame at a 20 Hz refresh rate).
*/
if (!wait_event_timeout(ctx->wait_vsync_queue,
!atomic_read(&ctx->wait_vsync_event),
HZ/20))
DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
}
static void decon_clear_channels(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
unsigned int win, ch_enabled = 0;
/* Check if any channel is enabled. */
for (win = 0; win < WINDOWS_NR; win++) {
u32 val = readl(ctx->regs + WINCON(win));
if (val & WINCONx_ENWIN) {
val &= ~WINCONx_ENWIN;
writel(val, ctx->regs + WINCON(win));
ch_enabled = 1;
}
}
/* Wait for vsync, as disabling a channel takes effect at the next vsync */
if (ch_enabled)
decon_wait_for_vblank(ctx->crtc);
}
static int decon_ctx_initialize(struct decon_context *ctx,
struct drm_device *drm_dev)
{
ctx->drm_dev = drm_dev;
decon_clear_channels(ctx->crtc);
return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
}
static void decon_ctx_remove(struct decon_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}
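/*
* Worked example (hypothetical rates): with vclk at 266 MHz and a
* 148.5 MHz pixel clock, clkdiv = DIV_ROUND_UP(266000000, 148500000)
* = 2. The divider is clamped to the 8-bit field maximum of 0xff.
*/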
static u32 decon_calc_clkdiv(struct decon_context *ctx,
const struct drm_display_mode *mode)
{
unsigned long ideal_clk = mode->clock * 1000; /* mode->clock is in kHz */
u32 clkdiv;
/* Find the clock divider value that gets us closest to ideal_clk */
clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->vclk), ideal_clk);
return (clkdiv < 0x100) ? clkdiv : 0xff;
}
static void decon_commit(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
u32 val, clkdiv;
if (ctx->suspended)
return;
/* nothing to do if we haven't set the mode yet */
if (mode->htotal == 0 || mode->vtotal == 0)
return;
if (!ctx->i80_if) {
int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
/* setup vertical timing values. */
vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
val = VIDTCON0_VBPD(vbpd - 1) | VIDTCON0_VFPD(vfpd - 1);
writel(val, ctx->regs + VIDTCON0);
val = VIDTCON1_VSPW(vsync_len - 1);
writel(val, ctx->regs + VIDTCON1);
/* setup horizontal timing values. */
hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
/* setup horizontal timing values. */
val = VIDTCON2_HBPD(hbpd - 1) | VIDTCON2_HFPD(hfpd - 1);
writel(val, ctx->regs + VIDTCON2);
val = VIDTCON3_HSPW(hsync_len - 1);
writel(val, ctx->regs + VIDTCON3);
}
/* setup horizontal and vertical display size. */
val = VIDTCON4_LINEVAL(mode->vdisplay - 1) |
VIDTCON4_HOZVAL(mode->hdisplay - 1);
writel(val, ctx->regs + VIDTCON4);
writel(mode->vdisplay - 1, ctx->regs + LINECNT_OP_THRESHOLD);
/*
* register fields with the '_F' suffix are updated at vsync
* (i.e. at DMA start)
*/
val = VIDCON0_ENVID | VIDCON0_ENVID_F;
writel(val, ctx->regs + VIDCON0);
clkdiv = decon_calc_clkdiv(ctx, mode);
if (clkdiv > 1) {
val = VCLKCON1_CLKVAL_NUM_VCLK(clkdiv - 1);
writel(val, ctx->regs + VCLKCON1);
writel(val, ctx->regs + VCLKCON2);
}
val = readl(ctx->regs + DECON_UPDATE);
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
}
static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
u32 val;
if (ctx->suspended)
return -EPERM;
if (!test_and_set_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
val |= VIDINTCON0_INT_ENABLE;
if (!ctx->i80_if) {
val |= VIDINTCON0_INT_FRAME;
val &= ~VIDINTCON0_FRAMESEL0_MASK;
val |= VIDINTCON0_FRAMESEL0_VSYNC;
}
writel(val, ctx->regs + VIDINTCON0);
}
return 0;
}
static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
u32 val;
if (ctx->suspended)
return;
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
val &= ~VIDINTCON0_INT_ENABLE;
if (!ctx->i80_if)
val &= ~VIDINTCON0_INT_FRAME;
writel(val, ctx->regs + VIDINTCON0);
}
}
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
struct drm_framebuffer *fb)
{
unsigned long val;
int padding;
val = readl(ctx->regs + WINCON(win));
val &= ~WINCONx_BPPMODE_MASK;
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
val |= WINCONx_BPPMODE_16BPP_565;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_XRGB8888:
val |= WINCONx_BPPMODE_24BPP_xRGB;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_XBGR8888:
val |= WINCONx_BPPMODE_24BPP_xBGR;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_RGBX8888:
val |= WINCONx_BPPMODE_24BPP_RGBx;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_BGRX8888:
val |= WINCONx_BPPMODE_24BPP_BGRx;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ARGB8888:
val |= WINCONx_BPPMODE_32BPP_ARGB | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ABGR8888:
val |= WINCONx_BPPMODE_32BPP_ABGR | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_RGBA8888:
val |= WINCONx_BPPMODE_32BPP_RGBA | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_BGRA8888:
default:
val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
val |= WINCONx_BURSTLEN_16WORD;
break;
}
DRM_DEV_DEBUG_KMS(ctx->dev, "cpp = %d\n", fb->format->cpp[0]);
/*
* On Exynos, setting the DMA burst length to 16 words causes permanent
* tearing for very small buffers, e.g. a cursor buffer. Switching the
* burst mode based on plane size is not recommended, because plane size
* varies a lot towards the edge of the screen, and rapid movement then
* causes unstable DMA, resulting in IOMMU faults and tearing. A 64x64
* cursor, for instance, falls below MIN_FB_WIDTH_FOR_16WORD_BURST (128)
* and therefore uses the 8-word burst selected below.
*/
padding = (fb->pitches[0] / fb->format->cpp[0]) - fb->width;
if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
writel(val, ctx->regs + WINCON(win));
}
static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
{
unsigned int keycon0 = 0, keycon1 = 0;
keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
keycon1 = WxKEYCON1_COLVAL(0xffffffff);
writel(keycon0, ctx->regs + WKEYCON0_BASE(win));
writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
}
/**
* decon_shadow_protect_win() - disable updating values from shadow registers at vsync
*
* @ctx: display and enhancement controller context
* @win: window to protect registers for
* @protect: true to protect (disable updates)
*/
static void decon_shadow_protect_win(struct decon_context *ctx,
unsigned int win, bool protect)
{
u32 bits, val;
bits = SHADOWCON_WINx_PROTECT(win);
val = readl(ctx->regs + SHADOWCON);
if (protect)
val |= bits;
else
val &= ~bits;
writel(val, ctx->regs + SHADOWCON);
}
static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int i;
if (ctx->suspended)
return;
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, true);
}
static void decon_update_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
struct exynos_drm_plane_state *state =
to_exynos_plane_state(plane->base.state);
struct decon_context *ctx = crtc->ctx;
struct drm_framebuffer *fb = state->base.fb;
int padding;
unsigned long val, alpha;
unsigned int last_x;
unsigned int last_y;
unsigned int win = plane->index;
unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
return;
/*
* The SHADOWCON/PRTCON register controls when shadow register updates
* take effect.
*
* For example, if DMA starts while only the width field of a register
* has been programmed, the DECON hardware could malfunction. With the
* protect-window setting, register fields with the '_F' suffix are not
* latched at vsync; they are applied only once the window is
* unprotected again.
*/
/* buffer start address */
val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0);
writel(val, ctx->regs + VIDW_BUF_START(win));
padding = (pitch / cpp) - fb->width;
/* buffer size */
writel(fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
writel(fb->height, ctx->regs + VIDW_WHOLE_Y(win));
/* offset from the start of the buffer to read */
writel(state->src.x, ctx->regs + VIDW_OFFSET_X(win));
writel(state->src.y, ctx->regs + VIDW_OFFSET_Y(win));
DRM_DEV_DEBUG_KMS(ctx->dev, "start addr = 0x%lx\n",
(unsigned long)val);
DRM_DEV_DEBUG_KMS(ctx->dev, "ovl_width = %d, ovl_height = %d\n",
state->crtc.w, state->crtc.h);
val = VIDOSDxA_TOPLEFT_X(state->crtc.x) |
VIDOSDxA_TOPLEFT_Y(state->crtc.y);
writel(val, ctx->regs + VIDOSD_A(win));
last_x = state->crtc.x + state->crtc.w;
if (last_x)
last_x--;
last_y = state->crtc.y + state->crtc.h;
if (last_y)
last_y--;
val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y);
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEV_DEBUG_KMS(ctx->dev, "osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
state->crtc.x, state->crtc.y, last_x, last_y);
/* OSD alpha */
alpha = VIDOSDxC_ALPHA0_R_F(0x0) |
VIDOSDxC_ALPHA0_G_F(0x0) |
VIDOSDxC_ALPHA0_B_F(0x0);
writel(alpha, ctx->regs + VIDOSD_C(win));
alpha = VIDOSDxD_ALPHA1_R_F(0xff) |
VIDOSDxD_ALPHA1_G_F(0xff) |
VIDOSDxD_ALPHA1_B_F(0xff);
writel(alpha, ctx->regs + VIDOSD_D(win));
decon_win_set_pixfmt(ctx, win, fb);
/* hardware window 0 doesn't support color key. */
if (win != 0)
decon_win_set_colkey(ctx, win);
/* wincon */
val = readl(ctx->regs + WINCON(win));
val |= WINCONx_TRIPLE_BUF_MODE;
val |= WINCONx_ENWIN;
writel(val, ctx->regs + WINCON(win));
/* Enable DMA channel and unprotect windows */
decon_shadow_protect_win(ctx, win, false);
val = readl(ctx->regs + DECON_UPDATE);
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
}
static void decon_disable_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
struct decon_context *ctx = crtc->ctx;
unsigned int win = plane->index;
u32 val;
if (ctx->suspended)
return;
/* protect windows */
decon_shadow_protect_win(ctx, win, true);
/* wincon */
val = readl(ctx->regs + WINCON(win));
val &= ~WINCONx_ENWIN;
writel(val, ctx->regs + WINCON(win));
val = readl(ctx->regs + DECON_UPDATE);
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
}
static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int i;
if (ctx->suspended)
return;
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);
exynos_crtc_handle_event(crtc);
}
static void decon_init(struct decon_context *ctx)
{
u32 val;
writel(VIDCON0_SWRESET, ctx->regs + VIDCON0);
val = VIDOUTCON0_DISP_IF_0_ON;
if (!ctx->i80_if)
val |= VIDOUTCON0_RGBIF;
writel(val, ctx->regs + VIDOUTCON0);
writel(VCLKCON0_CLKVALUP | VCLKCON0_VCLKFREE, ctx->regs + VCLKCON0);
if (!ctx->i80_if)
writel(VIDCON1_VCLK_HOLD, ctx->regs + VIDCON1(0));
}
static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int ret;
if (!ctx->suspended)
return;
ret = pm_runtime_resume_and_get(ctx->dev);
if (ret < 0) {
DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
return;
}
decon_init(ctx);
/* if vblank was previously enabled, re-enable it. */
if (test_and_clear_bit(0, &ctx->irq_flags))
decon_enable_vblank(ctx->crtc);
decon_commit(ctx->crtc);
ctx->suspended = false;
}
static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int i;
if (ctx->suspended)
return;
/*
* We need to make sure that all windows are disabled before the
* controller is suspended. Otherwise we might try to scan out
* from a destroyed buffer later.
*/
for (i = 0; i < WINDOWS_NR; i++)
decon_disable_plane(crtc, &ctx->planes[i]);
pm_runtime_put_sync(ctx->dev);
ctx->suspended = true;
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.atomic_enable = decon_atomic_enable,
.atomic_disable = decon_atomic_disable,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.atomic_begin = decon_atomic_begin,
.update_plane = decon_update_plane,
.disable_plane = decon_disable_plane,
.atomic_flush = decon_atomic_flush,
};
static irqreturn_t decon_irq_handler(int irq, void *dev_id)
{
struct decon_context *ctx = (struct decon_context *)dev_id;
u32 val, clear_bit;
val = readl(ctx->regs + VIDINTCON1);
clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME;
if (val & clear_bit)
writel(clear_bit, ctx->regs + VIDINTCON1);
/* check whether the crtc has already been detached from the encoder */
if (!ctx->drm_dev)
goto out;
if (!ctx->i80_if) {
drm_crtc_handle_vblank(&ctx->crtc->base);
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
atomic_set(&ctx->wait_vsync_event, 0);
wake_up(&ctx->wait_vsync_queue);
}
}
out:
return IRQ_HANDLED;
}
static int decon_bind(struct device *dev, struct device *master, void *data)
{
struct decon_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_plane *exynos_plane;
unsigned int i;
int ret;
ret = decon_ctx_initialize(ctx, drm_dev);
if (ret) {
DRM_DEV_ERROR(dev, "decon_ctx_initialize failed.\n");
return ret;
}
for (i = 0; i < WINDOWS_NR; i++) {
ctx->configs[i].pixel_formats = decon_formats;
ctx->configs[i].num_pixel_formats = ARRAY_SIZE(decon_formats);
ctx->configs[i].zpos = i;
ctx->configs[i].type = decon_win_types[i];
ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
&ctx->configs[i]);
if (ret)
return ret;
}
exynos_plane = &ctx->planes[DEFAULT_WIN];
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
EXYNOS_DISPLAY_TYPE_LCD, &decon_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
decon_ctx_remove(ctx);
return PTR_ERR(ctx->crtc);
}
if (ctx->encoder)
exynos_dpi_bind(drm_dev, ctx->encoder);
return 0;
}
static void decon_unbind(struct device *dev, struct device *master,
void *data)
{
struct decon_context *ctx = dev_get_drvdata(dev);
decon_atomic_disable(ctx->crtc);
if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);
decon_ctx_remove(ctx);
}
static const struct component_ops decon_component_ops = {
.bind = decon_bind,
.unbind = decon_unbind,
};
static int decon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct decon_context *ctx;
struct device_node *i80_if_timings;
int ret;
if (!dev->of_node)
return -ENODEV;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->dev = dev;
ctx->suspended = true;
i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
if (i80_if_timings)
ctx->i80_if = true;
of_node_put(i80_if_timings);
ctx->regs = of_iomap(dev->of_node, 0);
if (!ctx->regs)
return -ENOMEM;
ctx->pclk = devm_clk_get(dev, "pclk_decon0");
if (IS_ERR(ctx->pclk)) {
dev_err(dev, "failed to get bus clock pclk\n");
ret = PTR_ERR(ctx->pclk);
goto err_iounmap;
}
ctx->aclk = devm_clk_get(dev, "aclk_decon0");
if (IS_ERR(ctx->aclk)) {
dev_err(dev, "failed to get bus clock aclk\n");
ret = PTR_ERR(ctx->aclk);
goto err_iounmap;
}
ctx->eclk = devm_clk_get(dev, "decon0_eclk");
if (IS_ERR(ctx->eclk)) {
dev_err(dev, "failed to get eclock\n");
ret = PTR_ERR(ctx->eclk);
goto err_iounmap;
}
ctx->vclk = devm_clk_get(dev, "decon0_vclk");
if (IS_ERR(ctx->vclk)) {
dev_err(dev, "failed to get vclock\n");
ret = PTR_ERR(ctx->vclk);
goto err_iounmap;
}
ret = platform_get_irq_byname(pdev, ctx->i80_if ? "lcd_sys" : "vsync");
if (ret < 0)
goto err_iounmap;
ret = devm_request_irq(dev, ret, decon_irq_handler, 0, "drm_decon", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
goto err_iounmap;
}
init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
platform_set_drvdata(pdev, ctx);
ctx->encoder = exynos_dpi_probe(dev);
if (IS_ERR(ctx->encoder)) {
ret = PTR_ERR(ctx->encoder);
goto err_iounmap;
}
pm_runtime_enable(dev);
ret = component_add(dev, &decon_component_ops);
if (ret)
goto err_disable_pm_runtime;
return ret;
err_disable_pm_runtime:
pm_runtime_disable(dev);
err_iounmap:
iounmap(ctx->regs);
return ret;
}
static int decon_remove(struct platform_device *pdev)
{
struct decon_context *ctx = dev_get_drvdata(&pdev->dev);
pm_runtime_disable(&pdev->dev);
iounmap(ctx->regs);
component_del(&pdev->dev, &decon_component_ops);
return 0;
}
static int exynos7_decon_suspend(struct device *dev)
{
struct decon_context *ctx = dev_get_drvdata(dev);
clk_disable_unprepare(ctx->vclk);
clk_disable_unprepare(ctx->eclk);
clk_disable_unprepare(ctx->aclk);
clk_disable_unprepare(ctx->pclk);
return 0;
}
static int exynos7_decon_resume(struct device *dev)
{
struct decon_context *ctx = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(ctx->pclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the pclk [%d]\n",
ret);
goto err_pclk_enable;
}
ret = clk_prepare_enable(ctx->aclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the aclk [%d]\n",
ret);
goto err_aclk_enable;
}
ret = clk_prepare_enable(ctx->eclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the eclk [%d]\n",
ret);
goto err_eclk_enable;
}
ret = clk_prepare_enable(ctx->vclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the vclk [%d]\n",
ret);
goto err_vclk_enable;
}
return 0;
err_vclk_enable:
clk_disable_unprepare(ctx->eclk);
err_eclk_enable:
clk_disable_unprepare(ctx->aclk);
err_aclk_enable:
clk_disable_unprepare(ctx->pclk);
err_pclk_enable:
return ret;
}
static DEFINE_RUNTIME_DEV_PM_OPS(exynos7_decon_pm_ops, exynos7_decon_suspend,
exynos7_decon_resume, NULL);
struct platform_driver decon_driver = {
.probe = decon_probe,
.remove = decon_remove,
.driver = {
.name = "exynos-decon",
.pm = pm_ptr(&exynos7_decon_pm_ops),
.of_match_table = decon_driver_dt_match,
},
};
| linux-master | drivers/gpu/drm/exynos/exynos7_drm_decon.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2012 Samsung Electronics Co., Ltd.
// Author: Inki Dae <[email protected]>
// Author: Andrzej Hajda <[email protected]>
#include <linux/dma-map-ops.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#if defined(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#else
#define arm_iommu_create_mapping(...) ({ NULL; })
#define arm_iommu_attach_device(...) ({ -ENODEV; })
#define arm_iommu_release_mapping(...) ({ })
#define arm_iommu_detach_device(...) ({ })
#define to_dma_iommu_mapping(dev) NULL
#endif
#if !defined(CONFIG_IOMMU_DMA)
#define iommu_dma_init_domain(...) ({ -EINVAL; })
#endif
#define EXYNOS_DEV_ADDR_START 0x20000000
#define EXYNOS_DEV_ADDR_SIZE 0x40000000
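/*
* The two defines above carve out a 1 GiB (0x40000000) device address
* window starting at 0x20000000 for the ARM DMA-IOMMU mapping created
* in exynos_drm_register_dma().
*/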
/*
* drm_iommu_attach_device - attach a device to the iommu mapping
*
* @drm_dev: DRM device
* @subdrv_dev: device to be attached
*
* This function should be called by sub drivers to attach themselves to
* the iommu mapping.
*/
static int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
int ret = 0;
if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
dev_name(subdrv_dev));
return -EINVAL;
}
dma_set_max_seg_size(subdrv_dev, DMA_BIT_MASK(32));
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
/*
* Keep the original DMA mapping of the sub-device and
* restore it on Exynos DRM detach, otherwise the DMA
* framework considers it as IOMMU-less during the next
* probe (in case of deferred probe or modular build)
*/
*dma_priv = to_dma_iommu_mapping(subdrv_dev);
if (*dma_priv)
arm_iommu_detach_device(subdrv_dev);
ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
} else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
ret = iommu_attach_device(priv->mapping, subdrv_dev);
}
return ret;
}
/*
* drm_iommu_detach_device - detach a device's address space mapping
*
* @drm_dev: DRM device
* @subdrv_dev: device to be detached
*
* This function should be called by sub drivers to detach themselves
* from the iommu mapping.
*/
static void drm_iommu_detach_device(struct drm_device *drm_dev,
struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
arm_iommu_detach_device(subdrv_dev);
arm_iommu_attach_device(subdrv_dev, *dma_priv);
} else if (IS_ENABLED(CONFIG_IOMMU_DMA))
iommu_detach_device(priv->mapping, subdrv_dev);
}
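/*
 * exynos_drm_register_dma - register a sub-device for DMA operations
 *
 * The first sub-device to call this becomes the common DMA device for
 * the whole DRM device. When an IOMMU is used, the mapping is created
 * lazily on first registration and every subsequent sub-device is
 * attached to that shared mapping.
 */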
int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
void **dma_priv)
{
struct exynos_drm_private *priv = drm->dev_private;
if (!priv->dma_dev) {
priv->dma_dev = dev;
DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
dev_name(dev));
}
if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
return 0;
if (!priv->mapping) {
void *mapping;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
mapping = arm_iommu_create_mapping(&platform_bus_type,
EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
else if (IS_ENABLED(CONFIG_IOMMU_DMA))
mapping = iommu_get_domain_for_dev(priv->dma_dev);
else
mapping = ERR_PTR(-ENODEV);
if (IS_ERR(mapping))
return PTR_ERR(mapping);
priv->mapping = mapping;
}
return drm_iommu_attach_device(drm, dev, dma_priv);
}
void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
void **dma_priv)
{
if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
drm_iommu_detach_device(drm, dev, dma_priv);
}
void exynos_drm_cleanup_dma(struct drm_device *drm)
{
struct exynos_drm_private *priv = drm->dev_private;
if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
return;
arm_iommu_release_mapping(priv->mapping);
priv->mapping = NULL;
priv->dma_dev = NULL;
}
| linux-master | drivers/gpu/drm/exynos/exynos_drm_dma.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_fimd.c
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Authors:
* Joonyoung Shim <[email protected]>
* Inki Dae <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <video/samsung_fimd.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
/*
 * FIMD stands for Fully Interactive Mobile Display and,
 * as a display controller, it transfers contents drawn in memory
 * to an LCD panel through display interfaces such as RGB or
 * the CPU (I80) interface.
 */
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
/* position control register for hardware windows 0, 2 ~ 4. */
#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16)
/*
 * size control register for hardware window 0 and alpha control register
 * for hardware windows 1 ~ 4
 */
#define VIDOSD_C(win) (VIDOSD_BASE + 0x08 + (win) * 16)
/* size control register for hardware windows 1 ~ 2. */
#define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16)
#define VIDWnALPHA0(win) (VIDW_ALPHA + 0x00 + (win) * 8)
#define VIDWnALPHA1(win) (VIDW_ALPHA + 0x04 + (win) * 8)
#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8)
#define VIDWx_BUF_START_S(win, buf) (VIDW_BUF_START_S(buf) + (win) * 8)
#define VIDWx_BUF_END(win, buf) (VIDW_BUF_END(buf) + (win) * 8)
#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4)
/* color key control register for hardware windows 1 ~ 4. */
#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + ((x - 1) * 8))
/* color key value register for hardware windows 1 ~ 4. */
#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8))
/* I80 trigger control register */
#define TRIGCON 0x1A4
#define TRGMODE_ENABLE (1 << 0)
#define SWTRGCMD_ENABLE (1 << 1)
/* Supported only on Exynos3250, 3472, 5260, 5410, 5420 and 5422. */
#define HWTRGEN_ENABLE (1 << 3)
#define HWTRGMASK_ENABLE (1 << 4)
/* Supported only on Exynos3250, 3472, 5260, 5420 and 5422. */
#define HWTRIGEN_PER_ENABLE (1 << 31)
/* display mode change control register (all SoCs except exynos4) */
#define VIDOUT_CON 0x000
#define VIDOUT_CON_F_I80_LDI0 (0x2 << 8)
/* I80 interface control for main LDI register */
#define I80IFCONFAx(x) (0x1B0 + (x) * 4)
#define I80IFCONFBx(x) (0x1B8 + (x) * 4)
#define LCD_CS_SETUP(x) ((x) << 16)
#define LCD_WR_SETUP(x) ((x) << 12)
#define LCD_WR_ACTIVE(x) ((x) << 8)
#define LCD_WR_HOLD(x) ((x) << 4)
#define I80IFEN_ENABLE (1 << 0)
/* FIMD has five hardware windows in total. */
#define WINDOWS_NR 5
/* HW trigger flag on i80 panel. */
#define I80_HW_TRG (1 << 1)
struct fimd_driver_data {
unsigned int timing_base;
unsigned int lcdblk_offset;
unsigned int lcdblk_vt_shift;
unsigned int lcdblk_bypass_shift;
unsigned int lcdblk_mic_bypass_shift;
unsigned int trg_type;
unsigned int has_shadowcon:1;
unsigned int has_clksel:1;
unsigned int has_limited_fmt:1;
unsigned int has_vidoutcon:1;
unsigned int has_vtsel:1;
unsigned int has_mic_bypass:1;
unsigned int has_dp_clk:1;
unsigned int has_hw_trigger:1;
unsigned int has_trigger_per_te:1;
unsigned int has_bgr_support:1;
};
static struct fimd_driver_data s3c64xx_fimd_driver_data = {
.timing_base = 0x0,
.has_clksel = 1,
.has_limited_fmt = 1,
};
static struct fimd_driver_data s5pv210_fimd_driver_data = {
.timing_base = 0x0,
.has_shadowcon = 1,
.has_clksel = 1,
};
static struct fimd_driver_data exynos3_fimd_driver_data = {
.timing_base = 0x20000,
.lcdblk_offset = 0x210,
.lcdblk_bypass_shift = 1,
.has_shadowcon = 1,
.has_vidoutcon = 1,
};
static struct fimd_driver_data exynos4_fimd_driver_data = {
.timing_base = 0x0,
.lcdblk_offset = 0x210,
.lcdblk_vt_shift = 10,
.lcdblk_bypass_shift = 1,
.has_shadowcon = 1,
.has_vtsel = 1,
.has_bgr_support = 1,
};
static struct fimd_driver_data exynos5_fimd_driver_data = {
.timing_base = 0x20000,
.lcdblk_offset = 0x214,
.lcdblk_vt_shift = 24,
.lcdblk_bypass_shift = 15,
.has_shadowcon = 1,
.has_vidoutcon = 1,
.has_vtsel = 1,
.has_dp_clk = 1,
.has_bgr_support = 1,
};
static struct fimd_driver_data exynos5420_fimd_driver_data = {
.timing_base = 0x20000,
.lcdblk_offset = 0x214,
.lcdblk_vt_shift = 24,
.lcdblk_bypass_shift = 15,
.lcdblk_mic_bypass_shift = 11,
.has_shadowcon = 1,
.has_vidoutcon = 1,
.has_vtsel = 1,
.has_mic_bypass = 1,
.has_dp_clk = 1,
.has_bgr_support = 1,
};
struct fimd_context {
struct device *dev;
struct drm_device *drm_dev;
void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
struct clk *bus_clk;
struct clk *lcd_clk;
void __iomem *regs;
struct regmap *sysreg;
unsigned long irq_flags;
u32 vidcon0;
u32 vidcon1;
u32 vidout_con;
u32 i80ifcon;
bool i80_if;
bool suspended;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
atomic_t win_updated;
atomic_t triggering;
u32 clkdiv;
const struct fimd_driver_data *driver_data;
struct drm_encoder *encoder;
struct exynos_drm_clk dp_clk;
};
static const struct of_device_id fimd_driver_dt_match[] = {
{ .compatible = "samsung,s3c6400-fimd",
.data = &s3c64xx_fimd_driver_data },
{ .compatible = "samsung,s5pv210-fimd",
.data = &s5pv210_fimd_driver_data },
{ .compatible = "samsung,exynos3250-fimd",
.data = &exynos3_fimd_driver_data },
{ .compatible = "samsung,exynos4210-fimd",
.data = &exynos4_fimd_driver_data },
{ .compatible = "samsung,exynos5250-fimd",
.data = &exynos5_fimd_driver_data },
{ .compatible = "samsung,exynos5420-fimd",
.data = &exynos5420_fimd_driver_data },
{},
};
MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
static const enum drm_plane_type fimd_win_types[WINDOWS_NR] = {
DRM_PLANE_TYPE_PRIMARY,
DRM_PLANE_TYPE_OVERLAY,
DRM_PLANE_TYPE_OVERLAY,
DRM_PLANE_TYPE_OVERLAY,
DRM_PLANE_TYPE_CURSOR,
};
static const uint32_t fimd_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
static const uint32_t fimd_extended_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
};
static const unsigned int capabilities[WINDOWS_NR] = {
0,
EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
};
static inline void fimd_set_bits(struct fimd_context *ctx, u32 reg, u32 mask,
u32 val)
{
val = (val & mask) | (readl(ctx->regs + reg) & ~mask);
writel(val, ctx->regs + reg);
}
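/*
 * For example, the call below (from fimd_win_set_bldmod() later in this
 * file) updates only the bits covered by BLENDCON_NEW_MASK and leaves
 * the rest of the BLENDCON register untouched:
 *
 *	fimd_set_bits(ctx, BLENDCON, BLENDCON_NEW_MASK,
 *		      BLENDCON_NEW_8BIT_ALPHA_VALUE);
 */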
static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
u32 val;
if (ctx->suspended)
return -EPERM;
if (!test_and_set_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
val |= VIDINTCON0_INT_ENABLE;
if (ctx->i80_if) {
val |= VIDINTCON0_INT_I80IFDONE;
val |= VIDINTCON0_INT_SYSMAINCON;
val &= ~VIDINTCON0_INT_SYSSUBCON;
} else {
val |= VIDINTCON0_INT_FRAME;
val &= ~VIDINTCON0_FRAMESEL0_MASK;
val |= VIDINTCON0_FRAMESEL0_FRONTPORCH;
val &= ~VIDINTCON0_FRAMESEL1_MASK;
val |= VIDINTCON0_FRAMESEL1_NONE;
}
writel(val, ctx->regs + VIDINTCON0);
}
return 0;
}
static void fimd_disable_vblank(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
u32 val;
if (ctx->suspended)
return;
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
val &= ~VIDINTCON0_INT_ENABLE;
if (ctx->i80_if) {
val &= ~VIDINTCON0_INT_I80IFDONE;
val &= ~VIDINTCON0_INT_SYSMAINCON;
val &= ~VIDINTCON0_INT_SYSSUBCON;
} else
val &= ~VIDINTCON0_INT_FRAME;
writel(val, ctx->regs + VIDINTCON0);
}
}
static void fimd_wait_for_vblank(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
if (ctx->suspended)
return;
atomic_set(&ctx->wait_vsync_event, 1);
	/*
	 * wait for FIMD to signal the VSYNC interrupt, or return after a
	 * 50ms timeout (i.e. a 20 Hz refresh rate).
	 */
if (!wait_event_timeout(ctx->wait_vsync_queue,
!atomic_read(&ctx->wait_vsync_event),
HZ/20))
DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
}
static void fimd_enable_video_output(struct fimd_context *ctx, unsigned int win,
bool enable)
{
u32 val = readl(ctx->regs + WINCON(win));
if (enable)
val |= WINCONx_ENWIN;
else
val &= ~WINCONx_ENWIN;
writel(val, ctx->regs + WINCON(win));
}
static void fimd_enable_shadow_channel_path(struct fimd_context *ctx,
unsigned int win,
bool enable)
{
u32 val = readl(ctx->regs + SHADOWCON);
if (enable)
val |= SHADOWCON_CHx_ENABLE(win);
else
val &= ~SHADOWCON_CHx_ENABLE(win);
writel(val, ctx->regs + SHADOWCON);
}
static int fimd_clear_channels(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
unsigned int win, ch_enabled = 0;
int ret;
/* Hardware is in unknown state, so ensure it gets enabled properly */
ret = pm_runtime_resume_and_get(ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "failed to enable FIMD device.\n");
return ret;
}
clk_prepare_enable(ctx->bus_clk);
clk_prepare_enable(ctx->lcd_clk);
/* Check if any channel is enabled. */
for (win = 0; win < WINDOWS_NR; win++) {
u32 val = readl(ctx->regs + WINCON(win));
if (val & WINCONx_ENWIN) {
fimd_enable_video_output(ctx, win, false);
if (ctx->driver_data->has_shadowcon)
fimd_enable_shadow_channel_path(ctx, win,
false);
ch_enabled = 1;
}
}
	/* Wait for vsync, as disabling a channel takes effect at the next vsync */
if (ch_enabled) {
ctx->suspended = false;
fimd_enable_vblank(ctx->crtc);
fimd_wait_for_vblank(ctx->crtc);
fimd_disable_vblank(ctx->crtc);
ctx->suspended = true;
}
clk_disable_unprepare(ctx->lcd_clk);
clk_disable_unprepare(ctx->bus_clk);
pm_runtime_put(ctx->dev);
return 0;
}
static int fimd_atomic_check(struct exynos_drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct drm_display_mode *mode = &state->adjusted_mode;
struct fimd_context *ctx = crtc->ctx;
unsigned long ideal_clk, lcd_rate;
u32 clkdiv;
if (mode->clock == 0) {
DRM_DEV_ERROR(ctx->dev, "Mode has zero clock value.\n");
return -EINVAL;
}
ideal_clk = mode->clock * 1000;
if (ctx->i80_if) {
		/*
		 * The frame-done interrupt should occur before the
		 * next TE signal.
		 */
ideal_clk *= 2;
}
lcd_rate = clk_get_rate(ctx->lcd_clk);
if (2 * lcd_rate < ideal_clk) {
DRM_DEV_ERROR(ctx->dev,
"sclk_fimd clock too low(%lu) for requested pixel clock(%lu)\n",
lcd_rate, ideal_clk);
return -EINVAL;
}
/* Find the clock divider value that gets us closest to ideal_clk */
clkdiv = DIV_ROUND_CLOSEST(lcd_rate, ideal_clk);
if (clkdiv >= 0x200) {
DRM_DEV_ERROR(ctx->dev, "requested pixel clock(%lu) too low\n",
ideal_clk);
return -EINVAL;
}
ctx->clkdiv = (clkdiv < 0x100) ? clkdiv : 0xff;
return 0;
}
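/*
 * Worked example with illustrative numbers: for a mode clock of
 * 71000 kHz on an RGB-interface panel, ideal_clk is 71000000 Hz. If
 * sclk_fimd runs at 142 MHz, then
 * clkdiv = DIV_ROUND_CLOSEST(142000000, 71000000) = 2, and
 * fimd_commit() programs VIDCON0_CLKVAL_F(1) | VIDCON0_CLKDIR to
 * divide the source clock by two.
 */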
static void fimd_setup_trigger(struct fimd_context *ctx)
{
void __iomem *timing_base = ctx->regs + ctx->driver_data->timing_base;
u32 trg_type = ctx->driver_data->trg_type;
u32 val = readl(timing_base + TRIGCON);
val &= ~(TRGMODE_ENABLE);
if (trg_type == I80_HW_TRG) {
if (ctx->driver_data->has_hw_trigger)
val |= HWTRGEN_ENABLE | HWTRGMASK_ENABLE;
if (ctx->driver_data->has_trigger_per_te)
val |= HWTRIGEN_PER_ENABLE;
} else {
val |= TRGMODE_ENABLE;
}
writel(val, timing_base + TRIGCON);
}
static void fimd_commit(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
const struct fimd_driver_data *driver_data = ctx->driver_data;
	void __iomem *timing_base = ctx->regs + driver_data->timing_base;
u32 val;
if (ctx->suspended)
return;
/* nothing to do if we haven't set the mode yet */
if (mode->htotal == 0 || mode->vtotal == 0)
return;
if (ctx->i80_if) {
val = ctx->i80ifcon | I80IFEN_ENABLE;
writel(val, timing_base + I80IFCONFAx(0));
/* disable auto frame rate */
writel(0, timing_base + I80IFCONFBx(0));
/* set video type selection to I80 interface */
if (driver_data->has_vtsel && ctx->sysreg &&
regmap_update_bits(ctx->sysreg,
driver_data->lcdblk_offset,
0x3 << driver_data->lcdblk_vt_shift,
0x1 << driver_data->lcdblk_vt_shift)) {
DRM_DEV_ERROR(ctx->dev,
"Failed to update sysreg for I80 i/f.\n");
return;
}
} else {
int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
u32 vidcon1;
/* setup polarity values */
vidcon1 = ctx->vidcon1;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
vidcon1 |= VIDCON1_INV_VSYNC;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
vidcon1 |= VIDCON1_INV_HSYNC;
writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
/* setup vertical timing values. */
vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
val = VIDTCON0_VBPD(vbpd - 1) |
VIDTCON0_VFPD(vfpd - 1) |
VIDTCON0_VSPW(vsync_len - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
/* setup horizontal timing values. */
hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
val = VIDTCON1_HBPD(hbpd - 1) |
VIDTCON1_HFPD(hfpd - 1) |
VIDTCON1_HSPW(hsync_len - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
}
if (driver_data->has_vidoutcon)
writel(ctx->vidout_con, timing_base + VIDOUT_CON);
/* set bypass selection */
if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
driver_data->lcdblk_offset,
0x1 << driver_data->lcdblk_bypass_shift,
0x1 << driver_data->lcdblk_bypass_shift)) {
DRM_DEV_ERROR(ctx->dev,
"Failed to update sysreg for bypass setting.\n");
return;
}
/* TODO: When MIC is enabled for display path, the lcdblk_mic_bypass
* bit should be cleared.
*/
if (driver_data->has_mic_bypass && ctx->sysreg &&
regmap_update_bits(ctx->sysreg,
driver_data->lcdblk_offset,
0x1 << driver_data->lcdblk_mic_bypass_shift,
0x1 << driver_data->lcdblk_mic_bypass_shift)) {
DRM_DEV_ERROR(ctx->dev,
"Failed to update sysreg for bypass mic.\n");
return;
}
/* setup horizontal and vertical display size. */
val = VIDTCON2_LINEVAL(mode->vdisplay - 1) |
VIDTCON2_HOZVAL(mode->hdisplay - 1) |
VIDTCON2_LINEVAL_E(mode->vdisplay - 1) |
VIDTCON2_HOZVAL_E(mode->hdisplay - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
fimd_setup_trigger(ctx);
	/*
	 * register fields with the '_F' prefix are updated at vsync
	 * (i.e. at DMA start)
	 */
val = ctx->vidcon0;
val |= VIDCON0_ENVID | VIDCON0_ENVID_F;
if (ctx->driver_data->has_clksel)
val |= VIDCON0_CLKSEL_LCD;
if (ctx->clkdiv > 1)
val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR;
writel(val, ctx->regs + VIDCON0);
}
static void fimd_win_set_bldeq(struct fimd_context *ctx, unsigned int win,
unsigned int alpha, unsigned int pixel_alpha)
{
u32 mask = BLENDEQ_A_FUNC_F(0xf) | BLENDEQ_B_FUNC_F(0xf);
u32 val = 0;
switch (pixel_alpha) {
case DRM_MODE_BLEND_PIXEL_NONE:
case DRM_MODE_BLEND_COVERAGE:
val |= BLENDEQ_A_FUNC_F(BLENDEQ_ALPHA_A);
val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
break;
case DRM_MODE_BLEND_PREMULTI:
default:
if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
val |= BLENDEQ_A_FUNC_F(BLENDEQ_ALPHA0);
val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
} else {
val |= BLENDEQ_A_FUNC_F(BLENDEQ_ONE);
val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
}
break;
}
fimd_set_bits(ctx, BLENDEQx(win), mask, val);
}
static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win,
unsigned int alpha, unsigned int pixel_alpha)
{
u32 win_alpha_l = (alpha >> 8) & 0xf;
u32 win_alpha_h = alpha >> 12;
u32 val = 0;
switch (pixel_alpha) {
case DRM_MODE_BLEND_PIXEL_NONE:
break;
case DRM_MODE_BLEND_COVERAGE:
case DRM_MODE_BLEND_PREMULTI:
default:
val |= WINCON1_ALPHA_SEL;
val |= WINCON1_BLD_PIX;
val |= WINCON1_ALPHA_MUL;
break;
}
fimd_set_bits(ctx, WINCON(win), WINCONx_BLEND_MODE_MASK, val);
/* OSD alpha */
val = VIDISD14C_ALPHA0_R(win_alpha_h) |
VIDISD14C_ALPHA0_G(win_alpha_h) |
VIDISD14C_ALPHA0_B(win_alpha_h) |
VIDISD14C_ALPHA1_R(0x0) |
VIDISD14C_ALPHA1_G(0x0) |
VIDISD14C_ALPHA1_B(0x0);
writel(val, ctx->regs + VIDOSD_C(win));
val = VIDW_ALPHA_R(win_alpha_l) | VIDW_ALPHA_G(win_alpha_l) |
VIDW_ALPHA_B(win_alpha_l);
writel(val, ctx->regs + VIDWnALPHA0(win));
val = VIDW_ALPHA_R(0x0) | VIDW_ALPHA_G(0x0) |
VIDW_ALPHA_B(0x0);
writel(val, ctx->regs + VIDWnALPHA1(win));
fimd_set_bits(ctx, BLENDCON, BLENDCON_NEW_MASK,
BLENDCON_NEW_8BIT_ALPHA_VALUE);
}
static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
struct drm_framebuffer *fb, int width)
{
struct exynos_drm_plane plane = ctx->planes[win];
struct exynos_drm_plane_state *state =
to_exynos_plane_state(plane.base.state);
uint32_t pixel_format = fb->format->format;
unsigned int alpha = state->base.alpha;
u32 val = WINCONx_ENWIN;
unsigned int pixel_alpha;
if (fb->format->has_alpha)
pixel_alpha = state->base.pixel_blend_mode;
else
pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
	/*
	 * On s3c64xx, hardware window 0 doesn't support an alpha channel,
	 * so if the requested format is ARGB8888, change it to XRGB8888.
	 */
if (ctx->driver_data->has_limited_fmt && !win) {
if (pixel_format == DRM_FORMAT_ARGB8888)
pixel_format = DRM_FORMAT_XRGB8888;
}
switch (pixel_format) {
case DRM_FORMAT_C8:
val |= WINCON0_BPPMODE_8BPP_PALETTE;
val |= WINCONx_BURSTLEN_8WORD;
val |= WINCONx_BYTSWP;
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_XBGR1555:
val |= WINCON0_BPPMODE_16BPP_1555;
val |= WINCONx_HAWSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
val |= WINCON0_BPPMODE_16BPP_565;
val |= WINCONx_HAWSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
val |= WINCON0_BPPMODE_24BPP_888;
val |= WINCONx_WSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
default:
val |= WINCON1_BPPMODE_25BPP_A1888;
val |= WINCONx_WSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
}
switch (pixel_format) {
case DRM_FORMAT_XBGR1555:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_BGR565:
writel(WIN_RGB_ORDER_REVERSE, ctx->regs + WIN_RGB_ORDER(win));
break;
default:
writel(WIN_RGB_ORDER_FORWARD, ctx->regs + WIN_RGB_ORDER(win));
break;
}
	/*
	 * Setting the DMA burst to 16 words causes permanent tearing for
	 * very small buffers, e.g. the cursor buffer. Switching the burst
	 * mode based on the plane size is not recommended, as the plane
	 * size varies a lot towards the end of the screen and rapid
	 * movement causes unstable DMA, but it is still better to change
	 * the burst length than to display garbage.
	 */
if (width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_4WORD;
}
fimd_set_bits(ctx, WINCON(win), ~WINCONx_BLEND_MODE_MASK, val);
/* hardware window 0 doesn't support alpha channel. */
if (win != 0) {
fimd_win_set_bldmod(ctx, win, alpha, pixel_alpha);
fimd_win_set_bldeq(ctx, win, alpha, pixel_alpha);
}
}
static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
{
unsigned int keycon0 = 0, keycon1 = 0;
keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
keycon1 = WxKEYCON1_COLVAL(0xffffffff);
writel(keycon0, ctx->regs + WKEYCON0_BASE(win));
writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
}
/**
* fimd_shadow_protect_win() - disable updating values from shadow registers at vsync
*
* @ctx: local driver data
* @win: window to protect registers for
 * @protect: true to protect (disable updates)
*/
static void fimd_shadow_protect_win(struct fimd_context *ctx,
unsigned int win, bool protect)
{
u32 reg, bits, val;
	/*
	 * The SHADOWCON/PRTCON register controls update timing.
	 *
	 * For example, if DMA were started while only the width value of
	 * a register had been set, the FIMD hardware could malfunction.
	 * While a window is protected, register fields with the '_F'
	 * prefix are not updated at vsync; they are applied once the
	 * window is unprotected again.
	 */
if (ctx->driver_data->has_shadowcon) {
reg = SHADOWCON;
bits = SHADOWCON_WINx_PROTECT(win);
} else {
reg = PRTCON;
bits = PRTCON_PROTECT;
}
val = readl(ctx->regs + reg);
if (protect)
val |= bits;
else
val &= ~bits;
writel(val, ctx->regs + reg);
}
static void fimd_atomic_begin(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
int i;
if (ctx->suspended)
return;
for (i = 0; i < WINDOWS_NR; i++)
fimd_shadow_protect_win(ctx, i, true);
}
static void fimd_atomic_flush(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
int i;
if (ctx->suspended)
return;
for (i = 0; i < WINDOWS_NR; i++)
fimd_shadow_protect_win(ctx, i, false);
exynos_crtc_handle_event(crtc);
}
static void fimd_update_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
struct exynos_drm_plane_state *state =
to_exynos_plane_state(plane->base.state);
struct fimd_context *ctx = crtc->ctx;
struct drm_framebuffer *fb = state->base.fb;
dma_addr_t dma_addr;
unsigned long val, size, offset;
unsigned int last_x, last_y, buf_offsize, line_size;
unsigned int win = plane->index;
unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
return;
offset = state->src.x * cpp;
offset += state->src.y * pitch;
/* buffer start address */
dma_addr = exynos_drm_fb_dma_addr(fb, 0) + offset;
val = (unsigned long)dma_addr;
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
size = pitch * state->crtc.h;
val = (unsigned long)(dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
DRM_DEV_DEBUG_KMS(ctx->dev,
"start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
(unsigned long)dma_addr, val, size);
DRM_DEV_DEBUG_KMS(ctx->dev, "ovl_width = %d, ovl_height = %d\n",
state->crtc.w, state->crtc.h);
/* buffer size */
buf_offsize = pitch - (state->crtc.w * cpp);
line_size = state->crtc.w * cpp;
val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
VIDW_BUF_SIZE_PAGEWIDTH_E(line_size);
writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
/* OSD position */
val = VIDOSDxA_TOPLEFT_X(state->crtc.x) |
VIDOSDxA_TOPLEFT_Y(state->crtc.y) |
VIDOSDxA_TOPLEFT_X_E(state->crtc.x) |
VIDOSDxA_TOPLEFT_Y_E(state->crtc.y);
writel(val, ctx->regs + VIDOSD_A(win));
last_x = state->crtc.x + state->crtc.w;
if (last_x)
last_x--;
last_y = state->crtc.y + state->crtc.h;
if (last_y)
last_y--;
val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEV_DEBUG_KMS(ctx->dev,
"osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
state->crtc.x, state->crtc.y, last_x, last_y);
/* OSD size */
if (win != 3 && win != 4) {
u32 offset = VIDOSD_D(win);
if (win == 0)
offset = VIDOSD_C(win);
val = state->crtc.w * state->crtc.h;
writel(val, ctx->regs + offset);
DRM_DEV_DEBUG_KMS(ctx->dev, "osd size = 0x%x\n",
(unsigned int)val);
}
fimd_win_set_pixfmt(ctx, win, fb, state->src.w);
/* hardware window 0 doesn't support color key. */
if (win != 0)
fimd_win_set_colkey(ctx, win);
fimd_enable_video_output(ctx, win, true);
if (ctx->driver_data->has_shadowcon)
fimd_enable_shadow_channel_path(ctx, win, true);
if (ctx->i80_if)
atomic_set(&ctx->win_updated, 1);
}
static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
struct fimd_context *ctx = crtc->ctx;
unsigned int win = plane->index;
if (ctx->suspended)
return;
fimd_enable_video_output(ctx, win, false);
if (ctx->driver_data->has_shadowcon)
fimd_enable_shadow_channel_path(ctx, win, false);
}
static void fimd_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
if (!ctx->suspended)
return;
ctx->suspended = false;
if (pm_runtime_resume_and_get(ctx->dev) < 0) {
dev_warn(ctx->dev, "failed to enable FIMD device.\n");
return;
}
	/* if vblank was previously enabled, enable it again. */
if (test_and_clear_bit(0, &ctx->irq_flags))
fimd_enable_vblank(ctx->crtc);
fimd_commit(ctx->crtc);
}
static void fimd_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
int i;
if (ctx->suspended)
return;
	/*
	 * We need to make sure that all windows are disabled before we
	 * suspend the connector. Otherwise we might try to scan out from
	 * a destroyed buffer later.
	 */
for (i = 0; i < WINDOWS_NR; i++)
fimd_disable_plane(crtc, &ctx->planes[i]);
fimd_enable_vblank(crtc);
fimd_wait_for_vblank(crtc);
fimd_disable_vblank(crtc);
writel(0, ctx->regs + VIDCON0);
pm_runtime_put_sync(ctx->dev);
ctx->suspended = true;
}
static void fimd_trigger(struct device *dev)
{
struct fimd_context *ctx = dev_get_drvdata(dev);
const struct fimd_driver_data *driver_data = ctx->driver_data;
	void __iomem *timing_base = ctx->regs + driver_data->timing_base;
u32 reg;
	/*
	 * Skip triggering if already in the triggering state, as multiple
	 * trigger requests can cause a panel reset.
	 */
if (atomic_read(&ctx->triggering))
return;
	/* Enter triggering mode */
atomic_set(&ctx->triggering, 1);
reg = readl(timing_base + TRIGCON);
reg |= (TRGMODE_ENABLE | SWTRGCMD_ENABLE);
writel(reg, timing_base + TRIGCON);
	/*
	 * Exit triggering mode if vblank is not enabled yet: while the
	 * VIDINTCON0 register is not set, the hardware cannot leave
	 * triggering mode on its own.
	 */
if (!test_bit(0, &ctx->irq_flags))
atomic_set(&ctx->triggering, 0);
}
static void fimd_te_handler(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
u32 trg_type = ctx->driver_data->trg_type;
	/* Check whether the crtc has already been detached from the encoder */
if (!ctx->drm_dev)
return;
if (trg_type == I80_HW_TRG)
goto out;
	/*
	 * If there is a pending page-flip request, trigger it and handle
	 * the page-flip event so that the current fb can be transferred
	 * into the panel's GRAM.
	 */
if (atomic_add_unless(&ctx->win_updated, -1, 0))
fimd_trigger(ctx->dev);
out:
/* Wakes up vsync event queue */
if (atomic_read(&ctx->wait_vsync_event)) {
atomic_set(&ctx->wait_vsync_event, 0);
wake_up(&ctx->wait_vsync_queue);
}
if (test_bit(0, &ctx->irq_flags))
drm_crtc_handle_vblank(&ctx->crtc->base);
}
static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
{
struct fimd_context *ctx = container_of(clk, struct fimd_context,
dp_clk);
u32 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
writel(val, ctx->regs + DP_MIE_CLKCON);
}
static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
.atomic_enable = fimd_atomic_enable,
.atomic_disable = fimd_atomic_disable,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
.atomic_begin = fimd_atomic_begin,
.update_plane = fimd_update_plane,
.disable_plane = fimd_disable_plane,
.atomic_flush = fimd_atomic_flush,
.atomic_check = fimd_atomic_check,
.te_handler = fimd_te_handler,
};
static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
{
struct fimd_context *ctx = (struct fimd_context *)dev_id;
u32 val, clear_bit;
val = readl(ctx->regs + VIDINTCON1);
clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME;
if (val & clear_bit)
writel(clear_bit, ctx->regs + VIDINTCON1);
	/* check whether the crtc has already been detached from the encoder */
if (!ctx->drm_dev)
goto out;
if (!ctx->i80_if)
drm_crtc_handle_vblank(&ctx->crtc->base);
if (ctx->i80_if) {
/* Exits triggering mode */
atomic_set(&ctx->triggering, 0);
} else {
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
atomic_set(&ctx->wait_vsync_event, 0);
wake_up(&ctx->wait_vsync_queue);
}
}
out:
return IRQ_HANDLED;
}
static int fimd_bind(struct device *dev, struct device *master, void *data)
{
struct fimd_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_plane *exynos_plane;
unsigned int i;
int ret;
ctx->drm_dev = drm_dev;
for (i = 0; i < WINDOWS_NR; i++) {
if (ctx->driver_data->has_bgr_support) {
ctx->configs[i].pixel_formats = fimd_extended_formats;
ctx->configs[i].num_pixel_formats = ARRAY_SIZE(fimd_extended_formats);
} else {
ctx->configs[i].pixel_formats = fimd_formats;
ctx->configs[i].num_pixel_formats = ARRAY_SIZE(fimd_formats);
}
ctx->configs[i].zpos = i;
ctx->configs[i].type = fimd_win_types[i];
ctx->configs[i].capabilities = capabilities[i];
ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
&ctx->configs[i]);
if (ret)
return ret;
}
exynos_plane = &ctx->planes[DEFAULT_WIN];
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
EXYNOS_DISPLAY_TYPE_LCD, &fimd_crtc_ops, ctx);
if (IS_ERR(ctx->crtc))
return PTR_ERR(ctx->crtc);
if (ctx->driver_data->has_dp_clk) {
ctx->dp_clk.enable = fimd_dp_clock_enable;
ctx->crtc->pipe_clk = &ctx->dp_clk;
}
if (ctx->encoder)
exynos_dpi_bind(drm_dev, ctx->encoder);
if (is_drm_iommu_supported(drm_dev)) {
int ret;
ret = fimd_clear_channels(ctx->crtc);
if (ret < 0)
return ret;
}
return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}
static void fimd_unbind(struct device *dev, struct device *master,
void *data)
{
struct fimd_context *ctx = dev_get_drvdata(dev);
fimd_atomic_disable(ctx->crtc);
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);
}
static const struct component_ops fimd_component_ops = {
.bind = fimd_bind,
.unbind = fimd_unbind,
};
static int fimd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
struct device_node *i80_if_timings;
int ret;
if (!dev->of_node)
return -ENODEV;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->dev = dev;
ctx->suspended = true;
ctx->driver_data = of_device_get_match_data(dev);
if (of_property_read_bool(dev->of_node, "samsung,invert-vden"))
ctx->vidcon1 |= VIDCON1_INV_VDEN;
if (of_property_read_bool(dev->of_node, "samsung,invert-vclk"))
ctx->vidcon1 |= VIDCON1_INV_VCLK;
i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
if (i80_if_timings) {
u32 val;
ctx->i80_if = true;
if (ctx->driver_data->has_vidoutcon)
ctx->vidout_con |= VIDOUT_CON_F_I80_LDI0;
else
ctx->vidcon0 |= VIDCON0_VIDOUT_I80_LDI0;
		/*
		 * The user manual states that this "DSI_EN" bit is required
		 * to enable the I80 24-bit data interface.
		 */
ctx->vidcon0 |= VIDCON0_DSI_EN;
if (of_property_read_u32(i80_if_timings, "cs-setup", &val))
val = 0;
ctx->i80ifcon = LCD_CS_SETUP(val);
if (of_property_read_u32(i80_if_timings, "wr-setup", &val))
val = 0;
ctx->i80ifcon |= LCD_WR_SETUP(val);
if (of_property_read_u32(i80_if_timings, "wr-active", &val))
val = 1;
ctx->i80ifcon |= LCD_WR_ACTIVE(val);
if (of_property_read_u32(i80_if_timings, "wr-hold", &val))
val = 0;
ctx->i80ifcon |= LCD_WR_HOLD(val);
}
of_node_put(i80_if_timings);
ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,sysreg");
if (IS_ERR(ctx->sysreg)) {
dev_warn(dev, "failed to get system register.\n");
ctx->sysreg = NULL;
}
ctx->bus_clk = devm_clk_get(dev, "fimd");
if (IS_ERR(ctx->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
return PTR_ERR(ctx->bus_clk);
}
ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
if (IS_ERR(ctx->lcd_clk)) {
dev_err(dev, "failed to get lcd clock\n");
return PTR_ERR(ctx->lcd_clk);
}
ctx->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
ret = platform_get_irq_byname(pdev, ctx->i80_if ? "lcd_sys" : "vsync");
if (ret < 0)
return ret;
ret = devm_request_irq(dev, ret, fimd_irq_handler, 0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
return ret;
}
init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
platform_set_drvdata(pdev, ctx);
ctx->encoder = exynos_dpi_probe(dev);
if (IS_ERR(ctx->encoder))
return PTR_ERR(ctx->encoder);
pm_runtime_enable(dev);
ret = component_add(dev, &fimd_component_ops);
if (ret)
goto err_disable_pm_runtime;
return ret;
err_disable_pm_runtime:
pm_runtime_disable(dev);
return ret;
}
static int fimd_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &fimd_component_ops);
return 0;
}
static int exynos_fimd_suspend(struct device *dev)
{
struct fimd_context *ctx = dev_get_drvdata(dev);
clk_disable_unprepare(ctx->lcd_clk);
clk_disable_unprepare(ctx->bus_clk);
return 0;
}
static int exynos_fimd_resume(struct device *dev)
{
struct fimd_context *ctx = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(ctx->bus_clk);
if (ret < 0) {
DRM_DEV_ERROR(dev,
"Failed to prepare_enable the bus clk [%d]\n",
ret);
return ret;
}
	ret = clk_prepare_enable(ctx->lcd_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dev,
			      "Failed to prepare_enable the lcd clk [%d]\n",
			      ret);
		clk_disable_unprepare(ctx->bus_clk);
		return ret;
	}
return 0;
}
static DEFINE_RUNTIME_DEV_PM_OPS(exynos_fimd_pm_ops, exynos_fimd_suspend,
exynos_fimd_resume, NULL);
struct platform_driver fimd_driver = {
.probe = fimd_probe,
.remove = fimd_remove,
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
.pm = pm_ptr(&exynos_fimd_pm_ops),
.of_match_table = fimd_driver_dt_match,
},
};
| linux-master | drivers/gpu/drm/exynos/exynos_drm_fimd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Authors: Joonyoung Shim <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_framebuffer.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
/*
 * This function computes the X or Y size visible on the screen. It needs
 * the length and the start position of the CRTC.
*
* <--- length --->
* CRTC ----------------
* ^ start ^ end
*
* There are six cases from a to f.
*
* <----- SCREEN ----->
* 0 last
* ----------|------------------|----------
* CRTCs
* a -------
* b -------
* c --------------------------
* d --------
* e -------
* f -------
*/
static int exynos_plane_get_size(int start, unsigned length, unsigned last)
{
int end = start + length;
int size = 0;
if (start <= 0) {
if (end > 0)
size = min_t(unsigned, end, last);
} else if (start <= last) {
size = min_t(unsigned, last - start, length);
}
return size;
}
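/*
 * Worked examples with illustrative numbers, for last = 640:
 * - start = -100, length = 300 (case b): end = 200, so the visible
 *   size is min(200, 640) = 200.
 * - start = 500, length = 300 (case e): the visible size is
 *   min(640 - 500, 300) = 140.
 */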
static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
{
struct drm_plane_state *state = &exynos_state->base;
struct drm_crtc *crtc = state->crtc;
struct drm_crtc_state *crtc_state =
drm_atomic_get_existing_crtc_state(state->state, crtc);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
unsigned int src_x, src_y;
unsigned int src_w, src_h;
unsigned int actual_w;
unsigned int actual_h;
/*
* The original src/dest coordinates are stored in exynos_state->base,
* but we want to keep another copy internal to our driver that we can
* clip/modify ourselves.
*/
crtc_x = state->crtc_x;
crtc_y = state->crtc_y;
crtc_w = state->crtc_w;
crtc_h = state->crtc_h;
src_x = state->src_x >> 16;
src_y = state->src_y >> 16;
src_w = state->src_w >> 16;
src_h = state->src_h >> 16;
/* set ratio */
exynos_state->h_ratio = (src_w << 16) / crtc_w;
exynos_state->v_ratio = (src_h << 16) / crtc_h;
/* clip to visible area */
actual_w = exynos_plane_get_size(crtc_x, crtc_w, mode->hdisplay);
actual_h = exynos_plane_get_size(crtc_y, crtc_h, mode->vdisplay);
if (crtc_x < 0) {
if (actual_w)
src_x += ((-crtc_x) * exynos_state->h_ratio) >> 16;
crtc_x = 0;
}
if (crtc_y < 0) {
if (actual_h)
src_y += ((-crtc_y) * exynos_state->v_ratio) >> 16;
crtc_y = 0;
}
/* set drm framebuffer data. */
exynos_state->src.x = src_x;
exynos_state->src.y = src_y;
exynos_state->src.w = (actual_w * exynos_state->h_ratio) >> 16;
exynos_state->src.h = (actual_h * exynos_state->v_ratio) >> 16;
/* set plane range to be displayed. */
exynos_state->crtc.x = crtc_x;
exynos_state->crtc.y = crtc_y;
exynos_state->crtc.w = actual_w;
exynos_state->crtc.h = actual_h;
DRM_DEV_DEBUG_KMS(crtc->dev->dev,
"plane : offset_x/y(%d,%d), width/height(%d,%d)",
exynos_state->crtc.x, exynos_state->crtc.y,
exynos_state->crtc.w, exynos_state->crtc.h);
}
static void exynos_drm_plane_reset(struct drm_plane *plane)
{
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
struct exynos_drm_plane_state *exynos_state;
if (plane->state) {
exynos_state = to_exynos_plane_state(plane->state);
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(exynos_state);
plane->state = NULL;
}
exynos_state = kzalloc(sizeof(*exynos_state), GFP_KERNEL);
if (exynos_state) {
__drm_atomic_helper_plane_reset(plane, &exynos_state->base);
plane->state->zpos = exynos_plane->config->zpos;
}
}
static struct drm_plane_state *
exynos_drm_plane_duplicate_state(struct drm_plane *plane)
{
struct exynos_drm_plane_state *exynos_state;
struct exynos_drm_plane_state *copy;
exynos_state = to_exynos_plane_state(plane->state);
copy = kzalloc(sizeof(*exynos_state), GFP_KERNEL);
if (!copy)
return NULL;
	__drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
	return &copy->base;
}
static void exynos_drm_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct exynos_drm_plane_state *old_exynos_state =
to_exynos_plane_state(old_state);
__drm_atomic_helper_plane_destroy_state(old_state);
kfree(old_exynos_state);
}
static const struct drm_plane_funcs exynos_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = exynos_drm_plane_reset,
.atomic_duplicate_state = exynos_drm_plane_duplicate_state,
.atomic_destroy_state = exynos_drm_plane_destroy_state,
};
static int
exynos_drm_plane_check_format(const struct exynos_drm_plane_config *config,
struct exynos_drm_plane_state *state)
{
struct drm_framebuffer *fb = state->base.fb;
struct drm_device *dev = fb->dev;
switch (fb->modifier) {
case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
if (!(config->capabilities & EXYNOS_DRM_PLANE_CAP_TILE))
return -ENOTSUPP;
break;
case DRM_FORMAT_MOD_LINEAR:
break;
default:
DRM_DEV_ERROR(dev->dev, "unsupported pixel format modifier");
return -ENOTSUPP;
}
return 0;
}
static int
exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
struct exynos_drm_plane_state *state)
{
struct drm_crtc *crtc = state->base.crtc;
bool width_ok = false, height_ok = false;
if (config->capabilities & EXYNOS_DRM_PLANE_CAP_SCALE)
return 0;
if (state->src.w == state->crtc.w)
width_ok = true;
if (state->src.h == state->crtc.h)
height_ok = true;
if ((config->capabilities & EXYNOS_DRM_PLANE_CAP_DOUBLE) &&
state->h_ratio == (1 << 15))
width_ok = true;
if ((config->capabilities & EXYNOS_DRM_PLANE_CAP_DOUBLE) &&
state->v_ratio == (1 << 15))
height_ok = true;
if (width_ok && height_ok)
return 0;
DRM_DEV_DEBUG_KMS(crtc->dev->dev, "scaling mode is not supported");
return -ENOTSUPP;
}
static int exynos_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
struct exynos_drm_plane_state *exynos_state =
to_exynos_plane_state(new_plane_state);
int ret = 0;
if (!new_plane_state->crtc || !new_plane_state->fb)
return 0;
/* translate state into exynos_state */
exynos_plane_mode_set(exynos_state);
ret = exynos_drm_plane_check_format(exynos_plane->config, exynos_state);
if (ret)
return ret;
ret = exynos_drm_plane_check_size(exynos_plane->config, exynos_state);
return ret;
}
static void exynos_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(new_state->crtc);
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
if (!new_state->crtc)
return;
if (exynos_crtc->ops->update_plane)
exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
}
static void exynos_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(old_state->crtc);
if (!old_state->crtc)
return;
if (exynos_crtc->ops->disable_plane)
exynos_crtc->ops->disable_plane(exynos_crtc, exynos_plane);
}
static const struct drm_plane_helper_funcs plane_helper_funcs = {
.atomic_check = exynos_plane_atomic_check,
.atomic_update = exynos_plane_atomic_update,
.atomic_disable = exynos_plane_atomic_disable,
};
static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
int zpos, bool immutable)
{
if (immutable)
drm_plane_create_zpos_immutable_property(plane, zpos);
else
drm_plane_create_zpos_property(plane, zpos, 0, MAX_PLANE - 1);
}
int exynos_plane_init(struct drm_device *dev,
struct exynos_drm_plane *exynos_plane, unsigned int index,
const struct exynos_drm_plane_config *config)
{
int err;
unsigned int supported_modes = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE);
struct drm_plane *plane = &exynos_plane->base;
err = drm_universal_plane_init(dev, &exynos_plane->base,
1 << dev->mode_config.num_crtc,
&exynos_plane_funcs,
config->pixel_formats,
config->num_pixel_formats,
NULL, config->type, NULL);
if (err) {
DRM_DEV_ERROR(dev->dev, "failed to initialize plane\n");
return err;
}
drm_plane_helper_add(&exynos_plane->base, &plane_helper_funcs);
exynos_plane->index = index;
exynos_plane->config = config;
exynos_plane_attach_zpos_property(&exynos_plane->base, config->zpos,
!(config->capabilities & EXYNOS_DRM_PLANE_CAP_ZPOS));
if (config->capabilities & EXYNOS_DRM_PLANE_CAP_PIX_BLEND)
drm_plane_create_blend_mode_property(plane, supported_modes);
if (config->capabilities & EXYNOS_DRM_PLANE_CAP_WIN_BLEND)
drm_plane_create_alpha_property(plane);
return 0;
}
| linux-master | drivers/gpu/drm/exynos/exynos_drm_plane.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Samsung Electronics Co.Ltd
* Authors:
* YoungJun Cho <[email protected]>
* Eunchul Kim <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <drm/drm_fourcc.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_ipp.h"
#include "regs-rotator.h"
/*
 * The rotator supports image crop/rotation and input/output DMA
 * operations. Input DMA reads image data from memory; output DMA
 * writes image data back to memory.
 */
#define ROTATOR_AUTOSUSPEND_DELAY 2000
#define rot_read(offset) readl(rot->regs + (offset))
#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
enum rot_irq_status {
ROT_IRQ_STATUS_COMPLETE = 8,
ROT_IRQ_STATUS_ILLEGAL = 9,
};
struct rot_variant {
const struct exynos_drm_ipp_formats *formats;
unsigned int num_formats;
};
/*
 * A structure of rotator context.
 * @ipp: exynos drm ipp device.
 * @regs: memory mapped io registers.
 * @clock: rotator gate clock.
 * @formats: supported formats and their size limits.
 * @task: ipp task currently being executed, if any.
 */
struct rot_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
void *dma_priv;
struct device *dev;
void __iomem *regs;
struct clk *clock;
const struct exynos_drm_ipp_formats *formats;
unsigned int num_formats;
struct exynos_drm_ipp_task *task;
};
static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
{
u32 val = rot_read(ROT_CONFIG);
	if (enable)
val |= ROT_CONFIG_IRQ;
else
val &= ~ROT_CONFIG_IRQ;
rot_write(val, ROT_CONFIG);
}
static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
{
u32 val = rot_read(ROT_STATUS);
val = ROT_STATUS_IRQ(val);
if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
return ROT_IRQ_STATUS_COMPLETE;
return ROT_IRQ_STATUS_ILLEGAL;
}
static irqreturn_t rotator_irq_handler(int irq, void *arg)
{
struct rot_context *rot = arg;
enum rot_irq_status irq_status;
u32 val;
/* Get execution result */
irq_status = rotator_reg_get_irq_status(rot);
/* clear status */
val = rot_read(ROT_STATUS);
val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
rot_write(val, ROT_STATUS);
if (rot->task) {
struct exynos_drm_ipp_task *task = rot->task;
rot->task = NULL;
pm_runtime_mark_last_busy(rot->dev);
pm_runtime_put_autosuspend(rot->dev);
exynos_drm_ipp_task_done(task,
irq_status == ROT_IRQ_STATUS_COMPLETE ? 0 : -EINVAL);
}
return IRQ_HANDLED;
}
static void rotator_src_set_fmt(struct rot_context *rot, u32 fmt)
{
u32 val;
val = rot_read(ROT_CONTROL);
val &= ~ROT_CONTROL_FMT_MASK;
switch (fmt) {
case DRM_FORMAT_NV12:
val |= ROT_CONTROL_FMT_YCBCR420_2P;
break;
case DRM_FORMAT_XRGB8888:
val |= ROT_CONTROL_FMT_RGB888;
break;
}
rot_write(val, ROT_CONTROL);
}
static void rotator_src_set_buf(struct rot_context *rot,
struct exynos_drm_ipp_buffer *buf)
{
u32 val;
/* Set buffer size configuration */
val = ROT_SET_BUF_SIZE_H(buf->buf.height) |
ROT_SET_BUF_SIZE_W(buf->buf.pitch[0] / buf->format->cpp[0]);
rot_write(val, ROT_SRC_BUF_SIZE);
/* Set crop image position configuration */
val = ROT_CROP_POS_Y(buf->rect.y) | ROT_CROP_POS_X(buf->rect.x);
rot_write(val, ROT_SRC_CROP_POS);
val = ROT_SRC_CROP_SIZE_H(buf->rect.h) |
ROT_SRC_CROP_SIZE_W(buf->rect.w);
rot_write(val, ROT_SRC_CROP_SIZE);
/* Set buffer DMA address */
rot_write(buf->dma_addr[0], ROT_SRC_BUF_ADDR(0));
rot_write(buf->dma_addr[1], ROT_SRC_BUF_ADDR(1));
}
static void rotator_dst_set_transf(struct rot_context *rot,
unsigned int rotation)
{
u32 val;
/* Set transform configuration */
val = rot_read(ROT_CONTROL);
val &= ~ROT_CONTROL_FLIP_MASK;
if (rotation & DRM_MODE_REFLECT_X)
val |= ROT_CONTROL_FLIP_VERTICAL;
if (rotation & DRM_MODE_REFLECT_Y)
val |= ROT_CONTROL_FLIP_HORIZONTAL;
val &= ~ROT_CONTROL_ROT_MASK;
if (rotation & DRM_MODE_ROTATE_90)
val |= ROT_CONTROL_ROT_90;
else if (rotation & DRM_MODE_ROTATE_180)
val |= ROT_CONTROL_ROT_180;
else if (rotation & DRM_MODE_ROTATE_270)
val |= ROT_CONTROL_ROT_270;
rot_write(val, ROT_CONTROL);
}
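/*
 * For example, rotation = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X
 * programs ROT_CONTROL_ROT_90 | ROT_CONTROL_FLIP_VERTICAL, so the flip
 * and the 90 degree rotation are applied by the same commit.
 */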
static void rotator_dst_set_buf(struct rot_context *rot,
struct exynos_drm_ipp_buffer *buf)
{
u32 val;
/* Set buffer size configuration */
val = ROT_SET_BUF_SIZE_H(buf->buf.height) |
ROT_SET_BUF_SIZE_W(buf->buf.pitch[0] / buf->format->cpp[0]);
rot_write(val, ROT_DST_BUF_SIZE);
/* Set crop image position configuration */
val = ROT_CROP_POS_Y(buf->rect.y) | ROT_CROP_POS_X(buf->rect.x);
rot_write(val, ROT_DST_CROP_POS);
/* Set buffer DMA address */
rot_write(buf->dma_addr[0], ROT_DST_BUF_ADDR(0));
rot_write(buf->dma_addr[1], ROT_DST_BUF_ADDR(1));
}
static void rotator_start(struct rot_context *rot)
{
u32 val;
/* Set interrupt enable */
rotator_reg_set_irq(rot, true);
val = rot_read(ROT_CONTROL);
val |= ROT_CONTROL_START;
rot_write(val, ROT_CONTROL);
}
static int rotator_commit(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
struct rot_context *rot =
container_of(ipp, struct rot_context, ipp);
int ret;
ret = pm_runtime_resume_and_get(rot->dev);
if (ret < 0) {
dev_err(rot->dev, "failed to enable ROTATOR device.\n");
return ret;
}
rot->task = task;
rotator_src_set_fmt(rot, task->src.buf.fourcc);
rotator_src_set_buf(rot, &task->src);
rotator_dst_set_transf(rot, task->transform.rotation);
rotator_dst_set_buf(rot, &task->dst);
rotator_start(rot);
return 0;
}
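/*
 * Runtime PM note: the pm_runtime_resume_and_get() above is balanced by
 * the pm_runtime_put_autosuspend() in rotator_irq_handler() once the
 * hardware signals completion, so the device stays powered only while a
 * task is in flight (plus the autosuspend delay).
 */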
static const struct exynos_drm_ipp_funcs ipp_funcs = {
.commit = rotator_commit,
};
static int rotator_bind(struct device *dev, struct device *master, void *data)
{
struct rot_context *rot = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &rot->ipp;
rot->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
rot->formats, rot->num_formats, "rotator");
dev_info(dev, "The exynos rotator has been probed successfully\n");
return 0;
}
static void rotator_unbind(struct device *dev, struct device *master,
void *data)
{
struct rot_context *rot = dev_get_drvdata(dev);
struct exynos_drm_ipp *ipp = &rot->ipp;
exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv);
}
static const struct component_ops rotator_component_ops = {
.bind = rotator_bind,
.unbind = rotator_unbind,
};
static int rotator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rot_context *rot;
const struct rot_variant *variant;
int irq;
int ret;
rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
if (!rot)
return -ENOMEM;
variant = of_device_get_match_data(dev);
rot->formats = variant->formats;
rot->num_formats = variant->num_formats;
rot->dev = dev;
rot->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rot->regs))
return PTR_ERR(rot->regs);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_irq(dev, irq, rotator_irq_handler, 0, dev_name(dev),
rot);
if (ret < 0) {
dev_err(dev, "failed to request irq\n");
return ret;
}
rot->clock = devm_clk_get(dev, "rotator");
if (IS_ERR(rot->clock)) {
dev_err(dev, "failed to get clock\n");
return PTR_ERR(rot->clock);
}
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, ROTATOR_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
platform_set_drvdata(pdev, rot);
ret = component_add(dev, &rotator_component_ops);
if (ret)
goto err_component;
return 0;
err_component:
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
return ret;
}
static int rotator_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &rotator_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
return 0;
}
static int rotator_runtime_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
clk_disable_unprepare(rot->clock);
return 0;
}
static int rotator_runtime_resume(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
return clk_prepare_enable(rot->clock);
}
static const struct drm_exynos_ipp_limit rotator_s5pv210_rbg888_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) },
{ IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) },
};
static const struct drm_exynos_ipp_limit rotator_4210_rbg888_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) },
{ IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) },
};
static const struct drm_exynos_ipp_limit rotator_4412_rbg888_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) },
{ IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) },
};
static const struct drm_exynos_ipp_limit rotator_5250_rbg888_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) },
{ IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) },
};
static const struct drm_exynos_ipp_limit rotator_s5pv210_yuv_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_64K }, .v = { 32, SZ_64K }) },
{ IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) },
};
static const struct drm_exynos_ipp_limit rotator_4210_yuv_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_64K }, .v = { 32, SZ_64K }) },
{ IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) },
};
static const struct drm_exynos_ipp_limit rotator_4412_yuv_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_32K }, .v = { 32, SZ_32K }) },
{ IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) },
};
static const struct exynos_drm_ipp_formats rotator_s5pv210_formats[] = {
{ IPP_SRCDST_FORMAT(XRGB8888, rotator_s5pv210_rbg888_limits) },
{ IPP_SRCDST_FORMAT(NV12, rotator_s5pv210_yuv_limits) },
};
static const struct exynos_drm_ipp_formats rotator_4210_formats[] = {
{ IPP_SRCDST_FORMAT(XRGB8888, rotator_4210_rbg888_limits) },
{ IPP_SRCDST_FORMAT(NV12, rotator_4210_yuv_limits) },
};
static const struct exynos_drm_ipp_formats rotator_4412_formats[] = {
{ IPP_SRCDST_FORMAT(XRGB8888, rotator_4412_rbg888_limits) },
{ IPP_SRCDST_FORMAT(NV12, rotator_4412_yuv_limits) },
};
static const struct exynos_drm_ipp_formats rotator_5250_formats[] = {
{ IPP_SRCDST_FORMAT(XRGB8888, rotator_5250_rbg888_limits) },
{ IPP_SRCDST_FORMAT(NV12, rotator_4412_yuv_limits) },
};
static const struct rot_variant rotator_s5pv210_data = {
.formats = rotator_s5pv210_formats,
.num_formats = ARRAY_SIZE(rotator_s5pv210_formats),
};
static const struct rot_variant rotator_4210_data = {
.formats = rotator_4210_formats,
.num_formats = ARRAY_SIZE(rotator_4210_formats),
};
static const struct rot_variant rotator_4412_data = {
.formats = rotator_4412_formats,
.num_formats = ARRAY_SIZE(rotator_4412_formats),
};
static const struct rot_variant rotator_5250_data = {
.formats = rotator_5250_formats,
.num_formats = ARRAY_SIZE(rotator_5250_formats),
};
static const struct of_device_id exynos_rotator_match[] = {
{
.compatible = "samsung,s5pv210-rotator",
.data = &rotator_s5pv210_data,
}, {
.compatible = "samsung,exynos4210-rotator",
.data = &rotator_4210_data,
}, {
.compatible = "samsung,exynos4212-rotator",
.data = &rotator_4412_data,
}, {
.compatible = "samsung,exynos5250-rotator",
.data = &rotator_5250_data,
}, {
},
};
MODULE_DEVICE_TABLE(of, exynos_rotator_match);
static DEFINE_RUNTIME_DEV_PM_OPS(rotator_pm_ops, rotator_runtime_suspend,
rotator_runtime_resume, NULL);
struct platform_driver rotator_driver = {
.probe = rotator_probe,
.remove = rotator_remove,
.driver = {
.name = "exynos-rotator",
.owner = THIS_MODULE,
.pm = pm_ptr(&rotator_pm_ops),
.of_match_table = exynos_rotator_match,
},
};
| linux-master | drivers/gpu/drm/exynos/exynos_drm_rotator.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_fbdev.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Authors:
* Inki Dae <[email protected]>
* Joonyoung Shim <[email protected]>
* Seung-Woo Kim <[email protected]>
*/
#include <linux/fb.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_prime.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
#define MAX_CONNECTOR 4
#define PREFERRED_BPP 32
static int exynos_drm_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *helper = info->par;
struct drm_gem_object *obj = drm_gem_fb_get_obj(helper->fb, 0);
return drm_gem_prime_mmap(obj, vma);
}
static void exynos_drm_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
drm_fb_helper_fini(fb_helper);
drm_framebuffer_remove(fb);
drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
static const struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
__FB_DEFAULT_DMAMEM_OPS_RDWR,
DRM_FB_HELPER_DEFAULT_OPS,
__FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_mmap = exynos_drm_fb_mmap,
.fb_destroy = exynos_drm_fb_destroy,
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes,
struct exynos_drm_gem *exynos_gem)
{
struct fb_info *fbi;
struct drm_framebuffer *fb = helper->fb;
unsigned int size = fb->width * fb->height * fb->format->cpp[0];
unsigned long offset;
fbi = drm_fb_helper_alloc_info(helper);
if (IS_ERR(fbi)) {
DRM_DEV_ERROR(to_dma_dev(helper->dev),
"failed to allocate fb info.\n");
return PTR_ERR(fbi);
}
fbi->fbops = &exynos_drm_fb_ops;
drm_fb_helper_fill_info(fbi, helper, sizes);
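	/* Byte offset of the first visible pixel inside the GEM buffer. */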
offset = fbi->var.xoffset * fb->format->cpp[0];
offset += fbi->var.yoffset * fb->pitches[0];
fbi->flags |= FBINFO_VIRTFB;
fbi->screen_buffer = exynos_gem->kvaddr + offset;
fbi->screen_size = size;
fbi->fix.smem_len = size;
return 0;
}
static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct exynos_drm_gem *exynos_gem;
struct drm_device *dev = helper->dev;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
unsigned long size;
int ret;
DRM_DEV_DEBUG_KMS(dev->dev,
"surface width(%d), height(%d) and bpp(%d\n",
sizes->surface_width, sizes->surface_height,
sizes->surface_bpp);
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_WC, size, true);
if (IS_ERR(exynos_gem))
return PTR_ERR(exynos_gem);
helper->fb =
exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1);
if (IS_ERR(helper->fb)) {
DRM_DEV_ERROR(dev->dev, "failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
goto err_destroy_gem;
}
ret = exynos_drm_fbdev_update(helper, sizes, exynos_gem);
if (ret < 0)
goto err_destroy_framebuffer;
return 0;
err_destroy_framebuffer:
drm_framebuffer_cleanup(helper->fb);
helper->fb = NULL;
err_destroy_gem:
exynos_drm_gem_destroy(exynos_gem);
return ret;
}
static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
.fb_probe = exynos_drm_fbdev_create,
};
/*
* struct drm_client
*/
static void exynos_drm_fbdev_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
if (fb_helper->info) {
drm_fb_helper_unregister_info(fb_helper);
} else {
drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
}
static int exynos_drm_fbdev_client_restore(struct drm_client_dev *client)
{
drm_fb_helper_lastclose(client->dev);
return 0;
}
static int exynos_drm_fbdev_client_hotplug(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
struct drm_device *dev = client->dev;
int ret;
if (dev->fb_helper)
return drm_fb_helper_hotplug_event(dev->fb_helper);
ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto err_drm_err;
if (!drm_drv_uses_atomic_modeset(dev))
drm_helper_disable_unused_functions(dev);
ret = drm_fb_helper_initial_config(fb_helper);
if (ret)
goto err_drm_fb_helper_fini;
return 0;
err_drm_fb_helper_fini:
drm_fb_helper_fini(fb_helper);
err_drm_err:
drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret);
return ret;
}
static const struct drm_client_funcs exynos_drm_fbdev_client_funcs = {
.owner = THIS_MODULE,
.unregister = exynos_drm_fbdev_client_unregister,
.restore = exynos_drm_fbdev_client_restore,
.hotplug = exynos_drm_fbdev_client_hotplug,
};
void exynos_drm_fbdev_setup(struct drm_device *dev)
{
struct drm_fb_helper *fb_helper;
int ret;
drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
if (!fb_helper)
return;
drm_fb_helper_prepare(dev, fb_helper, PREFERRED_BPP, &exynos_drm_fb_helper_funcs);
ret = drm_client_init(dev, &fb_helper->client, "fbdev", &exynos_drm_fbdev_client_funcs);
if (ret)
goto err_drm_client_init;
drm_client_register(&fb_helper->client);
return;
err_drm_client_init:
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
| linux-master | drivers/gpu/drm/exynos/exynos_drm_fbdev.c |
/*
* Copyright (C) 2017 Samsung Electronics Co.Ltd
* Authors:
* Marek Szyprowski <[email protected]>
*
* Exynos DRM Image Post Processing (IPP) related functions
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*/
#include <linux/uaccess.h>
#include <drm/drm_blend.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_mode.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
static int num_ipp;
static LIST_HEAD(ipp_list);
/**
* exynos_drm_ipp_register - Register a new picture processor hardware module
* @dev: DRM device
* @ipp: ipp module to init
* @funcs: callbacks for the new ipp object
* @caps: bitmask of ipp capabilities (%DRM_EXYNOS_IPP_CAP_*)
* @formats: array of supported formats
* @num_formats: size of the supported formats array
* @name: name (for debugging purposes)
*
 * Initializes an ipp module.
*
* Returns:
* Zero on success, error code on failure.
*/
int exynos_drm_ipp_register(struct device *dev, struct exynos_drm_ipp *ipp,
const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
const struct exynos_drm_ipp_formats *formats,
unsigned int num_formats, const char *name)
{
WARN_ON(!ipp);
WARN_ON(!funcs);
WARN_ON(!formats);
WARN_ON(!num_formats);
spin_lock_init(&ipp->lock);
INIT_LIST_HEAD(&ipp->todo_list);
init_waitqueue_head(&ipp->done_wq);
ipp->dev = dev;
ipp->funcs = funcs;
ipp->capabilities = caps;
ipp->name = name;
ipp->formats = formats;
ipp->num_formats = num_formats;
/* ipp_list modification is serialized by component framework */
list_add_tail(&ipp->head, &ipp_list);
ipp->id = num_ipp++;
DRM_DEV_DEBUG_DRIVER(dev, "Registered ipp %d\n", ipp->id);
return 0;
}
/**
* exynos_drm_ipp_unregister - Unregister the picture processor module
* @dev: DRM device
* @ipp: ipp module
*/
void exynos_drm_ipp_unregister(struct device *dev,
struct exynos_drm_ipp *ipp)
{
WARN_ON(ipp->task);
WARN_ON(!list_empty(&ipp->todo_list));
list_del(&ipp->head);
}
/**
* exynos_drm_ipp_get_res_ioctl - enumerate all ipp modules
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Construct a list of ipp ids.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_ioctl_ipp_get_res *resp = data;
struct exynos_drm_ipp *ipp;
uint32_t __user *ipp_ptr = (uint32_t __user *)
(unsigned long)resp->ipp_id_ptr;
unsigned int count = num_ipp, copied = 0;
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
if (count && resp->count_ipps >= count) {
list_for_each_entry(ipp, &ipp_list, head) {
if (put_user(ipp->id, ipp_ptr + copied))
return -EFAULT;
copied++;
}
}
resp->count_ipps = count;
return 0;
}
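/*
 * A minimal userspace sketch of the two-pass pattern above (error
 * handling omitted; "fd" is an assumed open DRM device node):
 *
 *	struct drm_exynos_ioctl_ipp_get_res res = { 0 };
 *	__u32 *ids;
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res);
 *	ids = calloc(res.count_ipps, sizeof(*ids));
 *	res.ipp_id_ptr = (__u64)(uintptr_t)ids;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res);
 */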
static inline struct exynos_drm_ipp *__ipp_get(uint32_t id)
{
struct exynos_drm_ipp *ipp;
list_for_each_entry(ipp, &ipp_list, head)
if (ipp->id == id)
return ipp;
return NULL;
}
/**
* exynos_drm_ipp_get_caps_ioctl - get ipp module capabilities and formats
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Construct a structure describing ipp module capabilities.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_ioctl_ipp_get_caps *resp = data;
void __user *ptr = (void __user *)(unsigned long)resp->formats_ptr;
struct exynos_drm_ipp *ipp;
int i;
ipp = __ipp_get(resp->ipp_id);
if (!ipp)
return -ENOENT;
resp->ipp_id = ipp->id;
resp->capabilities = ipp->capabilities;
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
if (resp->formats_count >= ipp->num_formats) {
for (i = 0; i < ipp->num_formats; i++) {
struct drm_exynos_ipp_format tmp = {
.fourcc = ipp->formats[i].fourcc,
.type = ipp->formats[i].type,
.modifier = ipp->formats[i].modifier,
};
if (copy_to_user(ptr, &tmp, sizeof(tmp)))
return -EFAULT;
ptr += sizeof(tmp);
}
}
resp->formats_count = ipp->num_formats;
return 0;
}
static inline const struct exynos_drm_ipp_formats *__ipp_format_get(
struct exynos_drm_ipp *ipp, uint32_t fourcc,
uint64_t mod, unsigned int type)
{
int i;
for (i = 0; i < ipp->num_formats; i++) {
if ((ipp->formats[i].type & type) &&
ipp->formats[i].fourcc == fourcc &&
ipp->formats[i].modifier == mod)
return &ipp->formats[i];
}
return NULL;
}
/**
* exynos_drm_ipp_get_limits_ioctl - get ipp module limits
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Construct a structure describing ipp module limitations for provided
* picture format.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_ioctl_ipp_get_limits *resp = data;
void __user *ptr = (void __user *)(unsigned long)resp->limits_ptr;
const struct exynos_drm_ipp_formats *format;
struct exynos_drm_ipp *ipp;
if (resp->type != DRM_EXYNOS_IPP_FORMAT_SOURCE &&
resp->type != DRM_EXYNOS_IPP_FORMAT_DESTINATION)
return -EINVAL;
ipp = __ipp_get(resp->ipp_id);
if (!ipp)
return -ENOENT;
format = __ipp_format_get(ipp, resp->fourcc, resp->modifier,
resp->type);
if (!format)
return -EINVAL;
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
if (format->num_limits && resp->limits_count >= format->num_limits)
if (copy_to_user((void __user *)ptr, format->limits,
sizeof(*format->limits) * format->num_limits))
return -EFAULT;
resp->limits_count = format->num_limits;
return 0;
}
struct drm_pending_exynos_ipp_event {
struct drm_pending_event base;
struct drm_exynos_ipp_event event;
};
static inline struct exynos_drm_ipp_task *
exynos_drm_ipp_task_alloc(struct exynos_drm_ipp *ipp)
{
struct exynos_drm_ipp_task *task;
task = kzalloc(sizeof(*task), GFP_KERNEL);
if (!task)
return NULL;
task->dev = ipp->dev;
task->ipp = ipp;
/* some defaults */
task->src.rect.w = task->dst.rect.w = UINT_MAX;
task->src.rect.h = task->dst.rect.h = UINT_MAX;
task->transform.rotation = DRM_MODE_ROTATE_0;
DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %pK\n", task);
return task;
}
static const struct exynos_drm_param_map {
unsigned int id;
unsigned int size;
unsigned int offset;
} exynos_drm_ipp_params_maps[] = {
{
DRM_EXYNOS_IPP_TASK_BUFFER | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
sizeof(struct drm_exynos_ipp_task_buffer),
offsetof(struct exynos_drm_ipp_task, src.buf),
}, {
DRM_EXYNOS_IPP_TASK_BUFFER |
DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
sizeof(struct drm_exynos_ipp_task_buffer),
offsetof(struct exynos_drm_ipp_task, dst.buf),
}, {
DRM_EXYNOS_IPP_TASK_RECTANGLE | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
sizeof(struct drm_exynos_ipp_task_rect),
offsetof(struct exynos_drm_ipp_task, src.rect),
}, {
DRM_EXYNOS_IPP_TASK_RECTANGLE |
DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
sizeof(struct drm_exynos_ipp_task_rect),
offsetof(struct exynos_drm_ipp_task, dst.rect),
}, {
DRM_EXYNOS_IPP_TASK_TRANSFORM,
sizeof(struct drm_exynos_ipp_task_transform),
offsetof(struct exynos_drm_ipp_task, transform),
}, {
DRM_EXYNOS_IPP_TASK_ALPHA,
sizeof(struct drm_exynos_ipp_task_alpha),
offsetof(struct exynos_drm_ipp_task, alpha),
},
};
static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
struct drm_exynos_ioctl_ipp_commit *arg)
{
const struct exynos_drm_param_map *map = exynos_drm_ipp_params_maps;
void __user *params = (void __user *)(unsigned long)arg->params_ptr;
unsigned int size = arg->params_size;
uint32_t id;
int i;
while (size) {
if (get_user(id, (uint32_t __user *)params))
return -EFAULT;
for (i = 0; i < ARRAY_SIZE(exynos_drm_ipp_params_maps); i++)
if (map[i].id == id)
break;
if (i == ARRAY_SIZE(exynos_drm_ipp_params_maps) ||
map[i].size > size)
return -EINVAL;
if (copy_from_user((void *)task + map[i].offset, params,
map[i].size))
return -EFAULT;
params += map[i].size;
size -= map[i].size;
}
DRM_DEV_DEBUG_DRIVER(task->dev,
"Got task %pK configuration from userspace\n",
task);
return 0;
}
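/*
 * A minimal sketch of how userspace could pack the params blob parsed
 * above: a plain concatenation of parameter structures, each starting
 * with its __u32 id. Defaults from exynos_drm_ipp_task_alloc() cover
 * the rest (most fields and all error handling omitted):
 *
 *	struct {
 *		struct drm_exynos_ipp_task_buffer src, dst;
 *	} params = {
 *		.src.id = DRM_EXYNOS_IPP_TASK_BUFFER |
 *			  DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
 *		.dst.id = DRM_EXYNOS_IPP_TASK_BUFFER |
 *			  DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
 *	};
 *	struct drm_exynos_ioctl_ipp_commit arg = {
 *		.ipp_id = ipp_id,
 *		.params_size = sizeof(params),
 *		.params_ptr = (__u64)(uintptr_t)&params,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_COMMIT, &arg);
 */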
static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
struct drm_file *filp)
{
int ret = 0;
int i;
/* get GEM buffers and check their size */
for (i = 0; i < buf->format->num_planes; i++) {
unsigned int height = (i == 0) ? buf->buf.height :
DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
unsigned long size = height * buf->buf.pitch[i];
struct exynos_drm_gem *gem = exynos_drm_gem_get(filp,
buf->buf.gem_id[i]);
if (!gem) {
ret = -ENOENT;
goto gem_free;
}
buf->exynos_gem[i] = gem;
if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
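			/*
			 * The current GEM is already stored in
			 * exynos_gem[i]; bump i so the unwind loop
			 * below releases it too.
			 */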
i++;
ret = -EINVAL;
goto gem_free;
}
buf->dma_addr[i] = buf->exynos_gem[i]->dma_addr +
buf->buf.offset[i];
}
return 0;
gem_free:
while (i--) {
exynos_drm_gem_put(buf->exynos_gem[i]);
buf->exynos_gem[i] = NULL;
}
return ret;
}
static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
{
int i;
if (!buf->exynos_gem[0])
return;
for (i = 0; i < buf->format->num_planes; i++)
exynos_drm_gem_put(buf->exynos_gem[i]);
}
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %pK\n", task);
exynos_drm_ipp_task_release_buf(&task->src);
exynos_drm_ipp_task_release_buf(&task->dst);
if (task->event)
drm_event_cancel_free(ipp->drm_dev, &task->event->base);
kfree(task);
}
struct drm_ipp_limit {
struct drm_exynos_ipp_limit_val h;
struct drm_exynos_ipp_limit_val v;
};
enum drm_ipp_size_id {
IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
};
static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
[IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
[IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
[IPP_LIMIT_ROTATED] = { DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED,
DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
};
static inline void __limit_set_val(unsigned int *ptr, unsigned int val)
{
if (!*ptr)
*ptr = val;
}
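/*
 * Merge every size limit matching the given id, walking the fallback
 * chain from most to least specific; __limit_set_val() above only
 * fills fields that a more specific limit has not set yet.
 */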
static void __get_size_limit(const struct drm_exynos_ipp_limit *limits,
unsigned int num_limits, enum drm_ipp_size_id id,
struct drm_ipp_limit *res)
{
const struct drm_exynos_ipp_limit *l = limits;
int i = 0;
memset(res, 0, sizeof(*res));
for (i = 0; limit_id_fallback[id][i]; i++)
for (l = limits; l - limits < num_limits; l++) {
if (((l->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) !=
DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE) ||
((l->type & DRM_EXYNOS_IPP_LIMIT_SIZE_MASK) !=
limit_id_fallback[id][i]))
continue;
__limit_set_val(&res->h.min, l->h.min);
__limit_set_val(&res->h.max, l->h.max);
__limit_set_val(&res->h.align, l->h.align);
__limit_set_val(&res->v.min, l->v.min);
__limit_set_val(&res->v.max, l->v.max);
__limit_set_val(&res->v.align, l->v.align);
}
}
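/*
 * Alignment values are assumed to be powers of two, hence the mask
 * test below; e.g. val = 1922 with align = 16 fails, as 1922 & 15 == 2.
 */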
static inline bool __align_check(unsigned int val, unsigned int align)
{
if (align && (val & (align - 1))) {
DRM_DEBUG_DRIVER("Value %d exceeds HW limits (align %d)\n",
val, align);
return false;
}
return true;
}
static inline bool __size_limit_check(unsigned int val,
struct drm_exynos_ipp_limit_val *l)
{
if ((l->min && val < l->min) || (l->max && val > l->max)) {
DRM_DEBUG_DRIVER("Value %d exceeds HW limits (min %d, max %d)\n",
val, l->min, l->max);
return false;
}
return __align_check(val, l->align);
}
static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
const struct drm_exynos_ipp_limit *limits, unsigned int num_limits,
bool rotate, bool swap)
{
enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
struct drm_ipp_limit l;
struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
if (!limits)
return 0;
__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
if (!__size_limit_check(real_width, &l.h) ||
!__size_limit_check(buf->buf.height, &l.v))
return -EINVAL;
if (swap) {
lv = &l.h;
lh = &l.v;
}
__get_size_limit(limits, num_limits, id, &l);
if (!__size_limit_check(buf->rect.w, lh) ||
!__align_check(buf->rect.x, lh->align) ||
!__size_limit_check(buf->rect.h, lv) ||
!__align_check(buf->rect.y, lv->align))
return -EINVAL;
return 0;
}
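/*
 * Scaling limits are 16.16 fixed-point destination/source ratios.
 * For example, with min = 0x4000 (0.25) a downscale from 100 to 20
 * pixels is rejected: (20 << 16) = 1310720 < 100 * 0x4000 = 1638400.
 */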
static inline bool __scale_limit_check(unsigned int src, unsigned int dst,
unsigned int min, unsigned int max)
{
if ((max && (dst << 16) > src * max) ||
(min && (dst << 16) < src * min)) {
DRM_DEBUG_DRIVER("Scale from %d to %d exceeds HW limits (ratio min %d.%05d, max %d.%05d)\n",
src, dst,
min >> 16, 100000 * (min & 0xffff) / (1 << 16),
max >> 16, 100000 * (max & 0xffff) / (1 << 16));
return false;
}
return true;
}
static int exynos_drm_ipp_check_scale_limits(
struct drm_exynos_ipp_task_rect *src,
struct drm_exynos_ipp_task_rect *dst,
const struct drm_exynos_ipp_limit *limits,
unsigned int num_limits, bool swap)
{
const struct drm_exynos_ipp_limit_val *lh, *lv;
int dw, dh;
for (; num_limits; limits++, num_limits--)
if ((limits->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) ==
DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE)
break;
if (!num_limits)
return 0;
lh = (!swap) ? &limits->h : &limits->v;
lv = (!swap) ? &limits->v : &limits->h;
dw = (!swap) ? dst->w : dst->h;
dh = (!swap) ? dst->h : dst->w;
if (!__scale_limit_check(src->w, dw, lh->min, lh->max) ||
!__scale_limit_check(src->h, dh, lv->min, lv->max))
return -EINVAL;
return 0;
}
static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
struct exynos_drm_ipp_buffer *buf,
struct exynos_drm_ipp_buffer *src,
struct exynos_drm_ipp_buffer *dst,
bool rotate, bool swap)
{
const struct exynos_drm_ipp_formats *fmt;
int ret, i;
fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
DRM_EXYNOS_IPP_FORMAT_DESTINATION);
if (!fmt) {
DRM_DEV_DEBUG_DRIVER(task->dev,
"Task %pK: %s format not supported\n",
task, buf == src ? "src" : "dst");
return -EINVAL;
}
/* basic checks */
if (buf->buf.width == 0 || buf->buf.height == 0)
return -EINVAL;
buf->format = drm_format_info(buf->buf.fourcc);
for (i = 0; i < buf->format->num_planes; i++) {
unsigned int width = (i == 0) ? buf->buf.width :
DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
if (buf->buf.pitch[i] == 0)
buf->buf.pitch[i] = width * buf->format->cpp[i];
if (buf->buf.pitch[i] < width * buf->format->cpp[i])
return -EINVAL;
if (!buf->buf.gem_id[i])
return -ENOENT;
}
/* pitch for additional planes must match */
if (buf->format->num_planes > 2 &&
buf->buf.pitch[1] != buf->buf.pitch[2])
return -EINVAL;
/* check driver limits */
ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
fmt->num_limits,
rotate,
buf == dst ? swap : false);
if (ret)
return ret;
ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
fmt->limits,
fmt->num_limits, swap);
return ret;
}
static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
{
struct exynos_drm_ipp *ipp = task->ipp;
struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
unsigned int rotation = task->transform.rotation;
int ret = 0;
bool swap = drm_rotation_90_or_270(rotation);
bool rotate = (rotation != DRM_MODE_ROTATE_0);
bool scale = false;
DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %pK\n", task);
if (src->rect.w == UINT_MAX)
src->rect.w = src->buf.width;
if (src->rect.h == UINT_MAX)
src->rect.h = src->buf.height;
if (dst->rect.w == UINT_MAX)
dst->rect.w = dst->buf.width;
if (dst->rect.h == UINT_MAX)
dst->rect.h = dst->buf.height;
if (src->rect.x + src->rect.w > (src->buf.width) ||
src->rect.y + src->rect.h > (src->buf.height) ||
dst->rect.x + dst->rect.w > (dst->buf.width) ||
dst->rect.y + dst->rect.h > (dst->buf.height)) {
DRM_DEV_DEBUG_DRIVER(task->dev,
"Task %pK: defined area is outside provided buffers\n",
task);
return -EINVAL;
}
if ((!swap && (src->rect.w != dst->rect.w ||
src->rect.h != dst->rect.h)) ||
(swap && (src->rect.w != dst->rect.h ||
src->rect.h != dst->rect.w)))
scale = true;
if ((!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CROP) &&
(src->rect.x || src->rect.y || dst->rect.x || dst->rect.y)) ||
(!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_ROTATE) && rotate) ||
(!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
(!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
src->buf.fourcc != dst->buf.fourcc)) {
DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: hw capabilities exceeded\n",
task);
return -EINVAL;
}
ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
if (ret)
return ret;
ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
if (ret)
return ret;
DRM_DEV_DEBUG_DRIVER(ipp->dev, "Task %pK: all checks done.\n",
task);
return ret;
}
static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
struct drm_file *filp)
{
struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
int ret = 0;
DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %pK\n",
task);
ret = exynos_drm_ipp_task_setup_buffer(src, filp);
if (ret) {
DRM_DEV_DEBUG_DRIVER(task->dev,
"Task %pK: src buffer setup failed\n",
task);
return ret;
}
ret = exynos_drm_ipp_task_setup_buffer(dst, filp);
if (ret) {
DRM_DEV_DEBUG_DRIVER(task->dev,
"Task %pK: dst buffer setup failed\n",
task);
return ret;
}
DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: buffers prepared.\n",
task);
return ret;
}
static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
struct drm_file *file_priv, uint64_t user_data)
{
struct drm_pending_exynos_ipp_event *e = NULL;
int ret;
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e)
return -ENOMEM;
e->event.base.type = DRM_EXYNOS_IPP_EVENT;
e->event.base.length = sizeof(e->event);
e->event.user_data = user_data;
ret = drm_event_reserve_init(task->ipp->drm_dev, file_priv, &e->base,
&e->event.base);
if (ret)
goto free;
task->event = e;
return 0;
free:
kfree(e);
return ret;
}
static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
{
struct timespec64 now;
ktime_get_ts64(&now);
task->event->event.tv_sec = now.tv_sec;
task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);
drm_send_event(task->ipp->drm_dev, &task->event->base);
}
static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
{
int ret = task->ret;
if (ret == 0 && task->event) {
exynos_drm_ipp_event_send(task);
/* ensure event won't be canceled on task free */
task->event = NULL;
}
exynos_drm_ipp_task_free(task->ipp, task);
return ret;
}
static void exynos_drm_ipp_cleanup_work(struct work_struct *work)
{
struct exynos_drm_ipp_task *task = container_of(work,
struct exynos_drm_ipp_task, cleanup_work);
exynos_drm_ipp_task_cleanup(task);
}
static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp);
/**
* exynos_drm_ipp_task_done - finish given task and set return code
* @task: ipp task to finish
* @ret: error code or 0 if operation has been performed successfully
*/
void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
{
struct exynos_drm_ipp *ipp = task->ipp;
unsigned long flags;
DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %pK done: %d\n",
ipp->id, task, ret);
spin_lock_irqsave(&ipp->lock, flags);
if (ipp->task == task)
ipp->task = NULL;
task->flags |= DRM_EXYNOS_IPP_TASK_DONE;
task->ret = ret;
spin_unlock_irqrestore(&ipp->lock, flags);
exynos_drm_ipp_next_task(ipp);
wake_up(&ipp->done_wq);
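	/*
	 * For nonblocking tasks no process is waiting in the ioctl, so
	 * the final cleanup is deferred to a workqueue; this function
	 * may be called from the driver's interrupt handler.
	 */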
if (task->flags & DRM_EXYNOS_IPP_TASK_ASYNC) {
INIT_WORK(&task->cleanup_work, exynos_drm_ipp_cleanup_work);
schedule_work(&task->cleanup_work);
}
}
static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
{
struct exynos_drm_ipp_task *task;
unsigned long flags;
int ret;
DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, try to run new task\n",
ipp->id);
spin_lock_irqsave(&ipp->lock, flags);
if (ipp->task || list_empty(&ipp->todo_list)) {
spin_unlock_irqrestore(&ipp->lock, flags);
return;
}
task = list_first_entry(&ipp->todo_list, struct exynos_drm_ipp_task,
head);
list_del_init(&task->head);
ipp->task = task;
spin_unlock_irqrestore(&ipp->lock, flags);
DRM_DEV_DEBUG_DRIVER(ipp->dev,
"ipp: %d, selected task %pK to run\n", ipp->id,
task);
ret = ipp->funcs->commit(ipp, task);
if (ret)
exynos_drm_ipp_task_done(task, ret);
}
static void exynos_drm_ipp_schedule_task(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
unsigned long flags;
spin_lock_irqsave(&ipp->lock, flags);
list_add(&task->head, &ipp->todo_list);
spin_unlock_irqrestore(&ipp->lock, flags);
exynos_drm_ipp_next_task(ipp);
}
static void exynos_drm_ipp_task_abort(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
unsigned long flags;
spin_lock_irqsave(&ipp->lock, flags);
if (task->flags & DRM_EXYNOS_IPP_TASK_DONE) {
/* already completed task */
exynos_drm_ipp_task_cleanup(task);
} else if (ipp->task != task) {
/* task has not been scheduled for execution yet */
list_del_init(&task->head);
exynos_drm_ipp_task_cleanup(task);
} else {
/*
* currently processed task, call abort() and perform
* cleanup with async worker
*/
task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
spin_unlock_irqrestore(&ipp->lock, flags);
if (ipp->funcs->abort)
ipp->funcs->abort(ipp, task);
return;
}
spin_unlock_irqrestore(&ipp->lock, flags);
}
/**
* exynos_drm_ipp_commit_ioctl - perform image processing operation
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
 * Construct an ipp task from the set of properties provided by the
 * user and try to schedule it on the picture processor hardware.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_ioctl_ipp_commit *arg = data;
struct exynos_drm_ipp *ipp;
struct exynos_drm_ipp_task *task;
int ret = 0;
if ((arg->flags & ~DRM_EXYNOS_IPP_FLAGS) || arg->reserved)
return -EINVAL;
/* can't test and expect an event at the same time */
if ((arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY) &&
(arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT))
return -EINVAL;
ipp = __ipp_get(arg->ipp_id);
if (!ipp)
return -ENOENT;
task = exynos_drm_ipp_task_alloc(ipp);
if (!task)
return -ENOMEM;
ret = exynos_drm_ipp_task_set(task, arg);
if (ret)
goto free;
ret = exynos_drm_ipp_task_check(task);
if (ret)
goto free;
ret = exynos_drm_ipp_task_setup_buffers(task, file_priv);
if (ret || arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY)
goto free;
if (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT) {
ret = exynos_drm_ipp_event_create(task, file_priv,
arg->user_data);
if (ret)
goto free;
}
/*
	 * Queue the task for processing on the hardware. The task object
	 * will then be freed after exynos_drm_ipp_task_done().
*/
if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
DRM_DEV_DEBUG_DRIVER(ipp->dev,
"ipp: %d, nonblocking processing task %pK\n",
ipp->id, task);
task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
exynos_drm_ipp_schedule_task(task->ipp, task);
ret = 0;
} else {
DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, processing task %pK\n",
ipp->id, task);
exynos_drm_ipp_schedule_task(ipp, task);
ret = wait_event_interruptible(ipp->done_wq,
task->flags & DRM_EXYNOS_IPP_TASK_DONE);
if (ret)
exynos_drm_ipp_task_abort(ipp, task);
else
ret = exynos_drm_ipp_task_cleanup(task);
}
return ret;
free:
exynos_drm_ipp_task_free(ipp, task);
return ret;
}
| linux-master | drivers/gpu/drm/exynos/exynos_drm_ipp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Samsung SoC DP (Display Port) interface driver.
*
* Copyright (C) 2012 Samsung Electronics Co., Ltd.
* Author: Jingoo Han <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
#include <drm/bridge/analogix_dp.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
#define to_dp(nm) container_of(nm, struct exynos_dp_device, nm)
struct exynos_dp_device {
struct drm_encoder encoder;
struct drm_connector *connector;
struct drm_bridge *ptn_bridge;
struct drm_device *drm_dev;
struct device *dev;
struct videomode vm;
struct analogix_dp_device *adp;
struct analogix_dp_plat_data plat_data;
};
static int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data,
bool enable)
{
struct exynos_dp_device *dp = to_dp(plat_data);
struct drm_encoder *encoder = &dp->encoder;
if (!encoder->crtc)
return -EPERM;
exynos_drm_pipe_clk_enable(to_exynos_crtc(encoder->crtc), enable);
return 0;
}
static int exynos_dp_poweron(struct analogix_dp_plat_data *plat_data)
{
return exynos_dp_crtc_clock_enable(plat_data, true);
}
static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data)
{
return exynos_dp_crtc_clock_enable(plat_data, false);
}
static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
struct drm_connector *connector)
{
struct exynos_dp_device *dp = to_dp(plat_data);
struct drm_display_mode *mode;
int num_modes = 0;
if (dp->plat_data.panel)
return num_modes;
mode = drm_mode_create(connector->dev);
if (!mode) {
DRM_DEV_ERROR(dp->dev,
"failed to create a new display mode.\n");
return num_modes;
}
drm_display_mode_from_videomode(&dp->vm, mode);
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
return num_modes + 1;
}
static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct exynos_dp_device *dp = to_dp(plat_data);
int ret;
dp->connector = connector;
/* Pre-empt DP connector creation if there's a bridge */
if (dp->ptn_bridge) {
ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge,
0);
if (ret)
return ret;
}
return 0;
}
static void exynos_dp_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
}
static void exynos_dp_nop(struct drm_encoder *encoder)
{
/* do nothing */
}
static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
.mode_set = exynos_dp_mode_set,
.enable = exynos_dp_nop,
.disable = exynos_dp_nop,
};
static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
{
int ret;
ret = of_get_videomode(dp->dev->of_node, &dp->vm, OF_USE_NATIVE_MODE);
if (ret) {
DRM_DEV_ERROR(dp->dev,
"failed: of_get_videomode() : %d\n", ret);
return ret;
}
return 0;
}
static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
struct drm_encoder *encoder = &dp->encoder;
struct drm_device *drm_dev = data;
int ret;
dp->drm_dev = drm_dev;
if (!dp->plat_data.panel && !dp->ptn_bridge) {
ret = exynos_dp_dt_parse_panel(dp);
if (ret)
return ret;
}
drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
if (ret < 0)
return ret;
dp->plat_data.encoder = encoder;
ret = analogix_dp_bind(dp->adp, dp->drm_dev);
if (ret)
dp->encoder.funcs->destroy(&dp->encoder);
return ret;
}
static void exynos_dp_unbind(struct device *dev, struct device *master,
void *data)
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
analogix_dp_unbind(dp->adp);
dp->encoder.funcs->destroy(&dp->encoder);
}
static const struct component_ops exynos_dp_ops = {
.bind = exynos_dp_bind,
.unbind = exynos_dp_unbind,
};
static int exynos_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np;
struct exynos_dp_device *dp;
struct drm_panel *panel;
struct drm_bridge *bridge;
int ret;
dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
GFP_KERNEL);
if (!dp)
return -ENOMEM;
dp->dev = dev;
/*
	 * We just use the drvdata until the driver runs into the
	 * component add function, and then we set drvdata to NULL so
	 * that the analogix dp driver takes charge of it.
*/
platform_set_drvdata(pdev, dp);
	/* This is for backward compatibility. */
np = of_parse_phandle(dev->of_node, "panel", 0);
if (np) {
dp->plat_data.panel = of_drm_find_panel(np);
of_node_put(np);
if (IS_ERR(dp->plat_data.panel))
return PTR_ERR(dp->plat_data.panel);
goto out;
}
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &panel, &bridge);
if (ret)
return ret;
/* The remote port can be either a panel or a bridge */
dp->plat_data.panel = panel;
dp->plat_data.dev_type = EXYNOS_DP;
dp->plat_data.power_on_start = exynos_dp_poweron;
dp->plat_data.power_off = exynos_dp_poweroff;
dp->plat_data.attach = exynos_dp_bridge_attach;
dp->plat_data.get_modes = exynos_dp_get_modes;
dp->plat_data.skip_connector = !!bridge;
dp->ptn_bridge = bridge;
out:
dp->adp = analogix_dp_probe(dev, &dp->plat_data);
if (IS_ERR(dp->adp))
return PTR_ERR(dp->adp);
return component_add(&pdev->dev, &exynos_dp_ops);
}
static int exynos_dp_remove(struct platform_device *pdev)
{
struct exynos_dp_device *dp = platform_get_drvdata(pdev);
component_del(&pdev->dev, &exynos_dp_ops);
analogix_dp_remove(dp->adp);
return 0;
}
static int exynos_dp_suspend(struct device *dev)
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
return analogix_dp_suspend(dp->adp);
}
static int exynos_dp_resume(struct device *dev)
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
return analogix_dp_resume(dp->adp);
}
static DEFINE_RUNTIME_DEV_PM_OPS(exynos_dp_pm_ops, exynos_dp_suspend,
exynos_dp_resume, NULL);
static const struct of_device_id exynos_dp_match[] = {
{ .compatible = "samsung,exynos5-dp" },
{},
};
MODULE_DEVICE_TABLE(of, exynos_dp_match);
struct platform_driver dp_driver = {
.probe = exynos_dp_probe,
.remove = exynos_dp_remove,
.driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
.pm = pm_ptr(&exynos_dp_pm_ops),
.of_match_table = exynos_dp_match,
},
};
MODULE_AUTHOR("Jingoo Han <[email protected]>");
MODULE_DESCRIPTION("Samsung Specific Analogix-DP Driver Extension");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/exynos/exynos_dp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Exynos DRM Parallel output support.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd
*
* Contacts: Andrzej Hajda <[email protected]>
*/
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
#include "exynos_drm_crtc.h"
struct exynos_dpi {
struct drm_encoder encoder;
struct device *dev;
struct device_node *panel_node;
struct drm_panel *panel;
struct drm_connector connector;
struct videomode *vm;
};
#define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
static inline struct exynos_dpi *encoder_to_dpi(struct drm_encoder *e)
{
return container_of(e, struct exynos_dpi, encoder);
}
static enum drm_connector_status
exynos_dpi_detect(struct drm_connector *connector, bool force)
{
return connector_status_connected;
}
static void exynos_dpi_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
static const struct drm_connector_funcs exynos_dpi_connector_funcs = {
.detect = exynos_dpi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = exynos_dpi_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int exynos_dpi_get_modes(struct drm_connector *connector)
{
struct exynos_dpi *ctx = connector_to_dpi(connector);
	/* fimd timings take precedence over panel modes */
if (ctx->vm) {
struct drm_display_mode *mode;
mode = drm_mode_create(connector->dev);
if (!mode) {
DRM_DEV_ERROR(ctx->dev,
"failed to create a new display mode\n");
return 0;
}
drm_display_mode_from_videomode(ctx->vm, mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
return 1;
}
if (ctx->panel)
return drm_panel_get_modes(ctx->panel, connector);
return 0;
}
static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
.get_modes = exynos_dpi_get_modes,
};
static int exynos_dpi_create_connector(struct drm_encoder *encoder)
{
struct exynos_dpi *ctx = encoder_to_dpi(encoder);
struct drm_connector *connector = &ctx->connector;
int ret;
connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(encoder->dev, connector,
&exynos_dpi_connector_funcs,
DRM_MODE_CONNECTOR_VGA);
if (ret) {
DRM_DEV_ERROR(ctx->dev,
"failed to initialize connector with drm\n");
return ret;
}
drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);
return 0;
}
static void exynos_dpi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
}
static void exynos_dpi_enable(struct drm_encoder *encoder)
{
struct exynos_dpi *ctx = encoder_to_dpi(encoder);
if (ctx->panel) {
drm_panel_prepare(ctx->panel);
drm_panel_enable(ctx->panel);
}
}
static void exynos_dpi_disable(struct drm_encoder *encoder)
{
struct exynos_dpi *ctx = encoder_to_dpi(encoder);
if (ctx->panel) {
drm_panel_disable(ctx->panel);
drm_panel_unprepare(ctx->panel);
}
}
static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
.mode_set = exynos_dpi_mode_set,
.enable = exynos_dpi_enable,
.disable = exynos_dpi_disable,
};
enum {
FIMD_PORT_IN0,
FIMD_PORT_IN1,
FIMD_PORT_IN2,
FIMD_PORT_RGB,
FIMD_PORT_WRB,
};
static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
{
struct device *dev = ctx->dev;
struct device_node *dn = dev->of_node;
struct device_node *np;
ctx->panel_node = of_graph_get_remote_node(dn, FIMD_PORT_RGB, 0);
np = of_get_child_by_name(dn, "display-timings");
if (np) {
struct videomode *vm;
int ret;
of_node_put(np);
vm = devm_kzalloc(dev, sizeof(*ctx->vm), GFP_KERNEL);
if (!vm)
return -ENOMEM;
ret = of_get_videomode(dn, vm, 0);
if (ret < 0) {
devm_kfree(dev, vm);
return ret;
}
ctx->vm = vm;
return 0;
}
if (!ctx->panel_node)
return -EINVAL;
return 0;
}
int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
{
int ret;
drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
if (ret < 0)
return ret;
ret = exynos_dpi_create_connector(encoder);
if (ret) {
DRM_DEV_ERROR(encoder_to_dpi(encoder)->dev,
"failed to create connector ret = %d\n", ret);
drm_encoder_cleanup(encoder);
return ret;
}
return 0;
}
struct drm_encoder *exynos_dpi_probe(struct device *dev)
{
struct exynos_dpi *ctx;
int ret;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->dev = dev;
ret = exynos_dpi_parse_dt(ctx);
if (ret < 0) {
devm_kfree(dev, ctx);
return NULL;
}
if (ctx->panel_node) {
ctx->panel = of_drm_find_panel(ctx->panel_node);
if (IS_ERR(ctx->panel))
return ERR_CAST(ctx->panel);
}
return &ctx->encoder;
}
int exynos_dpi_remove(struct drm_encoder *encoder)
{
struct exynos_dpi *ctx = encoder_to_dpi(encoder);
exynos_dpi_disable(&ctx->encoder);
return 0;
}
| linux-master | drivers/gpu/drm/exynos/exynos_drm_dpi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Free Electrons
* Copyright (C) 2015 NextThing Co
*
* Maxime Ripard <[email protected]>
*/
#include <linux/component.h>
#include <linux/ioport.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <uapi/drm/drm_mode.h>
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
#include "sun4i_lvds.h"
#include "sun4i_rgb.h"
#include "sun4i_tcon.h"
#include "sun6i_mipi_dsi.h"
#include "sun4i_tcon_dclk.h"
#include "sun8i_tcon_top.h"
#include "sunxi_engine.h"
static struct drm_connector *sun4i_tcon_get_connector(const struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct drm_connector_list_iter iter;
drm_connector_list_iter_begin(encoder->dev, &iter);
drm_for_each_connector_iter(connector, &iter)
if (connector->encoder == encoder) {
drm_connector_list_iter_end(&iter);
return connector;
}
drm_connector_list_iter_end(&iter);
return NULL;
}
static int sun4i_tcon_get_pixel_depth(const struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct drm_display_info *info;
connector = sun4i_tcon_get_connector(encoder);
if (!connector)
return -EINVAL;
info = &connector->display_info;
if (info->num_bus_formats != 1)
return -EINVAL;
switch (info->bus_formats[0]) {
case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
return 18;
case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
return 24;
}
return -EINVAL;
}
static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
bool enabled)
{
struct clk *clk;
switch (channel) {
case 0:
WARN_ON(!tcon->quirks->has_channel_0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
SUN4I_TCON0_CTL_TCON_ENABLE,
enabled ? SUN4I_TCON0_CTL_TCON_ENABLE : 0);
clk = tcon->dclk;
break;
case 1:
WARN_ON(!tcon->quirks->has_channel_1);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_TCON_ENABLE,
enabled ? SUN4I_TCON1_CTL_TCON_ENABLE : 0);
clk = tcon->sclk1;
break;
default:
DRM_WARN("Unknown channel... doing nothing\n");
return;
}
if (enabled) {
clk_prepare_enable(clk);
clk_rate_exclusive_get(clk);
} else {
clk_rate_exclusive_put(clk);
clk_disable_unprepare(clk);
}
}
static void sun4i_tcon_setup_lvds_phy(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder)
{
regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
SUN4I_TCON0_LVDS_ANA0_CK_EN |
SUN4I_TCON0_LVDS_ANA0_REG_V |
SUN4I_TCON0_LVDS_ANA0_REG_C |
SUN4I_TCON0_LVDS_ANA0_EN_MB |
SUN4I_TCON0_LVDS_ANA0_PD |
SUN4I_TCON0_LVDS_ANA0_DCHS);
udelay(2); /* delay at least 1200 ns */
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA1_REG,
SUN4I_TCON0_LVDS_ANA1_INIT,
SUN4I_TCON0_LVDS_ANA1_INIT);
udelay(1); /* delay at least 120 ns */
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA1_REG,
SUN4I_TCON0_LVDS_ANA1_UPDATE,
SUN4I_TCON0_LVDS_ANA1_UPDATE);
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
SUN4I_TCON0_LVDS_ANA0_EN_MB,
SUN4I_TCON0_LVDS_ANA0_EN_MB);
}
static void sun6i_tcon_setup_lvds_phy(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder)
{
u8 val;
regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
SUN6I_TCON0_LVDS_ANA0_C(2) |
SUN6I_TCON0_LVDS_ANA0_V(3) |
SUN6I_TCON0_LVDS_ANA0_PD(2) |
SUN6I_TCON0_LVDS_ANA0_EN_LDO);
udelay(2);
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
SUN6I_TCON0_LVDS_ANA0_EN_MB,
SUN6I_TCON0_LVDS_ANA0_EN_MB);
udelay(2);
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
SUN6I_TCON0_LVDS_ANA0_EN_DRVC,
SUN6I_TCON0_LVDS_ANA0_EN_DRVC);
if (sun4i_tcon_get_pixel_depth(encoder) == 18)
val = 7;
else
val = 0xf;
regmap_write_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
SUN6I_TCON0_LVDS_ANA0_EN_DRVD(0xf),
SUN6I_TCON0_LVDS_ANA0_EN_DRVD(val));
}
static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder,
bool enabled)
{
if (enabled) {
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
SUN4I_TCON0_LVDS_IF_EN,
SUN4I_TCON0_LVDS_IF_EN);
if (tcon->quirks->setup_lvds_phy)
tcon->quirks->setup_lvds_phy(tcon, encoder);
} else {
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
SUN4I_TCON0_LVDS_IF_EN, 0);
}
}
void sun4i_tcon_set_status(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder,
bool enabled)
{
bool is_lvds = false;
int channel;
switch (encoder->encoder_type) {
case DRM_MODE_ENCODER_LVDS:
is_lvds = true;
fallthrough;
case DRM_MODE_ENCODER_DSI:
case DRM_MODE_ENCODER_NONE:
channel = 0;
break;
case DRM_MODE_ENCODER_TMDS:
case DRM_MODE_ENCODER_TVDAC:
channel = 1;
break;
default:
DRM_DEBUG_DRIVER("Unknown encoder type, doing nothing...\n");
return;
}
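	/*
	 * The LVDS interface must be brought down before the TCON core
	 * is disabled and brought up only after it is enabled, hence
	 * the two ordered checks below.
	 */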
if (is_lvds && !enabled)
sun4i_tcon_lvds_set_status(tcon, encoder, false);
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
SUN4I_TCON_GCTL_TCON_ENABLE,
enabled ? SUN4I_TCON_GCTL_TCON_ENABLE : 0);
if (is_lvds && enabled)
sun4i_tcon_lvds_set_status(tcon, encoder, true);
sun4i_tcon_channel_set_status(tcon, channel, enabled);
}
void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable)
{
u32 mask, val = 0;
DRM_DEBUG_DRIVER("%sabling VBLANK interrupt\n", enable ? "En" : "Dis");
mask = SUN4I_TCON_GINT0_VBLANK_ENABLE(0) |
SUN4I_TCON_GINT0_VBLANK_ENABLE(1) |
SUN4I_TCON_GINT0_TCON0_TRI_FINISH_ENABLE;
if (enable)
val = mask;
regmap_update_bits(tcon->regs, SUN4I_TCON_GINT0_REG, mask, val);
}
EXPORT_SYMBOL(sun4i_tcon_enable_vblank);
/*
* This function is a helper for TCON output muxing. The TCON output
* muxing control register in earlier SoCs (without the TCON TOP block)
 * is located in TCON0. This helper returns a pointer to TCON0's
* sun4i_tcon structure, or NULL if not found.
*/
static struct sun4i_tcon *sun4i_get_tcon0(struct drm_device *drm)
{
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_tcon *tcon;
list_for_each_entry(tcon, &drv->tcon_list, list)
if (tcon->id == 0)
return tcon;
dev_warn(drm->dev,
"TCON0 not found, display output muxing may not work\n");
return NULL;
}
static void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
const struct drm_encoder *encoder)
{
int ret = -ENOTSUPP;
if (tcon->quirks->set_mux)
ret = tcon->quirks->set_mux(tcon, encoder);
DRM_DEBUG_DRIVER("Muxing encoder %s to CRTC %s: %d\n",
encoder->name, encoder->crtc->name, ret);
}
static int sun4i_tcon_get_clk_delay(const struct drm_display_mode *mode,
int channel)
{
int delay = mode->vtotal - mode->vdisplay;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
delay /= 2;
if (channel == 1)
delay -= 2;
delay = min(delay, 30);
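	/*
	 * For example, a progressive 1920x1080 mode (vtotal 1125) on
	 * channel 1 gives 1125 - 1080 - 2 = 43, which min() above caps
	 * at 30.
	 */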
DRM_DEBUG_DRIVER("TCON %d clock delay %u\n", channel, delay);
return delay;
}
static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon,
const struct drm_connector *connector)
{
u32 bus_format = 0;
u32 val = 0;
/* XXX Would this ever happen? */
if (!connector)
return;
/*
* FIXME: Undocumented bits
*
* The whole dithering process and these parameters are not
* explained in the vendor documents or BSP kernel code.
*/
regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_PR_REG, 0x11111111);
regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_PG_REG, 0x11111111);
regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_PB_REG, 0x11111111);
regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_LR_REG, 0x11111111);
regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_LG_REG, 0x11111111);
regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_LB_REG, 0x11111111);
regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL0_REG, 0x01010000);
regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL1_REG, 0x15151111);
regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL2_REG, 0x57575555);
regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL3_REG, 0x7f7f7777);
/* Do dithering if panel only supports 6 bits per color */
if (connector->display_info.bpc == 6)
val |= SUN4I_TCON0_FRM_CTL_EN;
if (connector->display_info.num_bus_formats == 1)
bus_format = connector->display_info.bus_formats[0];
/* Check the connection format */
switch (bus_format) {
case MEDIA_BUS_FMT_RGB565_1X16:
/* R and B components are only 5 bits deep */
val |= SUN4I_TCON0_FRM_CTL_MODE_R;
val |= SUN4I_TCON0_FRM_CTL_MODE_B;
fallthrough;
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
/* Fall through: enable dithering */
val |= SUN4I_TCON0_FRM_CTL_EN;
break;
}
/* Write dithering settings */
regmap_write(tcon->regs, SUN4I_TCON_FRM_CTL_REG, val);
}
static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
/* TODO support normal CPU interface modes */
struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
struct mipi_dsi_device *device = dsi->device;
u8 bpp = mipi_dsi_pixel_format_to_bpp(device->format);
u8 lanes = device->lanes;
u32 block_space, start_delay;
u32 tcon_div;
/*
* dclk is required to run at 1/4 the DSI per-lane bit rate.
*/
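	/*
	 * For example, a 24 bpp stream over 4 lanes carries 6 bits per
	 * lane per pixel, so dclk is set to crtc_clock * 6 / 4.
	 */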
tcon->dclk_min_div = SUN6I_DSI_TCON_DIV;
tcon->dclk_max_div = SUN6I_DSI_TCON_DIV;
clk_set_rate(tcon->dclk, mode->crtc_clock * 1000 * (bpp / lanes)
/ SUN6I_DSI_TCON_DIV);
/* Set the resolution */
regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) |
SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
/* Set dithering if needed */
sun4i_tcon0_mode_set_dithering(tcon, sun4i_tcon_get_connector(encoder));
regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
SUN4I_TCON0_CTL_IF_MASK,
SUN4I_TCON0_CTL_IF_8080);
regmap_write(tcon->regs, SUN4I_TCON_ECC_FIFO_REG,
SUN4I_TCON_ECC_FIFO_EN);
regmap_write(tcon->regs, SUN4I_TCON0_CPU_IF_REG,
SUN4I_TCON0_CPU_IF_MODE_DSI |
SUN4I_TCON0_CPU_IF_TRI_FIFO_FLUSH |
SUN4I_TCON0_CPU_IF_TRI_FIFO_EN |
SUN4I_TCON0_CPU_IF_TRI_EN);
/*
* This looks suspicious, but it works...
*
* The datasheet says that this should be set higher than 20 *
* pixel cycle, but it's not clear what a pixel cycle is.
*/
regmap_read(tcon->regs, SUN4I_TCON0_DCLK_REG, &tcon_div);
tcon_div &= GENMASK(6, 0);
block_space = mode->htotal * bpp / (tcon_div * lanes);
block_space -= mode->hdisplay + 40;
regmap_write(tcon->regs, SUN4I_TCON0_CPU_TRI0_REG,
SUN4I_TCON0_CPU_TRI0_BLOCK_SPACE(block_space) |
SUN4I_TCON0_CPU_TRI0_BLOCK_SIZE(mode->hdisplay));
regmap_write(tcon->regs, SUN4I_TCON0_CPU_TRI1_REG,
SUN4I_TCON0_CPU_TRI1_BLOCK_NUM(mode->vdisplay));
start_delay = (mode->crtc_vtotal - mode->crtc_vdisplay - 10 - 1);
start_delay = start_delay * mode->crtc_htotal * 149;
start_delay = start_delay / (mode->crtc_clock / 1000) / 8;
regmap_write(tcon->regs, SUN4I_TCON0_CPU_TRI2_REG,
SUN4I_TCON0_CPU_TRI2_TRANS_START_SET(10) |
SUN4I_TCON0_CPU_TRI2_START_DELAY(start_delay));
/*
* The Allwinner BSP has a comment that the period should be
	 * the display clock * 15, but uses a hardcoded 3000...
*/
regmap_write(tcon->regs, SUN4I_TCON_SAFE_PERIOD_REG,
SUN4I_TCON_SAFE_PERIOD_NUM(3000) |
SUN4I_TCON_SAFE_PERIOD_MODE(3));
/* Enable the output on the pins */
regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG,
0xe0000000);
}
static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
unsigned int bp;
u8 clk_delay;
u32 reg, val = 0;
WARN_ON(!tcon->quirks->has_channel_0);
tcon->dclk_min_div = 7;
tcon->dclk_max_div = 7;
clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
/* Set the resolution */
regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) |
SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
/* Set dithering if needed */
sun4i_tcon0_mode_set_dithering(tcon, sun4i_tcon_get_connector(encoder));
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
SUN4I_TCON0_CTL_CLK_DELAY_MASK,
SUN4I_TCON0_CTL_CLK_DELAY(clk_delay));
/*
* This is called a backporch in the register documentation,
* but it really is the back porch + hsync
*/
bp = mode->crtc_htotal - mode->crtc_hsync_start;
DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n",
mode->crtc_htotal, bp);
/* Set horizontal display timings */
regmap_write(tcon->regs, SUN4I_TCON0_BASIC1_REG,
SUN4I_TCON0_BASIC1_H_TOTAL(mode->htotal) |
SUN4I_TCON0_BASIC1_H_BACKPORCH(bp));
/*
* This is called a backporch in the register documentation,
* but it really is the back porch + hsync
*/
bp = mode->crtc_vtotal - mode->crtc_vsync_start;
DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n",
mode->crtc_vtotal, bp);
/* Set vertical display timings */
regmap_write(tcon->regs, SUN4I_TCON0_BASIC2_REG,
SUN4I_TCON0_BASIC2_V_TOTAL(mode->crtc_vtotal * 2) |
SUN4I_TCON0_BASIC2_V_BACKPORCH(bp));
reg = SUN4I_TCON0_LVDS_IF_CLK_SEL_TCON0;
if (sun4i_tcon_get_pixel_depth(encoder) == 24)
reg |= SUN4I_TCON0_LVDS_IF_BITWIDTH_24BITS;
else
reg |= SUN4I_TCON0_LVDS_IF_BITWIDTH_18BITS;
regmap_write(tcon->regs, SUN4I_TCON0_LVDS_IF_REG, reg);
/* Setup the polarity of the various signals */
if (!(mode->flags & DRM_MODE_FLAG_PHSYNC))
val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
if (!(mode->flags & DRM_MODE_FLAG_PVSYNC))
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val);
/* Map output pins to channel 0 */
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
SUN4I_TCON_GCTL_IOMAP_MASK,
SUN4I_TCON_GCTL_IOMAP_TCON0);
/* Enable the output on the pins */
regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0xe0000000);
}
static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct drm_connector *connector = sun4i_tcon_get_connector(encoder);
const struct drm_display_info *info = &connector->display_info;
unsigned int bp, hsync, vsync;
u8 clk_delay;
u32 val = 0;
WARN_ON(!tcon->quirks->has_channel_0);
tcon->dclk_min_div = tcon->quirks->dclk_min_div;
tcon->dclk_max_div = 127;
clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
/* Set the resolution */
regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) |
SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
/* Set dithering if needed */
sun4i_tcon0_mode_set_dithering(tcon, connector);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
SUN4I_TCON0_CTL_CLK_DELAY_MASK,
SUN4I_TCON0_CTL_CLK_DELAY(clk_delay));
/*
* This is called a backporch in the register documentation,
* but it really is the back porch + hsync
*/
bp = mode->crtc_htotal - mode->crtc_hsync_start;
DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n",
mode->crtc_htotal, bp);
/* Set horizontal display timings */
regmap_write(tcon->regs, SUN4I_TCON0_BASIC1_REG,
SUN4I_TCON0_BASIC1_H_TOTAL(mode->crtc_htotal) |
SUN4I_TCON0_BASIC1_H_BACKPORCH(bp));
/*
* This is called a backporch in the register documentation,
* but it really is the back porch + hsync
*/
bp = mode->crtc_vtotal - mode->crtc_vsync_start;
DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n",
mode->crtc_vtotal, bp);
/* Set vertical display timings */
regmap_write(tcon->regs, SUN4I_TCON0_BASIC2_REG,
SUN4I_TCON0_BASIC2_V_TOTAL(mode->crtc_vtotal * 2) |
SUN4I_TCON0_BASIC2_V_BACKPORCH(bp));
/* Set Hsync and Vsync length */
hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
DRM_DEBUG_DRIVER("Setting HSYNC %d, VSYNC %d\n", hsync, vsync);
regmap_write(tcon->regs, SUN4I_TCON0_BASIC3_REG,
SUN4I_TCON0_BASIC3_V_SYNC(vsync) |
SUN4I_TCON0_BASIC3_H_SYNC(hsync));
/* Setup the polarity of the various signals */
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
val |= SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE;
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE |
SUN4I_TCON0_IO_POL_DE_NEGATIVE,
val);
/* Map output pins to channel 0 */
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
SUN4I_TCON_GCTL_IOMAP_MASK,
SUN4I_TCON_GCTL_IOMAP_TCON0);
/* Enable the output on the pins */
regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0);
}
static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
const struct drm_display_mode *mode)
{
unsigned int bp, hsync, vsync, vtotal;
u8 clk_delay;
u32 val;
WARN_ON(!tcon->quirks->has_channel_1);
/* Configure the dot clock */
clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_CLK_DELAY_MASK,
SUN4I_TCON1_CTL_CLK_DELAY(clk_delay));
/* Set interlaced mode */
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
val = SUN4I_TCON1_CTL_INTERLACE_ENABLE;
else
val = 0;
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_INTERLACE_ENABLE,
val);
/* Set the input resolution */
regmap_write(tcon->regs, SUN4I_TCON1_BASIC0_REG,
SUN4I_TCON1_BASIC0_X(mode->crtc_hdisplay) |
SUN4I_TCON1_BASIC0_Y(mode->crtc_vdisplay));
/* Set the upscaling resolution */
regmap_write(tcon->regs, SUN4I_TCON1_BASIC1_REG,
SUN4I_TCON1_BASIC1_X(mode->crtc_hdisplay) |
SUN4I_TCON1_BASIC1_Y(mode->crtc_vdisplay));
/* Set the output resolution */
regmap_write(tcon->regs, SUN4I_TCON1_BASIC2_REG,
SUN4I_TCON1_BASIC2_X(mode->crtc_hdisplay) |
SUN4I_TCON1_BASIC2_Y(mode->crtc_vdisplay));
/* Set horizontal display timings */
bp = mode->crtc_htotal - mode->crtc_hsync_start;
DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n",
mode->crtc_htotal, bp);
regmap_write(tcon->regs, SUN4I_TCON1_BASIC3_REG,
SUN4I_TCON1_BASIC3_H_TOTAL(mode->crtc_htotal) |
SUN4I_TCON1_BASIC3_H_BACKPORCH(bp));
bp = mode->crtc_vtotal - mode->crtc_vsync_start;
DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n",
mode->crtc_vtotal, bp);
/*
* The vertical resolution needs to be doubled in all
* cases. We could use crtc_vtotal and always multiply by two,
* but that leads to a rounding error in interlace when vtotal
* is odd.
*
* This happens with TV's PAL for example, where vtotal will
* be 625, crtc_vtotal 312, and thus crtc_vtotal * 2 will be
* 624, which apparently confuses the hardware.
*
* To work around this, we will always use vtotal, and
* multiply by two only if we're not in interlace.
*/
vtotal = mode->vtotal;
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
vtotal = vtotal * 2;
/* Set vertical display timings */
regmap_write(tcon->regs, SUN4I_TCON1_BASIC4_REG,
SUN4I_TCON1_BASIC4_V_TOTAL(vtotal) |
SUN4I_TCON1_BASIC4_V_BACKPORCH(bp));
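/*
 * Illustration of the workaround above: interlaced PAL has
 * vtotal = 625 and is programmed as-is, while a hypothetical
 * progressive mode with vtotal = 525 is programmed as 1050.
 */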
/* Set Hsync and Vsync length */
hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
DRM_DEBUG_DRIVER("Setting HSYNC %d, VSYNC %d\n", hsync, vsync);
regmap_write(tcon->regs, SUN4I_TCON1_BASIC5_REG,
SUN4I_TCON1_BASIC5_V_SYNC(vsync) |
SUN4I_TCON1_BASIC5_H_SYNC(hsync));
/* Setup the polarity of multiple signals */
if (tcon->quirks->polarity_in_ch0) {
val = 0;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val);
} else {
/* according to the vendor driver, this bit must always be set */
val = SUN4I_TCON1_IO_POL_UNKNOWN;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
val |= SUN4I_TCON1_IO_POL_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON1_IO_POL_VSYNC_POSITIVE;
regmap_write(tcon->regs, SUN4I_TCON1_IO_POL_REG, val);
}
/* Map output pins to channel 1 */
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
SUN4I_TCON_GCTL_IOMAP_MASK,
SUN4I_TCON_GCTL_IOMAP_TCON1);
}
void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
switch (encoder->encoder_type) {
case DRM_MODE_ENCODER_DSI:
/* DSI is tied to special case of CPU interface */
sun4i_tcon0_mode_set_cpu(tcon, encoder, mode);
break;
case DRM_MODE_ENCODER_LVDS:
sun4i_tcon0_mode_set_lvds(tcon, encoder, mode);
break;
case DRM_MODE_ENCODER_NONE:
sun4i_tcon0_mode_set_rgb(tcon, encoder, mode);
sun4i_tcon_set_mux(tcon, 0, encoder);
break;
case DRM_MODE_ENCODER_TVDAC:
case DRM_MODE_ENCODER_TMDS:
sun4i_tcon1_mode_set(tcon, mode);
sun4i_tcon_set_mux(tcon, 1, encoder);
break;
default:
DRM_DEBUG_DRIVER("Unknown encoder type, doing nothing...\n");
}
}
EXPORT_SYMBOL(sun4i_tcon_mode_set);
static void sun4i_tcon_finish_page_flip(struct drm_device *dev,
struct sun4i_crtc *scrtc)
{
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
if (scrtc->event) {
drm_crtc_send_vblank_event(&scrtc->crtc, scrtc->event);
drm_crtc_vblank_put(&scrtc->crtc);
scrtc->event = NULL;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
static irqreturn_t sun4i_tcon_handler(int irq, void *private)
{
struct sun4i_tcon *tcon = private;
struct drm_device *drm = tcon->drm;
struct sun4i_crtc *scrtc = tcon->crtc;
struct sunxi_engine *engine = scrtc->engine;
unsigned int status;
regmap_read(tcon->regs, SUN4I_TCON_GINT0_REG, &status);
if (!(status & (SUN4I_TCON_GINT0_VBLANK_INT(0) |
SUN4I_TCON_GINT0_VBLANK_INT(1) |
SUN4I_TCON_GINT0_TCON0_TRI_FINISH_INT)))
return IRQ_NONE;
drm_crtc_handle_vblank(&scrtc->crtc);
sun4i_tcon_finish_page_flip(drm, scrtc);
/* Acknowledge the interrupt */
regmap_update_bits(tcon->regs, SUN4I_TCON_GINT0_REG,
SUN4I_TCON_GINT0_VBLANK_INT(0) |
SUN4I_TCON_GINT0_VBLANK_INT(1) |
SUN4I_TCON_GINT0_TCON0_TRI_FINISH_INT,
0);
if (engine->ops->vblank_quirk)
engine->ops->vblank_quirk(engine);
return IRQ_HANDLED;
}
static int sun4i_tcon_init_clocks(struct device *dev,
struct sun4i_tcon *tcon)
{
tcon->clk = devm_clk_get_enabled(dev, "ahb");
if (IS_ERR(tcon->clk)) {
dev_err(dev, "Couldn't get the TCON bus clock\n");
return PTR_ERR(tcon->clk);
}
if (tcon->quirks->has_channel_0) {
tcon->sclk0 = devm_clk_get_enabled(dev, "tcon-ch0");
if (IS_ERR(tcon->sclk0)) {
dev_err(dev, "Couldn't get the TCON channel 0 clock\n");
return PTR_ERR(tcon->sclk0);
}
}
if (tcon->quirks->has_channel_1) {
tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
if (IS_ERR(tcon->sclk1)) {
dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
return PTR_ERR(tcon->sclk1);
}
}
return 0;
}
static int sun4i_tcon_init_irq(struct device *dev,
struct sun4i_tcon *tcon)
{
struct platform_device *pdev = to_platform_device(dev);
int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_irq(dev, irq, sun4i_tcon_handler, 0,
dev_name(dev), tcon);
if (ret) {
dev_err(dev, "Couldn't request the IRQ\n");
return ret;
}
return 0;
}
static const struct regmap_config sun4i_tcon_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x800,
};
static int sun4i_tcon_init_regmap(struct device *dev,
struct sun4i_tcon *tcon)
{
struct platform_device *pdev = to_platform_device(dev);
void __iomem *regs;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
tcon->regs = devm_regmap_init_mmio(dev, regs,
&sun4i_tcon_regmap_config);
if (IS_ERR(tcon->regs)) {
dev_err(dev, "Couldn't create the TCON regmap\n");
return PTR_ERR(tcon->regs);
}
/* Make sure the TCON is disabled and all IRQs are off */
regmap_write(tcon->regs, SUN4I_TCON_GCTL_REG, 0);
regmap_write(tcon->regs, SUN4I_TCON_GINT0_REG, 0);
regmap_write(tcon->regs, SUN4I_TCON_GINT1_REG, 0);
/* Disable IO lines and set them to tristate */
regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, ~0);
regmap_write(tcon->regs, SUN4I_TCON1_IO_TRI_REG, ~0);
return 0;
}
/*
* On SoCs with the old display pipeline design (Display Engine 1.0),
* the TCON is always tied to just one backend. Hence we can traverse
* the of_graph upwards to find the backend our tcon is connected to,
* and take its ID as our own.
*
* We can either identify backends from their compatible strings, which
* means maintaining a large list of them. Or, since the backend is
* registered and bound before the TCON, we can just go through the
* list of registered backends and compare the device node.
*
* As the structures now store engines instead of backends, here this
* function in fact searches the corresponding engine, and the ID is
* requested via the get_id function of the engine.
*/
static struct sunxi_engine *
sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv,
struct device_node *node,
u32 port_id)
{
struct device_node *port, *ep, *remote;
struct sunxi_engine *engine = ERR_PTR(-EINVAL);
u32 reg = 0;
port = of_graph_get_port_by_id(node, port_id);
if (!port)
return ERR_PTR(-EINVAL);
/*
* This only works if there is only one path from the TCON
* to any display engine. Otherwise the probe order of the
* TCONs and display engines is not guaranteed. They may
* either bind to the wrong one, or worse, bind to the same
* one if additional checks are not done.
*
* Bail out if there are multiple input connections.
*/
if (of_get_available_child_count(port) != 1)
goto out_put_port;
/* Get the first connection without specifying an ID */
ep = of_get_next_available_child(port, NULL);
if (!ep)
goto out_put_port;
remote = of_graph_get_remote_port_parent(ep);
if (!remote)
goto out_put_ep;
/* does this node match any registered engines? */
list_for_each_entry(engine, &drv->engine_list, list)
if (remote == engine->node)
goto out_put_remote;
/*
* According to device tree binding input ports have even id
* number and output ports have odd id. Since a component with
* more than one input and one output (TCON TOP) exists, the correct
* remote input id has to be calculated by subtracting 1 from
* remote output id. If this for some reason can't be done, 0
* is used as input port id.
*/
of_node_put(port);
port = of_graph_get_remote_port(ep);
if (!of_property_read_u32(port, "reg", &reg) && reg > 0)
reg -= 1;
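/* e.g. a remote output port with reg = 3 maps to input port id 2 */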
/* keep looking through upstream ports */
engine = sun4i_tcon_find_engine_traverse(drv, remote, reg);
out_put_remote:
of_node_put(remote);
out_put_ep:
of_node_put(ep);
out_put_port:
of_node_put(port);
return engine;
}
/*
* The device tree binding says that the remote endpoint ID of any
* connection between components, up to and including the TCON, of
* the display pipeline should be equal to the actual ID of the local
* component. Thus we can look at any one of the input connections of
* the TCONs, and use that connection's remote endpoint ID as our own.
*
* Since the user of this function already finds the input port,
* the port is passed in directly without further checks.
*/
static int sun4i_tcon_of_get_id_from_port(struct device_node *port)
{
struct device_node *ep;
int ret = -EINVAL;
/* try finding an upstream endpoint */
for_each_available_child_of_node(port, ep) {
struct device_node *remote;
u32 reg;
remote = of_graph_get_remote_endpoint(ep);
if (!remote)
continue;
ret = of_property_read_u32(remote, "reg", &reg);
if (ret)
continue;
ret = reg;
}
return ret;
}
/*
* Once we know the TCON's id, we can look through the list of
* engines to find a matching one. We assume all engines have
* been probed and added to the list.
*/
static struct sunxi_engine *sun4i_tcon_get_engine_by_id(struct sun4i_drv *drv,
int id)
{
struct sunxi_engine *engine;
list_for_each_entry(engine, &drv->engine_list, list)
if (engine->id == id)
return engine;
return ERR_PTR(-EINVAL);
}
static bool sun4i_tcon_connected_to_tcon_top(struct device_node *node)
{
struct device_node *remote;
bool ret = false;
remote = of_graph_get_remote_node(node, 0, -1);
if (remote) {
ret = !!(IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
of_match_node(sun8i_tcon_top_of_table, remote));
of_node_put(remote);
}
return ret;
}
static int sun4i_tcon_get_index(struct sun4i_drv *drv)
{
struct list_head *pos;
int size = 0;
/*
* Because the TCON is added to the list at the end of the probe
* (after this function is called), the index of the current TCON
* will be the same as the current TCON list size.
*/
list_for_each(pos, &drv->tcon_list)
++size;
return size;
}
/*
* On SoCs with the old display pipeline design (Display Engine 1.0),
* we assumed the TCON was always tied to just one backend. However
* this proved not to be the case. On the A31, the TCON can select
* either backend as its source. On the A20 (and likely on the A10),
* the backend can choose which TCON to output to.
*
* The device tree binding says that the remote endpoint ID of any
* connection between components, up to and including the TCON, of
* the display pipeline should be equal to the actual ID of the local
* component. Thus we should be able to look at any one of the input
* connections of the TCONs, and use that connection's remote endpoint
* ID as our own.
*
* However the connections between the backend and TCON were assumed
* to be always singular, and their endpoint IDs were all incorrectly
* set to 0. This means for these old device trees, we cannot just look
* up the remote endpoint ID of a TCON input endpoint. TCON1 would be
* incorrectly identified as TCON0.
*
* This function first checks if the TCON node has 2 input endpoints.
* If so, then the device tree is a corrected version, and it will use
* sun4i_tcon_of_get_id() and sun4i_tcon_get_engine_by_id() from above
* to fetch the ID and engine directly. If not, then it is likely an
* old device tree, where the endpoint IDs were incorrect, but did not
* have endpoint connections between the backend and TCON across
* different display pipelines. It will fall back to the old method of
* traversing the of_graph to try and find a matching engine by device
* node.
*
* In the case of single display pipeline device trees, either method
* works.
*/
static struct sunxi_engine *sun4i_tcon_find_engine(struct sun4i_drv *drv,
struct device_node *node)
{
struct device_node *port;
struct sunxi_engine *engine;
port = of_graph_get_port_by_id(node, 0);
if (!port)
return ERR_PTR(-EINVAL);
/*
* Is this a corrected device tree with cross pipeline
* connections between the backend and TCON?
*/
if (of_get_child_count(port) > 1) {
int id;
/*
* When pipeline has the same number of TCONs and engines which
* are represented by frontends/backends (DE1) or mixers (DE2),
* we match them by their respective IDs. However, if pipeline
* contains TCON TOP, chances are that there are either more
* TCONs than engines (R40) or TCONs with non-consecutive ids
* (H6). In that case it's easier to just use the TCON index in the
* list as an id. That means that on R40, any 2 TCONs out of 4 can
* be enabled in DT (there are 2 mixers). Due to the design of
* TCON TOP, the remaining 2 TCONs can't be connected to anything
* anyway.
*/
if (sun4i_tcon_connected_to_tcon_top(node))
id = sun4i_tcon_get_index(drv);
else
id = sun4i_tcon_of_get_id_from_port(port);
/* Get our engine by matching our ID */
engine = sun4i_tcon_get_engine_by_id(drv, id);
of_node_put(port);
return engine;
}
/* Fallback to old method by traversing input endpoints */
of_node_put(port);
return sun4i_tcon_find_engine_traverse(drv, node, 0);
}
static int sun4i_tcon_bind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sunxi_engine *engine;
struct device_node *remote;
struct sun4i_tcon *tcon;
struct reset_control *edp_rstc;
bool has_lvds_rst, has_lvds_alt, can_lvds;
int ret;
engine = sun4i_tcon_find_engine(drv, dev->of_node);
if (IS_ERR(engine)) {
dev_err(dev, "Couldn't find matching engine\n");
return -EPROBE_DEFER;
}
tcon = devm_kzalloc(dev, sizeof(*tcon), GFP_KERNEL);
if (!tcon)
return -ENOMEM;
dev_set_drvdata(dev, tcon);
tcon->drm = drm;
tcon->dev = dev;
tcon->id = engine->id;
tcon->quirks = of_device_get_match_data(dev);
tcon->lcd_rst = devm_reset_control_get(dev, "lcd");
if (IS_ERR(tcon->lcd_rst)) {
dev_err(dev, "Couldn't get our reset line\n");
return PTR_ERR(tcon->lcd_rst);
}
if (tcon->quirks->needs_edp_reset) {
edp_rstc = devm_reset_control_get_shared(dev, "edp");
if (IS_ERR(edp_rstc)) {
dev_err(dev, "Couldn't get edp reset line\n");
return PTR_ERR(edp_rstc);
}
ret = reset_control_deassert(edp_rstc);
if (ret) {
dev_err(dev, "Couldn't deassert edp reset line\n");
return ret;
}
}
/* Make sure our TCON is reset */
ret = reset_control_reset(tcon->lcd_rst);
if (ret) {
dev_err(dev, "Couldn't deassert our reset line\n");
return ret;
}
if (tcon->quirks->supports_lvds) {
/*
* This can only be made optional since we've had DT
* nodes without the LVDS reset property.
*
* If the property is missing, just disable LVDS, and
* print a warning.
*/
tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
if (IS_ERR(tcon->lvds_rst)) {
dev_err(dev, "Couldn't get our reset line\n");
return PTR_ERR(tcon->lvds_rst);
} else if (tcon->lvds_rst) {
has_lvds_rst = true;
reset_control_reset(tcon->lvds_rst);
} else {
has_lvds_rst = false;
}
/*
 * Similarly, the LVDS alternative PLL clock can only be made
 * optional, since we've had DT nodes without it.
 *
 * If the property is missing, just disable LVDS, and
 * print a warning.
 */
if (tcon->quirks->has_lvds_alt) {
tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
if (IS_ERR(tcon->lvds_pll)) {
if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
has_lvds_alt = false;
} else {
dev_err(dev, "Couldn't get the LVDS PLL\n");
return PTR_ERR(tcon->lvds_pll);
}
} else {
has_lvds_alt = true;
}
}
if (!has_lvds_rst ||
(tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
dev_warn(dev, "Missing LVDS properties, Please upgrade your DT\n");
dev_warn(dev, "LVDS output disabled\n");
can_lvds = false;
} else {
can_lvds = true;
}
} else {
can_lvds = false;
}
ret = sun4i_tcon_init_clocks(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't init our TCON clocks\n");
goto err_assert_reset;
}
ret = sun4i_tcon_init_regmap(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't init our TCON regmap\n");
goto err_assert_reset;
}
if (tcon->quirks->has_channel_0) {
ret = sun4i_dclk_create(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't create our TCON dot clock\n");
goto err_assert_reset;
}
}
ret = sun4i_tcon_init_irq(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't init our TCON interrupts\n");
goto err_free_dclk;
}
tcon->crtc = sun4i_crtc_init(drm, engine, tcon);
if (IS_ERR(tcon->crtc)) {
dev_err(dev, "Couldn't create our CRTC\n");
ret = PTR_ERR(tcon->crtc);
goto err_free_dclk;
}
if (tcon->quirks->has_channel_0) {
/*
* If we have an LVDS panel connected to the TCON, we should
* just probe the LVDS connector. Otherwise, just probe RGB as
* we used to.
*/
remote = of_graph_get_remote_node(dev->of_node, 1, 0);
if (of_device_is_compatible(remote, "panel-lvds")) {
if (can_lvds)
ret = sun4i_lvds_init(drm, tcon);
else
ret = -EINVAL;
} else {
ret = sun4i_rgb_init(drm, tcon);
}
of_node_put(remote);
if (ret < 0)
goto err_free_dclk;
}
if (tcon->quirks->needs_de_be_mux) {
/*
* We assume there is no dynamic muxing of backends
* and TCONs, so we select the backend with same ID.
*
* While dynamic selection might be interesting, since
* the CRTC is tied to the TCON, while the layers are
* tied to the backends, this means we would need to
* switch between groups of layers. There might not be
* a way to represent this constraint in DRM.
*/
regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
SUN4I_TCON0_CTL_SRC_SEL_MASK,
tcon->id);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_SRC_SEL_MASK,
tcon->id);
}
list_add_tail(&tcon->list, &drv->tcon_list);
return 0;
err_free_dclk:
if (tcon->quirks->has_channel_0)
sun4i_dclk_free(tcon);
err_assert_reset:
reset_control_assert(tcon->lcd_rst);
return ret;
}
static void sun4i_tcon_unbind(struct device *dev, struct device *master,
void *data)
{
struct sun4i_tcon *tcon = dev_get_drvdata(dev);
list_del(&tcon->list);
if (tcon->quirks->has_channel_0)
sun4i_dclk_free(tcon);
}
static const struct component_ops sun4i_tcon_ops = {
.bind = sun4i_tcon_bind,
.unbind = sun4i_tcon_unbind,
};
static int sun4i_tcon_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
const struct sun4i_tcon_quirks *quirks;
struct drm_bridge *bridge;
struct drm_panel *panel;
int ret;
quirks = of_device_get_match_data(&pdev->dev);
/* panels and bridges are present only on TCONs with channel 0 */
if (quirks->has_channel_0) {
ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
if (ret == -EPROBE_DEFER)
return ret;
}
return component_add(&pdev->dev, &sun4i_tcon_ops);
}
static void sun4i_tcon_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun4i_tcon_ops);
}
/* platform specific TCON muxing callbacks */
static int sun4i_a10_tcon_set_mux(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder)
{
struct sun4i_tcon *tcon0 = sun4i_get_tcon0(encoder->dev);
u32 shift;
if (!tcon0)
return -EINVAL;
switch (encoder->encoder_type) {
case DRM_MODE_ENCODER_TMDS:
/* HDMI */
shift = 8;
break;
default:
return -EINVAL;
}
regmap_update_bits(tcon0->regs, SUN4I_TCON_MUX_CTRL_REG,
0x3 << shift, tcon->id << shift);
return 0;
}
static int sun5i_a13_tcon_set_mux(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder)
{
u32 val;
if (encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
val = 1;
else
val = 0;
/*
* FIXME: Undocumented bits
*/
return regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, val);
}
static int sun6i_tcon_set_mux(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder)
{
struct sun4i_tcon *tcon0 = sun4i_get_tcon0(encoder->dev);
u32 shift;
if (!tcon0)
return -EINVAL;
switch (encoder->encoder_type) {
case DRM_MODE_ENCODER_TMDS:
/* HDMI */
shift = 8;
break;
default:
/* TODO A31 has MIPI DSI but A31s does not */
return -EINVAL;
}
regmap_update_bits(tcon0->regs, SUN4I_TCON_MUX_CTRL_REG,
0x3 << shift, tcon->id << shift);
return 0;
}
static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder)
{
struct device_node *port, *remote;
struct platform_device *pdev;
int id, ret;
/* find TCON TOP platform device and TCON id */
port = of_graph_get_port_by_id(tcon->dev->of_node, 0);
if (!port)
return -EINVAL;
id = sun4i_tcon_of_get_id_from_port(port);
of_node_put(port);
remote = of_graph_get_remote_node(tcon->dev->of_node, 0, -1);
if (!remote)
return -EINVAL;
pdev = of_find_device_by_node(remote);
of_node_put(remote);
if (!pdev)
return -EINVAL;
if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
encoder->encoder_type == DRM_MODE_ENCODER_TMDS) {
ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id);
if (ret) {
put_device(&pdev->dev);
return ret;
}
}
if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP)) {
ret = sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id);
if (ret) {
put_device(&pdev->dev);
return ret;
}
}
return 0;
}
static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
.dclk_min_div = 4,
.set_mux = sun4i_a10_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
.dclk_min_div = 4,
.set_mux = sun5i_a13_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
.has_lvds_alt = true,
.needs_de_be_mux = true,
.dclk_min_div = 1,
.set_mux = sun6i_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
.needs_de_be_mux = true,
.dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun7i_a20_tcon0_quirks = {
.supports_lvds = true,
.has_channel_0 = true,
.has_channel_1 = true,
.dclk_min_div = 4,
/* Same display pipeline structure as A10 */
.set_mux = sun4i_a10_tcon_set_mux,
.setup_lvds_phy = sun4i_tcon_setup_lvds_phy,
};
static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
.dclk_min_div = 4,
/* Same display pipeline structure as A10 */
.set_mux = sun4i_a10_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
.has_channel_0 = true,
.has_lvds_alt = true,
.dclk_min_div = 1,
.setup_lvds_phy = sun6i_tcon_setup_lvds_phy,
.supports_lvds = true,
};
static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
.supports_lvds = true,
.has_channel_0 = true,
.dclk_min_div = 1,
.setup_lvds_phy = sun6i_tcon_setup_lvds_phy,
};
static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
.has_channel_1 = true,
};
static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
.has_channel_1 = true,
.polarity_in_ch0 = true,
.set_mux = sun8i_r40_tcon_tv_set_mux,
};
static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
.has_channel_0 = true,
.dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = {
.has_channel_0 = true,
.needs_edp_reset = true,
.dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = {
.has_channel_1 = true,
.needs_edp_reset = true,
};
static const struct sun4i_tcon_quirks sun20i_d1_lcd_quirks = {
.has_channel_0 = true,
.dclk_min_div = 1,
.set_mux = sun8i_r40_tcon_tv_set_mux,
};
/* sun4i_drv uses this list to check if a device node is a TCON */
const struct of_device_id sun4i_tcon_of_table[] = {
{ .compatible = "allwinner,sun4i-a10-tcon", .data = &sun4i_a10_quirks },
{ .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks },
{ .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
{ .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
{ .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks },
{ .compatible = "allwinner,sun7i-a20-tcon0", .data = &sun7i_a20_tcon0_quirks },
{ .compatible = "allwinner,sun7i-a20-tcon1", .data = &sun7i_a20_quirks },
{ .compatible = "allwinner,sun8i-a23-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks },
{ .compatible = "allwinner,sun8i-a83t-tcon-tv", .data = &sun8i_a83t_tv_quirks },
{ .compatible = "allwinner,sun8i-r40-tcon-tv", .data = &sun8i_r40_tv_quirks },
{ .compatible = "allwinner,sun8i-v3s-tcon", .data = &sun8i_v3s_quirks },
{ .compatible = "allwinner,sun9i-a80-tcon-lcd", .data = &sun9i_a80_tcon_lcd_quirks },
{ .compatible = "allwinner,sun9i-a80-tcon-tv", .data = &sun9i_a80_tcon_tv_quirks },
{ .compatible = "allwinner,sun20i-d1-tcon-lcd", .data = &sun20i_d1_lcd_quirks },
{ .compatible = "allwinner,sun20i-d1-tcon-tv", .data = &sun8i_r40_tv_quirks },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
EXPORT_SYMBOL(sun4i_tcon_of_table);
static struct platform_driver sun4i_tcon_platform_driver = {
.probe = sun4i_tcon_probe,
.remove_new = sun4i_tcon_remove,
.driver = {
.name = "sun4i-tcon",
.of_match_table = sun4i_tcon_of_table,
},
};
module_platform_driver(sun4i_tcon_platform_driver);
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Allwinner A10 Timing Controller Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sun4i/sun4i_tcon.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) Jernej Skrabec <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun8i_csc.h"
#include "sun8i_mixer.h"
#include "sun8i_vi_layer.h"
#include "sun8i_vi_scaler.h"
static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
int overlay, bool enable, unsigned int zpos,
unsigned int old_zpos)
{
u32 val, bld_base, ch_base;
bld_base = sun8i_blender_base(mixer);
ch_base = sun8i_channel_base(mixer, channel);
DRM_DEBUG_DRIVER("%sabling VI channel %d overlay %d\n",
enable ? "En" : "Dis", channel, overlay);
if (enable)
val = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN;
else
val = 0;
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN, val);
if (!enable || zpos != old_zpos) {
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
0);
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_BLEND_ROUTE(bld_base),
SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
0);
}
if (enable) {
val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
val, val);
val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_BLEND_ROUTE(bld_base),
SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
val);
}
}
static void sun8i_vi_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
{
u32 mask, val, ch_base;
ch_base = sun8i_channel_base(mixer, channel);
if (mixer->cfg->is_de3) {
mask = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK |
SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_MASK;
val = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA
(plane->state->alpha >> 8);
val |= (plane->state->alpha == DRM_BLEND_ALPHA_OPAQUE) ?
SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_PIXEL :
SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_COMBINED;
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base,
overlay),
mask, val);
} else if (mixer->cfg->vi_num == 1) {
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_FCC_GLOBAL_ALPHA_REG,
SUN8I_MIXER_FCC_GLOBAL_ALPHA_MASK,
SUN8I_MIXER_FCC_GLOBAL_ALPHA
(plane->state->alpha >> 8));
}
}
static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane,
unsigned int zpos)
{
struct drm_plane_state *state = plane->state;
const struct drm_format_info *format = state->fb->format;
u32 src_w, src_h, dst_w, dst_h;
u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
u32 hn = 0, hm = 0;
u32 vn = 0, vm = 0;
bool subsampled;
DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n",
channel, overlay);
bld_base = sun8i_blender_base(mixer);
ch_base = sun8i_channel_base(mixer, channel);
src_w = drm_rect_width(&state->src) >> 16;
src_h = drm_rect_height(&state->src) >> 16;
dst_w = drm_rect_width(&state->dst);
dst_h = drm_rect_height(&state->dst);
hphase = state->src.x1 & 0xffff;
vphase = state->src.y1 & 0xffff;
/* make coordinates dividable by subsampling factor */
if (format->hsub > 1) {
int mask, remainder;
mask = format->hsub - 1;
remainder = (state->src.x1 >> 16) & mask;
src_w = (src_w + remainder) & ~mask;
hphase += remainder << 16;
}
if (format->vsub > 1) {
int mask, remainder;
mask = format->vsub - 1;
remainder = (state->src.y1 >> 16) & mask;
src_h = (src_h + remainder) & ~mask;
vphase += remainder << 16;
}
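/*
 * Example, assuming YUV420 (hsub = 2) and src.x1 = 65 pixels: the
 * start is pulled back to x = 64 and the dropped pixel is carried
 * over as an extra full pixel of horizontal phase for the scaler.
 */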
insize = SUN8I_MIXER_SIZE(src_w, src_h);
outsize = SUN8I_MIXER_SIZE(dst_w, dst_h);
/* Set height and width */
DRM_DEBUG_DRIVER("Layer source offset X: %d Y: %d\n",
(state->src.x1 >> 16) & ~(format->hsub - 1),
(state->src.y1 >> 16) & ~(format->vsub - 1));
DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_LAYER_SIZE(ch_base, overlay),
insize);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_OVL_SIZE(ch_base),
insize);
/*
* The scaler must be enabled for subsampled formats, so that it
* scales chroma to the same size as luma.
*/
subsampled = format->hsub > 1 || format->vsub > 1;
if (insize != outsize || subsampled || hphase || vphase) {
unsigned int scanline, required;
struct drm_display_mode *mode;
u32 hscale, vscale, fps;
u64 ability;
DRM_DEBUG_DRIVER("HW scaling is enabled\n");
mode = &plane->state->crtc->state->mode;
fps = (mode->clock * 1000) / (mode->vtotal * mode->htotal);
ability = clk_get_rate(mixer->mod_clk);
/* BSP algorithm assumes 80% efficiency of VI scaler unit */
ability *= 80;
do_div(ability, mode->vdisplay * fps * max(src_w, dst_w));
required = src_h * 100 / dst_h;
if (ability < required) {
DRM_DEBUG_DRIVER("Using vertical coarse scaling\n");
vm = src_h;
vn = (u32)ability * dst_h / 100;
src_h = vn;
}
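/*
 * Numeric sketch, under assumed values (not from this driver):
 * with a 150 MHz mod clock, a 1920x1080@60 CRTC and a 3840x2160
 * source, fps = 60 and ability = 150000000 * 80 /
 * (1080 * 60 * 3840) ~= 48, while required = 2160 * 100 / 1080 =
 * 200, so vertical coarse scaling is used with vn ~= 518 lines.
 */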
/* it seems that every RGB scaler has a buffer for 2048 pixels */
scanline = subsampled ? mixer->cfg->scanline_yuv : 2048;
if (src_w > scanline) {
DRM_DEBUG_DRIVER("Using horizontal coarse scaling\n");
hm = src_w;
hn = scanline;
src_w = hn;
}
hscale = (src_w << 16) / dst_w;
vscale = (src_h << 16) / dst_h;
sun8i_vi_scaler_setup(mixer, channel, src_w, src_h, dst_w,
dst_h, hscale, vscale, hphase, vphase,
format);
sun8i_vi_scaler_enable(mixer, channel, true);
} else {
DRM_DEBUG_DRIVER("HW scaling is not needed\n");
sun8i_vi_scaler_enable(mixer, channel, false);
}
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_HDS_Y(ch_base),
SUN8I_MIXER_CHAN_VI_DS_N(hn) |
SUN8I_MIXER_CHAN_VI_DS_M(hm));
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_HDS_UV(ch_base),
SUN8I_MIXER_CHAN_VI_DS_N(hn) |
SUN8I_MIXER_CHAN_VI_DS_M(hm));
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_VDS_Y(ch_base),
SUN8I_MIXER_CHAN_VI_DS_N(vn) |
SUN8I_MIXER_CHAN_VI_DS_M(vm));
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_VDS_UV(ch_base),
SUN8I_MIXER_CHAN_VI_DS_N(vn) |
SUN8I_MIXER_CHAN_VI_DS_M(vm));
/* Set base coordinates */
DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
regmap_write(mixer->engine.regs,
SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
return 0;
}
static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
{
if (!format->is_yuv)
return SUN8I_CSC_MODE_OFF;
switch (format->format) {
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YVU444:
return SUN8I_CSC_MODE_YVU2RGB;
default:
return SUN8I_CSC_MODE_YUV2RGB;
}
}
static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
u32 val, ch_base, csc_mode, hw_fmt;
const struct drm_format_info *fmt;
int ret;
ch_base = sun8i_channel_base(mixer, channel);
fmt = state->fb->format;
ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
if (ret) {
DRM_DEBUG_DRIVER("Invalid format\n");
return ret;
}
val = hw_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val);
csc_mode = sun8i_vi_layer_get_csc_mode(fmt);
if (csc_mode != SUN8I_CSC_MODE_OFF) {
sun8i_csc_set_ccsc_coefficients(mixer, channel, csc_mode,
state->color_encoding,
state->color_range);
sun8i_csc_enable_ccsc(mixer, channel, true);
} else {
sun8i_csc_enable_ccsc(mixer, channel, false);
}
if (!fmt->is_yuv)
val = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE;
else
val = 0;
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE, val);
return 0;
}
static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
struct drm_gem_dma_object *gem;
u32 dx, dy, src_x, src_y;
dma_addr_t dma_addr;
u32 ch_base;
int i;
ch_base = sun8i_channel_base(mixer, channel);
/* Adjust x and y to be dividable by subsampling factor */
src_x = (state->src.x1 >> 16) & ~(format->hsub - 1);
src_y = (state->src.y1 >> 16) & ~(format->vsub - 1);
for (i = 0; i < format->num_planes; i++) {
/* Get the physical address of the buffer in memory */
gem = drm_fb_dma_get_gem_obj(fb, i);
DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->dma_addr);
/* Compute the start of the displayed memory */
dma_addr = gem->dma_addr + fb->offsets[i];
dx = src_x;
dy = src_y;
if (i > 0) {
dx /= format->hsub;
dy /= format->vsub;
}
/* Fixup framebuffer address for src coordinates */
dma_addr += dx * format->cpp[i];
dma_addr += dy * fb->pitches[i];
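/*
 * Example, assuming an NV12 framebuffer with src_x = 64 and
 * src_y = 32: plane 0 (cpp = 1) starts 64 + 32 * pitches[0]
 * bytes in, while plane 1 (hsub = vsub = 2, cpp = 2 for the
 * interleaved CbCr samples) starts 64 + 16 * pitches[1] bytes in.
 */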
/* Set the line width */
DRM_DEBUG_DRIVER("Layer %d. line width: %d bytes\n",
i + 1, fb->pitches[i]);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_LAYER_PITCH(ch_base,
overlay, i),
fb->pitches[i]);
DRM_DEBUG_DRIVER("Setting %d. buffer address to %pad\n",
i + 1, &dma_addr);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(ch_base,
overlay, i),
lower_32_bits(dma_addr));
}
return 0;
}
static int sun8i_vi_layer_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
int min_scale, max_scale;
if (!crtc)
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(state,
crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
min_scale = DRM_PLANE_NO_SCALING;
max_scale = DRM_PLANE_NO_SCALING;
if (layer->mixer->cfg->scaler_mask & BIT(layer->channel)) {
min_scale = SUN8I_VI_SCALER_SCALE_MIN;
max_scale = SUN8I_VI_SCALER_SCALE_MAX;
}
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
min_scale, max_scale,
true, true);
}
static void sun8i_vi_layer_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, false, 0,
old_zpos);
}
static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
unsigned int zpos = new_state->normalized_zpos;
unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
if (!new_state->visible) {
sun8i_vi_layer_enable(mixer, layer->channel,
layer->overlay, false, 0, old_zpos);
return;
}
sun8i_vi_layer_update_coord(mixer, layer->channel,
layer->overlay, plane, zpos);
sun8i_vi_layer_update_alpha(mixer, layer->channel,
layer->overlay, plane);
sun8i_vi_layer_update_formats(mixer, layer->channel,
layer->overlay, plane);
sun8i_vi_layer_update_buffer(mixer, layer->channel,
layer->overlay, plane);
sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay,
true, zpos, old_zpos);
}
static const struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
.atomic_check = sun8i_vi_layer_atomic_check,
.atomic_disable = sun8i_vi_layer_atomic_disable,
.atomic_update = sun8i_vi_layer_atomic_update,
};
static const struct drm_plane_funcs sun8i_vi_layer_funcs = {
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.destroy = drm_plane_cleanup,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = drm_atomic_helper_plane_reset,
.update_plane = drm_atomic_helper_update_plane,
};
/*
* While the DE2 VI layer supports the same RGB formats as the UI
* layer, the alpha channel is ignored. This list contains all
* unique variants where the alpha channel is replaced with a
* "don't care" (X) channel.
*/
static const u32 sun8i_vi_layer_formats[] = {
DRM_FORMAT_BGR565,
DRM_FORMAT_BGR888,
DRM_FORMAT_BGRX4444,
DRM_FORMAT_BGRX5551,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGBX4444,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_XBGR4444,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_NV16,
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV61,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YVU411,
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
};
static const u32 sun8i_vi_layer_de3_formats[] = {
DRM_FORMAT_ABGR1555,
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_ABGR4444,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGR888,
DRM_FORMAT_BGRA1010102,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_BGRA4444,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGBA1010102,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_NV16,
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV61,
DRM_FORMAT_P010,
DRM_FORMAT_P210,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YVU411,
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
};
static const uint64_t sun8i_layer_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
struct sun8i_mixer *mixer,
int index)
{
enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
u32 supported_encodings, supported_ranges;
unsigned int plane_cnt, format_count;
struct sun8i_vi_layer *layer;
const u32 *formats;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
if (!layer)
return ERR_PTR(-ENOMEM);
if (mixer->cfg->is_de3) {
formats = sun8i_vi_layer_de3_formats;
format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
} else {
formats = sun8i_vi_layer_formats;
format_count = ARRAY_SIZE(sun8i_vi_layer_formats);
}
if (!mixer->cfg->ui_num && index == 0)
type = DRM_PLANE_TYPE_PRIMARY;
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun8i_vi_layer_funcs,
formats, format_count,
sun8i_layer_modifiers,
type, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
return ERR_PTR(ret);
}
plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
if (mixer->cfg->vi_num == 1 || mixer->cfg->is_de3) {
ret = drm_plane_create_alpha_property(&layer->plane);
if (ret) {
dev_err(drm->dev, "Couldn't add alpha property\n");
return ERR_PTR(ret);
}
}
ret = drm_plane_create_zpos_property(&layer->plane, index,
0, plane_cnt - 1);
if (ret) {
dev_err(drm->dev, "Couldn't add zpos property\n");
return ERR_PTR(ret);
}
supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709);
if (mixer->cfg->is_de3)
supported_encodings |= BIT(DRM_COLOR_YCBCR_BT2020);
supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE);
ret = drm_plane_create_color_properties(&layer->plane,
supported_encodings,
supported_ranges,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
if (ret) {
dev_err(drm->dev, "Couldn't add encoding and range properties!\n");
return ERR_PTR(ret);
}
drm_plane_helper_add(&layer->plane, &sun8i_vi_layer_helper_funcs);
layer->mixer = mixer;
layer->channel = index;
layer->overlay = 0;
return layer;
}
| linux-master | drivers/gpu/drm/sun4i/sun8i_vi_layer.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 Jernej Skrabec <[email protected]>
*/
#include <linux/clk-provider.h>
#include "sun8i_dw_hdmi.h"
struct sun8i_phy_clk {
struct clk_hw hw;
struct sun8i_hdmi_phy *phy;
};
static inline struct sun8i_phy_clk *hw_to_phy_clk(struct clk_hw *hw)
{
return container_of(hw, struct sun8i_phy_clk, hw);
}
static int sun8i_phy_clk_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
unsigned long rate = req->rate;
unsigned long best_rate = 0;
struct clk_hw *best_parent = NULL;
struct clk_hw *parent;
int best_div = 1;
int i, p;
for (p = 0; p < clk_hw_get_num_parents(hw); p++) {
parent = clk_hw_get_parent_by_index(hw, p);
if (!parent)
continue;
for (i = 1; i <= 16; i++) {
unsigned long ideal = rate * i;
unsigned long rounded;
rounded = clk_hw_round_rate(parent, ideal);
if (rounded == ideal) {
best_rate = rounded;
best_div = i;
best_parent = parent;
break;
}
if (!best_rate ||
abs(rate - rounded / i) <
abs(rate - best_rate / best_div)) {
best_rate = rounded;
best_div = i;
best_parent = parent;
}
}
if (best_rate / best_div == rate)
break;
}
req->rate = best_rate / best_div;
req->best_parent_rate = best_rate;
req->best_parent_hw = best_parent;
return 0;
}
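/*
 * Example of the search above, assuming a parent PLL that can
 * produce 297 MHz exactly: a request for 74.25 MHz (720p TMDS)
 * matches at divider i = 4, since 74.25 MHz * 4 = 297 MHz rounds
 * to itself, so the loop exits early with that parent and divider.
 */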
static unsigned long sun8i_phy_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
u32 reg;
regmap_read(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG2_REG, &reg);
reg = ((reg >> SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_SHIFT) &
SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_MSK) + 1;
return parent_rate / reg;
}
static int sun8i_phy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
unsigned long best_rate = 0;
u8 best_m = 0, m;
for (m = 1; m <= 16; m++) {
unsigned long tmp_rate = parent_rate / m;
if (tmp_rate > rate)
continue;
if (!best_rate ||
(rate - tmp_rate) < (rate - best_rate)) {
best_rate = tmp_rate;
best_m = m;
}
}
regmap_update_bits(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG2_REG,
SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_MSK,
SUN8I_HDMI_PHY_PLL_CFG2_PREDIV(best_m));
return 0;
}
static u8 sun8i_phy_clk_get_parent(struct clk_hw *hw)
{
struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
u32 reg;
regmap_read(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, &reg);
reg = (reg & SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK) >>
SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT;
return reg;
}
static int sun8i_phy_clk_set_parent(struct clk_hw *hw, u8 index)
{
struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
if (index > 1)
return -EINVAL;
regmap_update_bits(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
index << SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT);
return 0;
}
static const struct clk_ops sun8i_phy_clk_ops = {
.determine_rate = sun8i_phy_clk_determine_rate,
.recalc_rate = sun8i_phy_clk_recalc_rate,
.set_rate = sun8i_phy_clk_set_rate,
.get_parent = sun8i_phy_clk_get_parent,
.set_parent = sun8i_phy_clk_set_parent,
};
int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
bool second_parent)
{
struct clk_init_data init;
struct sun8i_phy_clk *priv;
const char *parents[2];
parents[0] = __clk_get_name(phy->clk_pll0);
if (!parents[0])
return -ENODEV;
if (second_parent) {
parents[1] = __clk_get_name(phy->clk_pll1);
if (!parents[1])
return -ENODEV;
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
init.name = "hdmi-phy-clk";
init.ops = &sun8i_phy_clk_ops;
init.parent_names = parents;
init.num_parents = second_parent ? 2 : 1;
init.flags = CLK_SET_RATE_PARENT;
priv->phy = phy;
priv->hw.init = &init;
phy->clk_phy = devm_clk_register(dev, &priv->hw);
if (IS_ERR(phy->clk_phy))
return PTR_ERR(phy->clk_phy);
return 0;
}
| linux-master | drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Free Electrons
* Copyright (C) 2016 NextThing Co
*
* Maxime Ripard <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/regmap.h>
#include "sun4i_tcon.h"
#include "sun4i_tcon_dclk.h"
struct sun4i_dclk {
struct clk_hw hw;
struct regmap *regmap;
struct sun4i_tcon *tcon;
};
static inline struct sun4i_dclk *hw_to_dclk(struct clk_hw *hw)
{
return container_of(hw, struct sun4i_dclk, hw);
}
static void sun4i_dclk_disable(struct clk_hw *hw)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
BIT(SUN4I_TCON0_DCLK_GATE_BIT), 0);
}
static int sun4i_dclk_enable(struct clk_hw *hw)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
BIT(SUN4I_TCON0_DCLK_GATE_BIT),
BIT(SUN4I_TCON0_DCLK_GATE_BIT));
}
static int sun4i_dclk_is_enabled(struct clk_hw *hw)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
u32 val;
regmap_read(dclk->regmap, SUN4I_TCON0_DCLK_REG, &val);
return val & BIT(SUN4I_TCON0_DCLK_GATE_BIT);
}
static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
u32 val;
regmap_read(dclk->regmap, SUN4I_TCON0_DCLK_REG, &val);
val >>= SUN4I_TCON0_DCLK_DIV_SHIFT;
val &= (1 << SUN4I_TCON0_DCLK_DIV_WIDTH) - 1;
if (!val)
val = 1;
return parent_rate / val;
}
static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
struct sun4i_tcon *tcon = dclk->tcon;
unsigned long best_parent = 0;
u8 best_div = 1;
int i;
for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) {
u64 ideal = (u64)rate * i;
unsigned long rounded;
/*
* ideal has overflowed the max value that can be stored in an
* unsigned long, and every clk operation we might do on a
* truncated u64 value will give us incorrect results.
* Let's just stop there since bigger dividers will result in
* the same overflow issue.
*/
if (ideal > ULONG_MAX)
goto out;
rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
ideal);
if (rounded == ideal) {
best_parent = rounded;
best_div = i;
goto out;
}
if (abs(rate - rounded / i) <
abs(rate - best_parent / best_div)) {
best_parent = rounded;
best_div = i;
}
}
out:
*parent_rate = best_parent;
return best_parent / best_div;
}
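/*
 * Example of the divider search above, under assumed rates: with
 * dclk_min_div = 4 and a parent that rounds 132 MHz to itself, a
 * request for 33 MHz hits an exact match on the first iteration
 * (i = 4, ideal = 132 MHz) and returns 132 MHz / 4 = 33 MHz.
 */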
static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
u8 div = parent_rate / rate;
return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
GENMASK(6, 0), div);
}
static int sun4i_dclk_get_phase(struct clk_hw *hw)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
u32 val;
regmap_read(dclk->regmap, SUN4I_TCON0_IO_POL_REG, &val);
val >>= 28;
val &= 3;
return val * 120;
}
static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
u32 val = degrees / 120;
val <<= 28;
regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
GENMASK(29, 28),
val);
return 0;
}
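/*
 * Note: bits 29:28 encode the phase in 120 degree steps, so only
 * 0, 120 and 240 degrees map cleanly; e.g. a request for 240
 * degrees is stored as 2.
 */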
static const struct clk_ops sun4i_dclk_ops = {
.disable = sun4i_dclk_disable,
.enable = sun4i_dclk_enable,
.is_enabled = sun4i_dclk_is_enabled,
.recalc_rate = sun4i_dclk_recalc_rate,
.round_rate = sun4i_dclk_round_rate,
.set_rate = sun4i_dclk_set_rate,
.get_phase = sun4i_dclk_get_phase,
.set_phase = sun4i_dclk_set_phase,
};
int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
{
const char *clk_name, *parent_name;
struct clk_init_data init;
struct sun4i_dclk *dclk;
int ret;
parent_name = __clk_get_name(tcon->sclk0);
ret = of_property_read_string_index(dev->of_node,
"clock-output-names", 0,
&clk_name);
if (ret)
return ret;
dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL);
if (!dclk)
return -ENOMEM;
dclk->tcon = tcon;
init.name = clk_name;
init.ops = &sun4i_dclk_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
init.flags = CLK_SET_RATE_PARENT;
dclk->regmap = tcon->regs;
dclk->hw.init = &init;
tcon->dclk = clk_register(dev, &dclk->hw);
if (IS_ERR(tcon->dclk))
return PTR_ERR(tcon->dclk);
return 0;
}
EXPORT_SYMBOL(sun4i_dclk_create);
int sun4i_dclk_free(struct sun4i_tcon *tcon)
{
clk_unregister(tcon->dclk);
return 0;
}
EXPORT_SYMBOL(sun4i_dclk_free);
| linux-master | drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Maxime Ripard <[email protected]>
* Copyright (C) 2017 Jonathan Liu <[email protected]>
*/
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>
#include "sun4i_hdmi.h"
#define SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK ( \
SUN4I_HDMI_DDC_INT_STATUS_ILLEGAL_FIFO_OPERATION | \
SUN4I_HDMI_DDC_INT_STATUS_DDC_RX_FIFO_UNDERFLOW | \
SUN4I_HDMI_DDC_INT_STATUS_DDC_TX_FIFO_OVERFLOW | \
SUN4I_HDMI_DDC_INT_STATUS_ARBITRATION_ERROR | \
SUN4I_HDMI_DDC_INT_STATUS_ACK_ERROR | \
SUN4I_HDMI_DDC_INT_STATUS_BUS_ERROR \
)
/* FIFO request bit is set when FIFO level is above RX_THRESHOLD during read */
#define RX_THRESHOLD SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MAX
static int fifo_transfer(struct sun4i_hdmi *hdmi, u8 *buf, int len, bool read)
{
/*
* 1 byte takes 9 clock cycles (8 bits + 1 ACK) = 90 us for 100 kHz
* clock. As clock rate is fixed, just round it up to 100 us.
*/
const unsigned long byte_time_us = 100;
const u32 mask = SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK |
SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE;
u32 reg;
/*
* If threshold is inclusive, then the FIFO may only have
* RX_THRESHOLD number of bytes, instead of RX_THRESHOLD + 1.
*/
int read_len = RX_THRESHOLD +
(hdmi->variant->ddc_fifo_thres_incl ? 0 : 1);
/*
* Limit transfer length by FIFO threshold or FIFO size.
* For TX the threshold is for an empty FIFO.
*/
len = min_t(int, len, read ? read_len : SUN4I_HDMI_DDC_FIFO_SIZE);
/* Wait until error, FIFO request bit set or transfer complete */
if (regmap_field_read_poll_timeout(hdmi->field_ddc_int_status, reg,
reg & mask, len * byte_time_us,
100000))
return -ETIMEDOUT;
if (reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK)
return -EIO;
if (read)
ioread8_rep(hdmi->base + hdmi->variant->ddc_fifo_reg, buf, len);
else
iowrite8_rep(hdmi->base + hdmi->variant->ddc_fifo_reg, buf, len);
/* Clear FIFO request bit by forcing a write to that bit */
regmap_field_force_write(hdmi->field_ddc_int_status,
SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST);
return len;
}
static int xfer_msg(struct sun4i_hdmi *hdmi, struct i2c_msg *msg)
{
int i, len;
u32 reg;
/* Set FIFO direction */
if (hdmi->variant->ddc_fifo_has_dir) {
reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
reg &= ~SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK;
reg |= (msg->flags & I2C_M_RD) ?
SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ :
SUN4I_HDMI_DDC_CTRL_FIFO_DIR_WRITE;
writel(reg, hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
}
/* Clear address register (not cleared by soft reset) */
regmap_field_write(hdmi->field_ddc_addr_reg, 0);
/* Set I2C address */
regmap_field_write(hdmi->field_ddc_slave_addr, msg->addr);
/*
* Set FIFO RX/TX thresholds and clear FIFO
*
* If threshold is inclusive, we can set the TX threshold to
* 0 instead of 1.
*/
regmap_field_write(hdmi->field_ddc_fifo_tx_thres,
hdmi->variant->ddc_fifo_thres_incl ? 0 : 1);
regmap_field_write(hdmi->field_ddc_fifo_rx_thres, RX_THRESHOLD);
regmap_field_write(hdmi->field_ddc_fifo_clear, 1);
if (regmap_field_read_poll_timeout(hdmi->field_ddc_fifo_clear,
reg, !reg, 100, 2000))
return -EIO;
/* Set transfer length */
regmap_field_write(hdmi->field_ddc_byte_count, msg->len);
/* Set command */
regmap_field_write(hdmi->field_ddc_cmd,
msg->flags & I2C_M_RD ?
SUN4I_HDMI_DDC_CMD_IMPLICIT_READ :
SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE);
/* Clear interrupt status bits by forcing a write */
regmap_field_force_write(hdmi->field_ddc_int_status,
SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK |
SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE);
/* Start command */
regmap_field_write(hdmi->field_ddc_start, 1);
/* Transfer bytes */
for (i = 0; i < msg->len; i += len) {
len = fifo_transfer(hdmi, msg->buf + i, msg->len - i,
msg->flags & I2C_M_RD);
if (len <= 0)
return len;
}
/* Wait for command to finish */
if (regmap_field_read_poll_timeout(hdmi->field_ddc_start,
reg, !reg, 100, 100000))
return -EIO;
/* Check for errors */
regmap_field_read(hdmi->field_ddc_int_status, &reg);
if ((reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK) ||
!(reg & SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE)) {
return -EIO;
}
return 0;
}
static int sun4i_hdmi_i2c_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
struct sun4i_hdmi *hdmi = i2c_get_adapdata(adap);
u32 reg;
int err, i, ret = num;
for (i = 0; i < num; i++) {
if (!msgs[i].len)
return -EINVAL;
if (msgs[i].len > SUN4I_HDMI_DDC_BYTE_COUNT_MAX)
return -EINVAL;
}
/* DDC clock needs to be enabled for the module to work */
clk_prepare_enable(hdmi->ddc_clk);
clk_set_rate(hdmi->ddc_clk, 100000);
/* Reset I2C controller */
regmap_field_write(hdmi->field_ddc_en, 1);
regmap_field_write(hdmi->field_ddc_reset, 1);
if (regmap_field_read_poll_timeout(hdmi->field_ddc_reset,
reg, !reg, 100, 2000)) {
clk_disable_unprepare(hdmi->ddc_clk);
return -EIO;
}
regmap_field_write(hdmi->field_ddc_sck_en, 1);
regmap_field_write(hdmi->field_ddc_sda_en, 1);
for (i = 0; i < num; i++) {
err = xfer_msg(hdmi, &msgs[i]);
if (err) {
ret = err;
break;
}
}
clk_disable_unprepare(hdmi->ddc_clk);
return ret;
}
static u32 sun4i_hdmi_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm sun4i_hdmi_i2c_algorithm = {
.master_xfer = sun4i_hdmi_i2c_xfer,
.functionality = sun4i_hdmi_i2c_func,
};
static int sun4i_hdmi_init_regmap_fields(struct sun4i_hdmi *hdmi)
{
hdmi->field_ddc_en =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_en);
if (IS_ERR(hdmi->field_ddc_en))
return PTR_ERR(hdmi->field_ddc_en);
hdmi->field_ddc_start =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_start);
if (IS_ERR(hdmi->field_ddc_start))
return PTR_ERR(hdmi->field_ddc_start);
hdmi->field_ddc_reset =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_reset);
if (IS_ERR(hdmi->field_ddc_reset))
return PTR_ERR(hdmi->field_ddc_reset);
hdmi->field_ddc_addr_reg =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_addr_reg);
if (IS_ERR(hdmi->field_ddc_addr_reg))
return PTR_ERR(hdmi->field_ddc_addr_reg);
hdmi->field_ddc_slave_addr =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_slave_addr);
if (IS_ERR(hdmi->field_ddc_slave_addr))
return PTR_ERR(hdmi->field_ddc_slave_addr);
hdmi->field_ddc_int_mask =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_int_mask);
if (IS_ERR(hdmi->field_ddc_int_mask))
return PTR_ERR(hdmi->field_ddc_int_mask);
hdmi->field_ddc_int_status =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_int_status);
if (IS_ERR(hdmi->field_ddc_int_status))
return PTR_ERR(hdmi->field_ddc_int_status);
hdmi->field_ddc_fifo_clear =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_fifo_clear);
if (IS_ERR(hdmi->field_ddc_fifo_clear))
return PTR_ERR(hdmi->field_ddc_fifo_clear);
hdmi->field_ddc_fifo_rx_thres =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_fifo_rx_thres);
if (IS_ERR(hdmi->field_ddc_fifo_rx_thres))
return PTR_ERR(hdmi->field_ddc_fifo_rx_thres);
hdmi->field_ddc_fifo_tx_thres =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_fifo_tx_thres);
if (IS_ERR(hdmi->field_ddc_fifo_tx_thres))
return PTR_ERR(hdmi->field_ddc_fifo_tx_thres);
hdmi->field_ddc_byte_count =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_byte_count);
if (IS_ERR(hdmi->field_ddc_byte_count))
return PTR_ERR(hdmi->field_ddc_byte_count);
hdmi->field_ddc_cmd =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_cmd);
if (IS_ERR(hdmi->field_ddc_cmd))
return PTR_ERR(hdmi->field_ddc_cmd);
hdmi->field_ddc_sda_en =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_sda_en);
if (IS_ERR(hdmi->field_ddc_sda_en))
return PTR_ERR(hdmi->field_ddc_sda_en);
hdmi->field_ddc_sck_en =
devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->field_ddc_sck_en);
if (IS_ERR(hdmi->field_ddc_sck_en))
return PTR_ERR(hdmi->field_ddc_sck_en);
return 0;
}
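/*
 * Design note, sketch only (excluded from the build): the explicit
 * one-field-at-a-time sequence above could be collapsed into a
 * table-driven loop. This variant assumes the regmap_field pointers can
 * be addressed via offsets into struct sun4i_hdmi and struct
 * sun4i_hdmi_variant, which is an assumption of the sketch, not
 * something the driver does.
 */
#if 0
static int init_fields_table_driven(struct sun4i_hdmi *hdmi)
{
static const struct {
size_t variant_off; /* offset of the struct reg_field in the variant */
size_t hdmi_off; /* offset of the struct regmap_field * in hdmi */
} fields[] = {
{ offsetof(struct sun4i_hdmi_variant, field_ddc_en),
offsetof(struct sun4i_hdmi, field_ddc_en) },
/* ... one entry per DDC field ... */
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
const struct reg_field *def =
(const void *)hdmi->variant + fields[i].variant_off;
struct regmap_field **out = (void *)hdmi + fields[i].hdmi_off;
*out = devm_regmap_field_alloc(hdmi->dev, hdmi->regmap, *def);
if (IS_ERR(*out))
return PTR_ERR(*out);
}
return 0;
}
#endif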
int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi)
{
struct i2c_adapter *adap;
int ret = 0;
ret = sun4i_ddc_create(hdmi, hdmi->ddc_parent_clk);
if (ret)
return ret;
ret = sun4i_hdmi_init_regmap_fields(hdmi);
if (ret)
return ret;
adap = devm_kzalloc(dev, sizeof(*adap), GFP_KERNEL);
if (!adap)
return -ENOMEM;
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DDC;
adap->algo = &sun4i_hdmi_i2c_algorithm;
strscpy(adap->name, "sun4i_hdmi_i2c adapter", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
ret = i2c_add_adapter(adap);
if (ret)
return ret;
hdmi->i2c = adap;
return ret;
}
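/*
 * Once registered, the adapter appears as /dev/i2c-N and the monitor's
 * EDID can be read at address 0x50. A minimal userspace sketch,
 * illustrative only (the device node name is hypothetical and all
 * error handling is elided):
 */
#if 0
int fd = open("/dev/i2c-N", O_RDWR); /* hypothetical device node */
unsigned char off = 0, edid[128];
ioctl(fd, I2C_SLAVE, 0x50); /* DDC EDID slave address */
write(fd, &off, 1); /* start reading at offset 0 */
read(fd, edid, sizeof(edid)); /* first 128-byte EDID block */
#endif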
| linux-master | drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Free Electrons
* Copyright (C) 2015 NextThing Co
*
* Maxime Ripard <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "sun4i_drv.h"
#include "sun4i_framebuffer.h"
static int sun4i_de_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ret;
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
ret = drm_atomic_normalize_zpos(dev, state);
if (ret)
return ret;
return drm_atomic_helper_check_planes(dev, state);
}
static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
.atomic_check = sun4i_de_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
};
static const struct drm_mode_config_helper_funcs sun4i_de_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
void sun4i_framebuffer_init(struct drm_device *drm)
{
drm_mode_config_reset(drm);
drm->mode_config.max_width = 8192;
drm->mode_config.max_height = 8192;
drm->mode_config.funcs = &sun4i_de_mode_config_funcs;
drm->mode_config.helper_private = &sun4i_de_mode_config_helpers;
}
| linux-master | drivers/gpu/drm/sun4i/sun4i_framebuffer.c |
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018 Jernej Skrabec <[email protected]> */
#include <linux/bitfield.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/sun8i-tcon-top.h>
#include "sun8i_tcon_top.h"
struct sun8i_tcon_top_quirks {
bool has_tcon_tv1;
bool has_dsi;
};
static bool sun8i_tcon_top_node_is_tcon_top(struct device_node *node)
{
return !!of_match_node(sun8i_tcon_top_of_table, node);
}
int sun8i_tcon_top_set_hdmi_src(struct device *dev, int tcon)
{
struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
unsigned long flags;
u32 val;
if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) {
dev_err(dev, "Device is not TCON TOP!\n");
return -EINVAL;
}
if (tcon < 2 || tcon > 3) {
dev_err(dev, "TCON index must be 2 or 3!\n");
return -EINVAL;
}
spin_lock_irqsave(&tcon_top->reg_lock, flags);
val = readl(tcon_top->regs + TCON_TOP_GATE_SRC_REG);
val &= ~TCON_TOP_HDMI_SRC_MSK;
val |= FIELD_PREP(TCON_TOP_HDMI_SRC_MSK, tcon - 1);
writel(val, tcon_top->regs + TCON_TOP_GATE_SRC_REG);
spin_unlock_irqrestore(&tcon_top->reg_lock, flags);
return 0;
}
EXPORT_SYMBOL(sun8i_tcon_top_set_hdmi_src);
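/*
 * Worked example of the encoding above, assuming TCON_TOP_HDMI_SRC_MSK
 * is a contiguous two-bit field: tcon == 2 makes FIELD_PREP() place the
 * value 1 in the field (TCON TV0), tcon == 3 places the value 2 (TCON
 * TV1). FIELD_GET(TCON_TOP_HDMI_SRC_MSK, val) would perform the inverse
 * extraction.
 */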
int sun8i_tcon_top_de_config(struct device *dev, int mixer, int tcon)
{
struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
unsigned long flags;
u32 reg;
if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) {
dev_err(dev, "Device is not TCON TOP!\n");
return -EINVAL;
}
if (mixer > 1) {
dev_err(dev, "Mixer index is too high!\n");
return -EINVAL;
}
if (tcon > 3) {
dev_err(dev, "TCON index is too high!\n");
return -EINVAL;
}
spin_lock_irqsave(&tcon_top->reg_lock, flags);
reg = readl(tcon_top->regs + TCON_TOP_PORT_SEL_REG);
if (mixer == 0) {
reg &= ~TCON_TOP_PORT_DE0_MSK;
reg |= FIELD_PREP(TCON_TOP_PORT_DE0_MSK, tcon);
} else {
reg &= ~TCON_TOP_PORT_DE1_MSK;
reg |= FIELD_PREP(TCON_TOP_PORT_DE1_MSK, tcon);
}
writel(reg, tcon_top->regs + TCON_TOP_PORT_SEL_REG);
spin_unlock_irqrestore(&tcon_top->reg_lock, flags);
return 0;
}
EXPORT_SYMBOL(sun8i_tcon_top_de_config);
static struct clk_hw *sun8i_tcon_top_register_gate(struct device *dev,
const char *parent,
void __iomem *regs,
spinlock_t *lock,
u8 bit, int name_index)
{
const char *clk_name, *parent_name;
int ret, index;
index = of_property_match_string(dev->of_node, "clock-names", parent);
if (index < 0)
return ERR_PTR(index);
parent_name = of_clk_get_parent_name(dev->of_node, index);
ret = of_property_read_string_index(dev->of_node,
"clock-output-names", name_index,
&clk_name);
if (ret)
return ERR_PTR(ret);
return clk_hw_register_gate(dev, clk_name, parent_name,
CLK_SET_RATE_PARENT,
regs + TCON_TOP_GATE_SRC_REG,
bit, 0, lock);
}
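/*
 * Illustration of the lookup chain above, under assumed bindings: given
 * a hypothetical node with clock-names = "tcon-tv0" and
 * clock-output-names = "tcon-top-tv0", the helper resolves "tcon-tv0"
 * to an index in the clocks property, fetches that parent clock's name,
 * and registers a gate named "tcon-top-tv0" whose enable bit lives in
 * TCON_TOP_GATE_SRC_REG.
 */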
static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct clk_hw_onecell_data *clk_data;
struct sun8i_tcon_top *tcon_top;
const struct sun8i_tcon_top_quirks *quirks;
void __iomem *regs;
int ret, i;
quirks = of_device_get_match_data(&pdev->dev);
tcon_top = devm_kzalloc(dev, sizeof(*tcon_top), GFP_KERNEL);
if (!tcon_top)
return -ENOMEM;
clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, CLK_NUM),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
clk_data->num = CLK_NUM;
tcon_top->clk_data = clk_data;
spin_lock_init(&tcon_top->reg_lock);
tcon_top->rst = devm_reset_control_get(dev, NULL);
if (IS_ERR(tcon_top->rst)) {
dev_err(dev, "Couldn't get our reset line\n");
return PTR_ERR(tcon_top->rst);
}
tcon_top->bus = devm_clk_get(dev, "bus");
if (IS_ERR(tcon_top->bus)) {
dev_err(dev, "Couldn't get the bus clock\n");
return PTR_ERR(tcon_top->bus);
}
regs = devm_platform_ioremap_resource(pdev, 0);
tcon_top->regs = regs;
if (IS_ERR(regs))
return PTR_ERR(regs);
ret = reset_control_deassert(tcon_top->rst);
if (ret) {
dev_err(dev, "Could not deassert ctrl reset control\n");
return ret;
}
ret = clk_prepare_enable(tcon_top->bus);
if (ret) {
dev_err(dev, "Could not enable bus clock\n");
goto err_assert_reset;
}
/*
* At least on H6, some registers have some bits set by default
* which may cause issues. Clear them here.
*/
writel(0, regs + TCON_TOP_PORT_SEL_REG);
writel(0, regs + TCON_TOP_GATE_SRC_REG);
/*
* TCON TOP has two muxes, which select parent clock for each TCON TV
* channel clock. Parent could be either TCON TV or TVE clock. For now
* we leave this fixed to TCON TV, since TVE driver for R40 is not yet
* implemented. Once it is, graph needs to be traversed to determine
* if TVE is active on each TCON TV. If it is, mux should be switched
* to TVE clock parent.
*/
i = 0;
clk_data->hws[CLK_TCON_TOP_TV0] =
sun8i_tcon_top_register_gate(dev, "tcon-tv0", regs,
&tcon_top->reg_lock,
TCON_TOP_TCON_TV0_GATE, i++);
if (quirks->has_tcon_tv1)
clk_data->hws[CLK_TCON_TOP_TV1] =
sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs,
&tcon_top->reg_lock,
TCON_TOP_TCON_TV1_GATE, i++);
if (quirks->has_dsi)
clk_data->hws[CLK_TCON_TOP_DSI] =
sun8i_tcon_top_register_gate(dev, "dsi", regs,
&tcon_top->reg_lock,
TCON_TOP_TCON_DSI_GATE, i++);
for (i = 0; i < CLK_NUM; i++)
if (IS_ERR(clk_data->hws[i])) {
ret = PTR_ERR(clk_data->hws[i]);
goto err_unregister_gates;
}
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
clk_data);
if (ret)
goto err_unregister_gates;
dev_set_drvdata(dev, tcon_top);
return 0;
err_unregister_gates:
for (i = 0; i < CLK_NUM; i++)
if (!IS_ERR_OR_NULL(clk_data->hws[i]))
clk_hw_unregister_gate(clk_data->hws[i]);
clk_disable_unprepare(tcon_top->bus);
err_assert_reset:
reset_control_assert(tcon_top->rst);
return ret;
}
static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
void *data)
{
struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
struct clk_hw_onecell_data *clk_data = tcon_top->clk_data;
int i;
of_clk_del_provider(dev->of_node);
for (i = 0; i < CLK_NUM; i++)
if (clk_data->hws[i])
clk_hw_unregister_gate(clk_data->hws[i]);
clk_disable_unprepare(tcon_top->bus);
reset_control_assert(tcon_top->rst);
}
static const struct component_ops sun8i_tcon_top_ops = {
.bind = sun8i_tcon_top_bind,
.unbind = sun8i_tcon_top_unbind,
};
static int sun8i_tcon_top_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &sun8i_tcon_top_ops);
}
static void sun8i_tcon_top_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun8i_tcon_top_ops);
}
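/*
 * Note on the split above: probe()/remove() only (de)register this
 * device with the component framework. The actual resource setup lives
 * in bind()/unbind(), which run once the DRM master driver has
 * collected every component of the display pipeline, and tear down in
 * reverse order when the aggregate goes away.
 */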
static const struct sun8i_tcon_top_quirks sun8i_r40_tcon_top_quirks = {
.has_tcon_tv1 = true,
.has_dsi = true,
};
static const struct sun8i_tcon_top_quirks sun20i_d1_tcon_top_quirks = {
.has_dsi = true,
};
static const struct sun8i_tcon_top_quirks sun50i_h6_tcon_top_quirks = {
/* Nothing special */
};
/* sun4i_drv uses this list to check if a device node is a TCON TOP */
const struct of_device_id sun8i_tcon_top_of_table[] = {
{
.compatible = "allwinner,sun8i-r40-tcon-top",
.data = &sun8i_r40_tcon_top_quirks
},
{
.compatible = "allwinner,sun20i-d1-tcon-top",
.data = &sun20i_d1_tcon_top_quirks
},
{
.compatible = "allwinner,sun50i-h6-tcon-top",
.data = &sun50i_h6_tcon_top_quirks
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
EXPORT_SYMBOL(sun8i_tcon_top_of_table);
static struct platform_driver sun8i_tcon_top_platform_driver = {
.probe = sun8i_tcon_top_probe,
.remove_new = sun8i_tcon_top_remove,
.driver = {
.name = "sun8i-tcon-top",
.of_match_table = sun8i_tcon_top_of_table,
},
};
module_platform_driver(sun8i_tcon_top_platform_driver);
MODULE_AUTHOR("Jernej Skrabec <[email protected]>");
MODULE_DESCRIPTION("Allwinner R40 TCON TOP driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sun4i/sun8i_tcon_top.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Free Electrons
* Copyright (C) 2015 NextThing Co
*
* Maxime Ripard <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
#include <video/videomode.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modes.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "sun4i_backend.h"
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
#include "sunxi_engine.h"
#include "sun4i_tcon.h"
/*
* While DRM in theory allows more than one encoder per CRTC, in
* practice we can only ever have one encoder per TCON since we have a
* mux in our TCON.
*/
static struct drm_encoder *sun4i_crtc_get_encoder(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, crtc->dev)
if (encoder->crtc == crtc)
return encoder;
return NULL;
}
static int sun4i_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
struct sunxi_engine *engine = scrtc->engine;
int ret = 0;
if (engine && engine->ops && engine->ops->atomic_check)
ret = engine->ops->atomic_check(engine, crtc_state);
return ret;
}
static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct sunxi_engine *engine = scrtc->engine;
unsigned long flags;
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
spin_lock_irqsave(&dev->event_lock, flags);
scrtc->event = crtc->state->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
crtc->state->event = NULL;
}
if (engine->ops->atomic_begin)
engine->ops->atomic_begin(engine, old_state);
}
static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
struct drm_pending_vblank_event *event = crtc->state->event;
DRM_DEBUG_DRIVER("Committing plane changes\n");
sunxi_engine_commit(scrtc->engine);
if (event) {
crtc->state->event = NULL;
spin_lock_irq(&crtc->dev->event_lock);
if (drm_crtc_vblank_get(crtc) == 0)
drm_crtc_arm_vblank_event(crtc, event);
else
drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&crtc->dev->event_lock);
}
}
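/*
 * Note on the event handling above: when a vblank reference can be
 * taken, the pageflip event is armed and delivered from the vblank
 * interrupt once the new configuration has latched; otherwise (e.g.
 * vblank already disabled) it is sent immediately so userspace is never
 * left waiting for a completion that would never arrive.
 */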
static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
drm_crtc_vblank_off(crtc);
sun4i_tcon_set_status(scrtc->tcon, encoder, false);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
spin_unlock_irq(&crtc->dev->event_lock);
crtc->state->event = NULL;
}
}
static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
DRM_DEBUG_DRIVER("Enabling the CRTC\n");
sun4i_tcon_set_status(scrtc->tcon, encoder, true);
drm_crtc_vblank_on(crtc);
}
static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
sun4i_tcon_mode_set(scrtc->tcon, encoder, mode);
sunxi_engine_mode_set(scrtc->engine, mode);
}
static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
.atomic_check = sun4i_crtc_atomic_check,
.atomic_begin = sun4i_crtc_atomic_begin,
.atomic_flush = sun4i_crtc_atomic_flush,
.atomic_enable = sun4i_crtc_atomic_enable,
.atomic_disable = sun4i_crtc_atomic_disable,
.mode_set_nofb = sun4i_crtc_mode_set_nofb,
};
static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
DRM_DEBUG_DRIVER("Enabling VBLANK on crtc %p\n", crtc);
sun4i_tcon_enable_vblank(scrtc->tcon, true);
return 0;
}
static void sun4i_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
DRM_DEBUG_DRIVER("Disabling VBLANK on crtc %p\n", crtc);
sun4i_tcon_enable_vblank(scrtc->tcon, false);
}
static const struct drm_crtc_funcs sun4i_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.set_config = drm_atomic_helper_set_config,
.enable_vblank = sun4i_crtc_enable_vblank,
.disable_vblank = sun4i_crtc_disable_vblank,
};
struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm,
struct sunxi_engine *engine,
struct sun4i_tcon *tcon)
{
struct sun4i_crtc *scrtc;
struct drm_plane **planes;
struct drm_plane *primary = NULL, *cursor = NULL;
int ret, i;
scrtc = devm_kzalloc(drm->dev, sizeof(*scrtc), GFP_KERNEL);
if (!scrtc)
return ERR_PTR(-ENOMEM);
scrtc->engine = engine;
scrtc->tcon = tcon;
/* Create our layers */
planes = sunxi_engine_layers_init(drm, engine);
if (IS_ERR(planes)) {
dev_err(drm->dev, "Couldn't create the planes\n");
return NULL;
}
/* find primary and cursor planes for drm_crtc_init_with_planes */
for (i = 0; planes[i]; i++) {
struct drm_plane *plane = planes[i];
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
primary = plane;
break;
case DRM_PLANE_TYPE_CURSOR:
cursor = plane;
break;
default:
break;
}
}
ret = drm_crtc_init_with_planes(drm, &scrtc->crtc,
primary,
cursor,
&sun4i_crtc_funcs,
NULL);
if (ret) {
dev_err(drm->dev, "Couldn't init DRM CRTC\n");
return ERR_PTR(ret);
}
drm_crtc_helper_add(&scrtc->crtc, &sun4i_crtc_helper_funcs);
/* Set crtc.port to output port node of the tcon */
scrtc->crtc.port = of_graph_get_port_by_id(scrtc->tcon->dev->of_node,
1);
/* Set possible_crtcs to this crtc for overlay planes */
for (i = 0; planes[i]; i++) {
uint32_t possible_crtcs = drm_crtc_mask(&scrtc->crtc);
struct drm_plane *plane = planes[i];
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
plane->possible_crtcs = possible_crtcs;
}
return scrtc;
}
| linux-master | drivers/gpu/drm/sun4i/sun4i_crtc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2018 Jernej Skrabec <[email protected]>
*/
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>
#include "sun8i_dw_hdmi.h"
#include "sun8i_tcon_top.h"
static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
struct sun8i_dw_hdmi *hdmi = encoder_to_sun8i_dw_hdmi(encoder);
clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
}
static const struct drm_encoder_helper_funcs
sun8i_dw_hdmi_encoder_helper_funcs = {
.mode_set = sun8i_dw_hdmi_encoder_mode_set,
};
static enum drm_mode_status
sun8i_dw_hdmi_mode_valid_a83t(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
if (mode->clock > 297000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static enum drm_mode_status
sun8i_dw_hdmi_mode_valid_h6(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
/*
* The controller supports a maximum of 594 MHz, which corresponds to
* 4K@60Hz 4:4:4 or RGB.
*/
if (mode->clock > 594000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
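/*
 * Worked example for the two limits: 4K@60Hz (3840x2160) with CTA
 * timings needs a 594 MHz pixel clock at 8 bpc, i.e. mode->clock ==
 * 594000 (kHz), exactly the H6 cutoff; 1080p@60Hz needs 148.5 MHz,
 * well under the 297 MHz A83T cutoff.
 */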
static bool sun8i_dw_hdmi_node_is_tcon_top(struct device_node *node)
{
return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
!!of_match_node(sun8i_tcon_top_of_table, node);
}
static u32 sun8i_dw_hdmi_find_possible_crtcs(struct drm_device *drm,
struct device_node *node)
{
struct device_node *port, *ep, *remote, *remote_port;
u32 crtcs = 0;
remote = of_graph_get_remote_node(node, 0, -1);
if (!remote)
return 0;
if (sun8i_dw_hdmi_node_is_tcon_top(remote)) {
port = of_graph_get_port_by_id(remote, 4);
if (!port)
goto crtcs_exit;
for_each_child_of_node(port, ep) {
remote_port = of_graph_get_remote_port(ep);
if (remote_port) {
crtcs |= drm_of_crtc_port_mask(drm, remote_port);
of_node_put(remote_port);
}
}
} else {
crtcs = drm_of_find_possible_crtcs(drm, node);
}
crtcs_exit:
of_node_put(remote);
return crtcs;
}
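/*
 * Sketch of the traversal above for an R40-style graph: the HDMI input
 * endpoint leads to the TCON TOP, whose port 4 fans out to the TCONs;
 * each endpoint's remote port is matched to a CRTC and OR-ed into the
 * bitmask via drm_of_crtc_port_mask(). Without a TCON TOP in between,
 * the generic drm_of_find_possible_crtcs() walk applies directly.
 */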
static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_hdmi_plat_data *plat_data;
struct drm_device *drm = data;
struct device_node *phy_node;
struct drm_encoder *encoder;
struct sun8i_dw_hdmi *hdmi;
int ret;
if (!pdev->dev.of_node)
return -ENODEV;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
plat_data = &hdmi->plat_data;
hdmi->dev = &pdev->dev;
encoder = &hdmi->encoder;
hdmi->quirks = of_device_get_match_data(dev);
encoder->possible_crtcs =
sun8i_dw_hdmi_find_possible_crtcs(drm, dev->of_node);
/*
* If we failed to find the CRTC(s) which this encoder is
* supposed to be connected to, it's because the CRTC has
* not been registered yet. Defer probing, and hope that
* the required CRTC is added later.
*/
if (encoder->possible_crtcs == 0)
return -EPROBE_DEFER;
hdmi->rst_ctrl = devm_reset_control_get(dev, "ctrl");
if (IS_ERR(hdmi->rst_ctrl))
return dev_err_probe(dev, PTR_ERR(hdmi->rst_ctrl),
"Could not get ctrl reset control\n");
hdmi->clk_tmds = devm_clk_get(dev, "tmds");
if (IS_ERR(hdmi->clk_tmds))
return dev_err_probe(dev, PTR_ERR(hdmi->clk_tmds),
"Couldn't get the tmds clock\n");
hdmi->regulator = devm_regulator_get(dev, "hvcc");
if (IS_ERR(hdmi->regulator))
return dev_err_probe(dev, PTR_ERR(hdmi->regulator),
"Couldn't get regulator\n");
ret = regulator_enable(hdmi->regulator);
if (ret) {
dev_err(dev, "Failed to enable regulator\n");
return ret;
}
ret = reset_control_deassert(hdmi->rst_ctrl);
if (ret) {
dev_err(dev, "Could not deassert ctrl reset control\n");
goto err_disable_regulator;
}
ret = clk_prepare_enable(hdmi->clk_tmds);
if (ret) {
dev_err(dev, "Could not enable tmds clock\n");
goto err_assert_ctrl_reset;
}
phy_node = of_parse_phandle(dev->of_node, "phys", 0);
if (!phy_node) {
dev_err(dev, "Can't found PHY phandle\n");
ret = -EINVAL;
goto err_disable_clk_tmds;
}
ret = sun8i_hdmi_phy_get(hdmi, phy_node);
of_node_put(phy_node);
if (ret) {
dev_err(dev, "Couldn't get the HDMI PHY\n");
goto err_disable_clk_tmds;
}
ret = sun8i_hdmi_phy_init(hdmi->phy);
if (ret)
goto err_disable_clk_tmds;
drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
plat_data->mode_valid = hdmi->quirks->mode_valid;
plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe;
sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
platform_set_drvdata(pdev, hdmi);
hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
/*
* If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
* which would have called the encoder cleanup. Do it manually.
*/
if (IS_ERR(hdmi->hdmi)) {
ret = PTR_ERR(hdmi->hdmi);
goto cleanup_encoder;
}
return 0;
cleanup_encoder:
drm_encoder_cleanup(encoder);
err_disable_clk_tmds:
clk_disable_unprepare(hdmi->clk_tmds);
err_assert_ctrl_reset:
reset_control_assert(hdmi->rst_ctrl);
err_disable_regulator:
regulator_disable(hdmi->regulator);
return ret;
}
static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
void *data)
{
struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
dw_hdmi_unbind(hdmi->hdmi);
sun8i_hdmi_phy_deinit(hdmi->phy);
clk_disable_unprepare(hdmi->clk_tmds);
reset_control_assert(hdmi->rst_ctrl);
regulator_disable(hdmi->regulator);
}
static const struct component_ops sun8i_dw_hdmi_ops = {
.bind = sun8i_dw_hdmi_bind,
.unbind = sun8i_dw_hdmi_unbind,
};
static int sun8i_dw_hdmi_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &sun8i_dw_hdmi_ops);
}
static void sun8i_dw_hdmi_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun8i_dw_hdmi_ops);
}
static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
.mode_valid = sun8i_dw_hdmi_mode_valid_a83t,
};
static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {
.mode_valid = sun8i_dw_hdmi_mode_valid_h6,
.use_drm_infoframe = true,
};
static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
{
.compatible = "allwinner,sun8i-a83t-dw-hdmi",
.data = &sun8i_a83t_quirks,
},
{
.compatible = "allwinner,sun50i-h6-dw-hdmi",
.data = &sun50i_h6_quirks,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids);
static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
.probe = sun8i_dw_hdmi_probe,
.remove_new = sun8i_dw_hdmi_remove,
.driver = {
.name = "sun8i-dw-hdmi",
.of_match_table = sun8i_dw_hdmi_dt_ids,
},
};
static int __init sun8i_dw_hdmi_init(void)
{
int ret;
ret = platform_driver_register(&sun8i_dw_hdmi_pltfm_driver);
if (ret)
return ret;
ret = platform_driver_register(&sun8i_hdmi_phy_driver);
if (ret) {
platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
return ret;
}
return ret;
}
static void __exit sun8i_dw_hdmi_exit(void)
{
platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
platform_driver_unregister(&sun8i_hdmi_phy_driver);
}
module_init(sun8i_dw_hdmi_init);
module_exit(sun8i_dw_hdmi_exit);
MODULE_AUTHOR("Jernej Skrabec <[email protected]>");
MODULE_DESCRIPTION("Allwinner DW HDMI bridge");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Free Electrons
* Copyright (C) 2015 NextThing Co
*
* Maxime Ripard <[email protected]>
*/
#include <linux/component.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_backend.h"
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
#include "sun4i_layer.h"
#include "sunxi_engine.h"
struct sun4i_backend_quirks {
/* backend <-> TCON muxing selection done in backend */
bool needs_output_muxing;
/* alpha at the lowest z position is not always supported */
bool supports_lowest_plane_alpha;
};
static const u32 sunxi_rgb2yuv_coef[12] = {
0x00000107, 0x00000204, 0x00000064, 0x00000108,
0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
};
static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
int i;
DRM_DEBUG_DRIVER("Applying RGB to YUV color correction\n");
/* Set color correction */
regmap_write(engine->regs, SUN4I_BACKEND_OCCTL_REG,
SUN4I_BACKEND_OCCTL_ENABLE);
for (i = 0; i < 12; i++)
regmap_write(engine->regs, SUN4I_BACKEND_OCRCOEF_REG(i),
sunxi_rgb2yuv_coef[i]);
}
static void sun4i_backend_disable_color_correction(struct sunxi_engine *engine)
{
DRM_DEBUG_DRIVER("Disabling color correction\n");
/* Disable color correction */
regmap_update_bits(engine->regs, SUN4I_BACKEND_OCCTL_REG,
SUN4I_BACKEND_OCCTL_ENABLE, 0);
}
static void sun4i_backend_commit(struct sunxi_engine *engine)
{
DRM_DEBUG_DRIVER("Committing changes\n");
regmap_write(engine->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS |
SUN4I_BACKEND_REGBUFFCTL_LOADCTL);
}
void sun4i_backend_layer_enable(struct sun4i_backend *backend,
int layer, bool enable)
{
u32 val;
DRM_DEBUG_DRIVER("%sabling layer %d\n", enable ? "En" : "Dis",
layer);
if (enable)
val = SUN4I_BACKEND_MODCTL_LAY_EN(layer);
else
val = 0;
regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
SUN4I_BACKEND_MODCTL_LAY_EN(layer), val);
}
static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
{
switch (format) {
case DRM_FORMAT_ARGB8888:
*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
break;
case DRM_FORMAT_ARGB4444:
*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
break;
case DRM_FORMAT_ARGB1555:
*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
break;
case DRM_FORMAT_RGBA5551:
*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
break;
case DRM_FORMAT_RGBA4444:
*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
break;
case DRM_FORMAT_XRGB8888:
*mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
break;
case DRM_FORMAT_RGB888:
*mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
break;
case DRM_FORMAT_RGB565:
*mode = SUN4I_BACKEND_LAY_FBFMT_RGB565;
break;
default:
return -EINVAL;
}
return 0;
}
static const uint32_t sun4i_backend_formats[] = {
DRM_FORMAT_ARGB1555,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
};
bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
{
unsigned int i;
if (modifier != DRM_FORMAT_MOD_LINEAR)
return false;
for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
if (sun4i_backend_formats[i] == fmt)
return true;
return false;
}
int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
int layer, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
DRM_DEBUG_DRIVER("Updating layer %d\n", layer);
/* Set height and width */
DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
state->crtc_w, state->crtc_h);
regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYSIZE_REG(layer),
SUN4I_BACKEND_LAYSIZE(state->crtc_w,
state->crtc_h));
/* Set base coordinates */
DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n",
state->crtc_x, state->crtc_y);
regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYCOOR_REG(layer),
SUN4I_BACKEND_LAYCOOR(state->crtc_x,
state->crtc_y));
return 0;
}
static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
int layer, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
const uint32_t fmt = format->format;
u32 val = SUN4I_BACKEND_IYUVCTL_EN;
int i;
for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
regmap_write(backend->engine.regs,
SUN4I_BACKEND_YGCOEF_REG(i),
sunxi_bt601_yuv2rgb_coef[i]);
/*
* We should do that only for a single plane, but the
* framebuffer's atomic_check has our back on this.
*/
regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN,
SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);
/* TODO: Add support for the multi-planar YUV formats */
if (drm_format_info_is_yuv_packed(format) &&
drm_format_info_is_yuv_sampling_422(format))
val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
else
DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt);
/*
* Allwinner seems to list the pixel sequence from right to left, while
* DRM lists it from left to right.
*/
switch (fmt) {
case DRM_FORMAT_YUYV:
val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
break;
case DRM_FORMAT_YVYU:
val |= SUN4I_BACKEND_IYUVCTL_FBPS_UYVY;
break;
case DRM_FORMAT_UYVY:
val |= SUN4I_BACKEND_IYUVCTL_FBPS_YVYU;
break;
case DRM_FORMAT_VYUY:
val |= SUN4I_BACKEND_IYUVCTL_FBPS_YUYV;
break;
default:
DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
fmt);
}
regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);
return 0;
}
int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
int layer, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
u32 val;
int ret;
/* Clear the YUV mode */
regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;
regmap_update_bits(backend->engine.regs,
SUN4I_BACKEND_ATTCTL_REG0(layer),
SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK |
SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
val);
if (fb->format->is_yuv)
return sun4i_backend_update_yuv_format(backend, layer, plane);
ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid format\n");
return ret;
}
regmap_update_bits(backend->engine.regs,
SUN4I_BACKEND_ATTCTL_REG1(layer),
SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);
return 0;
}
int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
int layer, uint32_t fmt)
{
u32 val;
int ret;
ret = sun4i_backend_drm_format_to_layer(fmt, &val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid format\n");
return ret;
}
regmap_update_bits(backend->engine.regs,
SUN4I_BACKEND_ATTCTL_REG0(layer),
SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN,
SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN);
regmap_update_bits(backend->engine.regs,
SUN4I_BACKEND_ATTCTL_REG1(layer),
SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);
return 0;
}
static int sun4i_backend_update_yuv_buffer(struct sun4i_backend *backend,
struct drm_framebuffer *fb,
dma_addr_t paddr)
{
/* TODO: Add support for the multi-planar YUV formats */
DRM_DEBUG_DRIVER("Setting packed YUV buffer address to %pad\n", &paddr);
regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVADD_REG(0), paddr);
DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVLINEWIDTH_REG(0),
fb->pitches[0] * 8);
return 0;
}
int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
int layer, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
u32 lo_paddr, hi_paddr;
dma_addr_t dma_addr;
/* Set the line width */
DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
regmap_write(backend->engine.regs,
SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
fb->pitches[0] * 8);
/* Get the start of the displayed memory */
dma_addr = drm_fb_dma_get_gem_addr(fb, state, 0);
DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &dma_addr);
if (fb->format->is_yuv)
return sun4i_backend_update_yuv_buffer(backend, fb, dma_addr);
/* Write the 32 lower bits of the address (in bits) */
lo_paddr = dma_addr << 3;
DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
regmap_write(backend->engine.regs,
SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
lo_paddr);
/* And the upper bits */
hi_paddr = dma_addr >> 29;
DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
SUN4I_BACKEND_LAYFB_H4ADD(layer, hi_paddr));
return 0;
}
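/*
 * Worked example for the split above: the hardware wants the scanout
 * address expressed in bits. With dma_addr == 0x40000000,
 * dma_addr << 3 == 0x200000000; the low 32 bits (0x00000000) go into
 * LAYFB_L32ADD and dma_addr >> 29 == 0x2 supplies the extra high bits
 * via LAYFB_H4ADD, together reconstructing the full bit address.
 */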
int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
struct sun4i_layer_state *p_state = state_to_sun4i_layer_state(state);
unsigned int priority = state->normalized_zpos;
unsigned int pipe = p_state->pipe;
DRM_DEBUG_DRIVER("Setting layer %d's priority to %d and pipe %d\n",
layer, priority, pipe);
regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK |
SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK,
SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(p_state->pipe) |
SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(priority));
return 0;
}
void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
int layer)
{
regmap_update_bits(backend->engine.regs,
SUN4I_BACKEND_ATTCTL_REG0(layer),
SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
}
static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
{
u16 src_h = state->src_h >> 16;
u16 src_w = state->src_w >> 16;
DRM_DEBUG_DRIVER("Input size %dx%d, output size %dx%d\n",
src_w, src_h, state->crtc_w, state->crtc_h);
if ((state->crtc_h != src_h) || (state->crtc_w != src_w))
return true;
return false;
}
static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
{
struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
struct sun4i_backend *backend = layer->backend;
uint32_t format = state->fb->format->format;
uint64_t modifier = state->fb->modifier;
if (IS_ERR(backend->frontend))
return false;
if (!sun4i_frontend_format_is_supported(format, modifier))
return false;
if (!sun4i_backend_format_is_supported(format, modifier))
return true;
/*
* TODO: The backend alone allows 2x and 4x integer scaling, including
* support for an alpha component (which the frontend doesn't support).
* Use the backend directly instead of the frontend in this case, with
* another test to return false.
*/
if (sun4i_backend_plane_uses_scaler(state))
return true;
/*
* Here the format is supported by both the frontend and the backend
* and no frontend scaling is required, so use the backend directly.
*/
return false;
}
static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
bool *uses_frontend)
{
if (sun4i_backend_plane_uses_frontend(state)) {
*uses_frontend = true;
return true;
}
*uses_frontend = false;
/* Scaling is not supported without the frontend. */
if (sun4i_backend_plane_uses_scaler(state))
return false;
return true;
}
static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
struct drm_crtc_state *old_state)
{
u32 val;
WARN_ON(regmap_read_poll_timeout(engine->regs,
SUN4I_BACKEND_REGBUFFCTL_REG,
val, !(val & SUN4I_BACKEND_REGBUFFCTL_LOADCTL),
100, 50000));
}
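/*
 * Note on the poll above: LOADCTL is the self-clearing commit bit set
 * by sun4i_backend_commit(); the hardware clears it once the shadow
 * registers have been latched. Waiting for it here guarantees the
 * previous frame's configuration has been consumed before new register
 * values are written.
 */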
static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
struct drm_crtc_state *crtc_state)
{
struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 };
struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
struct drm_atomic_state *state = crtc_state->state;
struct drm_device *drm = state->dev;
struct drm_plane *plane;
unsigned int num_planes = 0;
unsigned int num_alpha_planes = 0;
unsigned int num_frontend_planes = 0;
unsigned int num_alpha_planes_max = 1;
unsigned int num_yuv_planes = 0;
unsigned int current_pipe = 0;
unsigned int i;
DRM_DEBUG_DRIVER("Starting checking our planes\n");
if (!crtc_state->planes_changed)
return 0;
drm_for_each_plane_mask(plane, drm, crtc_state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
struct sun4i_layer_state *layer_state =
state_to_sun4i_layer_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
if (!sun4i_backend_plane_is_supported(plane_state,
&layer_state->uses_frontend))
return -EINVAL;
if (layer_state->uses_frontend) {
DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
plane->index);
num_frontend_planes++;
} else {
if (fb->format->is_yuv) {
DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
num_yuv_planes++;
}
}
DRM_DEBUG_DRIVER("Plane FB format is %p4cc\n",
&fb->format->format);
if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
num_alpha_planes++;
DRM_DEBUG_DRIVER("Plane zpos is %d\n",
plane_state->normalized_zpos);
/* Sort our planes by Zpos */
plane_states[plane_state->normalized_zpos] = plane_state;
num_planes++;
}
/* All our planes were disabled, bail out */
if (!num_planes)
return 0;
/*
* The hardware is a bit unusual here.
*
* Even though it supports 4 layers, it does the composition
* in two separate steps.
*
* The first one is assigning a layer to one of its two
* pipes. If more than 1 layer is assigned to the same pipe,
* and if pixels overlap, the pipe will take the pixel from
* the layer with the highest priority.
*
* The second step is the actual alpha blending, that takes
* the two pipes as input, and uses the potential alpha
* component to do the transparency between the two.
*
* This two-step scenario makes us unable to guarantee a
* robust alpha blending between the 4 layers in all
* situations, since this means that we need to have one layer
* with alpha at the lowest position of our two pipes.
*
* However, we cannot even do that on every platform, since
* the hardware has a bug where the lowest plane of the lowest
* pipe (pipe 0, priority 0), if it has any alpha, will
* discard the pixel data entirely and just display the pixels
* in the background color (black by default).
*
* This means that on the affected platforms, we effectively
* have only three valid configurations with alpha, all of
* them with the alpha being on pipe1 with the lowest
* position, which can be 1, 2 or 3 depending on the number of
* planes and their zpos.
*/
/* For platforms that are not affected by the issue described above. */
if (backend->quirks->supports_lowest_plane_alpha)
num_alpha_planes_max++;
if (num_alpha_planes > num_alpha_planes_max) {
DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n");
return -EINVAL;
}
/* We can't have an alpha plane at the lowest position */
if (!backend->quirks->supports_lowest_plane_alpha &&
(plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
return -EINVAL;
for (i = 1; i < num_planes; i++) {
struct drm_plane_state *p_state = plane_states[i];
struct drm_framebuffer *fb = p_state->fb;
struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(p_state);
/*
* The only alpha position is the lowest plane of the
* second pipe.
*/
if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
current_pipe++;
s_state->pipe = current_pipe;
}
/* We can only have a single YUV plane at a time */
if (num_yuv_planes > SUN4I_BACKEND_NUM_YUV_PLANES) {
DRM_DEBUG_DRIVER("Too many planes with YUV, rejecting...\n");
return -EINVAL;
}
if (num_frontend_planes > SUN4I_BACKEND_NUM_FRONTEND_LAYERS) {
DRM_DEBUG_DRIVER("Too many planes going through the frontend, rejecting\n");
return -EINVAL;
}
DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video, %u YUV\n",
num_planes, num_alpha_planes, num_frontend_planes,
num_yuv_planes);
return 0;
}
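/*
 * Worked example of the pipe assignment loop above: with three enabled
 * planes at normalized zpos 0/1/2 where only the zpos 1 plane carries
 * alpha, plane 0 stays on pipe 0, the alpha plane bumps current_pipe to
 * 1, and planes 1 and 2 both land on pipe 1, which leaves the alpha
 * plane at the lowest position of the second pipe, as required.
 */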
static void sun4i_backend_vblank_quirk(struct sunxi_engine *engine)
{
struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
struct sun4i_frontend *frontend = backend->frontend;
if (!frontend)
return;
/*
* In a teardown scenario with the frontend involved, we have
* to keep the frontend enabled until the next vblank, and
* only then disable it.
*
* This is due to the fact that the backend will not take into
* account the new configuration (with the plane that used to
* be fed by the frontend now disabled) until we write to the
* commit bit and the hardware fetches the new configuration
* during the next vblank.
*
* So we keep the frontend around in order to prevent any
* visual artifacts.
*/
spin_lock(&backend->frontend_lock);
if (backend->frontend_teardown) {
sun4i_frontend_exit(frontend);
backend->frontend_teardown = false;
}
spin_unlock(&backend->frontend_lock);
};
static void sun4i_backend_mode_set(struct sunxi_engine *engine,
const struct drm_display_mode *mode)
{
bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
DRM_DEBUG_DRIVER("Updating global size W: %u H: %u\n",
mode->hdisplay, mode->vdisplay);
regmap_write(engine->regs, SUN4I_BACKEND_DISSIZE_REG,
SUN4I_BACKEND_DISSIZE(mode->hdisplay, mode->vdisplay));
regmap_update_bits(engine->regs, SUN4I_BACKEND_MODCTL_REG,
SUN4I_BACKEND_MODCTL_ITLMOD_EN,
interlaced ? SUN4I_BACKEND_MODCTL_ITLMOD_EN : 0);
DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
interlaced ? "on" : "off");
}
static int sun4i_backend_init_sat(struct device *dev)
{
struct sun4i_backend *backend = dev_get_drvdata(dev);
int ret;
backend->sat_reset = devm_reset_control_get(dev, "sat");
if (IS_ERR(backend->sat_reset)) {
dev_err(dev, "Couldn't get the SAT reset line\n");
return PTR_ERR(backend->sat_reset);
}
ret = reset_control_deassert(backend->sat_reset);
if (ret) {
dev_err(dev, "Couldn't deassert the SAT reset line\n");
return ret;
}
backend->sat_clk = devm_clk_get(dev, "sat");
if (IS_ERR(backend->sat_clk)) {
dev_err(dev, "Couldn't get our SAT clock\n");
ret = PTR_ERR(backend->sat_clk);
goto err_assert_reset;
}
ret = clk_prepare_enable(backend->sat_clk);
if (ret) {
dev_err(dev, "Couldn't enable the SAT clock\n");
goto err_assert_reset;
}
return 0;
err_assert_reset:
reset_control_assert(backend->sat_reset);
return ret;
}
static int sun4i_backend_free_sat(struct device *dev)
{
struct sun4i_backend *backend = dev_get_drvdata(dev);
clk_disable_unprepare(backend->sat_clk);
reset_control_assert(backend->sat_reset);
return 0;
}
/*
* The display backend can take video output from the display frontend, or
* the display enhancement unit on the A80, as input for one of its layers.
* This relationship within the display pipeline is encoded in the device
* tree with of_graph, and we use it here to figure out which backend, if
* there are 2 or more, we are currently probing. The number would be in
* the "reg" property of the upstream output port endpoint.
*/
static int sun4i_backend_of_get_id(struct device_node *node)
{
struct device_node *ep, *remote;
struct of_endpoint of_ep;
/* Input port is 0, and we want the first endpoint. */
ep = of_graph_get_endpoint_by_regs(node, 0, -1);
if (!ep)
return -EINVAL;
remote = of_graph_get_remote_endpoint(ep);
of_node_put(ep);
if (!remote)
return -EINVAL;
of_graph_parse_endpoint(remote, &of_ep);
of_node_put(remote);
return of_ep.id;
}
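/*
 * Example of the id extraction above, under an assumed device tree: if
 * the frontend output endpoint wired to this backend's input port
 * carries reg = <1>, of_graph_parse_endpoint() yields of_ep.id == 1 and
 * this instance registers as backend 1. Note the id is read from the
 * remote (upstream) endpoint, not from our own input port.
 */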
/* TODO: This needs to take multiple pipelines into account */
static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
struct device_node *node)
{
struct device_node *port, *ep, *remote;
struct sun4i_frontend *frontend;
port = of_graph_get_port_by_id(node, 0);
if (!port)
return ERR_PTR(-EINVAL);
for_each_available_child_of_node(port, ep) {
remote = of_graph_get_remote_port_parent(ep);
if (!remote)
continue;
of_node_put(remote);
/* does this node match any registered engines? */
list_for_each_entry(frontend, &drv->frontend_list, list) {
if (remote == frontend->node) {
of_node_put(port);
of_node_put(ep);
return frontend;
}
}
}
of_node_put(port);
return ERR_PTR(-EINVAL);
}
static const struct sunxi_engine_ops sun4i_backend_engine_ops = {
.atomic_begin = sun4i_backend_atomic_begin,
.atomic_check = sun4i_backend_atomic_check,
.commit = sun4i_backend_commit,
.layers_init = sun4i_layers_init,
.apply_color_correction = sun4i_backend_apply_color_correction,
.disable_color_correction = sun4i_backend_disable_color_correction,
.vblank_quirk = sun4i_backend_vblank_quirk,
.mode_set = sun4i_backend_mode_set,
};
static const struct regmap_config sun4i_backend_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x5800,
};
static int sun4i_backend_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_backend *backend;
const struct sun4i_backend_quirks *quirks;
void __iomem *regs;
int i, ret;
backend = devm_kzalloc(dev, sizeof(*backend), GFP_KERNEL);
if (!backend)
return -ENOMEM;
dev_set_drvdata(dev, backend);
spin_lock_init(&backend->frontend_lock);
if (of_property_present(dev->of_node, "interconnects")) {
/*
* This assumes we have the same DMA constraints for all the
* devices in our pipeline (all the backends, but also the
* frontends). This sounds bad, but it has always been the case
* for us, and DRM doesn't do per-device allocation either, so
* we would need to fix DRM first...
*/
ret = of_dma_configure(drm->dev, dev->of_node, true);
if (ret)
return ret;
}
backend->engine.node = dev->of_node;
backend->engine.ops = &sun4i_backend_engine_ops;
backend->engine.id = sun4i_backend_of_get_id(dev->of_node);
if (backend->engine.id < 0)
return backend->engine.id;
backend->frontend = sun4i_backend_find_frontend(drv, dev->of_node);
if (IS_ERR(backend->frontend))
dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
backend->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(backend->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
return PTR_ERR(backend->reset);
}
ret = reset_control_deassert(backend->reset);
if (ret) {
dev_err(dev, "Couldn't deassert our reset line\n");
return ret;
}
backend->bus_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(backend->bus_clk)) {
dev_err(dev, "Couldn't get the backend bus clock\n");
ret = PTR_ERR(backend->bus_clk);
goto err_assert_reset;
}
clk_prepare_enable(backend->bus_clk);
backend->mod_clk = devm_clk_get(dev, "mod");
if (IS_ERR(backend->mod_clk)) {
dev_err(dev, "Couldn't get the backend module clock\n");
ret = PTR_ERR(backend->mod_clk);
goto err_disable_bus_clk;
}
ret = clk_set_rate_exclusive(backend->mod_clk, 300000000);
if (ret) {
dev_err(dev, "Couldn't set the module clock frequency\n");
goto err_disable_bus_clk;
}
clk_prepare_enable(backend->mod_clk);
backend->ram_clk = devm_clk_get(dev, "ram");
if (IS_ERR(backend->ram_clk)) {
dev_err(dev, "Couldn't get the backend RAM clock\n");
ret = PTR_ERR(backend->ram_clk);
goto err_disable_mod_clk;
}
clk_prepare_enable(backend->ram_clk);
if (of_device_is_compatible(dev->of_node,
"allwinner,sun8i-a33-display-backend")) {
ret = sun4i_backend_init_sat(dev);
if (ret) {
dev_err(dev, "Couldn't init SAT resources\n");
goto err_disable_ram_clk;
}
}
backend->engine.regs = devm_regmap_init_mmio(dev, regs,
&sun4i_backend_regmap_config);
if (IS_ERR(backend->engine.regs)) {
dev_err(dev, "Couldn't create the backend regmap\n");
ret = PTR_ERR(backend->engine.regs);
goto err_disable_ram_clk;
}
list_add_tail(&backend->engine.list, &drv->engine_list);
/*
* Many of the backend's layer configuration registers have
* undefined default values. This poses a risk as we use
* regmap_update_bits in some places, and don't overwrite
* the whole register.
*
* Clear the registers here to have something predictable.
*/
for (i = 0x800; i < 0x1000; i += 4)
regmap_write(backend->engine.regs, i, 0);
/* Disable registers autoloading */
regmap_write(backend->engine.regs, SUN4I_BACKEND_REGBUFFCTL_REG,
SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS);
/* Enable the backend */
regmap_write(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
SUN4I_BACKEND_MODCTL_DEBE_EN |
SUN4I_BACKEND_MODCTL_START_CTL);
/* Set output selection if needed */
quirks = of_device_get_match_data(dev);
if (quirks->needs_output_muxing) {
/*
* We assume there is no dynamic muxing of backends
* and TCONs, so we select the backend with same ID.
*
* While dynamic selection might be interesting, since
* the CRTC is tied to the TCON, while the layers are
* tied to the backends, this means, we will need to
* switch between groups of layers. There might not be
* a way to represent this constraint in DRM.
*/
regmap_update_bits(backend->engine.regs,
SUN4I_BACKEND_MODCTL_REG,
SUN4I_BACKEND_MODCTL_OUT_SEL,
(backend->engine.id
? SUN4I_BACKEND_MODCTL_OUT_LCD1
: SUN4I_BACKEND_MODCTL_OUT_LCD0));
}
backend->quirks = quirks;
return 0;
err_disable_ram_clk:
clk_disable_unprepare(backend->ram_clk);
err_disable_mod_clk:
clk_rate_exclusive_put(backend->mod_clk);
clk_disable_unprepare(backend->mod_clk);
err_disable_bus_clk:
clk_disable_unprepare(backend->bus_clk);
err_assert_reset:
reset_control_assert(backend->reset);
return ret;
}
static void sun4i_backend_unbind(struct device *dev, struct device *master,
void *data)
{
struct sun4i_backend *backend = dev_get_drvdata(dev);
list_del(&backend->engine.list);
if (of_device_is_compatible(dev->of_node,
"allwinner,sun8i-a33-display-backend"))
sun4i_backend_free_sat(dev);
clk_disable_unprepare(backend->ram_clk);
clk_rate_exclusive_put(backend->mod_clk);
clk_disable_unprepare(backend->mod_clk);
clk_disable_unprepare(backend->bus_clk);
reset_control_assert(backend->reset);
}
static const struct component_ops sun4i_backend_ops = {
.bind = sun4i_backend_bind,
.unbind = sun4i_backend_unbind,
};
static int sun4i_backend_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &sun4i_backend_ops);
}
static void sun4i_backend_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun4i_backend_ops);
}
static const struct sun4i_backend_quirks sun4i_backend_quirks = {
.needs_output_muxing = true,
};
static const struct sun4i_backend_quirks sun5i_backend_quirks = {
};
static const struct sun4i_backend_quirks sun6i_backend_quirks = {
};
static const struct sun4i_backend_quirks sun7i_backend_quirks = {
.needs_output_muxing = true,
};
static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
.supports_lowest_plane_alpha = true,
};
static const struct sun4i_backend_quirks sun9i_backend_quirks = {
};
static const struct of_device_id sun4i_backend_of_table[] = {
{
.compatible = "allwinner,sun4i-a10-display-backend",
.data = &sun4i_backend_quirks,
},
{
.compatible = "allwinner,sun5i-a13-display-backend",
.data = &sun5i_backend_quirks,
},
{
.compatible = "allwinner,sun6i-a31-display-backend",
.data = &sun6i_backend_quirks,
},
{
.compatible = "allwinner,sun7i-a20-display-backend",
.data = &sun7i_backend_quirks,
},
{
.compatible = "allwinner,sun8i-a23-display-backend",
.data = &sun8i_a33_backend_quirks,
},
{
.compatible = "allwinner,sun8i-a33-display-backend",
.data = &sun8i_a33_backend_quirks,
},
{
.compatible = "allwinner,sun9i-a80-display-backend",
.data = &sun9i_backend_quirks,
},
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
static struct platform_driver sun4i_backend_platform_driver = {
.probe = sun4i_backend_probe,
.remove_new = sun4i_backend_remove,
.driver = {
.name = "sun4i-backend",
.of_match_table = sun4i_backend_of_table,
},
};
module_platform_driver(sun4i_backend_platform_driver);
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Allwinner A10 Display Backend Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sun4i/sun4i_backend.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Icenowy Zheng <[email protected]>
*
* Based on sun4i_backend.c, which is:
* Copyright (C) 2015 Free Electrons
* Copyright (C) 2015 NextThing Co
*/
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_drv.h"
#include "sun8i_mixer.h"
#include "sun8i_ui_layer.h"
#include "sun8i_vi_layer.h"
#include "sunxi_engine.h"
struct de2_fmt_info {
u32 drm_fmt;
u32 de2_fmt;
};
static const struct de2_fmt_info de2_formats[] = {
{
.drm_fmt = DRM_FORMAT_ARGB8888,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB8888,
},
{
.drm_fmt = DRM_FORMAT_ABGR8888,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR8888,
},
{
.drm_fmt = DRM_FORMAT_RGBA8888,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA8888,
},
{
.drm_fmt = DRM_FORMAT_BGRA8888,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA8888,
},
{
.drm_fmt = DRM_FORMAT_XRGB8888,
.de2_fmt = SUN8I_MIXER_FBFMT_XRGB8888,
},
{
.drm_fmt = DRM_FORMAT_XBGR8888,
.de2_fmt = SUN8I_MIXER_FBFMT_XBGR8888,
},
{
.drm_fmt = DRM_FORMAT_RGBX8888,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBX8888,
},
{
.drm_fmt = DRM_FORMAT_BGRX8888,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRX8888,
},
{
.drm_fmt = DRM_FORMAT_RGB888,
.de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
},
{
.drm_fmt = DRM_FORMAT_BGR888,
.de2_fmt = SUN8I_MIXER_FBFMT_BGR888,
},
{
.drm_fmt = DRM_FORMAT_RGB565,
.de2_fmt = SUN8I_MIXER_FBFMT_RGB565,
},
{
.drm_fmt = DRM_FORMAT_BGR565,
.de2_fmt = SUN8I_MIXER_FBFMT_BGR565,
},
{
.drm_fmt = DRM_FORMAT_ARGB4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XRGB4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
},
{
.drm_fmt = DRM_FORMAT_ABGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XBGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
},
{
.drm_fmt = DRM_FORMAT_RGBA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_RGBX4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
},
{
.drm_fmt = DRM_FORMAT_BGRA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_BGRX4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
},
{
.drm_fmt = DRM_FORMAT_ARGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XRGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
},
{
.drm_fmt = DRM_FORMAT_ABGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XBGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
},
{
.drm_fmt = DRM_FORMAT_RGBA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_RGBX5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
},
{
.drm_fmt = DRM_FORMAT_BGRA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_BGRX5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
},
{
.drm_fmt = DRM_FORMAT_ARGB2101010,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
},
{
.drm_fmt = DRM_FORMAT_ABGR2101010,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
},
{
.drm_fmt = DRM_FORMAT_RGBA1010102,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
},
{
.drm_fmt = DRM_FORMAT_BGRA1010102,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
},
{
.drm_fmt = DRM_FORMAT_UYVY,
.de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
},
{
.drm_fmt = DRM_FORMAT_VYUY,
.de2_fmt = SUN8I_MIXER_FBFMT_VYUY,
},
{
.drm_fmt = DRM_FORMAT_YUYV,
.de2_fmt = SUN8I_MIXER_FBFMT_YUYV,
},
{
.drm_fmt = DRM_FORMAT_YVYU,
.de2_fmt = SUN8I_MIXER_FBFMT_YVYU,
},
{
.drm_fmt = DRM_FORMAT_NV16,
.de2_fmt = SUN8I_MIXER_FBFMT_NV16,
},
{
.drm_fmt = DRM_FORMAT_NV61,
.de2_fmt = SUN8I_MIXER_FBFMT_NV61,
},
{
.drm_fmt = DRM_FORMAT_NV12,
.de2_fmt = SUN8I_MIXER_FBFMT_NV12,
},
{
.drm_fmt = DRM_FORMAT_NV21,
.de2_fmt = SUN8I_MIXER_FBFMT_NV21,
},
{
.drm_fmt = DRM_FORMAT_YUV422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
},
{
.drm_fmt = DRM_FORMAT_YUV420,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
},
{
.drm_fmt = DRM_FORMAT_YUV411,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
},
{
.drm_fmt = DRM_FORMAT_YVU422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
},
{
.drm_fmt = DRM_FORMAT_YVU420,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
},
{
.drm_fmt = DRM_FORMAT_YVU411,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
},
{
.drm_fmt = DRM_FORMAT_P010,
.de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
},
{
.drm_fmt = DRM_FORMAT_P210,
.de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
},
};
int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(de2_formats); ++i)
if (de2_formats[i].drm_fmt == format) {
*hw_format = de2_formats[i].de2_fmt;
return 0;
}
return -EINVAL;
}
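/*
 * Minimal usage sketch (illustrative only; the real callers are the
 * UI/VI layer atomic update paths):
 *
 *	u32 hw_fmt;
 *
 *	if (sun8i_mixer_drm_format_to_hw(DRM_FORMAT_ARGB8888, &hw_fmt))
 *		return -EINVAL;
 */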
static void sun8i_mixer_commit(struct sunxi_engine *engine)
{
DRM_DEBUG_DRIVER("Committing changes\n");
regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_DBUFF,
SUN8I_MIXER_GLOBAL_DBUFF_ENABLE);
}
static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
struct sunxi_engine *engine)
{
struct drm_plane **planes;
struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine);
int i;
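	/*
	 * The extra slot stays zeroed, so the returned array is
	 * NULL-terminated for callers that walk it without a count.
	 */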
planes = devm_kcalloc(drm->dev,
mixer->cfg->vi_num + mixer->cfg->ui_num + 1,
sizeof(*planes), GFP_KERNEL);
if (!planes)
return ERR_PTR(-ENOMEM);
for (i = 0; i < mixer->cfg->vi_num; i++) {
struct sun8i_vi_layer *layer;
layer = sun8i_vi_layer_init_one(drm, mixer, i);
if (IS_ERR(layer)) {
dev_err(drm->dev,
"Couldn't initialize overlay plane\n");
return ERR_CAST(layer);
}
planes[i] = &layer->plane;
}
for (i = 0; i < mixer->cfg->ui_num; i++) {
struct sun8i_ui_layer *layer;
layer = sun8i_ui_layer_init_one(drm, mixer, i);
if (IS_ERR(layer)) {
dev_err(drm->dev, "Couldn't initialize %s plane\n",
i ? "overlay" : "primary");
return ERR_CAST(layer);
}
planes[mixer->cfg->vi_num + i] = &layer->plane;
}
return planes;
}
static void sun8i_mixer_mode_set(struct sunxi_engine *engine,
const struct drm_display_mode *mode)
{
struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine);
u32 bld_base, size, val;
bool interlaced;
bld_base = sun8i_blender_base(mixer);
interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
size = SUN8I_MIXER_SIZE(mode->hdisplay, mode->vdisplay);
DRM_DEBUG_DRIVER("Updating global size W: %u H: %u\n",
mode->hdisplay, mode->vdisplay);
regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_SIZE, size);
regmap_write(engine->regs, SUN8I_MIXER_BLEND_OUTSIZE(bld_base), size);
if (interlaced)
val = SUN8I_MIXER_BLEND_OUTCTL_INTERLACED;
else
val = 0;
regmap_update_bits(engine->regs, SUN8I_MIXER_BLEND_OUTCTL(bld_base),
SUN8I_MIXER_BLEND_OUTCTL_INTERLACED, val);
DRM_DEBUG_DRIVER("Switching display mixer interlaced mode %s\n",
interlaced ? "on" : "off");
}
static const struct sunxi_engine_ops sun8i_engine_ops = {
.commit = sun8i_mixer_commit,
.layers_init = sun8i_layers_init,
.mode_set = sun8i_mixer_mode_set,
};
static const struct regmap_config sun8i_mixer_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0xffffc, /* guessed */
};
static int sun8i_mixer_of_get_id(struct device_node *node)
{
struct device_node *ep, *remote;
struct of_endpoint of_ep;
/* Output port is 1, and we want the first endpoint. */
ep = of_graph_get_endpoint_by_regs(node, 1, -1);
if (!ep)
return -EINVAL;
remote = of_graph_get_remote_endpoint(ep);
of_node_put(ep);
if (!remote)
return -EINVAL;
of_graph_parse_endpoint(remote, &of_ep);
of_node_put(remote);
return of_ep.id;
}
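/*
 * Illustrative DT fragment (hypothetical labels) for the lookup above:
 * the mixer's output endpoint points at an endpoint of the TCON input
 * port, and that remote endpoint's "reg" becomes the mixer id.
 *
 *	mixer0_out_tcon0: endpoint {
 *		remote-endpoint = <&tcon0_in_mixer0>;
 *	};
 *	...
 *	tcon0_in_mixer0: endpoint@0 {
 *		reg = <0>;
 *		remote-endpoint = <&mixer0_out_tcon0>;
 *	};
 */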
static int sun8i_mixer_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun8i_mixer *mixer;
void __iomem *regs;
unsigned int base;
int plane_cnt;
int i, ret;
	/*
	 * The mixer uses a single 32-bit register to store memory
	 * addresses, so it cannot deal with 64-bit memory
	 * addresses.
	 * Restrict the DMA mask so that the mixer won't be
	 * allocated memory it cannot address.
	 */
ret = dma_set_mask(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "Cannot do 32-bit DMA.\n");
return ret;
}
mixer = devm_kzalloc(dev, sizeof(*mixer), GFP_KERNEL);
if (!mixer)
return -ENOMEM;
dev_set_drvdata(dev, mixer);
mixer->engine.ops = &sun8i_engine_ops;
mixer->engine.node = dev->of_node;
if (of_property_present(dev->of_node, "iommus")) {
		/*
		 * This assumes we have the same DMA constraints for
		 * all the mixers in our pipeline. This sounds bad,
		 * but it has always been the case for us, and DRM
		 * doesn't do per-device allocation either, so we
		 * would need to fix DRM first...
		 */
ret = of_dma_configure(drm->dev, dev->of_node, true);
if (ret)
return ret;
}
	/*
	 * While this function can fail, we shouldn't do anything
	 * if this happens. Some early DE2 DT entries don't provide
	 * a mixer id but work nevertheless because matching between
	 * TCON and mixer is done by comparing node pointers (the old
	 * way) instead of comparing ids. If this function fails and
	 * the id is needed, it will fail during id matching anyway.
	 */
mixer->engine.id = sun8i_mixer_of_get_id(dev->of_node);
mixer->cfg = of_device_get_match_data(dev);
if (!mixer->cfg)
return -EINVAL;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
mixer->engine.regs = devm_regmap_init_mmio(dev, regs,
&sun8i_mixer_regmap_config);
if (IS_ERR(mixer->engine.regs)) {
dev_err(dev, "Couldn't create the mixer regmap\n");
return PTR_ERR(mixer->engine.regs);
}
mixer->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(mixer->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
return PTR_ERR(mixer->reset);
}
ret = reset_control_deassert(mixer->reset);
if (ret) {
dev_err(dev, "Couldn't deassert our reset line\n");
return ret;
}
mixer->bus_clk = devm_clk_get(dev, "bus");
if (IS_ERR(mixer->bus_clk)) {
dev_err(dev, "Couldn't get the mixer bus clock\n");
ret = PTR_ERR(mixer->bus_clk);
goto err_assert_reset;
}
clk_prepare_enable(mixer->bus_clk);
mixer->mod_clk = devm_clk_get(dev, "mod");
if (IS_ERR(mixer->mod_clk)) {
dev_err(dev, "Couldn't get the mixer module clock\n");
ret = PTR_ERR(mixer->mod_clk);
goto err_disable_bus_clk;
}
	/*
	 * It seems that we need to enforce this rate for the mixer
	 * to be functional, for whatever reason. Make sure it's the
	 * case.
	 */
if (mixer->cfg->mod_rate)
clk_set_rate(mixer->mod_clk, mixer->cfg->mod_rate);
clk_prepare_enable(mixer->mod_clk);
list_add_tail(&mixer->engine.list, &drv->engine_list);
base = sun8i_blender_base(mixer);
/* Reset registers and disable unused sub-engines */
if (mixer->cfg->is_de3) {
for (i = 0; i < DE3_MIXER_UNIT_SIZE; i += 4)
regmap_write(mixer->engine.regs, i, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_FCE_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_PEAK_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_LCTI_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_BLS_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_FCC_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_DNS_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_DRC_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_FMT_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC0_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC1_EN, 0);
} else {
for (i = 0; i < DE2_MIXER_UNIT_SIZE; i += 4)
regmap_write(mixer->engine.regs, i, 0);
regmap_write(mixer->engine.regs, SUN8I_MIXER_FCE_EN, 0);
regmap_write(mixer->engine.regs, SUN8I_MIXER_BWS_EN, 0);
regmap_write(mixer->engine.regs, SUN8I_MIXER_LTI_EN, 0);
regmap_write(mixer->engine.regs, SUN8I_MIXER_PEAK_EN, 0);
regmap_write(mixer->engine.regs, SUN8I_MIXER_ASE_EN, 0);
regmap_write(mixer->engine.regs, SUN8I_MIXER_FCC_EN, 0);
regmap_write(mixer->engine.regs, SUN8I_MIXER_DCSC_EN, 0);
}
/* Enable the mixer */
regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_CTL,
SUN8I_MIXER_GLOBAL_CTL_RT_EN);
/* Set background color to black */
regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_BKCOLOR(base),
SUN8I_MIXER_BLEND_COLOR_BLACK);
/*
* Set fill color of bottom plane to black. Generally not needed
* except when VI plane is at bottom (zpos = 0) and enabled.
*/
regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, 0),
SUN8I_MIXER_BLEND_COLOR_BLACK);
plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
for (i = 0; i < plane_cnt; i++)
regmap_write(mixer->engine.regs,
SUN8I_MIXER_BLEND_MODE(base, i),
SUN8I_MIXER_BLEND_MODE_DEF);
regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
return 0;
err_disable_bus_clk:
clk_disable_unprepare(mixer->bus_clk);
err_assert_reset:
reset_control_assert(mixer->reset);
return ret;
}
static void sun8i_mixer_unbind(struct device *dev, struct device *master,
void *data)
{
struct sun8i_mixer *mixer = dev_get_drvdata(dev);
list_del(&mixer->engine.list);
clk_disable_unprepare(mixer->mod_clk);
clk_disable_unprepare(mixer->bus_clk);
reset_control_assert(mixer->reset);
}
static const struct component_ops sun8i_mixer_ops = {
.bind = sun8i_mixer_bind,
.unbind = sun8i_mixer_unbind,
};
static int sun8i_mixer_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &sun8i_mixer_ops);
}
static void sun8i_mixer_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun8i_mixer_ops);
}
static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
.mod_rate = 432000000,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
.mod_rate = 297000000,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.vi_num = 2,
.ui_num = 1,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
.ccsc = CCSC_MIXER0_LAYOUT,
.mod_rate = 150000000,
};
static const struct sun8i_mixer_cfg sun20i_d1_mixer0_cfg = {
.ccsc = CCSC_D1_MIXER0_LAYOUT,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun20i_d1_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
.mod_rate = 297000000,
.scaler_mask = 0x1,
.scanline_yuv = 1024,
.ui_num = 0,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
.mod_rate = 297000000,
.scaler_mask = 0xf,
.scanline_yuv = 4096,
.ui_num = 3,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
.is_de3 = true,
.mod_rate = 600000000,
.scaler_mask = 0xf,
.scanline_yuv = 4096,
.ui_num = 3,
.vi_num = 1,
};
static const struct of_device_id sun8i_mixer_of_table[] = {
{
.compatible = "allwinner,sun8i-a83t-de2-mixer-0",
.data = &sun8i_a83t_mixer0_cfg,
},
{
.compatible = "allwinner,sun8i-a83t-de2-mixer-1",
.data = &sun8i_a83t_mixer1_cfg,
},
{
.compatible = "allwinner,sun8i-h3-de2-mixer-0",
.data = &sun8i_h3_mixer0_cfg,
},
{
.compatible = "allwinner,sun8i-r40-de2-mixer-0",
.data = &sun8i_r40_mixer0_cfg,
},
{
.compatible = "allwinner,sun8i-r40-de2-mixer-1",
.data = &sun8i_r40_mixer1_cfg,
},
{
.compatible = "allwinner,sun8i-v3s-de2-mixer",
.data = &sun8i_v3s_mixer_cfg,
},
{
.compatible = "allwinner,sun20i-d1-de2-mixer-0",
.data = &sun20i_d1_mixer0_cfg,
},
{
.compatible = "allwinner,sun20i-d1-de2-mixer-1",
.data = &sun20i_d1_mixer1_cfg,
},
{
.compatible = "allwinner,sun50i-a64-de2-mixer-0",
.data = &sun50i_a64_mixer0_cfg,
},
{
.compatible = "allwinner,sun50i-a64-de2-mixer-1",
.data = &sun50i_a64_mixer1_cfg,
},
{
.compatible = "allwinner,sun50i-h6-de3-mixer-0",
.data = &sun50i_h6_mixer0_cfg,
},
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table);
static struct platform_driver sun8i_mixer_platform_driver = {
.probe = sun8i_mixer_probe,
.remove_new = sun8i_mixer_remove,
.driver = {
.name = "sun8i-mixer",
.of_match_table = sun8i_mixer_of_table,
},
};
module_platform_driver(sun8i_mixer_platform_driver);
MODULE_AUTHOR("Icenowy Zheng <[email protected]>");
MODULE_DESCRIPTION("Allwinner DE2 Mixer driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sun4i/sun8i_mixer.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Free Electrons
* Copyright (C) 2016 NextThing Co
*
* Maxime Ripard <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/io.h>
#include "sun4i_hdmi.h"
struct sun4i_tmds {
struct clk_hw hw;
struct sun4i_hdmi *hdmi;
u8 div_offset;
};
static inline struct sun4i_tmds *hw_to_tmds(struct clk_hw *hw)
{
return container_of(hw, struct sun4i_tmds, hw);
}
static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
unsigned long parent_rate,
u8 div_offset,
u8 *div,
bool *half)
{
unsigned long best_rate = 0;
u8 best_m = 0, m;
bool is_double = false;
for (m = div_offset ?: 1; m < (16 + div_offset); m++) {
u8 d;
for (d = 1; d < 3; d++) {
unsigned long tmp_rate;
tmp_rate = parent_rate / m / d;
if (tmp_rate > rate)
continue;
if (!best_rate ||
(rate - tmp_rate) < (rate - best_rate)) {
best_rate = tmp_rate;
best_m = m;
				is_double = (d == 2);
}
}
}
if (div && half) {
*div = best_m;
*half = is_double;
}
return best_rate;
}
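/*
 * Worked example (illustrative): with parent_rate = 297 MHz,
 * rate = 148.5 MHz and div_offset = 0, m = 1/d = 1 gives 297 MHz and is
 * skipped for overshooting, while m = 1/d = 2 hits 148.5 MHz exactly,
 * so the function returns 148.5 MHz with *div = 1 and *half = true.
 */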
static int sun4i_tmds_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct sun4i_tmds *tmds = hw_to_tmds(hw);
struct clk_hw *parent = NULL;
unsigned long best_parent = 0;
unsigned long rate = req->rate;
int best_div = 1, best_half = 1;
int i, j, p;
	/*
	 * We only consider PLL3, since the TCON is very likely to be
	 * clocked from it, and to have the same rate as our HDMI
	 * clock, so we should not need to do anything.
	 */
for (p = 0; p < clk_hw_get_num_parents(hw); p++) {
parent = clk_hw_get_parent_by_index(hw, p);
if (!parent)
continue;
for (i = 1; i < 3; i++) {
for (j = tmds->div_offset ?: 1;
j < (16 + tmds->div_offset); j++) {
unsigned long ideal = rate * i * j;
unsigned long rounded;
rounded = clk_hw_round_rate(parent, ideal);
if (rounded == ideal) {
best_parent = rounded;
best_half = i;
best_div = j;
goto out;
}
if (!best_parent ||
abs(rate - rounded / i / j) <
abs(rate - best_parent / best_half /
best_div)) {
best_parent = rounded;
best_half = i;
best_div = j;
}
}
}
}
if (!parent)
return -EINVAL;
out:
req->rate = best_parent / best_half / best_div;
req->best_parent_rate = best_parent;
req->best_parent_hw = parent;
return 0;
}
static unsigned long sun4i_tmds_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct sun4i_tmds *tmds = hw_to_tmds(hw);
u32 reg;
reg = readl(tmds->hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
if (reg & SUN4I_HDMI_PAD_CTRL1_HALVE_CLK)
parent_rate /= 2;
reg = readl(tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
reg = ((reg >> 4) & 0xf) + tmds->div_offset;
if (!reg)
reg = 1;
return parent_rate / reg;
}
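/*
 * Illustrative decode: with the parent at 297 MHz, HALVE_CLK set and a
 * raw divider field of 1 (div_offset = 0), the TMDS rate is
 * 297 / 2 / 1 = 148.5 MHz. A raw field of 0 is clamped to a divider of 1.
 */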
static int sun4i_tmds_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct sun4i_tmds *tmds = hw_to_tmds(hw);
bool half;
u32 reg;
u8 div;
sun4i_tmds_calc_divider(rate, parent_rate, tmds->div_offset,
&div, &half);
reg = readl(tmds->hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
reg &= ~SUN4I_HDMI_PAD_CTRL1_HALVE_CLK;
if (half)
reg |= SUN4I_HDMI_PAD_CTRL1_HALVE_CLK;
writel(reg, tmds->hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
reg = readl(tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
reg &= ~SUN4I_HDMI_PLL_CTRL_DIV_MASK;
writel(reg | SUN4I_HDMI_PLL_CTRL_DIV(div - tmds->div_offset),
tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
return 0;
}
static u8 sun4i_tmds_get_parent(struct clk_hw *hw)
{
struct sun4i_tmds *tmds = hw_to_tmds(hw);
u32 reg;
reg = readl(tmds->hdmi->base + SUN4I_HDMI_PLL_DBG0_REG);
return ((reg & SUN4I_HDMI_PLL_DBG0_TMDS_PARENT_MASK) >>
SUN4I_HDMI_PLL_DBG0_TMDS_PARENT_SHIFT);
}
static int sun4i_tmds_set_parent(struct clk_hw *hw, u8 index)
{
struct sun4i_tmds *tmds = hw_to_tmds(hw);
u32 reg;
if (index > 1)
return -EINVAL;
reg = readl(tmds->hdmi->base + SUN4I_HDMI_PLL_DBG0_REG);
reg &= ~SUN4I_HDMI_PLL_DBG0_TMDS_PARENT_MASK;
writel(reg | SUN4I_HDMI_PLL_DBG0_TMDS_PARENT(index),
tmds->hdmi->base + SUN4I_HDMI_PLL_DBG0_REG);
return 0;
}
static const struct clk_ops sun4i_tmds_ops = {
.determine_rate = sun4i_tmds_determine_rate,
.recalc_rate = sun4i_tmds_recalc_rate,
.set_rate = sun4i_tmds_set_rate,
.get_parent = sun4i_tmds_get_parent,
.set_parent = sun4i_tmds_set_parent,
};
int sun4i_tmds_create(struct sun4i_hdmi *hdmi)
{
struct clk_init_data init;
struct sun4i_tmds *tmds;
const char *parents[2];
parents[0] = __clk_get_name(hdmi->pll0_clk);
if (!parents[0])
return -ENODEV;
parents[1] = __clk_get_name(hdmi->pll1_clk);
if (!parents[1])
return -ENODEV;
tmds = devm_kzalloc(hdmi->dev, sizeof(*tmds), GFP_KERNEL);
if (!tmds)
return -ENOMEM;
init.name = "hdmi-tmds";
init.ops = &sun4i_tmds_ops;
init.parent_names = parents;
init.num_parents = 2;
init.flags = CLK_SET_RATE_PARENT;
tmds->hdmi = hdmi;
tmds->hw.init = &init;
tmds->div_offset = hdmi->variant->tmds_clk_div_offset;
hdmi->tmds_clk = devm_clk_register(hdmi->dev, &tmds->hw);
if (IS_ERR(hdmi->tmds_clk))
return PTR_ERR(hdmi->tmds_clk);
return 0;
}
| linux-master | drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c |
/*
* Copyright (C) 2017 Jernej Skrabec <[email protected]>
*
* Coefficients are taken from BSP driver, which is:
* Copyright (C) 2014-2015 Allwinner
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include "sun8i_vi_scaler.h"
static const u32 lan3coefftab32_left[480] = {
0x40000000, 0x40fe0000, 0x3ffd0100, 0x3efc0100,
0x3efb0100, 0x3dfa0200, 0x3cf90200, 0x3bf80200,
0x39f70200, 0x37f70200, 0x35f70200, 0x33f70200,
0x31f70200, 0x2ef70200, 0x2cf70200, 0x2af70200,
0x27f70200, 0x24f80100, 0x22f80100, 0x1ef90100,
0x1cf90100, 0x19fa0100, 0x17fa0100, 0x14fb0100,
0x11fc0000, 0x0ffc0000, 0x0cfd0000, 0x0afd0000,
0x08fe0000, 0x05ff0000, 0x03ff0000, 0x02000000,
0x3806fc02, 0x3805fc02, 0x3803fd01, 0x3801fe01,
0x3700fe01, 0x35ffff01, 0x35fdff01, 0x34fc0001,
0x34fb0000, 0x33fa0000, 0x31fa0100, 0x2ff90100,
0x2df80200, 0x2bf80200, 0x2af70200, 0x28f70200,
0x27f70200, 0x24f70300, 0x22f70300, 0x1ff70300,
0x1ef70300, 0x1cf70300, 0x1af70300, 0x18f70300,
0x16f80300, 0x13f80300, 0x11f90300, 0x0ef90300,
0x0efa0200, 0x0cfa0200, 0x0afb0200, 0x08fb0200,
0x320bfa02, 0x3309fa02, 0x3208fb02, 0x3206fb02,
0x3205fb02, 0x3104fc02, 0x3102fc01, 0x3001fd01,
0x3000fd01, 0x2ffffd01, 0x2efefe01, 0x2dfdfe01,
0x2bfcff01, 0x29fcff01, 0x28fbff01, 0x27fa0001,
0x26fa0000, 0x24f90000, 0x22f90100, 0x20f90100,
0x1ff80100, 0x1ef80100, 0x1cf80100, 0x1af80200,
0x18f80200, 0x17f80200, 0x15f80200, 0x12f80200,
0x11f90200, 0x0ff90200, 0x0df90200, 0x0cfa0200,
0x2e0efa01, 0x2f0dfa01, 0x2f0bfa01, 0x2e0afa01,
0x2e09fa01, 0x2e07fb01, 0x2d06fb01, 0x2d05fb01,
0x2c04fb01, 0x2b03fc01, 0x2a02fc01, 0x2a01fc01,
0x2800fd01, 0x28fffd01, 0x26fefd01, 0x25fefe01,
0x24fdfe01, 0x23fcfe01, 0x21fcff01, 0x20fbff01,
0x1efbff01, 0x1efbff00, 0x1cfa0000, 0x1bfa0000,
0x19fa0000, 0x18fa0000, 0x17f90000, 0x15f90100,
0x14f90100, 0x12f90100, 0x11f90100, 0x0ff90100,
0x2b10fa00, 0x2b0ffa00, 0x2b0efa00, 0x2b0cfa00,
0x2b0bfa00, 0x2a0afb01, 0x2a09fb01, 0x2908fb01,
0x2807fb01, 0x2806fb01, 0x2805fb01, 0x2604fc01,
0x2503fc01, 0x2502fc01, 0x2401fc01, 0x2301fc01,
0x2100fd01, 0x21fffd01, 0x21fffd01, 0x20fefd01,
0x1dfefe01, 0x1cfdfe01, 0x1cfdfe00, 0x1bfcfe00,
0x19fcff00, 0x19fbff00, 0x17fbff00, 0x16fbff00,
0x15fbff00, 0x14fb0000, 0x13fa0000, 0x11fa0000,
0x2811fcff, 0x2810fcff, 0x280ffbff, 0x280efbff,
0x270dfb00, 0x270cfb00, 0x270bfb00, 0x260afb00,
0x2609fb00, 0x2508fb00, 0x2507fb00, 0x2407fb00,
0x2406fc00, 0x2305fc00, 0x2204fc00, 0x2203fc00,
0x2103fc00, 0x2002fc00, 0x1f01fd00, 0x1e01fd00,
0x1d00fd00, 0x1dfffd00, 0x1cfffd00, 0x1bfefd00,
0x1afefe00, 0x19fefe00, 0x18fdfe00, 0x17fdfe00,
0x16fdfe00, 0x15fcff00, 0x13fcff00, 0x12fcff00,
0x2512fdfe, 0x2511fdff, 0x2410fdff, 0x240ffdff,
0x240efcff, 0x240dfcff, 0x240dfcff, 0x240cfcff,
0x230bfcff, 0x230afc00, 0x2209fc00, 0x2108fc00,
0x2108fc00, 0x2007fc00, 0x2006fc00, 0x2005fc00,
0x1f05fc00, 0x1e04fc00, 0x1e03fc00, 0x1c03fd00,
0x1c02fd00, 0x1b02fd00, 0x1b01fd00, 0x1a00fd00,
0x1900fd00, 0x1800fd00, 0x17fffe00, 0x16fffe00,
0x16fefe00, 0x14fefe00, 0x13fefe00, 0x13fdfe00,
0x2212fffe, 0x2211fefe, 0x2211fefe, 0x2110fefe,
0x210ffeff, 0x220efdff, 0x210dfdff, 0x210dfdff,
0x210cfdff, 0x210bfdff, 0x200afdff, 0x200afdff,
0x1f09fdff, 0x1f08fdff, 0x1d08fd00, 0x1c07fd00,
0x1d06fd00, 0x1b06fd00, 0x1b05fd00, 0x1c04fd00,
0x1b04fd00, 0x1a03fd00, 0x1a03fd00, 0x1902fd00,
0x1802fd00, 0x1801fd00, 0x1701fd00, 0x1600fd00,
0x1400fe00, 0x1400fe00, 0x14fffe00, 0x13fffe00,
0x201200fe, 0x201100fe, 0x1f11fffe, 0x2010fffe,
0x1f0ffffe, 0x1e0ffffe, 0x1f0efeff, 0x1f0dfeff,
0x1f0dfeff, 0x1e0cfeff, 0x1e0bfeff, 0x1d0bfeff,
0x1d0afeff, 0x1d09fdff, 0x1d09fdff, 0x1c08fdff,
0x1c07fdff, 0x1b07fd00, 0x1b06fd00, 0x1a06fd00,
0x1a05fd00, 0x1805fd00, 0x1904fd00, 0x1804fd00,
0x1703fd00, 0x1703fd00, 0x1602fe00, 0x1502fe00,
0x1501fe00, 0x1401fe00, 0x1301fe00, 0x1300fe00,
0x1c1202fe, 0x1c1102fe, 0x1b1102fe, 0x1c1001fe,
0x1b1001fe, 0x1b0f01ff, 0x1b0e00ff, 0x1b0e00ff,
0x1b0d00ff, 0x1a0d00ff, 0x1a0c00ff, 0x1a0cffff,
0x1a0bffff, 0x1a0bffff, 0x1a0affff, 0x180affff,
0x1909ffff, 0x1809ffff, 0x1808ffff, 0x1808feff,
0x1807feff, 0x1707fe00, 0x1606fe00, 0x1506fe00,
0x1605fe00, 0x1505fe00, 0x1504fe00, 0x1304fe00,
0x1304fe00, 0x1303fe00, 0x1203fe00, 0x1203fe00,
0x181104ff, 0x191103ff, 0x191003ff, 0x181003ff,
0x180f03ff, 0x190f02ff, 0x190e02ff, 0x180e02ff,
0x180d02ff, 0x180d01ff, 0x180d01ff, 0x180c01ff,
0x180c01ff, 0x180b00ff, 0x170b00ff, 0x170a00ff,
0x170a00ff, 0x170900ff, 0x160900ff, 0x160900ff,
0x1608ffff, 0x1508ffff, 0x1507ff00, 0x1507ff00,
0x1407ff00, 0x1306ff00, 0x1306ff00, 0x1305ff00,
0x1205ff00, 0x1105ff00, 0x1204ff00, 0x1104ff00,
0x171005ff, 0x171005ff, 0x171004ff, 0x170f04ff,
0x160f04ff, 0x170f03ff, 0x170e03ff, 0x160e03ff,
0x160d03ff, 0x160d02ff, 0x160d02ff, 0x160c02ff,
0x160c02ff, 0x160c02ff, 0x160b01ff, 0x150b01ff,
0x150a01ff, 0x150a01ff, 0x150a01ff, 0x140901ff,
0x14090000, 0x14090000, 0x14080000, 0x13080000,
0x13070000, 0x12070000, 0x12070000, 0x12060000,
0x11060000, 0x11060000, 0x11050000, 0x1105ff00,
0x14100600, 0x15100500, 0x150f0500, 0x150f0500,
0x140f0500, 0x150e0400, 0x140e0400, 0x130e0400,
0x140d0400, 0x150d0300, 0x130d0300, 0x140c0300,
0x140c0300, 0x140c0200, 0x140b0200, 0x130b0200,
0x120b0200, 0x130a0200, 0x130a0200, 0x130a0100,
0x13090100, 0x12090100, 0x11090100, 0x12080100,
0x11080100, 0x10080100, 0x11070100, 0x11070000,
0x10070000, 0x11060000, 0x10060000, 0x10060000,
0x140f0600, 0x140f0600, 0x130f0600, 0x140f0500,
0x140e0500, 0x130e0500, 0x130e0500, 0x140d0400,
0x140d0400, 0x130d0400, 0x120d0400, 0x130c0400,
0x130c0300, 0x130c0300, 0x130b0300, 0x130b0300,
0x110b0300, 0x130a0200, 0x120a0200, 0x120a0200,
0x120a0200, 0x12090200, 0x10090200, 0x11090100,
0x11080100, 0x11080100, 0x10080100, 0x10080100,
0x10070100, 0x10070100, 0x0f070100, 0x10060100,
0x120f0701, 0x130f0601, 0x130e0601, 0x130e0601,
0x120e0601, 0x130e0501, 0x130e0500, 0x130d0500,
0x120d0500, 0x120d0500, 0x130c0400, 0x130c0400,
0x120c0400, 0x110c0400, 0x120b0400, 0x120b0300,
0x120b0300, 0x120b0300, 0x120a0300, 0x110a0300,
0x110a0200, 0x11090200, 0x11090200, 0x10090200,
0x10090200, 0x10080200, 0x10080200, 0x10080100,
0x0f080100, 0x10070100, 0x0f070100, 0x0f070100
};
static const u32 lan3coefftab32_right[480] = {
0x00000000, 0x00000002, 0x0000ff04, 0x0000ff06,
0x0000fe08, 0x0000fd0a, 0x0000fd0c, 0x0000fc0f,
0x0000fc12, 0x0001fb14, 0x0001fa17, 0x0001fa19,
0x0001f91c, 0x0001f91f, 0x0001f822, 0x0001f824,
0x0002f727, 0x0002f72a, 0x0002f72c, 0x0002f72f,
0x0002f731, 0x0002f733, 0x0002f735, 0x0002f737,
0x0002f73a, 0x0002f83b, 0x0002f93c, 0x0002fa3d,
0x0001fb3e, 0x0001fc3f, 0x0001fd40, 0x0000fe40,
0x0002fc06, 0x0002fb08, 0x0002fb0a, 0x0002fa0c,
0x0002fa0e, 0x0003f910, 0x0003f912, 0x0003f814,
0x0003f816, 0x0003f719, 0x0003f71a, 0x0003f71d,
0x0003f71f, 0x0003f721, 0x0003f723, 0x0003f725,
0x0002f727, 0x0002f729, 0x0002f72b, 0x0002f82d,
0x0002f82e, 0x0001f930, 0x0001fa31, 0x0000fa34,
0x0000fb34, 0x0100fc35, 0x01fffd36, 0x01ffff37,
0x01fe0037, 0x01fe0138, 0x01fd0338, 0x02fc0538,
0x0002fa0b, 0x0002fa0c, 0x0002f90e, 0x0002f910,
0x0002f911, 0x0002f813, 0x0002f816, 0x0002f817,
0x0002f818, 0x0002f81a, 0x0001f81c, 0x0001f81e,
0x0001f820, 0x0001f921, 0x0001f923, 0x0000f925,
0x0000fa26, 0x0100fa28, 0x01fffb29, 0x01fffc2a,
0x01fffc2c, 0x01fefd2d, 0x01fefe2e, 0x01fdff2f,
0x01fd0030, 0x01fd0130, 0x01fc0232, 0x02fc0432,
0x02fb0532, 0x02fb0633, 0x02fb0833, 0x02fa0933,
0x0001fa0e, 0x0001f90f, 0x0001f911, 0x0001f913,
0x0001f914, 0x0001f915, 0x0000f918, 0x0000fa18,
0x0000fa1a, 0x0000fa1b, 0x0000fa1d, 0x00fffb1e,
0x01fffb1f, 0x01fffb20, 0x01fffc22, 0x01fefc23,
0x01fefd24, 0x01fefe25, 0x01fdfe27, 0x01fdff28,
0x01fd0029, 0x01fc012a, 0x01fc022b, 0x01fc032b,
0x01fb042d, 0x01fb052d, 0x01fb062e, 0x01fb072e,
0x01fa092e, 0x01fa0a2f, 0x01fa0b2f, 0x01fa0d2f,
0x0000fa11, 0x0000fa12, 0x0000fa13, 0x0000fb14,
0x00fffb16, 0x00fffb16, 0x00fffb17, 0x00fffb19,
0x00fffc1a, 0x00fefc1c, 0x00fefd1c, 0x01fefd1d,
0x01fefe1e, 0x01fdfe20, 0x01fdff21, 0x01fdff22,
0x01fd0023, 0x01fc0124, 0x01fc0124, 0x01fc0225,
0x01fc0326, 0x01fc0427, 0x01fb0528, 0x01fb0629,
0x01fb0729, 0x01fb0829, 0x01fb092a, 0x01fb0a2a,
0x00fa0b2c, 0x00fa0c2b, 0x00fa0e2b, 0x00fa0f2c,
0x00fffc11, 0x00fffc12, 0x00fffc14, 0x00fffc15,
0x00fefd16, 0x00fefd17, 0x00fefd18, 0x00fefe19,
0x00fefe1a, 0x00fdfe1d, 0x00fdff1d, 0x00fdff1e,
0x00fd001d, 0x00fd011e, 0x00fd0120, 0x00fc0221,
0x00fc0321, 0x00fc0323, 0x00fc0423, 0x00fc0523,
0x00fc0624, 0x00fb0725, 0x00fb0726, 0x00fb0827,
0x00fb0926, 0x00fb0a26, 0x00fb0b27, 0x00fb0c27,
0x00fb0d27, 0xfffb0e28, 0xfffb0f29, 0xfffc1028,
0x00fefd13, 0x00fefd13, 0x00fefe14, 0x00fefe15,
0x00fefe17, 0x00feff17, 0x00feff17, 0x00fd0018,
0x00fd001a, 0x00fd001a, 0x00fd011b, 0x00fd021c,
0x00fd021c, 0x00fd031d, 0x00fc031f, 0x00fc041f,
0x00fc051f, 0x00fc0521, 0x00fc0621, 0x00fc0721,
0x00fc0821, 0x00fc0822, 0x00fc0922, 0x00fc0a23,
0xfffc0b24, 0xfffc0c24, 0xfffc0d24, 0xfffc0d25,
0xfffc0e25, 0xfffd0f25, 0xfffd1025, 0xfffd1125,
0x00feff12, 0x00feff14, 0x00feff14, 0x00fe0015,
0x00fe0015, 0x00fd0017, 0x00fd0118, 0x00fd0118,
0x00fd0218, 0x00fd0219, 0x00fd031a, 0x00fd031a,
0x00fd041b, 0x00fd041c, 0x00fd051c, 0x00fd061d,
0x00fd061d, 0x00fd071e, 0x00fd081e, 0xfffd081f,
0xfffd091f, 0xfffd0a20, 0xfffd0a20, 0xfffd0b21,
0xfffd0c21, 0xfffd0d21, 0xfffd0d22, 0xfffd0e23,
0xfffe0f22, 0xfefe1022, 0xfefe1122, 0xfefe1123,
0x00fe0012, 0x00fe0013, 0x00fe0114, 0x00fe0114,
0x00fe0116, 0x00fe0216, 0x00fe0216, 0x00fd0317,
0x00fd0317, 0x00fd0418, 0x00fd0419, 0x00fd0519,
0x00fd051a, 0x00fd061b, 0x00fd061b, 0x00fd071c,
0xfffd071e, 0xfffd081d, 0xfffd091d, 0xfffd091e,
0xfffe0a1d, 0xfffe0b1e, 0xfffe0b1e, 0xfffe0c1e,
0xfffe0d1f, 0xfffe0d1f, 0xfffe0e1f, 0xfeff0f1f,
0xfeff0f20, 0xfeff1020, 0xfeff1120, 0xfe001120,
0x00fe0212, 0x00fe0312, 0x00fe0313, 0x00fe0314,
0x00fe0414, 0x00fe0414, 0x00fe0416, 0x00fe0515,
0x00fe0516, 0x00fe0616, 0x00fe0617, 0x00fe0717,
0xfffe0719, 0xfffe0818, 0xffff0818, 0xffff0919,
0xffff0919, 0xffff0a19, 0xffff0a1a, 0xffff0b1a,
0xffff0b1b, 0xffff0c1a, 0xff000c1b, 0xff000d1b,
0xff000d1b, 0xff000e1b, 0xff000e1c, 0xff010f1c,
0xfe01101c, 0xfe01101d, 0xfe02111c, 0xfe02111c,
0x00ff0411, 0x00ff0411, 0x00ff0412, 0x00ff0512,
0x00ff0513, 0x00ff0513, 0x00ff0613, 0x00ff0614,
0x00ff0714, 0x00ff0715, 0x00ff0715, 0xffff0816,
0xffff0816, 0xff000916, 0xff000917, 0xff000918,
0xff000a17, 0xff000a18, 0xff000b18, 0xff000b18,
0xff010c18, 0xff010c19, 0xff010d18, 0xff010d18,
0xff020d18, 0xff020e19, 0xff020e19, 0xff020f19,
0xff030f19, 0xff031019, 0xff031019, 0xff031119,
0x00ff0511, 0x00ff0511, 0x00000511, 0x00000611,
0x00000612, 0x00000612, 0x00000712, 0x00000713,
0x00000714, 0x00000814, 0x00000814, 0x00000914,
0x00000914, 0xff010914, 0xff010a15, 0xff010a16,
0xff010a17, 0xff010b16, 0xff010b16, 0xff020c16,
0xff020c16, 0xff020c16, 0xff020d16, 0xff020d17,
0xff030d17, 0xff030e17, 0xff030e17, 0xff030f17,
0xff040f17, 0xff040f17, 0xff041017, 0xff051017,
0x00000610, 0x00000610, 0x00000611, 0x00000611,
0x00000711, 0x00000712, 0x00010712, 0x00010812,
0x00010812, 0x00010812, 0x00010913, 0x00010913,
0x00010913, 0x00010a13, 0x00020a13, 0x00020a14,
0x00020b14, 0x00020b14, 0x00020b14, 0x00020c14,
0x00030c14, 0x00030c15, 0x00030d15, 0x00030d15,
0x00040d15, 0x00040e15, 0x00040e15, 0x00040e16,
0x00050f15, 0x00050f15, 0x00050f16, 0x00051015,
0x00000611, 0x00010610, 0x00010710, 0x00010710,
0x00010711, 0x00010811, 0x00010811, 0x00010812,
0x00010812, 0x00010912, 0x00020912, 0x00020912,
0x00020a12, 0x00020a12, 0x00020a13, 0x00020a13,
0x00030b13, 0x00030b13, 0x00030b14, 0x00030c13,
0x00030c13, 0x00040c13, 0x00040d14, 0x00040d14,
0x00040d15, 0x00040d15, 0x00050e14, 0x00050e14,
0x00050e15, 0x00050f14, 0x00060f14, 0x00060f14,
0x0001070f, 0x0001070f, 0x00010710, 0x00010710,
0x00010810, 0x00010810, 0x00020810, 0x00020811,
0x00020911, 0x00020911, 0x00020912, 0x00020912,
0x00020a12, 0x00030a12, 0x00030a12, 0x00030b12,
0x00030b12, 0x00030b12, 0x00040b12, 0x00040c12,
0x00040c13, 0x00040c14, 0x00040c14, 0x00050d13,
0x00050d13, 0x00050d14, 0x00050e13, 0x01050e13,
0x01060e13, 0x01060e13, 0x01060e14, 0x01060f13
};
static const u32 lan2coefftab32[480] = {
0x00004000, 0x000140ff, 0x00033ffe, 0x00043ffd,
0x00063efc, 0xff083dfc, 0x000a3bfb, 0xff0d39fb,
0xff0f37fb, 0xff1136fa, 0xfe1433fb, 0xfe1631fb,
0xfd192ffb, 0xfd1c2cfb, 0xfd1f29fb, 0xfc2127fc,
0xfc2424fc, 0xfc2721fc, 0xfb291ffd, 0xfb2c1cfd,
0xfb2f19fd, 0xfb3116fe, 0xfb3314fe, 0xfa3611ff,
0xfb370fff, 0xfb390dff, 0xfb3b0a00, 0xfc3d08ff,
0xfc3e0600, 0xfd3f0400, 0xfe3f0300, 0xff400100,
0xff053804, 0xff063803, 0xff083801, 0xff093701,
0xff0a3700, 0xff0c3500, 0xff0e34ff, 0xff1033fe,
0xff1232fd, 0xfe1431fd, 0xfe162ffd, 0xfe182dfd,
0xfd1b2cfc, 0xfd1d2afc, 0xfd1f28fc, 0xfd2126fc,
0xfd2323fd, 0xfc2621fd, 0xfc281ffd, 0xfc2a1dfd,
0xfc2c1bfd, 0xfd2d18fe, 0xfd2f16fe, 0xfd3114fe,
0xfd3212ff, 0xfe3310ff, 0xff340eff, 0x00350cff,
0x00360a00, 0x01360900, 0x02370700, 0x03370600,
0xff083207, 0xff093206, 0xff0a3205, 0xff0c3203,
0xff0d3103, 0xff0e3102, 0xfe113001, 0xfe132f00,
0xfe142e00, 0xfe162dff, 0xfe182bff, 0xfe192aff,
0xfe1b29fe, 0xfe1d27fe, 0xfe1f25fe, 0xfd2124fe,
0xfe2222fe, 0xfe2421fd, 0xfe251ffe, 0xfe271dfe,
0xfe291bfe, 0xff2a19fe, 0xff2b18fe, 0xff2d16fe,
0x002e14fe, 0x002f12ff, 0x013010ff, 0x02300fff,
0x03310dff, 0x04310cff, 0x05310a00, 0x06310900,
0xff0a2e09, 0xff0b2e08, 0xff0c2e07, 0xff0e2d06,
0xff0f2d05, 0xff102d04, 0xff122c03, 0xfe142c02,
0xfe152b02, 0xfe172a01, 0xfe182901, 0xfe1a2800,
0xfe1b2700, 0xfe1d2500, 0xff1e24ff, 0xfe2023ff,
0xff2121ff, 0xff2320fe, 0xff241eff, 0x00251dfe,
0x00261bff, 0x00281afe, 0x012818ff, 0x012a16ff,
0x022a15ff, 0x032b13ff, 0x032c12ff, 0x052c10ff,
0x052d0fff, 0x062d0d00, 0x072d0c00, 0x082d0b00,
0xff0c2a0b, 0xff0d2a0a, 0xff0e2a09, 0xff0f2a08,
0xff102a07, 0xff112a06, 0xff132905, 0xff142904,
0xff162803, 0xff172703, 0xff182702, 0xff1a2601,
0xff1b2501, 0xff1c2401, 0xff1e2300, 0xff1f2200,
0x00202000, 0x00211f00, 0x01221d00, 0x01231c00,
0x01251bff, 0x02251aff, 0x032618ff, 0x032717ff,
0x042815ff, 0x052814ff, 0x052913ff, 0x06291100,
0x072a10ff, 0x082a0e00, 0x092a0d00, 0x0a2a0c00,
0xff0d280c, 0xff0e280b, 0xff0f280a, 0xff102809,
0xff112808, 0xff122708, 0xff142706, 0xff152705,
0xff162605, 0xff172604, 0xff192503, 0xff1a2403,
0x001b2302, 0x001c2202, 0x001d2201, 0x001e2101,
0x011f1f01, 0x01211e00, 0x01221d00, 0x02221c00,
0x02231b00, 0x03241900, 0x04241800, 0x04251700,
0x052616ff, 0x06261400, 0x072713ff, 0x08271100,
0x08271100, 0x09271000, 0x0a280e00, 0x0b280d00,
0xff0e260d, 0xff0f260c, 0xff10260b, 0xff11260a,
0xff122609, 0xff132608, 0xff142508, 0xff152507,
0x00152506, 0x00172405, 0x00182305, 0x00192304,
0x001b2203, 0x001c2103, 0x011d2002, 0x011d2002,
0x011f1f01, 0x021f1e01, 0x02201d01, 0x03211c00,
0x03221b00, 0x04221a00, 0x04231801, 0x05241700,
0x06241600, 0x07241500, 0x08251300, 0x09251200,
0x09261100, 0x0a261000, 0x0b260f00, 0x0c260e00,
0xff0e250e, 0xff0f250d, 0xff10250c, 0xff11250b,
0x0011250a, 0x00132409, 0x00142408, 0x00152407,
0x00162307, 0x00172306, 0x00182206, 0x00192205,
0x011a2104, 0x011b2004, 0x011c2003, 0x021c1f03,
0x021e1e02, 0x031e1d02, 0x03201c01, 0x04201b01,
0x04211a01, 0x05221900, 0x05221801, 0x06231700,
0x07231600, 0x07241500, 0x08241400, 0x09241300,
0x0a241200, 0x0b241100, 0x0c241000, 0x0d240f00,
0x000e240e, 0x000f240d, 0x0010240c, 0x0011240b,
0x0013230a, 0x0013230a, 0x00142309, 0x00152308,
0x00162208, 0x00172207, 0x01182106, 0x01192105,
0x011a2005, 0x021b1f04, 0x021b1f04, 0x021d1e03,
0x031d1d03, 0x031e1d02, 0x041e1c02, 0x041f1b02,
0x05201a01, 0x05211901, 0x06211801, 0x07221700,
0x07221601, 0x08231500, 0x09231400, 0x0a231300,
0x0a231300, 0x0b231200, 0x0c231100, 0x0d231000,
0x000f220f, 0x0010220e, 0x0011220d, 0x0012220c,
0x0013220b, 0x0013220b, 0x0015210a, 0x0015210a,
0x01162108, 0x01172008, 0x01182007, 0x02191f06,
0x02191f06, 0x021a1e06, 0x031a1e05, 0x031c1d04,
0x041c1c04, 0x041d1c03, 0x051d1b03, 0x051e1a03,
0x061f1902, 0x061f1902, 0x07201801, 0x08201701,
0x08211601, 0x09211501, 0x0a211500, 0x0b211400,
0x0b221300, 0x0c221200, 0x0d221100, 0x0e221000,
0x0010210f, 0x0011210e, 0x0011210e, 0x0012210d,
0x0013210c, 0x0014200c, 0x0114200b, 0x0115200a,
0x01161f0a, 0x01171f09, 0x02171f08, 0x02181e08,
0x03181e07, 0x031a1d06, 0x031a1d06, 0x041b1c05,
0x041c1c04, 0x051c1b04, 0x051d1a04, 0x061d1a03,
0x071d1903, 0x071e1803, 0x081e1802, 0x081f1702,
0x091f1602, 0x0a201501, 0x0b1f1501, 0x0b201401,
0x0c211300, 0x0d211200, 0x0e201200, 0x0e211100,
0x00102010, 0x0011200f, 0x0012200e, 0x0013200d,
0x0013200d, 0x01141f0c, 0x01151f0b, 0x01151f0b,
0x01161f0a, 0x02171e09, 0x02171e09, 0x03181d08,
0x03191d07, 0x03191d07, 0x041a1c06, 0x041b1c05,
0x051b1b05, 0x051c1b04, 0x061c1a04, 0x071d1903,
0x071d1903, 0x081d1803, 0x081e1703, 0x091e1702,
0x0a1f1601, 0x0a1f1502, 0x0b1f1501, 0x0c1f1401,
0x0d201300, 0x0d201300, 0x0e201200, 0x0f201100,
0x00102010, 0x0011200f, 0x00121f0f, 0x00131f0e,
0x00141f0d, 0x01141f0c, 0x01141f0c, 0x01151e0c,
0x02161e0a, 0x02171e09, 0x03171d09, 0x03181d08,
0x03181d08, 0x04191c07, 0x041a1c06, 0x051a1b06,
0x051b1b05, 0x061b1a05, 0x061c1a04, 0x071c1904,
0x081c1903, 0x081d1803, 0x091d1703, 0x091e1702,
0x0a1e1602, 0x0b1e1502, 0x0c1e1501, 0x0c1f1401,
0x0d1f1400, 0x0e1f1300, 0x0e1f1201, 0x0f1f1200,
0x00111e11, 0x00121e10, 0x00131e0f, 0x00131e0f,
0x01131e0e, 0x01141d0e, 0x02151d0c, 0x02151d0c,
0x02161d0b, 0x03161c0b, 0x03171c0a, 0x04171c09,
0x04181b09, 0x05181b08, 0x05191b07, 0x06191a07,
0x061a1a06, 0x071a1906, 0x071b1905, 0x081b1805,
0x091b1804, 0x091c1704, 0x0a1c1703, 0x0a1c1604,
0x0b1d1602, 0x0c1d1502, 0x0c1d1502, 0x0d1d1402,
0x0e1d1401, 0x0e1e1301, 0x0f1e1300, 0x101e1200,
0x00111e11, 0x00121e10, 0x00131d10, 0x01131d0f,
0x01141d0e, 0x01141d0e, 0x02151c0d, 0x02151c0d,
0x03161c0b, 0x03161c0b, 0x04171b0a, 0x04171b0a,
0x05171b09, 0x05181a09, 0x06181a08, 0x06191a07,
0x07191907, 0x071a1906, 0x081a1806, 0x081a1806,
0x091a1805, 0x0a1b1704, 0x0a1b1704, 0x0b1c1603,
0x0b1c1603, 0x0c1c1503, 0x0d1c1502, 0x0d1d1402,
0x0e1d1401, 0x0f1d1301, 0x0f1d1301, 0x101e1200,
};
static const u32 bicubic8coefftab32_left[480] = {
0x40000000, 0x40ff0000, 0x3ffe0000, 0x3efe0000,
0x3dfd0000, 0x3cfc0000, 0x3bfc0000, 0x39fc0000,
0x36fc0000, 0x35fb0000, 0x33fb0000, 0x31fb0000,
0x2ffb0000, 0x2cfb0000, 0x29fc0000, 0x27fc0000,
0x24fc0000, 0x21fc0000, 0x1efd0000, 0x1cfd0000,
0x19fd0000, 0x16fe0000, 0x14fe0000, 0x11fe0000,
0x0dff0000, 0x0cff0000, 0x0aff0000, 0x08ff0000,
0x05000000, 0x03000000, 0x02000000, 0x01000000,
0x3904ff00, 0x3903ff00, 0x3902ff00, 0x38010000,
0x37000000, 0x36ff0000, 0x35ff0000, 0x34fe0000,
0x32fe0000, 0x31fd0000, 0x30fd0000, 0x2efc0000,
0x2cfc0000, 0x2afc0000, 0x28fc0000, 0x26fc0000,
0x24fc0000, 0x22fc0000, 0x20fc0000, 0x1efc0000,
0x1cfc0000, 0x19fc0000, 0x17fc0000, 0x15fd0000,
0x12fd0000, 0x11fd0000, 0x0ffd0000, 0x0dfe0000,
0x0bfe0000, 0x09fe0000, 0x08fe0000, 0x06ff0000,
0x3209fe00, 0x3407fe00, 0x3306fe00, 0x3305fe00,
0x3204fe00, 0x3102ff00, 0x3102ff00, 0x3001ff00,
0x2f00ff00, 0x2effff00, 0x2cff0000, 0x2bfe0000,
0x29fe0000, 0x28fe0000, 0x26fd0000, 0x24fd0000,
0x23fd0000, 0x21fd0000, 0x20fc0000, 0x1efc0000,
0x1dfc0000, 0x1bfc0000, 0x19fc0000, 0x17fc0000,
0x16fc0000, 0x14fc0000, 0x12fc0000, 0x10fd0000,
0x0ffd0000, 0x0dfd0000, 0x0cfd0000, 0x0afd0000,
0x2e0cfd00, 0x2e0bfd00, 0x2e09fd00, 0x2e08fd00,
0x2e07fd00, 0x2c06fe00, 0x2c05fe00, 0x2b04fe00,
0x2b03fe00, 0x2a02fe00, 0x2901fe00, 0x2701ff00,
0x2700ff00, 0x26ffff00, 0x24ffff00, 0x23ffff00,
0x22feff00, 0x20fe0000, 0x1ffe0000, 0x1efd0000,
0x1dfd0000, 0x1bfd0000, 0x1afd0000, 0x19fd0000,
0x17fd0000, 0x15fd0000, 0x13fd0000, 0x12fd0000,
0x11fd0000, 0x10fd0000, 0x0ffd0000, 0x0cfd0000,
0x2a0efd00, 0x2a0dfd00, 0x2a0cfd00, 0x290bfd00,
0x290afd00, 0x2909fd00, 0x2908fd00, 0x2807fd00,
0x2706fd00, 0x2705fd00, 0x2604fe00, 0x2603fe00,
0x2502fe00, 0x2402fe00, 0x2401fe00, 0x2200fe00,
0x2200fe00, 0x2000ff00, 0x1fffff00, 0x1effff00,
0x1dfeff00, 0x1cfeff00, 0x1afeff00, 0x19feff00,
0x17fe0000, 0x16fd0000, 0x15fd0000, 0x14fd0000,
0x12fd0000, 0x11fd0000, 0x10fd0000, 0x0ffd0000,
0x2610fd00, 0x260ffd00, 0x260efd00, 0x260dfd00,
0x260cfd00, 0x260bfd00, 0x260afd00, 0x2609fd00,
0x2508fd00, 0x2507fd00, 0x2406fd00, 0x2406fd00,
0x2305fd00, 0x2304fd00, 0x2203fe00, 0x2103fe00,
0x2002fe00, 0x1f01fe00, 0x1e01fe00, 0x1e00fe00,
0x1c00fe00, 0x1b00fe00, 0x1afffe00, 0x19ffff00,
0x18ffff00, 0x17feff00, 0x16feff00, 0x15feff00,
0x14feff00, 0x13feff00, 0x11feff00, 0x10fd0000,
0x2411feff, 0x2410feff, 0x240ffeff, 0x230efeff,
0x240dfeff, 0x240cfeff, 0x230cfd00, 0x230bfd00,
0x230afd00, 0x2309fd00, 0x2208fd00, 0x2108fd00,
0x2007fd00, 0x2106fd00, 0x2005fd00, 0x1f05fd00,
0x1f04fd00, 0x1e03fd00, 0x1d03fe00, 0x1c02fe00,
0x1b02fe00, 0x1a01fe00, 0x1a01fe00, 0x1900fe00,
0x1800fe00, 0x1700fe00, 0x16fffe00, 0x15fffe00,
0x13ffff00, 0x12ffff00, 0x12feff00, 0x11feff00,
0x2212fffe, 0x2211fffe, 0x2210ffff, 0x220ffeff,
0x220efeff, 0x210efeff, 0x210dfeff, 0x210cfeff,
0x210bfeff, 0x200bfeff, 0x200afeff, 0x1f09feff,
0x1f08feff, 0x1d08fe00, 0x1e07fd00, 0x1e06fd00,
0x1d06fd00, 0x1c05fd00, 0x1b04fe00, 0x1a04fe00,
0x1a03fe00, 0x1903fe00, 0x1802fe00, 0x1802fe00,
0x1701fe00, 0x1601fe00, 0x1501fe00, 0x1500fe00,
0x1400fe00, 0x1400fe00, 0x13fffe00, 0x12fffe00,
0x201200fe, 0x201100fe, 0x1f1100fe, 0x2010fffe,
0x200ffffe, 0x1f0ffffe, 0x1f0efffe, 0x1e0dffff,
0x1f0cfeff, 0x1e0cfeff, 0x1e0bfeff, 0x1e0afeff,
0x1d0afeff, 0x1d09feff, 0x1c08feff, 0x1b08feff,
0x1b07feff, 0x1a07feff, 0x1a06feff, 0x1a05feff,
0x1805fe00, 0x1904fe00, 0x1704fe00, 0x1703fe00,
0x1603fe00, 0x1602fe00, 0x1402fe00, 0x1402fe00,
0x1401fe00, 0x1301fe00, 0x1201fe00, 0x1200fe00,
0x1c1202fe, 0x1c1102fe, 0x1b1102fe, 0x1c1001fe,
0x1b1001fe, 0x1c0f01fe, 0x1b0f00fe, 0x1b0e00fe,
0x1b0e00fe, 0x1b0d00fe, 0x1b0c00fe, 0x1a0cfffe,
0x1a0bfffe, 0x1a0bfffe, 0x190afffe, 0x190afffe,
0x1909fffe, 0x1709ffff, 0x1808ffff, 0x1708feff,
0x1707feff, 0x1707feff, 0x1606feff, 0x1506feff,
0x1505feff, 0x1505feff, 0x1404feff, 0x1404feff,
0x1404feff, 0x1303feff, 0x1203feff, 0x1202feff,
0x191104fe, 0x191104fe, 0x191003fe, 0x191003fe,
0x171003fe, 0x180f03fe, 0x180f02fe, 0x180e02fe,
0x180e02fe, 0x180d01fe, 0x180d01fe, 0x180d01fe,
0x170c01fe, 0x160c01fe, 0x170b00fe, 0x170b00fe,
0x160a00fe, 0x160a00fe, 0x160a00fe, 0x150900fe,
0x1509fffe, 0x1508fffe, 0x1508fffe, 0x1408fffe,
0x1407fffe, 0x1307ffff, 0x1306ffff, 0x1206ffff,
0x1206ffff, 0x1205ffff, 0x1205ffff, 0x1104feff,
0x161006ff, 0x161005ff, 0x161005ff, 0x160f05ff,
0x160f04ff, 0x150f04ff, 0x150e04ff, 0x150e04ff,
0x150e03ff, 0x150d03ff, 0x150d03ff, 0x150d02ff,
0x140c02ff, 0x150c02fe, 0x150c02fe, 0x150b02fe,
0x140b01fe, 0x140b01fe, 0x140a01fe, 0x140a01fe,
0x140a01fe, 0x130900fe, 0x130900fe, 0x130900fe,
0x130800fe, 0x120800fe, 0x120800fe, 0x120700fe,
0x120700fe, 0x1107fffe, 0x1106fffe, 0x1106fffe,
0x140f0700, 0x140f0600, 0x140f0600, 0x140f0600,
0x140e0600, 0x130e0500, 0x140e05ff, 0x130e05ff,
0x140d05ff, 0x130d04ff, 0x130d04ff, 0x120d04ff,
0x130c04ff, 0x130c03ff, 0x130c03ff, 0x120c03ff,
0x120b03ff, 0x120b02ff, 0x120b02ff, 0x120a02ff,
0x120a02ff, 0x110a02ff, 0x110a01ff, 0x120901ff,
0x100901ff, 0x100901ff, 0x110801ff, 0x110801ff,
0x100800ff, 0x100800ff, 0x100700ff, 0x100700fe,
0x120f0701, 0x120e0701, 0x120e0701, 0x120e0701,
0x120e0600, 0x110e0600, 0x120d0600, 0x120d0600,
0x120d0500, 0x120d0500, 0x110d0500, 0x110c0500,
0x110c0500, 0x110c0400, 0x110c0400, 0x110b04ff,
0x110b04ff, 0x110b04ff, 0x110b03ff, 0x110b03ff,
0x110a03ff, 0x110a03ff, 0x100a03ff, 0x110a02ff,
0x100902ff, 0x100902ff, 0x100902ff, 0x0f0902ff,
0x0e0902ff, 0x100801ff, 0x0f0801ff, 0x0f0801ff,
0x100e0802, 0x100e0802, 0x110e0702, 0x110d0701,
0x110d0701, 0x100d0701, 0x100d0701, 0x110d0601,
0x110d0601, 0x110c0601, 0x110c0601, 0x100c0600,
0x100c0500, 0x100c0500, 0x100c0500, 0x100b0500,
0x100b0500, 0x100b0400, 0x100b0400, 0x0f0b0400,
0x100a0400, 0x0f0a0400, 0x0f0a0400, 0x0f0a0300,
0x0f0a03ff, 0x0f0903ff, 0x0f0903ff, 0x0f0903ff,
0x0f0903ff, 0x0f0902ff, 0x0f0902ff, 0x0f0802ff
};
static const u32 bicubic8coefftab32_right[480] = {
0x00000000, 0x00000001, 0x00000003, 0x00000004,
0x00000006, 0x0000ff09, 0x0000ff0a, 0x0000ff0c,
0x0000ff0f, 0x0000fe12, 0x0000fe14, 0x0000fe16,
0x0000fd19, 0x0000fd1c, 0x0000fd1e, 0x0000fc21,
0x0000fc24, 0x0000fc27, 0x0000fc29, 0x0000fb2c,
0x0000fb2f, 0x0000fb31, 0x0000fb33, 0x0000fb36,
0x0000fc38, 0x0000fc39, 0x0000fc3b, 0x0000fc3d,
0x0000fd3e, 0x0000fe3f, 0x0000fe40, 0x0000ff40,
0x0000ff05, 0x0000ff06, 0x0000fe08, 0x0000fe09,
0x0000fe0b, 0x0000fe0d, 0x0000fd0f, 0x0000fd11,
0x0000fd13, 0x0000fd15, 0x0000fc17, 0x0000fc1a,
0x0000fc1c, 0x0000fc1e, 0x0000fc20, 0x0000fc22,
0x0000fc24, 0x0000fc26, 0x0000fc28, 0x0000fc2a,
0x0000fc2c, 0x0000fc2f, 0x0000fd30, 0x0000fd31,
0x0000fe33, 0x0000fe34, 0x0000ff35, 0x0000ff36,
0x00000037, 0x00000138, 0x00ff0239, 0x00ff0339,
0x0000fe09, 0x0000fd0a, 0x0000fd0c, 0x0000fd0d,
0x0000fd0f, 0x0000fd11, 0x0000fc12, 0x0000fc14,
0x0000fc16, 0x0000fc18, 0x0000fc19, 0x0000fc1b,
0x0000fc1d, 0x0000fc1e, 0x0000fc21, 0x0000fd22,
0x0000fd23, 0x0000fd25, 0x0000fd27, 0x0000fe28,
0x0000fe29, 0x0000fe2b, 0x0000ff2c, 0x00ffff2f,
0x00ff002f, 0x00ff0130, 0x00ff0231, 0x00ff0232,
0x00fe0432, 0x00fe0533, 0x00fe0633, 0x00fe0734,
0x0000fd0c, 0x0000fd0d, 0x0000fd0f, 0x0000fd10,
0x0000fd11, 0x0000fd13, 0x0000fd14, 0x0000fd16,
0x0000fd17, 0x0000fd19, 0x0000fd1b, 0x0000fd1c,
0x0000fd1d, 0x0000fd1f, 0x0000fe20, 0x0000fe21,
0x00fffe24, 0x00ffff24, 0x00ffff25, 0x00ffff27,
0x00ff0027, 0x00ff0128, 0x00fe012a, 0x00fe022a,
0x00fe032b, 0x00fe042c, 0x00fe052d, 0x00fe062d,
0x00fd072e, 0x00fd082e, 0x00fd092e, 0x00fd0b2f,
0x0000fd0e, 0x0000fd0f, 0x0000fd10, 0x0000fd12,
0x0000fd13, 0x0000fd14, 0x0000fd15, 0x0000fd17,
0x0000fe18, 0x00fffe1a, 0x00fffe1b, 0x00fffe1c,
0x00fffe1e, 0x00ffff1e, 0x00ffff1f, 0x00ff0021,
0x00fe0022, 0x00fe0023, 0x00fe0124, 0x00fe0224,
0x00fe0226, 0x00fe0326, 0x00fe0427, 0x00fd0528,
0x00fd0628, 0x00fd0729, 0x00fd0829, 0x00fd0929,
0x00fd0a2a, 0x00fd0b2a, 0x00fd0c2a, 0x00fd0d2a,
0x0000fd10, 0x0000fd11, 0x00fffe12, 0x00fffe13,
0x00fffe14, 0x00fffe15, 0x00fffe16, 0x00fffe17,
0x00ffff18, 0x00ffff19, 0x00feff1c, 0x00fe001b,
0x00fe001d, 0x00fe001e, 0x00fe011e, 0x00fe011f,
0x00fe0220, 0x00fe0321, 0x00fe0322, 0x00fd0423,
0x00fd0524, 0x00fd0624, 0x00fd0626, 0x00fd0725,
0x00fd0825, 0x00fd0926, 0x00fd0a26, 0x00fd0b26,
0x00fd0c26, 0x00fd0d26, 0x00fd0e27, 0x00fd0f27,
0x00fffe11, 0x00fffe12, 0x00fffe13, 0x00ffff14,
0x00ffff14, 0x00feff16, 0x00feff17, 0x00fe0017,
0x00fe0018, 0x00fe0019, 0x00fe011a, 0x00fe011b,
0x00fe021c, 0x00fe021c, 0x00fe031d, 0x00fd031f,
0x00fd041f, 0x00fd0520, 0x00fd0520, 0x00fd0621,
0x00fd0721, 0x00fd0822, 0x00fd0822, 0x00fd0923,
0x00fd0a23, 0x00fd0b23, 0x00fd0b25, 0x00fe0c24,
0x00fe0d24, 0x00fe0e24, 0x00fe0f24, 0x00fe1024,
0x00feff12, 0x00feff13, 0x00feff13, 0x00fe0014,
0x00fe0015, 0x00fe0016, 0x00fe0116, 0x00fe0117,
0x00fe0118, 0x00fe0218, 0x00fe0219, 0x00fe031a,
0x00fe031b, 0x00fe041b, 0x00fd041d, 0x00fd051d,
0x00fd061d, 0x00fd061f, 0x00fe071e, 0x00fe081e,
0x00fe081f, 0x00fe091f, 0x00fe0a20, 0x00fe0a20,
0x00fe0b21, 0x00fe0c21, 0x00fe0d21, 0x00fe0d22,
0x00fe0e22, 0x00fe0f21, 0x00ff1021, 0x00ff1022,
0x00fe0012, 0x00fe0013, 0x00fe0113, 0x00fe0114,
0x00fe0115, 0x00fe0215, 0x00fe0216, 0x00fe0217,
0x00fe0317, 0x00fe0318, 0x00fe0418, 0x00fe0419,
0x00fe0519, 0x00fe051a, 0x00fe061b, 0x00fe071b,
0x00fe071c, 0x00fe081c, 0x00fe081d, 0x00fe091d,
0x00fe0a1d, 0x00fe0a1d, 0x00fe0b1e, 0x00fe0c1e,
0x00ff0c1e, 0x00ff0d1e, 0x00ff0e1f, 0x00ff0e1f,
0x00ff0f1f, 0x00ff0f20, 0x0000101f, 0x0000111f,
0x00fe0212, 0x00fe0312, 0x00fe0313, 0x00fe0314,
0x00fe0414, 0x00fe0414, 0x00fe0515, 0x00fe0516,
0x00fe0516, 0x00fe0616, 0x00fe0617, 0x00fe0718,
0x00fe0719, 0x00fe0818, 0x00ff0819, 0x00ff0918,
0x00ff0919, 0x00ff0a19, 0x00ff0a19, 0x00ff0b1a,
0x00ff0b1b, 0x00ff0c1a, 0x00000c1b, 0x00000d1b,
0x00000d1c, 0x00000e1b, 0x00000e1d, 0x00010f1b,
0x00010f1b, 0x0001101c, 0x0001101d, 0x0002111c,
0x00fe0412, 0x00fe0412, 0x00ff0512, 0x00ff0512,
0x00ff0613, 0x00ff0613, 0x00ff0614, 0x00ff0714,
0x00ff0714, 0x00ff0815, 0x00ff0815, 0x00ff0815,
0x00ff0916, 0x00000916, 0x00000a16, 0x00000a16,
0x00000a18, 0x00000b17, 0x00000b17, 0x00010c17,
0x00010c18, 0x00010d18, 0x00010d18, 0x00010d19,
0x00020e18, 0x00020e18, 0x00020f18, 0x00030f18,
0x00030f18, 0x00031018, 0x00031018, 0x00041119,
0x00ff0610, 0x00ff0611, 0x00ff0611, 0x00ff0711,
0x00000711, 0x00000712, 0x00000812, 0x00000812,
0x00000813, 0x00000913, 0x00000913, 0x00000914,
0x00010a14, 0x00010a14, 0x00010a14, 0x00010b14,
0x00010b16, 0x00020b15, 0x00020c15, 0x00020c15,
0x00020c15, 0x00020d17, 0x00030d16, 0x00030d16,
0x00030e16, 0x00040e16, 0x00040e16, 0x00040f16,
0x00040f16, 0x00050f17, 0x00051017, 0x00051017,
0x0000070f, 0x00000710, 0x00000710, 0x00000710,
0x00000810, 0x00010811, 0x00010811, 0x00010911,
0x00010911, 0x00010913, 0x00010913, 0x00020a12,
0x00020a12, 0x00020a13, 0x00020b12, 0x00020b13,
0x00030b13, 0x00030c13, 0x00030c13, 0x00030c14,
0x00040c13, 0x00040d13, 0x00040d14, 0x00040d14,
0x00050e14, 0x00050e14, 0x00050e14, 0x00050e14,
0x00060f14, 0x00060f14, 0x00060f15, 0x00061015,
0x0001070f, 0x0001080f, 0x0001080f, 0x0001080f,
0x00010811, 0x00020910, 0x00020910, 0x00020910,
0x00020911, 0x00020a10, 0x00030a10, 0x00030a11,
0x00030a11, 0x00030b11, 0x00030b11, 0x00040b12,
0x00040b12, 0x00040c11, 0x00040c12, 0x00040c12,
0x00050c12, 0x00050c12, 0x00050d12, 0x00050d12,
0x00060d13, 0x00060d13, 0x00060e12, 0x00060e13,
0x00070e13, 0x00070e13, 0x00070f13, 0x00070f13,
0x0002080e, 0x0002080e, 0x0002080e, 0x00020810,
0x0002090f, 0x0003090f, 0x0003090f, 0x0003090f,
0x0003090f, 0x00030a0f, 0x00030a0f, 0x00040a10,
0x00040a11, 0x00040b10, 0x00040b10, 0x00040b11,
0x00050b10, 0x00050b11, 0x00050c10, 0x00050c11,
0x00050c11, 0x00060c11, 0x00060c11, 0x00060d11,
0x00060d12, 0x00070d12, 0x00070d12, 0x00070e11,
0x00070e11, 0x00070e12, 0x00080e11, 0x00080e12
};
static const u32 bicubic4coefftab32[480] = {
0x00004000, 0x000140ff, 0x00033ffe, 0x00043ffd,
0x00063dfd, 0xff083dfc, 0xff0a3bfc, 0xff0c39fc,
0xff0e37fc, 0xfe1136fb, 0xfe1433fb, 0xfe1631fb,
0xfd192ffb, 0xfd1c2cfb, 0xfd1e29fc, 0xfc2127fc,
0xfc2424fc, 0xfc2721fc, 0xfc291efd, 0xfb2c1cfd,
0xfb2f19fd, 0xfb3116fe, 0xfb3314fe, 0xfb3611fe,
0xfc370eff, 0xfc390cff, 0xfc3b0aff, 0xfc3d08ff,
0xfd3d0600, 0xfd3f0400, 0xfe3f0300, 0xff400100,
0xfe053904, 0xfe063903, 0xfe083901, 0xfe0a3800,
0xfd0b3800, 0xfe0d36ff, 0xfd0f35ff, 0xfd1134fe,
0xfd1332fe, 0xfd1531fd, 0xfc1730fd, 0xfc1a2efc,
0xfc1c2cfc, 0xfc1e2afc, 0xfc2028fc, 0xfc2226fc,
0xfc2424fc, 0xfc2622fc, 0xfc2820fc, 0xfc2a1efc,
0xfc2c1cfc, 0xfc2e1afc, 0xfd3017fc, 0xfd3115fd,
0xfe3213fd, 0xfe3411fd, 0xff350ffd, 0xff360dfe,
0x00370bfe, 0x013809fe, 0x023808fe, 0x033806ff,
0xfd093208, 0xfd0a3207, 0xfd0c3205, 0xfd0d3204,
0xfc0f3203, 0xfc113102, 0xfc123002, 0xfc143000,
0xfc152f00, 0xfc172d00, 0xfc192cff, 0xfc1b2bfe,
0xfc1d29fe, 0xfc1e28fe, 0xfc2027fd, 0xfd2125fd,
0xfd2323fd, 0xfd2521fd, 0xfd2720fc, 0xfe281efc,
0xfe291dfc, 0xfe2b1bfc, 0xff2c19fc, 0x002d17fc,
0x002e16fc, 0x012f14fc, 0x022f12fd, 0x023110fd,
0x03310ffd, 0x05310dfd, 0x06320bfd, 0x07320afd,
0xfc0c2d0b, 0xfc0d2d0a, 0xfc0e2d09, 0xfc102d07,
0xfc112c07, 0xfc132c05, 0xfc142c04, 0xfc162b03,
0xfc172a03, 0xfc192a01, 0xfc1a2901, 0xfd1b2800,
0xfd1c2700, 0xfd1e2500, 0xfe1f24ff, 0xfe2023ff,
0xfe2222fe, 0xff2320fe, 0xff241ffe, 0x00251efd,
0x00271cfd, 0x01271bfd, 0x01281afd, 0x022918fd,
0x032a16fd, 0x032b15fd, 0x042b14fd, 0x052c12fd,
0x072c10fd, 0x082c0ffd, 0x092c0efd, 0x0a2c0dfd,
0xfd0d290d, 0xfd0e290c, 0xfd0f290b, 0xfd11280a,
0xfd122809, 0xfd132808, 0xfd142807, 0xfd162706,
0xfd172705, 0xfd192604, 0xfe1a2503, 0xfe1b2502,
0xfe1c2402, 0xfe1d2302, 0xff1e2201, 0xff1f2101,
0x00202000, 0x00211f00, 0x01221eff, 0x02221dff,
0x02241cfe, 0x03241bfe, 0x042519fe, 0x042618fe,
0x052617fe, 0x062716fd, 0x072714fe, 0x082713fe,
0x092812fd, 0x0a2811fd, 0x0b2810fd, 0x0c280ffd,
0xfd0f250f, 0xfd10250e, 0xfd11250d, 0xfd12250c,
0xfd13250b, 0xfe13250a, 0xfe152409, 0xfe162408,
0xfe172308, 0xff182306, 0xff192305, 0xff1a2205,
0x001b2104, 0x001c2103, 0x001d2003, 0x011e1f02,
0x011f1f01, 0x021f1e01, 0x03201d00, 0x03211c00,
0x04211b00, 0x05221aff, 0x062219ff, 0x062318ff,
0x082316ff, 0x082316ff, 0x092415fe, 0x0a2414fe,
0x0b2413fe, 0x0c2412fe, 0x0d2411fe, 0x0e2410fe,
0xfe10230f, 0xfe11230e, 0xfe12220e, 0xfe13220d,
0xfe14220c, 0xff14220b, 0xff15220a, 0xff16210a,
0x00162109, 0x00172108, 0x00182008, 0x01192006,
0x011a1f06, 0x021a1f05, 0x021b1e05, 0x031c1d04,
0x031d1d03, 0x041d1c03, 0x041e1b03, 0x051e1b02,
0x061f1a01, 0x06201901, 0x07201801, 0x08201800,
0x09201700, 0x0a211500, 0x0b2115ff, 0x0c2114ff,
0x0c2213ff, 0x0d2212ff, 0x0e2211ff, 0x0f2211fe,
0xff112010, 0xff12200f, 0xff12200f, 0xff13200e,
0x0013200d, 0x0014200c, 0x00151f0c, 0x00161f0b,
0x01161f0a, 0x01171e0a, 0x02171e09, 0x02181e08,
0x03191d07, 0x03191d07, 0x041a1c06, 0x041b1c05,
0x051b1b05, 0x051c1b04, 0x061c1a04, 0x071c1a03,
0x071d1903, 0x081e1802, 0x091d1802, 0x091e1702,
0x0a1f1601, 0x0b1f1600, 0x0b1f1501, 0x0c201400,
0x0d1f1400, 0x0e2013ff, 0x0f1f1200, 0x102011ff,
0x00111f10, 0x00121e10, 0x00131e0f, 0x00131e0f,
0x01131e0e, 0x01141e0d, 0x01151d0d, 0x02151d0c,
0x02161d0b, 0x03161d0a, 0x03171c0a, 0x04171c09,
0x04181c08, 0x05181b08, 0x05191b07, 0x06191a07,
0x061a1a06, 0x071a1906, 0x071b1905, 0x081b1805,
0x081c1804, 0x091c1704, 0x0a1c1703, 0x0a1d1603,
0x0b1d1602, 0x0c1d1502, 0x0c1d1502, 0x0d1e1401,
0x0e1d1401, 0x0e1e1301, 0x0f1e1300, 0x101e1200,
0x02111c11, 0x02121c10, 0x02131b10, 0x03131b0f,
0x03131b0f, 0x03141b0e, 0x04141b0d, 0x04151a0d,
0x05151a0c, 0x05151a0c, 0x05161a0b, 0x0616190b,
0x0616190b, 0x0716190a, 0x0717180a, 0x08171809,
0x08181808, 0x09181708, 0x09181708, 0x0a181707,
0x0a191607, 0x0b191606, 0x0b1a1605, 0x0c1a1505,
0x0c1a1505, 0x0d1a1504, 0x0d1b1404, 0x0e1b1403,
0x0f1b1303, 0x0f1b1303, 0x101b1302, 0x101c1202,
0x04111a11, 0x04121911, 0x04131910, 0x0513190f,
0x0513190f, 0x0513190f, 0x0613190e, 0x0614180e,
0x0714180d, 0x0714180d, 0x0715180c, 0x0814180c,
0x0815170c, 0x0816170b, 0x0916170a, 0x0916170a,
0x0a16160a, 0x0a171609, 0x0a171609, 0x0b171608,
0x0b171509, 0x0c171508, 0x0c181507, 0x0d171507,
0x0d181407, 0x0e181406, 0x0e181406, 0x0e191306,
0x0f191305, 0x0f191305, 0x10191304, 0x10191205,
0x05121811, 0x06121810, 0x06121810, 0x06131710,
0x0713170f, 0x0713170f, 0x0713170f, 0x0813170e,
0x0813170e, 0x0814170d, 0x0914160d, 0x0914160d,
0x0914160d, 0x0a14160c, 0x0a15160b, 0x0a15150c,
0x0b15150b, 0x0b15150b, 0x0b16150a, 0x0c15150a,
0x0c16140a, 0x0d161409, 0x0d161409, 0x0d171408,
0x0e161408, 0x0e171308, 0x0e171308, 0x0f171307,
0x0f171307, 0x10171306, 0x10181206, 0x10181206,
0x07111711, 0x07121710, 0x07121611, 0x08121610,
0x08121610, 0x0813160f, 0x0912160f, 0x0913160e,
0x0913160e, 0x0913160e, 0x0a14150d, 0x0a14150d,
0x0a14150d, 0x0b14150c, 0x0b14150c, 0x0b14150c,
0x0c14140c, 0x0c15140b, 0x0c15140b, 0x0c15140b,
0x0d15140a, 0x0d15140a, 0x0d15140a, 0x0e161309,
0x0e161309, 0x0e161309, 0x0f151309, 0x0f161308,
0x0f161209, 0x10161208, 0x10161208, 0x10171207,
0x0a111411, 0x0b111410, 0x0b111410, 0x0b111410,
0x0b111410, 0x0b12140f, 0x0b12140f, 0x0c12130f,
0x0c12130f, 0x0c12130f, 0x0c12130f, 0x0c12130f,
0x0d12130e, 0x0d12130e, 0x0d12130e, 0x0d13130d,
0x0d13130d, 0x0d13130d, 0x0e12130d, 0x0e13120d,
0x0e13120d, 0x0e13120d, 0x0e13120d, 0x0f13120c,
0x0f13120c, 0x0f13120c, 0x0f14120b, 0x0f14120b,
0x1013120b, 0x1013120b, 0x1013120b, 0x1014110b,
0x0c111310, 0x0c111310, 0x0c111310, 0x0d101310,
0x0d101310, 0x0d111210, 0x0d111210, 0x0d111210,
0x0d12120f, 0x0d12120f, 0x0d12120f, 0x0d12120f,
0x0e11120f, 0x0e12120e, 0x0e12120e, 0x0e12120e,
0x0e12120e, 0x0e12120e, 0x0e12120e, 0x0e12120e,
0x0f11120e, 0x0f12120d, 0x0f12120d, 0x0f12120d,
0x0f12120d, 0x0f12110e, 0x0f12110e, 0x0f12110e,
0x1012110d, 0x1012110d, 0x1013110c, 0x1013110c,
};
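/*
 * Note: each coefficient table above holds 480 words, i.e. 15 banks of
 * 32 phase entries each. sun8i_vi_scaler_coef_index() below picks the
 * bank (0-14); SUN8I_VI_SCALER_COEFF_COUNT is assumed to be 32 here.
 */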
static u32 sun8i_vi_scaler_base(struct sun8i_mixer *mixer, int channel)
{
if (mixer->cfg->is_de3)
return DE3_VI_SCALER_UNIT_BASE +
DE3_VI_SCALER_UNIT_SIZE * channel;
else
return DE2_VI_SCALER_UNIT_BASE +
DE2_VI_SCALER_UNIT_SIZE * channel;
}
static int sun8i_vi_scaler_coef_index(unsigned int step)
{
unsigned int scale, int_part, float_part;
scale = step >> (SUN8I_VI_SCALER_SCALE_FRAC - 3);
int_part = scale >> 3;
float_part = scale & 0x7;
switch (int_part) {
case 0:
return 0;
case 1:
return float_part;
case 2:
return 8 + (float_part >> 1);
case 3:
return 12;
case 4:
return 13;
default:
return 14;
}
}
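/*
 * Worked example (illustrative, assuming SUN8I_VI_SCALER_SCALE_FRAC is
 * 20): a 2x downscale has step = 2 << 20 = 0x200000, so
 * scale = 0x200000 >> 17 = 16, int_part = 2 and float_part = 0, which
 * selects coefficient bank 8.
 */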
static void sun8i_vi_scaler_set_coeff(struct regmap *map, u32 base,
u32 hstep, u32 vstep,
const struct drm_format_info *format)
{
const u32 *ch_left, *ch_right, *cy;
int offset, i;
if (format->hsub == 1 && format->vsub == 1) {
ch_left = lan3coefftab32_left;
ch_right = lan3coefftab32_right;
cy = lan2coefftab32;
} else {
ch_left = bicubic8coefftab32_left;
ch_right = bicubic8coefftab32_right;
cy = bicubic4coefftab32;
}
offset = sun8i_vi_scaler_coef_index(hstep) *
SUN8I_VI_SCALER_COEFF_COUNT;
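	/* Luma always uses the Lanczos3 tables; only chroma varies. */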
for (i = 0; i < SUN8I_VI_SCALER_COEFF_COUNT; i++) {
regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF0(base, i),
lan3coefftab32_left[offset + i]);
regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF1(base, i),
lan3coefftab32_right[offset + i]);
regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF0(base, i),
ch_left[offset + i]);
regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF1(base, i),
ch_right[offset + i]);
}
	offset = sun8i_vi_scaler_coef_index(vstep) *
		 SUN8I_VI_SCALER_COEFF_COUNT;
for (i = 0; i < SUN8I_VI_SCALER_COEFF_COUNT; i++) {
regmap_write(map, SUN8I_SCALER_VSU_YVCOEFF(base, i),
lan2coefftab32[offset + i]);
regmap_write(map, SUN8I_SCALER_VSU_CVCOEFF(base, i),
cy[offset + i]);
}
}
void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
{
u32 val, base;
base = sun8i_vi_scaler_base(mixer, layer);
if (enable)
val = SUN8I_SCALER_VSU_CTRL_EN |
SUN8I_SCALER_VSU_CTRL_COEFF_RDY;
else
val = 0;
regmap_write(mixer->engine.regs,
SUN8I_SCALER_VSU_CTRL(base), val);
}
void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
u32 hscale, u32 vscale, u32 hphase, u32 vphase,
const struct drm_format_info *format)
{
u32 chphase, cvphase;
u32 insize, outsize;
u32 base;
base = sun8i_vi_scaler_base(mixer, layer);
hphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16;
vphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16;
hscale <<= SUN8I_VI_SCALER_SCALE_FRAC - 16;
vscale <<= SUN8I_VI_SCALER_SCALE_FRAC - 16;
insize = SUN8I_VI_SCALER_SIZE(src_w, src_h);
outsize = SUN8I_VI_SCALER_SIZE(dst_w, dst_h);
	/*
	 * This is the chroma V/H phase calculation as it appears in
	 * the BSP driver. There is no detailed explanation. YUV 4:2:0
	 * chroma is treated specially for some reason.
	 */
if (format->hsub == 2 && format->vsub == 2) {
chphase = hphase >> 1;
cvphase = (vphase >> 1) -
(1UL << (SUN8I_VI_SCALER_SCALE_FRAC - 2));
} else {
chphase = hphase;
cvphase = vphase;
}
if (mixer->cfg->is_de3) {
u32 val;
if (format->hsub == 1 && format->vsub == 1)
val = SUN50I_SCALER_VSU_SCALE_MODE_UI;
else
val = SUN50I_SCALER_VSU_SCALE_MODE_NORMAL;
regmap_write(mixer->engine.regs,
SUN50I_SCALER_VSU_SCALE_MODE(base), val);
}
regmap_write(mixer->engine.regs,
SUN8I_SCALER_VSU_OUTSIZE(base), outsize);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_VSU_YINSIZE(base), insize);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_VSU_YHSTEP(base), hscale);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_VSU_YVSTEP(base), vscale);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_VSU_YHPHASE(base), hphase);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_VSU_YVPHASE(base), vphase);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_VSU_CINSIZE(base),
SUN8I_VI_SCALER_SIZE(src_w / format->hsub,
src_h / format->vsub));
regmap_write(mixer->engine.regs,
SUN8I_SCALER_VSU_CHSTEP(base),
hscale / format->hsub);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_VSU_CVSTEP(base),
vscale / format->vsub);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_VSU_CHPHASE(base), chphase);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_VSU_CVPHASE(base), cvphase);
sun8i_vi_scaler_set_coeff(mixer->engine.regs, base,
hscale, vscale, format);
}
| linux-master | drivers/gpu/drm/sun4i/sun8i_vi_scaler.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Free Electrons
* Copyright (C) 2015 NextThing Co
*
* Maxime Ripard <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
#include "sunxi_engine.h"
#define SUN4I_TVE_EN_REG 0x000
#define SUN4I_TVE_EN_DAC_MAP_MASK GENMASK(19, 4)
#define SUN4I_TVE_EN_DAC_MAP(dac, out) (((out) & 0xf) << (dac + 1) * 4)
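/* e.g. SUN4I_TVE_EN_DAC_MAP(0, 1) routes DAC 0 to output 1: 0x1 << 4 */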
#define SUN4I_TVE_EN_ENABLE BIT(0)
#define SUN4I_TVE_CFG0_REG 0x004
#define SUN4I_TVE_CFG0_DAC_CONTROL_54M BIT(26)
#define SUN4I_TVE_CFG0_CORE_DATAPATH_54M BIT(25)
#define SUN4I_TVE_CFG0_CORE_CONTROL_54M BIT(24)
#define SUN4I_TVE_CFG0_YC_EN BIT(17)
#define SUN4I_TVE_CFG0_COMP_EN BIT(16)
#define SUN4I_TVE_CFG0_RES(x) ((x) & 0xf)
#define SUN4I_TVE_CFG0_RES_480i SUN4I_TVE_CFG0_RES(0)
#define SUN4I_TVE_CFG0_RES_576i SUN4I_TVE_CFG0_RES(1)
#define SUN4I_TVE_DAC0_REG 0x008
#define SUN4I_TVE_DAC0_CLOCK_INVERT BIT(24)
#define SUN4I_TVE_DAC0_LUMA(x) (((x) & 3) << 20)
#define SUN4I_TVE_DAC0_LUMA_0_4 SUN4I_TVE_DAC0_LUMA(3)
#define SUN4I_TVE_DAC0_CHROMA(x) (((x) & 3) << 18)
#define SUN4I_TVE_DAC0_CHROMA_0_75 SUN4I_TVE_DAC0_CHROMA(3)
#define SUN4I_TVE_DAC0_INTERNAL_DAC(x) (((x) & 3) << 16)
#define SUN4I_TVE_DAC0_INTERNAL_DAC_37_5_OHMS SUN4I_TVE_DAC0_INTERNAL_DAC(3)
#define SUN4I_TVE_DAC0_DAC_EN(dac) BIT(dac)
#define SUN4I_TVE_NOTCH_REG 0x00c
#define SUN4I_TVE_NOTCH_DAC0_TO_DAC_DLY(dac, x) ((4 - (x)) << (dac * 3))
#define SUN4I_TVE_CHROMA_FREQ_REG 0x010
#define SUN4I_TVE_PORCH_REG 0x014
#define SUN4I_TVE_PORCH_BACK(x) ((x) << 16)
#define SUN4I_TVE_PORCH_FRONT(x) (x)
#define SUN4I_TVE_LINE_REG 0x01c
#define SUN4I_TVE_LINE_FIRST(x) ((x) << 16)
#define SUN4I_TVE_LINE_NUMBER(x) (x)
#define SUN4I_TVE_LEVEL_REG 0x020
#define SUN4I_TVE_LEVEL_BLANK(x) ((x) << 16)
#define SUN4I_TVE_LEVEL_BLACK(x) (x)
#define SUN4I_TVE_DAC1_REG 0x024
#define SUN4I_TVE_DAC1_AMPLITUDE(dac, x) ((x) << (dac * 8))
#define SUN4I_TVE_DETECT_STA_REG 0x038
#define SUN4I_TVE_DETECT_STA_DAC(dac) BIT((dac * 8))
#define SUN4I_TVE_DETECT_STA_UNCONNECTED 0
#define SUN4I_TVE_DETECT_STA_CONNECTED 1
#define SUN4I_TVE_DETECT_STA_GROUND 2
#define SUN4I_TVE_CB_CR_LVL_REG 0x10c
#define SUN4I_TVE_CB_CR_LVL_CR_BURST(x) ((x) << 8)
#define SUN4I_TVE_CB_CR_LVL_CB_BURST(x) (x)
#define SUN4I_TVE_TINT_BURST_PHASE_REG 0x110
#define SUN4I_TVE_TINT_BURST_PHASE_CHROMA(x) (x)
#define SUN4I_TVE_BURST_WIDTH_REG 0x114
#define SUN4I_TVE_BURST_WIDTH_BREEZEWAY(x) ((x) << 16)
#define SUN4I_TVE_BURST_WIDTH_BURST_WIDTH(x) ((x) << 8)
#define SUN4I_TVE_BURST_WIDTH_HSYNC_WIDTH(x) (x)
#define SUN4I_TVE_CB_CR_GAIN_REG 0x118
#define SUN4I_TVE_CB_CR_GAIN_CR(x) ((x) << 8)
#define SUN4I_TVE_CB_CR_GAIN_CB(x) (x)
#define SUN4I_TVE_SYNC_VBI_REG 0x11c
#define SUN4I_TVE_SYNC_VBI_SYNC(x) ((x) << 16)
#define SUN4I_TVE_SYNC_VBI_VBLANK(x) (x)
#define SUN4I_TVE_ACTIVE_LINE_REG 0x124
#define SUN4I_TVE_ACTIVE_LINE(x) (x)
#define SUN4I_TVE_CHROMA_REG 0x128
#define SUN4I_TVE_CHROMA_COMP_GAIN(x) ((x) & 3)
#define SUN4I_TVE_CHROMA_COMP_GAIN_50 SUN4I_TVE_CHROMA_COMP_GAIN(2)
#define SUN4I_TVE_12C_REG 0x12c
#define SUN4I_TVE_12C_NOTCH_WIDTH_WIDE BIT(8)
#define SUN4I_TVE_12C_COMP_YUV_EN BIT(0)
#define SUN4I_TVE_RESYNC_REG 0x130
#define SUN4I_TVE_RESYNC_FIELD BIT(31)
#define SUN4I_TVE_RESYNC_LINE(x) ((x) << 16)
#define SUN4I_TVE_RESYNC_PIXEL(x) (x)
#define SUN4I_TVE_SLAVE_REG 0x134
#define SUN4I_TVE_WSS_DATA2_REG 0x244
struct color_gains {
u16 cb;
u16 cr;
};
struct burst_levels {
u16 cb;
u16 cr;
};
struct video_levels {
u16 black;
u16 blank;
};
struct resync_parameters {
bool field;
u16 line;
u16 pixel;
};
struct tv_mode {
char *name;
unsigned int tv_mode;
u32 mode;
u32 chroma_freq;
u16 back_porch;
u16 front_porch;
u16 vblank_level;
bool yc_en;
bool dac3_en;
bool dac_bit25_en;
const struct color_gains *color_gains;
const struct burst_levels *burst_levels;
const struct video_levels *video_levels;
const struct resync_parameters *resync_params;
};
struct sun4i_tv {
struct drm_connector connector;
struct drm_encoder encoder;
struct clk *clk;
struct regmap *regs;
struct reset_control *reset;
struct sun4i_drv *drv;
};
static const struct video_levels ntsc_video_levels = {
.black = 282, .blank = 240,
};
static const struct video_levels pal_video_levels = {
.black = 252, .blank = 252,
};
static const struct burst_levels ntsc_burst_levels = {
.cb = 79, .cr = 0,
};
static const struct burst_levels pal_burst_levels = {
.cb = 40, .cr = 40,
};
static const struct color_gains ntsc_color_gains = {
.cb = 160, .cr = 160,
};
static const struct color_gains pal_color_gains = {
.cb = 224, .cr = 224,
};
static const struct resync_parameters ntsc_resync_parameters = {
.field = false, .line = 14, .pixel = 12,
};
static const struct resync_parameters pal_resync_parameters = {
.field = true, .line = 13, .pixel = 12,
};
static const struct tv_mode tv_modes[] = {
{
.tv_mode = DRM_MODE_TV_MODE_NTSC,
.mode = SUN4I_TVE_CFG0_RES_480i,
.chroma_freq = 0x21f07c1f,
.yc_en = true,
.dac3_en = true,
.dac_bit25_en = true,
.back_porch = 118,
.front_porch = 32,
.vblank_level = 240,
.color_gains = &ntsc_color_gains,
.burst_levels = &ntsc_burst_levels,
.video_levels = &ntsc_video_levels,
.resync_params = &ntsc_resync_parameters,
},
{
.tv_mode = DRM_MODE_TV_MODE_PAL,
.mode = SUN4I_TVE_CFG0_RES_576i,
.chroma_freq = 0x2a098acb,
.back_porch = 138,
.front_porch = 24,
.vblank_level = 252,
.color_gains = &pal_color_gains,
.burst_levels = &pal_burst_levels,
.video_levels = &pal_video_levels,
.resync_params = &pal_resync_parameters,
},
};
static inline struct sun4i_tv *
drm_encoder_to_sun4i_tv(struct drm_encoder *encoder)
{
return container_of(encoder, struct sun4i_tv,
encoder);
}
static const struct tv_mode *
sun4i_tv_find_tv_by_mode(unsigned int mode)
{
int i;
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
const struct tv_mode *tv_mode = &tv_modes[i];
if (tv_mode->tv_mode == mode)
return tv_mode;
}
return NULL;
}
static void sun4i_tv_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
DRM_DEBUG_DRIVER("Disabling the TV Output\n");
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_ENABLE,
0);
sunxi_engine_disable_color_correction(crtc->engine);
}
static void sun4i_tv_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
struct drm_crtc_state *crtc_state =
drm_atomic_get_new_crtc_state(state, encoder->crtc);
struct drm_display_mode *mode = &crtc_state->mode;
struct drm_connector *connector = &tv->connector;
struct drm_connector_state *conn_state =
drm_atomic_get_new_connector_state(state, connector);
const struct tv_mode *tv_mode =
sun4i_tv_find_tv_by_mode(conn_state->tv.mode);
DRM_DEBUG_DRIVER("Enabling the TV Output\n");
/* Enable and map the DAC to the output */
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_DAC_MAP_MASK,
SUN4I_TVE_EN_DAC_MAP(0, 1) |
SUN4I_TVE_EN_DAC_MAP(1, 2) |
SUN4I_TVE_EN_DAC_MAP(2, 3) |
SUN4I_TVE_EN_DAC_MAP(3, 4));
	/* Set the TV mode settings */
regmap_write(tv->regs, SUN4I_TVE_CFG0_REG,
tv_mode->mode |
(tv_mode->yc_en ? SUN4I_TVE_CFG0_YC_EN : 0) |
SUN4I_TVE_CFG0_COMP_EN |
SUN4I_TVE_CFG0_DAC_CONTROL_54M |
SUN4I_TVE_CFG0_CORE_DATAPATH_54M |
SUN4I_TVE_CFG0_CORE_CONTROL_54M);
/* Configure the DAC for a composite output */
regmap_write(tv->regs, SUN4I_TVE_DAC0_REG,
SUN4I_TVE_DAC0_DAC_EN(0) |
(tv_mode->dac3_en ? SUN4I_TVE_DAC0_DAC_EN(3) : 0) |
SUN4I_TVE_DAC0_INTERNAL_DAC_37_5_OHMS |
SUN4I_TVE_DAC0_CHROMA_0_75 |
SUN4I_TVE_DAC0_LUMA_0_4 |
SUN4I_TVE_DAC0_CLOCK_INVERT |
(tv_mode->dac_bit25_en ? BIT(25) : 0) |
BIT(30));
	/* Configure the sample delay between DAC0 and the other DACs */
regmap_write(tv->regs, SUN4I_TVE_NOTCH_REG,
SUN4I_TVE_NOTCH_DAC0_TO_DAC_DLY(1, 0) |
SUN4I_TVE_NOTCH_DAC0_TO_DAC_DLY(2, 0));
regmap_write(tv->regs, SUN4I_TVE_CHROMA_FREQ_REG,
tv_mode->chroma_freq);
/* Set the front and back porch */
regmap_write(tv->regs, SUN4I_TVE_PORCH_REG,
SUN4I_TVE_PORCH_BACK(tv_mode->back_porch) |
SUN4I_TVE_PORCH_FRONT(tv_mode->front_porch));
/* Set the lines setup */
regmap_write(tv->regs, SUN4I_TVE_LINE_REG,
SUN4I_TVE_LINE_FIRST(22) |
SUN4I_TVE_LINE_NUMBER(mode->vtotal));
regmap_write(tv->regs, SUN4I_TVE_LEVEL_REG,
SUN4I_TVE_LEVEL_BLANK(tv_mode->video_levels->blank) |
SUN4I_TVE_LEVEL_BLACK(tv_mode->video_levels->black));
regmap_write(tv->regs, SUN4I_TVE_DAC1_REG,
SUN4I_TVE_DAC1_AMPLITUDE(0, 0x18) |
SUN4I_TVE_DAC1_AMPLITUDE(1, 0x18) |
SUN4I_TVE_DAC1_AMPLITUDE(2, 0x18) |
SUN4I_TVE_DAC1_AMPLITUDE(3, 0x18));
regmap_write(tv->regs, SUN4I_TVE_CB_CR_LVL_REG,
SUN4I_TVE_CB_CR_LVL_CB_BURST(tv_mode->burst_levels->cb) |
SUN4I_TVE_CB_CR_LVL_CR_BURST(tv_mode->burst_levels->cr));
/* Set burst width for a composite output */
regmap_write(tv->regs, SUN4I_TVE_BURST_WIDTH_REG,
SUN4I_TVE_BURST_WIDTH_HSYNC_WIDTH(126) |
SUN4I_TVE_BURST_WIDTH_BURST_WIDTH(68) |
SUN4I_TVE_BURST_WIDTH_BREEZEWAY(22));
regmap_write(tv->regs, SUN4I_TVE_CB_CR_GAIN_REG,
SUN4I_TVE_CB_CR_GAIN_CB(tv_mode->color_gains->cb) |
SUN4I_TVE_CB_CR_GAIN_CR(tv_mode->color_gains->cr));
regmap_write(tv->regs, SUN4I_TVE_SYNC_VBI_REG,
SUN4I_TVE_SYNC_VBI_SYNC(0x10) |
SUN4I_TVE_SYNC_VBI_VBLANK(tv_mode->vblank_level));
regmap_write(tv->regs, SUN4I_TVE_ACTIVE_LINE_REG,
SUN4I_TVE_ACTIVE_LINE(1440));
/* Set composite chroma gain to 50 % */
regmap_write(tv->regs, SUN4I_TVE_CHROMA_REG,
SUN4I_TVE_CHROMA_COMP_GAIN_50);
regmap_write(tv->regs, SUN4I_TVE_12C_REG,
SUN4I_TVE_12C_COMP_YUV_EN |
SUN4I_TVE_12C_NOTCH_WIDTH_WIDE);
regmap_write(tv->regs, SUN4I_TVE_RESYNC_REG,
SUN4I_TVE_RESYNC_PIXEL(tv_mode->resync_params->pixel) |
SUN4I_TVE_RESYNC_LINE(tv_mode->resync_params->line) |
(tv_mode->resync_params->field ?
SUN4I_TVE_RESYNC_FIELD : 0));
regmap_write(tv->regs, SUN4I_TVE_SLAVE_REG, 0);
sunxi_engine_apply_color_correction(crtc->engine);
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_ENABLE,
SUN4I_TVE_EN_ENABLE);
}
static const struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
.atomic_disable = sun4i_tv_disable,
.atomic_enable = sun4i_tv_enable,
};
static const struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
.atomic_check = drm_atomic_helper_connector_tv_check,
.get_modes = drm_connector_helper_tv_get_modes,
};
static void sun4i_tv_connector_reset(struct drm_connector *connector)
{
drm_atomic_helper_connector_reset(connector);
drm_atomic_helper_connector_tv_reset(connector);
}
static const struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = sun4i_tv_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct regmap_config sun4i_tv_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = SUN4I_TVE_WSS_DATA2_REG,
.name = "tv-encoder",
};
static int sun4i_tv_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_tv *tv;
void __iomem *regs;
int ret;
tv = devm_kzalloc(dev, sizeof(*tv), GFP_KERNEL);
if (!tv)
return -ENOMEM;
tv->drv = drv;
dev_set_drvdata(dev, tv);
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs)) {
dev_err(dev, "Couldn't map the TV encoder registers\n");
return PTR_ERR(regs);
}
tv->regs = devm_regmap_init_mmio(dev, regs,
&sun4i_tv_regmap_config);
if (IS_ERR(tv->regs)) {
dev_err(dev, "Couldn't create the TV encoder regmap\n");
return PTR_ERR(tv->regs);
}
tv->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(tv->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
return PTR_ERR(tv->reset);
}
ret = reset_control_deassert(tv->reset);
if (ret) {
dev_err(dev, "Couldn't deassert our reset line\n");
return ret;
}
tv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(tv->clk)) {
dev_err(dev, "Couldn't get the TV encoder clock\n");
ret = PTR_ERR(tv->clk);
goto err_assert_reset;
}
clk_prepare_enable(tv->clk);
drm_encoder_helper_add(&tv->encoder,
&sun4i_tv_helper_funcs);
ret = drm_simple_encoder_init(drm, &tv->encoder,
DRM_MODE_ENCODER_TVDAC);
if (ret) {
dev_err(dev, "Couldn't initialise the TV encoder\n");
goto err_disable_clk;
}
tv->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm,
dev->of_node);
if (!tv->encoder.possible_crtcs) {
ret = -EPROBE_DEFER;
goto err_disable_clk;
}
drm_connector_helper_add(&tv->connector,
&sun4i_tv_comp_connector_helper_funcs);
ret = drm_connector_init(drm, &tv->connector,
&sun4i_tv_comp_connector_funcs,
DRM_MODE_CONNECTOR_Composite);
if (ret) {
dev_err(dev,
"Couldn't initialise the Composite connector\n");
goto err_cleanup_encoder;
}
tv->connector.interlace_allowed = true;
drm_connector_attach_encoder(&tv->connector, &tv->encoder);
ret = drm_mode_create_tv_properties(drm,
BIT(DRM_MODE_TV_MODE_NTSC) |
BIT(DRM_MODE_TV_MODE_PAL));
if (ret)
goto err_cleanup_connector;
drm_object_attach_property(&tv->connector.base,
drm->mode_config.tv_mode_property,
DRM_MODE_TV_MODE_NTSC);
return 0;
err_cleanup_connector:
drm_connector_cleanup(&tv->connector);
err_cleanup_encoder:
drm_encoder_cleanup(&tv->encoder);
err_disable_clk:
clk_disable_unprepare(tv->clk);
err_assert_reset:
reset_control_assert(tv->reset);
return ret;
}
static void sun4i_tv_unbind(struct device *dev, struct device *master,
void *data)
{
struct sun4i_tv *tv = dev_get_drvdata(dev);
drm_connector_cleanup(&tv->connector);
drm_encoder_cleanup(&tv->encoder);
clk_disable_unprepare(tv->clk);
reset_control_assert(tv->reset);
}
static const struct component_ops sun4i_tv_ops = {
.bind = sun4i_tv_bind,
.unbind = sun4i_tv_unbind,
};
static int sun4i_tv_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &sun4i_tv_ops);
}
static void sun4i_tv_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun4i_tv_ops);
}
static const struct of_device_id sun4i_tv_of_table[] = {
{ .compatible = "allwinner,sun4i-a10-tv-encoder" },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_tv_of_table);
static struct platform_driver sun4i_tv_platform_driver = {
.probe = sun4i_tv_probe,
.remove_new = sun4i_tv_remove,
.driver = {
.name = "sun4i-tve",
.of_match_table = sun4i_tv_of_table,
},
};
module_platform_driver(sun4i_tv_platform_driver);
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Allwinner A10 TV Encoder Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sun4i/sun4i_tv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) Jernej Skrabec <[email protected]>
*/
#include <drm/drm_print.h>
#include "sun8i_csc.h"
#include "sun8i_mixer.h"
static const u32 ccsc_base[][2] = {
[CCSC_MIXER0_LAYOUT] = {CCSC00_OFFSET, CCSC01_OFFSET},
[CCSC_MIXER1_LAYOUT] = {CCSC10_OFFSET, CCSC11_OFFSET},
[CCSC_D1_MIXER0_LAYOUT] = {CCSC00_OFFSET, CCSC01_D1_OFFSET},
};
/*
 * Factors are in two's complement format, with 10 bits for the
 * fractional part. The first three values in each line are
 * multiplication factors and the last value is a constant, which is
 * added at the end.
 */
static const u32 yuv2rgb[2][2][12] = {
[DRM_COLOR_YCBCR_LIMITED_RANGE] = {
[DRM_COLOR_YCBCR_BT601] = {
0x000004A8, 0x00000000, 0x00000662, 0xFFFC8451,
0x000004A8, 0xFFFFFE6F, 0xFFFFFCC0, 0x00021E4D,
0x000004A8, 0x00000811, 0x00000000, 0xFFFBACA9,
},
[DRM_COLOR_YCBCR_BT709] = {
0x000004A8, 0x00000000, 0x0000072B, 0xFFFC1F99,
0x000004A8, 0xFFFFFF26, 0xFFFFFDDF, 0x00013383,
0x000004A8, 0x00000873, 0x00000000, 0xFFFB7BEF,
}
},
[DRM_COLOR_YCBCR_FULL_RANGE] = {
[DRM_COLOR_YCBCR_BT601] = {
0x00000400, 0x00000000, 0x0000059B, 0xFFFD322E,
0x00000400, 0xFFFFFEA0, 0xFFFFFD25, 0x00021DD5,
0x00000400, 0x00000716, 0x00000000, 0xFFFC74BD,
},
[DRM_COLOR_YCBCR_BT709] = {
0x00000400, 0x00000000, 0x0000064C, 0xFFFCD9B4,
0x00000400, 0xFFFFFF41, 0xFFFFFE21, 0x00014F96,
0x00000400, 0x0000076C, 0x00000000, 0xFFFC49EF,
}
},
};
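/*
 * Illustration only, not part of the driver: a minimal sketch of how a
 * row of the DE2 table above maps to software arithmetic. The constant
 * term is pre-scaled like the products, so everything is shifted right
 * by 10 bits at the end. With the limited-range BT.601 row for R and
 * black input (Y = 16, Cb = Cr = 128): 1192 * 16 + 0 + 1634 * 128 -
 * 228271 = -47, which clamps to 0 as expected. The function name is
 * made up for the example.
 */
#if 0
static int de2_csc_apply_row(const s32 row[4], s32 c0, s32 c1, s32 c2)
{
	/* row[0..2]: s22.10 multipliers, row[3]: s22.10 constant */
	s64 acc = (s64)row[0] * c0 + (s64)row[1] * c1 +
		  (s64)row[2] * c2 + row[3];

	return clamp_t(int, acc >> 10, 0, 255);
}
#endif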
/*
 * DE3 has slightly different CSC units. Factors are in two's complement
 * format. The first three factors in a row are multiplication factors,
 * which have 17 bits for the fractional part. The fourth value in a row
 * is comprised of two factors. The upper 16 bits represent a difference,
 * which is subtracted from the input value before multiplication, and
 * the lower 16 bits represent a constant, which is added at the end.
 *
 * x' = c00 * (x + d0) + c01 * (y + d1) + c02 * (z + d2) + const0
 * y' = c10 * (x + d0) + c11 * (y + d1) + c12 * (z + d2) + const1
 * z' = c20 * (x + d0) + c21 * (y + d1) + c22 * (z + d2) + const2
 *
 * Please note that the above formula is true only for the Blender CSC.
 * Other DE3 CSC units take only a positive value for the difference.
 * From what can be deduced from the BSP driver code, those units
 * probably automatically assume that the difference has to be
 * subtracted.
 *
 * Layout of factors in the table:
 * c00 c01 c02 [d0 const0]
 * c10 c11 c12 [d1 const1]
 * c20 c21 c22 [d2 const2]
 */
static const u32 yuv2rgb_de3[2][3][12] = {
[DRM_COLOR_YCBCR_LIMITED_RANGE] = {
[DRM_COLOR_YCBCR_BT601] = {
0x0002542A, 0x00000000, 0x0003312A, 0xFFC00000,
0x0002542A, 0xFFFF376B, 0xFFFE5FC3, 0xFE000000,
0x0002542A, 0x000408D2, 0x00000000, 0xFE000000,
},
[DRM_COLOR_YCBCR_BT709] = {
0x0002542A, 0x00000000, 0x000395E2, 0xFFC00000,
0x0002542A, 0xFFFF92D2, 0xFFFEEF27, 0xFE000000,
0x0002542A, 0x0004398C, 0x00000000, 0xFE000000,
},
[DRM_COLOR_YCBCR_BT2020] = {
0x0002542A, 0x00000000, 0x00035B7B, 0xFFC00000,
0x0002542A, 0xFFFFA017, 0xFFFEB2FC, 0xFE000000,
0x0002542A, 0x00044896, 0x00000000, 0xFE000000,
}
},
[DRM_COLOR_YCBCR_FULL_RANGE] = {
[DRM_COLOR_YCBCR_BT601] = {
0x00020000, 0x00000000, 0x0002CDD2, 0x00000000,
0x00020000, 0xFFFF4FCE, 0xFFFE925D, 0xFE000000,
0x00020000, 0x00038B43, 0x00000000, 0xFE000000,
},
[DRM_COLOR_YCBCR_BT709] = {
0x00020000, 0x00000000, 0x0003264C, 0x00000000,
0x00020000, 0xFFFFA018, 0xFFFF1053, 0xFE000000,
0x00020000, 0x0003B611, 0x00000000, 0xFE000000,
},
[DRM_COLOR_YCBCR_BT2020] = {
0x00020000, 0x00000000, 0x0002F2FE, 0x00000000,
0x00020000, 0xFFFFABC0, 0xFFFEDB78, 0xFE000000,
0x00020000, 0x0003C346, 0x00000000, 0xFE000000,
}
},
};
static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
enum sun8i_csc_mode mode,
enum drm_color_encoding encoding,
enum drm_color_range range)
{
const u32 *table;
u32 base_reg;
int i;
table = yuv2rgb[range][encoding];
switch (mode) {
case SUN8I_CSC_MODE_YUV2RGB:
base_reg = SUN8I_CSC_COEFF(base, 0);
regmap_bulk_write(map, base_reg, table, 12);
break;
case SUN8I_CSC_MODE_YVU2RGB:
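		/*
		 * Writing the U and V columns of each row to each
		 * other's register slot turns the YUV matrix into a
		 * YVU one; the Y column and the constant stay in place.
		 */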
for (i = 0; i < 12; i++) {
if ((i & 3) == 1)
base_reg = SUN8I_CSC_COEFF(base, i + 1);
else if ((i & 3) == 2)
base_reg = SUN8I_CSC_COEFF(base, i - 1);
else
base_reg = SUN8I_CSC_COEFF(base, i);
regmap_write(map, base_reg, table[i]);
}
break;
default:
DRM_WARN("Wrong CSC mode specified.\n");
return;
}
}
static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
enum sun8i_csc_mode mode,
enum drm_color_encoding encoding,
enum drm_color_range range)
{
const u32 *table;
u32 addr;
int i;
table = yuv2rgb_de3[range][encoding];
switch (mode) {
case SUN8I_CSC_MODE_YUV2RGB:
addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0);
regmap_bulk_write(map, addr, table, 12);
break;
case SUN8I_CSC_MODE_YVU2RGB:
for (i = 0; i < 12; i++) {
if ((i & 3) == 1)
addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
layer,
i + 1);
else if ((i & 3) == 2)
addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
layer,
i - 1);
else
addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
layer, i);
regmap_write(map, addr, table[i]);
}
break;
default:
DRM_WARN("Wrong CSC mode specified.\n");
return;
}
}
static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable)
{
u32 val;
if (enable)
val = SUN8I_CSC_CTRL_EN;
else
val = 0;
regmap_update_bits(map, SUN8I_CSC_CTRL(base), SUN8I_CSC_CTRL_EN, val);
}
static void sun8i_de3_ccsc_enable(struct regmap *map, int layer, bool enable)
{
u32 val, mask;
mask = SUN50I_MIXER_BLEND_CSC_CTL_EN(layer);
if (enable)
val = mask;
else
val = 0;
regmap_update_bits(map, SUN50I_MIXER_BLEND_CSC_CTL(DE3_BLD_BASE),
mask, val);
}
void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
enum sun8i_csc_mode mode,
enum drm_color_encoding encoding,
enum drm_color_range range)
{
u32 base;
if (mixer->cfg->is_de3) {
sun8i_de3_ccsc_set_coefficients(mixer->engine.regs, layer,
mode, encoding, range);
return;
}
base = ccsc_base[mixer->cfg->ccsc][layer];
sun8i_csc_set_coefficients(mixer->engine.regs, base,
mode, encoding, range);
}
void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable)
{
u32 base;
if (mixer->cfg->is_de3) {
sun8i_de3_ccsc_enable(mixer->engine.regs, layer, enable);
return;
}
base = ccsc_base[mixer->cfg->ccsc][layer];
sun8i_csc_enable(mixer->engine.regs, base, enable);
}
| linux-master | drivers/gpu/drm/sun4i/sun8i_csc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2018 Jernej Skrabec <[email protected]>
*/
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "sun8i_dw_hdmi.h"
/*
 * The I2C address can actually be any value. Here it is set to the
 * same value as in the BSP driver.
 */
#define I2C_ADDR 0x69
static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = {
{
30666000, {
{ 0x00b3, 0x0000 },
{ 0x2153, 0x0000 },
{ 0x40f3, 0x0000 },
},
}, {
36800000, {
{ 0x00b3, 0x0000 },
{ 0x2153, 0x0000 },
{ 0x40a2, 0x0001 },
},
}, {
46000000, {
{ 0x00b3, 0x0000 },
{ 0x2142, 0x0001 },
{ 0x40a2, 0x0001 },
},
}, {
61333000, {
{ 0x0072, 0x0001 },
{ 0x2142, 0x0001 },
{ 0x40a2, 0x0001 },
},
}, {
73600000, {
{ 0x0072, 0x0001 },
{ 0x2142, 0x0001 },
{ 0x4061, 0x0002 },
},
}, {
92000000, {
{ 0x0072, 0x0001 },
{ 0x2145, 0x0002 },
{ 0x4061, 0x0002 },
},
}, {
122666000, {
{ 0x0051, 0x0002 },
{ 0x2145, 0x0002 },
{ 0x4061, 0x0002 },
},
}, {
147200000, {
{ 0x0051, 0x0002 },
{ 0x2145, 0x0002 },
{ 0x4064, 0x0003 },
},
}, {
184000000, {
{ 0x0051, 0x0002 },
{ 0x214c, 0x0003 },
{ 0x4064, 0x0003 },
},
}, {
226666000, {
{ 0x0040, 0x0003 },
{ 0x214c, 0x0003 },
{ 0x4064, 0x0003 },
},
}, {
272000000, {
{ 0x0040, 0x0003 },
{ 0x214c, 0x0003 },
{ 0x5a64, 0x0003 },
},
}, {
340000000, {
{ 0x0040, 0x0003 },
{ 0x3b4c, 0x0003 },
{ 0x5a64, 0x0003 },
},
}, {
594000000, {
{ 0x1a40, 0x0003 },
{ 0x3b4c, 0x0003 },
{ 0x5a64, 0x0003 },
},
}, {
~0UL, {
{ 0x0000, 0x0000 },
{ 0x0000, 0x0000 },
{ 0x0000, 0x0000 },
},
}
};
static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = {
/* pixelclk bpp8 bpp10 bpp12 */
{ 27000000, { 0x0012, 0x0000, 0x0000 }, },
{ 74250000, { 0x0013, 0x001a, 0x001b }, },
{ 148500000, { 0x0019, 0x0033, 0x0034 }, },
{ 297000000, { 0x0019, 0x001b, 0x001b }, },
{ 594000000, { 0x0010, 0x001b, 0x001b }, },
{ ~0UL, { 0x0000, 0x0000, 0x0000 }, }
};
static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = {
/*pixelclk symbol term vlev*/
{ 27000000, 0x8009, 0x0007, 0x02b0 },
{ 74250000, 0x8009, 0x0006, 0x022d },
{ 148500000, 0x8029, 0x0006, 0x0270 },
{ 297000000, 0x8039, 0x0005, 0x01ab },
{ 594000000, 0x8029, 0x0000, 0x008a },
{ ~0UL, 0x0000, 0x0000, 0x0000}
};
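/*
 * As far as the lookup semantics in the dw-hdmi core go, these tables
 * appear to be walked in order and the first entry whose pixel clock is
 * greater than or equal to the actual one is used; the ~0UL rows act as
 * sentinels and catch-all defaults.
 */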
static void sun8i_hdmi_phy_set_polarity(struct sun8i_hdmi_phy *phy,
const struct drm_display_mode *mode)
{
u32 val = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
val |= SUN8I_HDMI_PHY_DBG_CTRL_POL_NHSYNC;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
val |= SUN8I_HDMI_PHY_DBG_CTRL_POL_NVSYNC;
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_DBG_CTRL_REG,
SUN8I_HDMI_PHY_DBG_CTRL_POL_MASK, val);
}
static int sun8i_a83t_hdmi_phy_config(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *display,
const struct drm_display_mode *mode)
{
unsigned int clk_rate = mode->crtc_clock * 1000;
struct sun8i_hdmi_phy *phy = data;
sun8i_hdmi_phy_set_polarity(phy, mode);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN,
SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN);
/* power down */
dw_hdmi_phy_gen2_txpwron(hdmi, 0);
dw_hdmi_phy_gen2_pddq(hdmi, 1);
dw_hdmi_phy_gen2_reset(hdmi);
dw_hdmi_phy_gen2_pddq(hdmi, 0);
dw_hdmi_phy_i2c_set_addr(hdmi, I2C_ADDR);
	/*
	 * Values are taken from the BSP HDMI driver. Although Allwinner
	 * didn't release any documentation, an explanation of these
	 * values can be found in the i.MX 6Dual/6Quad Reference Manual.
	 */
if (clk_rate <= 27000000) {
dw_hdmi_phy_i2c_write(hdmi, 0x01e0, 0x06);
dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
dw_hdmi_phy_i2c_write(hdmi, 0x08da, 0x10);
dw_hdmi_phy_i2c_write(hdmi, 0x0007, 0x19);
dw_hdmi_phy_i2c_write(hdmi, 0x0318, 0x0e);
dw_hdmi_phy_i2c_write(hdmi, 0x8009, 0x09);
} else if (clk_rate <= 74250000) {
dw_hdmi_phy_i2c_write(hdmi, 0x0540, 0x06);
dw_hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x10);
dw_hdmi_phy_i2c_write(hdmi, 0x0007, 0x19);
dw_hdmi_phy_i2c_write(hdmi, 0x02b5, 0x0e);
dw_hdmi_phy_i2c_write(hdmi, 0x8009, 0x09);
} else if (clk_rate <= 148500000) {
dw_hdmi_phy_i2c_write(hdmi, 0x04a0, 0x06);
dw_hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x10);
dw_hdmi_phy_i2c_write(hdmi, 0x0002, 0x19);
dw_hdmi_phy_i2c_write(hdmi, 0x0021, 0x0e);
dw_hdmi_phy_i2c_write(hdmi, 0x8029, 0x09);
} else {
dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x06);
dw_hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x10);
dw_hdmi_phy_i2c_write(hdmi, 0x0002, 0x19);
dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x0e);
dw_hdmi_phy_i2c_write(hdmi, 0x802b, 0x09);
}
dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x1e);
dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x13);
dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x17);
dw_hdmi_phy_gen2_txpwron(hdmi, 1);
return 0;
}
static void sun8i_a83t_hdmi_phy_disable(struct dw_hdmi *hdmi, void *data)
{
struct sun8i_hdmi_phy *phy = data;
dw_hdmi_phy_gen2_txpwron(hdmi, 0);
dw_hdmi_phy_gen2_pddq(hdmi, 1);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN, 0);
}
static const struct dw_hdmi_phy_ops sun8i_a83t_hdmi_phy_ops = {
.init = sun8i_a83t_hdmi_phy_config,
.disable = sun8i_a83t_hdmi_phy_disable,
.read_hpd = dw_hdmi_phy_read_hpd,
.update_hpd = dw_hdmi_phy_update_hpd,
.setup_hpd = dw_hdmi_phy_setup_hpd,
};
static int sun8i_h3_hdmi_phy_config(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *display,
const struct drm_display_mode *mode)
{
unsigned int clk_rate = mode->crtc_clock * 1000;
struct sun8i_hdmi_phy *phy = data;
u32 pll_cfg1_init;
u32 pll_cfg2_init;
u32 ana_cfg1_end;
u32 ana_cfg2_init;
u32 ana_cfg3_init;
u32 b_offset = 0;
u32 val;
if (phy->variant->has_phy_clk)
clk_set_rate(phy->clk_phy, clk_rate);
sun8i_hdmi_phy_set_polarity(phy, mode);
/* bandwidth / frequency independent settings */
pll_cfg1_init = SUN8I_HDMI_PHY_PLL_CFG1_LDO2_EN |
SUN8I_HDMI_PHY_PLL_CFG1_LDO1_EN |
SUN8I_HDMI_PHY_PLL_CFG1_LDO_VSET(7) |
SUN8I_HDMI_PHY_PLL_CFG1_UNKNOWN(1) |
SUN8I_HDMI_PHY_PLL_CFG1_PLLDBEN |
SUN8I_HDMI_PHY_PLL_CFG1_CS |
SUN8I_HDMI_PHY_PLL_CFG1_CP_S(2) |
SUN8I_HDMI_PHY_PLL_CFG1_CNT_INT(63) |
SUN8I_HDMI_PHY_PLL_CFG1_BWS;
pll_cfg2_init = SUN8I_HDMI_PHY_PLL_CFG2_SV_H |
SUN8I_HDMI_PHY_PLL_CFG2_VCOGAIN_EN |
SUN8I_HDMI_PHY_PLL_CFG2_SDIV2;
ana_cfg1_end = SUN8I_HDMI_PHY_ANA_CFG1_REG_SVBH(1) |
SUN8I_HDMI_PHY_ANA_CFG1_AMP_OPT |
SUN8I_HDMI_PHY_ANA_CFG1_EMP_OPT |
SUN8I_HDMI_PHY_ANA_CFG1_AMPCK_OPT |
SUN8I_HDMI_PHY_ANA_CFG1_EMPCK_OPT |
SUN8I_HDMI_PHY_ANA_CFG1_ENRCAL |
SUN8I_HDMI_PHY_ANA_CFG1_ENCALOG |
SUN8I_HDMI_PHY_ANA_CFG1_REG_SCKTMDS |
SUN8I_HDMI_PHY_ANA_CFG1_TMDSCLK_EN |
SUN8I_HDMI_PHY_ANA_CFG1_TXEN_MASK |
SUN8I_HDMI_PHY_ANA_CFG1_TXEN_ALL |
SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDSCLK |
SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS2 |
SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS1 |
SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS0 |
SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS2 |
SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS1 |
SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS0 |
SUN8I_HDMI_PHY_ANA_CFG1_CKEN |
SUN8I_HDMI_PHY_ANA_CFG1_LDOEN |
SUN8I_HDMI_PHY_ANA_CFG1_ENVBS |
SUN8I_HDMI_PHY_ANA_CFG1_ENBI;
ana_cfg2_init = SUN8I_HDMI_PHY_ANA_CFG2_M_EN |
SUN8I_HDMI_PHY_ANA_CFG2_REG_DENCK |
SUN8I_HDMI_PHY_ANA_CFG2_REG_DEN |
SUN8I_HDMI_PHY_ANA_CFG2_REG_CKSS(1) |
SUN8I_HDMI_PHY_ANA_CFG2_REG_CSMPS(1);
ana_cfg3_init = SUN8I_HDMI_PHY_ANA_CFG3_REG_WIRE(0x3e0) |
SUN8I_HDMI_PHY_ANA_CFG3_SDAEN |
SUN8I_HDMI_PHY_ANA_CFG3_SCLEN;
/* bandwidth / frequency dependent settings */
if (clk_rate <= 27000000) {
pll_cfg1_init |= SUN8I_HDMI_PHY_PLL_CFG1_HV_IS_33 |
SUN8I_HDMI_PHY_PLL_CFG1_CNT_INT(32);
pll_cfg2_init |= SUN8I_HDMI_PHY_PLL_CFG2_VCO_S(4) |
SUN8I_HDMI_PHY_PLL_CFG2_S(4);
ana_cfg1_end |= SUN8I_HDMI_PHY_ANA_CFG1_REG_CALSW;
ana_cfg2_init |= SUN8I_HDMI_PHY_ANA_CFG2_REG_SLV(4) |
SUN8I_HDMI_PHY_ANA_CFG2_REG_RESDI(phy->rcal);
ana_cfg3_init |= SUN8I_HDMI_PHY_ANA_CFG3_REG_AMPCK(3) |
SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(5);
} else if (clk_rate <= 74250000) {
pll_cfg1_init |= SUN8I_HDMI_PHY_PLL_CFG1_HV_IS_33 |
SUN8I_HDMI_PHY_PLL_CFG1_CNT_INT(32);
pll_cfg2_init |= SUN8I_HDMI_PHY_PLL_CFG2_VCO_S(4) |
SUN8I_HDMI_PHY_PLL_CFG2_S(5);
ana_cfg1_end |= SUN8I_HDMI_PHY_ANA_CFG1_REG_CALSW;
ana_cfg2_init |= SUN8I_HDMI_PHY_ANA_CFG2_REG_SLV(4) |
SUN8I_HDMI_PHY_ANA_CFG2_REG_RESDI(phy->rcal);
ana_cfg3_init |= SUN8I_HDMI_PHY_ANA_CFG3_REG_AMPCK(5) |
SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(7);
} else if (clk_rate <= 148500000) {
pll_cfg1_init |= SUN8I_HDMI_PHY_PLL_CFG1_HV_IS_33 |
SUN8I_HDMI_PHY_PLL_CFG1_CNT_INT(32);
pll_cfg2_init |= SUN8I_HDMI_PHY_PLL_CFG2_VCO_S(4) |
SUN8I_HDMI_PHY_PLL_CFG2_S(6);
ana_cfg2_init |= SUN8I_HDMI_PHY_ANA_CFG2_REG_BIGSWCK |
SUN8I_HDMI_PHY_ANA_CFG2_REG_BIGSW |
SUN8I_HDMI_PHY_ANA_CFG2_REG_SLV(2);
ana_cfg3_init |= SUN8I_HDMI_PHY_ANA_CFG3_REG_AMPCK(7) |
SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(9);
} else {
b_offset = 2;
pll_cfg1_init |= SUN8I_HDMI_PHY_PLL_CFG1_CNT_INT(63);
pll_cfg2_init |= SUN8I_HDMI_PHY_PLL_CFG2_VCO_S(6) |
SUN8I_HDMI_PHY_PLL_CFG2_S(7);
ana_cfg2_init |= SUN8I_HDMI_PHY_ANA_CFG2_REG_BIGSWCK |
SUN8I_HDMI_PHY_ANA_CFG2_REG_BIGSW |
SUN8I_HDMI_PHY_ANA_CFG2_REG_SLV(4);
ana_cfg3_init |= SUN8I_HDMI_PHY_ANA_CFG3_REG_AMPCK(9) |
SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(13) |
SUN8I_HDMI_PHY_ANA_CFG3_REG_EMP(3);
}
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_TXEN_MASK, 0);
	/*
	 * NOTE: We have to be careful not to overwrite the PHY parent
	 * clock selection bit and the clock divider.
	 */
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
(u32)~SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
pll_cfg1_init);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG2_REG,
(u32)~SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_MSK,
pll_cfg2_init);
usleep_range(10000, 15000);
regmap_write(phy->regs, SUN8I_HDMI_PHY_PLL_CFG3_REG,
SUN8I_HDMI_PHY_PLL_CFG3_SOUT_DIV2);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
SUN8I_HDMI_PHY_PLL_CFG1_PLLEN,
SUN8I_HDMI_PHY_PLL_CFG1_PLLEN);
msleep(100);
/* get B value */
regmap_read(phy->regs, SUN8I_HDMI_PHY_ANA_STS_REG, &val);
val = (val & SUN8I_HDMI_PHY_ANA_STS_B_OUT_MSK) >>
SUN8I_HDMI_PHY_ANA_STS_B_OUT_SHIFT;
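	/* B is a 6-bit field, so clamp the adjusted value to 0x3f */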
val = min(val + b_offset, (u32)0x3f);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
SUN8I_HDMI_PHY_PLL_CFG1_REG_OD1 |
SUN8I_HDMI_PHY_PLL_CFG1_REG_OD,
SUN8I_HDMI_PHY_PLL_CFG1_REG_OD1 |
SUN8I_HDMI_PHY_PLL_CFG1_REG_OD);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
SUN8I_HDMI_PHY_PLL_CFG1_B_IN_MSK,
val << SUN8I_HDMI_PHY_PLL_CFG1_B_IN_SHIFT);
msleep(100);
regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG, ana_cfg1_end);
regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG2_REG, ana_cfg2_init);
regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG3_REG, ana_cfg3_init);
return 0;
}
static void sun8i_h3_hdmi_phy_disable(struct dw_hdmi *hdmi, void *data)
{
struct sun8i_hdmi_phy *phy = data;
regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_LDOEN |
SUN8I_HDMI_PHY_ANA_CFG1_ENVBS |
SUN8I_HDMI_PHY_ANA_CFG1_ENBI);
regmap_write(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, 0);
}
static const struct dw_hdmi_phy_ops sun8i_h3_hdmi_phy_ops = {
.init = sun8i_h3_hdmi_phy_config,
.disable = sun8i_h3_hdmi_phy_disable,
.read_hpd = dw_hdmi_phy_read_hpd,
.update_hpd = dw_hdmi_phy_update_hpd,
.setup_hpd = dw_hdmi_phy_setup_hpd,
};
static void sun8i_hdmi_phy_unlock(struct sun8i_hdmi_phy *phy)
{
/* enable read access to HDMI controller */
regmap_write(phy->regs, SUN8I_HDMI_PHY_READ_EN_REG,
SUN8I_HDMI_PHY_READ_EN_MAGIC);
/* unscramble register offsets */
regmap_write(phy->regs, SUN8I_HDMI_PHY_UNSCRAMBLE_REG,
SUN8I_HDMI_PHY_UNSCRAMBLE_MAGIC);
}
static void sun50i_hdmi_phy_init_h6(struct sun8i_hdmi_phy *phy)
{
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN,
SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
0xffff0000, 0x80c00000);
}
static void sun8i_hdmi_phy_init_a83t(struct sun8i_hdmi_phy *phy)
{
sun8i_hdmi_phy_unlock(phy);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_DBG_CTRL_REG,
SUN8I_HDMI_PHY_DBG_CTRL_PX_LOCK,
SUN8I_HDMI_PHY_DBG_CTRL_PX_LOCK);
	/*
	 * Set the PHY I2C address. It must match the address set by
	 * dw_hdmi_phy_i2c_set_addr().
	 */
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_DBG_CTRL_REG,
SUN8I_HDMI_PHY_DBG_CTRL_ADDR_MASK,
SUN8I_HDMI_PHY_DBG_CTRL_ADDR(I2C_ADDR));
}
static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
{
unsigned int val;
sun8i_hdmi_phy_unlock(phy);
regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG, 0);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_ENBI,
SUN8I_HDMI_PHY_ANA_CFG1_ENBI);
udelay(5);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_TMDSCLK_EN,
SUN8I_HDMI_PHY_ANA_CFG1_TMDSCLK_EN);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_ENVBS,
SUN8I_HDMI_PHY_ANA_CFG1_ENVBS);
usleep_range(10, 20);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_LDOEN,
SUN8I_HDMI_PHY_ANA_CFG1_LDOEN);
udelay(5);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_CKEN,
SUN8I_HDMI_PHY_ANA_CFG1_CKEN);
usleep_range(40, 100);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_ENRCAL,
SUN8I_HDMI_PHY_ANA_CFG1_ENRCAL);
usleep_range(100, 200);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_ENCALOG,
SUN8I_HDMI_PHY_ANA_CFG1_ENCALOG);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS0 |
SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS1 |
SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS2,
SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS0 |
SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS1 |
SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS2);
/* wait for calibration to finish */
regmap_read_poll_timeout(phy->regs, SUN8I_HDMI_PHY_ANA_STS_REG, val,
(val & SUN8I_HDMI_PHY_ANA_STS_RCALEND2D),
100, 2000);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDSCLK,
SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDSCLK);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS0 |
SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS1 |
SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS2 |
SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDSCLK,
SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS0 |
SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS1 |
SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS2 |
SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDSCLK);
/* enable DDC communication */
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG3_REG,
SUN8I_HDMI_PHY_ANA_CFG3_SCLEN |
SUN8I_HDMI_PHY_ANA_CFG3_SDAEN,
SUN8I_HDMI_PHY_ANA_CFG3_SCLEN |
SUN8I_HDMI_PHY_ANA_CFG3_SDAEN);
/* reset PHY PLL clock parent */
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK, 0);
/* set HW control of CEC pins */
regmap_write(phy->regs, SUN8I_HDMI_PHY_CEC_REG, 0);
/* read calibration data */
regmap_read(phy->regs, SUN8I_HDMI_PHY_ANA_STS_REG, &val);
phy->rcal = (val & SUN8I_HDMI_PHY_ANA_STS_RCAL_MASK) >> 2;
}
int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
{
int ret;
ret = reset_control_deassert(phy->rst_phy);
if (ret) {
dev_err(phy->dev, "Cannot deassert phy reset control: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(phy->clk_bus);
if (ret) {
dev_err(phy->dev, "Cannot enable bus clock: %d\n", ret);
goto err_assert_rst_phy;
}
ret = clk_prepare_enable(phy->clk_mod);
if (ret) {
dev_err(phy->dev, "Cannot enable mod clock: %d\n", ret);
goto err_disable_clk_bus;
}
if (phy->variant->has_phy_clk) {
ret = sun8i_phy_clk_create(phy, phy->dev,
phy->variant->has_second_pll);
if (ret) {
dev_err(phy->dev, "Couldn't create the PHY clock\n");
goto err_disable_clk_mod;
}
clk_prepare_enable(phy->clk_phy);
}
phy->variant->phy_init(phy);
return 0;
err_disable_clk_mod:
clk_disable_unprepare(phy->clk_mod);
err_disable_clk_bus:
clk_disable_unprepare(phy->clk_bus);
err_assert_rst_phy:
reset_control_assert(phy->rst_phy);
return ret;
}
void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy)
{
clk_disable_unprepare(phy->clk_mod);
clk_disable_unprepare(phy->clk_bus);
clk_disable_unprepare(phy->clk_phy);
reset_control_assert(phy->rst_phy);
}
void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
struct dw_hdmi_plat_data *plat_data)
{
const struct sun8i_hdmi_phy_variant *variant = phy->variant;
if (variant->phy_ops) {
plat_data->phy_ops = variant->phy_ops;
plat_data->phy_name = "sun8i_dw_hdmi_phy";
plat_data->phy_data = phy;
} else {
plat_data->mpll_cfg = variant->mpll_cfg;
plat_data->cur_ctr = variant->cur_ctr;
plat_data->phy_config = variant->phy_cfg;
}
}
static const struct regmap_config sun8i_hdmi_phy_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = SUN8I_HDMI_PHY_CEC_REG,
.name = "phy"
};
static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
.phy_ops = &sun8i_a83t_hdmi_phy_ops,
.phy_init = &sun8i_hdmi_phy_init_a83t,
};
static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
.has_phy_clk = true,
.phy_ops = &sun8i_h3_hdmi_phy_ops,
.phy_init = &sun8i_hdmi_phy_init_h3,
};
static const struct sun8i_hdmi_phy_variant sun8i_r40_hdmi_phy = {
.has_phy_clk = true,
.has_second_pll = true,
.phy_ops = &sun8i_h3_hdmi_phy_ops,
.phy_init = &sun8i_hdmi_phy_init_h3,
};
static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
.has_phy_clk = true,
.phy_ops = &sun8i_h3_hdmi_phy_ops,
.phy_init = &sun8i_hdmi_phy_init_h3,
};
static const struct sun8i_hdmi_phy_variant sun50i_h6_hdmi_phy = {
.cur_ctr = sun50i_h6_cur_ctr,
.mpll_cfg = sun50i_h6_mpll_cfg,
.phy_cfg = sun50i_h6_phy_config,
.phy_init = &sun50i_hdmi_phy_init_h6,
};
static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
{
.compatible = "allwinner,sun8i-a83t-hdmi-phy",
.data = &sun8i_a83t_hdmi_phy,
},
{
.compatible = "allwinner,sun8i-h3-hdmi-phy",
.data = &sun8i_h3_hdmi_phy,
},
{
.compatible = "allwinner,sun8i-r40-hdmi-phy",
.data = &sun8i_r40_hdmi_phy,
},
{
.compatible = "allwinner,sun50i-a64-hdmi-phy",
.data = &sun50i_a64_hdmi_phy,
},
{
.compatible = "allwinner,sun50i-h6-hdmi-phy",
.data = &sun50i_h6_hdmi_phy,
},
{ /* sentinel */ }
};
int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
{
struct platform_device *pdev = of_find_device_by_node(node);
struct sun8i_hdmi_phy *phy;
if (!pdev)
return -EPROBE_DEFER;
phy = platform_get_drvdata(pdev);
if (!phy) {
put_device(&pdev->dev);
return -EPROBE_DEFER;
}
hdmi->phy = phy;
put_device(&pdev->dev);
return 0;
}
static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sun8i_hdmi_phy *phy;
void __iomem *regs;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
phy->variant = of_device_get_match_data(dev);
phy->dev = dev;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return dev_err_probe(dev, PTR_ERR(regs),
"Couldn't map the HDMI PHY registers\n");
phy->regs = devm_regmap_init_mmio(dev, regs,
&sun8i_hdmi_phy_regmap_config);
if (IS_ERR(phy->regs))
return dev_err_probe(dev, PTR_ERR(phy->regs),
"Couldn't create the HDMI PHY regmap\n");
phy->clk_bus = devm_clk_get(dev, "bus");
if (IS_ERR(phy->clk_bus))
return dev_err_probe(dev, PTR_ERR(phy->clk_bus),
"Could not get bus clock\n");
phy->clk_mod = devm_clk_get(dev, "mod");
if (IS_ERR(phy->clk_mod))
return dev_err_probe(dev, PTR_ERR(phy->clk_mod),
"Could not get mod clock\n");
if (phy->variant->has_phy_clk) {
phy->clk_pll0 = devm_clk_get(dev, "pll-0");
if (IS_ERR(phy->clk_pll0))
return dev_err_probe(dev, PTR_ERR(phy->clk_pll0),
"Could not get pll-0 clock\n");
if (phy->variant->has_second_pll) {
phy->clk_pll1 = devm_clk_get(dev, "pll-1");
if (IS_ERR(phy->clk_pll1))
return dev_err_probe(dev, PTR_ERR(phy->clk_pll1),
"Could not get pll-1 clock\n");
}
}
phy->rst_phy = devm_reset_control_get_shared(dev, "phy");
if (IS_ERR(phy->rst_phy))
return dev_err_probe(dev, PTR_ERR(phy->rst_phy),
"Could not get phy reset control\n");
platform_set_drvdata(pdev, phy);
return 0;
}
struct platform_driver sun8i_hdmi_phy_driver = {
.probe = sun8i_hdmi_phy_probe,
.driver = {
.name = "sun8i-hdmi-phy",
.of_match_table = sun8i_hdmi_phy_of_table,
},
};
| linux-master | drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Free Electrons
*
* Maxime Ripard <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
struct sun6i_drc {
struct clk *bus_clk;
struct clk *mod_clk;
struct reset_control *reset;
};
static int sun6i_drc_bind(struct device *dev, struct device *master,
void *data)
{
struct sun6i_drc *drc;
int ret;
drc = devm_kzalloc(dev, sizeof(*drc), GFP_KERNEL);
if (!drc)
return -ENOMEM;
dev_set_drvdata(dev, drc);
drc->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(drc->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
return PTR_ERR(drc->reset);
}
ret = reset_control_deassert(drc->reset);
if (ret) {
dev_err(dev, "Couldn't deassert our reset line\n");
return ret;
}
drc->bus_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(drc->bus_clk)) {
dev_err(dev, "Couldn't get our bus clock\n");
ret = PTR_ERR(drc->bus_clk);
goto err_assert_reset;
}
clk_prepare_enable(drc->bus_clk);
drc->mod_clk = devm_clk_get(dev, "mod");
if (IS_ERR(drc->mod_clk)) {
dev_err(dev, "Couldn't get our mod clock\n");
ret = PTR_ERR(drc->mod_clk);
goto err_disable_bus_clk;
}
ret = clk_set_rate_exclusive(drc->mod_clk, 300000000);
if (ret) {
dev_err(dev, "Couldn't set the module clock frequency\n");
goto err_disable_bus_clk;
}
clk_prepare_enable(drc->mod_clk);
return 0;
err_disable_bus_clk:
clk_disable_unprepare(drc->bus_clk);
err_assert_reset:
reset_control_assert(drc->reset);
return ret;
}
static void sun6i_drc_unbind(struct device *dev, struct device *master,
void *data)
{
struct sun6i_drc *drc = dev_get_drvdata(dev);
clk_rate_exclusive_put(drc->mod_clk);
clk_disable_unprepare(drc->mod_clk);
clk_disable_unprepare(drc->bus_clk);
reset_control_assert(drc->reset);
}
static const struct component_ops sun6i_drc_ops = {
.bind = sun6i_drc_bind,
.unbind = sun6i_drc_unbind,
};
static int sun6i_drc_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &sun6i_drc_ops);
}
static void sun6i_drc_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun6i_drc_ops);
}
static const struct of_device_id sun6i_drc_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-drc" },
{ .compatible = "allwinner,sun6i-a31s-drc" },
{ .compatible = "allwinner,sun8i-a23-drc" },
{ .compatible = "allwinner,sun8i-a33-drc" },
{ .compatible = "allwinner,sun9i-a80-drc" },
{ }
};
MODULE_DEVICE_TABLE(of, sun6i_drc_of_table);
static struct platform_driver sun6i_drc_platform_driver = {
.probe = sun6i_drc_probe,
.remove_new = sun6i_drc_remove,
.driver = {
.name = "sun6i-drc",
.of_match_table = sun6i_drc_of_table,
},
};
module_platform_driver(sun6i_drc_platform_driver);
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Allwinner A31 Dynamic Range Control (DRC) Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sun4i/sun6i_drc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) Icenowy Zheng <[email protected]>
*
* Based on sun4i_layer.h, which is:
* Copyright (C) 2015 Free Electrons
* Copyright (C) 2015 NextThing Co
*
* Maxime Ripard <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun8i_mixer.h"
#include "sun8i_ui_layer.h"
#include "sun8i_ui_scaler.h"
static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
int overlay, bool enable, unsigned int zpos,
unsigned int old_zpos)
{
u32 val, bld_base, ch_base;
bld_base = sun8i_blender_base(mixer);
ch_base = sun8i_channel_base(mixer, channel);
DRM_DEBUG_DRIVER("%sabling channel %d overlay %d\n",
enable ? "En" : "Dis", channel, overlay);
if (enable)
val = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN;
else
val = 0;
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN, val);
if (!enable || zpos != old_zpos) {
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
0);
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_BLEND_ROUTE(bld_base),
SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
0);
}
if (enable) {
val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
val, val);
val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_BLEND_ROUTE(bld_base),
SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
val);
}
}
static void sun8i_ui_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
{
u32 mask, val, ch_base;
ch_base = sun8i_channel_base(mixer, channel);
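	/*
	 * When the plane is fully opaque, only per-pixel alpha is used;
	 * otherwise the pixel alpha is combined with the plane-wide
	 * alpha value programmed below.
	 */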
mask = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_MASK |
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MASK;
val = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA(plane->state->alpha >> 8);
val |= (plane->state->alpha == DRM_BLEND_ALPHA_OPAQUE) ?
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_PIXEL :
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_COMBINED;
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
mask, val);
}
static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane,
unsigned int zpos)
{
struct drm_plane_state *state = plane->state;
u32 src_w, src_h, dst_w, dst_h;
u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
DRM_DEBUG_DRIVER("Updating UI channel %d overlay %d\n",
channel, overlay);
bld_base = sun8i_blender_base(mixer);
ch_base = sun8i_channel_base(mixer, channel);
src_w = drm_rect_width(&state->src) >> 16;
src_h = drm_rect_height(&state->src) >> 16;
dst_w = drm_rect_width(&state->dst);
dst_h = drm_rect_height(&state->dst);
hphase = state->src.x1 & 0xffff;
vphase = state->src.y1 & 0xffff;
insize = SUN8I_MIXER_SIZE(src_w, src_h);
outsize = SUN8I_MIXER_SIZE(dst_w, dst_h);
/* Set height and width */
DRM_DEBUG_DRIVER("Layer source offset X: %d Y: %d\n",
state->src.x1 >> 16, state->src.y1 >> 16);
DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_UI_LAYER_SIZE(ch_base, overlay),
insize);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_UI_OVL_SIZE(ch_base),
insize);
if (insize != outsize || hphase || vphase) {
u32 hscale, vscale;
DRM_DEBUG_DRIVER("HW scaling is enabled\n");
hscale = state->src_w / state->crtc_w;
vscale = state->src_h / state->crtc_h;
sun8i_ui_scaler_setup(mixer, channel, src_w, src_h, dst_w,
dst_h, hscale, vscale, hphase, vphase);
sun8i_ui_scaler_enable(mixer, channel, true);
} else {
DRM_DEBUG_DRIVER("HW scaling is not needed\n");
sun8i_ui_scaler_enable(mixer, channel, false);
}
/* Set base coordinates */
DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
regmap_write(mixer->engine.regs,
SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
return 0;
}
static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
const struct drm_format_info *fmt;
u32 val, ch_base, hw_fmt;
int ret;
ch_base = sun8i_channel_base(mixer, channel);
fmt = state->fb->format;
ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
if (ret || fmt->is_yuv) {
DRM_DEBUG_DRIVER("Invalid format\n");
return -EINVAL;
}
val = hw_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val);
return 0;
}
static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
struct drm_gem_dma_object *gem;
dma_addr_t dma_addr;
u32 ch_base;
int bpp;
ch_base = sun8i_channel_base(mixer, channel);
	/* Get the DMA address of the buffer in memory */
gem = drm_fb_dma_get_gem_obj(fb, 0);
DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->dma_addr);
/* Compute the start of the displayed memory */
bpp = fb->format->cpp[0];
dma_addr = gem->dma_addr + fb->offsets[0];
/* Fixup framebuffer address for src coordinates */
dma_addr += (state->src.x1 >> 16) * bpp;
dma_addr += (state->src.y1 >> 16) * fb->pitches[0];
/* Set the line width */
DRM_DEBUG_DRIVER("Layer line width: %d bytes\n", fb->pitches[0]);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch_base, overlay),
fb->pitches[0]);
DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &dma_addr);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch_base, overlay),
lower_32_bits(dma_addr));
return 0;
}
static int sun8i_ui_layer_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
int min_scale, max_scale;
if (!crtc)
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(state,
crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
min_scale = DRM_PLANE_NO_SCALING;
max_scale = DRM_PLANE_NO_SCALING;
if (layer->mixer->cfg->scaler_mask & BIT(layer->channel)) {
min_scale = SUN8I_UI_SCALER_SCALE_MIN;
max_scale = SUN8I_UI_SCALER_SCALE_MAX;
}
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
min_scale, max_scale,
true, true);
}
static void sun8i_ui_layer_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, false, 0,
old_zpos);
}
static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
unsigned int zpos = new_state->normalized_zpos;
unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
if (!new_state->visible) {
sun8i_ui_layer_enable(mixer, layer->channel,
layer->overlay, false, 0, old_zpos);
return;
}
sun8i_ui_layer_update_coord(mixer, layer->channel,
layer->overlay, plane, zpos);
sun8i_ui_layer_update_alpha(mixer, layer->channel,
layer->overlay, plane);
sun8i_ui_layer_update_formats(mixer, layer->channel,
layer->overlay, plane);
sun8i_ui_layer_update_buffer(mixer, layer->channel,
layer->overlay, plane);
sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay,
true, zpos, old_zpos);
}
static const struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
.atomic_check = sun8i_ui_layer_atomic_check,
.atomic_disable = sun8i_ui_layer_atomic_disable,
.atomic_update = sun8i_ui_layer_atomic_update,
};
static const struct drm_plane_funcs sun8i_ui_layer_funcs = {
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.destroy = drm_plane_cleanup,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = drm_atomic_helper_plane_reset,
.update_plane = drm_atomic_helper_update_plane,
};
static const u32 sun8i_ui_layer_formats[] = {
DRM_FORMAT_ABGR1555,
DRM_FORMAT_ABGR4444,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGR888,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_BGRA4444,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
};
static const uint64_t sun8i_layer_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
struct sun8i_mixer *mixer,
int index)
{
enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
int channel = mixer->cfg->vi_num + index;
struct sun8i_ui_layer *layer;
unsigned int plane_cnt;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
if (!layer)
return ERR_PTR(-ENOMEM);
if (index == 0)
type = DRM_PLANE_TYPE_PRIMARY;
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun8i_ui_layer_funcs,
sun8i_ui_layer_formats,
ARRAY_SIZE(sun8i_ui_layer_formats),
sun8i_layer_modifiers, type, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
return ERR_PTR(ret);
}
plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
ret = drm_plane_create_alpha_property(&layer->plane);
if (ret) {
dev_err(drm->dev, "Couldn't add alpha property\n");
return ERR_PTR(ret);
}
ret = drm_plane_create_zpos_property(&layer->plane, channel,
0, plane_cnt - 1);
if (ret) {
dev_err(drm->dev, "Couldn't add zpos property\n");
return ERR_PTR(ret);
}
drm_plane_helper_add(&layer->plane, &sun8i_ui_layer_helper_funcs);
layer->mixer = mixer;
layer->channel = channel;
layer->overlay = 0;
return layer;
}
| linux-master | drivers/gpu/drm/sun4i/sun8i_ui_layer.c |
/*
* Copyright (C) 2017 Jernej Skrabec <[email protected]>
*
* Coefficients are taken from BSP driver, which is:
* Copyright (C) 2014-2015 Allwinner
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include "sun8i_ui_scaler.h"
#include "sun8i_vi_scaler.h"
static const u32 lan2coefftab16[240] = {
0x00004000, 0x00033ffe, 0x00063efc, 0x000a3bfb,
0xff0f37fb, 0xfe1433fb, 0xfd192ffb, 0xfd1f29fb,
0xfc2424fc, 0xfb291ffd, 0xfb2f19fd, 0xfb3314fe,
0xfb370fff, 0xfb3b0a00, 0xfc3e0600, 0xfe3f0300,
0xff053804, 0xff083801, 0xff0a3700, 0xff0e34ff,
0xff1232fd, 0xfe162ffd, 0xfd1b2cfc, 0xfd1f28fc,
0xfd2323fd, 0xfc281ffd, 0xfc2c1bfd, 0xfd2f16fe,
0xfd3212ff, 0xff340eff, 0x00360a00, 0x02370700,
0xff083207, 0xff0a3205, 0xff0d3103, 0xfe113001,
0xfe142e00, 0xfe182bff, 0xfe1b29fe, 0xfe1f25fe,
0xfe2222fe, 0xfe251ffe, 0xfe291bfe, 0xff2b18fe,
0x002e14fe, 0x013010ff, 0x03310dff, 0x05310a00,
0xff0a2e09, 0xff0c2e07, 0xff0f2d05, 0xff122c03,
0xfe152b02, 0xfe182901, 0xfe1b2700, 0xff1e24ff,
0xff2121ff, 0xff241eff, 0x00261bff, 0x012818ff,
0x022a15ff, 0x032c12ff, 0x052d0fff, 0x072d0c00,
0xff0c2a0b, 0xff0e2a09, 0xff102a07, 0xff132905,
0xff162803, 0xff182702, 0xff1b2501, 0xff1e2300,
0x00202000, 0x01221d00, 0x01251bff, 0x032618ff,
0x042815ff, 0x052913ff, 0x072a10ff, 0x092a0d00,
0xff0d280c, 0xff0f280a, 0xff112808, 0xff142706,
0xff162605, 0xff192503, 0x001b2302, 0x001d2201,
0x011f1f01, 0x01221d00, 0x02231b00, 0x04241800,
0x052616ff, 0x072713ff, 0x08271100, 0x0a280e00,
0xff0e260d, 0xff10260b, 0xff122609, 0xff142508,
0x00152506, 0x00182305, 0x001b2203, 0x011d2002,
0x011f1f01, 0x02201d01, 0x03221b00, 0x04231801,
0x06241600, 0x08251300, 0x09261100, 0x0b260f00,
0xff0e250e, 0xff10250c, 0x0011250a, 0x00142408,
0x00162307, 0x00182206, 0x011a2104, 0x011c2003,
0x021e1e02, 0x03201c01, 0x04211a01, 0x05221801,
0x07231600, 0x08241400, 0x0a241200, 0x0c241000,
0x000e240e, 0x0010240c, 0x0013230a, 0x00142309,
0x00162208, 0x01182106, 0x011a2005, 0x021b1f04,
0x031d1d03, 0x041e1c02, 0x05201a01, 0x06211801,
0x07221601, 0x09231400, 0x0a231300, 0x0c231100,
0x000f220f, 0x0011220d, 0x0013220b, 0x0015210a,
0x01162108, 0x01182007, 0x02191f06, 0x031a1e05,
0x041c1c04, 0x051d1b03, 0x061f1902, 0x07201801,
0x08211601, 0x0a211500, 0x0b221300, 0x0d221100,
0x0010210f, 0x0011210e, 0x0013210c, 0x0114200b,
0x01161f0a, 0x02171f08, 0x03181e07, 0x031a1d06,
0x041c1c04, 0x051d1a04, 0x071d1903, 0x081e1802,
0x091f1602, 0x0b1f1501, 0x0c211300, 0x0e201200,
0x00102010, 0x0012200e, 0x0013200d, 0x01151f0b,
0x01161f0a, 0x02171e09, 0x03191d07, 0x041a1c06,
0x051b1b05, 0x061c1a04, 0x071d1903, 0x081e1703,
0x0a1f1601, 0x0b1f1501, 0x0d201300, 0x0e201200,
0x00102010, 0x00121f0f, 0x00141f0d, 0x01141f0c,
0x02161e0a, 0x03171d09, 0x03181d08, 0x041a1c06,
0x051b1b05, 0x061c1a04, 0x081c1903, 0x091d1703,
0x0a1e1602, 0x0c1e1501, 0x0d1f1400, 0x0e1f1201,
0x00111e11, 0x00131e0f, 0x01131e0e, 0x02151d0c,
0x02161d0b, 0x03171c0a, 0x04181b09, 0x05191b07,
0x061a1a06, 0x071b1905, 0x091b1804, 0x0a1c1703,
0x0b1d1602, 0x0c1d1502, 0x0e1d1401, 0x0f1e1300,
0x00111e11, 0x00131d10, 0x01141d0e, 0x02151c0d,
0x03161c0b, 0x04171b0a, 0x05171b09, 0x06181a08,
0x07191907, 0x081a1806, 0x091a1805, 0x0a1b1704,
0x0b1c1603, 0x0d1c1502, 0x0e1d1401, 0x0f1d1301,
};
static u32 sun8i_ui_scaler_base(struct sun8i_mixer *mixer, int channel)
{
int vi_num = mixer->cfg->vi_num;
if (mixer->cfg->is_de3)
return DE3_VI_SCALER_UNIT_BASE +
DE3_VI_SCALER_UNIT_SIZE * vi_num +
DE3_UI_SCALER_UNIT_SIZE * (channel - vi_num);
else
return DE2_VI_SCALER_UNIT_BASE +
DE2_VI_SCALER_UNIT_SIZE * vi_num +
DE2_UI_SCALER_UNIT_SIZE * (channel - vi_num);
}
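/*
 * Worked example (illustrative): the UI scaler units sit in MMIO space
 * after all VI scaler units. On a DE2 mixer with vi_num = 1, the first
 * UI channel is channel 1, so its scaler base is
 * DE2_VI_SCALER_UNIT_BASE + DE2_VI_SCALER_UNIT_SIZE * 1 +
 * DE2_UI_SCALER_UNIT_SIZE * 0.
 */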
static int sun8i_ui_scaler_coef_index(unsigned int step)
{
unsigned int scale, int_part, float_part;
scale = step >> (SUN8I_UI_SCALER_SCALE_FRAC - 3);
int_part = scale >> 3;
float_part = scale & 0x7;
switch (int_part) {
case 0:
return 0;
case 1:
return float_part;
case 2:
return 8 + (float_part >> 1);
case 3:
return 12;
case 4:
return 13;
default:
return 14;
}
}
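/*
 * Illustrative sketch (not part of the original driver): "step" is a
 * 1.SUN8I_UI_SCALER_SCALE_FRAC fixed-point scaling ratio, so a 2.5x
 * downscale has int_part = 2 and float_part = 4, selecting row
 * 8 + (4 >> 1) = 10 of lan2coefftab16.
 */
static int __maybe_unused sun8i_ui_scaler_coef_index_example(void)
{
	/* 2.5 expressed in 1.SCALE_FRAC fixed point: 5 * 2^(FRAC - 1) */
	unsigned int step = 5U << (SUN8I_UI_SCALER_SCALE_FRAC - 1);

	return sun8i_ui_scaler_coef_index(step);	/* returns 10 */
}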
void sun8i_ui_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
{
u32 val, base;
if (WARN_ON(layer < mixer->cfg->vi_num))
return;
base = sun8i_ui_scaler_base(mixer, layer);
if (enable)
val = SUN8I_SCALER_GSU_CTRL_EN |
SUN8I_SCALER_GSU_CTRL_COEFF_RDY;
else
val = 0;
regmap_write(mixer->engine.regs, SUN8I_SCALER_GSU_CTRL(base), val);
}
void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
u32 hscale, u32 vscale, u32 hphase, u32 vphase)
{
u32 insize, outsize;
int i, offset;
u32 base;
if (WARN_ON(layer < mixer->cfg->vi_num))
return;
base = sun8i_ui_scaler_base(mixer, layer);
hphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16;
vphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16;
hscale <<= SUN8I_UI_SCALER_SCALE_FRAC - 16;
vscale <<= SUN8I_UI_SCALER_SCALE_FRAC - 16;
insize = SUN8I_UI_SCALER_SIZE(src_w, src_h);
outsize = SUN8I_UI_SCALER_SIZE(dst_w, dst_h);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_GSU_OUTSIZE(base), outsize);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_GSU_INSIZE(base), insize);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_GSU_HSTEP(base), hscale);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_GSU_VSTEP(base), vscale);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_GSU_HPHASE(base), hphase);
regmap_write(mixer->engine.regs,
SUN8I_SCALER_GSU_VPHASE(base), vphase);
offset = sun8i_ui_scaler_coef_index(hscale) *
SUN8I_UI_SCALER_COEFF_COUNT;
for (i = 0; i < SUN8I_UI_SCALER_COEFF_COUNT; i++)
regmap_write(mixer->engine.regs,
SUN8I_SCALER_GSU_HCOEFF(base, i),
lan2coefftab16[offset + i]);
}
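/*
 * Note (illustrative): the hscale/vscale/hphase/vphase arguments above
 * arrive in DRM's 16.16 fixed-point format and are shifted up to the
 * hardware's wider fraction. The helper below is an assumption made
 * only to show how such a ratio is formed; it is not part of the
 * driver.
 */
static u32 __maybe_unused sun8i_ui_scaler_ratio_16_16(u32 src, u32 dst)
{
	/* Assumes src < 65536; e.g. src = 1920, dst = 1280 -> 0x00018000 (1.5) */
	return (src << 16) / dst;
}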
| linux-master | drivers/gpu/drm/sun4i/sun8i_ui_scaler.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2017 Free Electrons
* Maxime Ripard <[email protected]>
*/
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
#include "sun4i_lvds.h"
struct sun4i_lvds {
struct drm_connector connector;
struct drm_encoder encoder;
struct drm_panel *panel;
};
static inline struct sun4i_lvds *
drm_connector_to_sun4i_lvds(struct drm_connector *connector)
{
return container_of(connector, struct sun4i_lvds,
connector);
}
static inline struct sun4i_lvds *
drm_encoder_to_sun4i_lvds(struct drm_encoder *encoder)
{
return container_of(encoder, struct sun4i_lvds,
encoder);
}
static int sun4i_lvds_get_modes(struct drm_connector *connector)
{
struct sun4i_lvds *lvds =
drm_connector_to_sun4i_lvds(connector);
return drm_panel_get_modes(lvds->panel, connector);
}
static const struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = {
.get_modes = sun4i_lvds_get_modes,
};
static void
sun4i_lvds_connector_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
}
static const struct drm_connector_funcs sun4i_lvds_con_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = sun4i_lvds_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder)
{
struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
DRM_DEBUG_DRIVER("Enabling LVDS output\n");
if (lvds->panel) {
drm_panel_prepare(lvds->panel);
drm_panel_enable(lvds->panel);
}
}
static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
{
struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
DRM_DEBUG_DRIVER("Disabling LVDS output\n");
if (lvds->panel) {
drm_panel_disable(lvds->panel);
drm_panel_unprepare(lvds->panel);
}
}
static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
.disable = sun4i_lvds_encoder_disable,
.enable = sun4i_lvds_encoder_enable,
};
int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
struct drm_encoder *encoder;
struct drm_bridge *bridge;
struct sun4i_lvds *lvds;
int ret;
lvds = devm_kzalloc(drm->dev, sizeof(*lvds), GFP_KERNEL);
if (!lvds)
return -ENOMEM;
encoder = &lvds->encoder;
ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
&lvds->panel, &bridge);
if (ret) {
dev_info(drm->dev, "No panel or bridge found... LVDS output disabled\n");
return 0;
}
drm_encoder_helper_add(&lvds->encoder,
&sun4i_lvds_enc_helper_funcs);
ret = drm_simple_encoder_init(drm, &lvds->encoder,
DRM_MODE_ENCODER_LVDS);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the lvds encoder\n");
goto err_out;
}
	/* The LVDS encoder can only work with TCON channel 0 */
lvds->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
if (lvds->panel) {
drm_connector_helper_add(&lvds->connector,
&sun4i_lvds_con_helper_funcs);
ret = drm_connector_init(drm, &lvds->connector,
&sun4i_lvds_con_funcs,
DRM_MODE_CONNECTOR_LVDS);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the lvds connector\n");
goto err_cleanup_connector;
}
drm_connector_attach_encoder(&lvds->connector,
&lvds->encoder);
}
if (bridge) {
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
goto err_cleanup_connector;
}
return 0;
err_cleanup_connector:
drm_encoder_cleanup(&lvds->encoder);
err_out:
return ret;
}
EXPORT_SYMBOL(sun4i_lvds_init);
| linux-master | drivers/gpu/drm/sun4i/sun4i_lvds.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Maxime Ripard
*
* Maxime Ripard <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "sun4i_backend.h"
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
#include "sun4i_hdmi.h"
static inline struct sun4i_hdmi *
drm_encoder_to_sun4i_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct sun4i_hdmi,
encoder);
}
static inline struct sun4i_hdmi *
drm_connector_to_sun4i_hdmi(struct drm_connector *connector)
{
return container_of(connector, struct sun4i_hdmi,
connector);
}
static int sun4i_hdmi_setup_avi_infoframes(struct sun4i_hdmi *hdmi,
struct drm_display_mode *mode)
{
struct hdmi_avi_infoframe frame;
u8 buffer[17];
int i, ret;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame,
&hdmi->connector, mode);
if (ret < 0) {
DRM_ERROR("Failed to get infoframes from mode\n");
return ret;
}
ret = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (ret < 0) {
DRM_ERROR("Failed to pack infoframes\n");
return ret;
}
for (i = 0; i < sizeof(buffer); i++)
writeb(buffer[i], hdmi->base + SUN4I_HDMI_AVI_INFOFRAME_REG(i));
return 0;
}
static int sun4i_hdmi_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *mode = &crtc_state->mode;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return -EINVAL;
return 0;
}
static void sun4i_hdmi_disable(struct drm_encoder *encoder)
{
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
u32 val;
DRM_DEBUG_DRIVER("Disabling the HDMI Output\n");
val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
clk_disable_unprepare(hdmi->tmds_clk);
}
static void sun4i_hdmi_enable(struct drm_encoder *encoder)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
struct drm_display_info *display = &hdmi->connector.display_info;
u32 val = 0;
DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
clk_prepare_enable(hdmi->tmds_clk);
sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
writel(val, hdmi->base + SUN4I_HDMI_PKT_CTRL_REG(0));
val = SUN4I_HDMI_VID_CTRL_ENABLE;
if (display->is_hdmi)
val |= SUN4I_HDMI_VID_CTRL_HDMI_MODE;
writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
}
static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
unsigned int x, y;
u32 val;
clk_set_rate(hdmi->mod_clk, mode->crtc_clock * 1000);
clk_set_rate(hdmi->tmds_clk, mode->crtc_clock * 1000);
/* Set input sync enable */
writel(SUN4I_HDMI_UNKNOWN_INPUT_SYNC,
hdmi->base + SUN4I_HDMI_UNKNOWN_REG);
/*
* Setup output pad (?) controls
*
* This is done here instead of at probe/bind time because
* the controller seems to toggle some of the bits on its own.
*
* We can't just initialize the register there, we need to
* protect the clock bits that have already been read out and
* cached by the clock framework.
*/
val = readl(hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
val &= SUN4I_HDMI_PAD_CTRL1_HALVE_CLK;
val |= hdmi->variant->pad_ctrl1_init_val;
writel(val, hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
val = readl(hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
/* Setup timing registers */
writel(SUN4I_HDMI_VID_TIMING_X(mode->hdisplay) |
SUN4I_HDMI_VID_TIMING_Y(mode->vdisplay),
hdmi->base + SUN4I_HDMI_VID_TIMING_ACT_REG);
x = mode->htotal - mode->hsync_start;
y = mode->vtotal - mode->vsync_start;
writel(SUN4I_HDMI_VID_TIMING_X(x) | SUN4I_HDMI_VID_TIMING_Y(y),
hdmi->base + SUN4I_HDMI_VID_TIMING_BP_REG);
x = mode->hsync_start - mode->hdisplay;
y = mode->vsync_start - mode->vdisplay;
writel(SUN4I_HDMI_VID_TIMING_X(x) | SUN4I_HDMI_VID_TIMING_Y(y),
hdmi->base + SUN4I_HDMI_VID_TIMING_FP_REG);
x = mode->hsync_end - mode->hsync_start;
y = mode->vsync_end - mode->vsync_start;
writel(SUN4I_HDMI_VID_TIMING_X(x) | SUN4I_HDMI_VID_TIMING_Y(y),
hdmi->base + SUN4I_HDMI_VID_TIMING_SPW_REG);
val = SUN4I_HDMI_VID_TIMING_POL_TX_CLK;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
val |= SUN4I_HDMI_VID_TIMING_POL_HSYNC;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_HDMI_VID_TIMING_POL_VSYNC;
writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG);
}
static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
unsigned long rate = mode->clock * 1000;
unsigned long diff = rate / 200; /* +-0.5% allowed by HDMI spec */
long rounded_rate;
/* 165 MHz is the typical max pixelclock frequency for HDMI <= 1.2 */
if (rate > 165000000)
return MODE_CLOCK_HIGH;
rounded_rate = clk_round_rate(hdmi->tmds_clk, rate);
if (rounded_rate > 0 &&
max_t(unsigned long, rounded_rate, rate) -
min_t(unsigned long, rounded_rate, rate) < diff)
return MODE_OK;
return MODE_NOCLOCK;
}
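/*
 * Worked example (illustrative): for a 148.5 MHz pixel clock, diff is
 * 148500000 / 200 = 742500 Hz, matching the +-0.5% tolerance allowed by
 * the HDMI spec. The mode is accepted only if clk_round_rate() can get
 * the TMDS clock within that window of the requested rate.
 */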
static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
.atomic_check = sun4i_hdmi_atomic_check,
.disable = sun4i_hdmi_disable,
.enable = sun4i_hdmi_enable,
.mode_set = sun4i_hdmi_mode_set,
.mode_valid = sun4i_hdmi_mode_valid,
};
static int sun4i_hdmi_get_modes(struct drm_connector *connector)
{
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
struct edid *edid;
int ret;
edid = drm_get_edid(connector, hdmi->ddc_i2c ?: hdmi->i2c);
if (!edid)
return 0;
DRM_DEBUG_DRIVER("Monitor is %s monitor\n",
connector->display_info.is_hdmi ? "an HDMI" : "a DVI");
drm_connector_update_edid_property(connector, edid);
cec_s_phys_addr_from_edid(hdmi->cec_adap, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
return ret;
}
static struct i2c_adapter *sun4i_hdmi_get_ddc(struct device *dev)
{
struct device_node *phandle, *remote;
struct i2c_adapter *ddc;
remote = of_graph_get_remote_node(dev->of_node, 1, -1);
if (!remote)
return ERR_PTR(-EINVAL);
phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
of_node_put(remote);
if (!phandle)
return ERR_PTR(-ENODEV);
ddc = of_get_i2c_adapter_by_node(phandle);
of_node_put(phandle);
if (!ddc)
return ERR_PTR(-EPROBE_DEFER);
return ddc;
}
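/*
 * Device-tree sketch of what the lookup above expects (node names are
 * hypothetical, shown for illustration only): port 1 of the HDMI node
 * points at a connector whose node carries a "ddc-i2c-bus" phandle:
 *
 *	hdmi_con: connector {
 *		ddc-i2c-bus = <&i2c1>;
 *		port { hdmi_con_in: endpoint { ... }; };
 *	};
 */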
static const struct drm_connector_helper_funcs sun4i_hdmi_connector_helper_funcs = {
.get_modes = sun4i_hdmi_get_modes,
};
static enum drm_connector_status
sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
unsigned long reg;
reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
if (!(reg & SUN4I_HDMI_HPD_HIGH)) {
cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
}
return connector_status_connected;
}
static const struct drm_connector_funcs sun4i_hdmi_connector_funcs = {
.detect = sun4i_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
static int sun4i_hdmi_cec_pin_read(struct cec_adapter *adap)
{
struct sun4i_hdmi *hdmi = cec_get_drvdata(adap);
return readl(hdmi->base + SUN4I_HDMI_CEC) & SUN4I_HDMI_CEC_RX;
}
static void sun4i_hdmi_cec_pin_low(struct cec_adapter *adap)
{
struct sun4i_hdmi *hdmi = cec_get_drvdata(adap);
/* Start driving the CEC pin low */
writel(SUN4I_HDMI_CEC_ENABLE, hdmi->base + SUN4I_HDMI_CEC);
}
static void sun4i_hdmi_cec_pin_high(struct cec_adapter *adap)
{
struct sun4i_hdmi *hdmi = cec_get_drvdata(adap);
/*
	 * Stop driving the CEC pin; the pull-up will take over
	 * unless another CEC device is driving the pin low.
*/
writel(0, hdmi->base + SUN4I_HDMI_CEC);
}
static const struct cec_pin_ops sun4i_hdmi_cec_pin_ops = {
.read = sun4i_hdmi_cec_pin_read,
.low = sun4i_hdmi_cec_pin_low,
.high = sun4i_hdmi_cec_pin_high,
};
#endif
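/*
 * Note (illustrative): the three callbacks above give the generic
 * cec-pin framework an open-drain view of the CEC line. Writing
 * SUN4I_HDMI_CEC_ENABLE drives the line low, while writing 0 releases
 * it so the bus pull-up restores the idle-high level; the framework
 * then does all of the bit timing in software on top of these
 * primitives.
 */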
#define SUN4I_HDMI_PAD_CTRL1_MASK (GENMASK(24, 7) | GENMASK(5, 0))
#define SUN4I_HDMI_PLL_CTRL_MASK (GENMASK(31, 8) | GENMASK(3, 0))
/* The only difference from sun5i is that AMP is 4 instead of 6 */
static const struct sun4i_hdmi_variant sun4i_variant = {
.pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN |
SUN4I_HDMI_PAD_CTRL0_CKEN |
SUN4I_HDMI_PAD_CTRL0_PWENG |
SUN4I_HDMI_PAD_CTRL0_PWEND |
SUN4I_HDMI_PAD_CTRL0_PWENC |
SUN4I_HDMI_PAD_CTRL0_LDODEN |
SUN4I_HDMI_PAD_CTRL0_LDOCEN |
SUN4I_HDMI_PAD_CTRL0_BIASEN,
.pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(4) |
SUN4I_HDMI_PAD_CTRL1_REG_EMP(2) |
SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
SUN4I_HDMI_PAD_CTRL1_REG_DEN |
SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
SUN4I_HDMI_PAD_CTRL1_AMP_OPT,
.pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) |
SUN4I_HDMI_PLL_CTRL_CS(7) |
SUN4I_HDMI_PLL_CTRL_CP_S(15) |
SUN4I_HDMI_PLL_CTRL_S(7) |
SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) |
SUN4I_HDMI_PLL_CTRL_SDIV2 |
SUN4I_HDMI_PLL_CTRL_LDO2_EN |
SUN4I_HDMI_PLL_CTRL_LDO1_EN |
SUN4I_HDMI_PLL_CTRL_HV_IS_33 |
SUN4I_HDMI_PLL_CTRL_BWS |
SUN4I_HDMI_PLL_CTRL_PLL_EN,
.ddc_clk_reg = REG_FIELD(SUN4I_HDMI_DDC_CLK_REG, 0, 6),
.ddc_clk_pre_divider = 2,
.ddc_clk_m_offset = 1,
.field_ddc_en = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 31, 31),
.field_ddc_start = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 30, 30),
.field_ddc_reset = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 0, 0),
.field_ddc_addr_reg = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 31),
.field_ddc_slave_addr = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 6),
.field_ddc_int_status = REG_FIELD(SUN4I_HDMI_DDC_INT_STATUS_REG, 0, 8),
.field_ddc_fifo_clear = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 31, 31),
.field_ddc_fifo_rx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 4, 7),
.field_ddc_fifo_tx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 0, 3),
.field_ddc_byte_count = REG_FIELD(SUN4I_HDMI_DDC_BYTE_COUNT_REG, 0, 9),
.field_ddc_cmd = REG_FIELD(SUN4I_HDMI_DDC_CMD_REG, 0, 2),
.field_ddc_sda_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 9, 9),
.field_ddc_sck_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 8, 8),
.ddc_fifo_reg = SUN4I_HDMI_DDC_FIFO_DATA_REG,
.ddc_fifo_has_dir = true,
};
static const struct sun4i_hdmi_variant sun5i_variant = {
.pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN |
SUN4I_HDMI_PAD_CTRL0_CKEN |
SUN4I_HDMI_PAD_CTRL0_PWENG |
SUN4I_HDMI_PAD_CTRL0_PWEND |
SUN4I_HDMI_PAD_CTRL0_PWENC |
SUN4I_HDMI_PAD_CTRL0_LDODEN |
SUN4I_HDMI_PAD_CTRL0_LDOCEN |
SUN4I_HDMI_PAD_CTRL0_BIASEN,
.pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(6) |
SUN4I_HDMI_PAD_CTRL1_REG_EMP(2) |
SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
SUN4I_HDMI_PAD_CTRL1_REG_DEN |
SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
SUN4I_HDMI_PAD_CTRL1_AMP_OPT,
.pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) |
SUN4I_HDMI_PLL_CTRL_CS(7) |
SUN4I_HDMI_PLL_CTRL_CP_S(15) |
SUN4I_HDMI_PLL_CTRL_S(7) |
SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) |
SUN4I_HDMI_PLL_CTRL_SDIV2 |
SUN4I_HDMI_PLL_CTRL_LDO2_EN |
SUN4I_HDMI_PLL_CTRL_LDO1_EN |
SUN4I_HDMI_PLL_CTRL_HV_IS_33 |
SUN4I_HDMI_PLL_CTRL_BWS |
SUN4I_HDMI_PLL_CTRL_PLL_EN,
.ddc_clk_reg = REG_FIELD(SUN4I_HDMI_DDC_CLK_REG, 0, 6),
.ddc_clk_pre_divider = 2,
.ddc_clk_m_offset = 1,
.field_ddc_en = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 31, 31),
.field_ddc_start = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 30, 30),
.field_ddc_reset = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 0, 0),
.field_ddc_addr_reg = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 31),
.field_ddc_slave_addr = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 6),
.field_ddc_int_status = REG_FIELD(SUN4I_HDMI_DDC_INT_STATUS_REG, 0, 8),
.field_ddc_fifo_clear = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 31, 31),
.field_ddc_fifo_rx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 4, 7),
.field_ddc_fifo_tx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 0, 3),
.field_ddc_byte_count = REG_FIELD(SUN4I_HDMI_DDC_BYTE_COUNT_REG, 0, 9),
.field_ddc_cmd = REG_FIELD(SUN4I_HDMI_DDC_CMD_REG, 0, 2),
.field_ddc_sda_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 9, 9),
.field_ddc_sck_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 8, 8),
.ddc_fifo_reg = SUN4I_HDMI_DDC_FIFO_DATA_REG,
.ddc_fifo_has_dir = true,
};
static const struct sun4i_hdmi_variant sun6i_variant = {
.has_ddc_parent_clk = true,
.has_reset_control = true,
.pad_ctrl0_init_val = 0xff |
SUN4I_HDMI_PAD_CTRL0_TXEN |
SUN4I_HDMI_PAD_CTRL0_CKEN |
SUN4I_HDMI_PAD_CTRL0_PWENG |
SUN4I_HDMI_PAD_CTRL0_PWEND |
SUN4I_HDMI_PAD_CTRL0_PWENC |
SUN4I_HDMI_PAD_CTRL0_LDODEN |
SUN4I_HDMI_PAD_CTRL0_LDOCEN,
.pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(6) |
SUN4I_HDMI_PAD_CTRL1_REG_EMP(4) |
SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
SUN4I_HDMI_PAD_CTRL1_REG_DEN |
SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
SUN4I_HDMI_PAD_CTRL1_PWSDT |
SUN4I_HDMI_PAD_CTRL1_PWSCK |
SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
SUN4I_HDMI_PAD_CTRL1_AMP_OPT |
SUN4I_HDMI_PAD_CTRL1_UNKNOWN,
.pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) |
SUN4I_HDMI_PLL_CTRL_CS(3) |
SUN4I_HDMI_PLL_CTRL_CP_S(10) |
SUN4I_HDMI_PLL_CTRL_S(4) |
SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) |
SUN4I_HDMI_PLL_CTRL_SDIV2 |
SUN4I_HDMI_PLL_CTRL_LDO2_EN |
SUN4I_HDMI_PLL_CTRL_LDO1_EN |
SUN4I_HDMI_PLL_CTRL_HV_IS_33 |
SUN4I_HDMI_PLL_CTRL_PLL_EN,
.ddc_clk_reg = REG_FIELD(SUN6I_HDMI_DDC_CLK_REG, 0, 6),
.ddc_clk_pre_divider = 1,
.ddc_clk_m_offset = 2,
.tmds_clk_div_offset = 1,
.field_ddc_en = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 0, 0),
.field_ddc_start = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 27, 27),
.field_ddc_reset = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 31, 31),
.field_ddc_addr_reg = REG_FIELD(SUN6I_HDMI_DDC_ADDR_REG, 1, 31),
.field_ddc_slave_addr = REG_FIELD(SUN6I_HDMI_DDC_ADDR_REG, 1, 7),
.field_ddc_int_status = REG_FIELD(SUN6I_HDMI_DDC_INT_STATUS_REG, 0, 8),
.field_ddc_fifo_clear = REG_FIELD(SUN6I_HDMI_DDC_FIFO_CTRL_REG, 18, 18),
.field_ddc_fifo_rx_thres = REG_FIELD(SUN6I_HDMI_DDC_FIFO_CTRL_REG, 4, 7),
.field_ddc_fifo_tx_thres = REG_FIELD(SUN6I_HDMI_DDC_FIFO_CTRL_REG, 0, 3),
.field_ddc_byte_count = REG_FIELD(SUN6I_HDMI_DDC_CMD_REG, 16, 25),
.field_ddc_cmd = REG_FIELD(SUN6I_HDMI_DDC_CMD_REG, 0, 2),
.field_ddc_sda_en = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 6, 6),
.field_ddc_sck_en = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 4, 4),
.ddc_fifo_reg = SUN6I_HDMI_DDC_FIFO_DATA_REG,
.ddc_fifo_thres_incl = true,
};
static const struct regmap_config sun4i_hdmi_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x580,
};
static int sun4i_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct cec_connector_info conn_info;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_hdmi *hdmi;
u32 reg;
int ret;
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
dev_set_drvdata(dev, hdmi);
hdmi->dev = dev;
hdmi->drv = drv;
hdmi->variant = of_device_get_match_data(dev);
if (!hdmi->variant)
return -EINVAL;
hdmi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->base)) {
dev_err(dev, "Couldn't map the HDMI encoder registers\n");
return PTR_ERR(hdmi->base);
}
if (hdmi->variant->has_reset_control) {
hdmi->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(hdmi->reset)) {
dev_err(dev, "Couldn't get the HDMI reset control\n");
return PTR_ERR(hdmi->reset);
}
ret = reset_control_deassert(hdmi->reset);
if (ret) {
dev_err(dev, "Couldn't deassert HDMI reset\n");
return ret;
}
}
hdmi->bus_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(hdmi->bus_clk)) {
dev_err(dev, "Couldn't get the HDMI bus clock\n");
ret = PTR_ERR(hdmi->bus_clk);
goto err_assert_reset;
}
clk_prepare_enable(hdmi->bus_clk);
hdmi->mod_clk = devm_clk_get(dev, "mod");
if (IS_ERR(hdmi->mod_clk)) {
dev_err(dev, "Couldn't get the HDMI mod clock\n");
ret = PTR_ERR(hdmi->mod_clk);
goto err_disable_bus_clk;
}
clk_prepare_enable(hdmi->mod_clk);
hdmi->pll0_clk = devm_clk_get(dev, "pll-0");
if (IS_ERR(hdmi->pll0_clk)) {
dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n");
ret = PTR_ERR(hdmi->pll0_clk);
goto err_disable_mod_clk;
}
hdmi->pll1_clk = devm_clk_get(dev, "pll-1");
if (IS_ERR(hdmi->pll1_clk)) {
dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n");
ret = PTR_ERR(hdmi->pll1_clk);
goto err_disable_mod_clk;
}
hdmi->regmap = devm_regmap_init_mmio(dev, hdmi->base,
&sun4i_hdmi_regmap_config);
if (IS_ERR(hdmi->regmap)) {
dev_err(dev, "Couldn't create HDMI encoder regmap\n");
ret = PTR_ERR(hdmi->regmap);
goto err_disable_mod_clk;
}
ret = sun4i_tmds_create(hdmi);
if (ret) {
dev_err(dev, "Couldn't create the TMDS clock\n");
goto err_disable_mod_clk;
}
if (hdmi->variant->has_ddc_parent_clk) {
hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc");
if (IS_ERR(hdmi->ddc_parent_clk)) {
dev_err(dev, "Couldn't get the HDMI DDC clock\n");
ret = PTR_ERR(hdmi->ddc_parent_clk);
goto err_disable_mod_clk;
}
} else {
hdmi->ddc_parent_clk = hdmi->tmds_clk;
}
writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG);
writel(hdmi->variant->pad_ctrl0_init_val,
hdmi->base + SUN4I_HDMI_PAD_CTRL0_REG);
reg = readl(hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
reg &= SUN4I_HDMI_PLL_CTRL_DIV_MASK;
reg |= hdmi->variant->pll_ctrl_init_val;
writel(reg, hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
ret = sun4i_hdmi_i2c_create(dev, hdmi);
if (ret) {
dev_err(dev, "Couldn't create the HDMI I2C adapter\n");
goto err_disable_mod_clk;
}
hdmi->ddc_i2c = sun4i_hdmi_get_ddc(dev);
if (IS_ERR(hdmi->ddc_i2c)) {
ret = PTR_ERR(hdmi->ddc_i2c);
if (ret == -ENODEV)
hdmi->ddc_i2c = NULL;
else
goto err_del_i2c_adapter;
}
drm_encoder_helper_add(&hdmi->encoder,
&sun4i_hdmi_helper_funcs);
ret = drm_simple_encoder_init(drm, &hdmi->encoder,
DRM_MODE_ENCODER_TMDS);
if (ret) {
dev_err(dev, "Couldn't initialise the HDMI encoder\n");
goto err_put_ddc_i2c;
}
hdmi->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm,
dev->of_node);
if (!hdmi->encoder.possible_crtcs) {
ret = -EPROBE_DEFER;
goto err_put_ddc_i2c;
}
#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
hdmi->cec_adap = cec_pin_allocate_adapter(&sun4i_hdmi_cec_pin_ops,
hdmi, "sun4i", CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO);
ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
if (ret < 0)
goto err_cleanup_connector;
writel(readl(hdmi->base + SUN4I_HDMI_CEC) & ~SUN4I_HDMI_CEC_TX,
hdmi->base + SUN4I_HDMI_CEC);
#endif
drm_connector_helper_add(&hdmi->connector,
&sun4i_hdmi_connector_helper_funcs);
ret = drm_connector_init_with_ddc(drm, &hdmi->connector,
&sun4i_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA,
hdmi->ddc_i2c);
if (ret) {
dev_err(dev,
"Couldn't initialise the HDMI connector\n");
goto err_cleanup_connector;
}
cec_fill_conn_info_from_drm(&conn_info, &hdmi->connector);
cec_s_conn_info(hdmi->cec_adap, &conn_info);
/* There is no HPD interrupt, so we need to poll the controller */
hdmi->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
ret = cec_register_adapter(hdmi->cec_adap, dev);
if (ret < 0)
goto err_cleanup_connector;
drm_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
return 0;
err_cleanup_connector:
cec_delete_adapter(hdmi->cec_adap);
drm_encoder_cleanup(&hdmi->encoder);
err_put_ddc_i2c:
i2c_put_adapter(hdmi->ddc_i2c);
err_del_i2c_adapter:
i2c_del_adapter(hdmi->i2c);
err_disable_mod_clk:
clk_disable_unprepare(hdmi->mod_clk);
err_disable_bus_clk:
clk_disable_unprepare(hdmi->bus_clk);
err_assert_reset:
reset_control_assert(hdmi->reset);
return ret;
}
static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
void *data)
{
struct sun4i_hdmi *hdmi = dev_get_drvdata(dev);
cec_unregister_adapter(hdmi->cec_adap);
i2c_del_adapter(hdmi->i2c);
i2c_put_adapter(hdmi->ddc_i2c);
clk_disable_unprepare(hdmi->mod_clk);
clk_disable_unprepare(hdmi->bus_clk);
}
static const struct component_ops sun4i_hdmi_ops = {
.bind = sun4i_hdmi_bind,
.unbind = sun4i_hdmi_unbind,
};
static int sun4i_hdmi_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &sun4i_hdmi_ops);
}
static void sun4i_hdmi_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun4i_hdmi_ops);
}
static const struct of_device_id sun4i_hdmi_of_table[] = {
{ .compatible = "allwinner,sun4i-a10-hdmi", .data = &sun4i_variant, },
{ .compatible = "allwinner,sun5i-a10s-hdmi", .data = &sun5i_variant, },
{ .compatible = "allwinner,sun6i-a31-hdmi", .data = &sun6i_variant, },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_hdmi_of_table);
static struct platform_driver sun4i_hdmi_driver = {
.probe = sun4i_hdmi_probe,
.remove_new = sun4i_hdmi_remove,
.driver = {
.name = "sun4i-hdmi",
.of_match_table = sun4i_hdmi_of_table,
},
};
module_platform_driver(sun4i_hdmi_driver);
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Allwinner A10 HDMI Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Free Electrons
* Copyright (C) 2015 NextThing Co
*
* Maxime Ripard <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
#include "sun4i_backend.h"
#include "sun4i_frontend.h"
#include "sun4i_layer.h"
#include "sunxi_engine.h"
static void sun4i_backend_layer_reset(struct drm_plane *plane)
{
struct sun4i_layer_state *state;
if (plane->state) {
state = state_to_sun4i_layer_state(plane->state);
__drm_atomic_helper_plane_destroy_state(&state->state);
kfree(state);
plane->state = NULL;
}
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_plane_reset(plane, &state->state);
}
static struct drm_plane_state *
sun4i_backend_layer_duplicate_state(struct drm_plane *plane)
{
struct sun4i_layer_state *orig = state_to_sun4i_layer_state(plane->state);
struct sun4i_layer_state *copy;
copy = kzalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return NULL;
	__drm_atomic_helper_plane_duplicate_state(plane, &copy->state);
copy->uses_frontend = orig->uses_frontend;
	return &copy->state;
}
static void sun4i_backend_layer_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(state);
__drm_atomic_helper_plane_destroy_state(state);
kfree(s_state);
}
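/*
 * Note (illustrative): reset/duplicate/destroy above implement the
 * standard DRM pattern for subclassing plane state, so that the
 * driver-private uses_frontend flag survives each atomic commit
 * alongside the core drm_plane_state it wraps.
 */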
static void sun4i_backend_layer_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct sun4i_layer_state *layer_state = state_to_sun4i_layer_state(old_state);
struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
struct sun4i_backend *backend = layer->backend;
sun4i_backend_layer_enable(backend, layer->id, false);
if (layer_state->uses_frontend) {
unsigned long flags;
spin_lock_irqsave(&backend->frontend_lock, flags);
backend->frontend_teardown = true;
spin_unlock_irqrestore(&backend->frontend_lock, flags);
}
}
static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct sun4i_layer_state *layer_state = state_to_sun4i_layer_state(new_state);
struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
struct sun4i_backend *backend = layer->backend;
struct sun4i_frontend *frontend = backend->frontend;
sun4i_backend_cleanup_layer(backend, layer->id);
if (layer_state->uses_frontend) {
sun4i_frontend_init(frontend);
sun4i_frontend_update_coord(frontend, plane);
sun4i_frontend_update_buffer(frontend, plane);
sun4i_frontend_update_formats(frontend, plane,
DRM_FORMAT_XRGB8888);
sun4i_backend_update_layer_frontend(backend, layer->id,
DRM_FORMAT_XRGB8888);
sun4i_frontend_enable(frontend);
} else {
sun4i_backend_update_layer_formats(backend, layer->id, plane);
sun4i_backend_update_layer_buffer(backend, layer->id, plane);
}
sun4i_backend_update_layer_coord(backend, layer->id, plane);
sun4i_backend_update_layer_zpos(backend, layer->id, plane);
sun4i_backend_layer_enable(backend, layer->id, true);
}
static bool sun4i_layer_format_mod_supported(struct drm_plane *plane,
uint32_t format, uint64_t modifier)
{
struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
if (IS_ERR_OR_NULL(layer->backend->frontend))
return sun4i_backend_format_is_supported(format, modifier);
return sun4i_backend_format_is_supported(format, modifier) ||
sun4i_frontend_format_is_supported(format, modifier);
}
static const struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
.atomic_disable = sun4i_backend_layer_atomic_disable,
.atomic_update = sun4i_backend_layer_atomic_update,
};
static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
.atomic_destroy_state = sun4i_backend_layer_destroy_state,
.atomic_duplicate_state = sun4i_backend_layer_duplicate_state,
.destroy = drm_plane_cleanup,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = sun4i_backend_layer_reset,
.update_plane = drm_atomic_helper_update_plane,
.format_mod_supported = sun4i_layer_format_mod_supported,
};
static const uint32_t sun4i_layer_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGB565,
DRM_FORMAT_NV12,
DRM_FORMAT_NV16,
DRM_FORMAT_NV21,
DRM_FORMAT_NV61,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YUV444,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVU411,
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
DRM_FORMAT_YVU444,
DRM_FORMAT_YVYU,
};
static const uint32_t sun4i_backend_layer_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGB565,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
};
static const uint64_t sun4i_layer_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_ALLWINNER_TILED,
DRM_FORMAT_MOD_INVALID
};
static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
struct sun4i_backend *backend,
enum drm_plane_type type,
unsigned int id)
{
const uint64_t *modifiers = sun4i_layer_modifiers;
const uint32_t *formats = sun4i_layer_formats;
unsigned int formats_len = ARRAY_SIZE(sun4i_layer_formats);
struct sun4i_layer *layer;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
if (!layer)
return ERR_PTR(-ENOMEM);
layer->id = id;
layer->backend = backend;
if (IS_ERR_OR_NULL(backend->frontend)) {
formats = sun4i_backend_layer_formats;
formats_len = ARRAY_SIZE(sun4i_backend_layer_formats);
modifiers = NULL;
}
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun4i_backend_layer_funcs,
formats, formats_len,
modifiers, type, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
return ERR_PTR(ret);
}
drm_plane_helper_add(&layer->plane,
&sun4i_backend_layer_helper_funcs);
drm_plane_create_alpha_property(&layer->plane);
drm_plane_create_zpos_property(&layer->plane, layer->id,
0, SUN4I_BACKEND_NUM_LAYERS - 1);
return layer;
}
struct drm_plane **sun4i_layers_init(struct drm_device *drm,
struct sunxi_engine *engine)
{
struct drm_plane **planes;
struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
int i;
	/* We need to have a sentinel at the end, hence the overallocation */
planes = devm_kcalloc(drm->dev, SUN4I_BACKEND_NUM_LAYERS + 1,
sizeof(*planes), GFP_KERNEL);
if (!planes)
return ERR_PTR(-ENOMEM);
for (i = 0; i < SUN4I_BACKEND_NUM_LAYERS; i++) {
enum drm_plane_type type = i ? DRM_PLANE_TYPE_OVERLAY : DRM_PLANE_TYPE_PRIMARY;
struct sun4i_layer *layer;
layer = sun4i_layer_init_one(drm, backend, type, i);
if (IS_ERR(layer)) {
dev_err(drm->dev, "Couldn't initialize %s plane\n",
i ? "overlay" : "primary");
return ERR_CAST(layer);
}
planes[i] = &layer->plane;
}
return planes;
}
| linux-master | drivers/gpu/drm/sun4i/sun4i_layer.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2016 Allwinnertech Co., Ltd.
* Copyright (C) 2017-2018 Bootlin
*
* Maxime Ripard <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/crc-ccitt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/phy/phy-mipi-dphy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
#include "sun6i_mipi_dsi.h"
#include <video/mipi_display.h>
#define SUN6I_DSI_CTL_REG 0x000
#define SUN6I_DSI_CTL_EN BIT(0)
#define SUN6I_DSI_BASIC_CTL_REG 0x00c
#define SUN6I_DSI_BASIC_CTL_TRAIL_INV(n) (((n) & 0xf) << 4)
#define SUN6I_DSI_BASIC_CTL_TRAIL_FILL BIT(3)
#define SUN6I_DSI_BASIC_CTL_HBP_DIS BIT(2)
#define SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS BIT(1)
#define SUN6I_DSI_BASIC_CTL_VIDEO_BURST BIT(0)
#define SUN6I_DSI_BASIC_CTL0_REG 0x010
#define SUN6I_DSI_BASIC_CTL0_HS_EOTP_EN BIT(18)
#define SUN6I_DSI_BASIC_CTL0_CRC_EN BIT(17)
#define SUN6I_DSI_BASIC_CTL0_ECC_EN BIT(16)
#define SUN6I_DSI_BASIC_CTL0_INST_ST BIT(0)
#define SUN6I_DSI_BASIC_CTL1_REG 0x014
#define SUN6I_DSI_BASIC_CTL1_VIDEO_ST_DELAY(n) (((n) & 0x1fff) << 4)
#define SUN6I_DSI_BASIC_CTL1_VIDEO_FILL BIT(2)
#define SUN6I_DSI_BASIC_CTL1_VIDEO_PRECISION BIT(1)
#define SUN6I_DSI_BASIC_CTL1_VIDEO_MODE BIT(0)
#define SUN6I_DSI_BASIC_SIZE0_REG 0x018
#define SUN6I_DSI_BASIC_SIZE0_VBP(n) (((n) & 0xfff) << 16)
#define SUN6I_DSI_BASIC_SIZE0_VSA(n) ((n) & 0xfff)
#define SUN6I_DSI_BASIC_SIZE1_REG 0x01c
#define SUN6I_DSI_BASIC_SIZE1_VT(n) (((n) & 0xfff) << 16)
#define SUN6I_DSI_BASIC_SIZE1_VACT(n) ((n) & 0xfff)
#define SUN6I_DSI_INST_FUNC_REG(n) (0x020 + (n) * 0x04)
#define SUN6I_DSI_INST_FUNC_INST_MODE(n) (((n) & 0xf) << 28)
#define SUN6I_DSI_INST_FUNC_ESCAPE_ENTRY(n) (((n) & 0xf) << 24)
#define SUN6I_DSI_INST_FUNC_TRANS_PACKET(n) (((n) & 0xf) << 20)
#define SUN6I_DSI_INST_FUNC_LANE_CEN BIT(4)
#define SUN6I_DSI_INST_FUNC_LANE_DEN(n) ((n) & 0xf)
#define SUN6I_DSI_INST_LOOP_SEL_REG 0x040
#define SUN6I_DSI_INST_LOOP_NUM_REG(n) (0x044 + (n) * 0x10)
#define SUN6I_DSI_INST_LOOP_NUM_N1(n) (((n) & 0xfff) << 16)
#define SUN6I_DSI_INST_LOOP_NUM_N0(n) ((n) & 0xfff)
#define SUN6I_DSI_INST_JUMP_SEL_REG 0x048
#define SUN6I_DSI_INST_JUMP_CFG_REG(n) (0x04c + (n) * 0x04)
#define SUN6I_DSI_INST_JUMP_CFG_TO(n) (((n) & 0xf) << 20)
#define SUN6I_DSI_INST_JUMP_CFG_POINT(n) (((n) & 0xf) << 16)
#define SUN6I_DSI_INST_JUMP_CFG_NUM(n) ((n) & 0xffff)
#define SUN6I_DSI_TRANS_START_REG 0x060
#define SUN6I_DSI_TRANS_ZERO_REG 0x078
#define SUN6I_DSI_TCON_DRQ_REG 0x07c
#define SUN6I_DSI_TCON_DRQ_ENABLE_MODE BIT(28)
#define SUN6I_DSI_TCON_DRQ_SET(n) ((n) & 0x3ff)
#define SUN6I_DSI_PIXEL_CTL0_REG 0x080
#define SUN6I_DSI_PIXEL_CTL0_PD_PLUG_DISABLE BIT(16)
#define SUN6I_DSI_PIXEL_CTL0_FORMAT(n) ((n) & 0xf)
#define SUN6I_DSI_PIXEL_CTL1_REG 0x084
#define SUN6I_DSI_PIXEL_PH_REG 0x090
#define SUN6I_DSI_PIXEL_PH_ECC(n) (((n) & 0xff) << 24)
#define SUN6I_DSI_PIXEL_PH_WC(n) (((n) & 0xffff) << 8)
#define SUN6I_DSI_PIXEL_PH_VC(n) (((n) & 3) << 6)
#define SUN6I_DSI_PIXEL_PH_DT(n) ((n) & 0x3f)
#define SUN6I_DSI_PIXEL_PF0_REG 0x098
#define SUN6I_DSI_PIXEL_PF0_CRC_FORCE(n) ((n) & 0xffff)
#define SUN6I_DSI_PIXEL_PF1_REG 0x09c
#define SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINEN(n) (((n) & 0xffff) << 16)
#define SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINE0(n) ((n) & 0xffff)
#define SUN6I_DSI_SYNC_HSS_REG 0x0b0
#define SUN6I_DSI_SYNC_HSE_REG 0x0b4
#define SUN6I_DSI_SYNC_VSS_REG 0x0b8
#define SUN6I_DSI_SYNC_VSE_REG 0x0bc
#define SUN6I_DSI_BLK_HSA0_REG 0x0c0
#define SUN6I_DSI_BLK_HSA1_REG 0x0c4
#define SUN6I_DSI_BLK_PF(n) (((n) & 0xffff) << 16)
#define SUN6I_DSI_BLK_PD(n) ((n) & 0xff)
#define SUN6I_DSI_BLK_HBP0_REG 0x0c8
#define SUN6I_DSI_BLK_HBP1_REG 0x0cc
#define SUN6I_DSI_BLK_HFP0_REG 0x0d0
#define SUN6I_DSI_BLK_HFP1_REG 0x0d4
#define SUN6I_DSI_BLK_HBLK0_REG 0x0e0
#define SUN6I_DSI_BLK_HBLK1_REG 0x0e4
#define SUN6I_DSI_BLK_VBLK0_REG 0x0e8
#define SUN6I_DSI_BLK_VBLK1_REG 0x0ec
#define SUN6I_DSI_BURST_LINE_REG 0x0f0
#define SUN6I_DSI_BURST_LINE_SYNC_POINT(n) (((n) & 0xffff) << 16)
#define SUN6I_DSI_BURST_LINE_NUM(n) ((n) & 0xffff)
#define SUN6I_DSI_BURST_DRQ_REG 0x0f4
#define SUN6I_DSI_BURST_DRQ_EDGE1(n) (((n) & 0xffff) << 16)
#define SUN6I_DSI_BURST_DRQ_EDGE0(n) ((n) & 0xffff)
#define SUN6I_DSI_CMD_CTL_REG 0x200
#define SUN6I_DSI_CMD_CTL_RX_OVERFLOW BIT(26)
#define SUN6I_DSI_CMD_CTL_RX_FLAG BIT(25)
#define SUN6I_DSI_CMD_CTL_TX_FLAG BIT(9)
#define SUN6I_DSI_CMD_RX_REG(n) (0x240 + (n) * 0x04)
#define SUN6I_DSI_DEBUG_DATA_REG 0x2f8
#define SUN6I_DSI_CMD_TX_REG(n) (0x300 + (n) * 0x04)
#define SUN6I_DSI_SYNC_POINT 40
enum sun6i_dsi_start_inst {
DSI_START_LPRX,
DSI_START_LPTX,
DSI_START_HSC,
DSI_START_HSD,
};
enum sun6i_dsi_inst_id {
DSI_INST_ID_LP11 = 0,
DSI_INST_ID_TBA,
DSI_INST_ID_HSC,
DSI_INST_ID_HSD,
DSI_INST_ID_LPDT,
DSI_INST_ID_HSCEXIT,
DSI_INST_ID_NOP,
DSI_INST_ID_DLY,
DSI_INST_ID_END = 15,
};
enum sun6i_dsi_inst_mode {
DSI_INST_MODE_STOP = 0,
DSI_INST_MODE_TBA,
DSI_INST_MODE_HS,
DSI_INST_MODE_ESCAPE,
DSI_INST_MODE_HSCEXIT,
DSI_INST_MODE_NOP,
};
enum sun6i_dsi_inst_escape {
DSI_INST_ESCA_LPDT = 0,
DSI_INST_ESCA_ULPS,
DSI_INST_ESCA_UN1,
DSI_INST_ESCA_UN2,
DSI_INST_ESCA_RESET,
DSI_INST_ESCA_UN3,
DSI_INST_ESCA_UN4,
DSI_INST_ESCA_UN5,
};
enum sun6i_dsi_inst_packet {
DSI_INST_PACK_PIXEL = 0,
DSI_INST_PACK_COMMAND,
};
static const u32 sun6i_dsi_ecc_array[] = {
[0] = (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(7) | BIT(10) |
BIT(11) | BIT(13) | BIT(16) | BIT(20) | BIT(21) | BIT(22) |
BIT(23)),
[1] = (BIT(0) | BIT(1) | BIT(3) | BIT(4) | BIT(6) | BIT(8) | BIT(10) |
BIT(12) | BIT(14) | BIT(17) | BIT(20) | BIT(21) | BIT(22) |
BIT(23)),
[2] = (BIT(0) | BIT(2) | BIT(3) | BIT(5) | BIT(6) | BIT(9) | BIT(11) |
BIT(12) | BIT(15) | BIT(18) | BIT(20) | BIT(21) | BIT(22)),
[3] = (BIT(1) | BIT(2) | BIT(3) | BIT(7) | BIT(8) | BIT(9) | BIT(13) |
BIT(14) | BIT(15) | BIT(19) | BIT(20) | BIT(21) | BIT(23)),
[4] = (BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(16) |
BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(22) | BIT(23)),
[5] = (BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) |
BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(21) | BIT(22) |
BIT(23)),
};
static u32 sun6i_dsi_ecc_compute(unsigned int data)
{
int i;
u8 ecc = 0;
for (i = 0; i < ARRAY_SIZE(sun6i_dsi_ecc_array); i++) {
u32 field = sun6i_dsi_ecc_array[i];
bool init = false;
u8 val = 0;
int j;
for (j = 0; j < 24; j++) {
if (!(BIT(j) & field))
continue;
if (!init) {
val = (BIT(j) & data) ? 1 : 0;
init = true;
} else {
val ^= (BIT(j) & data) ? 1 : 0;
}
}
ecc |= val << i;
}
return ecc;
}
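/*
 * Equivalent, more compact formulation (sketch, not in the original
 * driver): each ECC bit is the parity of the header bits selected by
 * the corresponding mask, so the inner loop above can be replaced by a
 * population count.
 */
static u32 __maybe_unused sun6i_dsi_ecc_compute_example(unsigned int data)
{
	u32 ecc = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(sun6i_dsi_ecc_array); i++)
		ecc |= (hweight32(data & sun6i_dsi_ecc_array[i]) & 1) << i;

	return ecc;
}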
static u16 sun6i_dsi_crc_compute(u8 const *buffer, size_t len)
{
return crc_ccitt(0xffff, buffer, len);
}
static u16 sun6i_dsi_crc_repeat(u8 pd, u8 *buffer, size_t len)
{
memset(buffer, pd, len);
return sun6i_dsi_crc_compute(buffer, len);
}
static u32 sun6i_dsi_build_sync_pkt(u8 dt, u8 vc, u8 d0, u8 d1)
{
u32 val = dt & 0x3f;
val |= (vc & 3) << 6;
val |= (d0 & 0xff) << 8;
val |= (d1 & 0xff) << 16;
val |= sun6i_dsi_ecc_compute(val) << 24;
return val;
}
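/*
 * Worked example (illustrative): a DSI short packet is Data ID (data
 * type plus virtual channel), two payload bytes and an ECC byte, packed
 * LSB first. An HSS packet on virtual channel 0 therefore carries
 * MIPI_DSI_H_SYNC_START (0x21) in bits [7:0], zero payload, and the
 * computed ECC in bits [31:24].
 */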
static u32 sun6i_dsi_build_blk0_pkt(u8 vc, u16 wc)
{
return sun6i_dsi_build_sync_pkt(MIPI_DSI_BLANKING_PACKET, vc,
wc & 0xff, wc >> 8);
}
static u32 sun6i_dsi_build_blk1_pkt(u16 pd, u8 *buffer, size_t len)
{
u32 val = SUN6I_DSI_BLK_PD(pd);
return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat(pd, buffer, len));
}
static void sun6i_dsi_inst_abort(struct sun6i_dsi *dsi)
{
regmap_update_bits(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
SUN6I_DSI_BASIC_CTL0_INST_ST, 0);
}
static void sun6i_dsi_inst_commit(struct sun6i_dsi *dsi)
{
regmap_update_bits(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
SUN6I_DSI_BASIC_CTL0_INST_ST,
SUN6I_DSI_BASIC_CTL0_INST_ST);
}
static int sun6i_dsi_inst_wait_for_completion(struct sun6i_dsi *dsi)
{
u32 val;
return regmap_read_poll_timeout(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
val,
!(val & SUN6I_DSI_BASIC_CTL0_INST_ST),
100, 5000);
}
static void sun6i_dsi_inst_setup(struct sun6i_dsi *dsi,
enum sun6i_dsi_inst_id id,
enum sun6i_dsi_inst_mode mode,
bool clock, u8 data,
enum sun6i_dsi_inst_packet packet,
enum sun6i_dsi_inst_escape escape)
{
regmap_write(dsi->regs, SUN6I_DSI_INST_FUNC_REG(id),
SUN6I_DSI_INST_FUNC_INST_MODE(mode) |
SUN6I_DSI_INST_FUNC_ESCAPE_ENTRY(escape) |
SUN6I_DSI_INST_FUNC_TRANS_PACKET(packet) |
(clock ? SUN6I_DSI_INST_FUNC_LANE_CEN : 0) |
SUN6I_DSI_INST_FUNC_LANE_DEN(data));
}
static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
struct mipi_dsi_device *device)
{
u8 lanes_mask = GENMASK(device->lanes - 1, 0);
sun6i_dsi_inst_setup(dsi, DSI_INST_ID_LP11, DSI_INST_MODE_STOP,
true, lanes_mask, 0, 0);
sun6i_dsi_inst_setup(dsi, DSI_INST_ID_TBA, DSI_INST_MODE_TBA,
false, 1, 0, 0);
sun6i_dsi_inst_setup(dsi, DSI_INST_ID_HSC, DSI_INST_MODE_HS,
true, 0, DSI_INST_PACK_PIXEL, 0);
sun6i_dsi_inst_setup(dsi, DSI_INST_ID_HSD, DSI_INST_MODE_HS,
false, lanes_mask, DSI_INST_PACK_PIXEL, 0);
sun6i_dsi_inst_setup(dsi, DSI_INST_ID_LPDT, DSI_INST_MODE_ESCAPE,
false, 1, DSI_INST_PACK_COMMAND,
DSI_INST_ESCA_LPDT);
sun6i_dsi_inst_setup(dsi, DSI_INST_ID_HSCEXIT, DSI_INST_MODE_HSCEXIT,
true, 0, 0, 0);
sun6i_dsi_inst_setup(dsi, DSI_INST_ID_NOP, DSI_INST_MODE_STOP,
false, lanes_mask, 0, 0);
sun6i_dsi_inst_setup(dsi, DSI_INST_ID_DLY, DSI_INST_MODE_NOP,
true, lanes_mask, 0, 0);
regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_CFG_REG(0),
SUN6I_DSI_INST_JUMP_CFG_POINT(DSI_INST_ID_NOP) |
SUN6I_DSI_INST_JUMP_CFG_TO(DSI_INST_ID_HSCEXIT) |
SUN6I_DSI_INST_JUMP_CFG_NUM(1));
}
static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
u16 delay = mode->vtotal - (mode->vsync_start - mode->vdisplay) + 1;
if (delay > mode->vtotal)
delay = delay % mode->vtotal;
return max_t(u16, delay, 1);
}
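/*
 * Worked example (illustrative): for a 1920x1080 mode with
 * vtotal = 1125, vsync_start = 1084 and vdisplay = 1080, the start
 * delay is 1125 - (1084 - 1080) + 1 = 1122 lines, clamped into
 * [1, vtotal] by the two checks above.
 */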
static u16 sun6i_dsi_get_line_num(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
struct mipi_dsi_device *device = dsi->device;
unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
return mode->htotal * Bpp / device->lanes;
}
static u16 sun6i_dsi_get_drq_edge0(struct sun6i_dsi *dsi,
struct drm_display_mode *mode,
u16 line_num, u16 edge1)
{
u16 edge0 = edge1;
edge0 += (mode->hdisplay + 40) * SUN6I_DSI_TCON_DIV / 8;
if (edge0 > line_num)
return edge0 - line_num;
return 1;
}
static u16 sun6i_dsi_get_drq_edge1(struct sun6i_dsi *dsi,
struct drm_display_mode *mode,
u16 line_num)
{
struct mipi_dsi_device *device = dsi->device;
unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
unsigned int hbp = mode->htotal - mode->hsync_end;
u16 edge1;
edge1 = SUN6I_DSI_SYNC_POINT;
edge1 += (mode->hdisplay + hbp + 20) * Bpp / device->lanes;
if (edge1 > line_num)
return line_num;
return edge1;
}
static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
struct mipi_dsi_device *device = dsi->device;
u32 val = 0;
if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
u16 line_num = sun6i_dsi_get_line_num(dsi, mode);
u16 edge0, edge1;
edge1 = sun6i_dsi_get_drq_edge1(dsi, mode, line_num);
edge0 = sun6i_dsi_get_drq_edge0(dsi, mode, line_num, edge1);
regmap_write(dsi->regs, SUN6I_DSI_BURST_DRQ_REG,
SUN6I_DSI_BURST_DRQ_EDGE0(edge0) |
SUN6I_DSI_BURST_DRQ_EDGE1(edge1));
regmap_write(dsi->regs, SUN6I_DSI_BURST_LINE_REG,
SUN6I_DSI_BURST_LINE_NUM(line_num) |
SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT));
val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE;
} else if ((mode->hsync_start - mode->hdisplay) > 20) {
/* Maaaaaagic */
u16 drq = (mode->hsync_start - mode->hdisplay) - 20;
drq *= mipi_dsi_pixel_format_to_bpp(device->format);
drq /= 32;
val = (SUN6I_DSI_TCON_DRQ_ENABLE_MODE |
SUN6I_DSI_TCON_DRQ_SET(drq));
}
regmap_write(dsi->regs, SUN6I_DSI_TCON_DRQ_REG, val);
}
static void sun6i_dsi_setup_inst_loop(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
struct mipi_dsi_device *device = dsi->device;
u16 delay = 50 - 1;
if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
u32 hsync_porch = (mode->htotal - mode->hdisplay) * 150;
delay = (hsync_porch / ((mode->clock / 1000) * 8));
delay -= 50;
}
regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_SEL_REG,
2 << (4 * DSI_INST_ID_LP11) |
3 << (4 * DSI_INST_ID_DLY));
regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(0),
SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) |
SUN6I_DSI_INST_LOOP_NUM_N1(delay));
regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(1),
SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) |
SUN6I_DSI_INST_LOOP_NUM_N1(delay));
}
static void sun6i_dsi_setup_format(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
struct mipi_dsi_device *device = dsi->device;
u32 val = SUN6I_DSI_PIXEL_PH_VC(device->channel);
u8 dt, fmt;
u16 wc;
/*
* TODO: The format defines are only valid in video mode and
* change in command mode.
*/
switch (device->format) {
case MIPI_DSI_FMT_RGB888:
dt = MIPI_DSI_PACKED_PIXEL_STREAM_24;
fmt = 8;
break;
case MIPI_DSI_FMT_RGB666:
dt = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
fmt = 9;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
dt = MIPI_DSI_PACKED_PIXEL_STREAM_18;
fmt = 10;
break;
case MIPI_DSI_FMT_RGB565:
dt = MIPI_DSI_PACKED_PIXEL_STREAM_16;
fmt = 11;
break;
default:
return;
}
val |= SUN6I_DSI_PIXEL_PH_DT(dt);
wc = mode->hdisplay * mipi_dsi_pixel_format_to_bpp(device->format) / 8;
val |= SUN6I_DSI_PIXEL_PH_WC(wc);
val |= SUN6I_DSI_PIXEL_PH_ECC(sun6i_dsi_ecc_compute(val));
regmap_write(dsi->regs, SUN6I_DSI_PIXEL_PH_REG, val);
regmap_write(dsi->regs, SUN6I_DSI_PIXEL_PF0_REG,
SUN6I_DSI_PIXEL_PF0_CRC_FORCE(0xffff));
regmap_write(dsi->regs, SUN6I_DSI_PIXEL_PF1_REG,
SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINE0(0xffff) |
SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINEN(0xffff));
regmap_write(dsi->regs, SUN6I_DSI_PIXEL_CTL0_REG,
SUN6I_DSI_PIXEL_CTL0_PD_PLUG_DISABLE |
SUN6I_DSI_PIXEL_CTL0_FORMAT(fmt));
}
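/*
 * Worked example (illustrative): a 1920-pixel wide RGB888 mode packs as
 * data type MIPI_DSI_PACKED_PIXEL_STREAM_24 with a word count of
 * 1920 * 24 / 8 = 5760 bytes per line, plus the ECC computed over the
 * assembled header, all written to SUN6I_DSI_PIXEL_PH_REG above.
 */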
static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
struct mipi_dsi_device *device = dsi->device;
int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
u32 basic_ctl = 0;
size_t bytes;
u8 *buffer;
/* Do all timing calculations up front to allocate buffer space */
if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
hblk = mode->hdisplay * Bpp;
basic_ctl = SUN6I_DSI_BASIC_CTL_VIDEO_BURST |
SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS |
SUN6I_DSI_BASIC_CTL_HBP_DIS;
if (device->lanes == 4)
basic_ctl |= SUN6I_DSI_BASIC_CTL_TRAIL_FILL |
SUN6I_DSI_BASIC_CTL_TRAIL_INV(0xc);
} else {
/*
* A sync period is composed of a blanking packet (4
* bytes + payload + 2 bytes) and a sync event packet
		 * (4 bytes). Its minimal size is therefore 10 bytes.
*/
#define HSA_PACKET_OVERHEAD 10
hsa = max(HSA_PACKET_OVERHEAD,
(mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
/*
* The backporch is set using a blanking packet (4
* bytes + payload + 2 bytes). Its minimal size is
		 * therefore 6 bytes.
*/
#define HBP_PACKET_OVERHEAD 6
hbp = max(HBP_PACKET_OVERHEAD,
(mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
/*
* The frontporch is set using a sync event (4 bytes)
* and two blanking packets (each one is 4 bytes +
* payload + 2 bytes). Its minimal size is therefore
		 * 16 bytes.
*/
#define HFP_PACKET_OVERHEAD 16
hfp = max(HFP_PACKET_OVERHEAD,
(mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
/*
* The blanking is set using a sync event (4 bytes)
* and a blanking packet (4 bytes + payload + 2
* bytes). Its minimal size is therefore 10 bytes.
*/
#define HBLK_PACKET_OVERHEAD 10
hblk = max(HBLK_PACKET_OVERHEAD,
(mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
HBLK_PACKET_OVERHEAD);
/*
		 * It's not entirely clear what vblk is about. The
		 * Allwinner BSP driver uses a rather convoluted
		 * calculation there, but only for 4 lanes. However,
		 * using 0 (the non-4-lanes case) even with a 4-lane
		 * panel seems to work...
*/
vblk = 0;
}
/* How many bytes do we need to send all payloads? */
bytes = max_t(size_t, max(max(hfp, hblk), max(hsa, hbp)), vblk);
buffer = kmalloc(bytes, GFP_KERNEL);
if (WARN_ON(!buffer))
return;
regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, basic_ctl);
regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSS_REG,
sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_START,
device->channel,
0, 0));
regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSE_REG,
sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_END,
device->channel,
0, 0));
regmap_write(dsi->regs, SUN6I_DSI_SYNC_VSS_REG,
sun6i_dsi_build_sync_pkt(MIPI_DSI_V_SYNC_START,
device->channel,
0, 0));
regmap_write(dsi->regs, SUN6I_DSI_SYNC_VSE_REG,
sun6i_dsi_build_sync_pkt(MIPI_DSI_V_SYNC_END,
device->channel,
0, 0));
regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE0_REG,
SUN6I_DSI_BASIC_SIZE0_VSA(mode->vsync_end -
mode->vsync_start) |
SUN6I_DSI_BASIC_SIZE0_VBP(mode->vtotal -
mode->vsync_end));
regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE1_REG,
SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) |
SUN6I_DSI_BASIC_SIZE1_VT(mode->vtotal));
/* sync */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hsa));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA1_REG,
sun6i_dsi_build_blk1_pkt(0, buffer, hsa));
/* backporch */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hbp));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP1_REG,
sun6i_dsi_build_blk1_pkt(0, buffer, hbp));
/* frontporch */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hfp));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP1_REG,
sun6i_dsi_build_blk1_pkt(0, buffer, hfp));
/* hblk */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hblk));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK1_REG,
sun6i_dsi_build_blk1_pkt(0, buffer, hblk));
/* vblk */
regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, vblk));
regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK1_REG,
sun6i_dsi_build_blk1_pkt(0, buffer, vblk));
kfree(buffer);
}
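/*
 * Worked example (illustrative): with RGB888 (Bpp = 3) and a front
 * porch of 88 pixels, hfp = max(16, 88 * 3 - 16) = 248 payload bytes,
 * so the scratch buffer must hold at least 248 bytes for the CRC
 * computation of that blanking packet.
 */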
static int sun6i_dsi_start(struct sun6i_dsi *dsi,
enum sun6i_dsi_start_inst func)
{
switch (func) {
case DSI_START_LPTX:
regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
DSI_INST_ID_LPDT << (4 * DSI_INST_ID_LP11) |
DSI_INST_ID_END << (4 * DSI_INST_ID_LPDT));
break;
case DSI_START_LPRX:
regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
DSI_INST_ID_LPDT << (4 * DSI_INST_ID_LP11) |
DSI_INST_ID_DLY << (4 * DSI_INST_ID_LPDT) |
DSI_INST_ID_TBA << (4 * DSI_INST_ID_DLY) |
DSI_INST_ID_END << (4 * DSI_INST_ID_TBA));
break;
case DSI_START_HSC:
regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
DSI_INST_ID_HSC << (4 * DSI_INST_ID_LP11) |
DSI_INST_ID_END << (4 * DSI_INST_ID_HSC));
break;
case DSI_START_HSD:
regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
DSI_INST_ID_NOP << (4 * DSI_INST_ID_LP11) |
DSI_INST_ID_HSD << (4 * DSI_INST_ID_NOP) |
DSI_INST_ID_DLY << (4 * DSI_INST_ID_HSD) |
DSI_INST_ID_NOP << (4 * DSI_INST_ID_DLY) |
DSI_INST_ID_END << (4 * DSI_INST_ID_HSCEXIT));
break;
default:
regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
DSI_INST_ID_END << (4 * DSI_INST_ID_LP11));
break;
}
sun6i_dsi_inst_abort(dsi);
sun6i_dsi_inst_commit(dsi);
if (func == DSI_START_HSC)
regmap_write_bits(dsi->regs,
SUN6I_DSI_INST_FUNC_REG(DSI_INST_ID_LP11),
SUN6I_DSI_INST_FUNC_LANE_CEN, 0);
return 0;
}
static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
struct mipi_dsi_device *device = dsi->device;
union phy_configure_opts opts = { };
struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
u16 delay;
int err;
DRM_DEBUG_DRIVER("Enabling DSI output\n");
err = regulator_enable(dsi->regulator);
if (err)
dev_warn(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
reset_control_deassert(dsi->reset);
clk_prepare_enable(dsi->mod_clk);
/*
* Enable the DSI block.
*/
regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);
regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);
regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);
sun6i_dsi_inst_init(dsi, dsi->device);
regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);
delay = sun6i_dsi_get_video_start_delay(dsi, mode);
regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL1_REG,
SUN6I_DSI_BASIC_CTL1_VIDEO_ST_DELAY(delay) |
SUN6I_DSI_BASIC_CTL1_VIDEO_FILL |
SUN6I_DSI_BASIC_CTL1_VIDEO_PRECISION |
SUN6I_DSI_BASIC_CTL1_VIDEO_MODE);
sun6i_dsi_setup_burst(dsi, mode);
sun6i_dsi_setup_inst_loop(dsi, mode);
sun6i_dsi_setup_format(dsi, mode);
sun6i_dsi_setup_timings(dsi, mode);
phy_init(dsi->dphy);
phy_mipi_dphy_get_default_config(mode->clock * 1000,
mipi_dsi_pixel_format_to_bpp(device->format),
device->lanes, cfg);
phy_set_mode(dsi->dphy, PHY_MODE_MIPI_DPHY);
phy_configure(dsi->dphy, &opts);
phy_power_on(dsi->dphy);
if (dsi->panel)
drm_panel_prepare(dsi->panel);
/*
* FIXME: This should be moved after the switch to HS mode.
*
* Unfortunately, once in HS mode, it seems like we're not
* able to send DCS commands anymore, which would prevent any
* panel from sending DCS commands as part of its enable
* method, which is quite common.
*
* I haven't seen any artifacts due to that sub-optimal
* ordering on the panels I've tested it with, so I guess this
* will do for now, until that IP is better understood.
*/
if (dsi->panel)
drm_panel_enable(dsi->panel);
sun6i_dsi_start(dsi, DSI_START_HSC);
udelay(1000);
sun6i_dsi_start(dsi, DSI_START_HSD);
}
static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
{
struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
DRM_DEBUG_DRIVER("Disabling DSI output\n");
if (dsi->panel) {
drm_panel_disable(dsi->panel);
drm_panel_unprepare(dsi->panel);
}
phy_power_off(dsi->dphy);
phy_exit(dsi->dphy);
clk_disable_unprepare(dsi->mod_clk);
reset_control_assert(dsi->reset);
regulator_disable(dsi->regulator);
}
static int sun6i_dsi_get_modes(struct drm_connector *connector)
{
struct sun6i_dsi *dsi = connector_to_sun6i_dsi(connector);
return drm_panel_get_modes(dsi->panel, connector);
}
static const struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
.get_modes = sun6i_dsi_get_modes,
};
static enum drm_connector_status
sun6i_dsi_connector_detect(struct drm_connector *connector, bool force)
{
struct sun6i_dsi *dsi = connector_to_sun6i_dsi(connector);
return dsi->panel ? connector_status_connected :
connector_status_disconnected;
}
static const struct drm_connector_funcs sun6i_dsi_connector_funcs = {
.detect = sun6i_dsi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_encoder_helper_funcs sun6i_dsi_enc_helper_funcs = {
.disable = sun6i_dsi_encoder_disable,
.enable = sun6i_dsi_encoder_enable,
};
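/*
 * A MIPI DSI packet header is four bytes: the data type, two payload
 * bytes (the word count for long packets, the parameters for short
 * packets) and an ECC byte. As a worked example, a DCS long write
 * with a 5-byte payload is built below as, from LSB to MSB:
 *
 *   0x39 | (5 << 8) | (0 << 16) | (ecc << 24)
 */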
static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi,
const struct mipi_dsi_msg *msg)
{
u32 pkt = msg->type;
if (msg->type == MIPI_DSI_DCS_LONG_WRITE) {
pkt |= ((msg->tx_len) & 0xffff) << 8;
pkt |= (((msg->tx_len) >> 8) & 0xffff) << 16;
} else {
pkt |= (((u8 *)msg->tx_buf)[0] << 8);
if (msg->tx_len > 1)
pkt |= (((u8 *)msg->tx_buf)[1] << 16);
}
pkt |= sun6i_dsi_ecc_compute(pkt) << 24;
return pkt;
}
static int sun6i_dsi_dcs_write_short(struct sun6i_dsi *dsi,
const struct mipi_dsi_msg *msg)
{
regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
regmap_write_bits(dsi->regs, SUN6I_DSI_CMD_CTL_REG,
0xff, (4 - 1));
sun6i_dsi_start(dsi, DSI_START_LPTX);
return msg->tx_len;
}
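/*
 * Long packets are sent as the 4-byte header in TX_REG(0), followed
 * by the payload and a 16-bit checksum starting at TX_REG(1). As a
 * worked example, a 5-byte payload gives len = 5 + 2 = 7, a bounce
 * buffer of ALIGN(7, 4) = 8 bytes, DIV_ROUND_UP(7, 4) = 2 register
 * writes, and a CMD_CTL value of 7 + 4 - 1 = 10 (the total packet
 * size minus one).
 */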
static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
const struct mipi_dsi_msg *msg)
{
int ret, len = 0;
u8 *bounce;
u16 crc;
regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL);
if (!bounce)
return -ENOMEM;
memcpy(bounce, msg->tx_buf, msg->tx_len);
len += msg->tx_len;
crc = sun6i_dsi_crc_compute(bounce, msg->tx_len);
memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc));
len += sizeof(crc);
regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, DIV_ROUND_UP(len, 4));
regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1);
kfree(bounce);
sun6i_dsi_start(dsi, DSI_START_LPTX);
ret = sun6i_dsi_inst_wait_for_completion(dsi);
if (ret < 0) {
sun6i_dsi_inst_abort(dsi);
return ret;
}
/*
* TODO: There are some bits (reg 0x200, bits 8/9) that can
* apparently be used to check whether the data has been
* sent, but I couldn't get them to work reliably.
*/
return msg->tx_len;
}
static int sun6i_dsi_dcs_read(struct sun6i_dsi *dsi,
const struct mipi_dsi_msg *msg)
{
u32 val;
int ret;
u8 byte0;
regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG,
(4 - 1));
sun6i_dsi_start(dsi, DSI_START_LPRX);
ret = sun6i_dsi_inst_wait_for_completion(dsi);
if (ret < 0) {
sun6i_dsi_inst_abort(dsi);
return ret;
}
/*
* TODO: There are some bits (reg 0x200, bits 24/25) that can
* apparently be used to check whether the data has been
* received, but I couldn't get them to work reliably.
*/
regmap_read(dsi->regs, SUN6I_DSI_CMD_CTL_REG, &val);
if (val & SUN6I_DSI_CMD_CTL_RX_OVERFLOW)
return -EIO;
regmap_read(dsi->regs, SUN6I_DSI_CMD_RX_REG(0), &val);
byte0 = val & 0xff;
if (byte0 == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT)
return -EIO;
((u8 *)msg->rx_buf)[0] = (val >> 8);
return 1;
}
static int sun6i_dsi_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
struct drm_panel *panel = of_drm_find_panel(device->dev.of_node);
if (IS_ERR(panel))
return PTR_ERR(panel);
if (!dsi->drm || !dsi->drm->registered)
return -EPROBE_DEFER;
dsi->panel = panel;
dsi->device = device;
drm_kms_helper_hotplug_event(dsi->drm);
dev_info(host->dev, "Attached device %s\n", device->name);
return 0;
}
static int sun6i_dsi_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
dsi->panel = NULL;
dsi->device = NULL;
drm_kms_helper_hotplug_event(dsi->drm);
return 0;
}
static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
int ret;
ret = sun6i_dsi_inst_wait_for_completion(dsi);
if (ret < 0)
sun6i_dsi_inst_abort(dsi);
regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG,
SUN6I_DSI_CMD_CTL_RX_OVERFLOW |
SUN6I_DSI_CMD_CTL_RX_FLAG |
SUN6I_DSI_CMD_CTL_TX_FLAG);
switch (msg->type) {
case MIPI_DSI_DCS_SHORT_WRITE:
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
ret = sun6i_dsi_dcs_write_short(dsi, msg);
break;
case MIPI_DSI_DCS_LONG_WRITE:
ret = sun6i_dsi_dcs_write_long(dsi, msg);
break;
case MIPI_DSI_DCS_READ:
if (msg->rx_len == 1) {
ret = sun6i_dsi_dcs_read(dsi, msg);
break;
}
fallthrough;
default:
ret = -EINVAL;
}
return ret;
}
static const struct mipi_dsi_host_ops sun6i_dsi_host_ops = {
.attach = sun6i_dsi_attach,
.detach = sun6i_dsi_detach,
.transfer = sun6i_dsi_transfer,
};
static const struct regmap_config sun6i_dsi_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = SUN6I_DSI_CMD_TX_REG(255),
.name = "mipi-dsi",
};
static int sun6i_dsi_bind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = data;
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
int ret;
drm_encoder_helper_add(&dsi->encoder,
&sun6i_dsi_enc_helper_funcs);
ret = drm_simple_encoder_init(drm, &dsi->encoder,
DRM_MODE_ENCODER_DSI);
if (ret) {
dev_err(dsi->dev, "Couldn't initialise the DSI encoder\n");
return ret;
}
dsi->encoder.possible_crtcs = BIT(0);
drm_connector_helper_add(&dsi->connector,
&sun6i_dsi_connector_helper_funcs);
ret = drm_connector_init(drm, &dsi->connector,
&sun6i_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
if (ret) {
dev_err(dsi->dev,
"Couldn't initialise the DSI connector\n");
goto err_cleanup_connector;
}
drm_connector_attach_encoder(&dsi->connector, &dsi->encoder);
dsi->drm = drm;
return 0;
err_cleanup_connector:
drm_encoder_cleanup(&dsi->encoder);
return ret;
}
static void sun6i_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
dsi->drm = NULL;
}
static const struct component_ops sun6i_dsi_ops = {
.bind = sun6i_dsi_bind,
.unbind = sun6i_dsi_unbind,
};
static int sun6i_dsi_probe(struct platform_device *pdev)
{
const struct sun6i_dsi_variant *variant;
struct device *dev = &pdev->dev;
struct sun6i_dsi *dsi;
void __iomem *base;
int ret;
variant = device_get_match_data(dev);
if (!variant)
return -EINVAL;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
dev_set_drvdata(dev, dsi);
dsi->dev = dev;
dsi->host.ops = &sun6i_dsi_host_ops;
dsi->host.dev = dev;
dsi->variant = variant;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(dev, "Couldn't map the DSI encoder registers\n");
return PTR_ERR(base);
}
dsi->regulator = devm_regulator_get(dev, "vcc-dsi");
if (IS_ERR(dsi->regulator))
return dev_err_probe(dev, PTR_ERR(dsi->regulator),
"Couldn't get VCC-DSI supply\n");
dsi->reset = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(dsi->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
return PTR_ERR(dsi->reset);
}
dsi->regs = devm_regmap_init_mmio(dev, base, &sun6i_dsi_regmap_config);
if (IS_ERR(dsi->regs)) {
dev_err(dev, "Couldn't init regmap\n");
return PTR_ERR(dsi->regs);
}
dsi->bus_clk = devm_clk_get(dev, variant->has_mod_clk ? "bus" : NULL);
if (IS_ERR(dsi->bus_clk))
return dev_err_probe(dev, PTR_ERR(dsi->bus_clk),
"Couldn't get the DSI bus clock\n");
ret = regmap_mmio_attach_clk(dsi->regs, dsi->bus_clk);
if (ret)
return ret;
if (variant->has_mod_clk) {
dsi->mod_clk = devm_clk_get(dev, "mod");
if (IS_ERR(dsi->mod_clk)) {
dev_err(dev, "Couldn't get the DSI mod clock\n");
ret = PTR_ERR(dsi->mod_clk);
goto err_attach_clk;
}
/*
* In order to operate properly, the module clock on the
* A31 variant always seems to need to be set to 297 MHz.
*/
if (variant->set_mod_clk)
clk_set_rate_exclusive(dsi->mod_clk, 297000000);
}
dsi->dphy = devm_phy_get(dev, "dphy");
if (IS_ERR(dsi->dphy)) {
dev_err(dev, "Couldn't get the MIPI D-PHY\n");
ret = PTR_ERR(dsi->dphy);
goto err_unprotect_clk;
}
ret = mipi_dsi_host_register(&dsi->host);
if (ret) {
dev_err(dev, "Couldn't register MIPI-DSI host\n");
goto err_unprotect_clk;
}
ret = component_add(&pdev->dev, &sun6i_dsi_ops);
if (ret) {
dev_err(dev, "Couldn't register our component\n");
goto err_remove_dsi_host;
}
return 0;
err_remove_dsi_host:
mipi_dsi_host_unregister(&dsi->host);
err_unprotect_clk:
if (dsi->variant->has_mod_clk && dsi->variant->set_mod_clk)
clk_rate_exclusive_put(dsi->mod_clk);
err_attach_clk:
regmap_mmio_detach_clk(dsi->regs);
return ret;
}
static void sun6i_dsi_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
component_del(&pdev->dev, &sun6i_dsi_ops);
mipi_dsi_host_unregister(&dsi->host);
if (dsi->variant->has_mod_clk && dsi->variant->set_mod_clk)
clk_rate_exclusive_put(dsi->mod_clk);
regmap_mmio_detach_clk(dsi->regs);
}
static const struct sun6i_dsi_variant sun6i_a31_mipi_dsi_variant = {
.has_mod_clk = true,
.set_mod_clk = true,
};
static const struct sun6i_dsi_variant sun50i_a64_mipi_dsi_variant = {
};
static const struct sun6i_dsi_variant sun50i_a100_mipi_dsi_variant = {
.has_mod_clk = true,
};
static const struct of_device_id sun6i_dsi_of_table[] = {
{
.compatible = "allwinner,sun6i-a31-mipi-dsi",
.data = &sun6i_a31_mipi_dsi_variant,
},
{
.compatible = "allwinner,sun50i-a64-mipi-dsi",
.data = &sun50i_a64_mipi_dsi_variant,
},
{
.compatible = "allwinner,sun50i-a100-mipi-dsi",
.data = &sun50i_a100_mipi_dsi_variant,
},
{ }
};
MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table);
static struct platform_driver sun6i_dsi_platform_driver = {
.probe = sun6i_dsi_probe,
.remove_new = sun6i_dsi_remove,
.driver = {
.name = "sun6i-mipi-dsi",
.of_match_table = sun6i_dsi_of_table,
},
};
module_platform_driver(sun6i_dsi_platform_driver);
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Allwinner A31 DSI Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2017 Free Electrons
* Maxime Ripard <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
static const u32 sun4i_frontend_vert_coef[32] = {
0x00004000, 0x000140ff, 0x00033ffe, 0x00043ffd,
0x00063efc, 0xff083dfc, 0x000a3bfb, 0xff0d39fb,
0xff0f37fb, 0xff1136fa, 0xfe1433fb, 0xfe1631fb,
0xfd192ffb, 0xfd1c2cfb, 0xfd1f29fb, 0xfc2127fc,
0xfc2424fc, 0xfc2721fc, 0xfb291ffd, 0xfb2c1cfd,
0xfb2f19fd, 0xfb3116fe, 0xfb3314fe, 0xfa3611ff,
0xfb370fff, 0xfb390dff, 0xfb3b0a00, 0xfc3d08ff,
0xfc3e0600, 0xfd3f0400, 0xfe3f0300, 0xff400100,
};
static const u32 sun4i_frontend_horz_coef[64] = {
0x40000000, 0x00000000, 0x40fe0000, 0x0000ff03,
0x3ffd0000, 0x0000ff05, 0x3ffc0000, 0x0000ff06,
0x3efb0000, 0x0000ff08, 0x3dfb0000, 0x0000ff09,
0x3bfa0000, 0x0000fe0d, 0x39fa0000, 0x0000fe0f,
0x38fa0000, 0x0000fe10, 0x36fa0000, 0x0000fe12,
0x33fa0000, 0x0000fd16, 0x31fa0000, 0x0000fd18,
0x2ffa0000, 0x0000fd1a, 0x2cfa0000, 0x0000fc1e,
0x29fa0000, 0x0000fc21, 0x27fb0000, 0x0000fb23,
0x24fb0000, 0x0000fb26, 0x21fb0000, 0x0000fb29,
0x1ffc0000, 0x0000fa2b, 0x1cfc0000, 0x0000fa2e,
0x19fd0000, 0x0000fa30, 0x16fd0000, 0x0000fa33,
0x14fd0000, 0x0000fa35, 0x11fe0000, 0x0000fa37,
0x0ffe0000, 0x0000fa39, 0x0dfe0000, 0x0000fa3b,
0x0afe0000, 0x0000fa3e, 0x08ff0000, 0x0000fb3e,
0x06ff0000, 0x0000fb40, 0x05ff0000, 0x0000fc40,
0x03ff0000, 0x0000fd41, 0x01ff0000, 0x0000fe42,
};
/*
* These coefficients are taken from the A33 BSP from Allwinner.
*
* The first three values of each row are coded as 13-bit signed fixed-point
* numbers, with 10 bits for the fractional part. The fourth value is a
* constant coded as a 14-bit signed fixed-point number with 4 bits for the
* fractional part.
*
* The values in table order give the following colorspace translation:
* G = 1.164 * Y - 0.391 * U - 0.813 * V + 135
* R = 1.164 * Y + 1.596 * V - 222
* B = 1.164 * Y + 2.018 * U - 276
*
* This seems to be a conversion from Y[16:235] UV[16:240] to RGB[0:255],
* following the BT601 spec.
*/
const u32 sunxi_bt601_yuv2rgb_coef[12] = {
0x000004a7, 0x00001e6f, 0x00001cbf, 0x00000877,
0x000004a7, 0x00000000, 0x00000662, 0x00003211,
0x000004a7, 0x00000812, 0x00000000, 0x00002eb1,
};
EXPORT_SYMBOL(sunxi_bt601_yuv2rgb_coef);
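/*
 * Decoding example for the fixed-point values above: 0x4a7 is 1191,
 * and 1191 / 1024 ~= 1.16, matching the 1.164 Y coefficient quoted
 * above. 0x1e6f is 7791, which read as a 13-bit signed value is
 * 7791 - 8192 = -401, and -401 / 1024 ~= -0.391, the U coefficient
 * of the G row. The constant 0x877 is 2167, and 2167 / 16 ~= 135.
 */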
static void sun4i_frontend_scaler_init(struct sun4i_frontend *frontend)
{
int i;
if (frontend->data->has_coef_access_ctrl)
regmap_write_bits(frontend->regs, SUN4I_FRONTEND_FRM_CTRL_REG,
SUN4I_FRONTEND_FRM_CTRL_COEF_ACCESS_CTRL,
SUN4I_FRONTEND_FRM_CTRL_COEF_ACCESS_CTRL);
for (i = 0; i < 32; i++) {
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZCOEF0_REG(i),
sun4i_frontend_horz_coef[2 * i]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZCOEF0_REG(i),
sun4i_frontend_horz_coef[2 * i]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZCOEF1_REG(i),
sun4i_frontend_horz_coef[2 * i + 1]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZCOEF1_REG(i),
sun4i_frontend_horz_coef[2 * i + 1]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTCOEF_REG(i),
sun4i_frontend_vert_coef[i]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTCOEF_REG(i),
sun4i_frontend_vert_coef[i]);
}
if (frontend->data->has_coef_rdy)
regmap_write_bits(frontend->regs,
SUN4I_FRONTEND_FRM_CTRL_REG,
SUN4I_FRONTEND_FRM_CTRL_COEF_RDY,
SUN4I_FRONTEND_FRM_CTRL_COEF_RDY);
}
int sun4i_frontend_init(struct sun4i_frontend *frontend)
{
return pm_runtime_get_sync(frontend->dev);
}
EXPORT_SYMBOL(sun4i_frontend_init);
void sun4i_frontend_exit(struct sun4i_frontend *frontend)
{
pm_runtime_put(frontend->dev);
}
EXPORT_SYMBOL(sun4i_frontend_exit);
static bool sun4i_frontend_format_chroma_requires_swap(uint32_t fmt)
{
switch (fmt) {
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YVU444:
return true;
default:
return false;
}
}
static bool sun4i_frontend_format_supports_tiling(uint32_t fmt)
{
switch (fmt) {
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV61:
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YVU411:
return true;
default:
return false;
}
}
void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
unsigned int strides[3] = {};
dma_addr_t dma_addr;
bool swap;
if (fb->modifier == DRM_FORMAT_MOD_ALLWINNER_TILED) {
unsigned int width = state->src_w >> 16;
unsigned int offset;
strides[0] = SUN4I_FRONTEND_LINESTRD_TILED(fb->pitches[0]);
/*
* The X1 offset is the offset of the bottom-right point in the
* end tile, i.e. the final pixel (at offset width - 1) within
* the end tile, masked to the 32-byte tile width.
*/
offset = (width - 1) & (32 - 1);
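/* e.g. a 100 pixel wide source gives an offset of (99 & 31) = 3 */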
regmap_write(frontend->regs, SUN4I_FRONTEND_TB_OFF0_REG,
SUN4I_FRONTEND_TB_OFF_X1(offset));
if (fb->format->num_planes > 1) {
strides[1] =
SUN4I_FRONTEND_LINESTRD_TILED(fb->pitches[1]);
regmap_write(frontend->regs, SUN4I_FRONTEND_TB_OFF1_REG,
SUN4I_FRONTEND_TB_OFF_X1(offset));
}
if (fb->format->num_planes > 2) {
strides[2] =
SUN4I_FRONTEND_LINESTRD_TILED(fb->pitches[2]);
regmap_write(frontend->regs, SUN4I_FRONTEND_TB_OFF2_REG,
SUN4I_FRONTEND_TB_OFF_X1(offset));
}
} else {
strides[0] = fb->pitches[0];
if (fb->format->num_planes > 1)
strides[1] = fb->pitches[1];
if (fb->format->num_planes > 2)
strides[2] = fb->pitches[2];
}
/* Set the line width */
DRM_DEBUG_DRIVER("Frontend stride: %d bytes\n", fb->pitches[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_LINESTRD0_REG,
strides[0]);
if (fb->format->num_planes > 1)
regmap_write(frontend->regs, SUN4I_FRONTEND_LINESTRD1_REG,
strides[1]);
if (fb->format->num_planes > 2)
regmap_write(frontend->regs, SUN4I_FRONTEND_LINESTRD2_REG,
strides[2]);
/* Some planar formats require chroma channel swapping by hand. */
swap = sun4i_frontend_format_chroma_requires_swap(fb->format->format);
/* Set the physical address of the buffer in memory */
dma_addr = drm_fb_dma_get_gem_addr(fb, state, 0);
DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &dma_addr);
regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, dma_addr);
if (fb->format->num_planes > 1) {
dma_addr = drm_fb_dma_get_gem_addr(fb, state, swap ? 2 : 1);
DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n",
&dma_addr);
regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR1_REG,
dma_addr);
}
if (fb->format->num_planes > 2) {
dma_addr = drm_fb_dma_get_gem_addr(fb, state, swap ? 1 : 2);
DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n",
&dma_addr);
regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR2_REG,
dma_addr);
}
}
EXPORT_SYMBOL(sun4i_frontend_update_buffer);
static int
sun4i_frontend_drm_format_to_input_fmt(const struct drm_format_info *format,
u32 *val)
{
if (!format->is_yuv)
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_RGB;
else if (drm_format_info_is_yuv_sampling_411(format))
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_YUV411;
else if (drm_format_info_is_yuv_sampling_420(format))
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_YUV420;
else if (drm_format_info_is_yuv_sampling_422(format))
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_YUV422;
else if (drm_format_info_is_yuv_sampling_444(format))
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_YUV444;
else
return -EINVAL;
return 0;
}
static int
sun4i_frontend_drm_format_to_input_mode(const struct drm_format_info *format,
uint64_t modifier, u32 *val)
{
bool tiled = (modifier == DRM_FORMAT_MOD_ALLWINNER_TILED);
switch (format->num_planes) {
case 1:
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PACKED;
return 0;
case 2:
*val = tiled ? SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_MB32_SEMIPLANAR
: SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_SEMIPLANAR;
return 0;
case 3:
*val = tiled ? SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_MB32_PLANAR
: SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PLANAR;
return 0;
default:
return -EINVAL;
}
}
static int
sun4i_frontend_drm_format_to_input_sequence(const struct drm_format_info *format,
u32 *val)
{
/* Planar formats have an explicit input sequence. */
if (drm_format_info_is_yuv_planar(format)) {
*val = 0;
return 0;
}
switch (format->format) {
case DRM_FORMAT_BGRX8888:
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_BGRX;
return 0;
case DRM_FORMAT_NV12:
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_UV;
return 0;
case DRM_FORMAT_NV16:
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_UV;
return 0;
case DRM_FORMAT_NV21:
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_VU;
return 0;
case DRM_FORMAT_NV61:
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_VU;
return 0;
case DRM_FORMAT_UYVY:
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_UYVY;
return 0;
case DRM_FORMAT_VYUY:
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_VYUY;
return 0;
case DRM_FORMAT_XRGB8888:
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_XRGB;
return 0;
case DRM_FORMAT_YUYV:
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_YUYV;
return 0;
case DRM_FORMAT_YVYU:
*val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_YVYU;
return 0;
default:
return -EINVAL;
}
}
static int sun4i_frontend_drm_format_to_output_fmt(uint32_t fmt, u32 *val)
{
switch (fmt) {
case DRM_FORMAT_BGRX8888:
*val = SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_BGRX8888;
return 0;
case DRM_FORMAT_XRGB8888:
*val = SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_XRGB8888;
return 0;
default:
return -EINVAL;
}
}
static const uint32_t sun4i_frontend_formats[] = {
DRM_FORMAT_BGRX8888,
DRM_FORMAT_NV12,
DRM_FORMAT_NV16,
DRM_FORMAT_NV21,
DRM_FORMAT_NV61,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YUV444,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVU411,
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
DRM_FORMAT_YVU444,
DRM_FORMAT_YVYU,
};
bool sun4i_frontend_format_is_supported(uint32_t fmt, uint64_t modifier)
{
unsigned int i;
if (modifier == DRM_FORMAT_MOD_ALLWINNER_TILED)
return sun4i_frontend_format_supports_tiling(fmt);
else if (modifier != DRM_FORMAT_MOD_LINEAR)
return false;
for (i = 0; i < ARRAY_SIZE(sun4i_frontend_formats); i++)
if (sun4i_frontend_formats[i] == fmt)
return true;
return false;
}
EXPORT_SYMBOL(sun4i_frontend_format_is_supported);
int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
struct drm_plane *plane, uint32_t out_fmt)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
uint64_t modifier = fb->modifier;
unsigned int ch1_phase_idx;
u32 out_fmt_val;
u32 in_fmt_val, in_mod_val, in_ps_val;
unsigned int i;
u32 bypass;
int ret;
ret = sun4i_frontend_drm_format_to_input_fmt(format, &in_fmt_val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid input format\n");
return ret;
}
ret = sun4i_frontend_drm_format_to_input_mode(format, modifier,
&in_mod_val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid input mode\n");
return ret;
}
ret = sun4i_frontend_drm_format_to_input_sequence(format, &in_ps_val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid pixel sequence\n");
return ret;
}
ret = sun4i_frontend_drm_format_to_output_fmt(out_fmt, &out_fmt_val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid output format\n");
return ret;
}
/*
* I have no idea what this does exactly, but it seems to be
* related to the scaler FIR filter phase parameters.
*/
ch1_phase_idx = (format->num_planes > 1) ? 1 : 0;
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZPHASE_REG,
frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZPHASE_REG,
frontend->data->ch_phase[ch1_phase_idx]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE0_REG,
frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE0_REG,
frontend->data->ch_phase[ch1_phase_idx]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG,
frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG,
frontend->data->ch_phase[ch1_phase_idx]);
/*
* Checking the input format is sufficient since we currently only
* support RGB output formats to the backend. If YUV output formats
* ever get supported, a YUV input and output would require bypassing
* the CSC engine too.
*/
if (format->is_yuv) {
/* Setup the CSC engine for YUV to RGB conversion. */
bypass = 0;
for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
regmap_write(frontend->regs,
SUN4I_FRONTEND_CSC_COEF_REG(i),
sunxi_bt601_yuv2rgb_coef[i]);
} else {
bypass = SUN4I_FRONTEND_BYPASS_CSC_EN;
}
regmap_update_bits(frontend->regs, SUN4I_FRONTEND_BYPASS_REG,
SUN4I_FRONTEND_BYPASS_CSC_EN, bypass);
regmap_write(frontend->regs, SUN4I_FRONTEND_INPUT_FMT_REG,
in_mod_val | in_fmt_val | in_ps_val);
/*
* TODO: It looks like the A31 and the A80, at least, will need
* bit 7 (ALPHA_EN) enabled when using a format with alpha (such
* as ARGB8888).
*/
regmap_write(frontend->regs, SUN4I_FRONTEND_OUTPUT_FMT_REG,
out_fmt_val);
return 0;
}
EXPORT_SYMBOL(sun4i_frontend_update_formats);
void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
uint32_t luma_width, luma_height;
uint32_t chroma_width, chroma_height;
/* Set height and width */
DRM_DEBUG_DRIVER("Frontend size W: %u H: %u\n",
state->crtc_w, state->crtc_h);
luma_width = state->src_w >> 16;
luma_height = state->src_h >> 16;
chroma_width = DIV_ROUND_UP(luma_width, fb->format->hsub);
chroma_height = DIV_ROUND_UP(luma_height, fb->format->vsub);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_INSIZE_REG,
SUN4I_FRONTEND_INSIZE(luma_height, luma_width));
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_INSIZE_REG,
SUN4I_FRONTEND_INSIZE(chroma_height, chroma_width));
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_OUTSIZE_REG,
SUN4I_FRONTEND_OUTSIZE(state->crtc_h, state->crtc_w));
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_OUTSIZE_REG,
SUN4I_FRONTEND_OUTSIZE(state->crtc_h, state->crtc_w));
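/*
 * The scaling factors below are 16.16 fixed point: for instance,
 * upscaling a 640 pixel wide source to a 1280 pixel wide CRTC gives
 * a HORZFACT of (640 << 16) / 1280 = 0x8000, i.e. 0.5.
 */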
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZFACT_REG,
(luma_width << 16) / state->crtc_w);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZFACT_REG,
(chroma_width << 16) / state->crtc_w);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTFACT_REG,
(luma_height << 16) / state->crtc_h);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTFACT_REG,
(chroma_height << 16) / state->crtc_h);
regmap_write_bits(frontend->regs, SUN4I_FRONTEND_FRM_CTRL_REG,
SUN4I_FRONTEND_FRM_CTRL_REG_RDY,
SUN4I_FRONTEND_FRM_CTRL_REG_RDY);
}
EXPORT_SYMBOL(sun4i_frontend_update_coord);
int sun4i_frontend_enable(struct sun4i_frontend *frontend)
{
regmap_write_bits(frontend->regs, SUN4I_FRONTEND_FRM_CTRL_REG,
SUN4I_FRONTEND_FRM_CTRL_FRM_START,
SUN4I_FRONTEND_FRM_CTRL_FRM_START);
return 0;
}
EXPORT_SYMBOL(sun4i_frontend_enable);
static const struct regmap_config sun4i_frontend_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x0a14,
};
static int sun4i_frontend_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct sun4i_frontend *frontend;
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
void __iomem *regs;
frontend = devm_kzalloc(dev, sizeof(*frontend), GFP_KERNEL);
if (!frontend)
return -ENOMEM;
dev_set_drvdata(dev, frontend);
frontend->dev = dev;
frontend->node = dev->of_node;
frontend->data = of_device_get_match_data(dev);
if (!frontend->data)
return -ENODEV;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
frontend->regs = devm_regmap_init_mmio(dev, regs,
&sun4i_frontend_regmap_config);
if (IS_ERR(frontend->regs)) {
dev_err(dev, "Couldn't create the frontend regmap\n");
return PTR_ERR(frontend->regs);
}
frontend->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(frontend->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
return PTR_ERR(frontend->reset);
}
frontend->bus_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(frontend->bus_clk)) {
dev_err(dev, "Couldn't get our bus clock\n");
return PTR_ERR(frontend->bus_clk);
}
frontend->mod_clk = devm_clk_get(dev, "mod");
if (IS_ERR(frontend->mod_clk)) {
dev_err(dev, "Couldn't get our mod clock\n");
return PTR_ERR(frontend->mod_clk);
}
frontend->ram_clk = devm_clk_get(dev, "ram");
if (IS_ERR(frontend->ram_clk)) {
dev_err(dev, "Couldn't get our ram clock\n");
return PTR_ERR(frontend->ram_clk);
}
list_add_tail(&frontend->list, &drv->frontend_list);
pm_runtime_enable(dev);
return 0;
}
static void sun4i_frontend_unbind(struct device *dev, struct device *master,
void *data)
{
struct sun4i_frontend *frontend = dev_get_drvdata(dev);
list_del(&frontend->list);
pm_runtime_force_suspend(dev);
}
static const struct component_ops sun4i_frontend_ops = {
.bind = sun4i_frontend_bind,
.unbind = sun4i_frontend_unbind,
};
static int sun4i_frontend_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &sun4i_frontend_ops);
}
static void sun4i_frontend_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun4i_frontend_ops);
}
static int sun4i_frontend_runtime_resume(struct device *dev)
{
struct sun4i_frontend *frontend = dev_get_drvdata(dev);
int ret;
clk_set_rate(frontend->mod_clk, 300000000);
clk_prepare_enable(frontend->bus_clk);
clk_prepare_enable(frontend->mod_clk);
clk_prepare_enable(frontend->ram_clk);
ret = reset_control_reset(frontend->reset);
if (ret) {
dev_err(dev, "Couldn't reset our device\n");
return ret;
}
regmap_update_bits(frontend->regs, SUN4I_FRONTEND_EN_REG,
SUN4I_FRONTEND_EN_EN,
SUN4I_FRONTEND_EN_EN);
sun4i_frontend_scaler_init(frontend);
return 0;
}
static int sun4i_frontend_runtime_suspend(struct device *dev)
{
struct sun4i_frontend *frontend = dev_get_drvdata(dev);
clk_disable_unprepare(frontend->ram_clk);
clk_disable_unprepare(frontend->mod_clk);
clk_disable_unprepare(frontend->bus_clk);
reset_control_assert(frontend->reset);
return 0;
}
static const struct dev_pm_ops sun4i_frontend_pm_ops = {
.runtime_resume = sun4i_frontend_runtime_resume,
.runtime_suspend = sun4i_frontend_runtime_suspend,
};
static const struct sun4i_frontend_data sun4i_a10_frontend = {
.ch_phase = { 0x000, 0xfc000 },
.has_coef_rdy = true,
};
static const struct sun4i_frontend_data sun8i_a33_frontend = {
.ch_phase = { 0x400, 0xfc400 },
.has_coef_access_ctrl = true,
};
const struct of_device_id sun4i_frontend_of_table[] = {
{
.compatible = "allwinner,sun4i-a10-display-frontend",
.data = &sun4i_a10_frontend
},
{
.compatible = "allwinner,sun7i-a20-display-frontend",
.data = &sun4i_a10_frontend
},
{
.compatible = "allwinner,sun8i-a23-display-frontend",
.data = &sun8i_a33_frontend
},
{
.compatible = "allwinner,sun8i-a33-display-frontend",
.data = &sun8i_a33_frontend
},
{ }
};
EXPORT_SYMBOL(sun4i_frontend_of_table);
MODULE_DEVICE_TABLE(of, sun4i_frontend_of_table);
static struct platform_driver sun4i_frontend_driver = {
.probe = sun4i_frontend_probe,
.remove_new = sun4i_frontend_remove,
.driver = {
.name = "sun4i-frontend",
.of_match_table = sun4i_frontend_of_table,
.pm = &sun4i_frontend_pm_ops,
},
};
module_platform_driver(sun4i_frontend_driver);
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Allwinner A10 Display Engine Frontend Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sun4i/sun4i_frontend.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Free Electrons
* Copyright (C) 2015 NextThing Co
*
* Maxime Ripard <[email protected]>
*/
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
#include "sun4i_rgb.h"
struct sun4i_rgb {
struct drm_connector connector;
struct drm_encoder encoder;
struct sun4i_tcon *tcon;
struct drm_panel *panel;
struct drm_bridge *bridge;
};
static inline struct sun4i_rgb *
drm_connector_to_sun4i_rgb(struct drm_connector *connector)
{
return container_of(connector, struct sun4i_rgb,
connector);
}
static inline struct sun4i_rgb *
drm_encoder_to_sun4i_rgb(struct drm_encoder *encoder)
{
return container_of(encoder, struct sun4i_rgb,
encoder);
}
static int sun4i_rgb_get_modes(struct drm_connector *connector)
{
struct sun4i_rgb *rgb =
drm_connector_to_sun4i_rgb(connector);
return drm_panel_get_modes(rgb->panel, connector);
}
/*
* VESA DMT defines a tolerance of 0.5% on the pixel clock, while the
* CVT spec reuses that tolerance in its examples, so it looks like a
* good default tolerance for the EDID-based modes. Define it as 5 per
* mille to avoid floating point operations.
*/
#define SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE 5
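/*
 * For example, a 148500 kHz mode is accepted as long as the rounded
 * clock rate falls within [148500 * 995 / 1000, 148500 * 1005 / 1000],
 * i.e. roughly 147.76 to 149.24 MHz.
 */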
static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
const struct drm_display_mode *mode)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(crtc);
struct sun4i_tcon *tcon = rgb->tcon;
u32 hsync = mode->hsync_end - mode->hsync_start;
u32 vsync = mode->vsync_end - mode->vsync_start;
unsigned long long rate = mode->clock * 1000;
unsigned long long lowest, highest;
unsigned long long rounded_rate;
DRM_DEBUG_DRIVER("Validating modes...\n");
if (hsync < 1)
return MODE_HSYNC_NARROW;
if (hsync > 0x3ff)
return MODE_HSYNC_WIDE;
if ((mode->hdisplay < 1) || (mode->htotal < 1))
return MODE_H_ILLEGAL;
if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff))
return MODE_BAD_HVALUE;
DRM_DEBUG_DRIVER("Horizontal parameters OK\n");
if (vsync < 1)
return MODE_VSYNC_NARROW;
if (vsync > 0x3ff)
return MODE_VSYNC_WIDE;
if ((mode->vdisplay < 1) || (mode->vtotal < 1))
return MODE_V_ILLEGAL;
if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff))
return MODE_BAD_VVALUE;
DRM_DEBUG_DRIVER("Vertical parameters OK\n");
/*
* TODO: We should use the struct display_timing if available
* and/or try to stretch the timings within that tolerance to
* take care of panels that we wouldn't otherwise be able to
* match exactly.
*/
if (rgb->panel) {
DRM_DEBUG_DRIVER("RGB panel used, skipping clock rate checks");
goto out;
}
/*
* That shouldn't ever happen unless something is really wrong, but it
* doesn't hurt to check.
*/
if (!rgb->bridge)
goto out;
tcon->dclk_min_div = 6;
tcon->dclk_max_div = 127;
rounded_rate = clk_round_rate(tcon->dclk, rate);
lowest = rate * (1000 - SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE);
do_div(lowest, 1000);
if (rounded_rate < lowest)
return MODE_CLOCK_LOW;
highest = rate * (1000 + SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE);
do_div(highest, 1000);
if (rounded_rate > highest)
return MODE_CLOCK_HIGH;
out:
DRM_DEBUG_DRIVER("Clock rate OK\n");
return MODE_OK;
}
static const struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
.get_modes = sun4i_rgb_get_modes,
};
static void
sun4i_rgb_connector_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
}
static const struct drm_connector_funcs sun4i_rgb_con_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = sun4i_rgb_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
DRM_DEBUG_DRIVER("Enabling RGB output\n");
if (rgb->panel) {
drm_panel_prepare(rgb->panel);
drm_panel_enable(rgb->panel);
}
}
static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
DRM_DEBUG_DRIVER("Disabling RGB output\n");
if (rgb->panel) {
drm_panel_disable(rgb->panel);
drm_panel_unprepare(rgb->panel);
}
}
static const struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
.disable = sun4i_rgb_encoder_disable,
.enable = sun4i_rgb_encoder_enable,
.mode_valid = sun4i_rgb_mode_valid,
};
int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
struct drm_encoder *encoder;
struct sun4i_rgb *rgb;
int ret;
rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
if (!rgb)
return -ENOMEM;
rgb->tcon = tcon;
encoder = &rgb->encoder;
ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
&rgb->panel, &rgb->bridge);
if (ret) {
dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n");
return 0;
}
drm_encoder_helper_add(&rgb->encoder,
&sun4i_rgb_enc_helper_funcs);
ret = drm_simple_encoder_init(drm, &rgb->encoder,
DRM_MODE_ENCODER_NONE);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the rgb encoder\n");
goto err_out;
}
/* The RGB encoder can only work with the TCON channel 0 */
rgb->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
if (rgb->panel) {
drm_connector_helper_add(&rgb->connector,
&sun4i_rgb_con_helper_funcs);
ret = drm_connector_init(drm, &rgb->connector,
&sun4i_rgb_con_funcs,
DRM_MODE_CONNECTOR_Unknown);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the rgb connector\n");
goto err_cleanup_connector;
}
drm_connector_attach_encoder(&rgb->connector,
&rgb->encoder);
}
if (rgb->bridge) {
ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
if (ret)
goto err_cleanup_connector;
}
return 0;
err_cleanup_connector:
drm_encoder_cleanup(&rgb->encoder);
err_out:
return ret;
}
EXPORT_SYMBOL(sun4i_rgb_init);
| linux-master | drivers/gpu/drm/sun4i/sun4i_rgb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Free Electrons
* Copyright (C) 2015 NextThing Co
*
* Maxime Ripard <[email protected]>
*/
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
#include "sun4i_framebuffer.h"
#include "sun4i_tcon.h"
#include "sun8i_tcon_top.h"
static int drm_sun4i_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
/* The hardware only allows even pitches for YUV buffers. */
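/* e.g. 1021 pixels at 8 bpp round up to a 1022-byte pitch. */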
args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 2);
return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
DEFINE_DRM_GEM_DMA_FOPS(sun4i_drv_fops);
static const struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
/* Generic Operations */
.fops = &sun4i_drv_fops,
.name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine",
.date = "20150629",
.major = 1,
.minor = 0,
/* GEM Operations */
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create),
};
static int sun4i_drv_bind(struct device *dev)
{
struct drm_device *drm;
struct sun4i_drv *drv;
int ret;
drm = drm_dev_alloc(&sun4i_drv_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
if (!drv) {
ret = -ENOMEM;
goto free_drm;
}
drm->dev_private = drv;
INIT_LIST_HEAD(&drv->frontend_list);
INIT_LIST_HEAD(&drv->engine_list);
INIT_LIST_HEAD(&drv->tcon_list);
ret = of_reserved_mem_device_init(dev);
if (ret && ret != -ENODEV) {
dev_err(drm->dev, "Couldn't claim our memory region\n");
goto free_drm;
}
drm_mode_config_init(drm);
ret = component_bind_all(drm->dev, drm);
if (ret) {
dev_err(drm->dev, "Couldn't bind all pipelines components\n");
goto cleanup_mode_config;
}
/* drm_vblank_init calls kcalloc, which can fail */
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret)
goto unbind_all;
/* Remove early framebuffers (ie. simplefb) */
ret = drm_aperture_remove_framebuffers(&sun4i_drv_driver);
if (ret)
goto unbind_all;
sun4i_framebuffer_init(drm);
/* Enable connectors polling */
drm_kms_helper_poll_init(drm);
ret = drm_dev_register(drm, 0);
if (ret)
goto finish_poll;
drm_fbdev_dma_setup(drm, 32);
dev_set_drvdata(dev, drm);
return 0;
finish_poll:
drm_kms_helper_poll_fini(drm);
unbind_all:
component_unbind_all(dev, NULL);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
free_drm:
drm_dev_put(drm);
return ret;
}
static void sun4i_drv_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
dev_set_drvdata(dev, NULL);
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
component_unbind_all(dev, NULL);
of_reserved_mem_device_release(dev);
drm_dev_put(drm);
}
static const struct component_master_ops sun4i_drv_master_ops = {
.bind = sun4i_drv_bind,
.unbind = sun4i_drv_unbind,
};
static bool sun4i_drv_node_is_connector(struct device_node *node)
{
return of_device_is_compatible(node, "hdmi-connector");
}
static bool sun4i_drv_node_is_frontend(struct device_node *node)
{
return of_device_is_compatible(node, "allwinner,sun4i-a10-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun7i-a20-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun8i-a23-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun9i-a80-display-frontend");
}
static bool sun4i_drv_node_is_deu(struct device_node *node)
{
return of_device_is_compatible(node, "allwinner,sun9i-a80-deu");
}
static bool sun4i_drv_node_is_supported_frontend(struct device_node *node)
{
if (IS_ENABLED(CONFIG_DRM_SUN4I_BACKEND))
return !!of_match_node(sun4i_frontend_of_table, node);
return false;
}
static bool sun4i_drv_node_is_tcon(struct device_node *node)
{
return !!of_match_node(sun4i_tcon_of_table, node);
}
static bool sun4i_drv_node_is_tcon_with_ch0(struct device_node *node)
{
const struct of_device_id *match;
match = of_match_node(sun4i_tcon_of_table, node);
if (match) {
struct sun4i_tcon_quirks *quirks;
quirks = (struct sun4i_tcon_quirks *)match->data;
return quirks->has_channel_0;
}
return false;
}
static bool sun4i_drv_node_is_tcon_top(struct device_node *node)
{
return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
!!of_match_node(sun8i_tcon_top_of_table, node);
}
/*
* The encoder drivers use drm_of_find_possible_crtcs to get upstream
* crtcs from the device tree using of_graph. For the results to be
* correct, encoders must be probed/bound after _all_ crtcs have been
* created. The existing code uses a depth first recursive traversal
* of the of_graph, which means the encoders downstream of the TCON
* get added right after the first TCON. The second TCON or CRTC will
* never be properly associated with encoders connected to it.
*
* Also, in a dual display pipeline setup, both frontends can feed
* either backend, and both backends can feed either TCON, so we want
* all components of the same type to be added before the next type
* in the pipeline. Fortunately, the pipelines are perfectly symmetric,
* i.e. components of the same type are at the same depth when counted
* from the frontend. The only exception is the third pipeline in
* the A80 SoC, which we do not support anyway.
*
* Hence we can use a breadth first search traversal order to add
* components. We do not need to check for duplicates. The component
* matching system handles this for us.
*/
struct endpoint_list {
DECLARE_KFIFO(fifo, struct device_node *, 16);
};
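/*
 * A sketch of the traversal, purely for illustration: the pipeline
 * roots are pushed into the fifo, then each node popped from the
 * head has its downstream endpoints pushed to the tail, so nodes
 * are visited level by level instead of depth first.
 */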
static void sun4i_drv_traverse_endpoints(struct endpoint_list *list,
struct device_node *node,
int port_id)
{
struct device_node *ep, *remote, *port;
port = of_graph_get_port_by_id(node, port_id);
if (!port) {
DRM_DEBUG_DRIVER("No output to bind on port %d\n", port_id);
return;
}
for_each_available_child_of_node(port, ep) {
remote = of_graph_get_remote_port_parent(ep);
if (!remote) {
DRM_DEBUG_DRIVER("Error retrieving the output node\n");
continue;
}
if (sun4i_drv_node_is_tcon(node)) {
/*
* TCON TOP is always probed before TCON. However, TCON
* points back to TCON TOP when it is source for HDMI.
* We have to skip it here to prevent infinite looping
* between TCON TOP and TCON.
*/
if (sun4i_drv_node_is_tcon_top(remote)) {
DRM_DEBUG_DRIVER("TCON output endpoint is TCON TOP... skipping\n");
of_node_put(remote);
continue;
}
/*
* If the node is our TCON with channel 0, the first
* port is used for panel or bridges, and will not be
* part of the component framework.
*/
if (sun4i_drv_node_is_tcon_with_ch0(node)) {
struct of_endpoint endpoint;
if (of_graph_parse_endpoint(ep, &endpoint)) {
DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
of_node_put(remote);
continue;
}
if (!endpoint.id) {
DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
of_node_put(remote);
continue;
}
}
}
kfifo_put(&list->fifo, remote);
}
}
static int sun4i_drv_add_endpoints(struct device *dev,
struct endpoint_list *list,
struct component_match **match,
struct device_node *node)
{
int count = 0;
/*
* The frontend has been disabled in some of our old device
* trees. If we find a node that is the frontend and is
* disabled, we should just follow through and parse its
* children, but without adding it to the component list.
* Otherwise, we obviously want to add it to the list.
*/
if (!sun4i_drv_node_is_frontend(node) &&
!of_device_is_available(node))
return 0;
/*
* The connectors will be the last nodes in our pipeline, we
* can just bail out.
*/
if (sun4i_drv_node_is_connector(node))
return 0;
/*
* If the device is either just a regular device, or an
* enabled frontend supported by the driver, we add it to our
* component list.
*/
if (!(sun4i_drv_node_is_frontend(node) ||
sun4i_drv_node_is_deu(node)) ||
(sun4i_drv_node_is_supported_frontend(node) &&
of_device_is_available(node))) {
/* Add current component */
DRM_DEBUG_DRIVER("Adding component %pOF\n", node);
drm_of_component_match_add(dev, match, component_compare_of, node);
count++;
}
/* each node has at least one output */
sun4i_drv_traverse_endpoints(list, node, 1);
/* TCON TOP has second and third output */
if (sun4i_drv_node_is_tcon_top(node)) {
sun4i_drv_traverse_endpoints(list, node, 3);
sun4i_drv_traverse_endpoints(list, node, 5);
}
return count;
}
#ifdef CONFIG_PM_SLEEP
static int sun4i_drv_drm_sys_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(drm);
}
static int sun4i_drv_drm_sys_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
return drm_mode_config_helper_resume(drm);
}
#endif
static const struct dev_pm_ops sun4i_drv_drm_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sun4i_drv_drm_sys_suspend,
sun4i_drv_drm_sys_resume)
};
static int sun4i_drv_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
struct device_node *np = pdev->dev.of_node, *endpoint;
struct endpoint_list list;
int i, ret, count = 0;
INIT_KFIFO(list.fifo);
/*
* DE2 and DE3 cores actually support 40-bit addresses, but the
* driver does not.
*/
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
for (i = 0;; i++) {
struct device_node *pipeline = of_parse_phandle(np,
"allwinner,pipelines",
i);
if (!pipeline)
break;
kfifo_put(&list.fifo, pipeline);
}
while (kfifo_get(&list.fifo, &endpoint)) {
/* process this endpoint */
ret = sun4i_drv_add_endpoints(&pdev->dev, &list, &match,
endpoint);
/* sun4i_drv_add_endpoints can fail to allocate memory */
if (ret < 0)
return ret;
count += ret;
}
if (count)
return component_master_add_with_match(&pdev->dev,
&sun4i_drv_master_ops,
match);
else
return 0;
}
static void sun4i_drv_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &sun4i_drv_master_ops);
}
static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun4i-a10-display-engine" },
{ .compatible = "allwinner,sun5i-a10s-display-engine" },
{ .compatible = "allwinner,sun5i-a13-display-engine" },
{ .compatible = "allwinner,sun6i-a31-display-engine" },
{ .compatible = "allwinner,sun6i-a31s-display-engine" },
{ .compatible = "allwinner,sun7i-a20-display-engine" },
{ .compatible = "allwinner,sun8i-a23-display-engine" },
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ .compatible = "allwinner,sun8i-a83t-display-engine" },
{ .compatible = "allwinner,sun8i-h3-display-engine" },
{ .compatible = "allwinner,sun8i-r40-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ .compatible = "allwinner,sun9i-a80-display-engine" },
{ .compatible = "allwinner,sun20i-d1-display-engine" },
{ .compatible = "allwinner,sun50i-a64-display-engine" },
{ .compatible = "allwinner,sun50i-h6-display-engine" },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
static struct platform_driver sun4i_drv_platform_driver = {
.probe = sun4i_drv_probe,
.remove_new = sun4i_drv_remove,
.driver = {
.name = "sun4i-drm",
.of_match_table = sun4i_drv_of_table,
.pm = &sun4i_drv_drm_pm_ops,
},
};
drm_module_platform_driver(sun4i_drv_platform_driver);
MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Allwinner A10 Display Engine DRM/KMS Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sun4i/sun4i_drv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Free Electrons
* Copyright (C) 2016 NextThing Co
*
* Maxime Ripard <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/regmap.h>
#include "sun4i_hdmi.h"
struct sun4i_ddc {
struct clk_hw hw;
struct sun4i_hdmi *hdmi;
struct regmap_field *reg;
u8 pre_div;
u8 m_offset;
};
static inline struct sun4i_ddc *hw_to_ddc(struct clk_hw *hw)
{
return container_of(hw, struct sun4i_ddc, hw);
}
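/*
 * From the computation below, the generated rate is:
 *
 *   rate = (((parent / pre_div) / 10) >> n) / (m + m_offset)
 *
 * As a purely illustrative example, with a hypothetical 24 MHz
 * parent, pre_div = 2 and m_offset = 1, the base rate is 1.2 MHz,
 * and n = 2, m = 2 gives (1.2 MHz >> 2) / 3 = 100 kHz.
 */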
static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
unsigned long parent_rate,
const u8 pre_div,
const u8 m_offset,
u8 *m, u8 *n)
{
unsigned long best_rate = 0;
u8 best_m = 0, best_n = 0, _m, _n;
for (_m = 0; _m < 16; _m++) {
for (_n = 0; _n < 8; _n++) {
unsigned long tmp_rate;
tmp_rate = (((parent_rate / pre_div) / 10) >> _n) /
(_m + m_offset);
if (tmp_rate > rate)
continue;
if (abs(rate - tmp_rate) < abs(rate - best_rate)) {
best_rate = tmp_rate;
best_m = _m;
best_n = _n;
}
}
}
if (m && n) {
*m = best_m;
*n = best_n;
}
return best_rate;
}
static long sun4i_ddc_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct sun4i_ddc *ddc = hw_to_ddc(hw);
return sun4i_ddc_calc_divider(rate, *prate, ddc->pre_div,
ddc->m_offset, NULL, NULL);
}
static unsigned long sun4i_ddc_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct sun4i_ddc *ddc = hw_to_ddc(hw);
unsigned int reg;
u8 m, n;
regmap_field_read(ddc->reg, ®);
m = (reg >> 3) & 0xf;
n = reg & 0x7;
return (((parent_rate / ddc->pre_div) / 10) >> n) /
(m + ddc->m_offset);
}
static int sun4i_ddc_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct sun4i_ddc *ddc = hw_to_ddc(hw);
u8 div_m, div_n;
sun4i_ddc_calc_divider(rate, parent_rate, ddc->pre_div,
ddc->m_offset, &div_m, &div_n);
regmap_field_write(ddc->reg,
SUN4I_HDMI_DDC_CLK_M(div_m) |
SUN4I_HDMI_DDC_CLK_N(div_n));
return 0;
}
static const struct clk_ops sun4i_ddc_ops = {
.recalc_rate = sun4i_ddc_recalc_rate,
.round_rate = sun4i_ddc_round_rate,
.set_rate = sun4i_ddc_set_rate,
};
int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *parent)
{
struct clk_init_data init;
struct sun4i_ddc *ddc;
const char *parent_name;
parent_name = __clk_get_name(parent);
if (!parent_name)
return -ENODEV;
ddc = devm_kzalloc(hdmi->dev, sizeof(*ddc), GFP_KERNEL);
if (!ddc)
return -ENOMEM;
ddc->reg = devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
hdmi->variant->ddc_clk_reg);
if (IS_ERR(ddc->reg))
return PTR_ERR(ddc->reg);
init.name = "hdmi-ddc";
init.ops = &sun4i_ddc_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
ddc->hdmi = hdmi;
ddc->hw.init = &init;
ddc->pre_div = hdmi->variant->ddc_clk_pre_divider;
ddc->m_offset = hdmi->variant->ddc_clk_m_offset;
hdmi->ddc_clk = devm_clk_register(hdmi->dev, &ddc->hw);
if (IS_ERR(hdmi->ddc_clk))
return PTR_ERR(hdmi->ddc_clk);
return 0;
}
| linux-master | drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <[email protected]>
*/
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "logicvc_crtc.h"
#include "logicvc_drm.h"
#include "logicvc_interface.h"
#include "logicvc_layer.h"
#include "logicvc_regs.h"
#define logicvc_crtc(c) \
container_of(c, struct logicvc_crtc, drm_crtc)
static enum drm_mode_status
logicvc_crtc_mode_valid(struct drm_crtc *drm_crtc,
const struct drm_display_mode *mode)
{
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return -EINVAL;
return 0;
}
static void logicvc_crtc_atomic_begin(struct drm_crtc *drm_crtc,
struct drm_atomic_state *state)
{
struct logicvc_crtc *crtc = logicvc_crtc(drm_crtc);
struct drm_crtc_state *old_state =
drm_atomic_get_old_crtc_state(state, drm_crtc);
struct drm_device *drm_dev = drm_crtc->dev;
unsigned long flags;
/*
* We need to grab the pending event here if vblank was already enabled
* since we won't get a call to atomic_enable to grab it.
*/
if (drm_crtc->state->event && old_state->active) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
WARN_ON(drm_crtc_vblank_get(drm_crtc) != 0);
crtc->event = drm_crtc->state->event;
drm_crtc->state->event = NULL;
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
}
static void logicvc_crtc_atomic_enable(struct drm_crtc *drm_crtc,
struct drm_atomic_state *state)
{
struct logicvc_crtc *crtc = logicvc_crtc(drm_crtc);
struct logicvc_drm *logicvc = logicvc_drm(drm_crtc->dev);
struct drm_crtc_state *old_state =
drm_atomic_get_old_crtc_state(state, drm_crtc);
struct drm_crtc_state *new_state =
drm_atomic_get_new_crtc_state(state, drm_crtc);
struct drm_display_mode *mode = &new_state->adjusted_mode;
struct drm_device *drm_dev = drm_crtc->dev;
unsigned int hact, hfp, hsl, hbp;
unsigned int vact, vfp, vsl, vbp;
unsigned long flags;
u32 ctrl;
/* Timings */
hact = mode->hdisplay;
hfp = mode->hsync_start - mode->hdisplay;
hsl = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
vact = mode->vdisplay;
vfp = mode->vsync_start - mode->vdisplay;
vsl = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;
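/*
 * For instance, with a standard 1920x1080@60 CEA mode (htotal 2200,
 * hsync from 2008 to 2052), hfp = 88, hsl = 44 and hbp = 148.
 */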
regmap_write(logicvc->regmap, LOGICVC_HSYNC_FRONT_PORCH_REG, hfp - 1);
regmap_write(logicvc->regmap, LOGICVC_HSYNC_REG, hsl - 1);
regmap_write(logicvc->regmap, LOGICVC_HSYNC_BACK_PORCH_REG, hbp - 1);
regmap_write(logicvc->regmap, LOGICVC_HRES_REG, hact - 1);
regmap_write(logicvc->regmap, LOGICVC_VSYNC_FRONT_PORCH_REG, vfp - 1);
regmap_write(logicvc->regmap, LOGICVC_VSYNC_REG, vsl - 1);
regmap_write(logicvc->regmap, LOGICVC_VSYNC_BACK_PORCH_REG, vbp - 1);
regmap_write(logicvc->regmap, LOGICVC_VRES_REG, vact - 1);
/* Signals */
ctrl = LOGICVC_CTRL_HSYNC_ENABLE | LOGICVC_CTRL_VSYNC_ENABLE |
LOGICVC_CTRL_DE_ENABLE;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
ctrl |= LOGICVC_CTRL_HSYNC_INVERT;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ctrl |= LOGICVC_CTRL_VSYNC_INVERT;
if (logicvc->interface) {
struct drm_connector *connector =
&logicvc->interface->drm_connector;
struct drm_display_info *display_info =
&connector->display_info;
if (display_info->bus_flags & DRM_BUS_FLAG_DE_LOW)
ctrl |= LOGICVC_CTRL_DE_INVERT;
if (display_info->bus_flags &
DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
ctrl |= LOGICVC_CTRL_CLOCK_INVERT;
}
regmap_update_bits(logicvc->regmap, LOGICVC_CTRL_REG,
LOGICVC_CTRL_HSYNC_ENABLE |
LOGICVC_CTRL_HSYNC_INVERT |
LOGICVC_CTRL_VSYNC_ENABLE |
LOGICVC_CTRL_VSYNC_INVERT |
LOGICVC_CTRL_DE_ENABLE |
LOGICVC_CTRL_DE_INVERT |
LOGICVC_CTRL_PIXEL_INVERT |
LOGICVC_CTRL_CLOCK_INVERT, ctrl);
/* Generate internal state reset. */
regmap_write(logicvc->regmap, LOGICVC_DTYPE_REG, 0);
drm_crtc_vblank_on(drm_crtc);
/* Register our event after vblank is enabled. */
if (drm_crtc->state->event && !old_state->active) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
WARN_ON(drm_crtc_vblank_get(drm_crtc) != 0);
crtc->event = drm_crtc->state->event;
drm_crtc->state->event = NULL;
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
}
static void logicvc_crtc_atomic_disable(struct drm_crtc *drm_crtc,
struct drm_atomic_state *state)
{
struct logicvc_drm *logicvc = logicvc_drm(drm_crtc->dev);
struct drm_device *drm_dev = drm_crtc->dev;
drm_crtc_vblank_off(drm_crtc);
/* Disable and clear CRTC bits. */
regmap_update_bits(logicvc->regmap, LOGICVC_CTRL_REG,
LOGICVC_CTRL_HSYNC_ENABLE |
LOGICVC_CTRL_HSYNC_INVERT |
LOGICVC_CTRL_VSYNC_ENABLE |
LOGICVC_CTRL_VSYNC_INVERT |
LOGICVC_CTRL_DE_ENABLE |
LOGICVC_CTRL_DE_INVERT |
LOGICVC_CTRL_PIXEL_INVERT |
LOGICVC_CTRL_CLOCK_INVERT, 0);
/* Generate internal state reset. */
regmap_write(logicvc->regmap, LOGICVC_DTYPE_REG, 0);
/* Consume any leftover event since vblank is now disabled. */
if (drm_crtc->state->event && !drm_crtc->state->active) {
spin_lock_irq(&drm_dev->event_lock);
drm_crtc_send_vblank_event(drm_crtc, drm_crtc->state->event);
drm_crtc->state->event = NULL;
spin_unlock_irq(&drm_dev->event_lock);
}
}
static const struct drm_crtc_helper_funcs logicvc_crtc_helper_funcs = {
.mode_valid = logicvc_crtc_mode_valid,
.atomic_begin = logicvc_crtc_atomic_begin,
.atomic_enable = logicvc_crtc_atomic_enable,
.atomic_disable = logicvc_crtc_atomic_disable,
};
static int logicvc_crtc_enable_vblank(struct drm_crtc *drm_crtc)
{
struct logicvc_drm *logicvc = logicvc_drm(drm_crtc->dev);
/* Clear any pending V_SYNC interrupt. */
regmap_write_bits(logicvc->regmap, LOGICVC_INT_STAT_REG,
LOGICVC_INT_STAT_V_SYNC, LOGICVC_INT_STAT_V_SYNC);
/* Unmask V_SYNC interrupt. */
regmap_write_bits(logicvc->regmap, LOGICVC_INT_MASK_REG,
LOGICVC_INT_MASK_V_SYNC, 0);
return 0;
}
static void logicvc_crtc_disable_vblank(struct drm_crtc *drm_crtc)
{
struct logicvc_drm *logicvc = logicvc_drm(drm_crtc->dev);
/* Mask V_SYNC interrupt. */
regmap_write_bits(logicvc->regmap, LOGICVC_INT_MASK_REG,
LOGICVC_INT_MASK_V_SYNC, LOGICVC_INT_MASK_V_SYNC);
}
static const struct drm_crtc_funcs logicvc_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = logicvc_crtc_enable_vblank,
.disable_vblank = logicvc_crtc_disable_vblank,
};
void logicvc_crtc_vblank_handler(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct logicvc_crtc *crtc = logicvc->crtc;
unsigned long flags;
if (!crtc)
return;
drm_crtc_handle_vblank(&crtc->drm_crtc);
if (crtc->event) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
drm_crtc_send_vblank_event(&crtc->drm_crtc, crtc->event);
drm_crtc_vblank_put(&crtc->drm_crtc);
crtc->event = NULL;
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
}
int logicvc_crtc_init(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct device *dev = drm_dev->dev;
struct device_node *of_node = dev->of_node;
struct logicvc_crtc *crtc;
struct logicvc_layer *layer_primary;
int ret;
crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
if (!crtc)
return -ENOMEM;
layer_primary = logicvc_layer_get_primary(logicvc);
if (!layer_primary) {
drm_err(drm_dev, "Failed to get primary layer\n");
return -EINVAL;
}
ret = drm_crtc_init_with_planes(drm_dev, &crtc->drm_crtc,
&layer_primary->drm_plane, NULL,
&logicvc_crtc_funcs, NULL);
if (ret) {
drm_err(drm_dev, "Failed to initialize CRTC\n");
return ret;
}
drm_crtc_helper_add(&crtc->drm_crtc, &logicvc_crtc_helper_funcs);
crtc->drm_crtc.port = of_graph_get_port_by_id(of_node, 1);
logicvc->crtc = crtc;
return 0;
}
| linux-master | drivers/gpu/drm/logicvc/logicvc_crtc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <[email protected]>
*/
#include <linux/types.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "logicvc_drm.h"
#include "logicvc_interface.h"
#include "logicvc_layer.h"
#include "logicvc_mode.h"
static const struct drm_mode_config_funcs logicvc_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
int logicvc_mode_init(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct drm_mode_config *mode_config = &drm_dev->mode_config;
struct logicvc_layer *layer_primary;
uint32_t preferred_depth;
int ret;
ret = drm_vblank_init(drm_dev, mode_config->num_crtc);
if (ret) {
drm_err(drm_dev, "Failed to initialize vblank\n");
return ret;
}
layer_primary = logicvc_layer_get_primary(logicvc);
if (!layer_primary) {
drm_err(drm_dev, "Failed to get primary layer\n");
return -EINVAL;
}
preferred_depth = layer_primary->formats->depth;
/* DRM counts alpha in depth, but our driver doesn't. */
if (layer_primary->formats->alpha)
preferred_depth += 8;
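/* e.g. a 24-bit RGB primary layer with per-pixel alpha yields a preferred depth of 32. */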
mode_config->min_width = 64;
mode_config->max_width = 2048;
mode_config->min_height = 1;
mode_config->max_height = 2048;
mode_config->preferred_depth = preferred_depth;
mode_config->funcs = &logicvc_mode_config_funcs;
drm_mode_config_reset(drm_dev);
drm_kms_helper_poll_init(drm_dev);
return 0;
}
void logicvc_mode_fini(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
drm_kms_helper_poll_fini(drm_dev);
}
| linux-master | drivers/gpu/drm/logicvc/logicvc_mode.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <[email protected]>
*/
#include <drm/drm_print.h>
#include "logicvc_drm.h"
#include "logicvc_layer.h"
#include "logicvc_of.h"
static struct logicvc_of_property_sv logicvc_of_display_interface_sv[] = {
{ "lvds-4bits", LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS },
{ "lvds-3bits", LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS },
{ },
};
static struct logicvc_of_property_sv logicvc_of_display_colorspace_sv[] = {
{ "rgb", LOGICVC_DISPLAY_COLORSPACE_RGB },
{ "yuv422", LOGICVC_DISPLAY_COLORSPACE_YUV422 },
{ "yuv444", LOGICVC_DISPLAY_COLORSPACE_YUV444 },
{ },
};
static struct logicvc_of_property_sv logicvc_of_layer_colorspace_sv[] = {
{ "rgb", LOGICVC_LAYER_COLORSPACE_RGB },
{ "yuv", LOGICVC_LAYER_COLORSPACE_YUV },
{ },
};
static struct logicvc_of_property_sv logicvc_of_layer_alpha_mode_sv[] = {
{ "layer", LOGICVC_LAYER_ALPHA_LAYER },
{ "pixel", LOGICVC_LAYER_ALPHA_PIXEL },
{ },
};
static struct logicvc_of_property logicvc_of_properties[] = {
[LOGICVC_OF_PROPERTY_DISPLAY_INTERFACE] = {
.name = "xylon,display-interface",
.sv = logicvc_of_display_interface_sv,
.range = {
LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS,
LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS,
},
},
[LOGICVC_OF_PROPERTY_DISPLAY_COLORSPACE] = {
.name = "xylon,display-colorspace",
.sv = logicvc_of_display_colorspace_sv,
.range = {
LOGICVC_DISPLAY_COLORSPACE_RGB,
LOGICVC_DISPLAY_COLORSPACE_YUV444,
},
},
[LOGICVC_OF_PROPERTY_DISPLAY_DEPTH] = {
.name = "xylon,display-depth",
.range = { 8, 24 },
},
[LOGICVC_OF_PROPERTY_ROW_STRIDE] = {
.name = "xylon,row-stride",
},
[LOGICVC_OF_PROPERTY_DITHERING] = {
.name = "xylon,dithering",
.optional = true,
},
[LOGICVC_OF_PROPERTY_BACKGROUND_LAYER] = {
.name = "xylon,background-layer",
.optional = true,
},
[LOGICVC_OF_PROPERTY_LAYERS_CONFIGURABLE] = {
.name = "xylon,layers-configurable",
.optional = true,
},
[LOGICVC_OF_PROPERTY_LAYERS_COUNT] = {
.name = "xylon,layers-count",
},
[LOGICVC_OF_PROPERTY_LAYER_DEPTH] = {
.name = "xylon,layer-depth",
.range = { 8, 24 },
},
[LOGICVC_OF_PROPERTY_LAYER_COLORSPACE] = {
.name = "xylon,layer-colorspace",
.sv = logicvc_of_layer_colorspace_sv,
.range = {
LOGICVC_LAYER_COLORSPACE_RGB,
LOGICVC_LAYER_COLORSPACE_RGB,
},
},
[LOGICVC_OF_PROPERTY_LAYER_ALPHA_MODE] = {
.name = "xylon,layer-alpha-mode",
.sv = logicvc_of_layer_alpha_mode_sv,
.range = {
LOGICVC_LAYER_ALPHA_LAYER,
LOGICVC_LAYER_ALPHA_PIXEL,
},
},
[LOGICVC_OF_PROPERTY_LAYER_BASE_OFFSET] = {
.name = "xylon,layer-base-offset",
},
[LOGICVC_OF_PROPERTY_LAYER_BUFFER_OFFSET] = {
.name = "xylon,layer-buffer-offset",
},
[LOGICVC_OF_PROPERTY_LAYER_PRIMARY] = {
.name = "xylon,layer-primary",
.optional = true,
},
};
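/*
 * A minimal sketch of how these properties might look in a device tree
 * node (values picked for illustration only):
 *
 *   xylon,display-interface = "lvds-4bits";
 *   xylon,display-colorspace = "rgb";
 *   xylon,display-depth = <16>;
 *   xylon,row-stride = <1024>;
 */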
static int logicvc_of_property_sv_value(struct logicvc_of_property_sv *sv,
const char *string, u32 *value)
{
unsigned int i = 0;
while (sv[i].string) {
if (!strcmp(sv[i].string, string)) {
*value = sv[i].value;
return 0;
}
i++;
}
return -EINVAL;
}
int logicvc_of_property_parse_u32(struct device_node *of_node,
unsigned int index, u32 *target)
{
struct logicvc_of_property *property;
const char *string;
u32 value;
int ret;
if (index >= LOGICVC_OF_PROPERTY_MAXIMUM)
return -EINVAL;
property = &logicvc_of_properties[index];
if (!property->optional &&
!of_property_read_bool(of_node, property->name))
return -ENODEV;
if (property->sv) {
ret = of_property_read_string(of_node, property->name, &string);
if (ret)
return ret;
ret = logicvc_of_property_sv_value(property->sv, string,
&value);
if (ret)
return ret;
} else {
ret = of_property_read_u32(of_node, property->name, &value);
if (ret)
return ret;
}
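/* A fully-zero range means the property value is not range-checked. */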
if (property->range[0] || property->range[1])
if (value < property->range[0] || value > property->range[1])
return -ERANGE;
*target = value;
return 0;
}
void logicvc_of_property_parse_bool(struct device_node *of_node,
unsigned int index, bool *target)
{
struct logicvc_of_property *property;
if (index >= LOGICVC_OF_PROPERTY_MAXIMUM) {
/* Fallback. */
*target = false;
return;
}
property = &logicvc_of_properties[index];
*target = of_property_read_bool(of_node, property->name);
}
bool logicvc_of_node_is_layer(struct device_node *of_node)
{
return !of_node_cmp(of_node->name, "layer");
}
| linux-master | drivers/gpu/drm/logicvc/logicvc_of.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
#include "logicvc_crtc.h"
#include "logicvc_drm.h"
#include "logicvc_interface.h"
#include "logicvc_mode.h"
#include "logicvc_layer.h"
#include "logicvc_of.h"
#include "logicvc_regs.h"
DEFINE_DRM_GEM_DMA_FOPS(logicvc_drm_fops);
static int logicvc_drm_gem_dma_dumb_create(struct drm_file *file_priv,
struct drm_device *drm_dev,
struct drm_mode_create_dumb *args)
{
struct logicvc_drm *logicvc = logicvc_drm(drm_dev);
/* Stride is always fixed to its configuration value. */
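/* e.g. a row stride of 1024 at 32 bpp gives a fixed 4096-byte pitch. */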
args->pitch = logicvc->config.row_stride * DIV_ROUND_UP(args->bpp, 8);
return drm_gem_dma_dumb_create_internal(file_priv, drm_dev, args);
}
static struct drm_driver logicvc_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET |
DRIVER_ATOMIC,
.fops = &logicvc_drm_fops,
.name = "logicvc-drm",
.desc = "Xylon LogiCVC DRM driver",
.date = "20200403",
.major = 1,
.minor = 0,
DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(logicvc_drm_gem_dma_dumb_create),
};
static struct regmap_config logicvc_drm_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.name = "logicvc-drm",
};
static irqreturn_t logicvc_drm_irq_handler(int irq, void *data)
{
struct logicvc_drm *logicvc = data;
irqreturn_t ret = IRQ_NONE;
u32 stat = 0;
/* Get pending interrupt sources. */
regmap_read(logicvc->regmap, LOGICVC_INT_STAT_REG, &stat);
/* Clear all pending interrupt sources. */
regmap_write(logicvc->regmap, LOGICVC_INT_STAT_REG, stat);
if (stat & LOGICVC_INT_STAT_V_SYNC) {
logicvc_crtc_vblank_handler(logicvc);
ret = IRQ_HANDLED;
}
return ret;
}
static int logicvc_drm_config_parse(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct device *dev = drm_dev->dev;
struct device_node *of_node = dev->of_node;
struct logicvc_drm_config *config = &logicvc->config;
struct device_node *layers_node;
int ret;
logicvc_of_property_parse_bool(of_node, LOGICVC_OF_PROPERTY_DITHERING,
&config->dithering);
logicvc_of_property_parse_bool(of_node,
LOGICVC_OF_PROPERTY_BACKGROUND_LAYER,
&config->background_layer);
logicvc_of_property_parse_bool(of_node,
LOGICVC_OF_PROPERTY_LAYERS_CONFIGURABLE,
&config->layers_configurable);
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_DISPLAY_INTERFACE,
&config->display_interface);
if (ret)
return ret;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_DISPLAY_COLORSPACE,
&config->display_colorspace);
if (ret)
return ret;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_DISPLAY_DEPTH,
&config->display_depth);
if (ret)
return ret;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_ROW_STRIDE,
&config->row_stride);
if (ret)
return ret;
layers_node = of_get_child_by_name(of_node, "layers");
if (!layers_node) {
drm_err(drm_dev, "Missing non-optional layers node\n");
return -EINVAL;
}
config->layers_count = of_get_child_count(layers_node);
if (!config->layers_count) {
drm_err(drm_dev,
"Missing non-optional layer child nodes\n");
return -EINVAL;
}
return 0;
}
static int logicvc_clocks_prepare(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct device *dev = drm_dev->dev;
struct {
struct clk **clk;
char *name;
bool optional;
} clocks_map[] = {
{
.clk = &logicvc->vclk,
.name = "vclk",
.optional = false,
},
{
.clk = &logicvc->vclk2,
.name = "vclk2",
.optional = true,
},
{
.clk = &logicvc->lvdsclk,
.name = "lvdsclk",
.optional = true,
},
{
.clk = &logicvc->lvdsclkn,
.name = "lvdsclkn",
.optional = true,
},
};
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(clocks_map); i++) {
struct clk *clk;
clk = devm_clk_get(dev, clocks_map[i].name);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) == -ENOENT && clocks_map[i].optional)
continue;
drm_err(drm_dev, "Missing non-optional clock %s\n",
clocks_map[i].name);
ret = PTR_ERR(clk);
goto error;
}
ret = clk_prepare_enable(clk);
if (ret) {
drm_err(drm_dev,
"Failed to prepare and enable clock %s\n",
clocks_map[i].name);
goto error;
}
*clocks_map[i].clk = clk;
}
return 0;
error:
for (i = 0; i < ARRAY_SIZE(clocks_map); i++) {
if (!*clocks_map[i].clk)
continue;
clk_disable_unprepare(*clocks_map[i].clk);
*clocks_map[i].clk = NULL;
}
return ret;
}
static int logicvc_clocks_unprepare(struct logicvc_drm *logicvc)
{
struct clk **clocks[] = {
&logicvc->vclk,
&logicvc->vclk2,
&logicvc->lvdsclk,
&logicvc->lvdsclkn,
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(clocks); i++) {
if (!*clocks[i])
continue;
clk_disable_unprepare(*clocks[i]);
*clocks[i] = NULL;
}
return 0;
}
static const struct logicvc_drm_caps logicvc_drm_caps[] = {
{
.major = 3,
.layer_address = false,
},
{
.major = 4,
.layer_address = true,
},
{
.major = 5,
.layer_address = true,
},
};
static const struct logicvc_drm_caps *
logicvc_drm_caps_match(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
const struct logicvc_drm_caps *caps = NULL;
unsigned int major, minor;
char level;
unsigned int i;
u32 version;
regmap_read(logicvc->regmap, LOGICVC_IP_VERSION_REG, &version);
major = FIELD_GET(LOGICVC_IP_VERSION_MAJOR_MASK, version);
minor = FIELD_GET(LOGICVC_IP_VERSION_MINOR_MASK, version);
level = FIELD_GET(LOGICVC_IP_VERSION_LEVEL_MASK, version) + 'a';
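/* Zero fields in an entry act as wildcards; the last matching entry wins. */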
for (i = 0; i < ARRAY_SIZE(logicvc_drm_caps); i++) {
if (logicvc_drm_caps[i].major &&
logicvc_drm_caps[i].major != major)
continue;
if (logicvc_drm_caps[i].minor &&
logicvc_drm_caps[i].minor != minor)
continue;
if (logicvc_drm_caps[i].level &&
logicvc_drm_caps[i].level != level)
continue;
caps = &logicvc_drm_caps[i];
}
drm_info(drm_dev, "LogiCVC version %d.%02d.%c\n", major, minor, level);
return caps;
}
static int logicvc_drm_probe(struct platform_device *pdev)
{
struct device_node *of_node = pdev->dev.of_node;
struct device_node *reserved_mem_node;
struct reserved_mem *reserved_mem = NULL;
const struct logicvc_drm_caps *caps;
struct logicvc_drm *logicvc;
struct device *dev = &pdev->dev;
struct drm_device *drm_dev;
struct regmap *regmap = NULL;
struct resource res;
void __iomem *base;
unsigned int preferred_bpp;
int irq;
int ret;
ret = of_reserved_mem_device_init(dev);
if (ret && ret != -ENODEV) {
dev_err(dev, "Failed to init memory region\n");
goto error_early;
}
reserved_mem_node = of_parse_phandle(of_node, "memory-region", 0);
if (reserved_mem_node) {
reserved_mem = of_reserved_mem_lookup(reserved_mem_node);
of_node_put(reserved_mem_node);
}
/* Get regmap from parent if available. */
if (of_node->parent)
regmap = syscon_node_to_regmap(of_node->parent);
/* Register our own regmap otherwise. */
if (IS_ERR_OR_NULL(regmap)) {
ret = of_address_to_resource(of_node, 0, &res);
if (ret) {
dev_err(dev, "Failed to get resource from address\n");
goto error_reserved_mem;
}
base = devm_ioremap_resource(dev, &res);
if (IS_ERR(base)) {
dev_err(dev, "Failed to map I/O base\n");
ret = PTR_ERR(base);
goto error_reserved_mem;
}
logicvc_drm_regmap_config.max_register = resource_size(&res) - 4;
regmap = devm_regmap_init_mmio(dev, base,
&logicvc_drm_regmap_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to create regmap for I/O\n");
ret = PTR_ERR(regmap);
goto error_reserved_mem;
}
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENODEV;
goto error_reserved_mem;
}
logicvc = devm_drm_dev_alloc(dev, &logicvc_drm_driver,
struct logicvc_drm, drm_dev);
if (IS_ERR(logicvc)) {
ret = PTR_ERR(logicvc);
goto error_reserved_mem;
}
platform_set_drvdata(pdev, logicvc);
drm_dev = &logicvc->drm_dev;
logicvc->regmap = regmap;
INIT_LIST_HEAD(&logicvc->layers_list);
caps = logicvc_drm_caps_match(logicvc);
if (!caps) {
ret = -EINVAL;
goto error_reserved_mem;
}
logicvc->caps = caps;
if (reserved_mem)
logicvc->reserved_mem_base = reserved_mem->base;
ret = logicvc_clocks_prepare(logicvc);
if (ret) {
drm_err(drm_dev, "Failed to prepare clocks\n");
goto error_reserved_mem;
}
ret = devm_request_irq(dev, irq, logicvc_drm_irq_handler, 0,
dev_name(dev), logicvc);
if (ret) {
drm_err(drm_dev, "Failed to request IRQ\n");
goto error_clocks;
}
ret = logicvc_drm_config_parse(logicvc);
if (ret && ret != -ENODEV) {
drm_err(drm_dev, "Failed to parse config\n");
goto error_clocks;
}
ret = drmm_mode_config_init(drm_dev);
if (ret) {
drm_err(drm_dev, "Failed to init mode config\n");
goto error_clocks;
}
ret = logicvc_layers_init(logicvc);
if (ret) {
drm_err(drm_dev, "Failed to initialize layers\n");
goto error_clocks;
}
ret = logicvc_crtc_init(logicvc);
if (ret) {
drm_err(drm_dev, "Failed to initialize CRTC\n");
goto error_clocks;
}
logicvc_layers_attach_crtc(logicvc);
ret = logicvc_interface_init(logicvc);
if (ret) {
if (ret != -EPROBE_DEFER)
drm_err(drm_dev, "Failed to initialize interface\n");
goto error_clocks;
}
logicvc_interface_attach_crtc(logicvc);
ret = logicvc_mode_init(logicvc);
if (ret) {
drm_err(drm_dev, "Failed to initialize KMS\n");
goto error_clocks;
}
ret = drm_dev_register(drm_dev, 0);
if (ret) {
drm_err(drm_dev, "Failed to register DRM device\n");
goto error_mode;
}
switch (drm_dev->mode_config.preferred_depth) {
case 16:
preferred_bpp = 16;
break;
case 24:
case 32:
default:
preferred_bpp = 32;
break;
}
drm_fbdev_dma_setup(drm_dev, preferred_bpp);
return 0;
error_mode:
logicvc_mode_fini(logicvc);
error_clocks:
logicvc_clocks_unprepare(logicvc);
error_reserved_mem:
of_reserved_mem_device_release(dev);
error_early:
return ret;
}
static void logicvc_drm_remove(struct platform_device *pdev)
{
struct logicvc_drm *logicvc = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
struct drm_device *drm_dev = &logicvc->drm_dev;
drm_dev_unregister(drm_dev);
drm_atomic_helper_shutdown(drm_dev);
logicvc_mode_fini(logicvc);
logicvc_clocks_unprepare(logicvc);
of_reserved_mem_device_release(dev);
}
static const struct of_device_id logicvc_drm_of_table[] = {
{ .compatible = "xylon,logicvc-3.02.a-display" },
{ .compatible = "xylon,logicvc-4.01.a-display" },
{},
};
MODULE_DEVICE_TABLE(of, logicvc_drm_of_table);
static struct platform_driver logicvc_drm_platform_driver = {
.probe = logicvc_drm_probe,
.remove_new = logicvc_drm_remove,
.driver = {
.name = "logicvc-drm",
.of_match_table = logicvc_drm_of_table,
},
};
module_platform_driver(logicvc_drm_platform_driver);
MODULE_AUTHOR("Paul Kocialkowski <[email protected]>");
MODULE_DESCRIPTION("Xylon LogiCVC DRM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/logicvc/logicvc_drm.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <[email protected]>
*/
#include <linux/types.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "logicvc_crtc.h"
#include "logicvc_drm.h"
#include "logicvc_interface.h"
#include "logicvc_regs.h"
#define logicvc_interface_from_drm_encoder(c) \
container_of(c, struct logicvc_interface, drm_encoder)
#define logicvc_interface_from_drm_connector(c) \
container_of(c, struct logicvc_interface, drm_connector)
static void logicvc_encoder_enable(struct drm_encoder *drm_encoder)
{
struct logicvc_drm *logicvc = logicvc_drm(drm_encoder->dev);
struct logicvc_interface *interface =
logicvc_interface_from_drm_encoder(drm_encoder);
regmap_update_bits(logicvc->regmap, LOGICVC_POWER_CTRL_REG,
LOGICVC_POWER_CTRL_VIDEO_ENABLE,
LOGICVC_POWER_CTRL_VIDEO_ENABLE);
if (interface->drm_panel) {
drm_panel_prepare(interface->drm_panel);
drm_panel_enable(interface->drm_panel);
}
}
static void logicvc_encoder_disable(struct drm_encoder *drm_encoder)
{
struct logicvc_interface *interface =
logicvc_interface_from_drm_encoder(drm_encoder);
if (interface->drm_panel) {
drm_panel_disable(interface->drm_panel);
drm_panel_unprepare(interface->drm_panel);
}
}
static const struct drm_encoder_helper_funcs logicvc_encoder_helper_funcs = {
.enable = logicvc_encoder_enable,
.disable = logicvc_encoder_disable,
};
static const struct drm_encoder_funcs logicvc_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
static int logicvc_connector_get_modes(struct drm_connector *drm_connector)
{
struct logicvc_interface *interface =
logicvc_interface_from_drm_connector(drm_connector);
if (interface->drm_panel)
return drm_panel_get_modes(interface->drm_panel, drm_connector);
WARN_ONCE(1, "Retrieving modes from a native connector is not implemented.");
return 0;
}
static const struct drm_connector_helper_funcs logicvc_connector_helper_funcs = {
.get_modes = logicvc_connector_get_modes,
};
static const struct drm_connector_funcs logicvc_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int logicvc_interface_encoder_type(struct logicvc_drm *logicvc)
{
switch (logicvc->config.display_interface) {
case LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS:
case LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS_CAMERA:
case LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS:
return DRM_MODE_ENCODER_LVDS;
case LOGICVC_DISPLAY_INTERFACE_DVI:
return DRM_MODE_ENCODER_TMDS;
case LOGICVC_DISPLAY_INTERFACE_RGB:
return DRM_MODE_ENCODER_DPI;
default:
return DRM_MODE_ENCODER_NONE;
}
}
static int logicvc_interface_connector_type(struct logicvc_drm *logicvc)
{
switch (logicvc->config.display_interface) {
case LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS:
case LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS_CAMERA:
case LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS:
return DRM_MODE_CONNECTOR_LVDS;
case LOGICVC_DISPLAY_INTERFACE_DVI:
return DRM_MODE_CONNECTOR_DVID;
case LOGICVC_DISPLAY_INTERFACE_RGB:
return DRM_MODE_CONNECTOR_DPI;
default:
return DRM_MODE_CONNECTOR_Unknown;
}
}
static bool logicvc_interface_native_connector(struct logicvc_drm *logicvc)
{
switch (logicvc->config.display_interface) {
case LOGICVC_DISPLAY_INTERFACE_DVI:
return true;
default:
return false;
}
}
void logicvc_interface_attach_crtc(struct logicvc_drm *logicvc)
{
uint32_t possible_crtcs = drm_crtc_mask(&logicvc->crtc->drm_crtc);
logicvc->interface->drm_encoder.possible_crtcs = possible_crtcs;
}
int logicvc_interface_init(struct logicvc_drm *logicvc)
{
struct logicvc_interface *interface;
struct drm_device *drm_dev = &logicvc->drm_dev;
struct device *dev = drm_dev->dev;
struct device_node *of_node = dev->of_node;
int encoder_type = logicvc_interface_encoder_type(logicvc);
int connector_type = logicvc_interface_connector_type(logicvc);
bool native_connector = logicvc_interface_native_connector(logicvc);
int ret;
interface = devm_kzalloc(dev, sizeof(*interface), GFP_KERNEL);
if (!interface) {
ret = -ENOMEM;
goto error_early;
}
ret = drm_of_find_panel_or_bridge(of_node, 0, 0, &interface->drm_panel,
&interface->drm_bridge);
if (ret == -EPROBE_DEFER)
goto error_early;
ret = drm_encoder_init(drm_dev, &interface->drm_encoder,
&logicvc_encoder_funcs, encoder_type, NULL);
if (ret) {
drm_err(drm_dev, "Failed to initialize encoder\n");
goto error_early;
}
drm_encoder_helper_add(&interface->drm_encoder,
&logicvc_encoder_helper_funcs);
if (native_connector || interface->drm_panel) {
ret = drm_connector_init(drm_dev, &interface->drm_connector,
&logicvc_connector_funcs,
connector_type);
if (ret) {
drm_err(drm_dev, "Failed to initialize connector\n");
goto error_encoder;
}
drm_connector_helper_add(&interface->drm_connector,
&logicvc_connector_helper_funcs);
ret = drm_connector_attach_encoder(&interface->drm_connector,
&interface->drm_encoder);
if (ret) {
drm_err(drm_dev,
"Failed to attach connector to encoder\n");
goto error_encoder;
}
}
if (interface->drm_bridge) {
ret = drm_bridge_attach(&interface->drm_encoder,
interface->drm_bridge, NULL, 0);
if (ret) {
drm_err(drm_dev,
"Failed to attach bridge to encoder\n");
goto error_encoder;
}
}
logicvc->interface = interface;
return 0;
error_encoder:
drm_encoder_cleanup(&interface->drm_encoder);
error_early:
return ret;
}
| linux-master | drivers/gpu/drm/logicvc/logicvc_interface.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <[email protected]>
*/
#include <linux/of.h>
#include <linux/types.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include "logicvc_crtc.h"
#include "logicvc_drm.h"
#include "logicvc_layer.h"
#include "logicvc_of.h"
#include "logicvc_regs.h"
#define logicvc_layer(p) \
container_of(p, struct logicvc_layer, drm_plane)
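/* Each format list below is terminated by a DRM_FORMAT_INVALID sentinel. */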
static uint32_t logicvc_layer_formats_rgb16[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
DRM_FORMAT_INVALID,
};
static uint32_t logicvc_layer_formats_rgb24[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_INVALID,
};
/*
* What we call depth in this driver only counts color components, not alpha.
* This allows us to stay compatible with the LogiCVC bitstream definitions.
*/
static uint32_t logicvc_layer_formats_rgb24_alpha[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_INVALID,
};
static struct logicvc_layer_formats logicvc_layer_formats[] = {
{
.colorspace = LOGICVC_LAYER_COLORSPACE_RGB,
.depth = 16,
.formats = logicvc_layer_formats_rgb16,
},
{
.colorspace = LOGICVC_LAYER_COLORSPACE_RGB,
.depth = 24,
.formats = logicvc_layer_formats_rgb24,
},
{
.colorspace = LOGICVC_LAYER_COLORSPACE_RGB,
.depth = 24,
.alpha = true,
.formats = logicvc_layer_formats_rgb24_alpha,
},
{ }
};
static bool logicvc_layer_format_inverted(uint32_t format)
{
switch (format) {
case DRM_FORMAT_BGR565:
case DRM_FORMAT_BGR888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return true;
default:
return false;
}
}
static int logicvc_plane_atomic_check(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct drm_device *drm_dev = drm_plane->dev;
struct logicvc_layer *layer = logicvc_layer(drm_plane);
struct logicvc_drm *logicvc = logicvc_drm(drm_dev);
struct drm_plane_state *new_state =
drm_atomic_get_new_plane_state(state, drm_plane);
struct drm_crtc_state *crtc_state;
int min_scale, max_scale;
bool can_position;
int ret;
if (!new_state->crtc)
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(new_state->state,
new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
if (new_state->crtc_x < 0 || new_state->crtc_y < 0) {
drm_err(drm_dev,
"Negative on-CRTC positions are not supported.\n");
return -EINVAL;
}
if (!logicvc->caps->layer_address) {
ret = logicvc_layer_buffer_find_setup(logicvc, layer, new_state,
NULL);
if (ret) {
drm_err(drm_dev, "No viable setup for buffer found.\n");
return ret;
}
}
min_scale = DRM_PLANE_NO_SCALING;
max_scale = DRM_PLANE_NO_SCALING;
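/*
 * Only overlay planes may be freely positioned, and only when the
 * hardware reports configurable layers; the last layer is excluded
 * since it can be used as the background layer.
 */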
can_position = (drm_plane->type == DRM_PLANE_TYPE_OVERLAY &&
layer->index != (logicvc->config.layers_count - 1) &&
logicvc->config.layers_configurable);
ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
min_scale, max_scale,
can_position, true);
if (ret) {
drm_err(drm_dev, "Invalid plane state\n\n");
return ret;
}
return 0;
}
static void logicvc_plane_atomic_update(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct logicvc_layer *layer = logicvc_layer(drm_plane);
struct logicvc_drm *logicvc = logicvc_drm(drm_plane->dev);
struct drm_device *drm_dev = &logicvc->drm_dev;
struct drm_plane_state *new_state =
drm_atomic_get_new_plane_state(state, drm_plane);
struct drm_crtc *drm_crtc = &logicvc->crtc->drm_crtc;
struct drm_display_mode *mode = &drm_crtc->state->adjusted_mode;
struct drm_framebuffer *fb = new_state->fb;
struct logicvc_layer_buffer_setup setup = {};
u32 index = layer->index;
u32 reg;
/* Layer dimensions */
regmap_write(logicvc->regmap, LOGICVC_LAYER_WIDTH_REG(index),
new_state->crtc_w - 1);
regmap_write(logicvc->regmap, LOGICVC_LAYER_HEIGHT_REG(index),
new_state->crtc_h - 1);
if (logicvc->caps->layer_address) {
phys_addr_t fb_addr = drm_fb_dma_get_gem_addr(fb, new_state, 0);
regmap_write(logicvc->regmap, LOGICVC_LAYER_ADDRESS_REG(index),
fb_addr);
} else {
/* Rely on offsets to configure the address. */
logicvc_layer_buffer_find_setup(logicvc, layer, new_state,
&setup);
/* Layer memory offsets */
regmap_write(logicvc->regmap, LOGICVC_BUFFER_SEL_REG,
LOGICVC_BUFFER_SEL_VALUE(index, setup.buffer_sel));
regmap_write(logicvc->regmap, LOGICVC_LAYER_HOFFSET_REG(index),
setup.hoffset);
regmap_write(logicvc->regmap, LOGICVC_LAYER_VOFFSET_REG(index),
setup.voffset);
}
/* Layer position */
regmap_write(logicvc->regmap, LOGICVC_LAYER_HPOSITION_REG(index),
mode->hdisplay - 1 - new_state->crtc_x);
/* Vertical position must be set last to sync layer register changes. */
regmap_write(logicvc->regmap, LOGICVC_LAYER_VPOSITION_REG(index),
mode->vdisplay - 1 - new_state->crtc_y);
/* Layer alpha */
if (layer->config.alpha_mode == LOGICVC_LAYER_ALPHA_LAYER) {
u32 alpha_bits;
u32 alpha_max;
u32 alpha;
switch (layer->config.depth) {
case 8:
alpha_bits = 3;
break;
case 16:
if (layer->config.colorspace ==
LOGICVC_LAYER_COLORSPACE_YUV)
alpha_bits = 8;
else
alpha_bits = 6;
break;
default:
alpha_bits = 8;
break;
}
alpha_max = BIT(alpha_bits) - 1;
alpha = new_state->alpha * alpha_max / DRM_BLEND_ALPHA_OPAQUE;
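/* e.g. with 6 alpha bits, a fully-opaque DRM alpha (0xffff) maps to 63. */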
drm_dbg_kms(drm_dev, "Setting layer %d alpha to %d/%d\n", index,
alpha, alpha_max);
regmap_write(logicvc->regmap, LOGICVC_LAYER_ALPHA_REG(index),
alpha);
}
/* Layer control */
reg = LOGICVC_LAYER_CTRL_ENABLE;
if (logicvc_layer_format_inverted(fb->format->format))
reg |= LOGICVC_LAYER_CTRL_PIXEL_FORMAT_INVERT;
reg |= LOGICVC_LAYER_CTRL_COLOR_KEY_DISABLE;
regmap_write(logicvc->regmap, LOGICVC_LAYER_CTRL_REG(index), reg);
}
static void logicvc_plane_atomic_disable(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct logicvc_layer *layer = logicvc_layer(drm_plane);
struct logicvc_drm *logicvc = logicvc_drm(drm_plane->dev);
u32 index = layer->index;
regmap_write(logicvc->regmap, LOGICVC_LAYER_CTRL_REG(index), 0);
}
static const struct drm_plane_helper_funcs logicvc_plane_helper_funcs = {
.atomic_check = logicvc_plane_atomic_check,
.atomic_update = logicvc_plane_atomic_update,
.atomic_disable = logicvc_plane_atomic_disable,
};
static const struct drm_plane_funcs logicvc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
int logicvc_layer_buffer_find_setup(struct logicvc_drm *logicvc,
struct logicvc_layer *layer,
struct drm_plane_state *state,
struct logicvc_layer_buffer_setup *setup)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct drm_framebuffer *fb = state->fb;
/* All the supported formats have a single data plane. */
u32 layer_bytespp = fb->format->cpp[0];
u32 layer_stride = layer_bytespp * logicvc->config.row_stride;
u32 base_offset = layer->config.base_offset * layer_stride;
u32 buffer_offset = layer->config.buffer_offset * layer_stride;
u8 buffer_sel = 0;
u16 voffset = 0;
u16 hoffset = 0;
phys_addr_t fb_addr;
u32 fb_offset;
u32 gap;
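/*
 * Decompose the gap between the framebuffer address and the layer base
 * into hardware offsets, consuming the largest units first: whole
 * buffers (buffer_sel), then rows (voffset), then pixels (hoffset).
 * Any leftover gap means the address cannot be represented.
 */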
if (!logicvc->reserved_mem_base) {
drm_err(drm_dev, "No reserved memory base was registered!\n");
return -ENOMEM;
}
fb_addr = drm_fb_dma_get_gem_addr(fb, state, 0);
if (fb_addr < logicvc->reserved_mem_base) {
drm_err(drm_dev,
"Framebuffer memory below reserved memory base!\n");
return -EINVAL;
}
fb_offset = (u32)(fb_addr - logicvc->reserved_mem_base);
if (fb_offset < base_offset) {
drm_err(drm_dev,
"Framebuffer offset below layer base offset!\n");
return -EINVAL;
}
gap = fb_offset - base_offset;
/* Use the possible video buffers selection. */
if (gap && buffer_offset) {
buffer_sel = gap / buffer_offset;
if (buffer_sel > LOGICVC_BUFFER_SEL_MAX)
buffer_sel = LOGICVC_BUFFER_SEL_MAX;
gap -= buffer_sel * buffer_offset;
}
/* Use the vertical offset. */
if (gap && layer_stride && logicvc->config.layers_configurable) {
voffset = gap / layer_stride;
if (voffset > LOGICVC_LAYER_VOFFSET_MAX)
voffset = LOGICVC_LAYER_VOFFSET_MAX;
gap -= voffset * layer_stride;
}
/* Use the horizontal offset. */
if (gap && layer_bytespp && logicvc->config.layers_configurable) {
hoffset = gap / layer_bytespp;
if (hoffset > LOGICVC_DIMENSIONS_MAX)
hoffset = LOGICVC_DIMENSIONS_MAX;
gap -= hoffset * layer_bytespp;
}
if (gap) {
drm_err(drm_dev,
"Unable to find layer %d buffer setup for 0x%x byte gap\n",
layer->index, fb_offset - base_offset);
return -EINVAL;
}
drm_dbg_kms(drm_dev, "Found layer %d buffer setup for 0x%x byte gap:\n",
layer->index, fb_offset - base_offset);
drm_dbg_kms(drm_dev, "- buffer_sel = 0x%x chunks of 0x%x bytes\n",
buffer_sel, buffer_offset);
drm_dbg_kms(drm_dev, "- voffset = 0x%x chunks of 0x%x bytes\n", voffset,
layer_stride);
drm_dbg_kms(drm_dev, "- hoffset = 0x%x chunks of 0x%x bytes\n", hoffset,
layer_bytespp);
if (setup) {
setup->buffer_sel = buffer_sel;
setup->voffset = voffset;
setup->hoffset = hoffset;
}
return 0;
}
static struct logicvc_layer_formats *logicvc_layer_formats_lookup(struct logicvc_layer *layer)
{
bool alpha;
unsigned int i = 0;
alpha = (layer->config.alpha_mode == LOGICVC_LAYER_ALPHA_PIXEL);
while (logicvc_layer_formats[i].formats) {
if (logicvc_layer_formats[i].colorspace == layer->config.colorspace &&
logicvc_layer_formats[i].depth == layer->config.depth &&
logicvc_layer_formats[i].alpha == alpha)
return &logicvc_layer_formats[i];
i++;
}
return NULL;
}
static unsigned int logicvc_layer_formats_count(struct logicvc_layer_formats *formats)
{
unsigned int count = 0;
while (formats->formats[count] != DRM_FORMAT_INVALID)
count++;
return count;
}
static int logicvc_layer_config_parse(struct logicvc_drm *logicvc,
struct logicvc_layer *layer)
{
struct device_node *of_node = layer->of_node;
struct logicvc_layer_config *config = &layer->config;
int ret;
logicvc_of_property_parse_bool(of_node,
LOGICVC_OF_PROPERTY_LAYER_PRIMARY,
&config->primary);
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_LAYER_COLORSPACE,
&config->colorspace);
if (ret)
return ret;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_LAYER_DEPTH,
&config->depth);
if (ret)
return ret;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_LAYER_ALPHA_MODE,
&config->alpha_mode);
if (ret)
return ret;
/*
* Memory offset is only relevant without layer address configuration.
*/
if (logicvc->caps->layer_address)
return 0;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_LAYER_BASE_OFFSET,
&config->base_offset);
if (ret)
return ret;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_LAYER_BUFFER_OFFSET,
&config->buffer_offset);
if (ret)
return ret;
return 0;
}
struct logicvc_layer *logicvc_layer_get_from_index(struct logicvc_drm *logicvc,
u32 index)
{
struct logicvc_layer *layer;
list_for_each_entry(layer, &logicvc->layers_list, list)
if (layer->index == index)
return layer;
return NULL;
}
struct logicvc_layer *logicvc_layer_get_from_type(struct logicvc_drm *logicvc,
enum drm_plane_type type)
{
struct logicvc_layer *layer;
list_for_each_entry(layer, &logicvc->layers_list, list)
if (layer->drm_plane.type == type)
return layer;
return NULL;
}
struct logicvc_layer *logicvc_layer_get_primary(struct logicvc_drm *logicvc)
{
return logicvc_layer_get_from_type(logicvc, DRM_PLANE_TYPE_PRIMARY);
}
static int logicvc_layer_init(struct logicvc_drm *logicvc,
struct device_node *of_node, u32 index)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct device *dev = drm_dev->dev;
struct logicvc_layer *layer = NULL;
struct logicvc_layer_formats *formats;
unsigned int formats_count;
enum drm_plane_type type;
unsigned int zpos;
int ret;
layer = devm_kzalloc(dev, sizeof(*layer), GFP_KERNEL);
if (!layer) {
ret = -ENOMEM;
goto error;
}
layer->of_node = of_node;
layer->index = index;
ret = logicvc_layer_config_parse(logicvc, layer);
if (ret) {
drm_err(drm_dev, "Failed to parse config for layer #%d\n",
index);
goto error;
}
formats = logicvc_layer_formats_lookup(layer);
if (!formats) {
drm_err(drm_dev, "Failed to lookup formats for layer #%d\n",
index);
ret = -EINVAL;
goto error;
}
formats_count = logicvc_layer_formats_count(formats);
/* The final layer can be configured as a background layer. */
if (logicvc->config.background_layer &&
index == (logicvc->config.layers_count - 1)) {
/*
* A zero value for black is only valid for RGB, not for YUV,
* so this will need to take the format in account for YUV.
*/
u32 background = 0;
drm_dbg_kms(drm_dev, "Using layer #%d as background layer\n",
index);
regmap_write(logicvc->regmap, LOGICVC_BACKGROUND_COLOR_REG,
background);
devm_kfree(dev, layer);
return 0;
}
if (layer->config.primary)
type = DRM_PLANE_TYPE_PRIMARY;
else
type = DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(drm_dev, &layer->drm_plane, 0,
&logicvc_plane_funcs, formats->formats,
formats_count, NULL, type, NULL);
if (ret) {
drm_err(drm_dev, "Failed to initialize layer plane\n");
return ret;
}
drm_plane_helper_add(&layer->drm_plane, &logicvc_plane_helper_funcs);
zpos = logicvc->config.layers_count - index - 1;
drm_dbg_kms(drm_dev, "Giving layer #%d zpos %d\n", index, zpos);
if (layer->config.alpha_mode == LOGICVC_LAYER_ALPHA_LAYER)
drm_plane_create_alpha_property(&layer->drm_plane);
drm_plane_create_zpos_immutable_property(&layer->drm_plane, zpos);
drm_dbg_kms(drm_dev, "Registering layer #%d\n", index);
layer->formats = formats;
list_add_tail(&layer->list, &logicvc->layers_list);
return 0;
error:
if (layer)
devm_kfree(dev, layer);
return ret;
}
static void logicvc_layer_fini(struct logicvc_drm *logicvc,
struct logicvc_layer *layer)
{
struct device *dev = logicvc->drm_dev.dev;
list_del(&layer->list);
devm_kfree(dev, layer);
}
void logicvc_layers_attach_crtc(struct logicvc_drm *logicvc)
{
uint32_t possible_crtcs = drm_crtc_mask(&logicvc->crtc->drm_crtc);
struct logicvc_layer *layer;
list_for_each_entry(layer, &logicvc->layers_list, list) {
if (layer->drm_plane.type != DRM_PLANE_TYPE_OVERLAY)
continue;
layer->drm_plane.possible_crtcs = possible_crtcs;
}
}
int logicvc_layers_init(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct device *dev = drm_dev->dev;
struct device_node *of_node = dev->of_node;
struct device_node *layer_node = NULL;
struct device_node *layers_node;
struct logicvc_layer *layer;
struct logicvc_layer *next;
int ret = 0;
layers_node = of_get_child_by_name(of_node, "layers");
if (!layers_node) {
drm_err(drm_dev, "No layers node found in the description\n");
ret = -ENODEV;
goto error;
}
for_each_child_of_node(layers_node, layer_node) {
u32 index = 0;
if (!logicvc_of_node_is_layer(layer_node))
continue;
ret = of_property_read_u32(layer_node, "reg", &index);
if (ret)
continue;
layer = logicvc_layer_get_from_index(logicvc, index);
if (layer) {
drm_err(drm_dev, "Duplicated entry for layer #%d\n",
index);
continue;
}
ret = logicvc_layer_init(logicvc, layer_node, index);
if (ret) {
of_node_put(layers_node);
goto error;
}
}
of_node_put(layers_node);
return 0;
error:
list_for_each_entry_safe(layer, next, &logicvc->layers_list, list)
logicvc_layer_fini(logicvc, layer);
return ret;
}
| linux-master | drivers/gpu/drm/logicvc/logicvc_layer.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Broadcom
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
/**
* DOC: VC4 Falcon HDMI module
*
* The HDMI core has a state machine and a PHY. On BCM2835, most of
* the unit operates off of the HSM clock from CPRMAN. It also
* internally uses the PLLH_PIX clock for the PHY.
*
* HDMI infoframes are kept within a small packet ram, where each
* packet can be individually enabled for including in a frame.
*
* HDMI audio is implemented entirely within the HDMI IP block. A
* register in the HDMI encoder takes SPDIF frames from the DMA engine
* and transfers them over an internal MAI (multi-channel audio
* interconnect) bus to the encoder side for insertion into the video
* blank regions.
*
* The driver's HDMI encoder does not yet support power management.
* The HDMI encoder's power domain and the HSM/pixel clocks are kept
* continuously running, and only the HDMI logic and packet ram are
* powered off/on at disable/enable time.
*
* The driver does not yet support CEC control, though the HDMI
* encoder block has CEC support.
*/
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include <linux/rational.h>
#include <linux/reset.h>
#include <sound/dmaengine_pcm.h>
#include <sound/hdmi-codec.h>
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "media/cec.h"
#include "vc4_drv.h"
#include "vc4_hdmi.h"
#include "vc4_hdmi_regs.h"
#include "vc4_regs.h"
#define VC5_HDMI_HORZA_HFP_SHIFT 16
#define VC5_HDMI_HORZA_HFP_MASK VC4_MASK(28, 16)
#define VC5_HDMI_HORZA_VPOS BIT(15)
#define VC5_HDMI_HORZA_HPOS BIT(14)
#define VC5_HDMI_HORZA_HAP_SHIFT 0
#define VC5_HDMI_HORZA_HAP_MASK VC4_MASK(13, 0)
#define VC5_HDMI_HORZB_HBP_SHIFT 16
#define VC5_HDMI_HORZB_HBP_MASK VC4_MASK(26, 16)
#define VC5_HDMI_HORZB_HSP_SHIFT 0
#define VC5_HDMI_HORZB_HSP_MASK VC4_MASK(10, 0)
#define VC5_HDMI_VERTA_VSP_SHIFT 24
#define VC5_HDMI_VERTA_VSP_MASK VC4_MASK(28, 24)
#define VC5_HDMI_VERTA_VFP_SHIFT 16
#define VC5_HDMI_VERTA_VFP_MASK VC4_MASK(22, 16)
#define VC5_HDMI_VERTA_VAL_SHIFT 0
#define VC5_HDMI_VERTA_VAL_MASK VC4_MASK(12, 0)
#define VC5_HDMI_VERTB_VSPO_SHIFT 16
#define VC5_HDMI_VERTB_VSPO_MASK VC4_MASK(29, 16)
#define VC4_HDMI_MISC_CONTROL_PIXEL_REP_SHIFT 0
#define VC4_HDMI_MISC_CONTROL_PIXEL_REP_MASK VC4_MASK(3, 0)
#define VC5_HDMI_MISC_CONTROL_PIXEL_REP_SHIFT 0
#define VC5_HDMI_MISC_CONTROL_PIXEL_REP_MASK VC4_MASK(3, 0)
#define VC5_HDMI_SCRAMBLER_CTL_ENABLE BIT(0)
#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_SHIFT 8
#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK VC4_MASK(10, 8)
#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_SHIFT 0
#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK VC4_MASK(3, 0)
#define VC5_HDMI_GCP_CONFIG_GCP_ENABLE BIT(31)
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT 8
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK VC4_MASK(15, 8)
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK VC4_MASK(7, 0)
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_SET_AVMUTE BIT(0)
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE BIT(4)
# define VC4_HD_M_SW_RST BIT(2)
# define VC4_HD_M_ENABLE BIT(0)
#define HSM_MIN_CLOCK_FREQ 120000000
#define CEC_CLOCK_FREQ 40000
#define HDMI_14_MAX_TMDS_CLK (340 * 1000 * 1000)
static const char * const output_format_str[] = {
[VC4_HDMI_OUTPUT_RGB] = "RGB",
[VC4_HDMI_OUTPUT_YUV420] = "YUV 4:2:0",
[VC4_HDMI_OUTPUT_YUV422] = "YUV 4:2:2",
[VC4_HDMI_OUTPUT_YUV444] = "YUV 4:4:4",
};
static const char *vc4_hdmi_output_fmt_str(enum vc4_hdmi_output_format fmt)
{
if (fmt >= ARRAY_SIZE(output_format_str))
return "invalid";
return output_format_str[fmt];
}
static unsigned long long
vc4_hdmi_encoder_compute_mode_clock(const struct drm_display_mode *mode,
unsigned int bpc, enum vc4_hdmi_output_format fmt);
static bool vc4_hdmi_supports_scrambling(struct vc4_hdmi *vc4_hdmi)
{
struct drm_display_info *display = &vc4_hdmi->connector.display_info;
lockdep_assert_held(&vc4_hdmi->mutex);
if (!display->is_hdmi)
return false;
if (!display->hdmi.scdc.supported ||
!display->hdmi.scdc.scrambling.supported)
return false;
return true;
}
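/*
 * Scrambling is required whenever the TMDS character rate exceeds the
 * HDMI 1.4 limit of 340 MHz.
 */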
static bool vc4_hdmi_mode_needs_scrambling(const struct drm_display_mode *mode,
unsigned int bpc,
enum vc4_hdmi_output_format fmt)
{
unsigned long long clock = vc4_hdmi_encoder_compute_mode_clock(mode, bpc, fmt);
return clock > HDMI_14_MAX_TMDS_CLK;
}
static bool vc4_hdmi_is_full_range(struct vc4_hdmi *vc4_hdmi,
struct vc4_hdmi_connector_state *vc4_state)
{
const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
struct drm_display_info *display = &vc4_hdmi->connector.display_info;
if (vc4_state->broadcast_rgb == VC4_HDMI_BROADCAST_RGB_LIMITED)
return false;
else if (vc4_state->broadcast_rgb == VC4_HDMI_BROADCAST_RGB_FULL)
return true;
return !display->is_hdmi ||
drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_FULL;
}
static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_debugfs_entry *entry = m->private;
struct vc4_hdmi *vc4_hdmi = entry->file.data;
struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_printer p = drm_seq_file_printer(m);
int idx;
if (!drm_dev_enter(drm, &idx))
return -ENODEV;
drm_print_regset32(&p, &vc4_hdmi->hdmi_regset);
drm_print_regset32(&p, &vc4_hdmi->hd_regset);
drm_print_regset32(&p, &vc4_hdmi->cec_regset);
drm_print_regset32(&p, &vc4_hdmi->csc_regset);
drm_print_regset32(&p, &vc4_hdmi->dvp_regset);
drm_print_regset32(&p, &vc4_hdmi->phy_regset);
drm_print_regset32(&p, &vc4_hdmi->ram_regset);
drm_print_regset32(&p, &vc4_hdmi->rm_regset);
drm_dev_exit(idx);
return 0;
}
static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
{
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
/*
* We can be called by our bind callback, when the
* connector->dev pointer might not be initialised yet.
*/
if (drm && !drm_dev_enter(drm, &idx))
return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_M_CTL, VC4_HD_M_SW_RST);
udelay(1);
HDMI_WRITE(HDMI_M_CTL, 0);
HDMI_WRITE(HDMI_M_CTL, VC4_HD_M_ENABLE);
HDMI_WRITE(HDMI_SW_RESET_CONTROL,
VC4_HDMI_SW_RESET_HDMI |
VC4_HDMI_SW_RESET_FORMAT_DETECT);
HDMI_WRITE(HDMI_SW_RESET_CONTROL, 0);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
if (drm)
drm_dev_exit(idx);
}
static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
{
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
/*
* We can be called by our bind callback, when the
* connector->dev pointer might not be initialised yet.
*/
if (drm && !drm_dev_enter(drm, &idx))
return;
reset_control_reset(vc4_hdmi->reset);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_DVP_CTL, 0);
HDMI_WRITE(HDMI_CLOCK_STOP,
HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
if (drm)
drm_dev_exit(idx);
}
#ifdef CONFIG_DRM_VC4_HDMI_CEC
static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
{
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long cec_rate;
unsigned long flags;
u16 clk_cnt;
u32 value;
int idx;
/*
* This function is called by our runtime_resume implementation
* and thus at bind time, when we haven't registered our
* connector yet and thus don't have a pointer to the DRM
* device.
*/
if (drm && !drm_dev_enter(drm, &idx))
return;
cec_rate = clk_get_rate(vc4_hdmi->cec_clock);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
value = HDMI_READ(HDMI_CEC_CNTRL_1);
value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
/*
* Set the clock divider: the hsm_clock rate and this divider
* setting will give a 40 kHz CEC clock.
*/
clk_cnt = cec_rate / CEC_CLOCK_FREQ;
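/* e.g. a 120 MHz CEC source clock gives clk_cnt = 120000000 / 40000 = 3000. */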
value |= clk_cnt << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT;
HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
if (drm)
drm_dev_exit(idx);
}
#else
static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {}
#endif
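/*
 * Force a full modeset on the given CRTC by committing a state with
 * connectors_changed set; used below to reconfigure sink scrambling.
 */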
static int reset_pipe(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_atomic_state *state;
struct drm_crtc_state *crtc_state;
int ret;
state = drm_atomic_state_alloc(crtc->dev);
if (!state)
return -ENOMEM;
state->acquire_ctx = ctx;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto out;
}
crtc_state->connectors_changed = true;
ret = drm_atomic_commit(state);
out:
drm_atomic_state_put(state);
return ret;
}
static int vc4_hdmi_reset_link(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *drm;
struct vc4_hdmi *vc4_hdmi;
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
bool scrambling_needed;
u8 config;
int ret;
if (!connector)
return 0;
drm = connector->dev;
ret = drm_modeset_lock(&drm->mode_config.connection_mutex, ctx);
if (ret)
return ret;
conn_state = connector->state;
crtc = conn_state->crtc;
if (!crtc)
return 0;
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
return ret;
crtc_state = crtc->state;
if (!crtc_state->active)
return 0;
vc4_hdmi = connector_to_vc4_hdmi(connector);
mutex_lock(&vc4_hdmi->mutex);
if (!vc4_hdmi_supports_scrambling(vc4_hdmi)) {
mutex_unlock(&vc4_hdmi->mutex);
return 0;
}
scrambling_needed = vc4_hdmi_mode_needs_scrambling(&vc4_hdmi->saved_adjusted_mode,
vc4_hdmi->output_bpc,
vc4_hdmi->output_format);
if (!scrambling_needed) {
mutex_unlock(&vc4_hdmi->mutex);
return 0;
}
if (conn_state->commit &&
!try_wait_for_completion(&conn_state->commit->hw_done)) {
mutex_unlock(&vc4_hdmi->mutex);
return 0;
}
ret = drm_scdc_readb(connector->ddc, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
drm_err(drm, "Failed to read TMDS config: %d\n", ret);
mutex_unlock(&vc4_hdmi->mutex);
return 0;
}
if (!!(config & SCDC_SCRAMBLING_ENABLE) == scrambling_needed) {
mutex_unlock(&vc4_hdmi->mutex);
return 0;
}
mutex_unlock(&vc4_hdmi->mutex);
/*
* HDMI 2.0 says that one should not send scrambled data
* prior to configuring the sink scrambling, and that
* TMDS clock/data transmission should be suspended when
* changing the TMDS clock rate in the sink. So let's
* just do a full modeset here, even though some sinks
* would be perfectly happy if we were to just reconfigure
* the SCDC settings on the fly.
*/
return reset_pipe(crtc, ctx);
}
static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
struct drm_modeset_acquire_ctx *ctx,
enum drm_connector_status status)
{
struct drm_connector *connector = &vc4_hdmi->connector;
struct edid *edid;
int ret;
/*
* NOTE: This function should really be called with
* vc4_hdmi->mutex held, but doing so results in reentrancy
* issues since cec_s_phys_addr_from_edid might call
* .adap_enable, which leads to that function being called with
* our mutex held.
*
* A similar situation occurs with vc4_hdmi_reset_link() that
* will call into our KMS hooks if the scrambling was enabled.
*
* Concurrency isn't an issue at the moment since we don't share
* any state with any of the other frameworks so we can ignore
* the lock for now.
*/
if (status == connector_status_disconnected) {
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
return;
}
edid = drm_get_edid(connector, vc4_hdmi->ddc);
if (!edid)
return;
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
kfree(edid);
for (;;) {
ret = vc4_hdmi_reset_link(connector, ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
continue;
}
break;
}
}
static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
enum drm_connector_status status = connector_status_disconnected;
/*
* NOTE: This function should really take vc4_hdmi->mutex, but
* doing so results in reentrancy issues since
* vc4_hdmi_handle_hotplug() can call into other functions that
* would take the mutex while it's held here.
*
* Concurrency isn't an issue at the moment since we don't share
* any state with any of the other frameworks so we can ignore
* the lock for now.
*/
WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
if (vc4_hdmi->hpd_gpio) {
if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
status = connector_status_connected;
} else {
if (vc4_hdmi->variant->hp_detect &&
vc4_hdmi->variant->hp_detect(vc4_hdmi))
status = connector_status_connected;
}
vc4_hdmi_handle_hotplug(vc4_hdmi, ctx, status);
pm_runtime_put(&vc4_hdmi->pdev->dev);
return status;
}
static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct vc4_dev *vc4 = to_vc4_dev(connector->dev);
int ret = 0;
struct edid *edid;
/*
* NOTE: This function should really take vc4_hdmi->mutex, but
* doing so results in reentrancy issues since
* cec_s_phys_addr_from_edid might call .adap_enable, which
* leads to that function being called with our mutex held.
*
* Concurrency isn't an issue at the moment since we don't share
* any state with any of the other frameworks so we can ignore
* the lock for now.
*/
edid = drm_get_edid(connector, vc4_hdmi->ddc);
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
if (!edid)
return -ENODEV;
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
if (!vc4->hvs->vc5_hdmi_enable_hdmi_20) {
struct drm_device *drm = connector->dev;
const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->probed_modes, head) {
if (vc4_hdmi_mode_needs_scrambling(mode, 8, VC4_HDMI_OUTPUT_RGB)) {
drm_warn_once(drm, "The core clock cannot reach frequencies high enough to support 4k @ 60Hz.");
drm_warn_once(drm, "Please change your config.txt file to add hdmi_enable_4kp60.");
}
}
}
return ret;
}
static int vc4_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct drm_connector_state *old_state =
drm_atomic_get_old_connector_state(state, connector);
struct vc4_hdmi_connector_state *old_vc4_state =
conn_state_to_vc4_hdmi_conn_state(old_state);
struct drm_connector_state *new_state =
drm_atomic_get_new_connector_state(state, connector);
struct vc4_hdmi_connector_state *new_vc4_state =
conn_state_to_vc4_hdmi_conn_state(new_state);
struct drm_crtc *crtc = new_state->crtc;
if (!crtc)
return 0;
if (old_state->tv.margins.left != new_state->tv.margins.left ||
old_state->tv.margins.right != new_state->tv.margins.right ||
old_state->tv.margins.top != new_state->tv.margins.top ||
old_state->tv.margins.bottom != new_state->tv.margins.bottom) {
struct drm_crtc_state *crtc_state;
int ret;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
/*
* Strictly speaking, we should be calling
* drm_atomic_helper_check_planes() after our call to
* drm_atomic_add_affected_planes(). However, the
* connector atomic_check is called as part of
* drm_atomic_helper_check_modeset() that already
* happens before a call to
* drm_atomic_helper_check_planes() in
* drm_atomic_helper_check().
*/
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
}
if (old_state->colorspace != new_state->colorspace ||
old_vc4_state->broadcast_rgb != new_vc4_state->broadcast_rgb ||
!drm_connector_atomic_hdr_metadata_equal(old_state, new_state)) {
struct drm_crtc_state *crtc_state;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
crtc_state->mode_changed = true;
}
return 0;
}
static int vc4_hdmi_connector_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val)
{
struct drm_device *drm = connector->dev;
struct vc4_hdmi *vc4_hdmi =
connector_to_vc4_hdmi(connector);
const struct vc4_hdmi_connector_state *vc4_conn_state =
conn_state_to_vc4_hdmi_conn_state(state);
if (property == vc4_hdmi->broadcast_rgb_property) {
*val = vc4_conn_state->broadcast_rgb;
} else {
drm_dbg(drm, "Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
}
return 0;
}
static int vc4_hdmi_connector_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val)
{
struct drm_device *drm = connector->dev;
struct vc4_hdmi *vc4_hdmi =
connector_to_vc4_hdmi(connector);
struct vc4_hdmi_connector_state *vc4_conn_state =
conn_state_to_vc4_hdmi_conn_state(state);
if (property == vc4_hdmi->broadcast_rgb_property) {
vc4_conn_state->broadcast_rgb = val;
return 0;
}
drm_dbg(drm, "Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
}
static void vc4_hdmi_connector_reset(struct drm_connector *connector)
{
struct vc4_hdmi_connector_state *old_state =
conn_state_to_vc4_hdmi_conn_state(connector->state);
struct vc4_hdmi_connector_state *new_state =
kzalloc(sizeof(*new_state), GFP_KERNEL);
if (connector->state)
__drm_atomic_helper_connector_destroy_state(connector->state);
kfree(old_state);
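/*
 * Note: if the allocation failed, new_state is NULL and, since base is
 * the first member of vc4_hdmi_connector_state, &new_state->base is
 * NULL too. __drm_atomic_helper_connector_reset() copes with a NULL
 * state and simply clears connector->state, which is why the NULL
 * check only happens afterwards.
 */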
__drm_atomic_helper_connector_reset(connector, &new_state->base);
if (!new_state)
return;
new_state->base.max_bpc = 8;
new_state->base.max_requested_bpc = 8;
new_state->output_format = VC4_HDMI_OUTPUT_RGB;
new_state->broadcast_rgb = VC4_HDMI_BROADCAST_RGB_AUTO;
drm_atomic_helper_connector_tv_margins_reset(connector);
}
static struct drm_connector_state *
vc4_hdmi_connector_duplicate_state(struct drm_connector *connector)
{
struct drm_connector_state *conn_state = connector->state;
struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state);
struct vc4_hdmi_connector_state *new_state;
new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
if (!new_state)
return NULL;
new_state->tmds_char_rate = vc4_state->tmds_char_rate;
new_state->output_bpc = vc4_state->output_bpc;
new_state->output_format = vc4_state->output_format;
new_state->broadcast_rgb = vc4_state->broadcast_rgb;
__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
return &new_state->base;
}
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = vc4_hdmi_connector_reset,
.atomic_duplicate_state = vc4_hdmi_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_get_property = vc4_hdmi_connector_get_property,
.atomic_set_property = vc4_hdmi_connector_set_property,
};
static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
.detect_ctx = vc4_hdmi_connector_detect_ctx,
.get_modes = vc4_hdmi_connector_get_modes,
.atomic_check = vc4_hdmi_connector_atomic_check,
};
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
{ VC4_HDMI_BROADCAST_RGB_AUTO, "Automatic" },
{ VC4_HDMI_BROADCAST_RGB_FULL, "Full" },
{ VC4_HDMI_BROADCAST_RGB_LIMITED, "Limited 16:235" },
};
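/*
 * "Automatic" resolves the quantization range from the mode (full
 * range for VIC 1 and non-CEA modes, limited for other CEA VICs, per
 * the CSC comments below), while "Full" and "Limited 16:235" force it.
 * As an illustration (the connector ID 32 and the enum value are
 * assumptions), the property can be exercised from userspace with
 * something like:
 *
 *   modetest -M vc4 -w 32:'Broadcast RGB':2
 */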
static void
vc4_hdmi_attach_broadcast_rgb_property(struct drm_device *dev,
struct vc4_hdmi *vc4_hdmi)
{
struct drm_property *prop = vc4_hdmi->broadcast_rgb_property;
if (!prop) {
prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
"Broadcast RGB",
broadcast_rgb_names,
ARRAY_SIZE(broadcast_rgb_names));
if (!prop)
return;
vc4_hdmi->broadcast_rgb_property = prop;
}
drm_object_attach_property(&vc4_hdmi->connector.base, prop,
VC4_HDMI_BROADCAST_RGB_AUTO);
}
static int vc4_hdmi_connector_init(struct drm_device *dev,
struct vc4_hdmi *vc4_hdmi)
{
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_encoder *encoder = &vc4_hdmi->encoder.base;
int ret;
ret = drmm_connector_init(dev, connector,
&vc4_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA,
vc4_hdmi->ddc);
if (ret)
return ret;
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/*
* Some of the properties below require access to state, like bpc.
* Allocate some default initial connector state with our reset helper.
*/
if (connector->funcs->reset)
connector->funcs->reset(connector);
/* Create and attach TV margin props to this connector. */
ret = drm_mode_create_tv_margin_properties(dev);
if (ret)
return ret;
ret = drm_mode_create_hdmi_colorspace_property(connector, 0);
if (ret)
return ret;
drm_connector_attach_colorspace_property(connector);
drm_connector_attach_tv_margin_properties(connector);
drm_connector_attach_max_bpc_property(connector, 8, 12);
connector->polled = (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT);
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
if (vc4_hdmi->variant->supports_hdr)
drm_connector_attach_hdr_output_metadata_property(connector);
vc4_hdmi_attach_broadcast_rgb_property(dev, vc4_hdmi);
drm_connector_attach_encoder(connector, encoder);
return 0;
}
static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
bool poll)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_device *drm = vc4_hdmi->connector.dev;
u32 packet_id = type - 0x80;
unsigned long flags;
int ret = 0;
int idx;
if (!drm_dev_enter(drm, &idx))
return -ENODEV;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
if (poll) {
ret = wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) &
BIT(packet_id)), 100);
}
drm_dev_exit(idx);
return ret;
}
static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe *frame)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_device *drm = vc4_hdmi->connector.dev;
u32 packet_id = frame->any.type - 0x80;
const struct vc4_hdmi_register *ram_packet_start =
&vc4_hdmi->variant->registers[HDMI_RAM_PACKET_START];
u32 packet_reg = ram_packet_start->offset + VC4_HDMI_PACKET_STRIDE * packet_id;
u32 packet_reg_next = ram_packet_start->offset +
VC4_HDMI_PACKET_STRIDE * (packet_id + 1);
void __iomem *base = __vc4_hdmi_get_field_base(vc4_hdmi,
ram_packet_start->reg);
uint8_t buffer[VC4_HDMI_PACKET_STRIDE] = {};
unsigned long flags;
ssize_t len, i;
int ret;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
WARN_ONCE(!(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
VC4_HDMI_RAM_PACKET_ENABLE),
"Packet RAM has to be on to store the packet.");
len = hdmi_infoframe_pack(frame, buffer, sizeof(buffer));
if (len < 0)
goto out;
ret = vc4_hdmi_stop_packet(encoder, frame->any.type, true);
if (ret) {
DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret);
goto out;
}
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
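/*
 * The packet RAM stores 7 infoframe bytes per 8 register bytes: each
 * iteration below packs 3 bytes into the low 24 bits of the first
 * word and the next 4 bytes into the second word.
 */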
for (i = 0; i < len; i += 7) {
writel(buffer[i + 0] << 0 |
buffer[i + 1] << 8 |
buffer[i + 2] << 16,
base + packet_reg);
packet_reg += 4;
writel(buffer[i + 3] << 0 |
buffer[i + 4] << 8 |
buffer[i + 5] << 16 |
buffer[i + 6] << 24,
base + packet_reg);
packet_reg += 4;
}
/*
* Clear the remainder of the packet RAM: stale bytes would be
* transmitted as part of the infoframe and trigger a checksum
* error on an HDMI analyser.
*/
for (; packet_reg < packet_reg_next; packet_reg += 4)
writel(0, base + packet_reg);
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
HDMI_READ(HDMI_RAM_PACKET_CONFIG) | BIT(packet_id));
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
ret = wait_for((HDMI_READ(HDMI_RAM_PACKET_STATUS) &
BIT(packet_id)), 100);
if (ret)
DRM_ERROR("Failed to wait for infoframe to start: %d\n", ret);
out:
drm_dev_exit(idx);
}
static void vc4_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
enum vc4_hdmi_output_format fmt)
{
switch (fmt) {
case VC4_HDMI_OUTPUT_RGB:
frame->colorspace = HDMI_COLORSPACE_RGB;
break;
case VC4_HDMI_OUTPUT_YUV420:
frame->colorspace = HDMI_COLORSPACE_YUV420;
break;
case VC4_HDMI_OUTPUT_YUV422:
frame->colorspace = HDMI_COLORSPACE_YUV422;
break;
case VC4_HDMI_OUTPUT_YUV444:
frame->colorspace = HDMI_COLORSPACE_YUV444;
break;
default:
break;
}
}
static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *cstate = connector->state;
struct vc4_hdmi_connector_state *vc4_state =
conn_state_to_vc4_hdmi_conn_state(cstate);
const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
union hdmi_infoframe frame;
int ret;
lockdep_assert_held(&vc4_hdmi->mutex);
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
connector, mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
}
drm_hdmi_avi_infoframe_quant_range(&frame.avi,
connector, mode,
vc4_hdmi_is_full_range(vc4_hdmi, vc4_state) ?
HDMI_QUANTIZATION_RANGE_FULL :
HDMI_QUANTIZATION_RANGE_LIMITED);
drm_hdmi_avi_infoframe_colorimetry(&frame.avi, cstate);
vc4_hdmi_avi_infoframe_colorspace(&frame.avi, vc4_state->output_format);
drm_hdmi_avi_infoframe_bars(&frame.avi, cstate);
vc4_hdmi_write_infoframe(encoder, &frame);
}
static void vc4_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
union hdmi_infoframe frame;
int ret;
ret = hdmi_spd_infoframe_init(&frame.spd, "Broadcom", "Videocore");
if (ret < 0) {
DRM_ERROR("couldn't fill SPD infoframe\n");
return;
}
frame.spd.sdi = HDMI_SPD_SDI_PC;
vc4_hdmi_write_infoframe(encoder, &frame);
}
static void vc4_hdmi_set_audio_infoframe(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct hdmi_audio_infoframe *audio = &vc4_hdmi->audio.infoframe;
union hdmi_infoframe frame;
memcpy(&frame.audio, audio, sizeof(*audio));
if (vc4_hdmi->packet_ram_enabled)
vc4_hdmi_write_infoframe(encoder, &frame);
}
static void vc4_hdmi_set_hdr_infoframe(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *conn_state = connector->state;
union hdmi_infoframe frame;
lockdep_assert_held(&vc4_hdmi->mutex);
if (!vc4_hdmi->variant->supports_hdr)
return;
if (!conn_state->hdr_output_metadata)
return;
if (drm_hdmi_infoframe_set_hdr_metadata(&frame.drm, conn_state))
return;
vc4_hdmi_write_infoframe(encoder, &frame);
}
static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
lockdep_assert_held(&vc4_hdmi->mutex);
vc4_hdmi_set_avi_infoframe(encoder);
vc4_hdmi_set_spd_infoframe(encoder);
/*
* If audio was streaming, we need to re-enable the audio
* infoframe here during encoder_enable.
*/
if (vc4_hdmi->audio.streaming)
vc4_hdmi_set_audio_infoframe(encoder);
vc4_hdmi_set_hdr_infoframe(encoder);
}
#define SCRAMBLING_POLLING_DELAY_MS 1000
static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_device *drm = connector->dev;
const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
unsigned long flags;
int idx;
lockdep_assert_held(&vc4_hdmi->mutex);
if (!vc4_hdmi_supports_scrambling(vc4_hdmi))
return;
if (!vc4_hdmi_mode_needs_scrambling(mode,
vc4_hdmi->output_bpc,
vc4_hdmi->output_format))
return;
if (!drm_dev_enter(drm, &idx))
return;
drm_scdc_set_high_tmds_clock_ratio(connector, true);
drm_scdc_set_scrambling(connector, true);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) |
VC5_HDMI_SCRAMBLER_CTL_ENABLE);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
drm_dev_exit(idx);
vc4_hdmi->scdc_enabled = true;
queue_delayed_work(system_wq, &vc4_hdmi->scrambling_work,
msecs_to_jiffies(SCRAMBLING_POLLING_DELAY_MS));
}
static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_device *drm = connector->dev;
unsigned long flags;
int idx;
lockdep_assert_held(&vc4_hdmi->mutex);
if (!vc4_hdmi->scdc_enabled)
return;
vc4_hdmi->scdc_enabled = false;
if (delayed_work_pending(&vc4_hdmi->scrambling_work))
cancel_delayed_work_sync(&vc4_hdmi->scrambling_work);
if (!drm_dev_enter(drm, &idx))
return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) &
~VC5_HDMI_SCRAMBLER_CTL_ENABLE);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
drm_scdc_set_scrambling(connector, false);
drm_scdc_set_high_tmds_clock_ratio(connector, false);
drm_dev_exit(idx);
}
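/*
 * The sink may drop its SCDC scrambling state (across a power cycle,
 * for instance) without raising a hotplug event, so this work polls
 * the status every SCRAMBLING_POLLING_DELAY_MS and re-enables
 * scrambling when it has been lost.
 */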
static void vc4_hdmi_scrambling_wq(struct work_struct *work)
{
struct vc4_hdmi *vc4_hdmi = container_of(to_delayed_work(work),
struct vc4_hdmi,
scrambling_work);
struct drm_connector *connector = &vc4_hdmi->connector;
if (drm_scdc_get_scrambling_status(connector))
return;
drm_scdc_set_high_tmds_clock_ratio(connector, true);
drm_scdc_set_scrambling(connector, true);
queue_delayed_work(system_wq, &vc4_hdmi->scrambling_work,
msecs_to_jiffies(SCRAMBLING_POLLING_DELAY_MS));
}
static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
mutex_lock(&vc4_hdmi->mutex);
vc4_hdmi->packet_ram_enabled = false;
if (!drm_dev_enter(drm, &idx))
goto out;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, 0);
HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_CLRRGB);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
mdelay(1);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
vc4_hdmi_disable_scrambling(encoder);
drm_dev_exit(idx);
out:
mutex_unlock(&vc4_hdmi->mutex);
}
static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int ret;
int idx;
mutex_lock(&vc4_hdmi->mutex);
if (!drm_dev_enter(drm, &idx))
goto out;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
if (vc4_hdmi->variant->phy_disable)
vc4_hdmi->variant->phy_disable(vc4_hdmi);
clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
if (ret < 0)
DRM_ERROR("Failed to release power domain: %d\n", ret);
drm_dev_exit(idx);
out:
mutex_unlock(&vc4_hdmi->mutex);
}
static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
const struct drm_display_mode *mode)
{
struct vc4_hdmi_connector_state *vc4_state =
conn_state_to_vc4_hdmi_conn_state(state);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
u32 csc_ctl;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR,
VC4_HD_CSC_CTL_ORDER);
if (!vc4_hdmi_is_full_range(vc4_hdmi, vc4_state)) {
/* CEA VICs other than #1 require limited range RGB
* output unless overridden by an AVI infoframe.
* Apply a colorspace conversion to squash 0-255 down
* to 16-235. The matrix here is:
*
* [ 0 0 0.8594 16]
* [ 0 0.8594 0 16]
* [ 0.8594 0 0 16]
* [ 0 0 0 1]
*/
csc_ctl |= VC4_HD_CSC_CTL_ENABLE;
csc_ctl |= VC4_HD_CSC_CTL_RGB2YCC;
csc_ctl |= VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM,
VC4_HD_CSC_CTL_MODE);
HDMI_WRITE(HDMI_CSC_12_11, (0x000 << 16) | 0x000);
HDMI_WRITE(HDMI_CSC_14_13, (0x100 << 16) | 0x6e0);
HDMI_WRITE(HDMI_CSC_22_21, (0x6e0 << 16) | 0x000);
HDMI_WRITE(HDMI_CSC_24_23, (0x100 << 16) | 0x000);
HDMI_WRITE(HDMI_CSC_32_31, (0x000 << 16) | 0x6e0);
HDMI_WRITE(HDMI_CSC_34_33, (0x100 << 16) | 0x000);
}
/* The RGB order applies even when CSC is disabled. */
HDMI_WRITE(HDMI_CSC_CTL, csc_ctl);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
drm_dev_exit(idx);
}
/*
* Matrices for (internal) RGB to RGB output.
*
* Matrices are signed 2p13 fixed point, with signed 9p6 offsets
*/
static const u16 vc5_hdmi_csc_full_rgb_to_rgb[2][3][4] = {
{
/*
* Full range - unity
*
* [ 1 0 0 0]
* [ 0 1 0 0]
* [ 0 0 1 0]
*/
{ 0x2000, 0x0000, 0x0000, 0x0000 },
{ 0x0000, 0x2000, 0x0000, 0x0000 },
{ 0x0000, 0x0000, 0x2000, 0x0000 },
},
{
/*
* Limited range
*
* CEA VICs other than #1 require limited range RGB
* output unless overridden by an AVI infoframe. Apply a
* colorspace conversion to squash 0-255 down to 16-235.
* The matrix here is:
*
* [ 0.8594 0 0 16]
* [ 0 0.8594 0 16]
* [ 0 0 0.8594 16]
*/
{ 0x1b80, 0x0000, 0x0000, 0x0400 },
{ 0x0000, 0x1b80, 0x0000, 0x0400 },
{ 0x0000, 0x0000, 0x1b80, 0x0400 },
},
};
/*
* Conversion between Full Range RGB and YUV using the BT.601 Colorspace
*
* Matrices are signed 2p13 fixed point, with signed 9p6 offsets
*/
static const u16 vc5_hdmi_csc_full_rgb_to_yuv_bt601[2][3][4] = {
{
/*
* Full Range
*
* [ 0.299000 0.587000 0.114000 0 ]
* [ -0.168736 -0.331264 0.500000 128 ]
* [ 0.500000 -0.418688 -0.081312 128 ]
*/
{ 0x0991, 0x12c9, 0x03a6, 0x0000 },
{ 0xfa9b, 0xf567, 0x1000, 0x2000 },
{ 0x1000, 0xf29b, 0xfd67, 0x2000 },
},
{
/* Limited Range
*
* [ 0.255785 0.502160 0.097523 16 ]
* [ -0.147644 -0.289856 0.437500 128 ]
* [ 0.437500 -0.366352 -0.071148 128 ]
*/
{ 0x082f, 0x1012, 0x031f, 0x0400 },
{ 0xfb48, 0xf6ba, 0x0e00, 0x2000 },
{ 0x0e00, 0xf448, 0xfdba, 0x2000 },
},
};
/*
* Conversion between Full Range RGB and YUV using the BT.709 Colorspace
*
* Matrices are signed 2p13 fixed point, with signed 9p6 offsets
*/
static const u16 vc5_hdmi_csc_full_rgb_to_yuv_bt709[2][3][4] = {
{
/*
* Full Range
*
* [ 0.212600 0.715200 0.072200 0 ]
* [ -0.114572 -0.385428 0.500000 128 ]
* [ 0.500000 -0.454153 -0.045847 128 ]
*/
{ 0x06ce, 0x16e3, 0x024f, 0x0000 },
{ 0xfc56, 0xf3ac, 0x1000, 0x2000 },
{ 0x1000, 0xf179, 0xfe89, 0x2000 },
},
{
/*
* Limited Range
*
* [ 0.181906 0.611804 0.061758 16 ]
* [ -0.100268 -0.337232 0.437500 128 ]
* [ 0.437500 -0.397386 -0.040114 128 ]
*/
{ 0x05d2, 0x1394, 0x01fa, 0x0400 },
{ 0xfccc, 0xf536, 0x0e00, 0x2000 },
{ 0x0e00, 0xf34a, 0xfeb8, 0x2000 },
},
};
/*
* Conversion between Full Range RGB and YUV using the BT.2020 Colorspace
*
* Matrices are signed 2p13 fixed point, with signed 9p6 offsets
*/
static const u16 vc5_hdmi_csc_full_rgb_to_yuv_bt2020[2][3][4] = {
{
/*
* Full Range
*
* [ 0.262700 0.678000 0.059300 0 ]
* [ -0.139630 -0.360370 0.500000 128 ]
* [ 0.500000 -0.459786 -0.040214 128 ]
*/
{ 0x0868, 0x15b2, 0x01e6, 0x0000 },
{ 0xfb89, 0xf479, 0x1000, 0x2000 },
{ 0x1000, 0xf14a, 0xfeb8, 0x2000 },
},
{
/* Limited Range
*
* [ 0.224732 0.580008 0.050729 16 ]
* [ -0.122176 -0.315324 0.437500 128 ]
* [ 0.437500 -0.402312 -0.035188 128 ]
*/
{ 0x0731, 0x128f, 0x01a0, 0x0400 },
{ 0xfc17, 0xf5e9, 0x0e00, 0x2000 },
{ 0x0e00, 0xf320, 0xfee0, 0x2000 },
},
};
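/*
 * A minimal sketch (illustrative only, never called by the driver;
 * assumes div_s64() from linux/math64.h is available) of how the
 * fixed point entries above are derived: a coefficient, given here in
 * millionths, is scaled by 2^13 and kept as 16-bit two's complement,
 * so 0.8594 -> 0x1b80 and -0.139630 -> 0xfb89. Offsets are scaled by
 * 2^6 instead, so 16 -> 0x0400.
 */
static inline u16 vc5_hdmi_csc_encode_s2p13(s64 coeff_millionths)
{
/* scale by 2^13, then drop the millionths scaling; truncate to 16 bits */
return (u16)div_s64(coeff_millionths * 8192, 1000000);
}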
static void vc5_hdmi_set_csc_coeffs(struct vc4_hdmi *vc4_hdmi,
const u16 coeffs[3][4])
{
lockdep_assert_held(&vc4_hdmi->hw_lock);
HDMI_WRITE(HDMI_CSC_12_11, (coeffs[0][1] << 16) | coeffs[0][0]);
HDMI_WRITE(HDMI_CSC_14_13, (coeffs[0][3] << 16) | coeffs[0][2]);
HDMI_WRITE(HDMI_CSC_22_21, (coeffs[1][1] << 16) | coeffs[1][0]);
HDMI_WRITE(HDMI_CSC_24_23, (coeffs[1][3] << 16) | coeffs[1][2]);
HDMI_WRITE(HDMI_CSC_32_31, (coeffs[2][1] << 16) | coeffs[2][0]);
HDMI_WRITE(HDMI_CSC_34_33, (coeffs[2][3] << 16) | coeffs[2][2]);
}
static void vc5_hdmi_set_csc_coeffs_swap(struct vc4_hdmi *vc4_hdmi,
const u16 coeffs[3][4])
{
lockdep_assert_held(&vc4_hdmi->hw_lock);
/* For YUV444, the hardware needs the CSC matrix rows written in a different channel order */
HDMI_WRITE(HDMI_CSC_12_11, (coeffs[1][1] << 16) | coeffs[1][0]);
HDMI_WRITE(HDMI_CSC_14_13, (coeffs[1][3] << 16) | coeffs[1][2]);
HDMI_WRITE(HDMI_CSC_22_21, (coeffs[2][1] << 16) | coeffs[2][0]);
HDMI_WRITE(HDMI_CSC_24_23, (coeffs[2][3] << 16) | coeffs[2][2]);
HDMI_WRITE(HDMI_CSC_32_31, (coeffs[0][1] << 16) | coeffs[0][0]);
HDMI_WRITE(HDMI_CSC_34_33, (coeffs[0][3] << 16) | coeffs[0][2]);
}
static const u16
(*vc5_hdmi_find_yuv_csc_coeffs(struct vc4_hdmi *vc4_hdmi, u32 colorspace, bool limited))[4]
{
switch (colorspace) {
case DRM_MODE_COLORIMETRY_SMPTE_170M_YCC:
case DRM_MODE_COLORIMETRY_XVYCC_601:
case DRM_MODE_COLORIMETRY_SYCC_601:
case DRM_MODE_COLORIMETRY_OPYCC_601:
case DRM_MODE_COLORIMETRY_BT601_YCC:
return vc5_hdmi_csc_full_rgb_to_yuv_bt601[limited];
default:
case DRM_MODE_COLORIMETRY_NO_DATA:
case DRM_MODE_COLORIMETRY_BT709_YCC:
case DRM_MODE_COLORIMETRY_XVYCC_709:
case DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED:
case DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT:
return vc5_hdmi_csc_full_rgb_to_yuv_bt709[limited];
case DRM_MODE_COLORIMETRY_BT2020_CYCC:
case DRM_MODE_COLORIMETRY_BT2020_YCC:
case DRM_MODE_COLORIMETRY_BT2020_RGB:
case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
return vc5_hdmi_csc_full_rgb_to_yuv_bt2020[limited];
}
}
static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
const struct drm_display_mode *mode)
{
struct drm_device *drm = vc4_hdmi->connector.dev;
struct vc4_hdmi_connector_state *vc4_state =
conn_state_to_vc4_hdmi_conn_state(state);
unsigned int lim_range = vc4_hdmi_is_full_range(vc4_hdmi, vc4_state) ? 0 : 1;
unsigned long flags;
const u16 (*csc)[4];
u32 if_cfg = 0;
u32 if_xbar = 0x543210;
u32 csc_chan_ctl = 0;
u32 csc_ctl = VC5_MT_CP_CSC_CTL_ENABLE | VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM,
VC5_MT_CP_CSC_CTL_MODE);
int idx;
if (!drm_dev_enter(drm, &idx))
return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
switch (vc4_state->output_format) {
case VC4_HDMI_OUTPUT_YUV444:
csc = vc5_hdmi_find_yuv_csc_coeffs(vc4_hdmi, state->colorspace, !!lim_range);
vc5_hdmi_set_csc_coeffs_swap(vc4_hdmi, csc);
break;
case VC4_HDMI_OUTPUT_YUV422:
csc = vc5_hdmi_find_yuv_csc_coeffs(vc4_hdmi, state->colorspace, !!lim_range);
csc_ctl |= VC4_SET_FIELD(VC5_MT_CP_CSC_CTL_FILTER_MODE_444_TO_422_STANDARD,
VC5_MT_CP_CSC_CTL_FILTER_MODE_444_TO_422) |
VC5_MT_CP_CSC_CTL_USE_444_TO_422 |
VC5_MT_CP_CSC_CTL_USE_RNG_SUPPRESSION;
csc_chan_ctl |= VC4_SET_FIELD(VC5_MT_CP_CHANNEL_CTL_OUTPUT_REMAP_LEGACY_STYLE,
VC5_MT_CP_CHANNEL_CTL_OUTPUT_REMAP);
if_cfg |= VC4_SET_FIELD(VC5_DVP_HT_VEC_INTERFACE_CFG_SEL_422_FORMAT_422_LEGACY,
VC5_DVP_HT_VEC_INTERFACE_CFG_SEL_422);
vc5_hdmi_set_csc_coeffs(vc4_hdmi, csc);
break;
case VC4_HDMI_OUTPUT_RGB:
if_xbar = 0x354021;
vc5_hdmi_set_csc_coeffs(vc4_hdmi, vc5_hdmi_csc_full_rgb_to_rgb[lim_range]);
break;
default:
break;
}
HDMI_WRITE(HDMI_VEC_INTERFACE_CFG, if_cfg);
HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, if_xbar);
HDMI_WRITE(HDMI_CSC_CHANNEL_CTL, csc_chan_ctl);
HDMI_WRITE(HDMI_CSC_CTL, csc_ctl);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
drm_dev_exit(idx);
}
static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
const struct drm_display_mode *mode)
{
struct drm_device *drm = vc4_hdmi->connector.dev;
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
u32 verta = (VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start,
VC4_HDMI_VERTA_VSP) |
VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay,
VC4_HDMI_VERTA_VFP) |
VC4_SET_FIELD(mode->crtc_vdisplay, VC4_HDMI_VERTA_VAL));
u32 vertb = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) |
VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end +
interlaced,
VC4_HDMI_VERTB_VBP));
u32 vertb_even = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) |
VC4_SET_FIELD(mode->crtc_vtotal -
mode->crtc_vsync_end,
VC4_HDMI_VERTB_VBP));
unsigned long flags;
u32 reg;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_HORZA,
(vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) |
(hsync_pos ? VC4_HDMI_HORZA_HPOS : 0) |
VC4_SET_FIELD(mode->hdisplay * pixel_rep,
VC4_HDMI_HORZA_HAP));
HDMI_WRITE(HDMI_HORZB,
VC4_SET_FIELD((mode->htotal -
mode->hsync_end) * pixel_rep,
VC4_HDMI_HORZB_HBP) |
VC4_SET_FIELD((mode->hsync_end -
mode->hsync_start) * pixel_rep,
VC4_HDMI_HORZB_HSP) |
VC4_SET_FIELD((mode->hsync_start -
mode->hdisplay) * pixel_rep,
VC4_HDMI_HORZB_HFP));
HDMI_WRITE(HDMI_VERTA0, verta);
HDMI_WRITE(HDMI_VERTA1, verta);
HDMI_WRITE(HDMI_VERTB0, vertb_even);
HDMI_WRITE(HDMI_VERTB1, vertb);
reg = HDMI_READ(HDMI_MISC_CONTROL);
reg &= ~VC4_HDMI_MISC_CONTROL_PIXEL_REP_MASK;
reg |= VC4_SET_FIELD(pixel_rep - 1, VC4_HDMI_MISC_CONTROL_PIXEL_REP);
HDMI_WRITE(HDMI_MISC_CONTROL, reg);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
drm_dev_exit(idx);
}
static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
const struct drm_display_mode *mode)
{
struct drm_device *drm = vc4_hdmi->connector.dev;
const struct vc4_hdmi_connector_state *vc4_state =
conn_state_to_vc4_hdmi_conn_state(state);
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
u32 verta = (VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start,
VC5_HDMI_VERTA_VSP) |
VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay,
VC5_HDMI_VERTA_VFP) |
VC4_SET_FIELD(mode->crtc_vdisplay, VC5_HDMI_VERTA_VAL));
u32 vertb = (VC4_SET_FIELD(mode->htotal >> (2 - pixel_rep),
VC5_HDMI_VERTB_VSPO) |
VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end +
interlaced,
VC4_HDMI_VERTB_VBP));
u32 vertb_even = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) |
VC4_SET_FIELD(mode->crtc_vtotal -
mode->crtc_vsync_end,
VC4_HDMI_VERTB_VBP));
unsigned long flags;
unsigned char gcp;
u32 reg;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_HORZA,
(vsync_pos ? VC5_HDMI_HORZA_VPOS : 0) |
(hsync_pos ? VC5_HDMI_HORZA_HPOS : 0) |
VC4_SET_FIELD(mode->hdisplay * pixel_rep,
VC5_HDMI_HORZA_HAP) |
VC4_SET_FIELD((mode->hsync_start -
mode->hdisplay) * pixel_rep,
VC5_HDMI_HORZA_HFP));
HDMI_WRITE(HDMI_HORZB,
VC4_SET_FIELD((mode->htotal -
mode->hsync_end) * pixel_rep,
VC5_HDMI_HORZB_HBP) |
VC4_SET_FIELD((mode->hsync_end -
mode->hsync_start) * pixel_rep,
VC5_HDMI_HORZB_HSP));
HDMI_WRITE(HDMI_VERTA0, verta);
HDMI_WRITE(HDMI_VERTA1, verta);
HDMI_WRITE(HDMI_VERTB0, vertb_even);
HDMI_WRITE(HDMI_VERTB1, vertb);
switch (vc4_state->output_bpc) {
case 12:
gcp = 6;
break;
case 10:
gcp = 5;
break;
case 8:
default:
gcp = 0;
break;
}
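/*
 * These are the General Control Packet colour depth (CD) codes from
 * the HDMI spec: 5 means 30 bits/pixel, 6 means 36 bits/pixel, and 0
 * means "colour depth not indicated", used here for 24 bits/pixel.
 */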
/*
* YCC422 is always 36-bit and not considered deep colour so
* doesn't signal in GCP.
*/
if (vc4_state->output_format == VC4_HDMI_OUTPUT_YUV422) {
gcp = 0;
}
reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1);
reg &= ~(VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK |
VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK);
reg |= VC4_SET_FIELD(2, VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE) |
VC4_SET_FIELD(gcp, VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH);
HDMI_WRITE(HDMI_DEEP_COLOR_CONFIG_1, reg);
reg = HDMI_READ(HDMI_GCP_WORD_1);
reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK;
reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1);
reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK;
reg |= VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE;
HDMI_WRITE(HDMI_GCP_WORD_1, reg);
reg = HDMI_READ(HDMI_GCP_CONFIG);
reg |= VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
HDMI_WRITE(HDMI_GCP_CONFIG, reg);
reg = HDMI_READ(HDMI_MISC_CONTROL);
reg &= ~VC5_HDMI_MISC_CONTROL_PIXEL_REP_MASK;
reg |= VC4_SET_FIELD(pixel_rep - 1, VC5_HDMI_MISC_CONTROL_PIXEL_REP);
HDMI_WRITE(HDMI_MISC_CONTROL, reg);
HDMI_WRITE(HDMI_CLOCK_STOP, 0);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
drm_dev_exit(idx);
}
static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
{
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
u32 drift;
int ret;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
drift = HDMI_READ(HDMI_FIFO_CTL);
drift &= VC4_HDMI_FIFO_VALID_WRITE_MASK;
HDMI_WRITE(HDMI_FIFO_CTL,
drift & ~VC4_HDMI_FIFO_CTL_RECENTER);
HDMI_WRITE(HDMI_FIFO_CTL,
drift | VC4_HDMI_FIFO_CTL_RECENTER);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
usleep_range(1000, 1100);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_FIFO_CTL,
drift & ~VC4_HDMI_FIFO_CTL_RECENTER);
HDMI_WRITE(HDMI_FIFO_CTL,
drift | VC4_HDMI_FIFO_CTL_RECENTER);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
ret = wait_for(HDMI_READ(HDMI_FIFO_CTL) &
VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1);
WARN_ONCE(ret, "Timeout waiting for "
"VC4_HDMI_FIFO_CTL_RECENTER_DONE");
drm_dev_exit(idx);
}
static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *conn_state =
drm_atomic_get_new_connector_state(state, connector);
struct vc4_hdmi_connector_state *vc4_conn_state =
conn_state_to_vc4_hdmi_conn_state(conn_state);
const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
unsigned long tmds_char_rate = vc4_conn_state->tmds_char_rate;
unsigned long bvb_rate, hsm_rate;
unsigned long flags;
int ret;
int idx;
mutex_lock(&vc4_hdmi->mutex);
if (!drm_dev_enter(drm, &idx))
goto out;
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
goto err_dev_exit;
}
/*
* As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
* be faster than pixel clock, infinitesimally faster, tested in
* simulation. Otherwise, exact value is unimportant for HDMI
* operation." This conflicts with bcm2835's vc4 documentation, which
* states HSM's clock has to be at least 108% of the pixel clock.
*
* Real life tests reveal that vc4's firmware statement holds up, and
* users are able to use pixel clocks closer to HSM's, namely for
* 1920x1200@60Hz. So it was decided to leave a 1% margin between
* both clocks, which, for RPi0-3, implies a maximum pixel clock of
* 162MHz.
*
* Additionally, the AXI clock needs to be at least 25% of
* pixel clock, but HSM ends up being the limiting factor.
*/
hsm_rate = max_t(unsigned long,
HSM_MIN_CLOCK_FREQ,
(tmds_char_rate / 100) * 101);
ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate);
if (ret) {
DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
goto err_put_runtime_pm;
}
ret = clk_set_rate(vc4_hdmi->pixel_clock, tmds_char_rate);
if (ret) {
DRM_ERROR("Failed to set pixel clock rate: %d\n", ret);
goto err_put_runtime_pm;
}
ret = clk_prepare_enable(vc4_hdmi->pixel_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel clock: %d\n", ret);
goto err_put_runtime_pm;
}
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (tmds_char_rate > 297000000)
bvb_rate = 300000000;
else if (tmds_char_rate > 148500000)
bvb_rate = 150000000;
else
bvb_rate = 75000000;
ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
if (ret) {
DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
goto err_disable_pixel_clock;
}
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
goto err_disable_pixel_clock;
}
if (vc4_hdmi->variant->phy_init)
vc4_hdmi->variant->phy_init(vc4_hdmi, vc4_conn_state);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
HDMI_READ(HDMI_SCHEDULER_CONTROL) |
VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT |
VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
if (vc4_hdmi->variant->set_timings)
vc4_hdmi->variant->set_timings(vc4_hdmi, conn_state, mode);
drm_dev_exit(idx);
mutex_unlock(&vc4_hdmi->mutex);
return;
err_disable_pixel_clock:
clk_disable_unprepare(vc4_hdmi->pixel_clock);
err_put_runtime_pm:
pm_runtime_put(&vc4_hdmi->pdev->dev);
err_dev_exit:
drm_dev_exit(idx);
out:
mutex_unlock(&vc4_hdmi->mutex);
return;
}
static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_connector *connector = &vc4_hdmi->connector;
const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
struct drm_connector_state *conn_state =
drm_atomic_get_new_connector_state(state, connector);
unsigned long flags;
int idx;
mutex_lock(&vc4_hdmi->mutex);
if (!drm_dev_enter(drm, &idx))
goto out;
if (vc4_hdmi->variant->csc_setup)
vc4_hdmi->variant->csc_setup(vc4_hdmi, conn_state, mode);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
drm_dev_exit(idx);
out:
mutex_unlock(&vc4_hdmi->mutex);
}
static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_device *drm = vc4_hdmi->connector.dev;
const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
struct drm_display_info *display = &vc4_hdmi->connector.display_info;
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
unsigned long flags;
int ret;
int idx;
mutex_lock(&vc4_hdmi->mutex);
if (!drm_dev_enter(drm, &idx))
goto out;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
VC4_HD_VID_CTL_ENABLE |
VC4_HD_VID_CTL_CLRRGB |
VC4_HD_VID_CTL_UNDERFLOW_ENABLE |
VC4_HD_VID_CTL_FRAME_COUNTER_RESET |
(vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) |
(hsync_pos ? 0 : VC4_HD_VID_CTL_HSYNC_LOW));
HDMI_WRITE(HDMI_VID_CTL,
HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_BLANKPIX);
if (display->is_hdmi) {
HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
HDMI_READ(HDMI_SCHEDULER_CONTROL) |
VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
ret = wait_for(HDMI_READ(HDMI_SCHEDULER_CONTROL) &
VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE, 1000);
WARN_ONCE(ret, "Timeout waiting for "
"VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n");
} else {
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
~(VC4_HDMI_RAM_PACKET_ENABLE));
HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
HDMI_READ(HDMI_SCHEDULER_CONTROL) &
~VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
ret = wait_for(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) &
VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE), 1000);
WARN_ONCE(ret, "Timeout waiting for "
"!VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n");
}
if (display->is_hdmi) {
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
WARN_ON(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) &
VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE));
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
VC4_HDMI_RAM_PACKET_ENABLE);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
vc4_hdmi->packet_ram_enabled = true;
vc4_hdmi_set_infoframes(encoder);
}
vc4_hdmi_recenter_fifo(vc4_hdmi);
vc4_hdmi_enable_scrambling(encoder);
drm_dev_exit(idx);
out:
mutex_unlock(&vc4_hdmi->mutex);
}
static void vc4_hdmi_encoder_atomic_mode_set(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct vc4_hdmi_connector_state *vc4_state =
conn_state_to_vc4_hdmi_conn_state(conn_state);
mutex_lock(&vc4_hdmi->mutex);
drm_mode_copy(&vc4_hdmi->saved_adjusted_mode,
&crtc_state->adjusted_mode);
vc4_hdmi->output_bpc = vc4_state->output_bpc;
vc4_hdmi->output_format = vc4_state->output_format;
mutex_unlock(&vc4_hdmi->mutex);
}
static bool
vc4_hdmi_sink_supports_format_bpc(const struct vc4_hdmi *vc4_hdmi,
const struct drm_display_info *info,
const struct drm_display_mode *mode,
unsigned int format, unsigned int bpc)
{
struct drm_device *dev = vc4_hdmi->connector.dev;
u8 vic = drm_match_cea_mode(mode);
if (vic == 1 && bpc != 8) {
drm_dbg(dev, "VIC1 requires a bpc of 8, got %u\n", bpc);
return false;
}
if (!info->is_hdmi &&
(format != VC4_HDMI_OUTPUT_RGB || bpc != 8)) {
drm_dbg(dev, "DVI Monitors require an RGB output at 8 bpc\n");
return false;
}
switch (format) {
case VC4_HDMI_OUTPUT_RGB:
drm_dbg(dev, "RGB Format, checking the constraints.\n");
if (!(info->color_formats & DRM_COLOR_FORMAT_RGB444))
return false;
if (bpc == 10 && !(info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30)) {
drm_dbg(dev, "10 BPC but sink doesn't support Deep Color 30.\n");
return false;
}
if (bpc == 12 && !(info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_36)) {
drm_dbg(dev, "12 BPC but sink doesn't support Deep Color 36.\n");
return false;
}
drm_dbg(dev, "RGB format supported in that configuration.\n");
return true;
case VC4_HDMI_OUTPUT_YUV422:
drm_dbg(dev, "YUV422 format, checking the constraints.\n");
if (!(info->color_formats & DRM_COLOR_FORMAT_YCBCR422)) {
drm_dbg(dev, "Sink doesn't support YUV422.\n");
return false;
}
if (bpc != 12) {
drm_dbg(dev, "YUV422 only supports 12 bpc.\n");
return false;
}
drm_dbg(dev, "YUV422 format supported in that configuration.\n");
return true;
case VC4_HDMI_OUTPUT_YUV444:
drm_dbg(dev, "YUV444 format, checking the constraints.\n");
if (!(info->color_formats & DRM_COLOR_FORMAT_YCBCR444)) {
drm_dbg(dev, "Sink doesn't support YUV444.\n");
return false;
}
if (bpc == 10 && !(info->edid_hdmi_ycbcr444_dc_modes & DRM_EDID_HDMI_DC_30)) {
drm_dbg(dev, "10 BPC but sink doesn't support Deep Color 30.\n");
return false;
}
if (bpc == 12 && !(info->edid_hdmi_ycbcr444_dc_modes & DRM_EDID_HDMI_DC_36)) {
drm_dbg(dev, "12 BPC but sink doesn't support Deep Color 36.\n");
return false;
}
drm_dbg(dev, "YUV444 format supported in that configuration.\n");
return true;
}
return false;
}
static enum drm_mode_status
vc4_hdmi_encoder_clock_valid(const struct vc4_hdmi *vc4_hdmi,
const struct drm_display_mode *mode,
unsigned long long clock)
{
const struct drm_connector *connector = &vc4_hdmi->connector;
const struct drm_display_info *info = &connector->display_info;
struct vc4_dev *vc4 = to_vc4_dev(connector->dev);
if (clock > vc4_hdmi->variant->max_pixel_clock)
return MODE_CLOCK_HIGH;
if (!vc4->hvs->vc5_hdmi_enable_hdmi_20 && clock > HDMI_14_MAX_TMDS_CLK)
return MODE_CLOCK_HIGH;
/* 4096x2160@60 is not reliable without overclocking core */
if (!vc4->hvs->vc5_hdmi_enable_4096by2160 &&
mode->hdisplay > 3840 && mode->vdisplay >= 2160 &&
drm_mode_vrefresh(mode) >= 50)
return MODE_CLOCK_HIGH;
if (info->max_tmds_clock && clock > (info->max_tmds_clock * 1000))
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static unsigned long long
vc4_hdmi_encoder_compute_mode_clock(const struct drm_display_mode *mode,
unsigned int bpc,
enum vc4_hdmi_output_format fmt)
{
unsigned long long clock = mode->clock * 1000ULL;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
clock = clock * 2;
if (fmt == VC4_HDMI_OUTPUT_YUV422)
bpc = 8;
clock = clock * bpc;
do_div(clock, 8);
return clock;
}
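/*
 * Worked example: 1080p60 (148.5 MHz pixel clock) at 12 bpc RGB gives
 * 148500000 * 12 / 8 = 222.75 MHz of TMDS character rate, while the
 * same mode in YUV422 stays at 148.5 MHz since bpc is forced back
 * to 8.
 */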
static int
vc4_hdmi_encoder_compute_clock(const struct vc4_hdmi *vc4_hdmi,
struct vc4_hdmi_connector_state *vc4_state,
const struct drm_display_mode *mode,
unsigned int bpc, unsigned int fmt)
{
unsigned long long clock;
clock = vc4_hdmi_encoder_compute_mode_clock(mode, bpc, fmt);
if (vc4_hdmi_encoder_clock_valid(vc4_hdmi, mode, clock) != MODE_OK)
return -EINVAL;
vc4_state->tmds_char_rate = clock;
return 0;
}
static int
vc4_hdmi_encoder_compute_format(const struct vc4_hdmi *vc4_hdmi,
struct vc4_hdmi_connector_state *vc4_state,
const struct drm_display_mode *mode,
unsigned int bpc)
{
struct drm_device *dev = vc4_hdmi->connector.dev;
const struct drm_connector *connector = &vc4_hdmi->connector;
const struct drm_display_info *info = &connector->display_info;
unsigned int format;
drm_dbg(dev, "Trying with an RGB output\n");
format = VC4_HDMI_OUTPUT_RGB;
if (vc4_hdmi_sink_supports_format_bpc(vc4_hdmi, info, mode, format, bpc)) {
int ret;
ret = vc4_hdmi_encoder_compute_clock(vc4_hdmi, vc4_state,
mode, bpc, format);
if (!ret) {
vc4_state->output_format = format;
return 0;
}
}
drm_dbg(dev, "Failed, Trying with an YUV422 output\n");
format = VC4_HDMI_OUTPUT_YUV422;
if (vc4_hdmi_sink_supports_format_bpc(vc4_hdmi, info, mode, format, bpc)) {
int ret;
ret = vc4_hdmi_encoder_compute_clock(vc4_hdmi, vc4_state,
mode, bpc, format);
if (!ret) {
vc4_state->output_format = format;
return 0;
}
}
drm_dbg(dev, "Failed. No Format Supported for that bpc count.\n");
return -EINVAL;
}
static int
vc4_hdmi_encoder_compute_config(const struct vc4_hdmi *vc4_hdmi,
struct vc4_hdmi_connector_state *vc4_state,
const struct drm_display_mode *mode)
{
struct drm_device *dev = vc4_hdmi->connector.dev;
struct drm_connector_state *conn_state = &vc4_state->base;
unsigned int max_bpc = clamp_t(unsigned int, conn_state->max_bpc, 8, 12);
unsigned int bpc;
int ret;
for (bpc = max_bpc; bpc >= 8; bpc -= 2) {
drm_dbg(dev, "Trying with a %d bpc output\n", bpc);
ret = vc4_hdmi_encoder_compute_format(vc4_hdmi, vc4_state,
mode, bpc);
if (ret)
continue;
vc4_state->output_bpc = bpc;
drm_dbg(dev,
"Mode %ux%u @ %uHz: Found configuration: bpc: %u, fmt: %s, clock: %llu\n",
mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
vc4_state->output_bpc,
vc4_hdmi_output_fmt_str(vc4_state->output_format),
vc4_state->tmds_char_rate);
break;
}
return ret;
}
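/*
 * Example descent: with "max bpc" set to 12, the loop above tries
 * 12 bpc (RGB, then YUV422), then 10 bpc, then 8 bpc, and stops at
 * the first combination whose TMDS character rate passes
 * vc4_hdmi_encoder_clock_valid().
 */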
#define WIFI_2_4GHz_CH1_MIN_FREQ 2400000000ULL
#define WIFI_2_4GHz_CH1_MAX_FREQ 2422000000ULL
static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *old_conn_state =
drm_atomic_get_old_connector_state(conn_state->state, connector);
struct vc4_hdmi_connector_state *old_vc4_state =
conn_state_to_vc4_hdmi_conn_state(old_conn_state);
struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
unsigned long long tmds_char_rate = mode->clock * 1000;
unsigned long long tmds_bit_rate;
int ret;
if (vc4_hdmi->variant->unsupported_odd_h_timings) {
if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
/* Only try to fixup DBLCLK modes to get 480i and 576i
* working.
* A generic solution for all modes with odd horizontal
* timing values seems impossible based on trying to
* solve it for 1366x768 monitors.
*/
if ((mode->hsync_start - mode->hdisplay) & 1)
mode->hsync_start--;
if ((mode->hsync_end - mode->hsync_start) & 1)
mode->hsync_end--;
}
/* Now check whether we still have odd values remaining */
if ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
(mode->hsync_end % 2) || (mode->htotal % 2))
return -EINVAL;
}
/*
* The 1440p@60 pixel rate is in the same range as the first
* WiFi channel (between 2.4GHz and 2.422GHz with 22MHz
* bandwidth): its 241.5 MHz TMDS character rate gives a 2.415 GHz
* bit rate. Slightly lower the frequency to bring it out of the
* WiFi range.
*/
tmds_bit_rate = tmds_char_rate * 10;
if (vc4_hdmi->disable_wifi_frequencies &&
(tmds_bit_rate >= WIFI_2_4GHz_CH1_MIN_FREQ &&
tmds_bit_rate <= WIFI_2_4GHz_CH1_MAX_FREQ)) {
mode->clock = 238560;
tmds_char_rate = mode->clock * 1000;
}
ret = vc4_hdmi_encoder_compute_config(vc4_hdmi, vc4_state, mode);
if (ret)
return ret;
/* vc4_hdmi_encoder_compute_config may have changed output_bpc and/or output_format */
if (vc4_state->output_bpc != old_vc4_state->output_bpc ||
vc4_state->output_format != old_vc4_state->output_format)
crtc_state->mode_changed = true;
return 0;
}
static enum drm_mode_status
vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
if (vc4_hdmi->variant->unsupported_odd_h_timings &&
!(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
(mode->hsync_end % 2) || (mode->htotal % 2)))
return MODE_H_ILLEGAL;
return vc4_hdmi_encoder_clock_valid(vc4_hdmi, mode, mode->clock * 1000);
}
static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
.atomic_check = vc4_hdmi_encoder_atomic_check,
.atomic_mode_set = vc4_hdmi_encoder_atomic_mode_set,
.mode_valid = vc4_hdmi_encoder_mode_valid,
};
static int vc4_hdmi_late_register(struct drm_encoder *encoder)
{
struct drm_device *drm = encoder->dev;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
drm_debugfs_add_file(drm, variant->debugfs_name,
vc4_hdmi_debugfs_regs, vc4_hdmi);
return 0;
}
static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = {
.late_register = vc4_hdmi_late_register,
};
static u32 vc4_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
{
int i;
u32 channel_map = 0;
for (i = 0; i < 8; i++) {
if (channel_mask & BIT(i))
channel_map |= i << (3 * i);
}
return channel_map;
}
static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
{
int i;
u32 channel_map = 0;
for (i = 0; i < 8; i++) {
if (channel_mask & BIT(i))
channel_map |= i << (4 * i);
}
return channel_map;
}
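/*
 * Example: a two-channel mask (BIT(0) | BIT(1)) maps to 0 << 0 | 1 << 3
 * = 0x8 with the vc4 layout (3 bits per slot) and to 0 << 0 | 1 << 4
 * = 0x10 with the vc5 layout (4 bits per slot).
 */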
static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi)
{
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
u32 hotplug;
int idx;
if (!drm_dev_enter(drm, &idx))
return false;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
hotplug = HDMI_READ(HDMI_HOTPLUG);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
drm_dev_exit(idx);
return !!(hotplug & VC4_HDMI_HOTPLUG_CONNECTED);
}
/* HDMI audio codec callbacks */
static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
unsigned int samplerate)
{
struct drm_device *drm = vc4_hdmi->connector.dev;
u32 hsm_clock;
unsigned long flags;
unsigned long n, m;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
hsm_clock = clk_get_rate(vc4_hdmi->audio_clock);
rational_best_approximation(hsm_clock, samplerate,
VC4_HD_MAI_SMP_N_MASK >>
VC4_HD_MAI_SMP_N_SHIFT,
(VC4_HD_MAI_SMP_M_MASK >>
VC4_HD_MAI_SMP_M_SHIFT) + 1,
&n, &m);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_MAI_SMP,
VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) |
VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M));
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
drm_dev_exit(idx);
}
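/*
 * Worked example (assuming a 108 MHz audio clock): for a 48 kHz
 * stream, rational_best_approximation() yields n/m = 2250/1, since
 * 108000000 / 48000 is exactly 2250.
 */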
static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
{
const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
u32 n, cts;
u64 tmp;
lockdep_assert_held(&vc4_hdmi->mutex);
lockdep_assert_held(&vc4_hdmi->hw_lock);
n = 128 * samplerate / 1000;
tmp = (u64)(mode->clock * 1000) * n;
do_div(tmp, 128 * samplerate);
cts = tmp;
HDMI_WRITE(HDMI_CRP_CFG,
VC4_HDMI_CRP_CFG_EXTERNAL_CTS_EN |
VC4_SET_FIELD(n, VC4_HDMI_CRP_CFG_N));
/*
* We could get slightly more accurate clocks in some cases by
* providing a distinct CTS_1 value; the hardware alternates
* between the two CTS values based on the period fields.
*/
HDMI_WRITE(HDMI_CTS_0, cts);
HDMI_WRITE(HDMI_CTS_1, cts);
}
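/*
 * Worked example: at 48 kHz, N = 128 * 48000 / 1000 = 6144; with a
 * 148.5 MHz TMDS clock, CTS = 148500000 * 6144 / (128 * 48000) =
 * 148500. The sink recovers the audio clock as 128 * fs =
 * f_TMDS * N / CTS.
 */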
static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
{
struct snd_soc_card *card = snd_soc_dai_get_drvdata(dai);
return snd_soc_card_get_drvdata(card);
}
static bool vc4_hdmi_audio_can_stream(struct vc4_hdmi *vc4_hdmi)
{
struct drm_display_info *display = &vc4_hdmi->connector.display_info;
lockdep_assert_held(&vc4_hdmi->mutex);
/*
* If the encoder is currently in DVI mode, treat the codec DAI
* as missing.
*/
if (!display->is_hdmi)
return false;
return true;
}
static int vc4_hdmi_audio_startup(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int ret = 0;
int idx;
mutex_lock(&vc4_hdmi->mutex);
if (!drm_dev_enter(drm, &idx)) {
ret = -ENODEV;
goto out;
}
if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) {
ret = -ENODEV;
goto out_dev_exit;
}
vc4_hdmi->audio.streaming = true;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_MAI_CTL,
VC4_HD_MAI_CTL_RESET |
VC4_HD_MAI_CTL_FLUSH |
VC4_HD_MAI_CTL_DLATE |
VC4_HD_MAI_CTL_ERRORE |
VC4_HD_MAI_CTL_ERRORF);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
if (vc4_hdmi->variant->phy_rng_enable)
vc4_hdmi->variant->phy_rng_enable(vc4_hdmi);
out_dev_exit:
drm_dev_exit(idx);
out:
mutex_unlock(&vc4_hdmi->mutex);
return ret;
}
static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
{
struct drm_encoder *encoder = &vc4_hdmi->encoder.base;
struct device *dev = &vc4_hdmi->pdev->dev;
unsigned long flags;
int ret;
lockdep_assert_held(&vc4_hdmi->mutex);
vc4_hdmi->audio.streaming = false;
ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO, false);
if (ret)
dev_err(dev, "Failed to stop audio infoframe: %d\n", ret);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_RESET);
HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_ERRORF);
HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_FLUSH);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
static void vc4_hdmi_audio_shutdown(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
mutex_lock(&vc4_hdmi->mutex);
if (!drm_dev_enter(drm, &idx))
goto out;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_MAI_CTL,
VC4_HD_MAI_CTL_DLATE |
VC4_HD_MAI_CTL_ERRORE |
VC4_HD_MAI_CTL_ERRORF);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
if (vc4_hdmi->variant->phy_rng_disable)
vc4_hdmi->variant->phy_rng_disable(vc4_hdmi);
vc4_hdmi->audio.streaming = false;
vc4_hdmi_audio_reset(vc4_hdmi);
drm_dev_exit(idx);
out:
mutex_unlock(&vc4_hdmi->mutex);
}
static int sample_rate_to_mai_fmt(int samplerate)
{
switch (samplerate) {
case 8000:
return VC4_HDMI_MAI_SAMPLE_RATE_8000;
case 11025:
return VC4_HDMI_MAI_SAMPLE_RATE_11025;
case 12000:
return VC4_HDMI_MAI_SAMPLE_RATE_12000;
case 16000:
return VC4_HDMI_MAI_SAMPLE_RATE_16000;
case 22050:
return VC4_HDMI_MAI_SAMPLE_RATE_22050;
case 24000:
return VC4_HDMI_MAI_SAMPLE_RATE_24000;
case 32000:
return VC4_HDMI_MAI_SAMPLE_RATE_32000;
case 44100:
return VC4_HDMI_MAI_SAMPLE_RATE_44100;
case 48000:
return VC4_HDMI_MAI_SAMPLE_RATE_48000;
case 64000:
return VC4_HDMI_MAI_SAMPLE_RATE_64000;
case 88200:
return VC4_HDMI_MAI_SAMPLE_RATE_88200;
case 96000:
return VC4_HDMI_MAI_SAMPLE_RATE_96000;
case 128000:
return VC4_HDMI_MAI_SAMPLE_RATE_128000;
case 176400:
return VC4_HDMI_MAI_SAMPLE_RATE_176400;
case 192000:
return VC4_HDMI_MAI_SAMPLE_RATE_192000;
default:
return VC4_HDMI_MAI_SAMPLE_RATE_NOT_INDICATED;
}
}
/* HDMI audio codec callbacks */
static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_encoder *encoder = &vc4_hdmi->encoder.base;
unsigned int sample_rate = params->sample_rate;
unsigned int channels = params->channels;
unsigned long flags;
u32 audio_packet_config, channel_mask;
u32 channel_map;
u32 mai_audio_format;
u32 mai_sample_rate;
int ret = 0;
int idx;
dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
sample_rate, params->sample_width, channels);
mutex_lock(&vc4_hdmi->mutex);
if (!drm_dev_enter(drm, &idx)) {
ret = -ENODEV;
goto out;
}
if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) {
ret = -EINVAL;
goto out_dev_exit;
}
vc4_hdmi_audio_set_mai_clock(vc4_hdmi, sample_rate);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_MAI_CTL,
VC4_SET_FIELD(channels, VC4_HD_MAI_CTL_CHNUM) |
VC4_HD_MAI_CTL_WHOLSMP |
VC4_HD_MAI_CTL_CHALIGN |
VC4_HD_MAI_CTL_ENABLE);
mai_sample_rate = sample_rate_to_mai_fmt(sample_rate);
if (params->iec.status[0] & IEC958_AES0_NONAUDIO &&
params->channels == 8)
mai_audio_format = VC4_HDMI_MAI_FORMAT_HBR;
else
mai_audio_format = VC4_HDMI_MAI_FORMAT_PCM;
HDMI_WRITE(HDMI_MAI_FMT,
VC4_SET_FIELD(mai_sample_rate,
VC4_HDMI_MAI_FORMAT_SAMPLE_RATE) |
VC4_SET_FIELD(mai_audio_format,
VC4_HDMI_MAI_FORMAT_AUDIO_FORMAT));
/* The B frame identifier should match the value used by alsa-lib (8) */
audio_packet_config =
VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_SAMPLE_FLAT |
VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_INACTIVE_CHANNELS |
VC4_SET_FIELD(0x8, VC4_HDMI_AUDIO_PACKET_B_FRAME_IDENTIFIER);
channel_mask = GENMASK(channels - 1, 0);
audio_packet_config |= VC4_SET_FIELD(channel_mask,
VC4_HDMI_AUDIO_PACKET_CEA_MASK);
/* Set the MAI threshold */
HDMI_WRITE(HDMI_MAI_THR,
VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICHIGH) |
VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICLOW) |
VC4_SET_FIELD(0x06, VC4_HD_MAI_THR_DREQHIGH) |
VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_DREQLOW));
HDMI_WRITE(HDMI_MAI_CONFIG,
VC4_HDMI_MAI_CONFIG_BIT_REVERSE |
VC4_HDMI_MAI_CONFIG_FORMAT_REVERSE |
VC4_SET_FIELD(channel_mask, VC4_HDMI_MAI_CHANNEL_MASK));
channel_map = vc4_hdmi->variant->channel_map(vc4_hdmi, channel_mask);
HDMI_WRITE(HDMI_MAI_CHANNEL_MAP, channel_map);
HDMI_WRITE(HDMI_AUDIO_PACKET_CONFIG, audio_packet_config);
vc4_hdmi_set_n_cts(vc4_hdmi, sample_rate);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
memcpy(&vc4_hdmi->audio.infoframe, ¶ms->cea, sizeof(params->cea));
vc4_hdmi_set_audio_infoframe(encoder);
out_dev_exit:
drm_dev_exit(idx);
out:
mutex_unlock(&vc4_hdmi->mutex);
return ret;
}
static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
.name = "vc4-hdmi-cpu-dai-component",
.legacy_dai_naming = 1,
};
static int vc4_hdmi_audio_cpu_dai_probe(struct snd_soc_dai *dai)
{
struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai);
snd_soc_dai_init_dma_data(dai, &vc4_hdmi->audio.dma_data, NULL);
return 0;
}
static const struct snd_soc_dai_ops vc4_snd_dai_ops = {
.probe = vc4_hdmi_audio_cpu_dai_probe,
};
static struct snd_soc_dai_driver vc4_hdmi_audio_cpu_dai_drv = {
.name = "vc4-hdmi-cpu-dai",
.ops = &vc4_snd_dai_ops,
.playback = {
.stream_name = "Playback",
.channels_min = 1,
.channels_max = 8,
.rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
SNDRV_PCM_RATE_192000,
.formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE,
},
};
static const struct snd_dmaengine_pcm_config pcm_conf = {
.chan_names[SNDRV_PCM_STREAM_PLAYBACK] = "audio-rx",
.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
};
static int vc4_hdmi_audio_get_eld(struct device *dev, void *data,
uint8_t *buf, size_t len)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
struct drm_connector *connector = &vc4_hdmi->connector;
mutex_lock(&vc4_hdmi->mutex);
memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
mutex_unlock(&vc4_hdmi->mutex);
return 0;
}
static const struct hdmi_codec_ops vc4_hdmi_codec_ops = {
.get_eld = vc4_hdmi_audio_get_eld,
.prepare = vc4_hdmi_audio_prepare,
.audio_shutdown = vc4_hdmi_audio_shutdown,
.audio_startup = vc4_hdmi_audio_startup,
};
static struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
.ops = &vc4_hdmi_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
};
static void vc4_hdmi_audio_codec_release(void *ptr)
{
struct vc4_hdmi *vc4_hdmi = ptr;
platform_device_unregister(vc4_hdmi->audio.codec_pdev);
vc4_hdmi->audio.codec_pdev = NULL;
}
static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
{
const struct vc4_hdmi_register *mai_data =
&vc4_hdmi->variant->registers[HDMI_MAI_DATA];
struct snd_soc_dai_link *dai_link = &vc4_hdmi->audio.link;
struct snd_soc_card *card = &vc4_hdmi->audio.card;
struct device *dev = &vc4_hdmi->pdev->dev;
struct platform_device *codec_pdev;
const __be32 *addr;
int index, len;
int ret;
/*
* ASoC makes it a bit hard to retrieve a pointer to the
* vc4_hdmi structure. Registering the card will overwrite our
* device drvdata with a pointer to the snd_soc_card structure,
* which can then be used to retrieve whatever drvdata we want
* to associate.
*
* However, that doesn't work when we don't register an ASoC card
* at all (for example because an old DT is missing the dmas
* properties): the card is never registered and the device
* drvdata is never set.
*
* We can deal with both cases by making sure a snd_soc_card
* pointer and a vc4_hdmi structure point to the same memory
* address, so we can use them interchangeably without any issue.
*/
BUILD_BUG_ON(offsetof(struct vc4_hdmi_audio, card) != 0);
BUILD_BUG_ON(offsetof(struct vc4_hdmi, audio) != 0);
if (!of_find_property(dev->of_node, "dmas", &len) || !len) {
dev_warn(dev,
"'dmas' DT property is missing or empty, no HDMI audio\n");
return 0;
}
if (mai_data->reg != VC4_HD) {
WARN_ONCE(true, "MAI isn't in the HD block\n");
return -EINVAL;
}
/*
* Get the physical address of VC4_HD_MAI_DATA. We need to retrieve
* the bus address specified in the DT, because the physical address
* (the one returned by platform_get_resource()) is not appropriate
* for DMA transfers.
* This VC/MMU mapping should probably be exposed to avoid this kind of hack.
*/
index = of_property_match_string(dev->of_node, "reg-names", "hd");
/* Before BCM2711, we don't have a named register range */
if (index < 0)
index = 1;
addr = of_get_address(dev->of_node, index, NULL, NULL);
if (!addr)
return -EINVAL;
vc4_hdmi->audio.dma_data.addr = be32_to_cpup(addr) + mai_data->offset;
vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
vc4_hdmi->audio.dma_data.maxburst = 2;
/*
* NOTE: Strictly speaking, we should probably use a DRM-managed
* registration here so that the audio components aren't removed
* while the DRM driver still has users.
*
* However, the ASoC core uses a number of devm_kzalloc calls
* when registering, even when using non-device-managed
* functions (such as in snd_soc_register_component()).
*
* If we call snd_soc_unregister_component() in a DRM-managed
* action, the device-managed actions have already been executed
* and thus we would access memory that has been freed.
*
* Using device-managed hooks here probably leaves us open to a
* bunch of issues if userspace still has a handle on the ALSA
* device when the device is removed. However, this is mitigated
* by the use of drm_dev_enter()/drm_dev_exit() in the audio
* path, which prevents access to the device resources once they
* are gone.
*
* Finally, the vc4_hdmi structure is DRM-managed and thus only
* freed once the last user has closed the DRM device file. It
* should thus outlive ALSA in most situations.
*/
ret = devm_snd_dmaengine_pcm_register(dev, &pcm_conf, 0);
if (ret) {
dev_err(dev, "Could not register PCM component: %d\n", ret);
return ret;
}
ret = devm_snd_soc_register_component(dev, &vc4_hdmi_audio_cpu_dai_comp,
&vc4_hdmi_audio_cpu_dai_drv, 1);
if (ret) {
dev_err(dev, "Could not register CPU DAI: %d\n", ret);
return ret;
}
codec_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO,
&vc4_hdmi_codec_pdata,
sizeof(vc4_hdmi_codec_pdata));
if (IS_ERR(codec_pdev)) {
dev_err(dev, "Couldn't register the HDMI codec: %ld\n", PTR_ERR(codec_pdev));
return PTR_ERR(codec_pdev);
}
vc4_hdmi->audio.codec_pdev = codec_pdev;
ret = devm_add_action_or_reset(dev, vc4_hdmi_audio_codec_release, vc4_hdmi);
if (ret)
return ret;
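/*
* Wire up the single DAI link: the CPU DAI and the DMA engine
* platform both live on this device, while the codec is the
* hdmi-codec platform device registered just above.
*/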
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
dai_link->platforms = &vc4_hdmi->audio.platform;
dai_link->num_cpus = 1;
dai_link->num_codecs = 1;
dai_link->num_platforms = 1;
dai_link->name = "MAI";
dai_link->stream_name = "MAI PCM";
dai_link->codecs->dai_name = "i2s-hifi";
dai_link->cpus->dai_name = dev_name(dev);
dai_link->codecs->name = dev_name(&codec_pdev->dev);
dai_link->platforms->name = dev_name(dev);
card->dai_link = dai_link;
card->num_links = 1;
card->name = vc4_hdmi->variant->card_name;
card->driver_name = "vc4-hdmi";
card->dev = dev;
card->owner = THIS_MODULE;
/*
* Be careful, snd_soc_register_card() calls dev_set_drvdata() and
* stores a pointer to the snd card object in dev->driver_data. This
* means we cannot use it for something else. The hdmi back-pointer is
* now stored in card->drvdata and should be retrieved with
* snd_soc_card_get_drvdata() if needed.
*/
snd_soc_card_set_drvdata(card, vc4_hdmi);
ret = devm_snd_soc_register_card(dev, card);
if (ret)
dev_err_probe(dev, ret, "Could not register sound card\n");
return ret;
}
static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_device *dev = connector->dev;
if (dev && dev->registered)
drm_connector_helper_hpd_irq_event(connector);
return IRQ_HANDLED;
}
static int vc4_hdmi_hotplug_init(struct vc4_hdmi *vc4_hdmi)
{
struct drm_connector *connector = &vc4_hdmi->connector;
struct platform_device *pdev = vc4_hdmi->pdev;
int ret;
if (vc4_hdmi->variant->external_irq_controller) {
/* platform_get_irq_byname() returns a negative errno on failure. */
int hpd_con = platform_get_irq_byname(pdev, "hpd-connected");
int hpd_rm = platform_get_irq_byname(pdev, "hpd-removed");
if (hpd_con < 0 || hpd_rm < 0)
return hpd_con < 0 ? hpd_con : hpd_rm;
ret = devm_request_threaded_irq(&pdev->dev, hpd_con,
NULL,
vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
"vc4 hdmi hpd connected", vc4_hdmi);
if (ret)
return ret;
ret = devm_request_threaded_irq(&pdev->dev, hpd_rm,
NULL,
vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
"vc4 hdmi hpd disconnected", vc4_hdmi);
if (ret)
return ret;
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
return 0;
}
#ifdef CONFIG_DRM_VC4_HDMI_CEC
static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
if (vc4_hdmi->cec_rx_msg.len)
cec_received_msg(vc4_hdmi->cec_adap,
&vc4_hdmi->cec_rx_msg);
return IRQ_HANDLED;
}
static irqreturn_t vc4_cec_irq_handler_tx_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
if (vc4_hdmi->cec_tx_ok) {
cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK,
0, 0, 0, 0);
} else {
/*
* This CEC implementation makes 1 retry, so if we
* get a NACK, then that means it made 2 attempts.
*/
cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_NACK,
0, 2, 0, 0);
}
return IRQ_HANDLED;
}
static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
irqreturn_t ret;
if (vc4_hdmi->cec_irq_was_rx)
ret = vc4_cec_irq_handler_rx_thread(irq, priv);
else
ret = vc4_cec_irq_handler_tx_thread(irq, priv);
return ret;
}
static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1)
{
struct drm_device *dev = vc4_hdmi->connector.dev;
struct cec_msg *msg = &vc4_hdmi->cec_rx_msg;
unsigned int i;
lockdep_assert_held(&vc4_hdmi->hw_lock);
msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >>
VC4_HDMI_CEC_REC_WRD_CNT_SHIFT);
if (msg->len > 16) {
drm_err(dev, "Attempting to read too much data (%d)\n", msg->len);
return;
}
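/* Each 32-bit RX data register holds four message bytes, LSB first. */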
for (i = 0; i < msg->len; i += 4) {
u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + (i >> 2));
msg->msg[i] = val & 0xff;
msg->msg[i + 1] = (val >> 8) & 0xff;
msg->msg[i + 2] = (val >> 16) & 0xff;
msg->msg[i + 3] = (val >> 24) & 0xff;
}
}
static irqreturn_t vc4_cec_irq_handler_tx_bare_locked(struct vc4_hdmi *vc4_hdmi)
{
u32 cntrl1;
/*
* We don't need to protect the register access using
* drm_dev_enter() here because the interrupt handler lifetime
* is tied to the device itself, and not to the DRM device.
*
* So when the device goes away, one of the first things we do
* is unregister the interrupt handler, and only then unregister
* the DRM device. drm_dev_enter() would thus always succeed if
* we are here.
*/
lockdep_assert_held(&vc4_hdmi->hw_lock);
cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD;
cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN;
HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
return IRQ_WAKE_THREAD;
}
static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
irqreturn_t ret;
spin_lock(&vc4_hdmi->hw_lock);
ret = vc4_cec_irq_handler_tx_bare_locked(vc4_hdmi);
spin_unlock(&vc4_hdmi->hw_lock);
return ret;
}
static irqreturn_t vc4_cec_irq_handler_rx_bare_locked(struct vc4_hdmi *vc4_hdmi)
{
u32 cntrl1;
lockdep_assert_held(&vc4_hdmi->hw_lock);
/*
* We don't need to protect the register access using
* drm_dev_enter() here because the interrupt handler lifetime
* is tied to the device itself, and not to the DRM device.
*
* So when the device goes away, one of the first things we do
* is unregister the interrupt handler, and only then unregister
* the DRM device. drm_dev_enter() would thus always succeed if
* we are here.
*/
vc4_hdmi->cec_rx_msg.len = 0;
cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
vc4_cec_read_msg(vc4_hdmi, cntrl1);
cntrl1 |= VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
cntrl1 &= ~VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
return IRQ_WAKE_THREAD;
}
static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
irqreturn_t ret;
spin_lock(&vc4_hdmi->hw_lock);
ret = vc4_cec_irq_handler_rx_bare_locked(vc4_hdmi);
spin_unlock(&vc4_hdmi->hw_lock);
return ret;
}
static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
u32 stat = HDMI_READ(HDMI_CEC_CPU_STATUS);
irqreturn_t ret;
u32 cntrl5;
/*
* We don't need to protect the register access using
* drm_dev_enter() here because the interrupt handler lifetime
* is tied to the device itself, and not to the DRM device.
*
* So when the device goes away, one of the first things we do
* is unregister the interrupt handler, and only then unregister
* the DRM device. drm_dev_enter() would thus always succeed if
* we are here.
*/
if (!(stat & VC4_HDMI_CPU_CEC))
return IRQ_NONE;
spin_lock(&vc4_hdmi->hw_lock);
cntrl5 = HDMI_READ(HDMI_CEC_CNTRL_5);
vc4_hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT;
if (vc4_hdmi->cec_irq_was_rx)
ret = vc4_cec_irq_handler_rx_bare_locked(vc4_hdmi);
else
ret = vc4_cec_irq_handler_tx_bare_locked(vc4_hdmi);
HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC);
spin_unlock(&vc4_hdmi->hw_lock);
return ret;
}
static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
struct drm_device *drm = vc4_hdmi->connector.dev;
/* clock period in microseconds */
const u32 usecs = 1000000 / CEC_CLOCK_FREQ;
unsigned long flags;
u32 val;
int ret;
int idx;
if (!drm_dev_enter(drm, &idx))
/*
* We can't return an error code, because the CEC
* framework will emit WARN_ON messages at unbind
* otherwise.
*/
return 0;
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret) {
drm_dev_exit(idx);
return ret;
}
mutex_lock(&vc4_hdmi->mutex);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
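/*
* The CNT_TO_* fields below are expressed in CEC clock ticks of
* 'usecs' microseconds each: e.g. the 4700 us timeout becomes
* 4700 / usecs ticks at CEC_CLOCK_FREQ.
*/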
val = HDMI_READ(HDMI_CEC_CNTRL_5);
val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET |
VC4_HDMI_CEC_CNT_TO_4700_US_MASK |
VC4_HDMI_CEC_CNT_TO_4500_US_MASK);
val |= ((4700 / usecs) << VC4_HDMI_CEC_CNT_TO_4700_US_SHIFT) |
((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT);
HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
HDMI_WRITE(HDMI_CEC_CNTRL_5, val);
HDMI_WRITE(HDMI_CEC_CNTRL_2,
((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) |
((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) |
((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) |
((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) |
((400 / usecs) << VC4_HDMI_CEC_CNT_TO_400_US_SHIFT));
HDMI_WRITE(HDMI_CEC_CNTRL_3,
((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) |
((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) |
((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) |
((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT));
HDMI_WRITE(HDMI_CEC_CNTRL_4,
((4300 / usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) |
((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) |
((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) |
((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT));
if (!vc4_hdmi->variant->external_irq_controller)
HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
mutex_unlock(&vc4_hdmi->mutex);
drm_dev_exit(idx);
return 0;
}
static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
if (!drm_dev_enter(drm, &idx))
/*
* We can't return an error code, because the CEC
* framework will emit WARN_ON messages at unbind
* otherwise.
*/
return 0;
mutex_lock(&vc4_hdmi->mutex);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
if (!vc4_hdmi->variant->external_irq_controller)
HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
HDMI_WRITE(HDMI_CEC_CNTRL_5, HDMI_READ(HDMI_CEC_CNTRL_5) |
VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
mutex_unlock(&vc4_hdmi->mutex);
pm_runtime_put(&vc4_hdmi->pdev->dev);
drm_dev_exit(idx);
return 0;
}
static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
if (enable)
return vc4_hdmi_cec_enable(adap);
else
return vc4_hdmi_cec_disable(adap);
}
static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
if (!drm_dev_enter(drm, &idx))
/*
* We can't return an error code, because the CEC
* framework will emit WARN_ON messages at unbind
* otherwise.
*/
return 0;
mutex_lock(&vc4_hdmi->mutex);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_CEC_CNTRL_1,
(HDMI_READ(HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) |
(log_addr & 0xf) << VC4_HDMI_CEC_ADDR_SHIFT);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
mutex_unlock(&vc4_hdmi->mutex);
drm_dev_exit(idx);
return 0;
}
static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
struct drm_device *dev = vc4_hdmi->connector.dev;
unsigned long flags;
u32 val;
unsigned int i;
int idx;
if (!drm_dev_enter(dev, &idx))
return -ENODEV;
if (msg->len > 16) {
drm_err(dev, "Attempting to transmit too much data (%d)\n", msg->len);
drm_dev_exit(idx);
return -ENOMEM;
}
mutex_lock(&vc4_hdmi->mutex);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
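/*
* Pack the message four bytes per 32-bit TX data register, LSB
* first, mirroring the RX layout.
*/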
for (i = 0; i < msg->len; i += 4)
HDMI_WRITE(HDMI_CEC_TX_DATA_1 + (i >> 2),
(msg->msg[i]) |
(msg->msg[i + 1] << 8) |
(msg->msg[i + 2] << 16) |
(msg->msg[i + 3] << 24));
val = HDMI_READ(HDMI_CEC_CNTRL_1);
val &= ~VC4_HDMI_CEC_START_XMIT_BEGIN;
HDMI_WRITE(HDMI_CEC_CNTRL_1, val);
val &= ~VC4_HDMI_CEC_MESSAGE_LENGTH_MASK;
val |= (msg->len - 1) << VC4_HDMI_CEC_MESSAGE_LENGTH_SHIFT;
val |= VC4_HDMI_CEC_START_XMIT_BEGIN;
HDMI_WRITE(HDMI_CEC_CNTRL_1, val);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
mutex_unlock(&vc4_hdmi->mutex);
drm_dev_exit(idx);
return 0;
}
static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
.adap_enable = vc4_hdmi_cec_adap_enable,
.adap_log_addr = vc4_hdmi_cec_adap_log_addr,
.adap_transmit = vc4_hdmi_cec_adap_transmit,
};
static void vc4_hdmi_cec_release(void *ptr)
{
struct vc4_hdmi *vc4_hdmi = ptr;
cec_unregister_adapter(vc4_hdmi->cec_adap);
vc4_hdmi->cec_adap = NULL;
}
static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
{
struct cec_connector_info conn_info;
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
int ret;
if (!of_property_present(dev->of_node, "interrupts")) {
dev_warn(dev, "'interrupts' DT property is missing, no CEC\n");
return 0;
}
vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
vc4_hdmi,
vc4_hdmi->variant->card_name,
CEC_CAP_DEFAULTS |
CEC_CAP_CONNECTOR_INFO, 1);
ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap);
if (ret < 0)
return ret;
cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector);
cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
if (vc4_hdmi->variant->external_irq_controller) {
ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-rx"),
vc4_cec_irq_handler_rx_bare,
vc4_cec_irq_handler_rx_thread, 0,
"vc4 hdmi cec rx", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-tx"),
vc4_cec_irq_handler_tx_bare,
vc4_cec_irq_handler_tx_thread, 0,
"vc4 hdmi cec tx", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
} else {
ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
vc4_cec_irq_handler,
vc4_cec_irq_handler_thread, 0,
"vc4 hdmi cec", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
}
ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
if (ret < 0)
goto err_delete_cec_adap;
/*
* NOTE: Strictly speaking, we should probably use a DRM-managed
* registration here so that the CEC adapter isn't removed while
* the DRM driver still has users.
*
* However, the CEC framework already cleans up the CEC adapter
* only when the last user has closed its file descriptor, so we
* don't need to handle it in DRM.
*
* By the time the device-managed hook is executed, we will have
* given up our reference to the CEC adapter and therefore don't
* really care when it's actually freed.
*
* There's still a problematic sequence: userspace keeps a
* handle on the CEC adapter but, for some reason, not on the
* DRM device. In such a case, our vc4_hdmi structure will be
* freed, but the cec_adapter structure will have a dangling
* pointer to what used to be our HDMI controller. If we get a
* CEC call at that moment, we could end up with a
* use-after-free. Fortunately, the CEC framework already
* handles this too, by calling cec_is_registered() in
* cec_ioctl() and cec_poll().
*/
ret = devm_add_action_or_reset(dev, vc4_hdmi_cec_release, vc4_hdmi);
if (ret)
return ret;
return 0;
err_delete_cec_adap:
cec_delete_adapter(vc4_hdmi->cec_adap);
return ret;
}
#else
static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
{
return 0;
}
#endif
static void vc4_hdmi_free_regset(struct drm_device *drm, void *ptr)
{
struct debugfs_reg32 *regs = ptr;
kfree(regs);
}
static int vc4_hdmi_build_regset(struct drm_device *drm,
struct vc4_hdmi *vc4_hdmi,
struct debugfs_regset32 *regset,
enum vc4_hdmi_regs reg)
{
const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
struct debugfs_reg32 *regs, *new_regs;
unsigned int count = 0;
unsigned int i;
int ret;
regs = kcalloc(variant->num_registers, sizeof(*regs),
GFP_KERNEL);
if (!regs)
return -ENOMEM;
for (i = 0; i < variant->num_registers; i++) {
const struct vc4_hdmi_register *field = &variant->registers[i];
if (field->reg != reg)
continue;
regs[count].name = field->name;
regs[count].offset = field->offset;
count++;
}
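/* Shrink the worst-case allocation down to the entries actually used. */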
new_regs = krealloc(regs, count * sizeof(*regs), GFP_KERNEL);
if (!new_regs) {
kfree(regs);
return -ENOMEM;
}
regset->base = __vc4_hdmi_get_field_base(vc4_hdmi, reg);
regset->regs = new_regs;
regset->nregs = count;
ret = drmm_add_action_or_reset(drm, vc4_hdmi_free_regset, new_regs);
if (ret)
return ret;
return 0;
}
static int vc4_hdmi_init_resources(struct drm_device *drm,
struct vc4_hdmi *vc4_hdmi)
{
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
int ret;
vc4_hdmi->hdmicore_regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(vc4_hdmi->hdmicore_regs))
return PTR_ERR(vc4_hdmi->hdmicore_regs);
vc4_hdmi->hd_regs = vc4_ioremap_regs(pdev, 1);
if (IS_ERR(vc4_hdmi->hd_regs))
return PTR_ERR(vc4_hdmi->hd_regs);
ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
if (ret)
return ret;
ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
if (ret)
return ret;
vc4_hdmi->pixel_clock = devm_clk_get(dev, "pixel");
if (IS_ERR(vc4_hdmi->pixel_clock)) {
ret = PTR_ERR(vc4_hdmi->pixel_clock);
if (ret != -EPROBE_DEFER)
DRM_ERROR("Failed to get pixel clock\n");
return ret;
}
vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
if (IS_ERR(vc4_hdmi->hsm_clock)) {
DRM_ERROR("Failed to get HDMI state machine clock\n");
return PTR_ERR(vc4_hdmi->hsm_clock);
}
vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock;
vc4_hdmi->cec_clock = vc4_hdmi->hsm_clock;
return 0;
}
static int vc5_hdmi_init_resources(struct drm_device *drm,
struct vc4_hdmi *vc4_hdmi)
{
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
struct resource *res;
int ret;
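/*
* Unlike the VC4 variant, this generation splits the controller
* across several small register ranges; map each one by name.
*/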
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi");
if (!res)
return -ENODEV;
vc4_hdmi->hdmicore_regs = devm_ioremap(dev, res->start,
resource_size(res));
if (!vc4_hdmi->hdmicore_regs)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hd");
if (!res)
return -ENODEV;
vc4_hdmi->hd_regs = devm_ioremap(dev, res->start, resource_size(res));
if (!vc4_hdmi->hd_regs)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cec");
if (!res)
return -ENODEV;
vc4_hdmi->cec_regs = devm_ioremap(dev, res->start, resource_size(res));
if (!vc4_hdmi->cec_regs)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csc");
if (!res)
return -ENODEV;
vc4_hdmi->csc_regs = devm_ioremap(dev, res->start, resource_size(res));
if (!vc4_hdmi->csc_regs)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvp");
if (!res)
return -ENODEV;
vc4_hdmi->dvp_regs = devm_ioremap(dev, res->start, resource_size(res));
if (!vc4_hdmi->dvp_regs)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
if (!res)
return -ENODEV;
vc4_hdmi->phy_regs = devm_ioremap(dev, res->start, resource_size(res));
if (!vc4_hdmi->phy_regs)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "packet");
if (!res)
return -ENODEV;
vc4_hdmi->ram_regs = devm_ioremap(dev, res->start, resource_size(res));
if (!vc4_hdmi->ram_regs)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rm");
if (!res)
return -ENODEV;
vc4_hdmi->rm_regs = devm_ioremap(dev, res->start, resource_size(res));
if (!vc4_hdmi->rm_regs)
return -ENOMEM;
vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
if (IS_ERR(vc4_hdmi->hsm_clock)) {
DRM_ERROR("Failed to get HDMI state machine clock\n");
return PTR_ERR(vc4_hdmi->hsm_clock);
}
vc4_hdmi->pixel_bvb_clock = devm_clk_get(dev, "bvb");
if (IS_ERR(vc4_hdmi->pixel_bvb_clock)) {
DRM_ERROR("Failed to get pixel bvb clock\n");
return PTR_ERR(vc4_hdmi->pixel_bvb_clock);
}
vc4_hdmi->audio_clock = devm_clk_get(dev, "audio");
if (IS_ERR(vc4_hdmi->audio_clock)) {
DRM_ERROR("Failed to get audio clock\n");
return PTR_ERR(vc4_hdmi->audio_clock);
}
vc4_hdmi->cec_clock = devm_clk_get(dev, "cec");
if (IS_ERR(vc4_hdmi->cec_clock)) {
DRM_ERROR("Failed to get CEC clock\n");
return PTR_ERR(vc4_hdmi->cec_clock);
}
vc4_hdmi->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(vc4_hdmi->reset)) {
DRM_ERROR("Failed to get HDMI reset line\n");
return PTR_ERR(vc4_hdmi->reset);
}
ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
if (ret)
return ret;
ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
if (ret)
return ret;
ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->cec_regset, VC5_CEC);
if (ret)
return ret;
ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->csc_regset, VC5_CSC);
if (ret)
return ret;
ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->dvp_regset, VC5_DVP);
if (ret)
return ret;
ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->phy_regset, VC5_PHY);
if (ret)
return ret;
ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->ram_regset, VC5_RAM);
if (ret)
return ret;
ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->rm_regset, VC5_RM);
if (ret)
return ret;
return 0;
}
static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
clk_disable_unprepare(vc4_hdmi->hsm_clock);
return 0;
}
static int vc4_hdmi_runtime_resume(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
unsigned long __maybe_unused flags;
u32 __maybe_unused value;
unsigned long rate;
int ret;
ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
if (ret)
return ret;
/*
* Whenever the Raspberry Pi boots without an HDMI monitor
* plugged in, the firmware won't have initialized the HSM clock
* rate and it will be reported as 0.
*
* If we try to access a register of the controller in such a
* case, it will lead to a silent CPU stall. Let's make sure we
* prevent such a case.
*/
rate = clk_get_rate(vc4_hdmi->hsm_clock);
if (!rate) {
ret = -EINVAL;
goto err_disable_clk;
}
if (vc4_hdmi->variant->reset)
vc4_hdmi->variant->reset(vc4_hdmi);
#ifdef CONFIG_DRM_VC4_HDMI_CEC
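/*
* Restore the CEC defaults that the controller reset above may
* have cleared: no logical address claimed, clock divider
* reprogrammed and, without an external interrupt controller,
* all CEC interrupts masked.
*/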
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
value = HDMI_READ(HDMI_CEC_CNTRL_1);
/* Set the logical address to Unregistered */
value |= VC4_HDMI_CEC_ADDR_MASK;
HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (!vc4_hdmi->variant->external_irq_controller) {
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
#endif
return 0;
err_disable_clk:
clk_disable_unprepare(vc4_hdmi->hsm_clock);
return ret;
}
static void vc4_hdmi_put_ddc_device(void *ptr)
{
struct vc4_hdmi *vc4_hdmi = ptr;
put_device(&vc4_hdmi->ddc->dev);
}
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_hdmi *vc4_hdmi;
struct drm_encoder *encoder;
struct device_node *ddc_node;
int ret;
vc4_hdmi = drmm_kzalloc(drm, sizeof(*vc4_hdmi), GFP_KERNEL);
if (!vc4_hdmi)
return -ENOMEM;
ret = drmm_mutex_init(drm, &vc4_hdmi->mutex);
if (ret)
return ret;
spin_lock_init(&vc4_hdmi->hw_lock);
INIT_DELAYED_WORK(&vc4_hdmi->scrambling_work, vc4_hdmi_scrambling_wq);
dev_set_drvdata(dev, vc4_hdmi);
encoder = &vc4_hdmi->encoder.base;
vc4_hdmi->encoder.type = variant->encoder_type;
vc4_hdmi->encoder.pre_crtc_configure = vc4_hdmi_encoder_pre_crtc_configure;
vc4_hdmi->encoder.pre_crtc_enable = vc4_hdmi_encoder_pre_crtc_enable;
vc4_hdmi->encoder.post_crtc_enable = vc4_hdmi_encoder_post_crtc_enable;
vc4_hdmi->encoder.post_crtc_disable = vc4_hdmi_encoder_post_crtc_disable;
vc4_hdmi->encoder.post_crtc_powerdown = vc4_hdmi_encoder_post_crtc_powerdown;
vc4_hdmi->pdev = pdev;
vc4_hdmi->variant = variant;
/*
* Since we don't know the state of the controller and its
* display (if any), let's assume it's always enabled.
* vc4_hdmi_disable_scrambling() will thus run at boot, make
* sure it's disabled, and avoid any inconsistency.
*/
if (variant->max_pixel_clock > HDMI_14_MAX_TMDS_CLK)
vc4_hdmi->scdc_enabled = true;
ret = variant->init_resources(drm, vc4_hdmi);
if (ret)
return ret;
ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
if (!ddc_node) {
DRM_ERROR("Failed to find ddc node in device tree\n");
return -ENODEV;
}
vc4_hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
of_node_put(ddc_node);
if (!vc4_hdmi->ddc) {
DRM_DEBUG("Failed to get ddc i2c adapter by node\n");
return -EPROBE_DEFER;
}
ret = devm_add_action_or_reset(dev, vc4_hdmi_put_ddc_device, vc4_hdmi);
if (ret)
return ret;
/* Only use the GPIO HPD pin if present in the DT, otherwise
* we'll use the HDMI core's register.
*/
vc4_hdmi->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
if (IS_ERR(vc4_hdmi->hpd_gpio))
return PTR_ERR(vc4_hdmi->hpd_gpio);
vc4_hdmi->disable_wifi_frequencies =
of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
/*
* We need to have the device powered up at this point to call
* our reset hook and for the CEC init.
*/
ret = pm_runtime_resume_and_get(dev);
if (ret)
return ret;
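/*
* If the firmware left this controller enabled (e.g. to show a
* boot splash), take our own references on its clocks so the
* clock framework doesn't gate them underneath it.
*/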
if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
HDMI_READ(HDMI_VID_CTL) & VC4_HD_VID_CTL_ENABLE) {
clk_prepare_enable(vc4_hdmi->pixel_clock);
clk_prepare_enable(vc4_hdmi->hsm_clock);
clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
}
ret = drmm_encoder_init(drm, encoder,
&vc4_hdmi_encoder_funcs,
DRM_MODE_ENCODER_TMDS,
NULL);
if (ret)
goto err_put_runtime_pm;
drm_encoder_helper_add(encoder, &vc4_hdmi_encoder_helper_funcs);
ret = vc4_hdmi_connector_init(drm, vc4_hdmi);
if (ret)
goto err_put_runtime_pm;
ret = vc4_hdmi_hotplug_init(vc4_hdmi);
if (ret)
goto err_put_runtime_pm;
ret = vc4_hdmi_cec_init(vc4_hdmi);
if (ret)
goto err_put_runtime_pm;
ret = vc4_hdmi_audio_init(vc4_hdmi);
if (ret)
goto err_put_runtime_pm;
pm_runtime_put_sync(dev);
return 0;
err_put_runtime_pm:
pm_runtime_put_sync(dev);
return ret;
}
static const struct component_ops vc4_hdmi_ops = {
.bind = vc4_hdmi_bind,
};
static int vc4_hdmi_dev_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &vc4_hdmi_ops);
}
static void vc4_hdmi_dev_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &vc4_hdmi_ops);
}
static const struct vc4_hdmi_variant bcm2835_variant = {
.encoder_type = VC4_ENCODER_TYPE_HDMI0,
.debugfs_name = "hdmi_regs",
.card_name = "vc4-hdmi",
.max_pixel_clock = 162000000,
.registers = vc4_hdmi_fields,
.num_registers = ARRAY_SIZE(vc4_hdmi_fields),
.init_resources = vc4_hdmi_init_resources,
.csc_setup = vc4_hdmi_csc_setup,
.reset = vc4_hdmi_reset,
.set_timings = vc4_hdmi_set_timings,
.phy_init = vc4_hdmi_phy_init,
.phy_disable = vc4_hdmi_phy_disable,
.phy_rng_enable = vc4_hdmi_phy_rng_enable,
.phy_rng_disable = vc4_hdmi_phy_rng_disable,
.channel_map = vc4_hdmi_channel_map,
.supports_hdr = false,
};
static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
.encoder_type = VC4_ENCODER_TYPE_HDMI0,
.debugfs_name = "hdmi0_regs",
.card_name = "vc4-hdmi-0",
.max_pixel_clock = 600000000,
.registers = vc5_hdmi_hdmi0_fields,
.num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields),
.phy_lane_mapping = {
PHY_LANE_0,
PHY_LANE_1,
PHY_LANE_2,
PHY_LANE_CK,
},
.unsupported_odd_h_timings = true,
.external_irq_controller = true,
.init_resources = vc5_hdmi_init_resources,
.csc_setup = vc5_hdmi_csc_setup,
.reset = vc5_hdmi_reset,
.set_timings = vc5_hdmi_set_timings,
.phy_init = vc5_hdmi_phy_init,
.phy_disable = vc5_hdmi_phy_disable,
.phy_rng_enable = vc5_hdmi_phy_rng_enable,
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
.supports_hdr = true,
.hp_detect = vc5_hdmi_hp_detect,
};
static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.encoder_type = VC4_ENCODER_TYPE_HDMI1,
.debugfs_name = "hdmi1_regs",
.card_name = "vc4-hdmi-1",
.max_pixel_clock = HDMI_14_MAX_TMDS_CLK,
.registers = vc5_hdmi_hdmi1_fields,
.num_registers = ARRAY_SIZE(vc5_hdmi_hdmi1_fields),
.phy_lane_mapping = {
PHY_LANE_1,
PHY_LANE_0,
PHY_LANE_CK,
PHY_LANE_2,
},
.unsupported_odd_h_timings = true,
.external_irq_controller = true,
.init_resources = vc5_hdmi_init_resources,
.csc_setup = vc5_hdmi_csc_setup,
.reset = vc5_hdmi_reset,
.set_timings = vc5_hdmi_set_timings,
.phy_init = vc5_hdmi_phy_init,
.phy_disable = vc5_hdmi_phy_disable,
.phy_rng_enable = vc5_hdmi_phy_rng_enable,
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
.supports_hdr = true,
.hp_detect = vc5_hdmi_hp_detect,
};
static const struct of_device_id vc4_hdmi_dt_match[] = {
{ .compatible = "brcm,bcm2835-hdmi", .data = &bcm2835_variant },
{ .compatible = "brcm,bcm2711-hdmi0", .data = &bcm2711_hdmi0_variant },
{ .compatible = "brcm,bcm2711-hdmi1", .data = &bcm2711_hdmi1_variant },
{}
};
static const struct dev_pm_ops vc4_hdmi_pm_ops = {
SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
vc4_hdmi_runtime_resume,
NULL)
};
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
.remove_new = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
.pm = &vc4_hdmi_pm_ops,
},
};
| linux-master | drivers/gpu/drm/vc4/vc4_hdmi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2018 Broadcom
*
* Authors:
* Eric Anholt <[email protected]>
* Boris Brezillon <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
/* Base address of the output. Raster formats must be 4-byte aligned,
* T and LT must be 16-byte aligned or maybe utile-aligned (docs are
* inconsistent, but probably utile).
*/
#define TXP_DST_PTR 0x00
/* Pitch in bytes for raster images, 16-byte aligned. For tiled, it's
* the width in tiles.
*/
#define TXP_DST_PITCH 0x04
/* For T-tiled images, DST_PITCH should be the number of tiles wide,
* shifted up.
*/
# define TXP_T_TILE_WIDTH_SHIFT 7
/* For LT-tiled images, DST_PITCH should be the number of utiles wide,
* shifted up.
*/
# define TXP_LT_TILE_WIDTH_SHIFT 4
/* Pre-rotation width/height of the image. Must match HVS config.
*
* If TFORMAT is set, the width limit is 1920 for 32-bit formats and
* 3840 for 16-bit formats, and width/height must be tile- or
* utile-aligned as appropriate. If transposing (rotating), width is
* limited to 1920.
*
* Height is limited to various numbers between 4088 and 4095. I'd
* just use 4088 to be safe.
*/
#define TXP_DIM 0x08
# define TXP_HEIGHT_SHIFT 16
# define TXP_HEIGHT_MASK GENMASK(31, 16)
# define TXP_WIDTH_SHIFT 0
# define TXP_WIDTH_MASK GENMASK(15, 0)
#define TXP_DST_CTRL 0x0c
/* These bits are set to 0x54 */
#define TXP_PILOT_SHIFT 24
#define TXP_PILOT_MASK GENMASK(31, 24)
/* Bits 22-23 are set to 0x01 */
#define TXP_VERSION_SHIFT 22
#define TXP_VERSION_MASK GENMASK(23, 22)
/* Powers down the internal memory. */
# define TXP_POWERDOWN BIT(21)
/* Enables storing the alpha component in 8888/4444, instead of
* filling with ~ALPHA_INVERT.
*/
# define TXP_ALPHA_ENABLE BIT(20)
/* 4 bits, each enables stores for a channel in each set of 4 bytes.
* Set to 0xf for normal operation.
*/
# define TXP_BYTE_ENABLE_SHIFT 16
# define TXP_BYTE_ENABLE_MASK GENMASK(19, 16)
/* Debug: Generate VSTART again at EOF. */
# define TXP_VSTART_AT_EOF BIT(15)
/* Debug: Terminate the current frame immediately. Stops AXI
* writes.
*/
# define TXP_ABORT BIT(14)
# define TXP_DITHER BIT(13)
/* Inverts alpha if TXP_ALPHA_ENABLE, chooses fill value for
* !TXP_ALPHA_ENABLE.
*/
# define TXP_ALPHA_INVERT BIT(12)
/* Note: I've listed the channels here in high bit (in byte 3/2/1) to
* low bit (in byte 0) order.
*/
# define TXP_FORMAT_SHIFT 8
# define TXP_FORMAT_MASK GENMASK(11, 8)
# define TXP_FORMAT_ABGR4444 0
# define TXP_FORMAT_ARGB4444 1
# define TXP_FORMAT_BGRA4444 2
# define TXP_FORMAT_RGBA4444 3
# define TXP_FORMAT_BGR565 6
# define TXP_FORMAT_RGB565 7
/* 888s are non-rotated, raster-only */
# define TXP_FORMAT_BGR888 8
# define TXP_FORMAT_RGB888 9
# define TXP_FORMAT_ABGR8888 12
# define TXP_FORMAT_ARGB8888 13
# define TXP_FORMAT_BGRA8888 14
# define TXP_FORMAT_RGBA8888 15
/* If TFORMAT is set, generates LT instead of T format. */
# define TXP_LINEAR_UTILE BIT(7)
/* Rotate output by 90 degrees. */
# define TXP_TRANSPOSE BIT(6)
/* Generate a tiled format for V3D. */
# define TXP_TFORMAT BIT(5)
/* Generates some undefined test mode output. */
# define TXP_TEST_MODE BIT(4)
/* Request odd field from HVS. */
# define TXP_FIELD BIT(3)
/* Raise interrupt when idle. */
# define TXP_EI BIT(2)
/* Set when generating a frame, clears when idle. */
# define TXP_BUSY BIT(1)
/* Starts a frame. Self-clearing. */
# define TXP_GO BIT(0)
/* Number of lines received and committed to memory. */
#define TXP_PROGRESS 0x10
#define TXP_READ(offset) \
({ \
kunit_fail_current_test("Accessing a register in a unit test!\n"); \
readl(txp->regs + (offset)); \
})
#define TXP_WRITE(offset, val) \
do { \
kunit_fail_current_test("Accessing a register in a unit test!\n"); \
writel(val, txp->regs + (offset)); \
} while (0)
struct vc4_txp {
struct vc4_crtc base;
struct platform_device *pdev;
struct vc4_encoder encoder;
struct drm_writeback_connector connector;
void __iomem *regs;
};
#define encoder_to_vc4_txp(_encoder) \
container_of_const(_encoder, struct vc4_txp, encoder.base)
#define connector_to_vc4_txp(_connector) \
container_of_const(_connector, struct vc4_txp, connector.base)
static const struct debugfs_reg32 txp_regs[] = {
VC4_REG32(TXP_DST_PTR),
VC4_REG32(TXP_DST_PITCH),
VC4_REG32(TXP_DIM),
VC4_REG32(TXP_DST_CTRL),
VC4_REG32(TXP_PROGRESS),
};
static int vc4_txp_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
return drm_add_modes_noedid(connector, dev->mode_config.max_width,
dev->mode_config.max_height);
}
static enum drm_mode_status
vc4_txp_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
int w = mode->hdisplay, h = mode->vdisplay;
if (w < mode_config->min_width || w > mode_config->max_width)
return MODE_BAD_HVALUE;
if (h < mode_config->min_height || h > mode_config->max_height)
return MODE_BAD_VVALUE;
return MODE_OK;
}
static const u32 drm_fmts[] = {
DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_BGRA8888,
};
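/*
* One-to-one mapping with drm_fmts above. The XRGB/XBGR entries
* reuse the ARGB/ABGR TXP formats: with TXP_ALPHA_ENABLE left
* clear, the hardware fills the padding byte instead (see
* TXP_ALPHA_INVERT).
*/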
static const u32 txp_fmts[] = {
TXP_FORMAT_RGB888,
TXP_FORMAT_BGR888,
TXP_FORMAT_ARGB8888,
TXP_FORMAT_ABGR8888,
TXP_FORMAT_ARGB8888,
TXP_FORMAT_ABGR8888,
TXP_FORMAT_RGBA8888,
TXP_FORMAT_BGRA8888,
TXP_FORMAT_RGBA8888,
TXP_FORMAT_BGRA8888,
};
static void vc4_txp_armed(struct drm_crtc_state *state)
{
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
vc4_state->txp_armed = true;
}
static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
struct drm_atomic_state *state)
{
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
struct drm_framebuffer *fb;
int i;
conn_state = drm_atomic_get_new_connector_state(state, conn);
if (!conn_state->writeback_job)
return 0;
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
fb = conn_state->writeback_job->fb;
if (fb->width != crtc_state->mode.hdisplay ||
fb->height != crtc_state->mode.vdisplay) {
DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
fb->width, fb->height);
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) {
if (fb->format->format == drm_fmts[i])
break;
}
if (i == ARRAY_SIZE(drm_fmts))
return -EINVAL;
/* Pitch must be aligned on 16 bytes. */
if (fb->pitches[0] & GENMASK(3, 0))
return -EINVAL;
vc4_txp_armed(crtc_state);
return 0;
}
static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
struct drm_atomic_state *state)
{
struct drm_device *drm = conn->dev;
struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state,
conn);
struct vc4_txp *txp = connector_to_vc4_txp(conn);
struct drm_gem_dma_object *gem;
struct drm_display_mode *mode;
struct drm_framebuffer *fb;
u32 ctrl;
int idx;
int i;
if (WARN_ON(!conn_state->writeback_job))
return;
mode = &conn_state->crtc->state->adjusted_mode;
fb = conn_state->writeback_job->fb;
for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) {
if (fb->format->format == drm_fmts[i])
break;
}
if (WARN_ON(i == ARRAY_SIZE(drm_fmts)))
return;
ctrl = TXP_GO | TXP_EI |
VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
if (fb->format->has_alpha)
ctrl |= TXP_ALPHA_ENABLE;
else
/*
* If TXP_ALPHA_ENABLE isn't set and TXP_ALPHA_INVERT is, the
* hardware will force the output padding to be 0xff.
*/
ctrl |= TXP_ALPHA_INVERT;
if (!drm_dev_enter(drm, &idx))
return;
gem = drm_fb_dma_get_gem_obj(fb, 0);
TXP_WRITE(TXP_DST_PTR, gem->dma_addr + fb->offsets[0]);
TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]);
TXP_WRITE(TXP_DIM,
VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) |
VC4_SET_FIELD(mode->vdisplay, TXP_HEIGHT));
TXP_WRITE(TXP_DST_CTRL, ctrl);
drm_writeback_queue_job(&txp->connector, conn_state);
drm_dev_exit(idx);
}
static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = {
.get_modes = vc4_txp_connector_get_modes,
.mode_valid = vc4_txp_connector_mode_valid,
.atomic_check = vc4_txp_connector_atomic_check,
.atomic_commit = vc4_txp_connector_atomic_commit,
};
static enum drm_connector_status
vc4_txp_connector_detect(struct drm_connector *connector, bool force)
{
return connector_status_connected;
}
static const struct drm_connector_funcs vc4_txp_connector_funcs = {
.detect = vc4_txp_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
{
struct drm_device *drm = encoder->dev;
struct vc4_txp *txp = encoder_to_vc4_txp(encoder);
int idx;
if (!drm_dev_enter(drm, &idx))
return;
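/*
* Abort the in-flight frame and give the TXP up to a second to
* go idle before powering its memory down.
*/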
if (TXP_READ(TXP_DST_CTRL) & TXP_BUSY) {
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
TXP_WRITE(TXP_DST_CTRL, TXP_ABORT);
while (TXP_READ(TXP_DST_CTRL) & TXP_BUSY &&
time_before(jiffies, timeout))
cpu_relax();
WARN_ON(TXP_READ(TXP_DST_CTRL) & TXP_BUSY);
}
TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
drm_dev_exit(idx);
}
static const struct drm_encoder_helper_funcs vc4_txp_encoder_helper_funcs = {
.disable = vc4_txp_encoder_disable,
};
static int vc4_txp_enable_vblank(struct drm_crtc *crtc)
{
return 0;
}
static void vc4_txp_disable_vblank(struct drm_crtc *crtc) {}
static const struct drm_crtc_funcs vc4_txp_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = vc4_page_flip,
.reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
.enable_vblank = vc4_txp_enable_vblank,
.disable_vblank = vc4_txp_disable_vblank,
.late_register = vc4_crtc_late_register,
};
static int vc4_txp_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
int ret;
ret = vc4_hvs_atomic_check(crtc, state);
if (ret)
return ret;
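/*
* Writeback jobs complete from the TXP interrupt rather than a
* real vblank, so let the atomic helpers fake the vblank event.
*/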
crtc_state->no_vblank = true;
return 0;
}
static void vc4_txp_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
drm_crtc_vblank_on(crtc);
vc4_hvs_atomic_enable(crtc, state);
}
static void vc4_txp_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
/* Disable vblank irq handling before crtc is disabled. */
drm_crtc_vblank_off(crtc);
vc4_hvs_atomic_disable(crtc, state);
/*
* Make sure we issue a vblank event after disabling the CRTC if
* someone was waiting on it.
*/
if (crtc->state->event) {
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
static const struct drm_crtc_helper_funcs vc4_txp_crtc_helper_funcs = {
.atomic_check = vc4_txp_atomic_check,
.atomic_begin = vc4_hvs_atomic_begin,
.atomic_flush = vc4_hvs_atomic_flush,
.atomic_enable = vc4_txp_atomic_enable,
.atomic_disable = vc4_txp_atomic_disable,
};
static irqreturn_t vc4_txp_interrupt(int irq, void *data)
{
struct vc4_txp *txp = data;
struct vc4_crtc *vc4_crtc = &txp->base;
/*
* We don't need to protect the register access using
* drm_dev_enter() here because the interrupt handler lifetime
* is tied to the device itself, and not to the DRM device.
*
* So when the device goes away, one of the first things we do
* is unregister the interrupt handler, and only then unregister
* the DRM device. drm_dev_enter() would thus always succeed if
* we are here.
*/
TXP_WRITE(TXP_DST_CTRL, TXP_READ(TXP_DST_CTRL) & ~TXP_EI);
vc4_crtc_handle_vblank(vc4_crtc);
drm_writeback_signal_completion(&txp->connector, 0);
return IRQ_HANDLED;
}
const struct vc4_crtc_data vc4_txp_crtc_data = {
.name = "txp",
.debugfs_name = "txp_regs",
.hvs_available_channels = BIT(2),
.hvs_output = 2,
};
static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_encoder *vc4_encoder;
struct drm_encoder *encoder;
struct vc4_crtc *vc4_crtc;
struct vc4_txp *txp;
int ret, irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
txp = drmm_kzalloc(drm, sizeof(*txp), GFP_KERNEL);
if (!txp)
return -ENOMEM;
txp->pdev = pdev;
txp->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(txp->regs))
return PTR_ERR(txp->regs);
vc4_crtc = &txp->base;
vc4_crtc->regset.base = txp->regs;
vc4_crtc->regset.regs = txp_regs;
vc4_crtc->regset.nregs = ARRAY_SIZE(txp_regs);
ret = vc4_crtc_init(drm, pdev, vc4_crtc, &vc4_txp_crtc_data,
&vc4_txp_crtc_funcs, &vc4_txp_crtc_helper_funcs, true);
if (ret)
return ret;
vc4_encoder = &txp->encoder;
txp->encoder.type = VC4_ENCODER_TYPE_TXP;
encoder = &vc4_encoder->base;
encoder->possible_crtcs = drm_crtc_mask(&vc4_crtc->base);
drm_encoder_helper_add(encoder, &vc4_txp_encoder_helper_funcs);
ret = drmm_encoder_init(drm, encoder, NULL, DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret)
return ret;
drm_connector_helper_add(&txp->connector.base,
&vc4_txp_connector_helper_funcs);
ret = drm_writeback_connector_init_with_encoder(drm, &txp->connector,
encoder,
&vc4_txp_connector_funcs,
drm_fmts, ARRAY_SIZE(drm_fmts));
if (ret)
return ret;
ret = devm_request_irq(dev, irq, vc4_txp_interrupt, 0,
dev_name(dev), txp);
if (ret)
return ret;
dev_set_drvdata(dev, txp);
return 0;
}
static void vc4_txp_unbind(struct device *dev, struct device *master,
void *data)
{
struct vc4_txp *txp = dev_get_drvdata(dev);
drm_connector_cleanup(&txp->connector.base);
}
static const struct component_ops vc4_txp_ops = {
.bind = vc4_txp_bind,
.unbind = vc4_txp_unbind,
};
static int vc4_txp_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &vc4_txp_ops);
}
static void vc4_txp_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &vc4_txp_ops);
}
static const struct of_device_id vc4_txp_dt_match[] = {
{ .compatible = "brcm,bcm2835-txp" },
{ /* sentinel */ },
};
struct platform_driver vc4_txp_driver = {
.probe = vc4_txp_probe,
.remove_new = vc4_txp_remove,
.driver = {
.name = "vc4_txp",
.of_match_table = vc4_txp_dt_match,
},
};
| linux-master | drivers/gpu/drm/vc4/vc4_txp.c |
/*
* Copyright © 2017 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "vc4_drv.h"
static const char *vc4_fence_get_driver_name(struct dma_fence *fence)
{
return "vc4";
}
static const char *vc4_fence_get_timeline_name(struct dma_fence *fence)
{
return "vc4-v3d";
}
static bool vc4_fence_signaled(struct dma_fence *fence)
{
struct vc4_fence *f = to_vc4_fence(fence);
struct vc4_dev *vc4 = to_vc4_dev(f->dev);
return vc4->finished_seqno >= f->seqno;
}
const struct dma_fence_ops vc4_fence_ops = {
.get_driver_name = vc4_fence_get_driver_name,
.get_timeline_name = vc4_fence_get_timeline_name,
.signaled = vc4_fence_signaled,
};
| linux-master | drivers/gpu/drm/vc4/vc4_fence.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Broadcom
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include "vc4_hdmi.h"
#include "vc4_regs.h"
#include "vc4_hdmi_regs.h"
#define VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB BIT(5)
#define VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB BIT(4)
#define VC4_HDMI_TX_PHY_RESET_CTL_TX_CK_RESET BIT(3)
#define VC4_HDMI_TX_PHY_RESET_CTL_TX_2_RESET BIT(2)
#define VC4_HDMI_TX_PHY_RESET_CTL_TX_1_RESET BIT(1)
#define VC4_HDMI_TX_PHY_RESET_CTL_TX_0_RESET BIT(0)
#define VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN BIT(4)
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_PREEMP_SHIFT 29
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_PREEMP_MASK VC4_MASK(31, 29)
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_MAINDRV_SHIFT 24
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_MAINDRV_MASK VC4_MASK(28, 24)
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_PREEMP_SHIFT 21
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_PREEMP_MASK VC4_MASK(23, 21)
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_MAINDRV_SHIFT 16
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_MAINDRV_MASK VC4_MASK(20, 16)
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_PREEMP_SHIFT 13
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_PREEMP_MASK VC4_MASK(15, 13)
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_MAINDRV_SHIFT 8
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_MAINDRV_MASK VC4_MASK(12, 8)
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_PREEMP_SHIFT 5
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_PREEMP_MASK VC4_MASK(7, 5)
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_MAINDRV_SHIFT 0
#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_MAINDRV_MASK VC4_MASK(4, 0)
#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA2_SHIFT 15
#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA2_MASK VC4_MASK(19, 15)
#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA1_SHIFT 10
#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA1_MASK VC4_MASK(14, 10)
#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA0_SHIFT 5
#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA0_MASK VC4_MASK(9, 5)
#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_CK_SHIFT 0
#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_CK_MASK VC4_MASK(4, 0)
#define VC4_HDMI_TX_PHY_CTL_2_VCO_GAIN_SHIFT 16
#define VC4_HDMI_TX_PHY_CTL_2_VCO_GAIN_MASK VC4_MASK(19, 16)
#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA2_SHIFT 12
#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA2_MASK VC4_MASK(15, 12)
#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA1_SHIFT 8
#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA1_MASK VC4_MASK(11, 8)
#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA0_SHIFT 4
#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA0_MASK VC4_MASK(7, 4)
#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELCK_SHIFT 0
#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELCK_MASK VC4_MASK(3, 0)
#define VC4_HDMI_TX_PHY_CTL_3_RP_SHIFT 17
#define VC4_HDMI_TX_PHY_CTL_3_RP_MASK VC4_MASK(19, 17)
#define VC4_HDMI_TX_PHY_CTL_3_RZ_SHIFT 12
#define VC4_HDMI_TX_PHY_CTL_3_RZ_MASK VC4_MASK(16, 12)
#define VC4_HDMI_TX_PHY_CTL_3_CP1_SHIFT 10
#define VC4_HDMI_TX_PHY_CTL_3_CP1_MASK VC4_MASK(11, 10)
#define VC4_HDMI_TX_PHY_CTL_3_CP_SHIFT 8
#define VC4_HDMI_TX_PHY_CTL_3_CP_MASK VC4_MASK(9, 8)
#define VC4_HDMI_TX_PHY_CTL_3_CZ_SHIFT 6
#define VC4_HDMI_TX_PHY_CTL_3_CZ_MASK VC4_MASK(7, 6)
#define VC4_HDMI_TX_PHY_CTL_3_ICP_SHIFT 0
#define VC4_HDMI_TX_PHY_CTL_3_ICP_MASK VC4_MASK(5, 0)
#define VC4_HDMI_TX_PHY_PLL_CTL_0_MASH11_MODE BIT(13)
#define VC4_HDMI_TX_PHY_PLL_CTL_0_VC_RANGE_EN BIT(12)
#define VC4_HDMI_TX_PHY_PLL_CTL_0_EMULATE_VC_LOW BIT(11)
#define VC4_HDMI_TX_PHY_PLL_CTL_0_EMULATE_VC_HIGH BIT(10)
#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_SEL_SHIFT 9
#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_SEL_MASK VC4_MASK(9, 9)
#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_FB_DIV2 BIT(8)
#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_POST_DIV2 BIT(7)
#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_CONT_EN BIT(6)
#define VC4_HDMI_TX_PHY_PLL_CTL_0_ENA_VCO_CLK BIT(5)
#define VC4_HDMI_TX_PHY_PLL_CTL_1_CPP_SHIFT 16
#define VC4_HDMI_TX_PHY_PLL_CTL_1_CPP_MASK VC4_MASK(27, 16)
#define VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_DELAY_SHIFT 14
#define VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_DELAY_MASK VC4_MASK(15, 14)
#define VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_ENABLE BIT(13)
#define VC4_HDMI_TX_PHY_PLL_CTL_1_POST_RST_SEL_SHIFT 11
#define VC4_HDMI_TX_PHY_PLL_CTL_1_POST_RST_SEL_MASK VC4_MASK(12, 11)
#define VC4_HDMI_TX_PHY_CLK_DIV_VCO_SHIFT 8
#define VC4_HDMI_TX_PHY_CLK_DIV_VCO_MASK VC4_MASK(15, 8)
#define VC4_HDMI_TX_PHY_PLL_CFG_PDIV_SHIFT 0
#define VC4_HDMI_TX_PHY_PLL_CFG_PDIV_MASK VC4_MASK(3, 0)
#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TXCK_OUT_SEL_MASK VC4_MASK(13, 12)
#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TXCK_OUT_SEL_SHIFT 12
#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX2_OUT_SEL_MASK VC4_MASK(9, 8)
#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX2_OUT_SEL_SHIFT 8
#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX1_OUT_SEL_MASK VC4_MASK(5, 4)
#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX1_OUT_SEL_SHIFT 4
#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX0_OUT_SEL_MASK VC4_MASK(1, 0)
#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX0_OUT_SEL_SHIFT 0
#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT_MASK VC4_MASK(27, 0)
#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT_SHIFT 0
#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT_MASK VC4_MASK(27, 0)
#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT_SHIFT 0
#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_STABLE_THRESHOLD_MASK VC4_MASK(31, 16)
#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_STABLE_THRESHOLD_SHIFT 16
#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_HOLD_THRESHOLD_MASK VC4_MASK(15, 0)
#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_HOLD_THRESHOLD_SHIFT 0
#define VC4_HDMI_RM_CONTROL_EN_FREEZE_COUNTERS BIT(19)
#define VC4_HDMI_RM_CONTROL_EN_LOAD_INTEGRATOR BIT(17)
#define VC4_HDMI_RM_CONTROL_FREE_RUN BIT(4)
#define VC4_HDMI_RM_OFFSET_ONLY BIT(31)
#define VC4_HDMI_RM_OFFSET_OFFSET_SHIFT 0
#define VC4_HDMI_RM_OFFSET_OFFSET_MASK VC4_MASK(30, 0)
#define VC4_HDMI_RM_FORMAT_SHIFT_SHIFT 24
#define VC4_HDMI_RM_FORMAT_SHIFT_MASK VC4_MASK(25, 24)
#define OSCILLATOR_FREQUENCY 54000000
void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
struct vc4_hdmi_connector_state *conn_state)
{
unsigned long flags;
/* The PHY should be in reset here, as
* vc4_hdmi_encoder_disable() leaves it.
*/
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16);
HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi)
{
unsigned long flags;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi)
{
unsigned long flags;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_CTL_0,
HDMI_READ(HDMI_TX_PHY_CTL_0) &
~VC4_HDMI_TX_PHY_RNG_PWRDN);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi)
{
unsigned long flags;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_CTL_0,
HDMI_READ(HDMI_TX_PHY_CTL_0) |
VC4_HDMI_TX_PHY_RNG_PWRDN);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
static unsigned long long
phy_get_vco_freq(unsigned long long clock, u8 *vco_sel, u8 *vco_div)
{
unsigned long long vco_freq = clock;
unsigned int _vco_div = 0;
unsigned int _vco_sel = 0;
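/*
* Scale the TMDS character rate up by 10 * vco_div until the
* VCO reaches at least 3 GHz, and select the high VCO band
* above 4.5 GHz.
*/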
while (vco_freq < 3000000000ULL) {
_vco_div++;
vco_freq = clock * _vco_div * 10;
}
if (vco_freq > 4500000000ULL)
_vco_sel = 1;
*vco_sel = _vco_sel;
*vco_div = _vco_div;
return vco_freq;
}
static u8 phy_get_cp_current(unsigned long vco_freq)
{
if (vco_freq < 3700000000ULL)
return 0x1c;
return 0x18;
}
static u32 phy_get_rm_offset(unsigned long long vco_freq)
{
unsigned long long fref = OSCILLATOR_FREQUENCY;
u64 offset = 0;
/* RM offset is stored as 9.22 format */
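/*
* Net result: offset = vco_freq * 2^21 / fref, i.e. the 9.22
* encoding of vco_freq / (2 * fref).
*/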
offset = vco_freq * 2;
offset = offset << 22;
do_div(offset, fref);
offset >>= 2;
return offset;
}
static u8 phy_get_vco_gain(unsigned long long vco_freq)
{
if (vco_freq < 3350000000ULL)
return 0xf;
if (vco_freq < 3700000000ULL)
return 0xc;
if (vco_freq < 4050000000ULL)
return 0x6;
if (vco_freq < 4800000000ULL)
return 0x5;
if (vco_freq < 5200000000ULL)
return 0x7;
return 0x2;
}
struct phy_lane_settings {
struct {
u8 preemphasis;
u8 main_driver;
} amplitude;
u8 res_sel_data;
u8 term_res_sel_data;
};
struct phy_settings {
unsigned long long min_rate;
unsigned long long max_rate;
struct phy_lane_settings channel[3];
struct phy_lane_settings clock;
};
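/*
* Lane amplitude, source impedance and termination settings,
* bucketed by TMDS character rate; higher rates step up the
* drive strength and termination values.
*/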
static const struct phy_settings vc5_hdmi_phy_settings[] = {
{
0, 50000000,
{
{{0x0, 0x0A}, 0x12, 0x0},
{{0x0, 0x0A}, 0x12, 0x0},
{{0x0, 0x0A}, 0x12, 0x0}
},
{{0x0, 0x0A}, 0x18, 0x0},
},
{
50000001, 75000000,
{
{{0x0, 0x09}, 0x12, 0x0},
{{0x0, 0x09}, 0x12, 0x0},
{{0x0, 0x09}, 0x12, 0x0}
},
{{0x0, 0x0C}, 0x18, 0x3},
},
{
75000001, 165000000,
{
{{0x0, 0x09}, 0x12, 0x0},
{{0x0, 0x09}, 0x12, 0x0},
{{0x0, 0x09}, 0x12, 0x0}
},
{{0x0, 0x0C}, 0x18, 0x3},
},
{
165000001, 250000000,
{
{{0x0, 0x0F}, 0x12, 0x1},
{{0x0, 0x0F}, 0x12, 0x1},
{{0x0, 0x0F}, 0x12, 0x1}
},
{{0x0, 0x0C}, 0x18, 0x3},
},
{
250000001, 340000000,
{
{{0x2, 0x0D}, 0x12, 0x1},
{{0x2, 0x0D}, 0x12, 0x1},
{{0x2, 0x0D}, 0x12, 0x1}
},
{{0x0, 0x0C}, 0x18, 0xF},
},
{
340000001, 450000000,
{
{{0x0, 0x1B}, 0x12, 0xF},
{{0x0, 0x1B}, 0x12, 0xF},
{{0x0, 0x1B}, 0x12, 0xF}
},
{{0x0, 0x0A}, 0x12, 0xF},
},
{
450000001, 600000000,
{
{{0x0, 0x1C}, 0x12, 0xF},
{{0x0, 0x1C}, 0x12, 0xF},
{{0x0, 0x1C}, 0x12, 0xF}
},
{{0x0, 0x0B}, 0x13, 0xF},
},
};
static const struct phy_settings *phy_get_settings(unsigned long long tmds_rate)
{
unsigned int count = ARRAY_SIZE(vc5_hdmi_phy_settings);
unsigned int i;
for (i = 0; i < count; i++) {
const struct phy_settings *s = &vc5_hdmi_phy_settings[i];
if (tmds_rate >= s->min_rate && tmds_rate <= s->max_rate)
return s;
}
/*
* If the pixel clock exceeds our max setting, try the max
* setting anyway.
*/
return &vc5_hdmi_phy_settings[count - 1];
}
static const struct phy_lane_settings *
phy_get_channel_settings(enum vc4_hdmi_phy_channel chan,
unsigned long long tmds_rate)
{
const struct phy_settings *settings = phy_get_settings(tmds_rate);
if (chan == PHY_LANE_CK)
return &settings->clock;
return &settings->channel[chan];
}
static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi)
{
lockdep_assert_held(&vc4_hdmi->hw_lock);
HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0x0f);
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10));
}
void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
struct vc4_hdmi_connector_state *conn_state)
{
const struct phy_lane_settings *chan0_settings, *chan1_settings, *chan2_settings, *clock_settings;
const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
unsigned long long pixel_freq = conn_state->tmds_char_rate;
unsigned long long vco_freq;
unsigned char word_sel;
unsigned long flags;
u8 vco_sel, vco_div;
vco_freq = phy_get_vco_freq(pixel_freq, &vco_sel, &vco_div);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
vc5_hdmi_reset_phy(vc4_hdmi);
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL,
VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN);
HDMI_WRITE(HDMI_TX_PHY_RESET_CTL,
HDMI_READ(HDMI_TX_PHY_RESET_CTL) &
~VC4_HDMI_TX_PHY_RESET_CTL_TX_0_RESET &
~VC4_HDMI_TX_PHY_RESET_CTL_TX_1_RESET &
~VC4_HDMI_TX_PHY_RESET_CTL_TX_2_RESET &
~VC4_HDMI_TX_PHY_RESET_CTL_TX_CK_RESET);
HDMI_WRITE(HDMI_RM_CONTROL,
HDMI_READ(HDMI_RM_CONTROL) |
VC4_HDMI_RM_CONTROL_EN_FREEZE_COUNTERS |
VC4_HDMI_RM_CONTROL_EN_LOAD_INTEGRATOR |
VC4_HDMI_RM_CONTROL_FREE_RUN);
HDMI_WRITE(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1,
(HDMI_READ(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1) &
~VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT_MASK) |
VC4_SET_FIELD(0, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT));
HDMI_WRITE(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2,
(HDMI_READ(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2) &
~VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT_MASK) |
VC4_SET_FIELD(0, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT));
HDMI_WRITE(HDMI_RM_OFFSET,
VC4_SET_FIELD(phy_get_rm_offset(vco_freq),
VC4_HDMI_RM_OFFSET_OFFSET) |
VC4_HDMI_RM_OFFSET_ONLY);
HDMI_WRITE(HDMI_TX_PHY_CLK_DIV,
VC4_SET_FIELD(vco_div, VC4_HDMI_TX_PHY_CLK_DIV_VCO));
HDMI_WRITE(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4,
VC4_SET_FIELD(0xe147, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_HOLD_THRESHOLD) |
VC4_SET_FIELD(0xe14, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_STABLE_THRESHOLD));
HDMI_WRITE(HDMI_TX_PHY_PLL_CTL_0,
VC4_HDMI_TX_PHY_PLL_CTL_0_ENA_VCO_CLK |
VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_CONT_EN |
VC4_HDMI_TX_PHY_PLL_CTL_0_MASH11_MODE |
VC4_SET_FIELD(vco_sel, VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_SEL));
HDMI_WRITE(HDMI_TX_PHY_PLL_CTL_1,
HDMI_READ(HDMI_TX_PHY_PLL_CTL_1) |
VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_ENABLE |
VC4_SET_FIELD(3, VC4_HDMI_TX_PHY_PLL_CTL_1_POST_RST_SEL) |
VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_DELAY) |
VC4_SET_FIELD(0x8a, VC4_HDMI_TX_PHY_PLL_CTL_1_CPP));
HDMI_WRITE(HDMI_RM_FORMAT,
HDMI_READ(HDMI_RM_FORMAT) |
VC4_SET_FIELD(2, VC4_HDMI_RM_FORMAT_SHIFT));
HDMI_WRITE(HDMI_TX_PHY_PLL_CFG,
HDMI_READ(HDMI_TX_PHY_PLL_CFG) |
VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_PLL_CFG_PDIV));
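/*
 * TMDS character rates above the HDMI 1.4 limit of 340 MHz use the
 * HDMI 2.0 1/40 TMDS clock ratio, which is presumably why a different
 * TMDS clock word select is programmed here.
 */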
if (pixel_freq >= 340000000)
word_sel = 3;
else
word_sel = 0;
HDMI_WRITE(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, word_sel);
HDMI_WRITE(HDMI_TX_PHY_CTL_3,
VC4_SET_FIELD(phy_get_cp_current(vco_freq),
VC4_HDMI_TX_PHY_CTL_3_ICP) |
VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_CTL_3_CP) |
VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_CTL_3_CP1) |
VC4_SET_FIELD(3, VC4_HDMI_TX_PHY_CTL_3_CZ) |
VC4_SET_FIELD(4, VC4_HDMI_TX_PHY_CTL_3_RP) |
VC4_SET_FIELD(6, VC4_HDMI_TX_PHY_CTL_3_RZ));
chan0_settings =
phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_0],
pixel_freq);
chan1_settings =
phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_1],
pixel_freq);
chan2_settings =
phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_2],
pixel_freq);
clock_settings =
phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_CK],
pixel_freq);
HDMI_WRITE(HDMI_TX_PHY_CTL_0,
VC4_SET_FIELD(chan0_settings->amplitude.preemphasis,
VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_PREEMP) |
VC4_SET_FIELD(chan0_settings->amplitude.main_driver,
VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_MAINDRV) |
VC4_SET_FIELD(chan1_settings->amplitude.preemphasis,
VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_PREEMP) |
VC4_SET_FIELD(chan1_settings->amplitude.main_driver,
VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_MAINDRV) |
VC4_SET_FIELD(chan2_settings->amplitude.preemphasis,
VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_PREEMP) |
VC4_SET_FIELD(chan2_settings->amplitude.main_driver,
VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_MAINDRV) |
VC4_SET_FIELD(clock_settings->amplitude.preemphasis,
VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_PREEMP) |
VC4_SET_FIELD(clock_settings->amplitude.main_driver,
VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_MAINDRV));
HDMI_WRITE(HDMI_TX_PHY_CTL_1,
HDMI_READ(HDMI_TX_PHY_CTL_1) |
VC4_SET_FIELD(chan0_settings->res_sel_data,
VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA0) |
VC4_SET_FIELD(chan1_settings->res_sel_data,
VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA1) |
VC4_SET_FIELD(chan2_settings->res_sel_data,
VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA2) |
VC4_SET_FIELD(clock_settings->res_sel_data,
VC4_HDMI_TX_PHY_CTL_1_RES_SEL_CK));
HDMI_WRITE(HDMI_TX_PHY_CTL_2,
VC4_SET_FIELD(chan0_settings->term_res_sel_data,
VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA0) |
VC4_SET_FIELD(chan1_settings->term_res_sel_data,
VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA1) |
VC4_SET_FIELD(chan2_settings->term_res_sel_data,
VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA2) |
VC4_SET_FIELD(clock_settings->term_res_sel_data,
VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELCK) |
VC4_SET_FIELD(phy_get_vco_gain(vco_freq),
VC4_HDMI_TX_PHY_CTL_2_VCO_GAIN));
HDMI_WRITE(HDMI_TX_PHY_CHANNEL_SWAP,
VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_0],
VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX0_OUT_SEL) |
VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_1],
VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX1_OUT_SEL) |
VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_2],
VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX2_OUT_SEL) |
VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_CK],
VC4_HDMI_TX_PHY_CHANNEL_SWAP_TXCK_OUT_SEL));
HDMI_WRITE(HDMI_TX_PHY_RESET_CTL,
HDMI_READ(HDMI_TX_PHY_RESET_CTL) &
~(VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB |
VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB));
HDMI_WRITE(HDMI_TX_PHY_RESET_CTL,
HDMI_READ(HDMI_TX_PHY_RESET_CTL) |
VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB |
VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi)
{
unsigned long flags;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
vc5_hdmi_reset_phy(vc4_hdmi);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi)
{
unsigned long flags;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL,
HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) &
~VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi)
{
unsigned long flags;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL,
HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) |
VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
| linux-master | drivers/gpu/drm/vc4/vc4_hdmi_phy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Broadcom
*/
/**
* DOC: VC4 KMS
*
* This is the general code for implementing KMS mode setting that
* doesn't clearly associate with any of the other objects (plane,
* crtc, HDMI encoder).
*/
#include <linux/clk.h>
#include <linux/sort.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
struct vc4_ctm_state {
struct drm_private_state base;
struct drm_color_ctm *ctm;
int fifo;
};
#define to_vc4_ctm_state(_state) \
container_of_const(_state, struct vc4_ctm_state, base)
struct vc4_load_tracker_state {
struct drm_private_state base;
u64 hvs_load;
u64 membus_load;
};
#define to_vc4_load_tracker_state(_state) \
container_of_const(_state, struct vc4_load_tracker_state, base)
static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
struct drm_private_obj *manager)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_private_state *priv_state;
int ret;
ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
if (ret)
return ERR_PTR(ret);
priv_state = drm_atomic_get_private_obj_state(state, manager);
if (IS_ERR(priv_state))
return ERR_CAST(priv_state);
return to_vc4_ctm_state(priv_state);
}
static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
struct vc4_ctm_state *state;
state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
return &state->base;
}
static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);
kfree(ctm_state);
}
static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
.atomic_duplicate_state = vc4_ctm_duplicate_state,
.atomic_destroy_state = vc4_ctm_destroy_state,
};
static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
drm_atomic_private_obj_fini(&vc4->ctm_manager);
}
static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
struct vc4_ctm_state *ctm_state;
drm_modeset_lock_init(&vc4->ctm_state_lock);
ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
if (!ctm_state)
return -ENOMEM;
drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
&vc4_ctm_state_funcs);
return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}
/* Converts a DRM S31.32 value to the HW S0.9 format. */
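/*
 * Worked example: the S31.32 encoding of 0.5 is 0x0000000080000000.
 * The sign bit and all integer bits are clear, so the conversion keeps
 * the top 9 fractional bits: (0x80000000 >> 23) & 0x1ff = 0x100,
 * which is 0.5 in S0.9 (256/512).
 */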
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
u16 r;
/* Sign bit. */
r = in & BIT_ULL(63) ? BIT(9) : 0;
if ((in & GENMASK_ULL(62, 32)) > 0) {
/* We have zero integer bits so we can only saturate here. */
r |= GENMASK(8, 0);
} else {
/* Otherwise take the 9 most important fractional bits. */
r |= (in >> 23) & GENMASK(8, 0);
}
return r;
}
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
struct vc4_hvs *hvs = vc4->hvs;
struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
struct drm_color_ctm *ctm = ctm_state->ctm;
if (ctm_state->fifo) {
HVS_WRITE(SCALER_OLEDCOEF2,
VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
SCALER_OLEDCOEF2_R_TO_R) |
VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
SCALER_OLEDCOEF2_R_TO_G) |
VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
SCALER_OLEDCOEF2_R_TO_B));
HVS_WRITE(SCALER_OLEDCOEF1,
VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
SCALER_OLEDCOEF1_G_TO_R) |
VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
SCALER_OLEDCOEF1_G_TO_G) |
VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
SCALER_OLEDCOEF1_G_TO_B));
HVS_WRITE(SCALER_OLEDCOEF0,
VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
SCALER_OLEDCOEF0_B_TO_R) |
VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
SCALER_OLEDCOEF0_B_TO_G) |
VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
SCALER_OLEDCOEF0_B_TO_B));
}
HVS_WRITE(SCALER_OLEDOFFS,
VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
struct vc4_hvs_state *
vc4_hvs_get_new_global_state(const struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
struct drm_private_state *priv_state;
priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
if (!priv_state)
return ERR_PTR(-EINVAL);
return to_vc4_hvs_state(priv_state);
}
struct vc4_hvs_state *
vc4_hvs_get_old_global_state(const struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
struct drm_private_state *priv_state;
priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
if (!priv_state)
return ERR_PTR(-EINVAL);
return to_vc4_hvs_state(priv_state);
}
struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
struct drm_private_state *priv_state;
priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
if (IS_ERR(priv_state))
return ERR_CAST(priv_state);
return to_vc4_hvs_state(priv_state);
}
static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
struct drm_atomic_state *state)
{
struct vc4_hvs *hvs = vc4->hvs;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
unsigned int i;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
u32 dispctrl;
u32 dsp3_mux;
if (!crtc_state->active)
continue;
if (vc4_state->assigned_channel != 2)
continue;
/*
* SCALER_DISPCTRL_DSP3 = X, where X < 3 means 'connect DSP3 to
* FIFO X'.
* SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
*
* DSP3 is connected to FIFO2 unless the transposer is
* enabled. In this case, FIFO 2 is directly accessed by the
* TXP IP, and we need to disable the FIFO2 -> pixelvalve1
* route.
*/
if (vc4_crtc->feeds_txp)
dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
else
dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
dispctrl = HVS_READ(SCALER_DISPCTRL) &
~SCALER_DISPCTRL_DSP3_MUX_MASK;
HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
}
}
static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
struct drm_atomic_state *state)
{
struct vc4_hvs *hvs = vc4->hvs;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
unsigned char mux;
unsigned int i;
u32 reg;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
unsigned int channel = vc4_state->assigned_channel;
if (!vc4_state->update_muxing)
continue;
switch (vc4_crtc->data->hvs_output) {
case 2:
drm_WARN_ON(&vc4->base,
VC4_GET_FIELD(HVS_READ(SCALER_DISPCTRL),
SCALER_DISPCTRL_DSP3_MUX) == channel);
mux = (channel == 2) ? 0 : 1;
reg = HVS_READ(SCALER_DISPECTRL);
HVS_WRITE(SCALER_DISPECTRL,
(reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
break;
case 3:
if (channel == VC4_HVS_CHANNEL_DISABLED)
mux = 3;
else
mux = channel;
reg = HVS_READ(SCALER_DISPCTRL);
HVS_WRITE(SCALER_DISPCTRL,
(reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
break;
case 4:
if (channel == VC4_HVS_CHANNEL_DISABLED)
mux = 3;
else
mux = channel;
reg = HVS_READ(SCALER_DISPEOLN);
HVS_WRITE(SCALER_DISPEOLN,
(reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));
break;
case 5:
if (channel == VC4_HVS_CHANNEL_DISABLED)
mux = 3;
else
mux = channel;
reg = HVS_READ(SCALER_DISPDITHER);
HVS_WRITE(SCALER_DISPDITHER,
(reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
break;
default:
break;
}
}
}
static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
struct drm_crtc_state *new_crtc_state;
struct vc4_hvs_state *new_hvs_state;
struct drm_crtc *crtc;
struct vc4_hvs_state *old_hvs_state;
unsigned int channel;
int i;
old_hvs_state = vc4_hvs_get_old_global_state(state);
if (WARN_ON(IS_ERR(old_hvs_state)))
return;
new_hvs_state = vc4_hvs_get_new_global_state(state);
if (WARN_ON(IS_ERR(new_hvs_state)))
return;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct vc4_crtc_state *vc4_crtc_state;
if (!new_crtc_state->commit)
continue;
vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
}
for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
struct drm_crtc_commit *commit;
int ret;
if (!old_hvs_state->fifo_state[channel].in_use)
continue;
commit = old_hvs_state->fifo_state[channel].pending_commit;
if (!commit)
continue;
ret = drm_crtc_commit_wait(commit);
if (ret)
drm_err(dev, "Timed out waiting for commit\n");
drm_crtc_commit_put(commit);
old_hvs_state->fifo_state[channel].pending_commit = NULL;
}
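/*
 * During the commit the HVS has to cope with both the old and the new
 * configuration, so request a rate that satisfies whichever of the
 * two is more demanding before touching the hardware, and relax it
 * again once the commit is done.
 */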
if (vc4->is_vc5) {
unsigned long state_rate = max(old_hvs_state->core_clock_rate,
new_hvs_state->core_clock_rate);
unsigned long core_rate = clamp_t(unsigned long, state_rate,
500000000, hvs->max_core_rate);
drm_dbg(dev, "Raising the core clock at %lu Hz\n", core_rate);
/*
* Do a temporary request on the core clock during the
* modeset.
*/
WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
}
drm_atomic_helper_commit_modeset_disables(dev, state);
vc4_ctm_commit(vc4, state);
if (vc4->is_vc5)
vc5_hvs_pv_muxing_commit(vc4, state);
else
vc4_hvs_pv_muxing_commit(vc4, state);
drm_atomic_helper_commit_planes(dev, state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_modeset_enables(dev, state);
drm_atomic_helper_fake_vblank(state);
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_wait_for_flip_done(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
if (vc4->is_vc5) {
unsigned long core_rate = min_t(unsigned long,
hvs->max_core_rate,
new_hvs_state->core_clock_rate);
drm_dbg(dev, "Running the core clock at %lu Hz\n", core_rate);
/*
* Request a clock rate based on the current HVS
* requirements.
*/
WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
clk_get_rate(hvs->core_clk));
}
}
static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state;
struct vc4_hvs_state *hvs_state;
struct drm_crtc *crtc;
unsigned int i;
hvs_state = vc4_hvs_get_new_global_state(state);
if (WARN_ON(IS_ERR(hvs_state)))
return PTR_ERR(hvs_state);
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc_state *vc4_crtc_state =
to_vc4_crtc_state(crtc_state);
unsigned int channel =
vc4_crtc_state->assigned_channel;
if (channel == VC4_HVS_CHANNEL_DISABLED)
continue;
if (!hvs_state->fifo_state[channel].in_use)
continue;
hvs_state->fifo_state[channel].pending_commit =
drm_crtc_commit_get(crtc_state->commit);
}
return 0;
}
static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_mode_fb_cmd2 mode_cmd_local;
if (WARN_ON_ONCE(vc4->is_vc5))
return ERR_PTR(-ENODEV);
/* If the user didn't specify a modifier, use the
* vc4_set_tiling_ioctl() state for the BO.
*/
if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
gem_obj = drm_gem_object_lookup(file_priv,
mode_cmd->handles[0]);
if (!gem_obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n",
mode_cmd->handles[0]);
return ERR_PTR(-ENOENT);
}
bo = to_vc4_bo(gem_obj);
mode_cmd_local = *mode_cmd;
if (bo->t_format) {
mode_cmd_local.modifier[0] =
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
} else {
mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
}
drm_gem_object_put(gem_obj);
mode_cmd = &mode_cmd_local;
}
return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
* at a time and the HW only supports S0.9 scalars. To account for the latter,
* we don't allow userland to set a CTM that we have no hope of approximating.
*/
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_ctm_state *ctm_state = NULL;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_color_ctm *ctm;
int i;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
/* CTM is being disabled. */
if (!new_crtc_state->ctm && old_crtc_state->ctm) {
ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
if (IS_ERR(ctm_state))
return PTR_ERR(ctm_state);
ctm_state->fifo = 0;
}
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (new_crtc_state->ctm == old_crtc_state->ctm)
continue;
if (!ctm_state) {
ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
if (IS_ERR(ctm_state))
return PTR_ERR(ctm_state);
}
/* CTM is being enabled or the matrix changed. */
if (new_crtc_state->ctm) {
struct vc4_crtc_state *vc4_crtc_state =
to_vc4_crtc_state(new_crtc_state);
/* fifo is 1-based since 0 disables CTM. */
int fifo = vc4_crtc_state->assigned_channel + 1;
/* Check userland isn't trying to turn on CTM for more
* than one CRTC at a time.
*/
if (ctm_state->fifo && ctm_state->fifo != fifo) {
DRM_DEBUG_DRIVER("Too many CTM configured\n");
return -EINVAL;
}
/* Check we can approximate the specified CTM.
* We disallow scalars |c| > 1.0 since the HW has
* no integer bits.
*/
ctm = new_crtc_state->ctm->data;
for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
u64 val = ctm->matrix[i];
val &= ~BIT_ULL(63);
if (val > BIT_ULL(32))
return -EINVAL;
}
ctm_state->fifo = fifo;
ctm_state->ctm = ctm;
}
}
return 0;
}
static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
struct drm_plane_state *old_plane_state, *new_plane_state;
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
struct vc4_load_tracker_state *load_state;
struct drm_private_state *priv_state;
struct drm_plane *plane;
int i;
priv_state = drm_atomic_get_private_obj_state(state,
&vc4->load_tracker);
if (IS_ERR(priv_state))
return PTR_ERR(priv_state);
load_state = to_vc4_load_tracker_state(priv_state);
for_each_oldnew_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
struct vc4_plane_state *vc4_plane_state;
if (old_plane_state->fb && old_plane_state->crtc) {
vc4_plane_state = to_vc4_plane_state(old_plane_state);
load_state->membus_load -= vc4_plane_state->membus_load;
load_state->hvs_load -= vc4_plane_state->hvs_load;
}
if (new_plane_state->fb && new_plane_state->crtc) {
vc4_plane_state = to_vc4_plane_state(new_plane_state);
load_state->membus_load += vc4_plane_state->membus_load;
load_state->hvs_load += vc4_plane_state->hvs_load;
}
}
/* Don't check the load when the tracker is disabled. */
if (!vc4->load_tracker_enabled)
return 0;
/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
* the system work when other blocks are accessing the memory.
*/
if (load_state->membus_load > SZ_1G + SZ_512M)
return -ENOSPC;
/* The HVS clock is supposed to run at 250 MHz, so take a margin and
* consider the maximum number of cycles to be 240M.
*/
if (load_state->hvs_load > 240000000ULL)
return -ENOSPC;
return 0;
}
static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
struct vc4_load_tracker_state *state;
state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
return &state->base;
}
static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct vc4_load_tracker_state *load_state;
load_state = to_vc4_load_tracker_state(state);
kfree(load_state);
}
static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
.atomic_destroy_state = vc4_load_tracker_destroy_state,
};
static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
drm_atomic_private_obj_fini(&vc4->load_tracker);
}
static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
struct vc4_load_tracker_state *load_state;
load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
if (!load_state)
return -ENOMEM;
drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
&load_state->base,
&vc4_load_tracker_state_funcs);
return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}
static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
struct vc4_hvs_state *state;
unsigned int i;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
for (i = 0; i < HVS_NUM_CHANNELS; i++) {
state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
}
state->core_clock_rate = old_state->core_clock_rate;
return &state->base;
}
static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
unsigned int i;
for (i = 0; i < HVS_NUM_CHANNELS; i++) {
if (!hvs_state->fifo_state[i].pending_commit)
continue;
drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
}
kfree(hvs_state);
}
static void vc4_hvs_channels_print_state(struct drm_printer *p,
const struct drm_private_state *state)
{
const struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
unsigned int i;
drm_printf(p, "HVS State\n");
drm_printf(p, "\tCore Clock Rate: %lu\n", hvs_state->core_clock_rate);
for (i = 0; i < HVS_NUM_CHANNELS; i++) {
drm_printf(p, "\tChannel %d\n", i);
drm_printf(p, "\t\tin use=%d\n", hvs_state->fifo_state[i].in_use);
drm_printf(p, "\t\tload=%lu\n", hvs_state->fifo_state[i].fifo_load);
}
}
static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
.atomic_destroy_state = vc4_hvs_channels_destroy_state,
.atomic_print_state = vc4_hvs_channels_print_state,
};
static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
drm_atomic_private_obj_fini(&vc4->hvs_channels);
}
static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
struct vc4_hvs_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
&state->base,
&vc4_hvs_state_funcs);
return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}
static int cmp_vc4_crtc_hvs_output(const void *a, const void *b)
{
const struct vc4_crtc *crtc_a =
to_vc4_crtc(*(const struct drm_crtc **)a);
const struct vc4_crtc_data *data_a =
vc4_crtc_to_vc4_crtc_data(crtc_a);
const struct vc4_crtc *crtc_b =
to_vc4_crtc(*(const struct drm_crtc **)b);
const struct vc4_crtc_data *data_b =
vc4_crtc_to_vc4_crtc_data(crtc_b);
return data_a->hvs_output - data_b->hvs_output;
}
/*
* The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
* the TXP (and therefore all the CRTCs found on that platform).
*
* The naive (and our initial) implementation would just iterate over
* all the active CRTCs, try to find a suitable FIFO, and then remove it
* from the pool of available FIFOs. However, there are a few corner
* cases that need to be considered:
*
* - When running in a dual-display setup (so with two CRTCs involved),
* we can update the state of a single CRTC (for example by changing
* its mode using xrandr under X11) without affecting the other. In
* this case, the other CRTC wouldn't be in the state at all, so we
* need to consider all the running CRTCs in the DRM device to assign
* a FIFO, not just the one in the state.
*
* - To fix the above, we can't use drm_atomic_get_crtc_state on all
* enabled CRTCs to pull their CRTC state into the global state, since
* a page flip would then also wait for those CRTCs' vblanks to
* complete. Since we don't have a guarantee that they are actually
* active, that vblank might never happen, and it shouldn't be waited
* on anyway when we only flip a single CRTC. That can be tested by
* doing a modetest -v first on HDMI1 and then on HDMI0.
*
* - Since we need the pixelvalve to be disabled and enabled back when
* the FIFO is changed, we should keep the FIFO assigned for as long
* as the CRTC is enabled, only considering it free again once that
* CRTC has been disabled. This can be tested by booting X11 on a
* single display, and changing the resolution down and then back up.
*/
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct vc4_hvs_state *hvs_new_state;
struct drm_crtc **sorted_crtcs;
struct drm_crtc *crtc;
unsigned int unassigned_channels = 0;
unsigned int i;
int ret;
hvs_new_state = vc4_hvs_get_global_state(state);
if (IS_ERR(hvs_new_state))
return PTR_ERR(hvs_new_state);
for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
if (!hvs_new_state->fifo_state[i].in_use)
unassigned_channels |= BIT(i);
/*
* The problem we have to solve here is that we have up to 7
* encoders, connected to up to 6 CRTCs.
*
* Those CRTCs, depending on the instance, can be routed to 1, 2
* or 3 HVS FIFOs, and we need to set the muxing between FIFOs and
* outputs in the HVS accordingly.
*
* It would be pretty hard to come up with an algorithm that
* would generically solve this. However, the current routing
* trees we support allow us to simplify the problem a bit.
*
* Indeed, with the currently supported layouts, if we assign the
* FIFOs in ascending CRTC index order, we can't
* fall into the situation where an earlier CRTC that had
* multiple routes is assigned one that was the only option for
* a later CRTC.
*
* If the layout changes and doesn't give us that in the future,
* we will need to have something smarter, but it works so far.
*/
sorted_crtcs = kmalloc_array(dev->num_crtcs, sizeof(*sorted_crtcs), GFP_KERNEL);
if (!sorted_crtcs)
return -ENOMEM;
i = 0;
drm_for_each_crtc(crtc, dev)
sorted_crtcs[i++] = crtc;
sort(sorted_crtcs, i, sizeof(*sorted_crtcs), cmp_vc4_crtc_hvs_output, NULL);
for (i = 0; i < dev->num_crtcs; i++) {
struct vc4_crtc_state *old_vc4_crtc_state, *new_vc4_crtc_state;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct vc4_crtc *vc4_crtc;
unsigned int matching_channels;
unsigned int channel;
crtc = sorted_crtcs[i];
if (!crtc)
continue;
vc4_crtc = to_vc4_crtc(crtc);
old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
if (!old_crtc_state)
continue;
old_vc4_crtc_state = to_vc4_crtc_state(old_crtc_state);
new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (!new_crtc_state)
continue;
new_vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name);
/* Nothing to do here, let's skip it */
if (old_crtc_state->enable == new_crtc_state->enable) {
if (new_crtc_state->enable)
drm_dbg(dev, "%s: Already enabled, reusing channel %d.\n",
crtc->name, new_vc4_crtc_state->assigned_channel);
else
drm_dbg(dev, "%s: Disabled, ignoring.\n", crtc->name);
continue;
}
/* Muxing will need to be modified, mark it as such */
new_vc4_crtc_state->update_muxing = true;
/* If we're disabling our CRTC, we put back our channel */
if (!new_crtc_state->enable) {
channel = old_vc4_crtc_state->assigned_channel;
drm_dbg(dev, "%s: Disabling, Freeing channel %d\n",
crtc->name, channel);
hvs_new_state->fifo_state[channel].in_use = false;
new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
continue;
}
matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
if (!matching_channels) {
ret = -EINVAL;
goto err_free_crtc_array;
}
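/*
 * Pick the lowest-numbered FIFO this CRTC can be routed to;
 * ffs() is 1-based, hence the - 1.
 */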
channel = ffs(matching_channels) - 1;
drm_dbg(dev, "Assigned HVS channel %d to CRTC %s\n", channel, crtc->name);
new_vc4_crtc_state->assigned_channel = channel;
unassigned_channels &= ~BIT(channel);
hvs_new_state->fifo_state[channel].in_use = true;
}
kfree(sorted_crtcs);
return 0;
err_free_crtc_array:
kfree(sorted_crtcs);
return ret;
}
static int
vc4_core_clock_atomic_check(struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
struct drm_private_state *priv_state;
struct vc4_hvs_state *hvs_new_state;
struct vc4_load_tracker_state *load_state;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
unsigned int num_outputs;
unsigned long pixel_rate;
unsigned long cob_rate;
unsigned int i;
priv_state = drm_atomic_get_private_obj_state(state,
&vc4->load_tracker);
if (IS_ERR(priv_state))
return PTR_ERR(priv_state);
load_state = to_vc4_load_tracker_state(priv_state);
hvs_new_state = vc4_hvs_get_global_state(state);
if (IS_ERR(hvs_new_state))
return PTR_ERR(hvs_new_state);
for_each_oldnew_crtc_in_state(state, crtc,
old_crtc_state,
new_crtc_state,
i) {
if (old_crtc_state->active) {
struct vc4_crtc_state *old_vc4_state =
to_vc4_crtc_state(old_crtc_state);
unsigned int channel = old_vc4_state->assigned_channel;
hvs_new_state->fifo_state[channel].fifo_load = 0;
}
if (new_crtc_state->active) {
struct vc4_crtc_state *new_vc4_state =
to_vc4_crtc_state(new_crtc_state);
unsigned int channel = new_vc4_state->assigned_channel;
hvs_new_state->fifo_state[channel].fifo_load =
new_vc4_state->hvs_load;
}
}
cob_rate = 0;
num_outputs = 0;
for (i = 0; i < HVS_NUM_CHANNELS; i++) {
if (!hvs_new_state->fifo_state[i].in_use)
continue;
num_outputs++;
cob_rate = max_t(unsigned long,
hvs_new_state->fifo_state[i].fifo_load,
cob_rate);
}
pixel_rate = load_state->hvs_load;
if (num_outputs > 1) {
pixel_rate = (pixel_rate * 40) / 100;
} else {
pixel_rate = (pixel_rate * 60) / 100;
}
hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);
return 0;
}
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
int ret;
ret = vc4_pv_muxing_atomic_check(dev, state);
if (ret)
return ret;
ret = vc4_ctm_atomic_check(dev, state);
if (ret < 0)
return ret;
ret = drm_atomic_helper_check(dev, state);
if (ret)
return ret;
ret = vc4_load_tracker_atomic_check(state);
if (ret)
return ret;
return vc4_core_clock_atomic_check(state);
}
static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
.atomic_commit_setup = vc4_atomic_commit_setup,
.atomic_commit_tail = vc4_atomic_commit_tail,
};
static const struct drm_mode_config_funcs vc4_mode_funcs = {
.atomic_check = vc4_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = vc4_fb_create,
};
static const struct drm_mode_config_funcs vc5_mode_funcs = {
.atomic_check = vc4_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
};
int vc4_kms_load(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
/*
* The limits enforced by the load tracker aren't relevant for
* the BCM2711, but the load tracker computations are used for
* the core clock rate calculation.
*/
if (!vc4->is_vc5) {
/* Start with the load tracker enabled. Can be
* disabled through the debugfs load_tracker file.
*/
vc4->load_tracker_enabled = true;
}
/* Set support for vblank irq fast disable, before drm_vblank_init() */
dev->vblank_disable_immediate = true;
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize vblank\n");
return ret;
}
if (vc4->is_vc5) {
dev->mode_config.max_width = 7680;
dev->mode_config.max_height = 7680;
} else {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
}
dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
dev->mode_config.helper_private = &vc4_mode_config_helpers;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
dev->mode_config.normalize_zpos = true;
ret = vc4_ctm_obj_init(vc4);
if (ret)
return ret;
ret = vc4_load_tracker_obj_init(vc4);
if (ret)
return ret;
ret = vc4_hvs_channels_obj_init(vc4);
if (ret)
return ret;
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
return 0;
}
| linux-master | drivers/gpu/drm/vc4/vc4_kms.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2014 Broadcom
*/
#include <drm/drm_drv.h>
#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
/*
* Called at drm_dev_register() time on each of the minors registered
* by the DRM device, to attach the debugfs files.
*/
void
vc4_debugfs_init(struct drm_minor *minor)
{
struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
struct drm_device *drm = &vc4->base;
drm_WARN_ON(drm, vc4_hvs_debugfs_init(minor));
if (vc4->v3d) {
drm_WARN_ON(drm, vc4_bo_debugfs_init(minor));
drm_WARN_ON(drm, vc4_v3d_debugfs_init(minor));
}
}
static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *drm = entry->dev;
struct debugfs_regset32 *regset = entry->file.data;
struct drm_printer p = drm_seq_file_printer(m);
int idx;
if (!drm_dev_enter(drm, &idx))
return -ENODEV;
drm_print_regset32(&p, regset);
drm_dev_exit(idx);
return 0;
}
void vc4_debugfs_add_regset32(struct drm_device *drm,
const char *name,
struct debugfs_regset32 *regset)
{
drm_debugfs_add_file(drm, name, vc4_debugfs_regset32, regset);
}
| linux-master | drivers/gpu/drm/vc4/vc4_debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Broadcom
*/
/**
* DOC: VC4 plane module
*
* Each DRM plane is a layer of pixels being scanned out by the HVS.
*
* At atomic modeset check time, we compute the HVS display element
* state that would be necessary for displaying the plane (giving us a
* chance to figure out if a plane configuration is invalid), then at
* atomic flush time the CRTC will ask us to write our element state
* into the region of the HVS that it has allocated for us.
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
static const struct hvs_format {
u32 drm; /* DRM_FORMAT_* */
u32 hvs; /* HVS_FORMAT_* */
u32 pixel_order;
u32 pixel_order_hvs5;
bool hvs5_only;
} hvs_formats[] = {
{
.drm = DRM_FORMAT_XRGB8888,
.hvs = HVS_PIXEL_FORMAT_RGBA8888,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
},
{
.drm = DRM_FORMAT_ARGB8888,
.hvs = HVS_PIXEL_FORMAT_RGBA8888,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
},
{
.drm = DRM_FORMAT_ABGR8888,
.hvs = HVS_PIXEL_FORMAT_RGBA8888,
.pixel_order = HVS_PIXEL_ORDER_ARGB,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
},
{
.drm = DRM_FORMAT_XBGR8888,
.hvs = HVS_PIXEL_FORMAT_RGBA8888,
.pixel_order = HVS_PIXEL_ORDER_ARGB,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
},
{
.drm = DRM_FORMAT_RGB565,
.hvs = HVS_PIXEL_FORMAT_RGB565,
.pixel_order = HVS_PIXEL_ORDER_XRGB,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XRGB,
},
{
.drm = DRM_FORMAT_BGR565,
.hvs = HVS_PIXEL_FORMAT_RGB565,
.pixel_order = HVS_PIXEL_ORDER_XBGR,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XBGR,
},
{
.drm = DRM_FORMAT_ARGB1555,
.hvs = HVS_PIXEL_FORMAT_RGBA5551,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
},
{
.drm = DRM_FORMAT_XRGB1555,
.hvs = HVS_PIXEL_FORMAT_RGBA5551,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
},
{
.drm = DRM_FORMAT_RGB888,
.hvs = HVS_PIXEL_FORMAT_RGB888,
.pixel_order = HVS_PIXEL_ORDER_XRGB,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XRGB,
},
{
.drm = DRM_FORMAT_BGR888,
.hvs = HVS_PIXEL_FORMAT_RGB888,
.pixel_order = HVS_PIXEL_ORDER_XBGR,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XBGR,
},
{
.drm = DRM_FORMAT_YUV422,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_YVU422,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_YUV420,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_YVU420,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_NV12,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_NV21,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_NV16,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_NV61,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_P030,
.hvs = HVS_PIXEL_FORMAT_YCBCR_10BIT,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
.hvs5_only = true,
},
{
.drm = DRM_FORMAT_XRGB2101010,
.hvs = HVS_PIXEL_FORMAT_RGBA1010102,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
.hvs5_only = true,
},
{
.drm = DRM_FORMAT_ARGB2101010,
.hvs = HVS_PIXEL_FORMAT_RGBA1010102,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
.hvs5_only = true,
},
{
.drm = DRM_FORMAT_ABGR2101010,
.hvs = HVS_PIXEL_FORMAT_RGBA1010102,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
.hvs5_only = true,
},
{
.drm = DRM_FORMAT_XBGR2101010,
.hvs = HVS_PIXEL_FORMAT_RGBA1010102,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
.hvs5_only = true,
},
{
.drm = DRM_FORMAT_RGB332,
.hvs = HVS_PIXEL_FORMAT_RGB332,
.pixel_order = HVS_PIXEL_ORDER_ARGB,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
},
{
.drm = DRM_FORMAT_BGR233,
.hvs = HVS_PIXEL_FORMAT_RGB332,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
},
{
.drm = DRM_FORMAT_XRGB4444,
.hvs = HVS_PIXEL_FORMAT_RGBA4444,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
},
{
.drm = DRM_FORMAT_ARGB4444,
.hvs = HVS_PIXEL_FORMAT_RGBA4444,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
},
{
.drm = DRM_FORMAT_XBGR4444,
.hvs = HVS_PIXEL_FORMAT_RGBA4444,
.pixel_order = HVS_PIXEL_ORDER_ARGB,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
},
{
.drm = DRM_FORMAT_ABGR4444,
.hvs = HVS_PIXEL_FORMAT_RGBA4444,
.pixel_order = HVS_PIXEL_ORDER_ARGB,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
},
{
.drm = DRM_FORMAT_BGRX4444,
.hvs = HVS_PIXEL_FORMAT_RGBA4444,
.pixel_order = HVS_PIXEL_ORDER_RGBA,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_BGRA,
},
{
.drm = DRM_FORMAT_BGRA4444,
.hvs = HVS_PIXEL_FORMAT_RGBA4444,
.pixel_order = HVS_PIXEL_ORDER_RGBA,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_BGRA,
},
{
.drm = DRM_FORMAT_RGBX4444,
.hvs = HVS_PIXEL_FORMAT_RGBA4444,
.pixel_order = HVS_PIXEL_ORDER_BGRA,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_RGBA,
},
{
.drm = DRM_FORMAT_RGBA4444,
.hvs = HVS_PIXEL_FORMAT_RGBA4444,
.pixel_order = HVS_PIXEL_ORDER_BGRA,
.pixel_order_hvs5 = HVS_PIXEL_ORDER_RGBA,
},
};
static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
{
unsigned i;
for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
if (hvs_formats[i].drm == drm_format)
return &hvs_formats[i];
}
return NULL;
}
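/*
 * Pick the scaler mode from the source/destination sizes: identical
 * sizes need no scaling, ratios down to 2/3 (i.e. up-scaling or mild
 * down-scaling) use the polyphase filter (PPF), and anything smaller
 * falls back to the trapezoidal (TPZ) decimator. For example, a
 * 1920-pixel source into a 1280-pixel destination sits exactly on the
 * 2/3 boundary (3 * 1280 == 2 * 1920) and is still handled by PPF.
 */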
static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
{
if (dst == src)
return VC4_SCALING_NONE;
if (3 * dst >= 2 * src)
return VC4_SCALING_PPF;
else
return VC4_SCALING_TPZ;
}
static bool plane_enabled(struct drm_plane_state *state)
{
return state->fb && !WARN_ON(!state->crtc);
}
static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
{
struct vc4_plane_state *vc4_state;
if (WARN_ON(!plane->state))
return NULL;
vc4_state = kmemdup(plane->state, sizeof(*vc4_state), GFP_KERNEL);
if (!vc4_state)
return NULL;
memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
vc4_state->dlist_initialized = 0;
__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
if (vc4_state->dlist) {
vc4_state->dlist = kmemdup(vc4_state->dlist,
vc4_state->dlist_count * 4,
GFP_KERNEL);
if (!vc4_state->dlist) {
kfree(vc4_state);
return NULL;
}
vc4_state->dlist_size = vc4_state->dlist_count;
}
return &vc4_state->base;
}
static void vc4_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
if (drm_mm_node_allocated(&vc4_state->lbm)) {
unsigned long irqflags;
spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
drm_mm_remove_node(&vc4_state->lbm);
spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
}
kfree(vc4_state->dlist);
__drm_atomic_helper_plane_destroy_state(&vc4_state->base);
kfree(state);
}
/* Called during init to allocate the plane's atomic state. */
static void vc4_plane_reset(struct drm_plane *plane)
{
struct vc4_plane_state *vc4_state;
WARN_ON(plane->state);
vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
if (!vc4_state)
return;
__drm_atomic_helper_plane_reset(plane, &vc4_state->base);
}
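/*
 * Grow the dlist backing storage on demand, doubling its size (with a
 * floor of 4 entries) so repeated single-word appends stay amortized
 * O(1). Note that if the allocation fails the counter is left as-is,
 * and the subsequent store in vc4_dlist_write() lands one entry past
 * the old buffer; the code effectively relies on this small
 * GFP_KERNEL allocation succeeding.
 */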
static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state)
{
if (vc4_state->dlist_count == vc4_state->dlist_size) {
u32 new_size = max(4u, vc4_state->dlist_count * 2);
u32 *new_dlist = kmalloc_array(new_size, 4, GFP_KERNEL);
if (!new_dlist)
return;
memcpy(new_dlist, vc4_state->dlist, vc4_state->dlist_count * 4);
kfree(vc4_state->dlist);
vc4_state->dlist = new_dlist;
vc4_state->dlist_size = new_size;
}
vc4_state->dlist_count++;
}
static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
{
unsigned int idx = vc4_state->dlist_count;
vc4_dlist_counter_increment(vc4_state);
vc4_state->dlist[idx] = val;
}
/* Returns the scl0/scl1 field based on whether the dimensions need to
* be up/down/non-scaled.
*
* This is a replication of a table from the spec.
*/
static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
switch (vc4_state->x_scaling[plane] << 2 | vc4_state->y_scaling[plane]) {
case VC4_SCALING_PPF << 2 | VC4_SCALING_PPF:
return SCALER_CTL0_SCL_H_PPF_V_PPF;
case VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF:
return SCALER_CTL0_SCL_H_TPZ_V_PPF;
case VC4_SCALING_PPF << 2 | VC4_SCALING_TPZ:
return SCALER_CTL0_SCL_H_PPF_V_TPZ;
case VC4_SCALING_TPZ << 2 | VC4_SCALING_TPZ:
return SCALER_CTL0_SCL_H_TPZ_V_TPZ;
case VC4_SCALING_PPF << 2 | VC4_SCALING_NONE:
return SCALER_CTL0_SCL_H_PPF_V_NONE;
case VC4_SCALING_NONE << 2 | VC4_SCALING_PPF:
return SCALER_CTL0_SCL_H_NONE_V_PPF;
case VC4_SCALING_NONE << 2 | VC4_SCALING_TPZ:
return SCALER_CTL0_SCL_H_NONE_V_TPZ;
case VC4_SCALING_TPZ << 2 | VC4_SCALING_NONE:
return SCALER_CTL0_SCL_H_TPZ_V_NONE;
default:
case VC4_SCALING_NONE << 2 | VC4_SCALING_NONE:
/* The unity case is independently handled by
* SCALER_CTL0_UNITY.
*/
return 0;
}
}
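/*
 * Rescale the plane's CRTC coordinates into the margin-adjusted
 * display area. As a worked example with illustrative numbers, a
 * 1920-pixel-wide mode with 48-pixel left/right margins has
 * adjhdisplay = 1824, so crtc_x = 960 maps to
 * 960 * 1824 / 1920 + 48 = 960: a centered plane stays centered.
 */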
static int vc4_plane_margins_adj(struct drm_plane_state *pstate)
{
struct vc4_plane_state *vc4_pstate = to_vc4_plane_state(pstate);
unsigned int left, right, top, bottom, adjhdisplay, adjvdisplay;
struct drm_crtc_state *crtc_state;
crtc_state = drm_atomic_get_new_crtc_state(pstate->state,
pstate->crtc);
vc4_crtc_get_margins(crtc_state, &left, &right, &top, &bottom);
if (!left && !right && !top && !bottom)
return 0;
if (left + right >= crtc_state->mode.hdisplay ||
top + bottom >= crtc_state->mode.vdisplay)
return -EINVAL;
adjhdisplay = crtc_state->mode.hdisplay - (left + right);
vc4_pstate->crtc_x = DIV_ROUND_CLOSEST(vc4_pstate->crtc_x *
adjhdisplay,
crtc_state->mode.hdisplay);
vc4_pstate->crtc_x += left;
if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - right)
vc4_pstate->crtc_x = crtc_state->mode.hdisplay - right;
adjvdisplay = crtc_state->mode.vdisplay - (top + bottom);
vc4_pstate->crtc_y = DIV_ROUND_CLOSEST(vc4_pstate->crtc_y *
adjvdisplay,
crtc_state->mode.vdisplay);
vc4_pstate->crtc_y += top;
if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - bottom)
vc4_pstate->crtc_y = crtc_state->mode.vdisplay - bottom;
vc4_pstate->crtc_w = DIV_ROUND_CLOSEST(vc4_pstate->crtc_w *
adjhdisplay,
crtc_state->mode.hdisplay);
vc4_pstate->crtc_h = DIV_ROUND_CLOSEST(vc4_pstate->crtc_h *
adjvdisplay,
crtc_state->mode.vdisplay);
if (!vc4_pstate->crtc_w || !vc4_pstate->crtc_h)
return -EINVAL;
return 0;
}
static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_gem_dma_object *bo;
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
u32 v_subsample = fb->format->vsub;
int i, ret;
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
if (!crtc_state) {
DRM_DEBUG_KMS("Invalid crtc state\n");
return -EINVAL;
}
ret = drm_atomic_helper_check_plane_state(state, crtc_state, 1,
INT_MAX, true, true);
if (ret)
return ret;
for (i = 0; i < num_planes; i++) {
bo = drm_fb_dma_get_gem_obj(fb, i);
vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
}
/*
* We don't support subpixel source positioning for scaling,
* but fractional coordinates can be generated by clipping,
* so just round for now
*/
vc4_state->src_x = DIV_ROUND_CLOSEST(state->src.x1, 1 << 16);
vc4_state->src_y = DIV_ROUND_CLOSEST(state->src.y1, 1 << 16);
vc4_state->src_w[0] = DIV_ROUND_CLOSEST(state->src.x2, 1 << 16) - vc4_state->src_x;
vc4_state->src_h[0] = DIV_ROUND_CLOSEST(state->src.y2, 1 << 16) - vc4_state->src_y;
vc4_state->crtc_x = state->dst.x1;
vc4_state->crtc_y = state->dst.y1;
vc4_state->crtc_w = state->dst.x2 - state->dst.x1;
vc4_state->crtc_h = state->dst.y2 - state->dst.y1;
ret = vc4_plane_margins_adj(state);
if (ret)
return ret;
vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
vc4_state->crtc_w);
vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
vc4_state->crtc_h);
vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
vc4_state->y_scaling[0] == VC4_SCALING_NONE);
if (num_planes > 1) {
vc4_state->is_yuv = true;
vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;
vc4_state->x_scaling[1] =
vc4_get_scaling_mode(vc4_state->src_w[1],
vc4_state->crtc_w);
vc4_state->y_scaling[1] =
vc4_get_scaling_mode(vc4_state->src_h[1],
vc4_state->crtc_h);
/* YUV conversion requires that horizontal scaling be enabled
* on the UV plane even if vc4_get_scaling_mode() returned
* VC4_SCALING_NONE (which can happen when the down-scaling
* ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
* case.
*/
if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
vc4_state->x_scaling[1] = VC4_SCALING_PPF;
} else {
vc4_state->is_yuv = false;
vc4_state->x_scaling[1] = VC4_SCALING_NONE;
vc4_state->y_scaling[1] = VC4_SCALING_NONE;
}
return 0;
}
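/*
 * Both the TPZ and PPF blocks take the scaling factor as a 16.16
 * fixed-point src/dst ratio. For instance, a 2:1 down-scale
 * (src == 2 * dst) yields scale = 0x20000, and the approximated
 * reciprocal ~0 / 0x20000 = 0x7fff.
 */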
static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
u32 scale, recip;
scale = (1 << 16) * src / dst;
/* The specs note that while the reciprocal would be defined
* as (1<<32)/scale, ~0 is close enough.
*/
recip = ~0 / scale;
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
}
static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
u32 scale = (1 << 16) * src / dst;
vc4_dlist_write(vc4_state,
SCALER_PPF_AGC |
VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
VC4_SET_FIELD(0, SCALER_PPF_IPHASE));
}
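/*
 * Compute how much line buffer memory (LBM) the scaler needs for this
 * plane: a per-line pixel count times a format- and filter-dependent
 * multiplier, aligned and converted from bytes to LBM words.
 */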
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
u32 pix_per_line;
u32 lbm;
/* LBM is not needed when there's no vertical scaling. */
if (vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
vc4_state->y_scaling[1] == VC4_SCALING_NONE)
return 0;
/*
* This can be further optimized in the RGB/YUV444 case if the PPF
* decimation factor is between 0.5 and 1.0 by using crtc_w.
*
* It's not an issue though, since in that case src_w[0] is going
* to be greater than or equal to crtc_w.
*/
if (vc4_state->x_scaling[0] == VC4_SCALING_TPZ)
pix_per_line = vc4_state->crtc_w;
else
pix_per_line = vc4_state->src_w[0];
if (!vc4_state->is_yuv) {
if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
lbm = pix_per_line * 8;
else {
/* In special cases, this multiplier might be 12. */
lbm = pix_per_line * 16;
}
} else {
/* There are cases for this going down to a multiplier
* of 2, but according to the firmware source, the
* table in the docs is somewhat wrong.
*/
lbm = pix_per_line * 16;
}
/* Align it to 64 or 128 (hvs5) bytes */
lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64);
/* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
lbm /= vc4->is_vc5 ? 4 : 2;
return lbm;
}
static void vc4_write_scaling_parameters(struct drm_plane_state *state,
int channel)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
/* Ch0 H-PPF Word 0: Scaling Parameters */
if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
vc4_write_ppf(vc4_state,
vc4_state->src_w[channel], vc4_state->crtc_w);
}
/* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */
if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) {
vc4_write_ppf(vc4_state,
vc4_state->src_h[channel], vc4_state->crtc_h);
vc4_dlist_write(vc4_state, 0xc0c0c0c0);
}
/* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */
if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) {
vc4_write_tpz(vc4_state,
vc4_state->src_w[channel], vc4_state->crtc_w);
}
/* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */
if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) {
vc4_write_tpz(vc4_state,
vc4_state->src_h[channel], vc4_state->crtc_h);
vc4_dlist_write(vc4_state, 0xc0c0c0c0);
}
}
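/*
 * As a worked example of the load figures computed below: an unscaled
 * 1920x1080 ARGB8888 plane at 60 Hz contributes a membus load of
 * 1920 * 1080 * 4 * 60 = ~498 MB/s and, with the 4 pixels/cycle path
 * (hvs_load_shift = 2), an HVS load of (1920 * 1080 * 60) >> 2 =
 * ~31.1 Mcycles/s.
 */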
static void vc4_plane_calc_load(struct drm_plane_state *state)
{
unsigned int hvs_load_shift, vrefresh, i;
struct drm_framebuffer *fb = state->fb;
struct vc4_plane_state *vc4_state;
struct drm_crtc_state *crtc_state;
unsigned int vscale_factor;
vc4_state = to_vc4_plane_state(state);
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode);
/* The HVS is able to process 2 pixels/cycle when scaling the source,
* 4 pixels/cycle otherwise.
* The alpha blending step seems to be pipelined and always operates
* at 4 pixels/cycle, so the limiting factor here seems to be the
* scaler block.
* HVS load is expressed in clk-cycles/sec (AKA Hz).
*/
if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
vc4_state->y_scaling[1] != VC4_SCALING_NONE)
hvs_load_shift = 1;
else
hvs_load_shift = 2;
vc4_state->membus_load = 0;
vc4_state->hvs_load = 0;
for (i = 0; i < fb->format->num_planes; i++) {
/* Even if the bandwidth/plane required for a single frame is
*
* vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh
*
* when downscaling, we have to read more pixels per line in
* the time frame reserved for a single line, so the bandwidth
* demand can be transiently higher. To account for that, we
* calculate the down-scaling factor and multiply the plane
* load by this number. We're likely over-estimating the read
* demand, but that's better than under-estimating it.
*/
vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i],
vc4_state->crtc_h);
vc4_state->membus_load += vc4_state->src_w[i] *
vc4_state->src_h[i] * vscale_factor *
fb->format->cpp[i];
vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w;
}
vc4_state->hvs_load *= vrefresh;
vc4_state->hvs_load >>= hvs_load_shift;
vc4_state->membus_load *= vrefresh;
}
static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
unsigned long irqflags;
u32 lbm_size;
lbm_size = vc4_lbm_size(state);
if (!lbm_size)
return 0;
if (WARN_ON(!vc4_state->lbm_offset))
return -EINVAL;
/* Allocate the LBM memory that the HVS will use for temporary
* storage due to our scaling/format conversion.
*/
if (!drm_mm_node_allocated(&vc4_state->lbm)) {
int ret;
spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
&vc4_state->lbm,
lbm_size,
vc4->is_vc5 ? 64 : 32,
0, 0);
spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
if (ret)
return ret;
} else {
WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
}
vc4_state->dlist[vc4_state->lbm_offset] = vc4_state->lbm.start;
return 0;
}
/*
* The colorspace conversion matrices are held in 3 entries in the dlist.
* Create an array of them, with entries for each full and limited mode, and
* each supported colorspace.
*/
static const u32 colorspace_coeffs[2][DRM_COLOR_ENCODING_MAX][3] = {
{
/* Limited range */
{
/* BT601 */
SCALER_CSC0_ITR_R_601_5,
SCALER_CSC1_ITR_R_601_5,
SCALER_CSC2_ITR_R_601_5,
}, {
/* BT709 */
SCALER_CSC0_ITR_R_709_3,
SCALER_CSC1_ITR_R_709_3,
SCALER_CSC2_ITR_R_709_3,
}, {
/* BT2020 */
SCALER_CSC0_ITR_R_2020,
SCALER_CSC1_ITR_R_2020,
SCALER_CSC2_ITR_R_2020,
}
}, {
/* Full range */
{
/* JFIF */
SCALER_CSC0_JPEG_JFIF,
SCALER_CSC1_JPEG_JFIF,
SCALER_CSC2_JPEG_JFIF,
}, {
/* BT709 */
SCALER_CSC0_ITR_R_709_3_FR,
SCALER_CSC1_ITR_R_709_3_FR,
SCALER_CSC2_ITR_R_709_3_FR,
}, {
/* BT2020 */
SCALER_CSC0_ITR_R_2020_FR,
SCALER_CSC1_ITR_R_2020_FR,
SCALER_CSC2_ITR_R_2020_FR,
}
}
};
static u32 vc4_hvs4_get_alpha_blend_mode(struct drm_plane_state *state)
{
if (!state->fb->format->has_alpha)
return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_FIXED,
SCALER_POS2_ALPHA_MODE);
switch (state->pixel_blend_mode) {
case DRM_MODE_BLEND_PIXEL_NONE:
return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_FIXED,
SCALER_POS2_ALPHA_MODE);
default:
case DRM_MODE_BLEND_PREMULTI:
return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_PIPELINE,
SCALER_POS2_ALPHA_MODE) |
SCALER_POS2_ALPHA_PREMULT;
case DRM_MODE_BLEND_COVERAGE:
return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_PIPELINE,
SCALER_POS2_ALPHA_MODE);
}
}
static u32 vc4_hvs5_get_alpha_blend_mode(struct drm_plane_state *state)
{
if (!state->fb->format->has_alpha)
return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
SCALER5_CTL2_ALPHA_MODE);
switch (state->pixel_blend_mode) {
case DRM_MODE_BLEND_PIXEL_NONE:
return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
SCALER5_CTL2_ALPHA_MODE);
default:
case DRM_MODE_BLEND_PREMULTI:
return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
SCALER5_CTL2_ALPHA_MODE) |
SCALER5_CTL2_ALPHA_PREMULT;
case DRM_MODE_BLEND_COVERAGE:
return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
SCALER5_CTL2_ALPHA_MODE);
}
}
/* Writes out a full display list for an active plane to the plane's
* private dlist state.
*/
static int vc4_plane_mode_set(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
u32 ctl0_offset = vc4_state->dlist_count;
const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
int num_planes = fb->format->num_planes;
u32 h_subsample = fb->format->hsub;
u32 v_subsample = fb->format->vsub;
bool mix_plane_alpha;
bool covers_screen;
u32 scl0, scl1, pitch0;
u32 tiling, src_y;
u32 hvs_format = format->hvs;
unsigned int rotation;
int ret, i;
if (vc4_state->dlist_initialized)
return 0;
ret = vc4_plane_setup_clipping_and_scaling(state);
if (ret)
return ret;
/* SCL1 is used for Cb/Cr scaling of planar formats. For RGB
* and 4:4:4, scl1 should be set to scl0 so both channels of
* the scaler do the same thing. For YUV, the Y plane needs
* to be put in channel 1 and Cb/Cr in channel 0, so we swap
* the scl fields here.
*/
if (num_planes == 1) {
scl0 = vc4_get_scl_field(state, 0);
scl1 = scl0;
} else {
scl0 = vc4_get_scl_field(state, 1);
scl1 = vc4_get_scl_field(state, 0);
}
rotation = drm_rotation_simplify(state->rotation,
DRM_MODE_ROTATE_0 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
/* We must point to the last line when Y reflection is enabled. */
src_y = vc4_state->src_y;
if (rotation & DRM_MODE_REFLECT_Y)
src_y += vc4_state->src_h[0] - 1;
switch (base_format_mod) {
case DRM_FORMAT_MOD_LINEAR:
tiling = SCALER_CTL0_TILING_LINEAR;
pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
/* Adjust the base pointer to the first pixel to be scanned
* out.
*/
for (i = 0; i < num_planes; i++) {
vc4_state->offsets[i] += src_y /
(i ? v_subsample : 1) *
fb->pitches[i];
vc4_state->offsets[i] += vc4_state->src_x /
(i ? h_subsample : 1) *
fb->format->cpp[i];
}
break;
case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: {
u32 tile_size_shift = 12; /* T tiles are 4kb */
/* Whole-tile offsets, mostly for setting the pitch. */
u32 tile_w_shift = fb->format->cpp[0] == 2 ? 6 : 5;
u32 tile_h_shift = 5; /* 16 and 32bpp are 32 pixels high */
u32 tile_w_mask = (1 << tile_w_shift) - 1;
/* The height mask on 32-bit-per-pixel tiles is 63, i.e. twice
* the height (in pixels) of a 4k tile.
*/
u32 tile_h_mask = (2 << tile_h_shift) - 1;
/* For T-tiled, the FB pitch is "how many bytes from one row to
* the next", such that
*
* pitch * tile_h == tile_size * tiles_per_row
*/
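/* Worked example (illustrative): a 1920-wide 32bpp FB has
* pitch = 1920 * 4 = 7680 bytes, so tiles_w below is
* 7680 >> (12 - 5) = 60, matching 1920 pixels / 32 pixels per tile.
*/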
u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);
u32 tiles_l = vc4_state->src_x >> tile_w_shift;
u32 tiles_r = tiles_w - tiles_l;
u32 tiles_t = src_y >> tile_h_shift;
/* Intra-tile offsets, which modify the base address (the
* SCALER_PITCH0_TILE_Y_OFFSET tells HVS how to walk from that
* base address).
*/
u32 tile_y = (src_y >> 4) & 1;
u32 subtile_y = (src_y >> 2) & 3;
u32 utile_y = src_y & 3;
u32 x_off = vc4_state->src_x & tile_w_mask;
u32 y_off = src_y & tile_h_mask;
/* When Y reflection is requested we must set the
* SCALER_PITCH0_TILE_LINE_DIR flag to tell HVS that all lines
* after the initial one should be fetched in descending order,
* which makes sense since we start from the last line and go
* backward.
* It's not clear why we need y_off = max_y_off - y_off, but it's
* definitely required (presumably it's also related to the "going
* backward" situation).
*/
if (rotation & DRM_MODE_REFLECT_Y) {
y_off = tile_h_mask - y_off;
pitch0 = SCALER_PITCH0_TILE_LINE_DIR;
} else {
pitch0 = 0;
}
tiling = SCALER_CTL0_TILING_256B_OR_T;
pitch0 |= (VC4_SET_FIELD(x_off, SCALER_PITCH0_SINK_PIX) |
VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) |
VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) |
VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R));
vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift);
vc4_state->offsets[0] += subtile_y << 8;
vc4_state->offsets[0] += utile_y << 4;
/* Rows of tiles alternate left-to-right and right-to-left. */
if (tiles_t & 1) {
pitch0 |= SCALER_PITCH0_TILE_INITIAL_LINE_DIR;
vc4_state->offsets[0] += (tiles_w - tiles_l) <<
tile_size_shift;
vc4_state->offsets[0] -= (1 + !tile_y) << 10;
} else {
vc4_state->offsets[0] += tiles_l << tile_size_shift;
vc4_state->offsets[0] += tile_y << 10;
}
break;
}
case DRM_FORMAT_MOD_BROADCOM_SAND64:
case DRM_FORMAT_MOD_BROADCOM_SAND128:
case DRM_FORMAT_MOD_BROADCOM_SAND256: {
uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
if (param > SCALER_TILE_HEIGHT_MASK) {
DRM_DEBUG_KMS("SAND height too large (%d)\n",
param);
return -EINVAL;
}
if (fb->format->format == DRM_FORMAT_P030) {
hvs_format = HVS_PIXEL_FORMAT_YCBCR_10BIT;
tiling = SCALER_CTL0_TILING_128B;
} else {
hvs_format = HVS_PIXEL_FORMAT_H264;
switch (base_format_mod) {
case DRM_FORMAT_MOD_BROADCOM_SAND64:
tiling = SCALER_CTL0_TILING_64B;
break;
case DRM_FORMAT_MOD_BROADCOM_SAND128:
tiling = SCALER_CTL0_TILING_128B;
break;
case DRM_FORMAT_MOD_BROADCOM_SAND256:
tiling = SCALER_CTL0_TILING_256B_OR_T;
break;
default:
return -EINVAL;
}
}
/* Adjust the base pointer to the first pixel to be scanned
* out.
*
* For P030, y_ptr [31:4] is the 128bit word for the start pixel
* y_ptr [3:0] is the pixel (0-11) contained within that 128bit
* word that should be taken as the first pixel.
* Ditto uv_ptr [31:4] vs [3:0], except that [3:0] contains the
* element within the 128-bit word; Cb/Cr are interleaved, so for
* pixel 3 the value should be 6.
*/
for (i = 0; i < num_planes; i++) {
u32 tile_w, tile, x_off, pix_per_tile;
if (fb->format->format == DRM_FORMAT_P030) {
/*
* Spec says: bits [31:4] of the given address
* should point to the 128-bit word containing
* the desired starting pixel, and bits[3:0]
* should be between 0 and 11, indicating which
* of the 12-pixels in that 128-bit word is the
* first pixel to be used
*/
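/* Worked example (illustrative): src_x = 109 gives
* remaining_pixels = 109 % 96 = 13, aligned = 1 and
* last_bits = 1, so x_off = 1 * 16 + 1 = 17: the second
* 128-bit word in the column, pixel index 1.
*/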
u32 remaining_pixels = vc4_state->src_x % 96;
u32 aligned = remaining_pixels / 12;
u32 last_bits = remaining_pixels % 12;
x_off = aligned * 16 + last_bits;
tile_w = 128;
pix_per_tile = 96;
} else {
switch (base_format_mod) {
case DRM_FORMAT_MOD_BROADCOM_SAND64:
tile_w = 64;
break;
case DRM_FORMAT_MOD_BROADCOM_SAND128:
tile_w = 128;
break;
case DRM_FORMAT_MOD_BROADCOM_SAND256:
tile_w = 256;
break;
default:
return -EINVAL;
}
pix_per_tile = tile_w / fb->format->cpp[0];
x_off = (vc4_state->src_x % pix_per_tile) /
(i ? h_subsample : 1) *
fb->format->cpp[i];
}
tile = vc4_state->src_x / pix_per_tile;
vc4_state->offsets[i] += param * tile_w * tile;
vc4_state->offsets[i] += src_y /
(i ? v_subsample : 1) *
tile_w;
vc4_state->offsets[i] += x_off & ~(i ? 1 : 0);
}
pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
break;
}
default:
DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
(long long)fb->modifier);
return -EINVAL;
}
/* Don't waste cycles mixing with plane alpha if the set alpha
* is opaque or there is no per-pixel alpha information.
* In any case we use the alpha property value as the fixed alpha.
*/
mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
fb->format->has_alpha;
if (!vc4->is_vc5) {
/* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
(rotation & DRM_MODE_REFLECT_X ? SCALER_CTL0_HFLIP : 0) |
(rotation & DRM_MODE_REFLECT_Y ? SCALER_CTL0_VFLIP : 0) |
VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
(hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
(vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1));
/* Position Word 0: Image Positions and Alpha Value */
vc4_state->pos0_offset = vc4_state->dlist_count;
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) |
VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));
/* Position Word 1: Scaled Image Dimensions. */
if (!vc4_state->is_unity) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(vc4_state->crtc_w,
SCALER_POS1_SCL_WIDTH) |
VC4_SET_FIELD(vc4_state->crtc_h,
SCALER_POS1_SCL_HEIGHT));
}
/* Position Word 2: Source Image Size, Alpha */
vc4_state->pos2_offset = vc4_state->dlist_count;
vc4_dlist_write(vc4_state,
(mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
vc4_hvs4_get_alpha_blend_mode(state) |
VC4_SET_FIELD(vc4_state->src_w[0],
SCALER_POS2_WIDTH) |
VC4_SET_FIELD(vc4_state->src_h[0],
SCALER_POS2_HEIGHT));
/* Position Word 3: Context. Written by the HVS. */
vc4_dlist_write(vc4_state, 0xc0c0c0c0);
} else {
/* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
(format->pixel_order_hvs5 << SCALER_CTL0_ORDER_SHIFT) |
(hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
(vc4_state->is_unity ?
SCALER5_CTL0_UNITY : 0) |
VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1) |
SCALER5_CTL0_ALPHA_EXPAND |
SCALER5_CTL0_RGB_EXPAND);
/* Position Word 0: Image Positions and Alpha Value */
vc4_state->pos0_offset = vc4_state->dlist_count;
vc4_dlist_write(vc4_state,
(rotation & DRM_MODE_REFLECT_Y ?
SCALER5_POS0_VFLIP : 0) |
VC4_SET_FIELD(vc4_state->crtc_x,
SCALER_POS0_START_X) |
(rotation & DRM_MODE_REFLECT_X ?
SCALER5_POS0_HFLIP : 0) |
VC4_SET_FIELD(vc4_state->crtc_y,
SCALER5_POS0_START_Y)
);
/* Control Word 2 */
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(state->alpha >> 4,
SCALER5_CTL2_ALPHA) |
vc4_hvs5_get_alpha_blend_mode(state) |
(mix_plane_alpha ?
SCALER5_CTL2_ALPHA_MIX : 0)
);
/* Position Word 1: Scaled Image Dimensions. */
if (!vc4_state->is_unity) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(vc4_state->crtc_w,
SCALER5_POS1_SCL_WIDTH) |
VC4_SET_FIELD(vc4_state->crtc_h,
SCALER5_POS1_SCL_HEIGHT));
}
/* Position Word 2: Source Image Size */
vc4_state->pos2_offset = vc4_state->dlist_count;
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(vc4_state->src_w[0],
SCALER5_POS2_WIDTH) |
VC4_SET_FIELD(vc4_state->src_h[0],
SCALER5_POS2_HEIGHT));
/* Position Word 3: Context. Written by the HVS. */
vc4_dlist_write(vc4_state, 0xc0c0c0c0);
}
/* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers
*
* The pointers may be any byte address.
*/
vc4_state->ptr0_offset = vc4_state->dlist_count;
for (i = 0; i < num_planes; i++)
vc4_dlist_write(vc4_state, vc4_state->offsets[i]);
/* Pointer Context Word 0/1/2: Written by the HVS */
for (i = 0; i < num_planes; i++)
vc4_dlist_write(vc4_state, 0xc0c0c0c0);
/* Pitch word 0 */
vc4_dlist_write(vc4_state, pitch0);
/* Pitch word 1/2 */
for (i = 1; i < num_planes; i++) {
if (hvs_format != HVS_PIXEL_FORMAT_H264 &&
hvs_format != HVS_PIXEL_FORMAT_YCBCR_10BIT) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(fb->pitches[i],
SCALER_SRC_PITCH));
} else {
vc4_dlist_write(vc4_state, pitch0);
}
}
/* Colorspace conversion words */
if (vc4_state->is_yuv) {
enum drm_color_encoding color_encoding = state->color_encoding;
enum drm_color_range color_range = state->color_range;
const u32 *ccm;
if (color_encoding >= DRM_COLOR_ENCODING_MAX)
color_encoding = DRM_COLOR_YCBCR_BT601;
if (color_range >= DRM_COLOR_RANGE_MAX)
color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
ccm = colorspace_coeffs[color_range][color_encoding];
vc4_dlist_write(vc4_state, ccm[0]);
vc4_dlist_write(vc4_state, ccm[1]);
vc4_dlist_write(vc4_state, ccm[2]);
}
vc4_state->lbm_offset = 0;
if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
/* Reserve a slot for the LBM Base Address. The real value will
* be set when calling vc4_plane_allocate_lbm().
*/
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
vc4_state->lbm_offset = vc4_state->dlist_count;
vc4_dlist_counter_increment(vc4_state);
}
if (num_planes > 1) {
/* Emit Cb/Cr as channel 0 and Y as channel
* 1. This matches how we set up scl0/scl1
* above.
*/
vc4_write_scaling_parameters(state, 1);
}
vc4_write_scaling_parameters(state, 0);
/* If any PPF setup was done, then all the kernel
* pointers get uploaded.
*/
if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
SCALER_PPF_KERNEL_OFFSET);
/* HPPF plane 0 */
vc4_dlist_write(vc4_state, kernel);
/* VPPF plane 0 */
vc4_dlist_write(vc4_state, kernel);
/* HPPF plane 1 */
vc4_dlist_write(vc4_state, kernel);
/* VPPF plane 1 */
vc4_dlist_write(vc4_state, kernel);
}
}
vc4_state->dlist[ctl0_offset] |=
VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);
/* crtc_* are already clipped coordinates. */
covers_screen = vc4_state->crtc_x == 0 && vc4_state->crtc_y == 0 &&
vc4_state->crtc_w == state->crtc->mode.hdisplay &&
vc4_state->crtc_h == state->crtc->mode.vdisplay;
/* Background fill might be necessary when the plane could blend from
* the background, i.e. when it has per-pixel alpha content or a
* non-opaque plane alpha, or when it does not cover the entire
* screen.
*/
vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
state->alpha != DRM_BLEND_ALPHA_OPAQUE;
/* Flag the dlist as initialized to avoid checking it twice in case
* the async update check already called vc4_plane_mode_set() and
* decided to fall back to a sync update because an async update was
* not possible.
*/
vc4_state->dlist_initialized = 1;
vc4_plane_calc_load(state);
return 0;
}
/* If a modeset involves changing the setup of a plane, the atomic
* infrastructure will call this to validate a proposed plane setup.
* However, if a plane isn't getting updated, this (and the
* corresponding vc4_plane_atomic_update) won't get called. Thus, we
* compute the dlist here and have all active plane dlists get updated
* in the CRTC's flush.
*/
static int vc4_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(new_plane_state);
int ret;
vc4_state->dlist_count = 0;
if (!plane_enabled(new_plane_state))
return 0;
ret = vc4_plane_mode_set(plane, new_plane_state);
if (ret)
return ret;
return vc4_plane_allocate_lbm(new_plane_state);
}
static void vc4_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
/* No contents here. Since we don't know where in the CRTC's
* dlist we should be stored, our dlist is uploaded to the
* hardware with vc4_plane_write_dlist() at CRTC atomic_flush
* time.
*/
}
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
int i;
int idx;
if (!drm_dev_enter(plane->dev, &idx))
goto out;
vc4_state->hw_dlist = dlist;
/* Can't memcpy_toio() because it needs to be 32-bit writes. */
for (i = 0; i < vc4_state->dlist_count; i++)
writel(vc4_state->dlist[i], &dlist[i]);
drm_dev_exit(idx);
out:
return vc4_state->dlist_count;
}
u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
{
const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
return vc4_state->dlist_count;
}
/* Updates the plane to immediately (well, once the FIFO needs
* refilling) scan out from a new framebuffer.
*/
void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
uint32_t addr;
int idx;
if (!drm_dev_enter(plane->dev, &idx))
return;
/* We're skipping the address adjustment for negative origin,
* because this is only called on the primary plane.
*/
WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
addr = bo->dma_addr + fb->offsets[0];
/* Write the new address into the hardware immediately. The
* scanout will start from this address as soon as the FIFO
* needs to refill with pixels.
*/
writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
/* Also update the CPU-side dlist copy, so that any later
* atomic updates that don't do a new modeset on our plane
* also use our updated address.
*/
vc4_state->dlist[vc4_state->ptr0_offset] = addr;
drm_dev_exit(idx);
}
static void vc4_plane_atomic_async_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct vc4_plane_state *vc4_state, *new_vc4_state;
int idx;
if (!drm_dev_enter(plane->dev, &idx))
return;
swap(plane->state->fb, new_plane_state->fb);
plane->state->crtc_x = new_plane_state->crtc_x;
plane->state->crtc_y = new_plane_state->crtc_y;
plane->state->crtc_w = new_plane_state->crtc_w;
plane->state->crtc_h = new_plane_state->crtc_h;
plane->state->src_x = new_plane_state->src_x;
plane->state->src_y = new_plane_state->src_y;
plane->state->src_w = new_plane_state->src_w;
plane->state->src_h = new_plane_state->src_h;
plane->state->alpha = new_plane_state->alpha;
plane->state->pixel_blend_mode = new_plane_state->pixel_blend_mode;
plane->state->rotation = new_plane_state->rotation;
plane->state->zpos = new_plane_state->zpos;
plane->state->normalized_zpos = new_plane_state->normalized_zpos;
plane->state->color_encoding = new_plane_state->color_encoding;
plane->state->color_range = new_plane_state->color_range;
plane->state->src = new_plane_state->src;
plane->state->dst = new_plane_state->dst;
plane->state->visible = new_plane_state->visible;
new_vc4_state = to_vc4_plane_state(new_plane_state);
vc4_state = to_vc4_plane_state(plane->state);
vc4_state->crtc_x = new_vc4_state->crtc_x;
vc4_state->crtc_y = new_vc4_state->crtc_y;
vc4_state->crtc_h = new_vc4_state->crtc_h;
vc4_state->crtc_w = new_vc4_state->crtc_w;
vc4_state->src_x = new_vc4_state->src_x;
vc4_state->src_y = new_vc4_state->src_y;
memcpy(vc4_state->src_w, new_vc4_state->src_w,
sizeof(vc4_state->src_w));
memcpy(vc4_state->src_h, new_vc4_state->src_h,
sizeof(vc4_state->src_h));
memcpy(vc4_state->x_scaling, new_vc4_state->x_scaling,
sizeof(vc4_state->x_scaling));
memcpy(vc4_state->y_scaling, new_vc4_state->y_scaling,
sizeof(vc4_state->y_scaling));
vc4_state->is_unity = new_vc4_state->is_unity;
vc4_state->is_yuv = new_vc4_state->is_yuv;
memcpy(vc4_state->offsets, new_vc4_state->offsets,
sizeof(vc4_state->offsets));
vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill;
/* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
vc4_state->dlist[vc4_state->pos0_offset] =
new_vc4_state->dlist[vc4_state->pos0_offset];
vc4_state->dlist[vc4_state->pos2_offset] =
new_vc4_state->dlist[vc4_state->pos2_offset];
vc4_state->dlist[vc4_state->ptr0_offset] =
new_vc4_state->dlist[vc4_state->ptr0_offset];
/* Note that we can't just call vc4_plane_write_dlist()
* because that would smash the context data that the HVS is
* currently using.
*/
writel(vc4_state->dlist[vc4_state->pos0_offset],
&vc4_state->hw_dlist[vc4_state->pos0_offset]);
writel(vc4_state->dlist[vc4_state->pos2_offset],
&vc4_state->hw_dlist[vc4_state->pos2_offset]);
writel(vc4_state->dlist[vc4_state->ptr0_offset],
&vc4_state->hw_dlist[vc4_state->ptr0_offset]);
drm_dev_exit(idx);
}
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct vc4_plane_state *old_vc4_state, *new_vc4_state;
int ret;
u32 i;
ret = vc4_plane_mode_set(plane, new_plane_state);
if (ret)
return ret;
old_vc4_state = to_vc4_plane_state(plane->state);
new_vc4_state = to_vc4_plane_state(new_plane_state);
if (!new_vc4_state->hw_dlist)
return -EINVAL;
if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset ||
vc4_lbm_size(plane->state) != vc4_lbm_size(new_plane_state))
return -EINVAL;
/* Only the pos0, pos2 and ptr0 DWORDS can be updated in an async
* update; if anything else has changed, fall back to a sync update.
*/
for (i = 0; i < new_vc4_state->dlist_count; i++) {
if (i == new_vc4_state->pos0_offset ||
i == new_vc4_state->pos2_offset ||
i == new_vc4_state->ptr0_offset ||
(new_vc4_state->lbm_offset &&
i == new_vc4_state->lbm_offset))
continue;
if (new_vc4_state->dlist[i] != old_vc4_state->dlist[i])
return -EINVAL;
}
return 0;
}
static int vc4_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_bo *bo;
if (!state->fb)
return 0;
bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
drm_gem_plane_helper_prepare_fb(plane, state);
if (plane->state->fb == state->fb)
return 0;
return vc4_bo_inc_usecnt(bo);
}
static void vc4_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_bo *bo;
if (plane->state->fb == state->fb || !state->fb)
return;
bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
vc4_bo_dec_usecnt(bo);
}
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
.atomic_check = vc4_plane_atomic_check,
.atomic_update = vc4_plane_atomic_update,
.prepare_fb = vc4_prepare_fb,
.cleanup_fb = vc4_cleanup_fb,
.atomic_async_check = vc4_plane_atomic_async_check,
.atomic_async_update = vc4_plane_atomic_async_update,
};
static const struct drm_plane_helper_funcs vc5_plane_helper_funcs = {
.atomic_check = vc4_plane_atomic_check,
.atomic_update = vc4_plane_atomic_update,
.atomic_async_check = vc4_plane_atomic_async_check,
.atomic_async_update = vc4_plane_atomic_async_update,
};
static bool vc4_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
{
/* Support T_TILING for RGB formats only. */
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
switch (fourcc_mod_broadcom_mod(modifier)) {
case DRM_FORMAT_MOD_LINEAR:
case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
return true;
default:
return false;
}
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
switch (fourcc_mod_broadcom_mod(modifier)) {
case DRM_FORMAT_MOD_LINEAR:
case DRM_FORMAT_MOD_BROADCOM_SAND64:
case DRM_FORMAT_MOD_BROADCOM_SAND128:
case DRM_FORMAT_MOD_BROADCOM_SAND256:
return true;
default:
return false;
}
case DRM_FORMAT_P030:
switch (fourcc_mod_broadcom_mod(modifier)) {
case DRM_FORMAT_MOD_BROADCOM_SAND128:
return true;
default:
return false;
}
case DRM_FORMAT_RGBX1010102:
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_BGRA1010102:
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_XBGR4444:
case DRM_FORMAT_ABGR4444:
case DRM_FORMAT_RGBX4444:
case DRM_FORMAT_RGBA4444:
case DRM_FORMAT_BGRX4444:
case DRM_FORMAT_BGRA4444:
case DRM_FORMAT_RGB332:
case DRM_FORMAT_BGR233:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
default:
return (modifier == DRM_FORMAT_MOD_LINEAR);
}
}
static const struct drm_plane_funcs vc4_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = vc4_plane_reset,
.atomic_duplicate_state = vc4_plane_duplicate_state,
.atomic_destroy_state = vc4_plane_destroy_state,
.format_mod_supported = vc4_format_mod_supported,
};
struct drm_plane *vc4_plane_init(struct drm_device *dev,
enum drm_plane_type type,
uint32_t possible_crtcs)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane;
struct vc4_plane *vc4_plane;
u32 formats[ARRAY_SIZE(hvs_formats)];
int num_formats = 0;
unsigned i;
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
DRM_FORMAT_MOD_BROADCOM_SAND128,
DRM_FORMAT_MOD_BROADCOM_SAND64,
DRM_FORMAT_MOD_BROADCOM_SAND256,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
formats[num_formats] = hvs_formats[i].drm;
num_formats++;
}
}
vc4_plane = drmm_universal_plane_alloc(dev, struct vc4_plane, base,
possible_crtcs,
&vc4_plane_funcs,
formats, num_formats,
modifiers, type, NULL);
if (IS_ERR(vc4_plane))
return ERR_CAST(vc4_plane);
plane = &vc4_plane->base;
if (vc4->is_vc5)
drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
else
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
drm_plane_create_alpha_property(plane);
drm_plane_create_blend_mode_property(plane,
BIT(DRM_MODE_BLEND_PIXEL_NONE) |
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
drm_plane_create_color_properties(plane,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709) |
BIT(DRM_COLOR_YCBCR_BT2020),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE),
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
if (type == DRM_PLANE_TYPE_PRIMARY)
drm_plane_create_zpos_immutable_property(plane, 0);
return plane;
}
#define VC4_NUM_OVERLAY_PLANES 16
int vc4_plane_create_additional_planes(struct drm_device *drm)
{
struct drm_plane *cursor_plane;
struct drm_crtc *crtc;
unsigned int i;
/* Set up some arbitrary number of planes. We're not limited
* by a set number of physical registers, just the space in
* the HVS (16k) and how small a plane can be (28 bytes).
* However, each plane we set up takes up some memory, and
* increases the cost of looping over planes, which atomic
* modesetting does quite a bit. As a result, we pick a
* modest number of planes to expose, which should hopefully
* still cover any sane usecase.
*/
for (i = 0; i < VC4_NUM_OVERLAY_PLANES; i++) {
struct drm_plane *plane =
vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY,
GENMASK(drm->mode_config.num_crtc - 1, 0));
if (IS_ERR(plane))
continue;
/* Create zpos property. Max of all the overlays + 1 primary +
* 1 cursor plane on a crtc.
*/
drm_plane_create_zpos_property(plane, i + 1, 1,
VC4_NUM_OVERLAY_PLANES + 1);
}
drm_for_each_crtc(crtc, drm) {
/* Set up the legacy cursor after overlay initialization,
* since the zpos fallback is that planes are rendered by plane
* ID order, which then puts the cursor on top.
*/
cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR,
drm_crtc_mask(crtc));
if (!IS_ERR(cursor_plane)) {
crtc->cursor = cursor_plane;
drm_plane_create_zpos_property(cursor_plane,
VC4_NUM_OVERLAY_PLANES + 1,
1,
VC4_NUM_OVERLAY_PLANES + 1);
}
}
return 0;
}
| linux-master | drivers/gpu/drm/vc4/vc4_plane.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Broadcom
*/
/**
* DOC: VC4 CRTC module
*
* In VC4, the Pixel Valve is what most closely corresponds to the
* DRM's concept of a CRTC. The PV generates video timings from the
* encoder's clock plus its configuration. It pulls scaled pixels from
* the HVS at that timing, and feeds them to the encoder.
*
* However, the DRM CRTC also collects the configuration of all the
* DRM planes attached to it. As a result, the CRTC is also
* responsible for writing the display list for the HVS channel that
* the CRTC will use.
*
* The 2835 has 3 different pixel valves. pv0 in the audio power
* domain feeds DSI0 or DPI, while pv1 feeds DSI1 or SMI. pv2 in the
* image domain can feed either HDMI or the SDTV controller. The
* pixel valve chooses from the CPRMAN clocks (HSM for HDMI, VEC for
* SDTV, etc.) according to which output type is chosen in the mux.
*
* For power management, the pixel valve's registers are all clocked
* by the AXI clock, while the timings and FIFOs make use of the
* output-specific clock. Since the encoders also directly consume
* the CPRMAN clocks, and know what timings they need, they are the
* ones that set the clock.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "vc4_drv.h"
#include "vc4_hdmi.h"
#include "vc4_regs.h"
#define HVS_FIFO_LATENCY_PIX 6
#define CRTC_WRITE(offset, val) \
do { \
kunit_fail_current_test("Accessing a register in a unit test!\n"); \
writel(val, vc4_crtc->regs + (offset)); \
} while (0)
#define CRTC_READ(offset) \
({ \
kunit_fail_current_test("Accessing a register in a unit test!\n"); \
readl(vc4_crtc->regs + (offset)); \
})
static const struct debugfs_reg32 crtc_regs[] = {
VC4_REG32(PV_CONTROL),
VC4_REG32(PV_V_CONTROL),
VC4_REG32(PV_VSYNCD_EVEN),
VC4_REG32(PV_HORZA),
VC4_REG32(PV_HORZB),
VC4_REG32(PV_VERTA),
VC4_REG32(PV_VERTB),
VC4_REG32(PV_VERTA_EVEN),
VC4_REG32(PV_VERTB_EVEN),
VC4_REG32(PV_INTEN),
VC4_REG32(PV_INTSTAT),
VC4_REG32(PV_STAT),
VC4_REG32(PV_HACT_ACT),
};
static unsigned int
vc4_crtc_get_cob_allocation(struct vc4_dev *vc4, unsigned int channel)
{
struct vc4_hvs *hvs = vc4->hvs;
u32 dispbase = HVS_READ(SCALER_DISPBASEX(channel));
/* Top/base are supposed to be 4-pixel aligned, but the
* Raspberry Pi firmware fills the low bits (which are
* presumably ignored).
*/
u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
return top - base + 4;
}
static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
bool in_vblank_irq,
int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
unsigned int cob_size;
u32 val;
int fifo_lines;
int vblank_lines;
bool ret = false;
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
/* Get optional system timestamp before query. */
if (stime)
*stime = ktime_get();
/*
* Read vertical scanline which is currently composed for our
* pixelvalve by the HVS, and also the scaler status.
*/
val = HVS_READ(SCALER_DISPSTATX(vc4_crtc_state->assigned_channel));
/* Get optional system timestamp after query. */
if (etime)
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
/* Vertical position of hvs composed scanline. */
*vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
*hpos = 0;
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
*vpos /= 2;
/* Use hpos to correct for field offset in interlaced mode. */
if (vc4_hvs_get_fifo_frame_count(hvs, vc4_crtc_state->assigned_channel) % 2)
*hpos += mode->crtc_htotal / 2;
}
cob_size = vc4_crtc_get_cob_allocation(vc4, vc4_crtc_state->assigned_channel);
/* This is the offset we need for translating hvs -> pv scanout pos. */
fifo_lines = cob_size / mode->crtc_hdisplay;
if (fifo_lines > 0)
ret = true;
/* HVS more than fifo_lines into frame for compositing? */
if (*vpos > fifo_lines) {
/*
* We are in active scanout and can get some meaningful results
* from HVS. The actual PV scanout can not trail behind more
* than fifo_lines as that is the fifo's capacity. Assume that
* in active scanout the HVS and PV work in lockstep wrt. HVS
* refilling the fifo and PV consuming from the fifo, ie.
* whenever the PV consumes and frees up a scanline in the
* fifo, the HVS will immediately refill it, therefore
* incrementing vpos. Therefore we choose HVS read position -
* fifo size in scanlines as an estimate of the real scanout
* position of the PV.
*/
*vpos -= fifo_lines + 1;
return ret;
}
/*
* Otherwise (*vpos <= fifo_lines): this happens when we are in
* vblank and the HVS, after getting the VSTART restart signal from
* the PV, just started refilling its fifo with new lines from the
* top-most lines of the new framebuffers. The PV does not scan out
* in vblank, so it does not remove lines from the fifo, so the fifo
* fills up quickly and the HVS has to pause. We can't get
* meaningful readings wrt. scanline position of the PV and need to
* make things up in an approximate but consistent way.
*/
vblank_lines = mode->vtotal - mode->vdisplay;
if (in_vblank_irq) {
/*
* Assume the irq handler got called close to the first
* line of vblank, so the PV has about a full vblank's
* worth of scanlines to go, and as a base timestamp use the
* one taken at entry into the vblank irq handler, so it
* is not affected by random delays due to lock
* contention on event_lock or vblank_time lock in
* the core.
*/
*vpos = -vblank_lines;
if (stime)
*stime = vc4_crtc->t_vblank;
if (etime)
*etime = vc4_crtc->t_vblank;
/*
* If the HVS fifo is not yet full then we know for certain
* we are at the very beginning of vblank, as the hvs just
* started refilling, and the stime and etime timestamps
* truly correspond to start of vblank.
*
* Unfortunately there's no way to report this to upper levels
* and make it more useful.
*/
} else {
/*
* No clue where we are inside vblank. Return a vpos of zero,
* which will cause calling code to just return the etime
* timestamp uncorrected. At least this is no worse than the
* standard fallback.
*/
*vpos = 0;
}
return ret;
}
static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
{
const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev);
u32 fifo_len_bytes = pv_data->fifo_depth;
/*
* Pixels are pulled from the HVS if the number of bytes is
* lower than the FIFO full level.
*
* The latency of the pixel fetch mechanism is 6 pixels, so we
* need to convert those 6 pixels into bytes, depending on the
* format, and then subtract that from the length of the FIFO
* to make sure we never end up in a situation where the FIFO
* is full.
*/
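/* Example (illustrative): for the 16bpp DSI formats below, the
* 6-pixel latency is 12 bytes, so the full level is
* fifo_depth - 12.
*/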
switch (format) {
case PV_CONTROL_FORMAT_DSIV_16:
case PV_CONTROL_FORMAT_DSIC_16:
return fifo_len_bytes - 2 * HVS_FIFO_LATENCY_PIX;
case PV_CONTROL_FORMAT_DSIV_18:
return fifo_len_bytes - 14;
case PV_CONTROL_FORMAT_24:
case PV_CONTROL_FORMAT_DSIV_24:
default:
/*
* For some reason, the pixelvalve4 doesn't work with
* the usual formula and will only work with 32.
*/
if (crtc_data->hvs_output == 5)
return 32;
/*
* It looks like in some situations, we will overflow
* the PixelValve FIFO (with bit 10 of the PV STAT register
* set) and stall the HVS / PV, eventually resulting in
* a page flip timeout.
*
* Displaying the video overlay during playback with
* Kodi on an RPi3 is a fairly reliable way to reproduce
* the issue, with a failure rate of around 50%.
*
* Removing 1 from the FIFO full level however
* seems to completely remove that issue.
*/
if (!vc4->is_vc5)
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
}
}
static u32 vc4_crtc_get_fifo_full_level_bits(struct vc4_crtc *vc4_crtc,
u32 format)
{
u32 level = vc4_get_fifo_full_level(vc4_crtc, format);
u32 ret = 0;
ret |= VC4_SET_FIELD((level >> 6),
PV5_CONTROL_FIFO_LEVEL_HIGH);
return ret | VC4_SET_FIELD(level & 0x3f,
PV_CONTROL_FIFO_LEVEL);
}
/*
* Returns the encoder attached to the CRTC.
*
* VC4 can only scan out to one encoder at a time, while the DRM core
* allows drivers to push pixels to more than one encoder from the
* same CRTC.
*/
struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct drm_encoder *encoder;
WARN_ON(hweight32(state->encoder_mask) > 1);
drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask)
return encoder;
return NULL;
}
static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_device *dev = crtc->dev;
int idx;
if (!drm_dev_enter(dev, &idx))
return;
/* The PV needs to be disabled before it can be flushed */
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) & ~PV_CONTROL_EN);
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR);
drm_dev_exit(idx);
}
static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
struct drm_crtc_state *crtc_state = crtc->state;
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
bool is_hdmi = vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0 ||
vc4_encoder->type == VC4_ENCODER_TYPE_HDMI1;
u32 pixel_rep = ((mode->flags & DRM_MODE_FLAG_DBLCLK) && !is_hdmi) ? 2 : 1;
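/* Our reading of the test above (an assumption, not from the HW
* docs): DBLCLK modes are pixel-repeated by the PV itself, except
* on HDMI where the HDMI encoder handles the repetition.
*/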
bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
vc4_encoder->type == VC4_ENCODER_TYPE_DSI1);
bool is_dsi1 = vc4_encoder->type == VC4_ENCODER_TYPE_DSI1;
bool is_vec = vc4_encoder->type == VC4_ENCODER_TYPE_VEC;
u32 format = is_dsi1 ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
u8 ppc = pv_data->pixels_per_clock;
u16 vert_bp = mode->crtc_vtotal - mode->crtc_vsync_end;
u16 vert_sync = mode->crtc_vsync_end - mode->crtc_vsync_start;
u16 vert_fp = mode->crtc_vsync_start - mode->crtc_vdisplay;
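/* Sanity example (illustrative, CEA 1080p timings): vdisplay 1080,
* vsync_start 1084, vsync_end 1089 and vtotal 1125 give vert_fp = 4,
* vert_sync = 5 and vert_bp = 36.
*/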
bool debug_dump_regs = false;
int idx;
if (!drm_dev_enter(dev, &idx))
return;
if (debug_dump_regs) {
struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n",
drm_crtc_index(crtc));
drm_print_regset32(&p, &vc4_crtc->regset);
}
vc4_crtc_pixelvalve_reset(crtc);
CRTC_WRITE(PV_HORZA,
VC4_SET_FIELD((mode->htotal - mode->hsync_end) * pixel_rep / ppc,
PV_HORZA_HBP) |
VC4_SET_FIELD((mode->hsync_end - mode->hsync_start) * pixel_rep / ppc,
PV_HORZA_HSYNC));
CRTC_WRITE(PV_HORZB,
VC4_SET_FIELD((mode->hsync_start - mode->hdisplay) * pixel_rep / ppc,
PV_HORZB_HFP) |
VC4_SET_FIELD(mode->hdisplay * pixel_rep / ppc,
PV_HORZB_HACTIVE));
if (interlace) {
bool odd_field_first = false;
u32 field_delay = mode->htotal * pixel_rep / (2 * ppc);
u16 vert_bp_even = vert_bp;
u16 vert_fp_even = vert_fp;
if (is_vec) {
/* VEC (composite output) */
++field_delay;
if (mode->htotal == 858) {
/* 525-line mode (NTSC or PAL-M) */
odd_field_first = true;
}
}
if (odd_field_first)
++vert_fp_even;
else
++vert_bp;
CRTC_WRITE(PV_VERTA_EVEN,
VC4_SET_FIELD(vert_bp_even, PV_VERTA_VBP) |
VC4_SET_FIELD(vert_sync, PV_VERTA_VSYNC));
CRTC_WRITE(PV_VERTB_EVEN,
VC4_SET_FIELD(vert_fp_even, PV_VERTB_VFP) |
VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
/* We set up first field even mode for HDMI and VEC's PAL.
* For NTSC, we need first field odd.
*/
CRTC_WRITE(PV_V_CONTROL,
PV_VCONTROL_CONTINUOUS |
(is_dsi ? PV_VCONTROL_DSI : 0) |
PV_VCONTROL_INTERLACE |
(odd_field_first
? PV_VCONTROL_ODD_FIRST
: VC4_SET_FIELD(field_delay,
PV_VCONTROL_ODD_DELAY)));
CRTC_WRITE(PV_VSYNCD_EVEN,
(odd_field_first ? field_delay : 0));
} else {
CRTC_WRITE(PV_V_CONTROL,
PV_VCONTROL_CONTINUOUS |
(is_dsi ? PV_VCONTROL_DSI : 0));
CRTC_WRITE(PV_VSYNCD_EVEN, 0);
}
CRTC_WRITE(PV_VERTA,
VC4_SET_FIELD(vert_bp, PV_VERTA_VBP) |
VC4_SET_FIELD(vert_sync, PV_VERTA_VSYNC));
CRTC_WRITE(PV_VERTB,
VC4_SET_FIELD(vert_fp, PV_VERTB_VFP) |
VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
if (is_dsi)
CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
if (vc4->is_vc5)
CRTC_WRITE(PV_MUX_CFG,
VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP,
PV_MUX_CFG_RGB_PIXEL_MUX_MODE));
CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR |
vc4_crtc_get_fifo_full_level_bits(vc4_crtc, format) |
VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
VC4_SET_FIELD(pixel_rep - 1, PV_CONTROL_PIXEL_REP) |
PV_CONTROL_CLR_AT_START |
PV_CONTROL_TRIGGER_UNDERFLOW |
PV_CONTROL_WAIT_HSTART |
VC4_SET_FIELD(vc4_encoder->clock_select,
PV_CONTROL_CLK_SELECT));
if (debug_dump_regs) {
struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs after:\n",
drm_crtc_index(crtc));
drm_print_regset32(&p, &vc4_crtc->regset);
}
drm_dev_exit(idx);
}
static void require_hvs_enabled(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
WARN_ON_ONCE((HVS_READ(SCALER_DISPCTRL) & SCALER_DISPCTRL_ENABLE) !=
SCALER_DISPCTRL_ENABLE);
}
static int vc4_crtc_disable(struct drm_crtc *crtc,
struct drm_encoder *encoder,
struct drm_atomic_state *state,
unsigned int channel)
{
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
int idx, ret;
if (!drm_dev_enter(dev, &idx))
return -ENODEV;
CRTC_WRITE(PV_V_CONTROL,
CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1);
WARN_ONCE(ret, "Timeout waiting for !PV_VCONTROL_VIDEN\n");
/*
* This delay is needed to avoid getting a pixel stuck in an
* unflushable FIFO between the pixelvalve and the HDMI
* controllers on the BCM2711.
*
* Timing is fairly sensitive here, so mdelay is the safest
* approach.
*
* If this is ever reworked: the stuck pixel happens on a
* BCM2711 when changing mode with a good probability, so a
* script that changes mode on a regular basis should trigger
* the bug after fewer than 10 attempts. It manifests itself with
* every pixel being shifted by one to the right, and thus the
* last pixel of a line actually being displayed as the first
* pixel on the next line.
*/
mdelay(20);
if (vc4_encoder && vc4_encoder->post_crtc_disable)
vc4_encoder->post_crtc_disable(encoder, state);
vc4_crtc_pixelvalve_reset(crtc);
vc4_hvs_stop_channel(vc4->hvs, channel);
if (vc4_encoder && vc4_encoder->post_crtc_powerdown)
vc4_encoder->post_crtc_powerdown(encoder, state);
drm_dev_exit(idx);
return 0;
}
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
{
struct drm_device *drm = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
enum vc4_encoder_type encoder_type;
const struct vc4_pv_data *pv_data;
struct drm_encoder *encoder;
struct vc4_hdmi *vc4_hdmi;
unsigned encoder_sel;
int channel;
int ret;
if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
"brcm,bcm2711-pixelvalve2") ||
of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
"brcm,bcm2711-pixelvalve4")))
return 0;
if (!(CRTC_READ(PV_CONTROL) & PV_CONTROL_EN))
return 0;
if (!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN))
return 0;
channel = vc4_hvs_get_fifo_from_output(vc4->hvs, vc4_crtc->data->hvs_output);
if (channel < 0)
return 0;
encoder_sel = VC4_GET_FIELD(CRTC_READ(PV_CONTROL), PV_CONTROL_CLK_SELECT);
if (WARN_ON(encoder_sel != 0))
return 0;
pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
encoder_type = pv_data->encoder_types[encoder_sel];
encoder = vc4_find_encoder_by_type(drm, encoder_type);
if (WARN_ON(!encoder))
return 0;
vc4_hdmi = encoder_to_vc4_hdmi(encoder);
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret)
return ret;
ret = vc4_crtc_disable(crtc, encoder, NULL, channel);
if (ret)
return ret;
/*
* post_crtc_powerdown will have called pm_runtime_put, so we
* don't need to call it here; otherwise we'd get the reference
* counting wrong.
*/
return 0;
}
void vc4_crtc_send_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned long flags;
if (!crtc->state || !crtc->state->event)
return;
spin_lock_irqsave(&dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, old_state);
struct drm_device *dev = crtc->dev;
drm_dbg(dev, "Disabling CRTC %s (%u) connected to Encoder %s (%u)",
crtc->name, crtc->base.id, encoder->name, encoder->base.id);
require_hvs_enabled(dev);
/* Disable vblank irq handling before crtc is disabled. */
drm_crtc_vblank_off(crtc);
vc4_crtc_disable(crtc, encoder, state, old_vc4_state->assigned_channel);
/*
* Make sure we issue a vblank event after disabling the CRTC if
* someone was waiting for it.
*/
vc4_crtc_send_vblank(crtc);
}
static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct drm_device *dev = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, new_state);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
int idx;
drm_dbg(dev, "Enabling CRTC %s (%u) connected to Encoder %s (%u)",
crtc->name, crtc->base.id, encoder->name, encoder->base.id);
if (!drm_dev_enter(dev, &idx))
return;
require_hvs_enabled(dev);
/* Enable vblank irq handling before the crtc is started, otherwise
* drm_crtc_vblank_get() fails in vc4_crtc_update_dlist().
*/
drm_crtc_vblank_on(crtc);
vc4_hvs_atomic_enable(crtc, state);
if (vc4_encoder->pre_crtc_configure)
vc4_encoder->pre_crtc_configure(encoder, state);
vc4_crtc_config_pv(crtc, encoder, state);
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN);
if (vc4_encoder->pre_crtc_enable)
vc4_encoder->pre_crtc_enable(encoder, state);
/* When feeding the transposer block, the pixelvalve is unneeded and
* should not be enabled.
*/
CRTC_WRITE(PV_V_CONTROL,
CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
if (vc4_encoder->post_crtc_enable)
vc4_encoder->post_crtc_enable(encoder, state);
drm_dev_exit(idx);
}
static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
/* Do not allow doublescan modes from user space */
if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
DRM_DEBUG_KMS("[CRTC:%d] Doublescan mode rejected.\n",
crtc->base.id);
return MODE_NO_DBLESCAN;
}
return MODE_OK;
}
void vc4_crtc_get_margins(struct drm_crtc_state *state,
unsigned int *left, unsigned int *right,
unsigned int *top, unsigned int *bottom)
{
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
*left = vc4_state->margins.left;
*right = vc4_state->margins.right;
*top = vc4_state->margins.top;
*bottom = vc4_state->margins.bottom;
/* We have to iterate over all new connector states because
* vc4_crtc_get_margins() might be called before
* vc4_crtc_atomic_check(), which means the margins info in
* vc4_crtc_state might be outdated.
*/
for_each_new_connector_in_state(state->state, conn, conn_state, i) {
if (conn_state->crtc != state->crtc)
continue;
*left = conn_state->tv.margins.left;
*right = conn_state->tv.margins.right;
*top = conn_state->tv.margins.top;
*bottom = conn_state->tv.margins.bottom;
break;
}
}
int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct drm_connector *conn;
struct drm_connector_state *conn_state;
struct drm_encoder *encoder;
int ret, i;
ret = vc4_hvs_atomic_check(crtc, state);
if (ret)
return ret;
encoder = vc4_get_crtc_encoder(crtc, crtc_state);
if (encoder) {
const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 8000,
mode->clock * 9 / 10) * 1000;
} else {
vc4_state->hvs_load = mode->clock * 1000;
}
}
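/* Example (illustrative): 1080p60 on HDMI0 with a 148500kHz mode
* clock: 148500 * 1920 / 2200 + 8000 = 137600 beats
* 148500 * 9 / 10 = 133650, so hvs_load = 137600000 (137.6MHz).
*/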
for_each_new_connector_in_state(state, conn, conn_state,
i) {
if (conn_state->crtc != crtc)
continue;
vc4_state->margins.left = conn_state->tv.margins.left;
vc4_state->margins.right = conn_state->tv.margins.right;
vc4_state->margins.top = conn_state->tv.margins.top;
vc4_state->margins.bottom = conn_state->tv.margins.bottom;
break;
}
return 0;
}
static int vc4_enable_vblank(struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_device *dev = crtc->dev;
int idx;
if (!drm_dev_enter(dev, &idx))
return -ENODEV;
CRTC_WRITE(PV_INTEN, PV_INT_VFP_START);
drm_dev_exit(idx);
return 0;
}
static void vc4_disable_vblank(struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_device *dev = crtc->dev;
int idx;
if (!drm_dev_enter(dev, &idx))
return;
CRTC_WRITE(PV_INTEN, 0);
drm_dev_exit(idx);
}
static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
{
struct drm_crtc *crtc = &vc4_crtc->base;
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
u32 chan = vc4_crtc->current_hvs_channel;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
spin_lock(&vc4_crtc->irq_lock);
if (vc4_crtc->event &&
(vc4_crtc->current_dlist == HVS_READ(SCALER_DISPLACTX(chan)) ||
vc4_crtc->feeds_txp)) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
drm_crtc_vblank_put(crtc);
/* Wait for the page flip to unmask the underrun to ensure that
* the display list was updated by the hardware. Before that
* happens, the HVS will be using the previous display list with
* the CRTC and encoder already reconfigured, leading to
* underruns. This can be seen when reconfiguring the CRTC.
*/
vc4_hvs_unmask_underrun(hvs, chan);
}
spin_unlock(&vc4_crtc->irq_lock);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc)
{
crtc->t_vblank = ktime_get();
drm_crtc_handle_vblank(&crtc->base);
vc4_crtc_handle_page_flip(crtc);
}
static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
{
struct vc4_crtc *vc4_crtc = data;
u32 stat = CRTC_READ(PV_INTSTAT);
irqreturn_t ret = IRQ_NONE;
if (stat & PV_INT_VFP_START) {
CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
vc4_crtc_handle_vblank(vc4_crtc);
ret = IRQ_HANDLED;
}
return ret;
}
struct vc4_async_flip_state {
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
struct drm_framebuffer *old_fb;
struct drm_pending_vblank_event *event;
union {
struct dma_fence_cb fence;
struct vc4_seqno_cb seqno;
} cb;
};
/* Called when the V3D execution for the BO being flipped to is done, so that
* we can actually update the plane's address to point to it.
*/
static void
vc4_async_page_flip_complete(struct vc4_async_flip_state *flip_state)
{
struct drm_crtc *crtc = flip_state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_plane *plane = crtc->primary;
vc4_plane_async_set_fb(plane, flip_state->fb);
if (flip_state->event) {
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, flip_state->event);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
drm_crtc_vblank_put(crtc);
drm_framebuffer_put(flip_state->fb);
if (flip_state->old_fb)
drm_framebuffer_put(flip_state->old_fb);
kfree(flip_state);
}
static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
{
struct vc4_async_flip_state *flip_state =
container_of(cb, struct vc4_async_flip_state, cb.seqno);
struct vc4_bo *bo = NULL;
if (flip_state->old_fb) {
struct drm_gem_dma_object *dma_bo =
drm_fb_dma_get_gem_obj(flip_state->old_fb, 0);
bo = to_vc4_bo(&dma_bo->base);
}
vc4_async_page_flip_complete(flip_state);
/*
* Decrement the BO usecnt in order to keep the inc/dec
* calls balanced when the planes are updated through
* the async update path.
*
* FIXME: we should move to generic async-page-flip when
* it's available, so that we can get rid of this
* hand-made cleanup_fb() logic.
*/
if (bo)
vc4_bo_dec_usecnt(bo);
}
static void vc4_async_page_flip_fence_complete(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
struct vc4_async_flip_state *flip_state =
container_of(cb, struct vc4_async_flip_state, cb.fence);
vc4_async_page_flip_complete(flip_state);
dma_fence_put(fence);
}
static int vc4_async_set_fence_cb(struct drm_device *dev,
struct vc4_async_flip_state *flip_state)
{
struct drm_framebuffer *fb = flip_state->fb;
struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct dma_fence *fence;
int ret;
if (!vc4->is_vc5) {
struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
vc4_async_page_flip_seqno_complete);
}
ret = dma_resv_get_singleton(dma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
if (ret)
return ret;
/* If there's no fence, complete the page flip immediately */
if (!fence) {
vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
return 0;
}
/* If the fence has already signaled, complete the page flip now */
if (dma_fence_add_callback(fence, &flip_state->cb.fence,
vc4_async_page_flip_fence_complete))
vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
return 0;
}
static int
vc4_async_page_flip_common(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags)
{
struct drm_device *dev = crtc->dev;
struct drm_plane *plane = crtc->primary;
struct vc4_async_flip_state *flip_state;
flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
if (!flip_state)
return -ENOMEM;
drm_framebuffer_get(fb);
flip_state->fb = fb;
flip_state->crtc = crtc;
flip_state->event = event;
/* Save the current FB before it's replaced by the new one in
* drm_atomic_set_fb_for_plane(). We'll need the old FB in
* vc4_async_page_flip_complete() to decrement the BO usecnt and keep
* it consistent.
* FIXME: we should move to generic async-page-flip when it's
* available, so that we can get rid of this hand-made cleanup_fb()
* logic.
*/
flip_state->old_fb = plane->state->fb;
if (flip_state->old_fb)
drm_framebuffer_get(flip_state->old_fb);
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
/* Immediately update the plane's legacy fb pointer, so that later
* modeset prep sees the state that will be present when the semaphore
* is released.
*/
drm_atomic_set_fb_for_plane(plane->state, fb);
vc4_async_set_fence_cb(dev, flip_state);
/* Driver takes ownership of state on successful async commit. */
return 0;
}
/* Implements async (non-vblank-synced) page flips.
*
* The page flip ioctl needs to return immediately, so we grab the
* modeset semaphore on the pipe, and queue the address update for
* when V3D is done with the BO being flipped to.
*/
static int vc4_async_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags)
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return -ENODEV;
/*
* Increment the BO usecnt here, so that we never end up with an
* unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
* plane is later updated through the non-async path.
*
* FIXME: we should move to generic async-page-flip when
* it's available, so that we can get rid of this
* hand-made prepare_fb() logic.
*/
ret = vc4_bo_inc_usecnt(bo);
if (ret)
return ret;
ret = vc4_async_page_flip_common(crtc, fb, event, flags);
if (ret) {
vc4_bo_dec_usecnt(bo);
return ret;
}
return 0;
}
static int vc5_async_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags)
{
return vc4_async_page_flip_common(crtc, fb, event, flags);
}
int vc4_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags,
struct drm_modeset_acquire_ctx *ctx)
{
if (flags & DRM_MODE_PAGE_FLIP_ASYNC) {
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
if (vc4->is_vc5)
return vc5_async_page_flip(crtc, fb, event, flags);
else
return vc4_async_page_flip(crtc, fb, event, flags);
} else {
return drm_atomic_helper_page_flip(crtc, fb, event, flags, ctx);
}
}
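/* Illustrative userspace trigger for the async path above (hypothetical
 * fd and object IDs; libdrm's drmModePageFlip() is assumed):
 *
 *   drmModePageFlip(fd, crtc_id, fb_id,
 *                   DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
 *                   user_data);
 *
 * With DRM_MODE_PAGE_FLIP_ASYNC set, the request is routed to the
 * hand-rolled async implementations above instead of
 * drm_atomic_helper_page_flip().
 */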
struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct vc4_crtc_state *vc4_state, *old_vc4_state;
vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
if (!vc4_state)
return NULL;
old_vc4_state = to_vc4_crtc_state(crtc->state);
vc4_state->margins = old_vc4_state->margins;
vc4_state->assigned_channel = old_vc4_state->assigned_channel;
__drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base);
return &vc4_state->base;
}
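/* Release this state's allocation in the HVS display list memory (if
 * any) under the HVS mm_lock, then let the atomic helper free the rest.
 */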
void vc4_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(crtc->dev);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
if (drm_mm_node_allocated(&vc4_state->mm)) {
unsigned long flags;
spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
drm_mm_remove_node(&vc4_state->mm);
spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
}
drm_atomic_helper_crtc_destroy_state(crtc, state);
}
void vc4_crtc_reset(struct drm_crtc *crtc)
{
struct vc4_crtc_state *vc4_crtc_state;
if (crtc->state)
vc4_crtc_destroy_state(crtc, crtc->state);
vc4_crtc_state = kzalloc(sizeof(*vc4_crtc_state), GFP_KERNEL);
if (!vc4_crtc_state) {
crtc->state = NULL;
return;
}
vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
__drm_atomic_helper_crtc_reset(crtc, &vc4_crtc_state->base);
}
int vc4_crtc_late_register(struct drm_crtc *crtc)
{
struct drm_device *drm = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
vc4_debugfs_add_regset32(drm, crtc_data->debugfs_name,
&vc4_crtc->regset);
return 0;
}
static const struct drm_crtc_funcs vc4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = vc4_page_flip,
.set_property = NULL,
.cursor_set = NULL, /* handled by drm_mode_cursor_universal */
.cursor_move = NULL, /* handled by drm_mode_cursor_universal */
.reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
.late_register = vc4_crtc_late_register,
};
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
.mode_valid = vc4_crtc_mode_valid,
.atomic_check = vc4_crtc_atomic_check,
.atomic_begin = vc4_hvs_atomic_begin,
.atomic_flush = vc4_hvs_atomic_flush,
.atomic_enable = vc4_crtc_atomic_enable,
.atomic_disable = vc4_crtc_atomic_disable,
.get_scanout_position = vc4_crtc_get_scanout_position,
};
const struct vc4_pv_data bcm2835_pv0_data = {
.base = {
.name = "pixelvalve-0",
.debugfs_name = "crtc0_regs",
.hvs_available_channels = BIT(0),
.hvs_output = 0,
},
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
},
};
const struct vc4_pv_data bcm2835_pv1_data = {
.base = {
.name = "pixelvalve-1",
.debugfs_name = "crtc1_regs",
.hvs_available_channels = BIT(2),
.hvs_output = 2,
},
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
},
};
const struct vc4_pv_data bcm2835_pv2_data = {
.base = {
.name = "pixelvalve-2",
.debugfs_name = "crtc2_regs",
.hvs_available_channels = BIT(1),
.hvs_output = 1,
},
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI0,
[PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
},
};
const struct vc4_pv_data bcm2711_pv0_data = {
.base = {
.name = "pixelvalve-0",
.debugfs_name = "crtc0_regs",
.hvs_available_channels = BIT(0),
.hvs_output = 0,
},
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
[0] = VC4_ENCODER_TYPE_DSI0,
[1] = VC4_ENCODER_TYPE_DPI,
},
};
const struct vc4_pv_data bcm2711_pv1_data = {
.base = {
.name = "pixelvalve-1",
.debugfs_name = "crtc1_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 3,
},
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
[0] = VC4_ENCODER_TYPE_DSI1,
[1] = VC4_ENCODER_TYPE_SMI,
},
};
const struct vc4_pv_data bcm2711_pv2_data = {
.base = {
.name = "pixelvalve-2",
.debugfs_name = "crtc2_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 4,
},
.fifo_depth = 256,
.pixels_per_clock = 2,
.encoder_types = {
[0] = VC4_ENCODER_TYPE_HDMI0,
},
};
const struct vc4_pv_data bcm2711_pv3_data = {
.base = {
.name = "pixelvalve-3",
.debugfs_name = "crtc3_regs",
.hvs_available_channels = BIT(1),
.hvs_output = 1,
},
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
[PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
},
};
const struct vc4_pv_data bcm2711_pv4_data = {
.base = {
.name = "pixelvalve-4",
.debugfs_name = "crtc4_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 5,
},
.fifo_depth = 64,
.pixels_per_clock = 2,
.encoder_types = {
[0] = VC4_ENCODER_TYPE_HDMI1,
},
};
static const struct of_device_id vc4_crtc_dt_match[] = {
{ .compatible = "brcm,bcm2835-pixelvalve0", .data = &bcm2835_pv0_data },
{ .compatible = "brcm,bcm2835-pixelvalve1", .data = &bcm2835_pv1_data },
{ .compatible = "brcm,bcm2835-pixelvalve2", .data = &bcm2835_pv2_data },
{ .compatible = "brcm,bcm2711-pixelvalve0", .data = &bcm2711_pv0_data },
{ .compatible = "brcm,bcm2711-pixelvalve1", .data = &bcm2711_pv1_data },
{ .compatible = "brcm,bcm2711-pixelvalve2", .data = &bcm2711_pv2_data },
{ .compatible = "brcm,bcm2711-pixelvalve3", .data = &bcm2711_pv3_data },
{ .compatible = "brcm,bcm2711-pixelvalve4", .data = &bcm2711_pv4_data },
{}
};
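/* Walk all registered encoders (skipping virtual ones) and, for every
 * encoder type listed in this pixelvalve's clock-select table, record
 * the matching clock-select index and mark this CRTC as a possible
 * CRTC for that encoder.
 */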
static void vc4_set_crtc_possible_masks(struct drm_device *drm,
struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
const enum vc4_encoder_type *encoder_types = pv_data->encoder_types;
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, drm) {
struct vc4_encoder *vc4_encoder;
int i;
if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
continue;
vc4_encoder = to_vc4_encoder(encoder);
for (i = 0; i < ARRAY_SIZE(pv_data->encoder_types); i++) {
if (vc4_encoder->type == encoder_types[i]) {
vc4_encoder->clock_select = i;
encoder->possible_crtcs |= drm_crtc_mask(crtc);
break;
}
}
}
}
/**
* __vc4_crtc_init - Initializes a CRTC
* @drm: DRM Device
* @pdev: CRTC Platform Device
* @vc4_crtc: CRTC Object to Initialize
* @data: Configuration data associated with this CRTC
* @primary_plane: Primary plane for CRTC
* @crtc_funcs: Callbacks for the new CRTC
* @crtc_helper_funcs: Helper Callbacks for the new CRTC
* @feeds_txp: Is this CRTC connected to the TXP?
*
* Initializes our private CRTC structure. This function is mostly
* relevant for KUnit testing, all other users should use
* vc4_crtc_init() instead.
*
* Returns:
* 0 on success, a negative error code on failure.
*/
int __vc4_crtc_init(struct drm_device *drm,
struct platform_device *pdev,
struct vc4_crtc *vc4_crtc,
const struct vc4_crtc_data *data,
struct drm_plane *primary_plane,
const struct drm_crtc_funcs *crtc_funcs,
const struct drm_crtc_helper_funcs *crtc_helper_funcs,
bool feeds_txp)
{
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct drm_crtc *crtc = &vc4_crtc->base;
unsigned int i;
int ret;
vc4_crtc->data = data;
vc4_crtc->pdev = pdev;
vc4_crtc->feeds_txp = feeds_txp;
spin_lock_init(&vc4_crtc->irq_lock);
ret = drmm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
crtc_funcs, data->name);
if (ret)
return ret;
drm_crtc_helper_add(crtc, crtc_helper_funcs);
if (!vc4->is_vc5) {
drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
/* We support CTM, but only for one CRTC at a time. It's therefore
* implemented as private driver state in vc4_kms, not here.
*/
drm_crtc_enable_color_mgmt(crtc, 0, true, crtc->gamma_size);
}
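/* Start the legacy gamma LUT out as an identity ramp. */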
for (i = 0; i < crtc->gamma_size; i++) {
vc4_crtc->lut_r[i] = i;
vc4_crtc->lut_g[i] = i;
vc4_crtc->lut_b[i] = i;
}
return 0;
}
int vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev,
struct vc4_crtc *vc4_crtc,
const struct vc4_crtc_data *data,
const struct drm_crtc_funcs *crtc_funcs,
const struct drm_crtc_helper_funcs *crtc_helper_funcs,
bool feeds_txp)
{
struct drm_plane *primary_plane;
/* For now, we create just the primary and the legacy cursor
* planes. We should be able to stack more planes on easily,
* but to do that we would need to compute the bandwidth
* requirement of the plane configuration, and reject ones
* that will take too much.
*/
primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY, 0);
if (IS_ERR(primary_plane)) {
dev_err(drm->dev, "failed to construct primary plane\n");
return PTR_ERR(primary_plane);
}
return __vc4_crtc_init(drm, pdev, vc4_crtc, data, primary_plane,
crtc_funcs, crtc_helper_funcs, feeds_txp);
}
static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
const struct vc4_pv_data *pv_data;
struct vc4_crtc *vc4_crtc;
struct drm_crtc *crtc;
int ret;
vc4_crtc = drmm_kzalloc(drm, sizeof(*vc4_crtc), GFP_KERNEL);
if (!vc4_crtc)
return -ENOMEM;
crtc = &vc4_crtc->base;
pv_data = of_device_get_match_data(dev);
if (!pv_data)
return -ENODEV;
vc4_crtc->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(vc4_crtc->regs))
return PTR_ERR(vc4_crtc->regs);
vc4_crtc->regset.base = vc4_crtc->regs;
vc4_crtc->regset.regs = crtc_regs;
vc4_crtc->regset.nregs = ARRAY_SIZE(crtc_regs);
ret = vc4_crtc_init(drm, pdev, vc4_crtc, &pv_data->base,
&vc4_crtc_funcs, &vc4_crtc_helper_funcs,
false);
if (ret)
return ret;
vc4_set_crtc_possible_masks(drm, crtc);
CRTC_WRITE(PV_INTEN, 0);
CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
vc4_crtc_irq_handler,
IRQF_SHARED,
"vc4 crtc", vc4_crtc);
if (ret)
return ret;
platform_set_drvdata(pdev, vc4_crtc);
return 0;
}
static void vc4_crtc_unbind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct vc4_crtc *vc4_crtc = dev_get_drvdata(dev);
CRTC_WRITE(PV_INTEN, 0);
platform_set_drvdata(pdev, NULL);
}
static const struct component_ops vc4_crtc_ops = {
.bind = vc4_crtc_bind,
.unbind = vc4_crtc_unbind,
};
static int vc4_crtc_dev_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &vc4_crtc_ops);
}
static void vc4_crtc_dev_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &vc4_crtc_ops);
}
struct platform_driver vc4_crtc_driver = {
.probe = vc4_crtc_dev_probe,
.remove_new = vc4_crtc_dev_remove,
.driver = {
.name = "vc4_crtc",
.of_match_table = vc4_crtc_dt_match,
},
};
| linux-master | drivers/gpu/drm/vc4/vc4_crtc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Broadcom Limited
*/
/**
* DOC: VC4 DPI module
*
* The VC4 DPI hardware supports MIPI DPI type 4 and Nokia ViSSI
* signals. On BCM2835, these can be routed out to GPIO0-27 with the
* ALT2 function.
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/media-bus-format.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
#define DPI_C 0x00
# define DPI_OUTPUT_ENABLE_MODE BIT(16)
/* The order field takes the incoming 24 bit RGB from the pixel valve
* and shuffles the 3 channels.
*/
# define DPI_ORDER_MASK VC4_MASK(15, 14)
# define DPI_ORDER_SHIFT 14
# define DPI_ORDER_RGB 0
# define DPI_ORDER_BGR 1
# define DPI_ORDER_GRB 2
# define DPI_ORDER_BRG 3
/* The format field takes the ORDER-shuffled pixel valve data and
* formats it onto the output lines.
*/
# define DPI_FORMAT_MASK VC4_MASK(13, 11)
# define DPI_FORMAT_SHIFT 11
/* This define is named in the hardware, but actually just outputs 0. */
# define DPI_FORMAT_9BIT_666_RGB 0
/* Outputs 00000000rrrrrggggggbbbbb */
# define DPI_FORMAT_16BIT_565_RGB_1 1
/* Outputs 000rrrrr00gggggg000bbbbb */
# define DPI_FORMAT_16BIT_565_RGB_2 2
/* Outputs 00rrrrr000gggggg00bbbbb0 */
# define DPI_FORMAT_16BIT_565_RGB_3 3
/* Outputs 000000rrrrrrggggggbbbbbb */
# define DPI_FORMAT_18BIT_666_RGB_1 4
/* Outputs 00rrrrrr00gggggg00bbbbbb */
# define DPI_FORMAT_18BIT_666_RGB_2 5
/* Outputs rrrrrrrrggggggggbbbbbbbb */
# define DPI_FORMAT_24BIT_888_RGB 6
/* Reverses the polarity of the corresponding signal */
# define DPI_PIXEL_CLK_INVERT BIT(10)
# define DPI_HSYNC_INVERT BIT(9)
# define DPI_VSYNC_INVERT BIT(8)
# define DPI_OUTPUT_ENABLE_INVERT BIT(7)
/* Outputs the signal on the falling clock edge instead of the rising one. */
# define DPI_HSYNC_NEGATE BIT(6)
# define DPI_VSYNC_NEGATE BIT(5)
# define DPI_OUTPUT_ENABLE_NEGATE BIT(4)
/* Disables the signal */
# define DPI_HSYNC_DISABLE BIT(3)
# define DPI_VSYNC_DISABLE BIT(2)
# define DPI_OUTPUT_ENABLE_DISABLE BIT(1)
/* Power gate to the device, full reset at 0 -> 1 transition */
# define DPI_ENABLE BIT(0)
/* All other registers besides DPI_C return the ID */
#define DPI_ID 0x04
# define DPI_ID_VALUE 0x00647069
/* General DPI hardware state. */
struct vc4_dpi {
struct vc4_encoder encoder;
struct platform_device *pdev;
void __iomem *regs;
struct clk *pixel_clock;
struct clk *core_clock;
struct debugfs_regset32 regset;
};
#define to_vc4_dpi(_encoder) \
container_of_const(_encoder, struct vc4_dpi, encoder.base)
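/* Register accessors; these deliberately fail the running KUnit test if
 * a unit test ever reaches out and touches real hardware registers.
 */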
#define DPI_READ(offset) \
({ \
kunit_fail_current_test("Accessing a register in a unit test!\n"); \
readl(dpi->regs + (offset)); \
})
#define DPI_WRITE(offset, val) \
do { \
kunit_fail_current_test("Accessing a register in a unit test!\n"); \
writel(val, dpi->regs + (offset)); \
} while (0)
static const struct debugfs_reg32 dpi_regs[] = {
VC4_REG32(DPI_C),
VC4_REG32(DPI_ID),
};
static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct vc4_dpi *dpi = to_vc4_dpi(encoder);
int idx;
if (!drm_dev_enter(dev, &idx))
return;
clk_disable_unprepare(dpi->pixel_clock);
drm_dev_exit(idx);
}
static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_display_mode *mode = &encoder->crtc->mode;
struct vc4_dpi *dpi = to_vc4_dpi(encoder);
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector = NULL, *connector_scan;
u32 dpi_c = DPI_ENABLE;
int idx;
int ret;
/* Look up the connector attached to DPI so we can get the
* bus_format. Ideally the bridge would tell us the
* bus_format we want, but it doesn't yet, so assume that it's
* uniform throughout the bridge chain.
*/
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector_scan, &conn_iter) {
if (connector_scan->encoder == encoder) {
connector = connector_scan;
break;
}
}
drm_connector_list_iter_end(&conn_iter);
/* Default to 18bit if no connector or format found. */
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1, DPI_FORMAT);
if (connector) {
if (connector->display_info.num_bus_formats) {
u32 bus_format = connector->display_info.bus_formats[0];
dpi_c &= ~DPI_FORMAT_MASK;
switch (bus_format) {
case MEDIA_BUS_FMT_RGB888_1X24:
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
DPI_FORMAT);
break;
case MEDIA_BUS_FMT_BGR888_1X24:
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
DPI_FORMAT);
dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR,
DPI_ORDER);
break;
case MEDIA_BUS_FMT_BGR666_1X24_CPADHI:
dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER);
fallthrough;
case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2,
DPI_FORMAT);
break;
case MEDIA_BUS_FMT_BGR666_1X18:
dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER);
fallthrough;
case MEDIA_BUS_FMT_RGB666_1X18:
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1,
DPI_FORMAT);
break;
case MEDIA_BUS_FMT_RGB565_1X16:
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_1,
DPI_FORMAT);
break;
case MEDIA_BUS_FMT_RGB565_1X24_CPADHI:
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_2,
DPI_FORMAT);
break;
default:
DRM_ERROR("Unknown media bus format %d\n",
bus_format);
break;
}
}
if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
dpi_c |= DPI_PIXEL_CLK_INVERT;
if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
dpi_c |= DPI_OUTPUT_ENABLE_INVERT;
}
if (mode->flags & DRM_MODE_FLAG_CSYNC) {
if (mode->flags & DRM_MODE_FLAG_NCSYNC)
dpi_c |= DPI_OUTPUT_ENABLE_INVERT;
} else {
dpi_c |= DPI_OUTPUT_ENABLE_MODE;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
dpi_c |= DPI_HSYNC_INVERT;
else if (!(mode->flags & DRM_MODE_FLAG_PHSYNC))
dpi_c |= DPI_HSYNC_DISABLE;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
dpi_c |= DPI_VSYNC_INVERT;
else if (!(mode->flags & DRM_MODE_FLAG_PVSYNC))
dpi_c |= DPI_VSYNC_DISABLE;
}
if (!drm_dev_enter(dev, &idx))
return;
DPI_WRITE(DPI_C, dpi_c);
ret = clk_set_rate(dpi->pixel_clock, mode->clock * 1000);
if (ret)
DRM_ERROR("Failed to set clock rate: %d\n", ret);
ret = clk_prepare_enable(dpi->pixel_clock);
if (ret)
DRM_ERROR("Failed to set clock rate: %d\n", ret);
drm_dev_exit(idx);
}
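/* Worked example for the register assembly above (illustrative values):
 * a connector reporting MEDIA_BUS_FMT_BGR888_1X24 with
 * DRM_BUS_FLAG_DE_LOW and a +hsync/+vsync mode ends up programming:
 *
 *   dpi_c = DPI_ENABLE
 *         | VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT)
 *         | VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER)
 *         | DPI_OUTPUT_ENABLE_INVERT
 *         | DPI_OUTPUT_ENABLE_MODE;
 */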
static enum drm_mode_status vc4_dpi_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
return MODE_OK;
}
static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = {
.disable = vc4_dpi_encoder_disable,
.enable = vc4_dpi_encoder_enable,
.mode_valid = vc4_dpi_encoder_mode_valid,
};
static int vc4_dpi_late_register(struct drm_encoder *encoder)
{
struct drm_device *drm = encoder->dev;
struct vc4_dpi *dpi = to_vc4_dpi(encoder);
vc4_debugfs_add_regset32(drm, "dpi_regs", &dpi->regset);
return 0;
}
static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
.late_register = vc4_dpi_late_register,
};
static const struct of_device_id vc4_dpi_dt_match[] = {
{ .compatible = "brcm,bcm2835-dpi", .data = NULL },
{}
};
/* Sets up the next link in the display chain, whether it's a panel or
* a bridge.
*/
static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
{
struct drm_device *drm = dpi->encoder.base.dev;
struct device *dev = &dpi->pdev->dev;
struct drm_bridge *bridge;
bridge = drmm_of_get_bridge(drm, dev->of_node, 0, 0);
if (IS_ERR(bridge)) {
/* If nothing was connected in the DT, that's not an
* error.
*/
if (PTR_ERR(bridge) == -ENODEV)
return 0;
else
return PTR_ERR(bridge);
}
return drm_bridge_attach(&dpi->encoder.base, bridge, NULL, 0);
}
static void vc4_dpi_disable_clock(void *ptr)
{
struct vc4_dpi *dpi = ptr;
clk_disable_unprepare(dpi->core_clock);
}
static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dpi *dpi;
int ret;
dpi = drmm_kzalloc(drm, sizeof(*dpi), GFP_KERNEL);
if (!dpi)
return -ENOMEM;
dpi->encoder.type = VC4_ENCODER_TYPE_DPI;
dpi->pdev = pdev;
dpi->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(dpi->regs))
return PTR_ERR(dpi->regs);
dpi->regset.base = dpi->regs;
dpi->regset.regs = dpi_regs;
dpi->regset.nregs = ARRAY_SIZE(dpi_regs);
if (DPI_READ(DPI_ID) != DPI_ID_VALUE) {
dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
DPI_READ(DPI_ID), DPI_ID_VALUE);
return -ENODEV;
}
dpi->core_clock = devm_clk_get(dev, "core");
if (IS_ERR(dpi->core_clock)) {
ret = PTR_ERR(dpi->core_clock);
if (ret != -EPROBE_DEFER)
DRM_ERROR("Failed to get core clock: %d\n", ret);
return ret;
}
dpi->pixel_clock = devm_clk_get(dev, "pixel");
if (IS_ERR(dpi->pixel_clock)) {
ret = PTR_ERR(dpi->pixel_clock);
if (ret != -EPROBE_DEFER)
DRM_ERROR("Failed to get pixel clock: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(dpi->core_clock);
if (ret) {
DRM_ERROR("Failed to turn on core clock: %d\n", ret);
return ret;
}
ret = devm_add_action_or_reset(dev, vc4_dpi_disable_clock, dpi);
if (ret)
return ret;
ret = drmm_encoder_init(drm, &dpi->encoder.base,
&vc4_dpi_encoder_funcs,
DRM_MODE_ENCODER_DPI,
NULL);
if (ret)
return ret;
drm_encoder_helper_add(&dpi->encoder.base, &vc4_dpi_encoder_helper_funcs);
ret = vc4_dpi_init_bridge(dpi);
if (ret)
return ret;
dev_set_drvdata(dev, dpi);
return 0;
}
static const struct component_ops vc4_dpi_ops = {
.bind = vc4_dpi_bind,
};
static int vc4_dpi_dev_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &vc4_dpi_ops);
}
static void vc4_dpi_dev_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &vc4_dpi_ops);
}
struct platform_driver vc4_dpi_driver = {
.probe = vc4_dpi_dev_probe,
.remove_new = vc4_dpi_dev_remove,
.driver = {
.name = "vc4_dpi",
.of_match_table = vc4_dpi_dt_match,
},
};
| linux-master | drivers/gpu/drm/vc4/vc4_dpi.c |
/*
* Copyright © 2014 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/**
* DOC: Interrupt management for the V3D engine
*
* We have an interrupt status register (V3D_INTCTL) which reports
* interrupts, and where writing 1 bits clears those interrupts.
* There are also a pair of interrupt registers
* (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or
* disables that specific interrupt, and 0s written are ignored
* (reading either one returns the set of enabled interrupts).
*
* When we take a binning flush done interrupt, we need to submit the
* next frame for binning and move the finished frame to the render
* thread.
*
* When we take a render frame interrupt, we need to wake the
* processes waiting for some frame to be done, and get the next frame
* submitted ASAP (so the hardware doesn't sit idle when there's work
* to do).
*
* When we take the binner out of memory interrupt, we need to
* allocate some new memory and pass it to the binner so that the
* current job can make progress.
*/
#include <linux/platform_device.h>
#include <drm/drm_drv.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"
#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
V3D_INT_FLDONE | \
V3D_INT_FRDONE)
static void
vc4_overflow_mem_work(struct work_struct *work)
{
struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, overflow_mem_work);
struct vc4_bo *bo;
int bin_bo_slot;
struct vc4_exec_info *exec;
unsigned long irqflags;
mutex_lock(&vc4->bin_bo_lock);
if (!vc4->bin_bo)
goto complete;
bo = vc4->bin_bo;
bin_bo_slot = vc4_v3d_get_bin_slot(vc4);
if (bin_bo_slot < 0) {
DRM_ERROR("Couldn't allocate binner overflow mem\n");
goto complete;
}
spin_lock_irqsave(&vc4->job_lock, irqflags);
if (vc4->bin_alloc_overflow) {
/* If we had overflow memory allocated previously,
* then that chunk will free when the current bin job
* is done. If we don't have a bin job running, then
* the chunk will be done whenever the list of render
* jobs has drained.
*/
exec = vc4_first_bin_job(vc4);
if (!exec)
exec = vc4_last_render_job(vc4);
if (exec) {
exec->bin_slots |= vc4->bin_alloc_overflow;
} else {
/* There's nothing queued in the hardware, so
* the old slot is free immediately.
*/
vc4->bin_alloc_used &= ~vc4->bin_alloc_overflow;
}
}
vc4->bin_alloc_overflow = BIT(bin_bo_slot);
V3D_WRITE(V3D_BPOA, bo->base.dma_addr + bin_bo_slot * vc4->bin_alloc_size);
V3D_WRITE(V3D_BPOS, bo->base.base.size);
V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
complete:
mutex_unlock(&vc4->bin_bo_lock);
}
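/* Called from the FLDONE interrupt with the job lock held: move the
 * finished bin job over to the render list and, if the perfmon state
 * allows it, submit the next bin job right away.
 */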
static void
vc4_irq_finish_bin_job(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_exec_info *next, *exec = vc4_first_bin_job(vc4);
if (!exec)
return;
trace_vc4_bcl_end_irq(dev, exec->seqno);
vc4_move_job_to_render(dev, exec);
next = vc4_first_bin_job(vc4);
/* Only submit the next job in the bin list if it matches the perfmon
* attached to the one that just finished (or if both jobs don't have
* perfmon attached to them).
*/
if (next && next->perfmon == exec->perfmon)
vc4_submit_next_bin_job(dev);
}
static void
vc4_cancel_bin_job(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_exec_info *exec = vc4_first_bin_job(vc4);
if (!exec)
return;
/* Stop the perfmon so that the next bin job can be started. */
if (exec->perfmon)
vc4_perfmon_stop(vc4, exec->perfmon, false);
list_move_tail(&exec->head, &vc4->bin_job_list);
vc4_submit_next_bin_job(dev);
}
static void
vc4_irq_finish_render_job(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_exec_info *exec = vc4_first_render_job(vc4);
struct vc4_exec_info *nextbin, *nextrender;
if (!exec)
return;
trace_vc4_rcl_end_irq(dev, exec->seqno);
vc4->finished_seqno++;
list_move_tail(&exec->head, &vc4->job_done_list);
nextbin = vc4_first_bin_job(vc4);
nextrender = vc4_first_render_job(vc4);
/* Only stop the perfmon if following jobs in the queue don't expect it
* to be enabled.
*/
if (exec->perfmon && !nextrender &&
(!nextbin || nextbin->perfmon != exec->perfmon))
vc4_perfmon_stop(vc4, exec->perfmon, true);
/* If there's a render job waiting, start it. If this is not the case
* we may have to unblock the binner if it's been stalled because of
* perfmon (this can be checked by comparing the perfmon attached to
* the finished renderjob to the one attached to the next bin job: if
* they don't match, this means the binner is stalled and should be
* restarted).
*/
if (nextrender)
vc4_submit_next_render_job(dev);
else if (nextbin && nextbin->perfmon != exec->perfmon)
vc4_submit_next_bin_job(dev);
if (exec->fence) {
dma_fence_signal_locked(exec->fence);
dma_fence_put(exec->fence);
exec->fence = NULL;
}
wake_up_all(&vc4->job_wait_queue);
schedule_work(&vc4->job_done_work);
}
static irqreturn_t
vc4_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t intctl;
irqreturn_t status = IRQ_NONE;
barrier();
intctl = V3D_READ(V3D_INTCTL);
/* Acknowledge the interrupts we're handling here. The binner
* last flush / render frame done interrupt will be cleared,
* while OUTOMEM will stay high until the underlying cause is
* cleared.
*/
V3D_WRITE(V3D_INTCTL, intctl);
if (intctl & V3D_INT_OUTOMEM) {
/* Disable OUTOMEM until the work is done. */
V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
schedule_work(&vc4->overflow_mem_work);
status = IRQ_HANDLED;
}
if (intctl & V3D_INT_FLDONE) {
spin_lock(&vc4->job_lock);
vc4_irq_finish_bin_job(dev);
spin_unlock(&vc4->job_lock);
status = IRQ_HANDLED;
}
if (intctl & V3D_INT_FRDONE) {
spin_lock(&vc4->job_lock);
vc4_irq_finish_render_job(dev);
spin_unlock(&vc4->job_lock);
status = IRQ_HANDLED;
}
return status;
}
static void
vc4_irq_prepare(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
if (!vc4->v3d)
return;
init_waitqueue_head(&vc4->job_wait_queue);
INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);
/* Clear any pending interrupts someone might have left around
* for us.
*/
V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
}
void
vc4_irq_enable(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
if (WARN_ON_ONCE(vc4->is_vc5))
return;
if (!vc4->v3d)
return;
/* Enable the render done interrupts. The out-of-memory interrupt is
* enabled as soon as we have a binner BO allocated.
*/
V3D_WRITE(V3D_INTENA, V3D_INT_FLDONE | V3D_INT_FRDONE);
}
void
vc4_irq_disable(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
if (WARN_ON_ONCE(vc4->is_vc5))
return;
if (!vc4->v3d)
return;
/* Disable sending interrupts for our driver's IRQs. */
V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
/* Clear any pending interrupts we might have left. */
V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
/* Finish any interrupt handler still in flight. */
synchronize_irq(vc4->irq);
cancel_work_sync(&vc4->overflow_mem_work);
}
int vc4_irq_install(struct drm_device *dev, int irq)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return -ENODEV;
if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
vc4_irq_prepare(dev);
ret = request_irq(irq, vc4_irq, 0, dev->driver->name, dev);
if (ret)
return ret;
vc4_irq_enable(dev);
return 0;
}
void vc4_irq_uninstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
if (WARN_ON_ONCE(vc4->is_vc5))
return;
vc4_irq_disable(dev);
free_irq(vc4->irq, dev);
}
/** Reinitializes interrupt registers when a GPU reset is performed. */
void vc4_irq_reset(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long irqflags;
if (WARN_ON_ONCE(vc4->is_vc5))
return;
/* Acknowledge any stale IRQs. */
V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
/*
* Turn all our interrupts on. Binner out of memory is the
* only one we expect to trigger at this point, since we've
* just come from poweron and haven't supplied any overflow
* memory yet.
*/
V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
spin_lock_irqsave(&vc4->job_lock, irqflags);
vc4_cancel_bin_job(dev);
vc4_irq_finish_render_job(dev);
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}
| linux-master | drivers/gpu/drm/vc4/vc4_irq.c |
/*
* Copyright © 2014 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/**
* DOC: Command list validator for VC4.
*
* Since the VC4 has no IOMMU between it and system memory, a user
* with access to execute command lists could escalate privilege by
* overwriting system memory (drawing to it as a framebuffer) or
* reading system memory it shouldn't (reading it as a vertex buffer
* or index buffer).
*
* We validate binner command lists to ensure that all accesses are
* within the bounds of the GEM objects referenced by the submitted
* job. It explicitly whitelists packets, and looks at the offsets in
* any address fields to make sure they're contained within the BOs
* they reference.
*
* Note that because CL validation is already reading the
* user-submitted CL and writing the validated copy out to the memory
* that the GPU will actually read, this is also where GEM relocation
* processing (turning BO references into actual addresses for the GPU
* to use) happens.
*/
#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_packet.h"
#define VALIDATE_ARGS \
struct vc4_exec_info *exec, \
void *validated, \
void *untrusted
/** Return the width in pixels of a 64-byte microtile. */
static uint32_t
utile_width(int cpp)
{
switch (cpp) {
case 1:
case 2:
return 8;
case 4:
return 4;
case 8:
return 2;
default:
DRM_ERROR("unknown cpp: %d\n", cpp);
return 1;
}
}
/** Return the height in pixels of a 64-byte microtile. */
static uint32_t
utile_height(int cpp)
{
switch (cpp) {
case 1:
return 8;
case 2:
case 4:
case 8:
return 4;
default:
DRM_ERROR("unknown cpp: %d\n", cpp);
return 1;
}
}
/**
* size_is_lt() - Returns whether a miplevel of the given size will
* use the lineartile (LT) tiling layout rather than the normal T
* tiling layout.
* @width: Width in pixels of the miplevel
* @height: Height in pixels of the miplevel
* @cpp: Bytes per pixel of the pixel format
*/
static bool
size_is_lt(uint32_t width, uint32_t height, int cpp)
{
return (width <= 4 * utile_width(cpp) ||
height <= 4 * utile_height(cpp));
}
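/* Example: at cpp == 4 a microtile is 4x4 pixels, so any miplevel with
 * either dimension at or below 4 * 4 = 16 pixels uses the LT layout.
 */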
struct drm_gem_dma_object *
vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
{
struct vc4_dev *vc4 = exec->dev;
struct drm_gem_dma_object *obj;
struct vc4_bo *bo;
if (WARN_ON_ONCE(vc4->is_vc5))
return NULL;
if (hindex >= exec->bo_count) {
DRM_DEBUG("BO index %d greater than BO count %d\n",
hindex, exec->bo_count);
return NULL;
}
obj = to_drm_gem_dma_obj(exec->bo[hindex]);
bo = to_vc4_bo(&obj->base);
if (bo->validated_shader) {
DRM_DEBUG("Trying to use shader BO as something other than "
"a shader\n");
return NULL;
}
return obj;
}
static struct drm_gem_dma_object *
vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index)
{
return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]);
}
static bool
validate_bin_pos(struct vc4_exec_info *exec, void *untrusted, uint32_t pos)
{
/* Note that the untrusted pointer passed to these functions is
* incremented past the packet byte.
*/
return (untrusted - 1 == exec->bin_u + pos);
}
static uint32_t
gl_shader_rec_size(uint32_t pointer_bits)
{
uint32_t attribute_count = pointer_bits & 7;
bool extended = pointer_bits & 8;
if (attribute_count == 0)
attribute_count = 8;
if (extended)
return 100 + attribute_count * 4;
else
return 36 + attribute_count * 8;
}
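/* Example: pointer_bits of 0x3 describes a non-extended record with 3
 * attributes, 36 + 3 * 8 = 60 bytes; 0xb (extended bit set) would give
 * 100 + 3 * 4 = 112 bytes instead.
 */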
bool
vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_dma_object *fbo,
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp)
{
struct vc4_dev *vc4 = exec->dev;
uint32_t aligned_width, aligned_height, stride, size;
uint32_t utile_w = utile_width(cpp);
uint32_t utile_h = utile_height(cpp);
if (WARN_ON_ONCE(vc4->is_vc5))
return false;
/* The shaded vertex format stores signed 12.4 fixed point
* (-2048,2047) offsets from the viewport center, so we should
* never have a render target larger than 4096. The texture
* unit can only sample from 2048x2048, so it's even more
* restricted. This lets us avoid worrying about overflow in
* our math.
*/
if (width > 4096 || height > 4096) {
DRM_DEBUG("Surface dimensions (%d,%d) too large",
width, height);
return false;
}
switch (tiling_format) {
case VC4_TILING_FORMAT_LINEAR:
aligned_width = round_up(width, utile_w);
aligned_height = height;
break;
case VC4_TILING_FORMAT_T:
aligned_width = round_up(width, utile_w * 8);
aligned_height = round_up(height, utile_h * 8);
break;
case VC4_TILING_FORMAT_LT:
aligned_width = round_up(width, utile_w);
aligned_height = round_up(height, utile_h);
break;
default:
DRM_DEBUG("buffer tiling %d unsupported\n", tiling_format);
return false;
}
stride = aligned_width * cpp;
size = stride * aligned_height;
if (size + offset < size ||
size + offset > fbo->base.size) {
DRM_DEBUG("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
width, height,
aligned_width, aligned_height,
size, offset, fbo->base.size);
return false;
}
return true;
}
static int
validate_flush(VALIDATE_ARGS)
{
if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) {
DRM_DEBUG("Bin CL must end with VC4_PACKET_FLUSH\n");
return -EINVAL;
}
exec->found_flush = true;
return 0;
}
static int
validate_start_tile_binning(VALIDATE_ARGS)
{
if (exec->found_start_tile_binning_packet) {
DRM_DEBUG("Duplicate VC4_PACKET_START_TILE_BINNING\n");
return -EINVAL;
}
exec->found_start_tile_binning_packet = true;
if (!exec->found_tile_binning_mode_config_packet) {
DRM_DEBUG("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
return -EINVAL;
}
return 0;
}
static int
validate_increment_semaphore(VALIDATE_ARGS)
{
if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) {
DRM_DEBUG("Bin CL must end with "
"VC4_PACKET_INCREMENT_SEMAPHORE\n");
return -EINVAL;
}
exec->found_increment_semaphore_packet = true;
return 0;
}
static int
validate_indexed_prim_list(VALIDATE_ARGS)
{
struct drm_gem_dma_object *ib;
uint32_t length = *(uint32_t *)(untrusted + 1);
uint32_t offset = *(uint32_t *)(untrusted + 5);
uint32_t max_index = *(uint32_t *)(untrusted + 9);
uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ? 2 : 1;
struct vc4_shader_state *shader_state;
/* Check overflow condition */
if (exec->shader_state_count == 0) {
DRM_DEBUG("shader state must precede primitives\n");
return -EINVAL;
}
shader_state = &exec->shader_state[exec->shader_state_count - 1];
if (max_index > shader_state->max_index)
shader_state->max_index = max_index;
ib = vc4_use_handle(exec, 0);
if (!ib)
return -EINVAL;
exec->bin_dep_seqno = max(exec->bin_dep_seqno,
to_vc4_bo(&ib->base)->write_seqno);
if (offset > ib->base.size ||
(ib->base.size - offset) / index_size < length) {
DRM_DEBUG("IB access overflow (%d + %d*%d > %zd)\n",
offset, length, index_size, ib->base.size);
return -EINVAL;
}
*(uint32_t *)(validated + 5) = ib->dma_addr + offset;
return 0;
}
static int
validate_gl_array_primitive(VALIDATE_ARGS)
{
uint32_t length = *(uint32_t *)(untrusted + 1);
uint32_t base_index = *(uint32_t *)(untrusted + 5);
uint32_t max_index;
struct vc4_shader_state *shader_state;
/* Check overflow condition */
if (exec->shader_state_count == 0) {
DRM_DEBUG("shader state must precede primitives\n");
return -EINVAL;
}
shader_state = &exec->shader_state[exec->shader_state_count - 1];
if (length + base_index < length) {
DRM_DEBUG("primitive vertex count overflow\n");
return -EINVAL;
}
max_index = length + base_index - 1;
if (max_index > shader_state->max_index)
shader_state->max_index = max_index;
return 0;
}
static int
validate_gl_shader_state(VALIDATE_ARGS)
{
uint32_t i = exec->shader_state_count++;
if (i >= exec->shader_state_size) {
DRM_DEBUG("More requests for shader states than declared\n");
return -EINVAL;
}
exec->shader_state[i].addr = *(uint32_t *)untrusted;
exec->shader_state[i].max_index = 0;
if (exec->shader_state[i].addr & ~0xf) {
DRM_DEBUG("high bits set in GL shader rec reference\n");
return -EINVAL;
}
*(uint32_t *)validated = (exec->shader_rec_p +
exec->shader_state[i].addr);
exec->shader_rec_p +=
roundup(gl_shader_rec_size(exec->shader_state[i].addr), 16);
return 0;
}
static int
validate_tile_binning_config(VALIDATE_ARGS)
{
struct drm_device *dev = exec->exec_bo->base.dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
uint8_t flags;
uint32_t tile_state_size;
uint32_t tile_count, bin_addr;
int bin_slot;
if (exec->found_tile_binning_mode_config_packet) {
DRM_DEBUG("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
return -EINVAL;
}
exec->found_tile_binning_mode_config_packet = true;
exec->bin_tiles_x = *(uint8_t *)(untrusted + 12);
exec->bin_tiles_y = *(uint8_t *)(untrusted + 13);
tile_count = exec->bin_tiles_x * exec->bin_tiles_y;
flags = *(uint8_t *)(untrusted + 14);
if (exec->bin_tiles_x == 0 ||
exec->bin_tiles_y == 0) {
DRM_DEBUG("Tile binning config of %dx%d too small\n",
exec->bin_tiles_x, exec->bin_tiles_y);
return -EINVAL;
}
if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) {
DRM_DEBUG("unsupported binning config flags 0x%02x\n", flags);
return -EINVAL;
}
bin_slot = vc4_v3d_get_bin_slot(vc4);
if (bin_slot < 0) {
if (bin_slot != -EINTR && bin_slot != -ERESTARTSYS) {
DRM_ERROR("Failed to allocate binner memory: %d\n",
bin_slot);
}
return bin_slot;
}
/* The slot we allocated will only be used by this job, and is
* free when the job completes rendering.
*/
exec->bin_slots |= BIT(bin_slot);
bin_addr = vc4->bin_bo->base.dma_addr + bin_slot * vc4->bin_alloc_size;
/* The tile state data array is 48 bytes per tile, and we put it at
* the start of a BO containing both it and the tile alloc.
*/
tile_state_size = 48 * tile_count;
/* Since the tile alloc array will follow us, align. */
exec->tile_alloc_offset = bin_addr + roundup(tile_state_size, 4096);
*(uint8_t *)(validated + 14) =
((flags & ~(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK |
VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK)) |
VC4_BIN_CONFIG_AUTO_INIT_TSDA |
VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32,
VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE) |
VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128,
VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE));
/* tile alloc address. */
*(uint32_t *)(validated + 0) = exec->tile_alloc_offset;
/* tile alloc size. */
*(uint32_t *)(validated + 4) = (bin_addr + vc4->bin_alloc_size -
exec->tile_alloc_offset);
/* tile state address. */
*(uint32_t *)(validated + 8) = bin_addr;
return 0;
}
static int
validate_gem_handles(VALIDATE_ARGS)
{
memcpy(exec->bo_index, untrusted, sizeof(exec->bo_index));
return 0;
}
#define VC4_DEFINE_PACKET(packet, func) \
[packet] = { packet ## _SIZE, #packet, func }
static const struct cmd_info {
uint16_t len;
const char *name;
int (*func)(struct vc4_exec_info *exec, void *validated,
void *untrusted);
} cmd_info[] = {
VC4_DEFINE_PACKET(VC4_PACKET_HALT, NULL),
VC4_DEFINE_PACKET(VC4_PACKET_NOP, NULL),
VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, validate_flush),
VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, NULL),
VC4_DEFINE_PACKET(VC4_PACKET_START_TILE_BINNING,
validate_start_tile_binning),
VC4_DEFINE_PACKET(VC4_PACKET_INCREMENT_SEMAPHORE,
validate_increment_semaphore),
VC4_DEFINE_PACKET(VC4_PACKET_GL_INDEXED_PRIMITIVE,
validate_indexed_prim_list),
VC4_DEFINE_PACKET(VC4_PACKET_GL_ARRAY_PRIMITIVE,
validate_gl_array_primitive),
VC4_DEFINE_PACKET(VC4_PACKET_PRIMITIVE_LIST_FORMAT, NULL),
VC4_DEFINE_PACKET(VC4_PACKET_GL_SHADER_STATE, validate_gl_shader_state),
VC4_DEFINE_PACKET(VC4_PACKET_CONFIGURATION_BITS, NULL),
VC4_DEFINE_PACKET(VC4_PACKET_FLAT_SHADE_FLAGS, NULL),
VC4_DEFINE_PACKET(VC4_PACKET_POINT_SIZE, NULL),
VC4_DEFINE_PACKET(VC4_PACKET_LINE_WIDTH, NULL),
VC4_DEFINE_PACKET(VC4_PACKET_RHT_X_BOUNDARY, NULL),
VC4_DEFINE_PACKET(VC4_PACKET_DEPTH_OFFSET, NULL),
VC4_DEFINE_PACKET(VC4_PACKET_CLIP_WINDOW, NULL),
VC4_DEFINE_PACKET(VC4_PACKET_VIEWPORT_OFFSET, NULL),
VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_XY_SCALING, NULL),
/* Note: The docs say this was also 105, but it was 106 in the
* initial userland code drop.
*/
VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_Z_SCALING, NULL),
VC4_DEFINE_PACKET(VC4_PACKET_TILE_BINNING_MODE_CONFIG,
validate_tile_binning_config),
VC4_DEFINE_PACKET(VC4_PACKET_GEM_HANDLES, validate_gem_handles),
};
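/* Copy the user-submitted binner CL into the validated buffer one packet
 * at a time, rejecting unknown, unlisted, or out-of-bounds packets and
 * letting each packet's validate hook patch real bus addresses into the
 * copy the hardware will actually read.
 */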
int
vc4_validate_bin_cl(struct drm_device *dev,
void *validated,
void *unvalidated,
struct vc4_exec_info *exec)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t len = exec->args->bin_cl_size;
uint32_t dst_offset = 0;
uint32_t src_offset = 0;
if (WARN_ON_ONCE(vc4->is_vc5))
return -ENODEV;
while (src_offset < len) {
void *dst_pkt = validated + dst_offset;
void *src_pkt = unvalidated + src_offset;
u8 cmd = *(uint8_t *)src_pkt;
const struct cmd_info *info;
if (cmd >= ARRAY_SIZE(cmd_info)) {
DRM_DEBUG("0x%08x: packet %d out of bounds\n",
src_offset, cmd);
return -EINVAL;
}
info = &cmd_info[cmd];
if (!info->name) {
DRM_DEBUG("0x%08x: packet %d invalid\n",
src_offset, cmd);
return -EINVAL;
}
if (src_offset + info->len > len) {
DRM_DEBUG("0x%08x: packet %d (%s) length 0x%08x "
"exceeds bounds (0x%08x)\n",
src_offset, cmd, info->name, info->len,
src_offset + len);
return -EINVAL;
}
if (cmd != VC4_PACKET_GEM_HANDLES)
memcpy(dst_pkt, src_pkt, info->len);
if (info->func && info->func(exec,
dst_pkt + 1,
src_pkt + 1)) {
DRM_DEBUG("0x%08x: packet %d (%s) failed to validate\n",
src_offset, cmd, info->name);
return -EINVAL;
}
src_offset += info->len;
/* GEM handle loading doesn't produce HW packets. */
if (cmd != VC4_PACKET_GEM_HANDLES)
dst_offset += info->len;
/* When the CL hits halt, it'll stop reading anything else. */
if (cmd == VC4_PACKET_HALT)
break;
}
exec->ct0ea = exec->ct0ca + dst_offset;
if (!exec->found_start_tile_binning_packet) {
DRM_DEBUG("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
return -EINVAL;
}
/* The bin CL must be ended with INCREMENT_SEMAPHORE and FLUSH. The
* semaphore is used to trigger the render CL to start up, and the
* FLUSH is what caps the bin lists with
* VC4_PACKET_RETURN_FROM_SUB_LIST (so they jump back to the main
* render CL when they get called to) and actually triggers the queued
* semaphore increment.
*/
if (!exec->found_increment_semaphore_packet || !exec->found_flush) {
DRM_DEBUG("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
"VC4_PACKET_FLUSH\n");
return -EINVAL;
}
return 0;
}
static bool
reloc_tex(struct vc4_exec_info *exec,
void *uniform_data_u,
struct vc4_texture_sample_info *sample,
uint32_t texture_handle_index, bool is_cs)
{
struct drm_gem_dma_object *tex;
uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
uint32_t p2 = (sample->p_offset[2] != ~0 ?
*(uint32_t *)(uniform_data_u + sample->p_offset[2]) : 0);
uint32_t p3 = (sample->p_offset[3] != ~0 ?
*(uint32_t *)(uniform_data_u + sample->p_offset[3]) : 0);
uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0];
uint32_t offset = p0 & VC4_TEX_P0_OFFSET_MASK;
uint32_t miplevels = VC4_GET_FIELD(p0, VC4_TEX_P0_MIPLVLS);
uint32_t width = VC4_GET_FIELD(p1, VC4_TEX_P1_WIDTH);
uint32_t height = VC4_GET_FIELD(p1, VC4_TEX_P1_HEIGHT);
uint32_t cpp, tiling_format, utile_w, utile_h;
uint32_t i;
uint32_t cube_map_stride = 0;
enum vc4_texture_data_type type;
tex = vc4_use_bo(exec, texture_handle_index);
if (!tex)
return false;
if (sample->is_direct) {
uint32_t remaining_size = tex->base.size - p0;
if (p0 > tex->base.size - 4) {
DRM_DEBUG("UBO offset greater than UBO size\n");
goto fail;
}
if (p1 > remaining_size - 4) {
DRM_DEBUG("UBO clamp would allow reads "
"outside of UBO\n");
goto fail;
}
*validated_p0 = tex->dma_addr + p0;
return true;
}
if (width == 0)
width = 2048;
if (height == 0)
height = 2048;
if (p0 & VC4_TEX_P0_CMMODE_MASK) {
if (VC4_GET_FIELD(p2, VC4_TEX_P2_PTYPE) ==
VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE)
cube_map_stride = p2 & VC4_TEX_P2_CMST_MASK;
if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
if (cube_map_stride) {
DRM_DEBUG("Cube map stride set twice\n");
goto fail;
}
cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
}
if (!cube_map_stride) {
DRM_DEBUG("Cube map stride not set\n");
goto fail;
}
}
type = (VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) |
(VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4));
switch (type) {
case VC4_TEXTURE_TYPE_RGBA8888:
case VC4_TEXTURE_TYPE_RGBX8888:
case VC4_TEXTURE_TYPE_RGBA32R:
cpp = 4;
break;
case VC4_TEXTURE_TYPE_RGBA4444:
case VC4_TEXTURE_TYPE_RGBA5551:
case VC4_TEXTURE_TYPE_RGB565:
case VC4_TEXTURE_TYPE_LUMALPHA:
case VC4_TEXTURE_TYPE_S16F:
case VC4_TEXTURE_TYPE_S16:
cpp = 2;
break;
case VC4_TEXTURE_TYPE_LUMINANCE:
case VC4_TEXTURE_TYPE_ALPHA:
case VC4_TEXTURE_TYPE_S8:
cpp = 1;
break;
case VC4_TEXTURE_TYPE_ETC1:
/* ETC1 is arranged as 64-bit blocks, where each block is 4x4
* pixels.
*/
cpp = 8;
width = (width + 3) >> 2;
height = (height + 3) >> 2;
break;
case VC4_TEXTURE_TYPE_BW1:
case VC4_TEXTURE_TYPE_A4:
case VC4_TEXTURE_TYPE_A1:
case VC4_TEXTURE_TYPE_RGBA64:
case VC4_TEXTURE_TYPE_YUV422R:
default:
DRM_DEBUG("Texture format %d unsupported\n", type);
goto fail;
}
utile_w = utile_width(cpp);
utile_h = utile_height(cpp);
if (type == VC4_TEXTURE_TYPE_RGBA32R) {
tiling_format = VC4_TILING_FORMAT_LINEAR;
} else {
if (size_is_lt(width, height, cpp))
tiling_format = VC4_TILING_FORMAT_LT;
else
tiling_format = VC4_TILING_FORMAT_T;
}
if (!vc4_check_tex_size(exec, tex, offset + cube_map_stride * 5,
tiling_format, width, height, cpp)) {
goto fail;
}
/* The mipmap levels are stored before the base of the texture. Make
* sure there is actually space in the BO.
*/
for (i = 1; i <= miplevels; i++) {
uint32_t level_width = max(width >> i, 1u);
uint32_t level_height = max(height >> i, 1u);
uint32_t aligned_width, aligned_height;
uint32_t level_size;
/* Once the levels get small enough, they drop from T to LT. */
if (tiling_format == VC4_TILING_FORMAT_T &&
size_is_lt(level_width, level_height, cpp)) {
tiling_format = VC4_TILING_FORMAT_LT;
}
switch (tiling_format) {
case VC4_TILING_FORMAT_T:
aligned_width = round_up(level_width, utile_w * 8);
aligned_height = round_up(level_height, utile_h * 8);
break;
case VC4_TILING_FORMAT_LT:
aligned_width = round_up(level_width, utile_w);
aligned_height = round_up(level_height, utile_h);
break;
default:
aligned_width = round_up(level_width, utile_w);
aligned_height = level_height;
break;
}
level_size = aligned_width * cpp * aligned_height;
if (offset < level_size) {
DRM_DEBUG("Level %d (%dx%d -> %dx%d) size %db "
"overflowed buffer bounds (offset %d)\n",
i, level_width, level_height,
aligned_width, aligned_height,
level_size, offset);
goto fail;
}
offset -= level_size;
}
*validated_p0 = tex->dma_addr + p0;
if (is_cs) {
exec->bin_dep_seqno = max(exec->bin_dep_seqno,
to_vc4_bo(&tex->base)->write_seqno);
}
return true;
fail:
DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
DRM_INFO("Texture p1 at %d: 0x%08x\n", sample->p_offset[1], p1);
DRM_INFO("Texture p2 at %d: 0x%08x\n", sample->p_offset[2], p2);
DRM_INFO("Texture p3 at %d: 0x%08x\n", sample->p_offset[3], p3);
return false;
}
static int
validate_gl_shader_rec(struct drm_device *dev,
struct vc4_exec_info *exec,
struct vc4_shader_state *state)
{
uint32_t *src_handles;
void *pkt_u, *pkt_v;
static const uint32_t shader_reloc_offsets[] = {
4, /* fs */
16, /* vs */
28, /* cs */
};
uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
struct drm_gem_dma_object *bo[ARRAY_SIZE(shader_reloc_offsets) + 8];
uint32_t nr_attributes, nr_relocs, packet_size;
int i;
nr_attributes = state->addr & 0x7;
if (nr_attributes == 0)
nr_attributes = 8;
packet_size = gl_shader_rec_size(state->addr);
nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes;
if (nr_relocs * 4 > exec->shader_rec_size) {
DRM_DEBUG("overflowed shader recs reading %d handles "
"from %d bytes left\n",
nr_relocs, exec->shader_rec_size);
return -EINVAL;
}
src_handles = exec->shader_rec_u;
exec->shader_rec_u += nr_relocs * 4;
exec->shader_rec_size -= nr_relocs * 4;
if (packet_size > exec->shader_rec_size) {
DRM_DEBUG("overflowed shader recs copying %db packet "
"from %d bytes left\n",
packet_size, exec->shader_rec_size);
return -EINVAL;
}
pkt_u = exec->shader_rec_u;
pkt_v = exec->shader_rec_v;
memcpy(pkt_v, pkt_u, packet_size);
exec->shader_rec_u += packet_size;
/* Shader recs have to be aligned to 16 bytes (due to the attribute
* flags being in the low bytes), so round the next validated shader
* rec address up. This should be safe, since we've got so many
* relocations in a shader rec packet.
*/
BUG_ON(roundup(packet_size, 16) - packet_size > nr_relocs * 4);
exec->shader_rec_v += roundup(packet_size, 16);
exec->shader_rec_size -= packet_size;
for (i = 0; i < shader_reloc_count; i++) {
if (src_handles[i] > exec->bo_count) {
DRM_DEBUG("Shader handle %d too big\n", src_handles[i]);
return -EINVAL;
}
bo[i] = to_drm_gem_dma_obj(exec->bo[src_handles[i]]);
if (!bo[i])
return -EINVAL;
}
for (i = shader_reloc_count; i < nr_relocs; i++) {
bo[i] = vc4_use_bo(exec, src_handles[i]);
if (!bo[i])
return -EINVAL;
}
if (((*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD) == 0) !=
to_vc4_bo(&bo[0]->base)->validated_shader->is_threaded) {
DRM_DEBUG("Thread mode of CL and FS do not match\n");
return -EINVAL;
}
if (to_vc4_bo(&bo[1]->base)->validated_shader->is_threaded ||
to_vc4_bo(&bo[2]->base)->validated_shader->is_threaded) {
DRM_DEBUG("cs and vs cannot be threaded\n");
return -EINVAL;
}
for (i = 0; i < shader_reloc_count; i++) {
struct vc4_validated_shader_info *validated_shader;
uint32_t o = shader_reloc_offsets[i];
uint32_t src_offset = *(uint32_t *)(pkt_u + o);
uint32_t *texture_handles_u;
void *uniform_data_u;
uint32_t tex, uni;
*(uint32_t *)(pkt_v + o) = bo[i]->dma_addr + src_offset;
if (src_offset != 0) {
DRM_DEBUG("Shaders must be at offset 0 of "
"the BO.\n");
return -EINVAL;
}
validated_shader = to_vc4_bo(&bo[i]->base)->validated_shader;
if (!validated_shader)
return -EINVAL;
if (validated_shader->uniforms_src_size >
exec->uniforms_size) {
DRM_DEBUG("Uniforms src buffer overflow\n");
return -EINVAL;
}
texture_handles_u = exec->uniforms_u;
uniform_data_u = (texture_handles_u +
validated_shader->num_texture_samples);
memcpy(exec->uniforms_v, uniform_data_u,
validated_shader->uniforms_size);
for (tex = 0;
tex < validated_shader->num_texture_samples;
tex++) {
if (!reloc_tex(exec,
uniform_data_u,
&validated_shader->texture_samples[tex],
texture_handles_u[tex],
i == 2)) {
return -EINVAL;
}
}
/* Fill in the uniform slots that need this shader's
* start-of-uniforms address (used for resetting the uniform
* stream in the presence of control flow).
*/
for (uni = 0;
uni < validated_shader->num_uniform_addr_offsets;
uni++) {
uint32_t o = validated_shader->uniform_addr_offsets[uni];
((uint32_t *)exec->uniforms_v)[o] = exec->uniforms_p;
}
*(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
exec->uniforms_u += validated_shader->uniforms_src_size;
exec->uniforms_v += validated_shader->uniforms_size;
exec->uniforms_p += validated_shader->uniforms_size;
}
for (i = 0; i < nr_attributes; i++) {
struct drm_gem_dma_object *vbo =
bo[ARRAY_SIZE(shader_reloc_offsets) + i];
uint32_t o = 36 + i * 8;
uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
uint32_t attr_size = *(uint8_t *)(pkt_u + o + 4) + 1;
uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
uint32_t max_index;
exec->bin_dep_seqno = max(exec->bin_dep_seqno,
to_vc4_bo(&vbo->base)->write_seqno);
if (state->addr & 0x8)
stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
if (vbo->base.size < offset ||
vbo->base.size - offset < attr_size) {
DRM_DEBUG("BO offset overflow (%d + %d > %zu)\n",
offset, attr_size, vbo->base.size);
return -EINVAL;
}
if (stride != 0) {
max_index = ((vbo->base.size - offset - attr_size) /
stride);
if (state->max_index > max_index) {
DRM_DEBUG("primitives use index %d out of "
"supplied %d\n",
state->max_index, max_index);
return -EINVAL;
}
}
*(uint32_t *)(pkt_v + o) = vbo->dma_addr + offset;
}
return 0;
}
int
vc4_validate_shader_recs(struct drm_device *dev,
struct vc4_exec_info *exec)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t i;
int ret = 0;
if (WARN_ON_ONCE(vc4->is_vc5))
return -ENODEV;
for (i = 0; i < exec->shader_state_count; i++) {
ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]);
if (ret)
return ret;
}
return ret;
}
| linux-master | drivers/gpu/drm/vc4/vc4_validate.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Broadcom
*/
/**
* DOC: VC4 V3D performance monitor module
*
* The V3D block provides 16 hardware counters which can count various events.
*/
#include "vc4_drv.h"
#include "vc4_regs.h"
#define VC4_PERFMONID_MIN 1
#define VC4_PERFMONID_MAX U32_MAX
void vc4_perfmon_get(struct vc4_perfmon *perfmon)
{
struct vc4_dev *vc4;
if (!perfmon)
return;
vc4 = perfmon->dev;
if (WARN_ON_ONCE(vc4->is_vc5))
return;
refcount_inc(&perfmon->refcnt);
}
void vc4_perfmon_put(struct vc4_perfmon *perfmon)
{
struct vc4_dev *vc4;
if (!perfmon)
return;
vc4 = perfmon->dev;
if (WARN_ON_ONCE(vc4->is_vc5))
return;
if (refcount_dec_and_test(&perfmon->refcnt))
kfree(perfmon);
}
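/* Program the requested events into the counter-select registers, clear
 * the counters, and enable them; the hardware supports a single active
 * perfmon at a time.
 */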
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon)
{
unsigned int i;
u32 mask;
if (WARN_ON_ONCE(vc4->is_vc5))
return;
if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
return;
for (i = 0; i < perfmon->ncounters; i++)
V3D_WRITE(V3D_PCTRS(i), perfmon->events[i]);
mask = GENMASK(perfmon->ncounters - 1, 0);
V3D_WRITE(V3D_PCTRC, mask);
V3D_WRITE(V3D_PCTRE, V3D_PCTRE_EN | mask);
vc4->active_perfmon = perfmon;
}
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
bool capture)
{
unsigned int i;
if (WARN_ON_ONCE(vc4->is_vc5))
return;
if (WARN_ON_ONCE(!vc4->active_perfmon ||
perfmon != vc4->active_perfmon))
return;
if (capture) {
for (i = 0; i < perfmon->ncounters; i++)
perfmon->counters[i] += V3D_READ(V3D_PCTR(i));
}
V3D_WRITE(V3D_PCTRE, 0);
vc4->active_perfmon = NULL;
}
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
{
struct vc4_dev *vc4 = vc4file->dev;
struct vc4_perfmon *perfmon;
if (WARN_ON_ONCE(vc4->is_vc5))
return NULL;
mutex_lock(&vc4file->perfmon.lock);
perfmon = idr_find(&vc4file->perfmon.idr, id);
vc4_perfmon_get(perfmon);
mutex_unlock(&vc4file->perfmon.lock);
return perfmon;
}
void vc4_perfmon_open_file(struct vc4_file *vc4file)
{
struct vc4_dev *vc4 = vc4file->dev;
if (WARN_ON_ONCE(vc4->is_vc5))
return;
mutex_init(&vc4file->perfmon.lock);
idr_init_base(&vc4file->perfmon.idr, VC4_PERFMONID_MIN);
vc4file->dev = vc4;
}
static int vc4_perfmon_idr_del(int id, void *elem, void *data)
{
struct vc4_perfmon *perfmon = elem;
vc4_perfmon_put(perfmon);
return 0;
}
void vc4_perfmon_close_file(struct vc4_file *vc4file)
{
struct vc4_dev *vc4 = vc4file->dev;
if (WARN_ON_ONCE(vc4->is_vc5))
return;
mutex_lock(&vc4file->perfmon.lock);
idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
idr_destroy(&vc4file->perfmon.idr);
mutex_unlock(&vc4file->perfmon.lock);
mutex_destroy(&vc4file->perfmon.lock);
}
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file_priv->driver_priv;
struct drm_vc4_perfmon_create *req = data;
struct vc4_perfmon *perfmon;
unsigned int i;
int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return -ENODEV;
if (!vc4->v3d) {
DRM_DEBUG("Creating perfmon no VC4 V3D probed\n");
return -ENODEV;
}
/* Number of monitored counters cannot exceed HW limits. */
if (req->ncounters > DRM_VC4_MAX_PERF_COUNTERS ||
!req->ncounters)
return -EINVAL;
/* Make sure all events are valid. */
for (i = 0; i < req->ncounters; i++) {
if (req->events[i] >= VC4_PERFCNT_NUM_EVENTS)
return -EINVAL;
}
perfmon = kzalloc(struct_size(perfmon, counters, req->ncounters),
GFP_KERNEL);
if (!perfmon)
return -ENOMEM;
perfmon->dev = vc4;
for (i = 0; i < req->ncounters; i++)
perfmon->events[i] = req->events[i];
perfmon->ncounters = req->ncounters;
refcount_set(&perfmon->refcnt, 1);
mutex_lock(&vc4file->perfmon.lock);
ret = idr_alloc(&vc4file->perfmon.idr, perfmon, VC4_PERFMONID_MIN,
VC4_PERFMONID_MAX, GFP_KERNEL);
mutex_unlock(&vc4file->perfmon.lock);
if (ret < 0) {
kfree(perfmon);
return ret;
}
req->id = ret;
return 0;
}
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file_priv->driver_priv;
struct drm_vc4_perfmon_destroy *req = data;
struct vc4_perfmon *perfmon;
if (WARN_ON_ONCE(vc4->is_vc5))
return -ENODEV;
if (!vc4->v3d) {
DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n");
return -ENODEV;
}
mutex_lock(&vc4file->perfmon.lock);
perfmon = idr_remove(&vc4file->perfmon.idr, req->id);
mutex_unlock(&vc4file->perfmon.lock);
if (!perfmon)
return -EINVAL;
vc4_perfmon_put(perfmon);
return 0;
}
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file_priv->driver_priv;
struct drm_vc4_perfmon_get_values *req = data;
struct vc4_perfmon *perfmon;
int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return -ENODEV;
if (!vc4->v3d) {
DRM_DEBUG("Getting perfmon no VC4 V3D probed\n");
return -ENODEV;
}
mutex_lock(&vc4file->perfmon.lock);
perfmon = idr_find(&vc4file->perfmon.idr, req->id);
vc4_perfmon_get(perfmon);
mutex_unlock(&vc4file->perfmon.lock);
if (!perfmon)
return -EINVAL;
if (copy_to_user(u64_to_user_ptr(req->values_ptr), perfmon->counters,
perfmon->ncounters * sizeof(u64)))
ret = -EFAULT;
else
ret = 0;
vc4_perfmon_put(perfmon);
return ret;
}
| linux-master | drivers/gpu/drm/vc4/vc4_perfmon.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Broadcom
*/
#include "vc4_drv.h"
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "vc4_trace.h"
#endif
| linux-master | drivers/gpu/drm/vc4/vc4_trace_points.c |
/*
* Copyright © 2014 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/**
* DOC: Shader validator for VC4.
*
* Since the VC4 has no IOMMU between it and system memory, a user
* with access to execute shaders could escalate privilege by
* overwriting system memory (using the VPM write address register in
* the general-purpose DMA mode) or reading system memory it shouldn't
* (reading it as a texture, uniform data, or direct-addressed TMU
* lookup).
*
* The shader validator walks over a shader's BO, ensuring that its
* accesses are appropriately bounded, and recording where texture
* accesses are made so that we can do relocations for them in the
* uniform stream.
*
 * Shader BOs are immutable for their lifetimes (enforced by not
 * allowing mmaps, GEM prime export, or rendering to them from a CL), so
* this validation is only performed at BO creation time.
*/
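/*
 * Hedged sketch of the caller side (not part of the original file):
 * validation runs once, when the shader BO is created (see
 * vc4_create_shader_bo() in vc4_bo.c), and the returned metadata stays
 * attached to the BO for later uniform relocation. The helper name and
 * body below are illustrative, not a copy of the real caller.
 */
#if 0
static int example_attach_validated_shader(struct vc4_bo *bo)
{
	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader)
		return -EINVAL;
	return 0;
}
#endif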
#include "vc4_drv.h"
#include "vc4_qpu_defines.h"
#define LIVE_REG_COUNT (32 + 32 + 4)
struct vc4_shader_validation_state {
/* Current IP being validated. */
uint32_t ip;
/* IP at the end of the BO, do not read shader[max_ip] */
uint32_t max_ip;
uint64_t *shader;
struct vc4_texture_sample_info tmu_setup[2];
int tmu_write_count[2];
/* For registers that were last written to by a MIN instruction with
* one argument being a uniform, the address of the uniform.
* Otherwise, ~0.
*
* This is used for the validation of direct address memory reads.
*/
uint32_t live_min_clamp_offsets[LIVE_REG_COUNT];
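	/* True for registers last written by a MAX(x, 0) clamp. */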
bool live_max_clamp_regs[LIVE_REG_COUNT];
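	/* Value last written by an unconditional LOAD_IMM, else ~0. */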
uint32_t live_immediates[LIVE_REG_COUNT];
/* Bitfield of which IPs are used as branch targets.
*
* Used for validation that the uniform stream is updated at the right
* points and clearing the texturing/clamping state.
*/
unsigned long *branch_targets;
/* Set when entering a basic block, and cleared when the uniform
* address update is found. This is used to make sure that we don't
* read uniforms when the address is undefined.
*/
bool needs_uniform_address_update;
/* Set when we find a backwards branch. If the branch is backwards,
	 * the target is probably doing an address reset to read uniforms,
* and so we need to be sure that a uniforms address is present in the
* stream, even if the shader didn't need to read uniforms in later
* basic blocks.
*/
bool needs_uniform_address_for_loop;
/* Set when we find an instruction writing the top half of the
* register files. If we allowed writing the unusable regs in
* a threaded shader, then the other shader running on our
* QPU's clamp validation would be invalid.
*/
bool all_registers_used;
};
static uint32_t
waddr_to_live_reg_index(uint32_t waddr, bool is_b)
{
if (waddr < 32) {
if (is_b)
return 32 + waddr;
else
return waddr;
} else if (waddr <= QPU_W_ACC3) {
return 64 + waddr - QPU_W_ACC0;
} else {
return ~0;
}
}
static uint32_t
raddr_add_a_to_live_reg_index(uint64_t inst)
{
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
if (add_a == QPU_MUX_A)
return raddr_a;
else if (add_a == QPU_MUX_B && sig != QPU_SIG_SMALL_IMM)
return 32 + raddr_b;
else if (add_a <= QPU_MUX_R3)
return 64 + add_a;
else
return ~0;
}
static bool
live_reg_is_upper_half(uint32_t lri)
{
return (lri >= 16 && lri < 32) ||
(lri >= 32 + 16 && lri < 32 + 32);
}
static bool
is_tmu_submit(uint32_t waddr)
{
return (waddr == QPU_W_TMU0_S ||
waddr == QPU_W_TMU1_S);
}
static bool
is_tmu_write(uint32_t waddr)
{
return (waddr >= QPU_W_TMU0_S &&
waddr <= QPU_W_TMU1_B);
}
static bool
record_texture_sample(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state,
int tmu)
{
uint32_t s = validated_shader->num_texture_samples;
int i;
struct vc4_texture_sample_info *temp_samples;
temp_samples = krealloc(validated_shader->texture_samples,
(s + 1) * sizeof(*temp_samples),
GFP_KERNEL);
if (!temp_samples)
return false;
memcpy(&temp_samples[s],
&validation_state->tmu_setup[tmu],
sizeof(*temp_samples));
validated_shader->num_texture_samples = s + 1;
validated_shader->texture_samples = temp_samples;
for (i = 0; i < 4; i++)
validation_state->tmu_setup[tmu].p_offset[i] = ~0;
return true;
}
static bool
check_tmu_write(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state,
bool is_mul)
{
uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t waddr = (is_mul ?
QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
QPU_GET_FIELD(inst, QPU_WADDR_ADD));
uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
int tmu = waddr > QPU_W_TMU0_B;
bool submit = is_tmu_submit(waddr);
bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
if (is_direct) {
uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
uint32_t clamp_reg, clamp_offset;
if (sig == QPU_SIG_SMALL_IMM) {
DRM_DEBUG("direct TMU read used small immediate\n");
return false;
}
/* Make sure that this texture load is an add of the base
* address of the UBO to a clamped offset within the UBO.
*/
if (is_mul ||
QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
DRM_DEBUG("direct TMU load wasn't an add\n");
return false;
}
/* We assert that the clamped address is the first
* argument, and the UBO base address is the second argument.
* This is arbitrary, but simpler than supporting flipping the
* two either way.
*/
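		/* Illustrative QPU pseudo-assembly for the accepted pattern
		 * (register names are hypothetical):
		 *
		 *   max  ra1, rb2, 0     ; clamp offset to >= 0 (small imm 0)
		 *   min  ra2, ra1, unif  ; clamp to <= UBO size (a uniform)
		 *   add  t0s, ra2, unif  ; direct TMU: clamped offset + base
		 *
		 * track_live_clamps() records the first two steps; the checks
		 * below verify the final add.
		 */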
clamp_reg = raddr_add_a_to_live_reg_index(inst);
if (clamp_reg == ~0) {
DRM_DEBUG("direct TMU load wasn't clamped\n");
return false;
}
clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
if (clamp_offset == ~0) {
DRM_DEBUG("direct TMU load wasn't clamped\n");
return false;
}
/* Store the clamp value's offset in p1 (see reloc_tex() in
* vc4_validate.c).
*/
validation_state->tmu_setup[tmu].p_offset[1] =
clamp_offset;
if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
!(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
DRM_DEBUG("direct TMU load didn't add to a uniform\n");
return false;
}
validation_state->tmu_setup[tmu].is_direct = true;
} else {
if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
raddr_b == QPU_R_UNIF)) {
DRM_DEBUG("uniform read in the same instruction as "
"texture setup.\n");
return false;
}
}
if (validation_state->tmu_write_count[tmu] >= 4) {
DRM_DEBUG("TMU%d got too many parameters before dispatch\n",
tmu);
return false;
}
validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
validated_shader->uniforms_size;
validation_state->tmu_write_count[tmu]++;
/* Since direct uses a RADDR uniform reference, it will get counted in
* check_instruction_reads()
*/
if (!is_direct) {
if (validation_state->needs_uniform_address_update) {
DRM_DEBUG("Texturing with undefined uniform address\n");
return false;
}
validated_shader->uniforms_size += 4;
}
if (submit) {
if (!record_texture_sample(validated_shader,
validation_state, tmu)) {
return false;
}
validation_state->tmu_write_count[tmu] = 0;
}
return true;
}
static bool require_uniform_address_uniform(struct vc4_validated_shader_info *validated_shader)
{
uint32_t o = validated_shader->num_uniform_addr_offsets;
uint32_t num_uniforms = validated_shader->uniforms_size / 4;
validated_shader->uniform_addr_offsets =
krealloc(validated_shader->uniform_addr_offsets,
(o + 1) *
sizeof(*validated_shader->uniform_addr_offsets),
GFP_KERNEL);
if (!validated_shader->uniform_addr_offsets)
return false;
validated_shader->uniform_addr_offsets[o] = num_uniforms;
validated_shader->num_uniform_addr_offsets++;
return true;
}
static bool
validate_uniform_address_write(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state,
bool is_mul)
{
uint64_t inst = validation_state->shader[validation_state->ip];
u32 add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
u32 raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
u32 raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
u32 add_lri = raddr_add_a_to_live_reg_index(inst);
/* We want our reset to be pointing at whatever uniform follows the
* uniforms base address.
*/
u32 expected_offset = validated_shader->uniforms_size + 4;
/* We only support absolute uniform address changes, and we
* require that they be in the current basic block before any
* of its uniform reads.
*
* One could potentially emit more efficient QPU code, by
* noticing that (say) an if statement does uniform control
* flow for all threads and that the if reads the same number
* of uniforms on each side. However, this scheme is easy to
* validate so it's all we allow for now.
*/
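	/* Illustrative pseudo-assembly of a reset this function accepts
	 * (register name hypothetical):
	 *
	 *   load_imm ra1, <expected_offset>
	 *   add      unif_addr, ra1, unif  ; unif supplies the new base
	 *
	 * The immediate must equal expected_offset above, and the second
	 * argument must come from the uniform stream itself.
	 */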
switch (QPU_GET_FIELD(inst, QPU_SIG)) {
case QPU_SIG_NONE:
case QPU_SIG_SCOREBOARD_UNLOCK:
case QPU_SIG_COLOR_LOAD:
case QPU_SIG_LOAD_TMU0:
case QPU_SIG_LOAD_TMU1:
break;
default:
DRM_DEBUG("uniforms address change must be "
"normal math\n");
return false;
}
if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
DRM_DEBUG("Uniform address reset must be an ADD.\n");
return false;
}
if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
DRM_DEBUG("Uniform address reset must be unconditional.\n");
return false;
}
if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
!(inst & QPU_PM)) {
DRM_DEBUG("No packing allowed on uniforms reset\n");
return false;
}
if (add_lri == -1) {
DRM_DEBUG("First argument of uniform address write must be "
"an immediate value.\n");
return false;
}
if (validation_state->live_immediates[add_lri] != expected_offset) {
DRM_DEBUG("Resetting uniforms with offset %db instead of %db\n",
validation_state->live_immediates[add_lri],
expected_offset);
return false;
}
if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
!(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
DRM_DEBUG("Second argument of uniform address write must be "
"a uniform.\n");
return false;
}
validation_state->needs_uniform_address_update = false;
validation_state->needs_uniform_address_for_loop = false;
return require_uniform_address_uniform(validated_shader);
}
static bool
check_reg_write(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state,
bool is_mul)
{
uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t waddr = (is_mul ?
QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
QPU_GET_FIELD(inst, QPU_WADDR_ADD));
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
bool ws = inst & QPU_WS;
bool is_b = is_mul ^ ws;
u32 lri = waddr_to_live_reg_index(waddr, is_b);
if (lri != -1) {
uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
uint32_t cond_mul = QPU_GET_FIELD(inst, QPU_COND_MUL);
if (sig == QPU_SIG_LOAD_IMM &&
QPU_GET_FIELD(inst, QPU_PACK) == QPU_PACK_A_NOP &&
((is_mul && cond_mul == QPU_COND_ALWAYS) ||
(!is_mul && cond_add == QPU_COND_ALWAYS))) {
validation_state->live_immediates[lri] =
QPU_GET_FIELD(inst, QPU_LOAD_IMM);
} else {
validation_state->live_immediates[lri] = ~0;
}
if (live_reg_is_upper_half(lri))
validation_state->all_registers_used = true;
}
switch (waddr) {
case QPU_W_UNIFORMS_ADDRESS:
if (is_b) {
DRM_DEBUG("relative uniforms address change "
"unsupported\n");
return false;
}
return validate_uniform_address_write(validated_shader,
validation_state,
is_mul);
case QPU_W_TLB_COLOR_MS:
case QPU_W_TLB_COLOR_ALL:
case QPU_W_TLB_Z:
/* These only interact with the tile buffer, not main memory,
* so they're safe.
*/
return true;
case QPU_W_TMU0_S:
case QPU_W_TMU0_T:
case QPU_W_TMU0_R:
case QPU_W_TMU0_B:
case QPU_W_TMU1_S:
case QPU_W_TMU1_T:
case QPU_W_TMU1_R:
case QPU_W_TMU1_B:
return check_tmu_write(validated_shader, validation_state,
is_mul);
case QPU_W_HOST_INT:
case QPU_W_TMU_NOSWAP:
case QPU_W_TLB_ALPHA_MASK:
case QPU_W_MUTEX_RELEASE:
/* XXX: I haven't thought about these, so don't support them
* for now.
*/
DRM_DEBUG("Unsupported waddr %d\n", waddr);
return false;
case QPU_W_VPM_ADDR:
DRM_DEBUG("General VPM DMA unsupported\n");
return false;
case QPU_W_VPM:
case QPU_W_VPMVCD_SETUP:
/* We allow VPM setup in general, even including VPM DMA
* configuration setup, because the (unsafe) DMA can only be
* triggered by QPU_W_VPM_ADDR writes.
*/
return true;
case QPU_W_TLB_STENCIL_SETUP:
return true;
}
return true;
}
static void
track_live_clamps(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state)
{
uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
bool ws = inst & QPU_WS;
uint32_t lri_add_a, lri_add, lri_mul;
bool add_a_is_min_0;
	/* Check whether OP_ADD's A argument comes from a live MAX(x, 0),
* before we clear previous live state.
*/
lri_add_a = raddr_add_a_to_live_reg_index(inst);
add_a_is_min_0 = (lri_add_a != ~0 &&
validation_state->live_max_clamp_regs[lri_add_a]);
/* Clear live state for registers written by our instruction. */
lri_add = waddr_to_live_reg_index(waddr_add, ws);
lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
if (lri_mul != ~0) {
validation_state->live_max_clamp_regs[lri_mul] = false;
validation_state->live_min_clamp_offsets[lri_mul] = ~0;
}
if (lri_add != ~0) {
validation_state->live_max_clamp_regs[lri_add] = false;
validation_state->live_min_clamp_offsets[lri_add] = ~0;
} else {
/* Nothing further to do for live tracking, since only ADDs
* generate new live clamp registers.
*/
return;
}
/* Now, handle remaining live clamp tracking for the ADD operation. */
if (cond_add != QPU_COND_ALWAYS)
return;
if (op_add == QPU_A_MAX) {
/* Track live clamps of a value to a minimum of 0 (in either
* arg).
*/
if (sig != QPU_SIG_SMALL_IMM || raddr_b != 0 ||
(add_a != QPU_MUX_B && add_b != QPU_MUX_B)) {
return;
}
validation_state->live_max_clamp_regs[lri_add] = true;
} else if (op_add == QPU_A_MIN) {
/* Track live clamps of a value clamped to a minimum of 0 and
* a maximum of some uniform's offset.
*/
if (!add_a_is_min_0)
return;
if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
!(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF &&
sig != QPU_SIG_SMALL_IMM)) {
return;
}
validation_state->live_min_clamp_offsets[lri_add] =
validated_shader->uniforms_size;
}
}
static bool
check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state)
{
uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
bool ok;
if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
DRM_DEBUG("ADD and MUL both set up textures\n");
return false;
}
ok = (check_reg_write(validated_shader, validation_state, false) &&
check_reg_write(validated_shader, validation_state, true));
track_live_clamps(validated_shader, validation_state);
return ok;
}
static bool
check_branch(uint64_t inst,
struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state,
int ip)
{
int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
if ((int)branch_imm < 0)
validation_state->needs_uniform_address_for_loop = true;
/* We don't want to have to worry about validation of this, and
* there's no need for it.
*/
if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
DRM_DEBUG("branch instruction at %d wrote a register.\n",
validation_state->ip);
return false;
}
return true;
}
static bool
check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state)
{
uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
if (raddr_a == QPU_R_UNIF ||
(raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) {
		/* This can't overflow the uint32_t, because we read 8
		 * bytes of instruction for every 4 bytes we add here, so
		 * we'd run out of memory first.
*/
validated_shader->uniforms_size += 4;
if (validation_state->needs_uniform_address_update) {
DRM_DEBUG("Uniform read with undefined uniform "
"address\n");
return false;
}
}
if ((raddr_a >= 16 && raddr_a < 32) ||
(raddr_b >= 16 && raddr_b < 32 && sig != QPU_SIG_SMALL_IMM)) {
validation_state->all_registers_used = true;
}
return true;
}
/* Make sure that all branches use immediate, PC-relative targets that
 * point within the shader, and note their targets for later.
*/
static bool
vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
{
uint32_t max_branch_target = 0;
int ip;
int last_branch = -2;
for (ip = 0; ip < validation_state->max_ip; ip++) {
uint64_t inst = validation_state->shader[ip];
int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
uint32_t after_delay_ip = ip + 4;
uint32_t branch_target_ip;
if (sig == QPU_SIG_PROG_END) {
/* There are two delay slots after program end is
* signaled that are still executed, then we're
* finished. validation_state->max_ip is the
* instruction after the last valid instruction in the
* program.
*/
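			/* e.g. PROG_END at ip 7 still executes ips 8 and 9,
			 * so max_ip becomes 10 (one past the last valid ip).
			 */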
validation_state->max_ip = ip + 3;
continue;
}
if (sig != QPU_SIG_BRANCH)
continue;
if (ip - last_branch < 4) {
DRM_DEBUG("Branch at %d during delay slots\n", ip);
return false;
}
last_branch = ip;
if (inst & QPU_BRANCH_REG) {
DRM_DEBUG("branching from register relative "
"not supported\n");
return false;
}
if (!(inst & QPU_BRANCH_REL)) {
DRM_DEBUG("relative branching required\n");
return false;
}
/* The actual branch target is the instruction after the delay
* slots, plus whatever byte offset is in the low 32 bits of
* the instruction. Make sure we're not branching beyond the
* end of the shader object.
*/
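		/* Worked example: a branch at ip 10 with branch_imm == -16
		 * bytes lands at after_delay_ip + (-16 >> 3) = 14 - 2 = 12.
		 */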
if (branch_imm % sizeof(inst) != 0) {
DRM_DEBUG("branch target not aligned\n");
return false;
}
branch_target_ip = after_delay_ip + (branch_imm >> 3);
if (branch_target_ip >= validation_state->max_ip) {
DRM_DEBUG("Branch at %d outside of shader (ip %d/%d)\n",
ip, branch_target_ip,
validation_state->max_ip);
return false;
}
set_bit(branch_target_ip, validation_state->branch_targets);
/* Make sure that the non-branching path is also not outside
* the shader.
*/
if (after_delay_ip >= validation_state->max_ip) {
DRM_DEBUG("Branch at %d continues past shader end "
"(%d/%d)\n",
ip, after_delay_ip, validation_state->max_ip);
return false;
}
set_bit(after_delay_ip, validation_state->branch_targets);
max_branch_target = max(max_branch_target, after_delay_ip);
}
if (max_branch_target > validation_state->max_ip - 3) {
DRM_DEBUG("Branch landed after QPU_SIG_PROG_END");
return false;
}
return true;
}
/* Resets any known state for the shader, used when we may be branched to from
* multiple locations in the program (or at shader start).
*/
static void
reset_validation_state(struct vc4_shader_validation_state *validation_state)
{
int i;
for (i = 0; i < 8; i++)
validation_state->tmu_setup[i / 4].p_offset[i % 4] = ~0;
for (i = 0; i < LIVE_REG_COUNT; i++) {
validation_state->live_min_clamp_offsets[i] = ~0;
validation_state->live_max_clamp_regs[i] = false;
validation_state->live_immediates[i] = ~0;
}
}
static bool
texturing_in_progress(struct vc4_shader_validation_state *validation_state)
{
return (validation_state->tmu_write_count[0] != 0 ||
validation_state->tmu_write_count[1] != 0);
}
static bool
vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
{
uint32_t ip = validation_state->ip;
if (!test_bit(ip, validation_state->branch_targets))
return true;
if (texturing_in_progress(validation_state)) {
DRM_DEBUG("Branch target landed during TMU setup\n");
return false;
}
/* Reset our live values tracking, since this instruction may have
* multiple predecessors.
*
* One could potentially do analysis to determine that, for
* example, all predecessors have a live max clamp in the same
* register, but we don't bother with that.
*/
reset_validation_state(validation_state);
/* Since we've entered a basic block from potentially multiple
* predecessors, we need the uniforms address to be updated before any
	 * uniforms are read. We require that after any branch point, the next
	 * uniform to be loaded is a uniform address offset. That uniform's
	 * offset will be marked by the uniform address register write
	 * validation, or by a one-off end-of-program check.
*/
validation_state->needs_uniform_address_update = true;
return true;
}
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_dma_object *shader_obj)
{
struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev);
bool found_shader_end = false;
int shader_end_ip = 0;
uint32_t last_thread_switch_ip = -3;
uint32_t ip;
struct vc4_validated_shader_info *validated_shader = NULL;
struct vc4_shader_validation_state validation_state;
if (WARN_ON_ONCE(vc4->is_vc5))
return NULL;
memset(&validation_state, 0, sizeof(validation_state));
validation_state.shader = shader_obj->vaddr;
validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);
reset_validation_state(&validation_state);
validation_state.branch_targets =
kcalloc(BITS_TO_LONGS(validation_state.max_ip),
sizeof(unsigned long), GFP_KERNEL);
if (!validation_state.branch_targets)
goto fail;
validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
if (!validated_shader)
goto fail;
if (!vc4_validate_branches(&validation_state))
goto fail;
for (ip = 0; ip < validation_state.max_ip; ip++) {
uint64_t inst = validation_state.shader[ip];
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
validation_state.ip = ip;
if (!vc4_handle_branch_target(&validation_state))
goto fail;
if (ip == last_thread_switch_ip + 3) {
/* Reset r0-r3 live clamp data */
int i;
for (i = 64; i < LIVE_REG_COUNT; i++) {
validation_state.live_min_clamp_offsets[i] = ~0;
validation_state.live_max_clamp_regs[i] = false;
validation_state.live_immediates[i] = ~0;
}
}
switch (sig) {
case QPU_SIG_NONE:
case QPU_SIG_WAIT_FOR_SCOREBOARD:
case QPU_SIG_SCOREBOARD_UNLOCK:
case QPU_SIG_COLOR_LOAD:
case QPU_SIG_LOAD_TMU0:
case QPU_SIG_LOAD_TMU1:
case QPU_SIG_PROG_END:
case QPU_SIG_SMALL_IMM:
case QPU_SIG_THREAD_SWITCH:
case QPU_SIG_LAST_THREAD_SWITCH:
if (!check_instruction_writes(validated_shader,
&validation_state)) {
DRM_DEBUG("Bad write at ip %d\n", ip);
goto fail;
}
if (!check_instruction_reads(validated_shader,
&validation_state))
goto fail;
if (sig == QPU_SIG_PROG_END) {
found_shader_end = true;
shader_end_ip = ip;
}
if (sig == QPU_SIG_THREAD_SWITCH ||
sig == QPU_SIG_LAST_THREAD_SWITCH) {
validated_shader->is_threaded = true;
if (ip < last_thread_switch_ip + 3) {
DRM_DEBUG("Thread switch too soon after "
"last switch at ip %d\n", ip);
goto fail;
}
last_thread_switch_ip = ip;
}
break;
case QPU_SIG_LOAD_IMM:
if (!check_instruction_writes(validated_shader,
&validation_state)) {
DRM_DEBUG("Bad LOAD_IMM write at ip %d\n", ip);
goto fail;
}
break;
case QPU_SIG_BRANCH:
if (!check_branch(inst, validated_shader,
&validation_state, ip))
goto fail;
if (ip < last_thread_switch_ip + 3) {
DRM_DEBUG("Branch in thread switch at ip %d",
ip);
goto fail;
}
break;
default:
DRM_DEBUG("Unsupported QPU signal %d at "
"instruction %d\n", sig, ip);
goto fail;
}
/* There are two delay slots after program end is signaled
* that are still executed, then we're finished.
*/
if (found_shader_end && ip == shader_end_ip + 2)
break;
}
if (ip == validation_state.max_ip) {
DRM_DEBUG("shader failed to terminate before "
"shader BO end at %zd\n",
shader_obj->base.size);
goto fail;
}
/* Might corrupt other thread */
if (validated_shader->is_threaded &&
validation_state.all_registers_used) {
DRM_DEBUG("Shader uses threading, but uses the upper "
"half of the registers, too\n");
goto fail;
}
/* If we did a backwards branch and we haven't emitted a uniforms
* reset since then, we still need the uniforms stream to have the
* uniforms address available so that the backwards branch can do its
* uniforms reset.
*
* We could potentially prove that the backwards branch doesn't
* contain any uses of uniforms until program exit, but that doesn't
* seem to be worth the trouble.
*/
if (validation_state.needs_uniform_address_for_loop) {
if (!require_uniform_address_uniform(validated_shader))
goto fail;
validated_shader->uniforms_size += 4;
}
/* Again, no chance of integer overflow here because the worst case
* scenario is 8 bytes of uniforms plus handles per 8-byte
* instruction.
*/
validated_shader->uniforms_src_size =
(validated_shader->uniforms_size +
4 * validated_shader->num_texture_samples);
kfree(validation_state.branch_targets);
return validated_shader;
fail:
kfree(validation_state.branch_targets);
if (validated_shader) {
kfree(validated_shader->uniform_addr_offsets);
kfree(validated_shader->texture_samples);
kfree(validated_shader);
}
return NULL;
}
| linux-master | drivers/gpu/drm/vc4/vc4_validate_shaders.c |