python_code (stringlengths 0–1.8M) | repo_name (stringclasses, 7 values) | file_path (stringlengths 5–99)
---|---|---|
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2014-2022 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <linux/hashtable.h>
#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
/**
* struct vmw_cmdbuf_res - Command buffer managed resource entry.
*
* @res: Refcounted pointer to a struct vmw_resource.
* @hash: Hash entry for the manager hash table.
* @head: List head used either by the staging list or the manager list
* of committed resources.
* @state: Staging state of this resource entry.
* @man: Pointer to a resource manager for this entry.
*/
struct vmw_cmdbuf_res {
struct vmw_resource *res;
struct vmwgfx_hash_item hash;
struct list_head head;
enum vmw_cmdbuf_res_state state;
struct vmw_cmdbuf_res_manager *man;
};
/**
* struct vmw_cmdbuf_res_manager - Command buffer resource manager.
*
* @resources: Hash table containing staged and committed command buffer
* resources.
* @list: List of committed command buffer resources.
* @dev_priv: Pointer to a device private structure.
*
* @resources and @list are protected by the cmdbuf mutex for now.
*/
struct vmw_cmdbuf_res_manager {
DECLARE_HASHTABLE(resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
struct list_head list;
struct vmw_private *dev_priv;
};
/**
* vmw_cmdbuf_res_lookup - Look up a command buffer resource
*
* @man: Pointer to the command buffer resource manager
* @res_type: The resource type that, combined with the user key,
* identifies the resource.
* @user_key: The user key.
*
* Returns a valid refcounted struct vmw_resource pointer on success,
* an error pointer on failure.
*/
struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key)
{
struct vmwgfx_hash_item *hash;
unsigned long key = user_key | (res_type << 24);
hash_for_each_possible_rcu(man->resources, hash, head, key) {
if (hash->key == key)
return hlist_entry(hash, struct vmw_cmdbuf_res, hash)->res;
}
return ERR_PTR(-EINVAL);
}
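/*
 * Example (hypothetical caller, not taken from this file): looking up a
 * shader resource by its user key. The lookup key packs the resource type
 * into the bits above the 24-bit user key. Per the
 * struct vmw_cmdbuf_res_manager documentation above, @man is protected by
 * the cmdbuf mutex, so this is assumed to run with that mutex held.
 *
 *     struct vmw_resource *res;
 *
 *     res = vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader, user_key);
 *     if (IS_ERR(res))
 *             return PTR_ERR(res);
 */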
/**
* vmw_cmdbuf_res_free - Free a command buffer resource.
*
* @man: Pointer to the command buffer resource manager
* @entry: Pointer to a struct vmw_cmdbuf_res.
*
* Frees a struct vmw_cmdbuf_res entry and drops its reference to the
* struct vmw_resource.
*/
static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
struct vmw_cmdbuf_res *entry)
{
list_del(&entry->head);
hash_del_rcu(&entry->hash.head);
vmw_resource_unreference(&entry->res);
kfree(entry);
}
/**
* vmw_cmdbuf_res_commit - Commit a list of command buffer resource actions
*
* @list: Caller's list of command buffer resource actions.
*
* This function commits a list of command buffer resource
* additions or removals.
* It is typically called when the execbuf ioctl call triggering these
* actions has committed the fifo contents to the device.
*/
void vmw_cmdbuf_res_commit(struct list_head *list)
{
struct vmw_cmdbuf_res *entry, *next;
list_for_each_entry_safe(entry, next, list, head) {
list_del(&entry->head);
if (entry->res->func->commit_notify)
entry->res->func->commit_notify(entry->res,
entry->state);
switch (entry->state) {
case VMW_CMDBUF_RES_ADD:
entry->state = VMW_CMDBUF_RES_COMMITTED;
list_add_tail(&entry->head, &entry->man->list);
break;
case VMW_CMDBUF_RES_DEL:
vmw_resource_unreference(&entry->res);
kfree(entry);
break;
default:
BUG();
break;
}
}
}
/**
* vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions
*
* @list: Caller's list of command buffer resource actions.
*
* This function reverts a list of command buffer resource
* additions or removals.
* It is typically called when the execbuf ioctl call triggering these
* actions failed for some reason, and the command stream was never
* submitted.
*/
void vmw_cmdbuf_res_revert(struct list_head *list)
{
struct vmw_cmdbuf_res *entry, *next;
list_for_each_entry_safe(entry, next, list, head) {
switch (entry->state) {
case VMW_CMDBUF_RES_ADD:
vmw_cmdbuf_res_free(entry->man, entry);
break;
case VMW_CMDBUF_RES_DEL:
hash_add_rcu(entry->man->resources, &entry->hash.head,
entry->hash.key);
list_move_tail(&entry->head, &entry->man->list);
entry->state = VMW_CMDBUF_RES_COMMITTED;
break;
default:
BUG();
break;
}
}
}
/**
* vmw_cmdbuf_res_add - Stage a command buffer managed resource for addition.
*
* @man: Pointer to the command buffer resource manager.
* @res_type: The resource type.
* @user_key: The user-space id of the resource.
* @res: Valid (refcount != 0) pointer to a struct vmw_resource.
* @list: The staging list.
*
* This function allocates a struct vmw_cmdbuf_res entry and adds the
* resource to the hash table of the manager identified by @man. The
* entry is then put on the staging list identified by @list.
*/
int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key,
struct vmw_resource *res,
struct list_head *list)
{
struct vmw_cmdbuf_res *cres;
cres = kzalloc(sizeof(*cres), GFP_KERNEL);
if (unlikely(!cres))
return -ENOMEM;
cres->hash.key = user_key | (res_type << 24);
hash_add_rcu(man->resources, &cres->hash.head, cres->hash.key);
cres->state = VMW_CMDBUF_RES_ADD;
cres->res = vmw_resource_reference(res);
cres->man = man;
list_add_tail(&cres->head, list);
return 0;
}
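/*
 * Example flow (hypothetical caller, not taken from this file): an addition
 * is staged on a local list during command stream validation and is then
 * either committed or reverted depending on whether the submission to the
 * device succeeded ("submit_succeeded" is a made-up placeholder).
 *
 *     LIST_HEAD(staged);
 *     int ret;
 *
 *     ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, user_key,
 *                              res, &staged);
 *     if (ret)
 *             return ret;
 *
 *     if (submit_succeeded)
 *             vmw_cmdbuf_res_commit(&staged);
 *     else
 *             vmw_cmdbuf_res_revert(&staged);
 */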
/**
* vmw_cmdbuf_res_remove - Stage a command buffer managed resource for removal.
*
* @man: Pointer to the command buffer resource manager.
* @res_type: The resource type.
* @user_key: The user-space id of the resource.
* @list: The staging list.
* @res_p: If the resource is in an already committed state, points to the
* struct vmw_resource on successful return. The returned pointer is
* not ref-counted.
*
* This function looks up the struct vmw_cmdbuf_res entry from the manager
* hash table and, if it exists, removes it. Depending on its current staging
* state it then either removes the entry from the staging list or adds it
* to it with a staging state of removal.
*/
int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key,
struct list_head *list,
struct vmw_resource **res_p)
{
struct vmw_cmdbuf_res *entry = NULL;
struct vmwgfx_hash_item *hash;
unsigned long key = user_key | (res_type << 24);
hash_for_each_possible_rcu(man->resources, hash, head, key) {
if (hash->key == key) {
entry = hlist_entry(hash, struct vmw_cmdbuf_res, hash);
break;
}
}
if (unlikely(!entry))
return -EINVAL;
switch (entry->state) {
case VMW_CMDBUF_RES_ADD:
vmw_cmdbuf_res_free(man, entry);
*res_p = NULL;
break;
case VMW_CMDBUF_RES_COMMITTED:
hash_del_rcu(&entry->hash.head);
list_del(&entry->head);
entry->state = VMW_CMDBUF_RES_DEL;
list_add_tail(&entry->head, list);
*res_p = entry->res;
break;
default:
BUG();
break;
}
return 0;
}
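/*
 * Example (hypothetical caller): staging a removal on the caller's staging
 * list. On success, res is NULL if the entry was still only staged for
 * addition, or a non ref-counted pointer to the resource if the entry had
 * already been committed.
 *
 *     struct vmw_resource *res = NULL;
 *     int ret;
 *
 *     ret = vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader, user_key,
 *                                 &staged, &res);
 *     if (ret)
 *             return ret;
 */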
/**
* vmw_cmdbuf_res_man_create - Allocate a command buffer managed resource
* manager.
*
* @dev_priv: Pointer to a struct vmw_private
*
* Allocates and initializes a command buffer managed resource manager. Returns
* an error pointer on failure.
*/
struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
{
struct vmw_cmdbuf_res_manager *man;
man = kzalloc(sizeof(*man), GFP_KERNEL);
if (!man)
return ERR_PTR(-ENOMEM);
man->dev_priv = dev_priv;
INIT_LIST_HEAD(&man->list);
hash_init(man->resources);
return man;
}
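/*
 * Example lifecycle (hypothetical caller): a manager is created during
 * device or context setup and destroyed together with it. Note that
 * vmw_cmdbuf_res_man_create() returns an ERR_PTR() on allocation failure
 * rather than NULL.
 *
 *     struct vmw_cmdbuf_res_manager *man;
 *
 *     man = vmw_cmdbuf_res_man_create(dev_priv);
 *     if (IS_ERR(man))
 *             return PTR_ERR(man);
 *     ...
 *     vmw_cmdbuf_res_man_destroy(man);
 */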
/**
* vmw_cmdbuf_res_man_destroy - Destroy a command buffer managed resource
* manager.
*
* @man: Pointer to the manager to destroy.
*
* This function destroys a command buffer managed resource manager and
* unreferences / frees all command buffer managed resources and resource
* entries associated with it.
*/
void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
{
struct vmw_cmdbuf_res *entry, *next;
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry);
kfree(man);
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_devcaps.h"
#include "vmwgfx_mksstat.h"
#include "ttm_object.h"
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_module.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_placement.h>
#include <generated/utsrelease.h>
#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif
#include <linux/cc_platform.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
/*
* Fully encoded drm commands. Might move to vmw_drm.h
*/
#define DRM_IOCTL_VMW_GET_PARAM \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
struct drm_vmw_cursor_bypass_arg)
#define DRM_IOCTL_VMW_CONTROL_STREAM \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_CREATE_CONTEXT \
DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
union drm_vmw_gb_surface_reference_ext_arg)
#define DRM_IOCTL_VMW_MSG \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
struct drm_vmw_msg_arg)
#define DRM_IOCTL_VMW_MKSSTAT_RESET \
DRM_IO(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_RESET)
#define DRM_IOCTL_VMW_MKSSTAT_ADD \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_ADD, \
struct drm_vmw_mksstat_add_arg)
#define DRM_IOCTL_VMW_MKSSTAT_REMOVE \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_REMOVE, \
struct drm_vmw_mksstat_remove_arg)
/*
* Ioctl definitions.
*/
static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
DRM_MASTER),
DRM_IOCTL_DEF_DRV(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
DRM_MASTER),
DRM_IOCTL_DEF_DRV(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
DRM_MASTER),
DRM_IOCTL_DEF_DRV(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
DRM_MASTER),
DRM_IOCTL_DEF_DRV(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_FENCE_SIGNALED,
vmw_fence_obj_signaled_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
DRM_RENDER_ALLOW),
/* These allow direct access to the framebuffers, so mark them as master only. */
DRM_IOCTL_DEF_DRV(VMW_PRESENT, vmw_present_ioctl,
DRM_MASTER | DRM_AUTH),
DRM_IOCTL_DEF_DRV(VMW_PRESENT_READBACK,
vmw_present_readback_ioctl,
DRM_MASTER | DRM_AUTH),
/*
* The permissions of the below ioctl are overridden in
* vmw_generic_ioctl(). We require either
* DRM_MASTER or capable(CAP_SYS_ADMIN).
*/
DRM_IOCTL_DEF_DRV(VMW_UPDATE_LAYOUT,
vmw_kms_update_layout_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_CREATE_SHADER,
vmw_shader_define_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_UNREF_SHADER,
vmw_shader_destroy_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE,
vmw_gb_surface_define_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF,
vmw_gb_surface_reference_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_SYNCCPU,
vmw_user_bo_synccpu_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_CREATE_EXTENDED_CONTEXT,
vmw_extended_context_define_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE_EXT,
vmw_gb_surface_define_ext_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF_EXT,
vmw_gb_surface_reference_ext_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_MSG,
vmw_msg_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_RESET,
vmw_mksstat_reset_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_ADD,
vmw_mksstat_add_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_REMOVE,
vmw_mksstat_remove_ioctl,
DRM_RENDER_ALLOW),
};
static const struct pci_device_id vmw_pci_id_list[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA2) },
{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA3) },
{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
struct bitmap_name {
uint32 value;
const char *name;
};
static const struct bitmap_name cap1_names[] = {
{ SVGA_CAP_RECT_COPY, "rect copy" },
{ SVGA_CAP_CURSOR, "cursor" },
{ SVGA_CAP_CURSOR_BYPASS, "cursor bypass" },
{ SVGA_CAP_CURSOR_BYPASS_2, "cursor bypass 2" },
{ SVGA_CAP_8BIT_EMULATION, "8bit emulation" },
{ SVGA_CAP_ALPHA_CURSOR, "alpha cursor" },
{ SVGA_CAP_3D, "3D" },
{ SVGA_CAP_EXTENDED_FIFO, "extended fifo" },
{ SVGA_CAP_MULTIMON, "multimon" },
{ SVGA_CAP_PITCHLOCK, "pitchlock" },
{ SVGA_CAP_IRQMASK, "irq mask" },
{ SVGA_CAP_DISPLAY_TOPOLOGY, "display topology" },
{ SVGA_CAP_GMR, "gmr" },
{ SVGA_CAP_TRACES, "traces" },
{ SVGA_CAP_GMR2, "gmr2" },
{ SVGA_CAP_SCREEN_OBJECT_2, "screen object 2" },
{ SVGA_CAP_COMMAND_BUFFERS, "command buffers" },
{ SVGA_CAP_CMD_BUFFERS_2, "command buffers 2" },
{ SVGA_CAP_GBOBJECTS, "gbobject" },
{ SVGA_CAP_DX, "dx" },
{ SVGA_CAP_HP_CMD_QUEUE, "hp cmd queue" },
{ SVGA_CAP_NO_BB_RESTRICTION, "no bb restriction" },
{ SVGA_CAP_CAP2_REGISTER, "cap2 register" },
};
static const struct bitmap_name cap2_names[] = {
{ SVGA_CAP2_GROW_OTABLE, "grow otable" },
{ SVGA_CAP2_INTRA_SURFACE_COPY, "intra surface copy" },
{ SVGA_CAP2_DX2, "dx2" },
{ SVGA_CAP2_GB_MEMSIZE_2, "gb memsize 2" },
{ SVGA_CAP2_SCREENDMA_REG, "screendma reg" },
{ SVGA_CAP2_OTABLE_PTDEPTH_2, "otable ptdepth2" },
{ SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT, "non ms to ms stretchblt" },
{ SVGA_CAP2_CURSOR_MOB, "cursor mob" },
{ SVGA_CAP2_MSHINT, "mshint" },
{ SVGA_CAP2_CB_MAX_SIZE_4MB, "cb max size 4mb" },
{ SVGA_CAP2_DX3, "dx3" },
{ SVGA_CAP2_FRAME_TYPE, "frame type" },
{ SVGA_CAP2_COTABLE_COPY, "cotable copy" },
{ SVGA_CAP2_TRACE_FULL_FB, "trace full fb" },
{ SVGA_CAP2_EXTRA_REGS, "extra regs" },
{ SVGA_CAP2_LO_STAGING, "lo staging" },
};
static void vmw_print_bitmap(struct drm_device *drm,
const char *prefix, uint32_t bitmap,
const struct bitmap_name *bnames,
uint32_t num_names)
{
char buf[512];
uint32_t i;
uint32_t offset = 0;
for (i = 0; i < num_names; ++i) {
if ((bitmap & bnames[i].value) != 0) {
offset += snprintf(buf + offset,
ARRAY_SIZE(buf) - offset,
"%s, ", bnames[i].name);
bitmap &= ~bnames[i].value;
}
}
drm_info(drm, "%s: %s\n", prefix, buf);
if (bitmap != 0)
drm_dbg(drm, "%s: unknown enums: %x\n", prefix, bitmap);
}
static void vmw_print_sm_type(struct vmw_private *dev_priv)
{
static const char *names[] = {
[VMW_SM_LEGACY] = "Legacy",
[VMW_SM_4] = "SM4",
[VMW_SM_4_1] = "SM4_1",
[VMW_SM_5] = "SM_5",
[VMW_SM_5_1X] = "SM_5_1X",
[VMW_SM_MAX] = "Invalid"
};
BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1));
drm_info(&dev_priv->drm, "Available shader model: %s.\n",
names[dev_priv->sm_type]);
}
/**
* vmw_dummy_query_bo_create - create a bo to hold a dummy query result
*
* @dev_priv: A device private structure.
*
* This function creates a small buffer object that holds the query
* result for dummy queries emitted as query barriers.
* The function will then map the first page and initialize a pending
* occlusion query result structure. Finally, it will unmap the buffer.
* No interruptible waits are done within this function.
*
* Returns an error if bo creation or initialization fails.
*/
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
int ret;
struct vmw_bo *vbo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
struct vmw_bo_params bo_params = {
.domain = VMW_BO_DOMAIN_SYS,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_kernel,
.size = PAGE_SIZE,
.pin = true
};
/*
* Create the vbo as pinned, so that a tryreserve will
* immediately succeed. This is because we're the only
* user of the bo currently.
*/
ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
BUG_ON(ret != 0);
vmw_bo_pin_reserved(vbo, true);
ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
result->totalSize = sizeof(*result);
result->state = SVGA3D_QUERYSTATE_PENDING;
result->result32 = 0xff;
ttm_bo_kunmap(&map);
}
vmw_bo_pin_reserved(vbo, false);
ttm_bo_unreserve(&vbo->tbo);
if (unlikely(ret != 0)) {
DRM_ERROR("Dummy query buffer map failed.\n");
vmw_bo_unreference(&vbo);
} else
dev_priv->dummy_query_bo = vbo;
return ret;
}
static int vmw_device_init(struct vmw_private *dev_priv)
{
bool uses_fb_traces = false;
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
SVGA_REG_ENABLE_HIDE);
uses_fb_traces = !vmw_cmd_supported(dev_priv) &&
(dev_priv->capabilities & SVGA_CAP_TRACES) != 0;
vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces);
dev_priv->fifo = vmw_fifo_create(dev_priv);
if (IS_ERR(dev_priv->fifo)) {
int err = PTR_ERR(dev_priv->fifo);
dev_priv->fifo = NULL;
return err;
} else if (!dev_priv->fifo) {
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
}
dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
return 0;
}
static void vmw_device_fini(struct vmw_private *vmw)
{
/*
* Legacy sync
*/
vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
;
vmw->last_read_seqno = vmw_fence_read(vmw);
vmw_write(vmw, SVGA_REG_CONFIG_DONE,
vmw->config_done_state);
vmw_write(vmw, SVGA_REG_ENABLE,
vmw->enable_state);
vmw_write(vmw, SVGA_REG_TRACES,
vmw->traces_state);
vmw_fifo_destroy(vmw);
}
/**
* vmw_request_device_late - Perform late device setup
*
* @dev_priv: Pointer to device private.
*
* This function performs setup of otables and enables large command
* buffer submission. These tasks are split out to a separate function
* because it reverts vmw_release_device_early and is intended to be used
* by an error path in the hibernation code.
*/
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
int ret;
if (dev_priv->has_mob) {
ret = vmw_otables_setup(dev_priv);
if (unlikely(ret != 0)) {
DRM_ERROR("Unable to initialize "
"guest Memory OBjects.\n");
return ret;
}
}
if (dev_priv->cman) {
ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
if (ret) {
struct vmw_cmdbuf_man *man = dev_priv->cman;
dev_priv->cman = NULL;
vmw_cmdbuf_man_destroy(man);
}
}
return 0;
}
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
ret = vmw_device_init(dev_priv);
if (unlikely(ret != 0)) {
DRM_ERROR("Unable to initialize the device.\n");
return ret;
}
vmw_fence_fifo_up(dev_priv->fman);
dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
if (IS_ERR(dev_priv->cman)) {
dev_priv->cman = NULL;
dev_priv->sm_type = VMW_SM_LEGACY;
}
ret = vmw_request_device_late(dev_priv);
if (ret)
goto out_no_mob;
ret = vmw_dummy_query_bo_create(dev_priv);
if (unlikely(ret != 0))
goto out_no_query_bo;
return 0;
out_no_query_bo:
if (dev_priv->cman)
vmw_cmdbuf_remove_pool(dev_priv->cman);
if (dev_priv->has_mob) {
struct ttm_resource_manager *man;
man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
ttm_resource_manager_evict_all(&dev_priv->bdev, man);
vmw_otables_takedown(dev_priv);
}
if (dev_priv->cman)
vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
vmw_fence_fifo_down(dev_priv->fman);
vmw_device_fini(dev_priv);
return ret;
}
/**
* vmw_release_device_early - Early part of fifo takedown.
*
* @dev_priv: Pointer to device private struct.
*
* This is the first part of command submission takedown, to be called before
* buffer management is taken down.
*/
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
/*
* Previous destructions should've released
* the pinned bo.
*/
BUG_ON(dev_priv->pinned_bo != NULL);
vmw_bo_unreference(&dev_priv->dummy_query_bo);
if (dev_priv->cman)
vmw_cmdbuf_remove_pool(dev_priv->cman);
if (dev_priv->has_mob) {
struct ttm_resource_manager *man;
man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
ttm_resource_manager_evict_all(&dev_priv->bdev, man);
vmw_otables_takedown(dev_priv);
}
}
/**
* vmw_release_device_late - Late part of fifo takedown.
*
* @dev_priv: Pointer to device private struct.
*
* This is the last part of the command submission takedown, to be called when
* command submission is no longer needed. It may wait on pending fences.
*/
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
vmw_fence_fifo_down(dev_priv->fman);
if (dev_priv->cman)
vmw_cmdbuf_man_destroy(dev_priv->cman);
vmw_device_fini(dev_priv);
}
/*
* Sets the initial_[width|height] fields on the given vmw_private.
*
* It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
* clamping the values to the fb_max_[width|height] fields and
* VMWGFX_MIN_INITIAL_[WIDTH|HEIGHT].
* If the values appear to be invalid, set them to
* VMWGFX_MIN_INITIAL_[WIDTH|HEIGHT].
*/
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
uint32_t width;
uint32_t height;
width = vmw_read(dev_priv, SVGA_REG_WIDTH);
height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
width = max_t(uint32_t, width, VMWGFX_MIN_INITIAL_WIDTH);
height = max_t(uint32_t, height, VMWGFX_MIN_INITIAL_HEIGHT);
if (width > dev_priv->fb_max_width ||
height > dev_priv->fb_max_height) {
/*
* This is a host error and shouldn't occur.
*/
width = VMWGFX_MIN_INITIAL_WIDTH;
height = VMWGFX_MIN_INITIAL_HEIGHT;
}
dev_priv->initial_width = width;
dev_priv->initial_height = height;
}
/**
* vmw_dma_select_mode - Determine how DMA mappings should be set up for this
* system.
*
* @dev_priv: Pointer to a struct vmw_private
*
* This function tries to determine what actions need to be taken by the
* driver to make system pages visible to the device.
* If this function decides that DMA is not possible, it returns -EINVAL.
* The driver may then try to disable features of the device that require
* DMA.
*/
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
static const char *names[vmw_dma_map_max] = {
[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
/* TTM currently doesn't fully support SEV encryption. */
if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return -EINVAL;
if (vmw_force_coherent)
dev_priv->map_mode = vmw_dma_alloc_coherent;
else if (vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind;
else
dev_priv->map_mode = vmw_dma_map_populate;
drm_info(&dev_priv->drm,
"DMA map mode: %s\n", names[dev_priv->map_mode]);
return 0;
}
/**
* vmw_dma_masks - set required page- and dma masks
*
* @dev_priv: Pointer to a struct vmw_private
*
* With 32-bit we can only handle 32-bit PFNs. Optionally set that
* restriction also for 64-bit systems.
*/
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
int ret = 0;
ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
if (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask) {
drm_info(&dev_priv->drm,
"Restricting DMA addresses to 44 bits.\n");
return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
}
return ret;
}
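/*
 * For example (based on the module_param_named() declarations above):
 * loading the module with restrict_dma_mask=1 forces the 44-bit DMA mask
 * even on 64-bit systems, mirroring what this function does unconditionally
 * when sizeof(unsigned long) == 4:
 *
 *     modprobe vmwgfx restrict_dma_mask=1
 */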
static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
int ret;
ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
dev_priv->vram_size >> PAGE_SHIFT);
ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
return ret;
}
static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
}
static int vmw_setup_pci_resources(struct vmw_private *dev,
u32 pci_id)
{
resource_size_t rmmio_start;
resource_size_t rmmio_size;
resource_size_t fifo_start;
resource_size_t fifo_size;
int ret;
struct pci_dev *pdev = to_pci_dev(dev->drm.dev);
pci_set_master(pdev);
ret = pci_request_regions(pdev, "vmwgfx probe");
if (ret)
return ret;
dev->pci_id = pci_id;
if (pci_id == VMWGFX_PCI_ID_SVGA3) {
rmmio_start = pci_resource_start(pdev, 0);
rmmio_size = pci_resource_len(pdev, 0);
dev->vram_start = pci_resource_start(pdev, 2);
dev->vram_size = pci_resource_len(pdev, 2);
drm_info(&dev->drm,
"Register MMIO at 0x%pa size is %llu kiB\n",
&rmmio_start, (uint64_t)rmmio_size / 1024);
dev->rmmio = devm_ioremap(dev->drm.dev,
rmmio_start,
rmmio_size);
if (!dev->rmmio) {
drm_err(&dev->drm,
"Failed mapping registers mmio memory.\n");
pci_release_regions(pdev);
return -ENOMEM;
}
} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
dev->io_start = pci_resource_start(pdev, 0);
dev->vram_start = pci_resource_start(pdev, 1);
dev->vram_size = pci_resource_len(pdev, 1);
fifo_start = pci_resource_start(pdev, 2);
fifo_size = pci_resource_len(pdev, 2);
drm_info(&dev->drm,
"FIFO at %pa size is %llu kiB\n",
&fifo_start, (uint64_t)fifo_size / 1024);
dev->fifo_mem = devm_memremap(dev->drm.dev,
fifo_start,
fifo_size,
MEMREMAP_WB);
if (IS_ERR(dev->fifo_mem)) {
drm_err(&dev->drm,
"Failed mapping FIFO memory.\n");
pci_release_regions(pdev);
return PTR_ERR(dev->fifo_mem);
}
} else {
pci_release_regions(pdev);
return -EINVAL;
}
/*
* This is the approximate size of the vram; the exact size will only
* be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
* size will be equal to or bigger than the size reported by
* SVGA_REG_VRAM_SIZE.
*/
drm_info(&dev->drm,
"VRAM at %pa size is %llu kiB\n",
&dev->vram_start, (uint64_t)dev->vram_size / 1024);
return 0;
}
static int vmw_detect_version(struct vmw_private *dev)
{
uint32_t svga_id;
vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ?
SVGA_ID_3 : SVGA_ID_2);
svga_id = vmw_read(dev, SVGA_REG_ID);
if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) {
drm_err(&dev->drm,
"Unsupported SVGA ID 0x%x on chipset 0x%x\n",
svga_id, dev->pci_id);
return -ENOSYS;
}
BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3));
drm_info(&dev->drm,
"Running on SVGA version %d.\n", (svga_id & 0xff));
return 0;
}
static void vmw_write_driver_id(struct vmw_private *dev)
{
if ((dev->capabilities2 & SVGA_CAP2_DX2) != 0) {
vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID,
SVGA_REG_GUEST_DRIVER_ID_LINUX);
vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION1,
LINUX_VERSION_MAJOR << 24 |
LINUX_VERSION_PATCHLEVEL << 16 |
LINUX_VERSION_SUBLEVEL);
vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION2,
VMWGFX_DRIVER_MAJOR << 24 |
VMWGFX_DRIVER_MINOR << 16 |
VMWGFX_DRIVER_PATCHLEVEL);
vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION3, 0);
vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID,
SVGA_REG_GUEST_DRIVER_ID_SUBMIT);
}
}
static void vmw_sw_context_init(struct vmw_private *dev_priv)
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
hash_init(sw_context->res_ht);
}
static void vmw_sw_context_fini(struct vmw_private *dev_priv)
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
vfree(sw_context->cmd_bounce);
if (sw_context->staged_bindings)
vmw_binding_state_free(sw_context->staged_bindings);
}
static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
{
int ret;
enum vmw_res_type i;
bool refuse_dma = false;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
dev_priv->drm.dev_private = dev_priv;
vmw_sw_context_init(dev_priv);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->binding_mutex);
spin_lock_init(&dev_priv->resource_lock);
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cursor_lock);
ret = vmw_setup_pci_resources(dev_priv, pci_id);
if (ret)
return ret;
ret = vmw_detect_version(dev_priv);
if (ret)
goto out_no_pci_or_version;
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init_base(&dev_priv->res_idr[i], 1);
INIT_LIST_HEAD(&dev_priv->res_lru[i]);
}
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
dev_priv->fence_queue_waiters = 0;
dev_priv->fifo_queue_waiters = 0;
dev_priv->used_memory_size = 0;
dev_priv->assume_16bpp = !!vmw_assume_16bpp;
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
vmw_print_bitmap(&dev_priv->drm, "Capabilities",
dev_priv->capabilities,
cap1_names, ARRAY_SIZE(cap1_names));
if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
vmw_print_bitmap(&dev_priv->drm, "Capabilities2",
dev_priv->capabilities2,
cap2_names, ARRAY_SIZE(cap2_names));
}
if (!vmwgfx_supported(dev_priv)) {
vmw_disable_backdoor();
drm_err_once(&dev_priv->drm,
"vmwgfx seems to be running on an unsupported hypervisor.");
drm_err_once(&dev_priv->drm,
"This configuration is likely broken.");
drm_err_once(&dev_priv->drm,
"Please switch to a supported graphics device to avoid problems.");
}
ret = vmw_dma_select_mode(dev_priv);
if (unlikely(ret != 0)) {
drm_info(&dev_priv->drm,
"Restricting capabilities since DMA not available.\n");
refuse_dma = true;
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
drm_info(&dev_priv->drm,
"Disabling 3D acceleration.\n");
}
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
vmw_get_initial_size(dev_priv);
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
dev_priv->max_gmr_ids =
vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
dev_priv->max_gmr_pages =
vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
dev_priv->memory_size =
vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
dev_priv->memory_size -= dev_priv->vram_size;
} else {
/*
* An arbitrary limit of 512MiB on surface
* memory. But all HWV8 hardware supports GMR2.
*/
dev_priv->memory_size = 512*1024*1024;
}
dev_priv->max_mob_pages = 0;
dev_priv->max_mob_size = 0;
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
uint64_t mem_size;
if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
mem_size = vmw_read(dev_priv,
SVGA_REG_GBOBJECT_MEM_SIZE_KB);
else
mem_size =
vmw_read(dev_priv,
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
/*
* Workaround for low memory 2D VMs to compensate for the
* allocation taken by fbdev
*/
if (!(dev_priv->capabilities & SVGA_CAP_3D))
mem_size *= 3;
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->max_primary_mem =
vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
dev_priv->max_mob_size =
vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
dev_priv->stdu_max_width =
vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
dev_priv->stdu_max_height =
vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
vmw_write(dev_priv, SVGA_REG_DEV_CAP,
SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
dev_priv->texture_max_width = vmw_read(dev_priv,
SVGA_REG_DEV_CAP);
vmw_write(dev_priv, SVGA_REG_DEV_CAP,
SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
dev_priv->texture_max_height = vmw_read(dev_priv,
SVGA_REG_DEV_CAP);
} else {
dev_priv->texture_max_width = 8192;
dev_priv->texture_max_height = 8192;
dev_priv->max_primary_mem = dev_priv->vram_size;
}
drm_info(&dev_priv->drm,
"Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n",
(u64)dev_priv->vram_size / 1024,
(u64)dev_priv->fifo_mem_size / 1024,
dev_priv->memory_size / 1024);
drm_info(&dev_priv->drm,
"MOB limits: max mob size = %u kB, max mob pages = %u\n",
dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages);
ret = vmw_dma_masks(dev_priv);
if (unlikely(ret != 0))
goto out_err0;
dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
drm_info(&dev_priv->drm,
"Max GMR ids is %u\n",
(unsigned)dev_priv->max_gmr_ids);
drm_info(&dev_priv->drm,
"Max number of GMR pages is %u\n",
(unsigned)dev_priv->max_gmr_pages);
}
drm_info(&dev_priv->drm,
"Maximum display memory size is %llu kiB\n",
(uint64_t)dev_priv->max_primary_mem / 1024);
/* Need mmio memory to check for fifo pitchlock cap. */
if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
!(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
!vmw_fifo_have_pitchlock(dev_priv)) {
ret = -ENOSYS;
DRM_ERROR("Hardware has no pitchlock\n");
goto out_err0;
}
dev_priv->tdev = ttm_object_device_init(&vmw_prime_dmabuf_ops);
if (unlikely(dev_priv->tdev == NULL)) {
drm_err(&dev_priv->drm,
"Unable to initialize TTM object management.\n");
ret = -ENOMEM;
goto out_err0;
}
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
ret = vmw_irq_install(dev_priv);
if (ret != 0) {
drm_err(&dev_priv->drm,
"Failed installing irq: %d\n", ret);
goto out_no_irq;
}
}
dev_priv->fman = vmw_fence_manager_init(dev_priv);
if (unlikely(dev_priv->fman == NULL)) {
ret = -ENOMEM;
goto out_no_fman;
}
ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
dev_priv->drm.dev,
dev_priv->drm.anon_inode->i_mapping,
dev_priv->drm.vma_offset_manager,
dev_priv->map_mode == vmw_dma_alloc_coherent,
false);
if (unlikely(ret != 0)) {
drm_err(&dev_priv->drm,
"Failed initializing TTM buffer object driver.\n");
goto out_no_bdev;
}
/*
* Enable VRAM, but initially don't use it until SVGA is enabled and
* unhidden.
*/
ret = vmw_vram_manager_init(dev_priv);
if (unlikely(ret != 0)) {
drm_err(&dev_priv->drm,
"Failed initializing memory manager for VRAM.\n");
goto out_no_vram;
}
ret = vmw_devcaps_create(dev_priv);
if (unlikely(ret != 0)) {
drm_err(&dev_priv->drm,
"Failed initializing device caps.\n");
goto out_no_vram;
}
/*
* "Guest Memory Regions" is an aperture like feature with
* one slot per bo. There is an upper limit of the number of
* slots as well as the bo size.
*/
dev_priv->has_gmr = true;
/* TODO: This is most likely not correct */
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
refuse_dma ||
vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
drm_info(&dev_priv->drm,
"No GMR memory available. "
"Graphics memory resources are very limited.\n");
dev_priv->has_gmr = false;
}
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
dev_priv->has_mob = true;
if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
drm_info(&dev_priv->drm,
"No MOB memory available. "
"3D will be disabled.\n");
dev_priv->has_mob = false;
}
if (vmw_sys_man_init(dev_priv) != 0) {
drm_info(&dev_priv->drm,
"No MOB page table memory available. "
"3D will be disabled.\n");
dev_priv->has_mob = false;
}
}
if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_DXCONTEXT))
dev_priv->sm_type = VMW_SM_4;
}
/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
if (has_sm4_context(dev_priv) &&
(dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM41))
dev_priv->sm_type = VMW_SM_4_1;
if (has_sm4_1_context(dev_priv) &&
(dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) {
dev_priv->sm_type = VMW_SM_5;
if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_GL43))
dev_priv->sm_type = VMW_SM_5_1X;
}
}
}
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
vmw_overlay_init(dev_priv);
ret = vmw_request_device(dev_priv);
if (ret)
goto out_no_fifo;
vmw_print_sm_type(dev_priv);
vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);
vmw_write_driver_id(dev_priv);
dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
register_pm_notifier(&dev_priv->pm_nb);
return 0;
out_no_fifo:
vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv);
out_no_kms:
if (dev_priv->has_mob) {
vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
vmw_sys_man_fini(dev_priv);
}
if (dev_priv->has_gmr)
vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
vmw_devcaps_destroy(dev_priv);
vmw_vram_manager_fini(dev_priv);
out_no_vram:
ttm_device_fini(&dev_priv->bdev);
out_no_bdev:
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
vmw_irq_uninstall(&dev_priv->drm);
out_no_irq:
ttm_object_device_release(&dev_priv->tdev);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
out_no_pci_or_version:
pci_release_regions(pdev);
return ret;
}
static void vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
enum vmw_res_type i;
unregister_pm_notifier(&dev_priv->pm_nb);
vmw_sw_context_fini(dev_priv);
vmw_fifo_resource_dec(dev_priv);
vmw_svga_disable(dev_priv);
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
if (dev_priv->has_gmr)
vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
vmw_release_device_early(dev_priv);
if (dev_priv->has_mob) {
vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
vmw_sys_man_fini(dev_priv);
}
vmw_devcaps_destroy(dev_priv);
vmw_vram_manager_fini(dev_priv);
ttm_device_fini(&dev_priv->bdev);
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
vmw_irq_uninstall(&dev_priv->drm);
ttm_object_device_release(&dev_priv->tdev);
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
vmw_mksstat_remove_all(dev_priv);
pci_release_regions(pdev);
}
static void vmw_postclose(struct drm_device *dev,
struct drm_file *file_priv)
{
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
}
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_fpriv *vmw_fp;
int ret = -ENOMEM;
vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
if (unlikely(!vmw_fp))
return ret;
vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev);
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
file_priv->driver_priv = vmw_fp;
return 0;
out_no_tfile:
kfree(vmw_fp);
return ret;
}
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg,
long (*ioctl_func)(struct file *, unsigned int,
unsigned long))
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
unsigned int nr = DRM_IOCTL_NR(cmd);
unsigned int flags;
/*
* Do extra checking on driver private ioctls.
*/
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
const struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
return ioctl_func(filp, cmd, arg);
} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
if (!drm_is_current_master(file_priv) &&
!capable(CAP_SYS_ADMIN))
return -EACCES;
}
if (unlikely(ioctl->cmd != cmd))
goto out_io_encoding;
flags = ioctl->flags;
} else if (!drm_ioctl_flags(nr, &flags))
return -EINVAL;
return ioctl_func(filp, cmd, arg);
out_io_encoding:
DRM_ERROR("Invalid command format, ioctl %d\n",
nr - DRM_COMMAND_BASE);
return -EINVAL;
}
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}
#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif
static void vmw_master_set(struct drm_device *dev,
struct drm_file *file_priv,
bool from_open)
{
/*
* Inform a new master that the layout may have changed while
* it was gone.
*/
if (!from_open)
drm_sysfs_hotplug_event(dev);
}
static void vmw_master_drop(struct drm_device *dev,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
vmw_kms_legacy_hotspot_clear(dev_priv);
}
bool vmwgfx_supported(struct vmw_private *vmw)
{
#if defined(CONFIG_X86)
return hypervisor_is_type(X86_HYPER_VMWARE);
#elif defined(CONFIG_ARM64)
/*
* On aarch64 only svga3 is supported
*/
return vmw->pci_id == VMWGFX_PCI_ID_SVGA3;
#else
drm_warn_once(&vmw->drm,
"vmwgfx is running on an unknown architecture.");
return false;
#endif
}
/**
* __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
*
* @dev_priv: Pointer to device private struct.
* Needs the reservation sem to be held in non-exclusive mode.
*/
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
if (!ttm_resource_manager_used(man)) {
vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
ttm_resource_manager_set_used(man, true);
}
}
/**
* vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
*
* @dev_priv: Pointer to device private struct.
*/
void vmw_svga_enable(struct vmw_private *dev_priv)
{
__vmw_svga_enable(dev_priv);
}
/**
* __vmw_svga_disable - Disable SVGA mode and use of VRAM.
*
* @dev_priv: Pointer to device private struct.
* Needs the reservation sem to be held in exclusive mode.
* Will not empty VRAM. VRAM must be emptied by caller.
*/
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
if (ttm_resource_manager_used(man)) {
ttm_resource_manager_set_used(man, false);
vmw_write(dev_priv, SVGA_REG_ENABLE,
SVGA_REG_ENABLE_HIDE |
SVGA_REG_ENABLE_ENABLE);
}
}
/**
* vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
* running.
*
* @dev_priv: Pointer to device private struct.
* Will empty VRAM.
*/
void vmw_svga_disable(struct vmw_private *dev_priv)
{
struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
/*
* Disabling SVGA will turn off device modesetting capabilities, so
* notify KMS about that so that it doesn't cache atomic state that
* isn't valid anymore, for example crtcs turned on.
* Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
* but vmw_kms_lost_device() takes the reservation sem and thus we'll
* end up with lock order reversal. Thus, a master may actually perform
* a new modeset just after we call vmw_kms_lost_device() and race with
* vmw_svga_disable(), but that should at worst cause atomic KMS state
* to be inconsistent with the device, causing modesetting problems.
*
*/
vmw_kms_lost_device(&dev_priv->drm);
if (ttm_resource_manager_used(man)) {
if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
DRM_ERROR("Failed evicting VRAM buffers.\n");
ttm_resource_manager_set_used(man, false);
vmw_write(dev_priv, SVGA_REG_ENABLE,
SVGA_REG_ENABLE_HIDE |
SVGA_REG_ENABLE_ENABLE);
}
}
static void vmw_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
vmw_driver_unload(dev);
}
static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
{
struct drm_minor *minor = vmw->drm.primary;
struct dentry *root = minor->debugfs_root;
ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_SYSTEM),
root, "system_ttm");
ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
root, "vram_ttm");
ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
root, "gmr_ttm");
ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
root, "mob_ttm");
ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
root, "system_mob_ttm");
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr)
{
struct vmw_private *dev_priv =
container_of(nb, struct vmw_private, pm_nb);
switch (val) {
case PM_HIBERNATION_PREPARE:
/*
* Take the reservation sem in write mode, which will make sure
* there are no other processes holding a buffer object
* reservation, meaning we should be able to evict all buffer
* objects if needed.
* Once user-space processes have been frozen, we can release
* the lock again.
*/
dev_priv->suspend_locked = true;
break;
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
if (READ_ONCE(dev_priv->suspend_locked)) {
dev_priv->suspend_locked = false;
}
break;
default:
break;
}
return 0;
}
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
if (dev_priv->refuse_hibernation)
return -EBUSY;
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
static int vmw_pci_resume(struct pci_dev *pdev)
{
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
return pci_enable_device(pdev);
}
static int vmw_pm_suspend(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
struct pm_message dummy;
dummy.event = 0;
return vmw_pci_suspend(pdev, dummy);
}
static int vmw_pm_resume(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
return vmw_pci_resume(pdev);
}
static int vmw_pm_freeze(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
int ret;
/*
* No user-space processes should be running now.
*/
ret = vmw_kms_suspend(&dev_priv->drm);
if (ret) {
DRM_ERROR("Failed to freeze modesetting.\n");
return ret;
}
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
vmw_release_device_early(dev_priv);
while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
vmw_fifo_resource_dec(dev_priv);
if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
DRM_ERROR("Can't hibernate while 3D resources are active.\n");
vmw_fifo_resource_inc(dev_priv);
WARN_ON(vmw_request_device_late(dev_priv));
dev_priv->suspend_locked = false;
if (dev_priv->suspend_state)
vmw_kms_resume(dev);
return -EBUSY;
}
vmw_fence_fifo_down(dev_priv->fman);
__vmw_svga_disable(dev_priv);
vmw_release_device_late(dev_priv);
return 0;
}
static int vmw_pm_restore(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
int ret;
vmw_detect_version(dev_priv);
vmw_fifo_resource_inc(dev_priv);
ret = vmw_request_device(dev_priv);
if (ret)
return ret;
__vmw_svga_enable(dev_priv);
vmw_fence_fifo_up(dev_priv->fman);
dev_priv->suspend_locked = false;
if (dev_priv->suspend_state)
vmw_kms_resume(&dev_priv->drm);
return 0;
}
static const struct dev_pm_ops vmw_pm_ops = {
.freeze = vmw_pm_freeze,
.thaw = vmw_pm_restore,
.restore = vmw_pm_restore,
.suspend = vmw_pm_suspend,
.resume = vmw_pm_resume,
};
static const struct file_operations vmwgfx_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = vmw_unlocked_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
#if defined(CONFIG_COMPAT)
.compat_ioctl = vmw_compat_ioctl,
#endif
.llseek = noop_llseek,
};
static const struct drm_driver driver = {
.driver_features =
DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
.ioctls = vmw_ioctls,
.num_ioctls = ARRAY_SIZE(vmw_ioctls),
.master_set = vmw_master_set,
.master_drop = vmw_master_drop,
.open = vmw_driver_open,
.postclose = vmw_postclose,
.dumb_create = vmw_dumb_create,
.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
.date = VMWGFX_DRIVER_DATE,
.major = VMWGFX_DRIVER_MAJOR,
.minor = VMWGFX_DRIVER_MINOR,
.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
static struct pci_driver vmw_pci_driver = {
.name = VMWGFX_DRIVER_NAME,
.id_table = vmw_pci_id_list,
.probe = vmw_probe,
.remove = vmw_remove,
.driver = {
.pm = &vmw_pm_ops
}
};
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct vmw_private *vmw;
int ret;
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
if (ret)
goto out_error;
ret = pcim_enable_device(pdev);
if (ret)
goto out_error;
vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
struct vmw_private, drm);
if (IS_ERR(vmw)) {
ret = PTR_ERR(vmw);
goto out_error;
}
pci_set_drvdata(pdev, &vmw->drm);
ret = vmw_driver_load(vmw, ent->device);
if (ret)
goto out_error;
ret = drm_dev_register(&vmw->drm, 0);
if (ret)
goto out_unload;
vmw_fifo_resource_inc(vmw);
vmw_svga_enable(vmw);
drm_fbdev_generic_setup(&vmw->drm, 0);
vmw_debugfs_gem_init(vmw);
vmw_debugfs_resource_managers_init(vmw);
return 0;
out_unload:
vmw_driver_unload(&vmw->drm);
out_error:
return ret;
}
drm_module_pci_driver(vmw_pci_driver);
MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
__stringify(VMWGFX_DRIVER_MINOR) "."
__stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
"0");
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_drv.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2011 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
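/**
 * vmw_bo_vm_lookup - Look up and reference the buffer object backing a
 * mmap offset.
 *
 * @bdev: The TTM device owning the vma offset manager.
 * @filp: The DRM file the mapping is requested for.
 * @offset: Start of the mapping, in pages.
 * @pages: Length of the mapping, in pages.
 * @p_bo: On success, assigned a referenced pointer to the buffer object.
 *
 * Returns 0 on success, -EINVAL if no buffer object matches the offset
 * range, or -EACCES if @filp is not allowed to map the node.
 */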
static int vmw_bo_vm_lookup(struct ttm_device *bdev,
struct drm_file *filp,
unsigned long offset,
unsigned long pages,
struct ttm_buffer_object **p_bo)
{
struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
struct drm_device *drm = &dev_priv->drm;
struct drm_vma_offset_node *node;
int ret;
*p_bo = NULL;
drm_vma_offset_lock_lookup(bdev->vma_manager);
node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
if (likely(node)) {
*p_bo = container_of(node, struct ttm_buffer_object,
base.vma_node);
*p_bo = ttm_bo_get_unless_zero(*p_bo);
}
drm_vma_offset_unlock_lookup(bdev->vma_manager);
if (!*p_bo) {
drm_err(drm, "Could not find buffer object to map\n");
return -EINVAL;
}
if (!drm_vma_node_is_allowed(node, filp)) {
ret = -EACCES;
goto out_no_access;
}
return 0;
out_no_access:
ttm_bo_put(*p_bo);
return ret;
}
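/**
 * vmw_mmap - mmap file operation for vmwgfx buffer objects
 *
 * @filp: The struct file being mapped.
 * @vma: The virtual memory area to set up.
 *
 * Looks up the buffer object backing the fake offset in @vma, sets up the
 * TTM object mapping, installs the vmwgfx vm_operations and switches the
 * mapping to VM_PFNMAP unless it is a COW mapping.
 */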
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
static const struct vm_operations_struct vmw_vm_ops = {
.pfn_mkwrite = vmw_bo_vm_mkwrite,
.page_mkwrite = vmw_bo_vm_mkwrite,
.fault = vmw_bo_vm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
};
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
struct ttm_device *bdev = &dev_priv->bdev;
struct ttm_buffer_object *bo;
int ret;
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
return -EINVAL;
ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_mmap_obj(vma, bo);
if (unlikely(ret != 0))
goto out_unref;
vma->vm_ops = &vmw_vm_ops;
/* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
if (!is_cow_mapping(vma->vm_flags))
vm_flags_mod(vma, VM_PFNMAP, VM_MIXEDMAP);
ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */
return 0;
out_unref:
ttm_bo_put(bo);
return ret;
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
struct vmw_shader {
struct vmw_resource res;
SVGA3dShaderType type;
uint32_t size;
uint8_t num_input_sig;
uint8_t num_output_sig;
};
struct vmw_user_shader {
struct ttm_base_object base;
struct vmw_shader shader;
};
struct vmw_dx_shader {
struct vmw_resource res;
struct vmw_resource *ctx;
struct vmw_resource *cotable;
u32 id;
bool committed;
struct list_head cotable_head;
};
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);
static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);
static int vmw_dx_shader_create(struct vmw_resource *res);
static int vmw_dx_shader_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_dx_shader_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
static const struct vmw_user_resource_conv user_shader_conv = {
.object_type = VMW_RES_SHADER,
.base_obj_to_res = vmw_user_shader_base_to_res,
.res_free = vmw_user_shader_free
};
const struct vmw_user_resource_conv *user_shader_converter =
&user_shader_conv;
static const struct vmw_res_func vmw_gb_shader_func = {
.res_type = vmw_res_shader,
.needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "guest backed shaders",
.domain = VMW_BO_DOMAIN_MOB,
.busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_shader_create,
.destroy = vmw_gb_shader_destroy,
.bind = vmw_gb_shader_bind,
.unbind = vmw_gb_shader_unbind
};
static const struct vmw_res_func vmw_dx_shader_func = {
.res_type = vmw_res_shader,
.needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "dx shaders",
.domain = VMW_BO_DOMAIN_MOB,
.busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_dx_shader_create,
/*
* The destroy callback is only called with a committed resource on
* context destroy, in which case we destroy the cotable anyway,
* so there's no need to destroy DX shaders separately.
*/
.destroy = NULL,
.bind = vmw_dx_shader_bind,
.unbind = vmw_dx_shader_unbind,
.commit_notify = vmw_dx_shader_commit_notify,
};
/*
* Shader management:
*/
static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
return container_of(res, struct vmw_shader, res);
}
/**
* vmw_res_to_dx_shader - typecast a struct vmw_resource to a
* struct vmw_dx_shader
*
* @res: Pointer to the struct vmw_resource.
*/
static inline struct vmw_dx_shader *
vmw_res_to_dx_shader(struct vmw_resource *res)
{
return container_of(res, struct vmw_dx_shader, res);
}
static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
if (likely(res->func->destroy))
(void) res->func->destroy(res);
else
res->id = -1;
}
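/**
 * vmw_gb_shader_init - Initialize a guest-backed shader resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The shader resource to initialize.
 * @size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within @byte_code.
 * @type: The SVGA3D shader type.
 * @num_input_sig: Number of input signatures.
 * @num_output_sig: Number of output signatures.
 * @byte_code: Optional buffer object holding the shader byte code.
 * @res_free: Destructor used to free @res, or NULL to use kfree().
 *
 * On failure the resource is freed using @res_free (or kfree()).
 * Returns 0 on success or a negative error code on failure.
 */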
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
uint32_t size,
uint64_t offset,
SVGA3dShaderType type,
uint8_t num_input_sig,
uint8_t num_output_sig,
struct vmw_bo *byte_code,
void (*res_free) (struct vmw_resource *res))
{
struct vmw_shader *shader = vmw_res_to_shader(res);
int ret;
ret = vmw_resource_init(dev_priv, res, true, res_free,
&vmw_gb_shader_func);
if (unlikely(ret != 0)) {
if (res_free)
res_free(res);
else
kfree(res);
return ret;
}
res->guest_memory_size = size;
if (byte_code) {
res->guest_memory_bo = vmw_bo_reference(byte_code);
res->guest_memory_offset = offset;
}
shader->size = size;
shader->type = type;
shader->num_input_sig = num_input_sig;
shader->num_output_sig = num_output_sig;
res->hw_destroy = vmw_hw_shader_destroy;
return 0;
}
/*
* GB shader code:
*/
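/**
 * vmw_gb_shader_create - Create the guest-backed shader on the device.
 *
 * @res: The shader resource.
 *
 * Allocates a shader id and emits an SVGA_3D_CMD_DEFINE_GB_SHADER command.
 * Returns 0 if the shader already has a device id or on success, -EBUSY if
 * the shader id space is exhausted, or -ENOMEM if command space could not
 * be reserved.
 */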
static int vmw_gb_shader_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_shader *shader = vmw_res_to_shader(res);
int ret;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBShader body;
} *cmd;
if (likely(res->id != -1))
return 0;
ret = vmw_resource_alloc_id(res);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a shader id.\n");
goto out_no_id;
}
if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
ret = -EBUSY;
goto out_no_fifo;
}
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
}
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
cmd->body.type = shader->type;
cmd->body.sizeInBytes = shader->size;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
return 0;
out_no_fifo:
vmw_resource_release_id(res);
out_no_id:
return ret;
}
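/**
 * vmw_gb_shader_bind - Bind the byte-code MOB to the device shader.
 *
 * @res: The shader resource.
 * @val_buf: Validation buffer information for the backing MOB.
 *
 * Emits an SVGA_3D_CMD_BIND_GB_SHADER command. Returns 0 on success or
 * -ENOMEM if command space could not be reserved.
 */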
static int vmw_gb_shader_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBindGBShader body;
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
cmd->body.mobid = bo->resource->start;
cmd->body.offsetInBytes = res->guest_memory_offset;
res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
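/**
 * vmw_gb_shader_unbind - Unbind the backing MOB from the device shader.
 *
 * @res: The shader resource.
 * @readback: Unused for guest-backed shaders.
 * @val_buf: Validation buffer information for the backing MOB.
 *
 * Emits an SVGA_3D_CMD_BIND_GB_SHADER command with an invalid MOB id and
 * fences the backing buffer. Returns 0 on success or -ENOMEM if command
 * space could not be reserved.
 */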
static int vmw_gb_shader_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBindGBShader body;
} *cmd;
struct vmw_fence_obj *fence;
BUG_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
cmd->body.mobid = SVGA3D_INVALID_ID;
cmd->body.offsetInBytes = 0;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
/*
* Create a fence object and fence the backup buffer.
*/
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
return 0;
}
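/**
 * vmw_gb_shader_destroy - Destroy the guest-backed shader on the device.
 *
 * @res: The shader resource.
 *
 * Scrubs any context bindings of the shader and emits an
 * SVGA_3D_CMD_DESTROY_GB_SHADER command before releasing the shader id.
 */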
static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyGBShader body;
} *cmd;
if (likely(res->id == -1))
return 0;
mutex_lock(&dev_priv->binding_mutex);
vmw_binding_res_list_scrub(&res->binding_head);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
vmw_fifo_resource_dec(dev_priv);
return 0;
}
/*
* DX shader code:
*/
/**
* vmw_dx_shader_commit_notify - Notify that a shader operation has been
* committed to hardware from a user-supplied command stream.
*
* @res: Pointer to the shader resource.
* @state: Indicating whether a creation or removal has been committed.
*
*/
static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state)
{
struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
struct vmw_private *dev_priv = res->dev_priv;
if (state == VMW_CMDBUF_RES_ADD) {
mutex_lock(&dev_priv->binding_mutex);
vmw_cotable_add_resource(shader->cotable,
&shader->cotable_head);
shader->committed = true;
res->id = shader->id;
mutex_unlock(&dev_priv->binding_mutex);
} else {
mutex_lock(&dev_priv->binding_mutex);
list_del_init(&shader->cotable_head);
shader->committed = false;
res->id = -1;
mutex_unlock(&dev_priv->binding_mutex);
}
}
/**
* vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
*
* @res: The shader resource
*
* This function reverts a scrub operation.
*/
static int vmw_dx_shader_unscrub(struct vmw_resource *res)
{
struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindShader body;
} *cmd;
if (!list_empty(&shader->cotable_head) || !shader->committed)
return 0;
cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), shader->ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = shader->ctx->id;
cmd->body.shid = shader->id;
cmd->body.mobid = res->guest_memory_bo->tbo.resource->start;
cmd->body.offsetInBytes = res->guest_memory_offset;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
return 0;
}
/**
* vmw_dx_shader_create - The DX shader create callback
*
* @res: The DX shader resource
*
* The create callback is called as part of resource validation and
 * makes sure that we unscrub the shader if it has previously been scrubbed.
*/
static int vmw_dx_shader_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
int ret = 0;
WARN_ON_ONCE(!shader->committed);
if (vmw_resource_mob_attached(res)) {
mutex_lock(&dev_priv->binding_mutex);
ret = vmw_dx_shader_unscrub(res);
mutex_unlock(&dev_priv->binding_mutex);
}
res->id = shader->id;
return ret;
}
/**
* vmw_dx_shader_bind - The DX shader bind callback
*
* @res: The DX shader resource
* @val_buf: Pointer to the validate buffer.
*
*/
static int vmw_dx_shader_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
vmw_dx_shader_unscrub(res);
mutex_unlock(&dev_priv->binding_mutex);
return 0;
}
/**
* vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
*
* @res: The shader resource
*
* This function unbinds a MOB from the DX shader without requiring the
* MOB dma_buffer to be reserved. The driver still considers the MOB bound.
* However, once the driver eventually decides to unbind the MOB, it doesn't
* need to access the context.
*/
static int vmw_dx_shader_scrub(struct vmw_resource *res)
{
struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindShader body;
} *cmd;
if (list_empty(&shader->cotable_head))
return 0;
WARN_ON_ONCE(!shader->committed);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = shader->ctx->id;
cmd->body.shid = res->id;
cmd->body.mobid = SVGA3D_INVALID_ID;
cmd->body.offsetInBytes = 0;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
res->id = -1;
list_del_init(&shader->cotable_head);
return 0;
}
/**
* vmw_dx_shader_unbind - The dx shader unbind callback.
*
* @res: The shader resource
* @readback: Whether this is a readback unbind. Currently unused.
* @val_buf: MOB buffer information.
*/
static int vmw_dx_shader_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_fence_obj *fence;
int ret;
BUG_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
ret = vmw_dx_shader_scrub(res);
mutex_unlock(&dev_priv->binding_mutex);
if (ret)
return ret;
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
return 0;
}
/**
* vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
* DX shaders.
*
* @dev_priv: Pointer to device private structure.
* @list: The list of cotable resources.
* @readback: Whether the call was part of a readback unbind.
*
* Scrubs all shader MOBs so that any subsequent shader unbind or shader
* destroy operation won't need to swap in the context.
*/
void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
struct list_head *list,
bool readback)
{
struct vmw_dx_shader *entry, *next;
lockdep_assert_held_once(&dev_priv->binding_mutex);
list_for_each_entry_safe(entry, next, list, cotable_head) {
WARN_ON(vmw_dx_shader_scrub(&entry->res));
if (!readback)
entry->committed = false;
}
}
/**
* vmw_dx_shader_res_free - The DX shader free callback
*
* @res: The shader resource
*
* Frees the DX shader resource.
*/
static void vmw_dx_shader_res_free(struct vmw_resource *res)
{
struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
vmw_resource_unreference(&shader->cotable);
kfree(shader);
}
/**
* vmw_dx_shader_add - Add a shader resource as a command buffer managed
* resource.
*
* @man: The command buffer resource manager.
* @ctx: Pointer to the context resource.
* @user_key: The id used for this shader.
* @shader_type: The shader type.
* @list: The list of staged command buffer managed resources.
*/
int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_resource *ctx,
u32 user_key,
SVGA3dShaderType shader_type,
struct list_head *list)
{
struct vmw_dx_shader *shader;
struct vmw_resource *res;
struct vmw_private *dev_priv = ctx->dev_priv;
int ret;
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
shader = kmalloc(sizeof(*shader), GFP_KERNEL);
if (!shader) {
return -ENOMEM;
}
res = &shader->res;
shader->ctx = ctx;
shader->cotable = vmw_resource_reference
(vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER));
shader->id = user_key;
shader->committed = false;
INIT_LIST_HEAD(&shader->cotable_head);
ret = vmw_resource_init(dev_priv, res, true,
vmw_dx_shader_res_free, &vmw_dx_shader_func);
if (ret)
goto out_resource_init;
/*
* The user_key name-space is not per shader type for DX shaders,
* so when hashing, use a single zero shader type.
*/
ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
vmw_shader_key(user_key, 0),
res, list);
if (ret)
goto out_resource_init;
res->id = shader->id;
res->hw_destroy = vmw_hw_shader_destroy;
out_resource_init:
vmw_resource_unreference(&res);
return ret;
}
/*
* User-space shader management:
*/
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base)
{
return &(container_of(base, struct vmw_user_shader, base)->
shader.res);
}
static void vmw_user_shader_free(struct vmw_resource *res)
{
struct vmw_user_shader *ushader =
container_of(res, struct vmw_user_shader, shader.res);
ttm_base_object_kfree(ushader, base);
}
static void vmw_shader_free(struct vmw_resource *res)
{
struct vmw_shader *shader = vmw_res_to_shader(res);
kfree(shader);
}
/*
* This function is called when user space has no more references on the
* base object. It releases the base-object's reference on the resource object.
*/
static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_resource *res = vmw_user_shader_base_to_res(base);
*p_base = NULL;
vmw_resource_unreference(&res);
}
int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
return ttm_ref_object_base_unref(tfile, arg->handle);
}
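/**
 * vmw_user_shader_alloc - Allocate a user-space visible guest-backed shader.
 *
 * @dev_priv: Pointer to the device private structure.
 * @buffer: Optional buffer object holding the shader byte code.
 * @shader_size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within @buffer.
 * @shader_type: The SVGA3D shader type.
 * @num_input_sig: Number of input signatures.
 * @num_output_sig: Number of output signatures.
 * @tfile: The ttm object file to register the base object with.
 * @handle: If non-NULL, assigned the user-space handle on success.
 *
 * Returns 0 on success or a negative error code on failure.
 */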
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
struct vmw_bo *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type,
uint8_t num_input_sig,
uint8_t num_output_sig,
struct ttm_object_file *tfile,
u32 *handle)
{
struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp;
int ret;
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
if (unlikely(!ushader)) {
ret = -ENOMEM;
goto out;
}
res = &ushader->shader.res;
ushader->base.shareable = false;
ushader->base.tfile = NULL;
/*
* From here on, the destructor takes over resource freeing.
*/
ret = vmw_gb_shader_init(dev_priv, res, shader_size,
offset, shader_type, num_input_sig,
num_output_sig, buffer,
vmw_user_shader_free);
if (unlikely(ret != 0))
goto out;
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &ushader->base, false,
VMW_RES_SHADER,
&vmw_user_shader_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
goto out_err;
}
if (handle)
*handle = ushader->base.handle;
out_err:
vmw_resource_unreference(&res);
out:
return ret;
}
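/**
 * vmw_shader_alloc - Allocate a kernel-internal guest-backed shader.
 *
 * @dev_priv: Pointer to the device private structure.
 * @buffer: Optional buffer object holding the shader byte code.
 * @shader_size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within @buffer.
 * @shader_type: The SVGA3D shader type.
 *
 * Returns a refcounted pointer to the new shader resource on success or
 * an error pointer on failure.
 */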
static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
struct vmw_bo *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type)
{
struct vmw_shader *shader;
struct vmw_resource *res;
int ret;
shader = kzalloc(sizeof(*shader), GFP_KERNEL);
if (unlikely(!shader)) {
ret = -ENOMEM;
goto out_err;
}
res = &shader->res;
/*
* From here on, the destructor takes over resource freeing.
*/
ret = vmw_gb_shader_init(dev_priv, res, shader_size,
offset, shader_type, 0, 0, buffer,
vmw_shader_free);
out_err:
return ret ? ERR_PTR(ret) : res;
}
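/**
 * vmw_shader_define - Helper backing the shader define ioctl.
 *
 * @dev: The DRM device.
 * @file_priv: The DRM file the shader is created for.
 * @shader_type_drm: The user-space shader type.
 * @buffer_handle: Handle of the buffer object holding the byte code, or
 * SVGA3D_INVALID_ID for no buffer.
 * @size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within the buffer object.
 * @num_input_sig: Number of input signatures.
 * @num_output_sig: Number of output signatures.
 * @shader_handle: Assigned the user-space shader handle on success.
 *
 * Looks up and validates the optional byte-code buffer, translates the
 * user-space shader type and creates a user-space visible shader resource.
 */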
static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
enum drm_vmw_shader_type shader_type_drm,
u32 buffer_handle, size_t size, size_t offset,
uint8_t num_input_sig, uint8_t num_output_sig,
uint32_t *shader_handle)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_bo *buffer = NULL;
SVGA3dShaderType shader_type;
int ret;
if (buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_bo_lookup(file_priv, buffer_handle, &buffer);
if (unlikely(ret != 0)) {
VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n");
return ret;
}
if ((u64)buffer->tbo.base.size < (u64)size + (u64)offset) {
VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
ret = -EINVAL;
goto out_bad_arg;
}
}
switch (shader_type_drm) {
case drm_vmw_shader_type_vs:
shader_type = SVGA3D_SHADERTYPE_VS;
break;
case drm_vmw_shader_type_ps:
shader_type = SVGA3D_SHADERTYPE_PS;
break;
default:
VMW_DEBUG_USER("Illegal shader type.\n");
ret = -EINVAL;
goto out_bad_arg;
}
ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
shader_type, num_input_sig,
num_output_sig, tfile, shader_handle);
out_bad_arg:
vmw_user_bo_unref(buffer);
return ret;
}
/**
* vmw_shader_id_ok - Check whether a compat shader user key and
* shader type are within valid bounds.
*
* @user_key: User space id of the shader.
* @shader_type: Shader type.
*
 * Returns true if valid, false if not.
*/
static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}
/**
* vmw_shader_key - Compute a hash key suitable for a compat shader.
*
* @user_key: User space id of the shader.
* @shader_type: Shader type.
*
* Returns a hash key suitable for a command buffer managed resource
* manager hash table.
*/
static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
return user_key | (shader_type << 20);
}
/**
* vmw_shader_remove - Stage a compat shader for removal.
*
* @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @shader_type: Shader type.
* @list: Caller's list of staged command buffer resource actions.
*/
int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
u32 user_key, SVGA3dShaderType shader_type,
struct list_head *list)
{
struct vmw_resource *dummy;
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
vmw_shader_key(user_key, shader_type),
list, &dummy);
}
/**
* vmw_compat_shader_add - Create a compat shader and stage it for addition
* as a command buffer managed resource.
*
* @dev_priv: Pointer to device private structure.
* @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @bytecode: Pointer to the bytecode of the shader.
* @shader_type: Shader type.
 * @size: Size of the shader byte code in bytes.
* @list: Caller's list of staged command buffer resource actions.
*
*/
int vmw_compat_shader_add(struct vmw_private *dev_priv,
struct vmw_cmdbuf_res_manager *man,
u32 user_key, const void *bytecode,
SVGA3dShaderType shader_type,
size_t size,
struct list_head *list)
{
struct ttm_operation_ctx ctx = { false, true };
struct vmw_bo *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
int ret;
struct vmw_resource *res;
struct vmw_bo_params bo_params = {
.domain = VMW_BO_DOMAIN_SYS,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
.pin = true
};
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (unlikely(ret != 0))
goto out;
ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
if (unlikely(ret != 0))
goto no_reserve;
/* Map and copy shader bytecode. */
ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
if (unlikely(ret != 0)) {
ttm_bo_unreserve(&buf->tbo);
goto no_reserve;
}
memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
WARN_ON(is_iomem);
ttm_bo_kunmap(&map);
ret = ttm_bo_validate(&buf->tbo, &buf->placement, &ctx);
WARN_ON(ret != 0);
ttm_bo_unreserve(&buf->tbo);
	res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto no_reserve;
	}
ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
vmw_shader_key(user_key, shader_type),
res, list);
vmw_resource_unreference(&res);
no_reserve:
vmw_bo_unreference(&buf);
out:
return ret;
}
/**
* vmw_shader_lookup - Look up a compat shader
*
* @man: Pointer to the command buffer managed resource manager identifying
* the shader namespace.
* @user_key: The user space id of the shader.
* @shader_type: The shader type.
*
* Returns a refcounted pointer to a struct vmw_resource if the shader was
* found. An error pointer otherwise.
*/
struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
u32 user_key,
SVGA3dShaderType shader_type)
{
if (!vmw_shader_id_ok(user_key, shader_type))
return ERR_PTR(-EINVAL);
return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
vmw_shader_key(user_key, shader_type));
}
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_shader_create_arg *arg =
(struct drm_vmw_shader_create_arg *)data;
return vmw_shader_define(dev, file_priv, arg->shader_type,
arg->buffer_handle,
arg->size, arg->offset,
0, 0,
&arg->shader_handle);
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_shader.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
* Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
/*
 * Currently, the only reason we need to keep track of views is that if we
* destroy a hardware surface, all views pointing to it must also be destroyed,
* otherwise the device will error.
* So in particular if a surface is evicted, we must destroy all views pointing
* to it, and all context bindings of that view. Similarly we must restore
* the view bindings, views and surfaces pointed to by the views when a
* context is referenced in the command stream.
*/
/**
* struct vmw_view - view metadata
*
* @rcu: RCU callback head
* @res: The struct vmw_resource we derive from
* @ctx: Non-refcounted pointer to the context this view belongs to.
* @srf: Refcounted pointer to the surface pointed to by this view.
* @cotable: Refcounted pointer to the cotable holding this view.
* @srf_head: List head for the surface-to-view list.
* @cotable_head: List head for the cotable-to_view list.
* @view_type: View type.
* @view_id: User-space per context view id. Currently used also as per
* context device view id.
* @cmd_size: Size of the SVGA3D define view command that we've copied from the
* command stream.
* @committed: Whether the view is actually created or pending creation at the
* device level.
* @cmd: The SVGA3D define view command copied from the command stream.
*/
struct vmw_view {
struct rcu_head rcu;
struct vmw_resource res;
struct vmw_resource *ctx; /* Immutable */
struct vmw_resource *srf; /* Immutable */
struct vmw_resource *cotable; /* Immutable */
struct list_head srf_head; /* Protected by binding_mutex */
struct list_head cotable_head; /* Protected by binding_mutex */
unsigned view_type; /* Immutable */
unsigned view_id; /* Immutable */
u32 cmd_size; /* Immutable */
bool committed; /* Protected by binding_mutex */
u32 cmd[]; /* Immutable */
};
static int vmw_view_create(struct vmw_resource *res);
static int vmw_view_destroy(struct vmw_resource *res);
static void vmw_hw_view_destroy(struct vmw_resource *res);
static void vmw_view_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
static const struct vmw_res_func vmw_view_func = {
.res_type = vmw_res_view,
.needs_guest_memory = false,
.may_evict = false,
.type_name = "DX view",
.domain = VMW_BO_DOMAIN_SYS,
.busy_domain = VMW_BO_DOMAIN_SYS,
.create = vmw_view_create,
.commit_notify = vmw_view_commit_notify,
};
/**
* struct vmw_view_define - view define command body stub
*
* @view_id: The device id of the view being defined
* @sid: The surface id of the view being defined
*
* This generic struct is used by the code to change @view_id and @sid of a
* saved view define command.
*/
struct vmw_view_define {
uint32 view_id;
uint32 sid;
};
/**
* vmw_view - Convert a struct vmw_resource to a struct vmw_view
*
* @res: Pointer to the resource to convert.
*
* Returns a pointer to a struct vmw_view.
*/
static struct vmw_view *vmw_view(struct vmw_resource *res)
{
return container_of(res, struct vmw_view, res);
}
/**
* vmw_view_commit_notify - Notify that a view operation has been committed to
* hardware from a user-supplied command stream.
*
* @res: Pointer to the view resource.
* @state: Indicating whether a creation or removal has been committed.
*
*/
static void vmw_view_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state)
{
struct vmw_view *view = vmw_view(res);
struct vmw_private *dev_priv = res->dev_priv;
mutex_lock(&dev_priv->binding_mutex);
if (state == VMW_CMDBUF_RES_ADD) {
struct vmw_surface *srf = vmw_res_to_srf(view->srf);
list_add_tail(&view->srf_head, &srf->view_list);
vmw_cotable_add_resource(view->cotable, &view->cotable_head);
view->committed = true;
res->id = view->view_id;
} else {
list_del_init(&view->cotable_head);
list_del_init(&view->srf_head);
view->committed = false;
res->id = -1;
}
mutex_unlock(&dev_priv->binding_mutex);
}
/**
* vmw_view_create - Create a hardware view.
*
* @res: Pointer to the view resource.
*
* Create a hardware view. Typically used if that view has previously been
* destroyed by an eviction operation.
*/
static int vmw_view_create(struct vmw_resource *res)
{
struct vmw_view *view = vmw_view(res);
struct vmw_surface *srf = vmw_res_to_srf(view->srf);
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
struct vmw_view_define body;
} *cmd;
mutex_lock(&dev_priv->binding_mutex);
if (!view->committed) {
mutex_unlock(&dev_priv->binding_mutex);
return 0;
}
cmd = VMW_CMD_CTX_RESERVE(res->dev_priv, view->cmd_size, view->ctx->id);
if (!cmd) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
memcpy(cmd, &view->cmd, view->cmd_size);
WARN_ON(cmd->body.view_id != view->view_id);
/* Sid may have changed due to surface eviction. */
WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
cmd->body.sid = view->srf->id;
vmw_cmd_commit(res->dev_priv, view->cmd_size);
res->id = view->view_id;
list_add_tail(&view->srf_head, &srf->view_list);
vmw_cotable_add_resource(view->cotable, &view->cotable_head);
mutex_unlock(&dev_priv->binding_mutex);
return 0;
}
/**
* vmw_view_destroy - Destroy a hardware view.
*
* @res: Pointer to the view resource.
*
* Destroy a hardware view. Typically used on unexpected termination of the
* owning process or if the surface the view is pointing to is destroyed.
*/
static int vmw_view_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_view *view = vmw_view(res);
struct {
SVGA3dCmdHeader header;
union vmw_view_destroy body;
} *cmd;
lockdep_assert_held_once(&dev_priv->binding_mutex);
vmw_binding_res_list_scrub(&res->binding_head);
if (!view->committed || res->id == -1)
return 0;
cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), view->ctx->id);
if (!cmd)
return -ENOMEM;
cmd->header.id = vmw_view_destroy_cmds[view->view_type];
cmd->header.size = sizeof(cmd->body);
cmd->body.view_id = view->view_id;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
res->id = -1;
list_del_init(&view->cotable_head);
list_del_init(&view->srf_head);
return 0;
}
/**
* vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup.
*
* @res: Pointer to the view resource.
*
* Destroy a hardware view if it's still present.
*/
static void vmw_hw_view_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
mutex_lock(&dev_priv->binding_mutex);
WARN_ON(vmw_view_destroy(res));
res->id = -1;
mutex_unlock(&dev_priv->binding_mutex);
}
/**
* vmw_view_key - Compute a view key suitable for the cmdbuf resource manager
*
* @user_key: The user-space id used for the view.
* @view_type: The view type.
*
 * Returns a hash key suitable for the command buffer managed resource
 * manager hash table.
*/
static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type)
{
return user_key | (view_type << 20);
}
/**
* vmw_view_id_ok - Basic view id and type range checks.
*
* @user_key: The user-space id used for the view.
* @view_type: The view type.
*
* Checks that the view id and type (typically provided by user-space) is
* valid.
*/
static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
{
return (user_key < SVGA_COTABLE_MAX_IDS &&
view_type < vmw_view_max);
}
/**
* vmw_view_res_free - resource res_free callback for view resources
*
* @res: Pointer to a struct vmw_resource
*
* Frees memory held by the struct vmw_view.
*/
static void vmw_view_res_free(struct vmw_resource *res)
{
struct vmw_view *view = vmw_view(res);
vmw_resource_unreference(&view->cotable);
vmw_resource_unreference(&view->srf);
kfree_rcu(view, rcu);
}
/**
* vmw_view_add - Create a view resource and stage it for addition
* as a command buffer managed resource.
*
 * @man: Pointer to the command buffer resource manager identifying the view
 * namespace.
* @ctx: Pointer to a struct vmw_resource identifying the active context.
* @srf: Pointer to a struct vmw_resource identifying the surface the view
* points to.
* @view_type: The view type deduced from the view create command.
 * @user_key: The key that is used to identify the view. The key is
 * unique to the view type and to the context.
* @cmd: Pointer to the view create command in the command stream.
* @cmd_size: Size of the view create command in the command stream.
* @list: Caller's list of staged command buffer resource actions.
*/
int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_resource *ctx,
struct vmw_resource *srf,
enum vmw_view_type view_type,
u32 user_key,
const void *cmd,
size_t cmd_size,
struct list_head *list)
{
static const size_t vmw_view_define_sizes[] = {
[vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
[vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
[vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView),
[vmw_view_ua] = sizeof(SVGA3dCmdDXDefineUAView)
};
struct vmw_private *dev_priv = ctx->dev_priv;
struct vmw_resource *res;
struct vmw_view *view;
size_t size;
int ret;
if (cmd_size != vmw_view_define_sizes[view_type] +
sizeof(SVGA3dCmdHeader)) {
VMW_DEBUG_USER("Illegal view create command size.\n");
return -EINVAL;
}
if (!vmw_view_id_ok(user_key, view_type)) {
VMW_DEBUG_USER("Illegal view add view id.\n");
return -EINVAL;
}
size = offsetof(struct vmw_view, cmd) + cmd_size;
view = kmalloc(size, GFP_KERNEL);
if (!view) {
return -ENOMEM;
}
res = &view->res;
view->ctx = ctx;
view->srf = vmw_resource_reference(srf);
view->cotable = vmw_resource_reference
(vmw_context_cotable(ctx, vmw_view_cotables[view_type]));
view->view_type = view_type;
view->view_id = user_key;
view->cmd_size = cmd_size;
view->committed = false;
INIT_LIST_HEAD(&view->srf_head);
INIT_LIST_HEAD(&view->cotable_head);
memcpy(&view->cmd, cmd, cmd_size);
ret = vmw_resource_init(dev_priv, res, true,
vmw_view_res_free, &vmw_view_func);
if (ret)
goto out_resource_init;
ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view,
vmw_view_key(user_key, view_type),
res, list);
if (ret)
goto out_resource_init;
res->id = view->view_id;
res->hw_destroy = vmw_hw_view_destroy;
out_resource_init:
vmw_resource_unreference(&res);
return ret;
}
/**
* vmw_view_remove - Stage a view for removal.
*
 * @man: Pointer to the view manager identifying the view namespace.
* @user_key: The key that is used to identify the view. The key is
* unique to the view type.
* @view_type: View type
* @list: Caller's list of staged command buffer resource actions.
* @res_p: If the resource is in an already committed state, points to the
* struct vmw_resource on successful return. The pointer will be
* non ref-counted.
*/
int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
u32 user_key, enum vmw_view_type view_type,
struct list_head *list,
struct vmw_resource **res_p)
{
if (!vmw_view_id_ok(user_key, view_type)) {
VMW_DEBUG_USER("Illegal view remove view id.\n");
return -EINVAL;
}
return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view,
vmw_view_key(user_key, view_type),
list, res_p);
}
/**
* vmw_view_cotable_list_destroy - Evict all views belonging to a cotable.
*
* @dev_priv: Pointer to a device private struct.
* @list: List of views belonging to a cotable.
* @readback: Unused. Needed for function interface only.
*
* This function evicts all views belonging to a cotable.
* It must be called with the binding_mutex held, and the caller must hold
* a reference to the view resource. This is typically called before the
* cotable is paged out.
*/
void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
struct list_head *list,
bool readback)
{
struct vmw_view *entry, *next;
lockdep_assert_held_once(&dev_priv->binding_mutex);
list_for_each_entry_safe(entry, next, list, cotable_head)
WARN_ON(vmw_view_destroy(&entry->res));
}
/**
* vmw_view_surface_list_destroy - Evict all views pointing to a surface
*
* @dev_priv: Pointer to a device private struct.
* @list: List of views pointing to a surface.
*
* This function evicts all views pointing to a surface. This is typically
* called before the surface is evicted.
*/
void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
struct list_head *list)
{
struct vmw_view *entry, *next;
lockdep_assert_held_once(&dev_priv->binding_mutex);
list_for_each_entry_safe(entry, next, list, srf_head)
WARN_ON(vmw_view_destroy(&entry->res));
}
/**
* vmw_view_srf - Return a non-refcounted pointer to the surface a view is
* pointing to.
*
* @res: pointer to a view resource.
*
 * Note that the view itself holds a reference, so as long as the
 * view resource is alive, the surface resource will be.
*/
struct vmw_resource *vmw_view_srf(struct vmw_resource *res)
{
return vmw_view(res)->srf;
}
/**
* vmw_view_lookup - Look up a view.
*
* @man: The context's cmdbuf ref manager.
* @view_type: The view type.
* @user_key: The view user id.
*
* returns a refcounted pointer to a view or an error pointer if not found.
*/
struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_view_type view_type,
u32 user_key)
{
return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view,
vmw_view_key(user_key, view_type));
}
/**
* vmw_view_dirtying - Return whether a view type is dirtying its resource
* @res: Pointer to the view
*
* Each time a resource is put on the validation list as the result of a
* view pointing to it, we need to determine whether that resource will
* be dirtied (written to by the GPU) as a result of the corresponding
 * GPU operation. Currently only render-target, depth-stencil and unordered
 * access views are capable of dirtying their resource.
*
* Return: Whether the view type of @res dirties the resource it points to.
*/
u32 vmw_view_dirtying(struct vmw_resource *res)
{
static u32 view_is_dirtying[vmw_view_max] = {
[vmw_view_rt] = VMW_RES_DIRTY_SET,
[vmw_view_ds] = VMW_RES_DIRTY_SET,
[vmw_view_ua] = VMW_RES_DIRTY_SET,
};
/* Update this function as we add more view types */
BUILD_BUG_ON(vmw_view_max != 4);
return view_is_dirtying[vmw_view(res)->view_type];
}
const u32 vmw_view_destroy_cmds[] = {
[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
[vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
[vmw_view_ua] = SVGA_3D_CMD_DX_DESTROY_UA_VIEW,
};
const SVGACOTableType vmw_view_cotables[] = {
[vmw_view_sr] = SVGA_COTABLE_SRVIEW,
[vmw_view_rt] = SVGA_COTABLE_RTVIEW,
[vmw_view_ds] = SVGA_COTABLE_DSVIEW,
[vmw_view_ua] = SVGA_COTABLE_UAVIEW,
};
const SVGACOTableType vmw_so_cotables[] = {
[vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT,
[vmw_so_bs] = SVGA_COTABLE_BLENDSTATE,
[vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
[vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
[vmw_so_ss] = SVGA_COTABLE_SAMPLER,
[vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT,
	[vmw_so_max] = SVGA_COTABLE_MAX
};
/* To remove unused function warning */
static void vmw_so_build_asserts(void) __attribute__((used));
/*
* This function is unused at run-time, and only used to dump various build
* asserts important for code optimization assumptions.
*/
static void vmw_so_build_asserts(void)
{
/* Assert that our vmw_view_cmd_to_type() function is correct. */
BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW !=
SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1);
BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW !=
SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2);
BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW !=
SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3);
BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW !=
SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4);
BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW !=
SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5);
/* Assert that our "one body fits all" assumption is valid */
BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));
/* Assert that the view key space can hold all view ids. */
BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1));
/*
* Assert that the offset of sid in all view define commands
* is what we assume it to be.
*/
BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
offsetof(SVGA3dCmdDXDefineShaderResourceView, sid));
BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
offsetof(SVGA3dCmdDXDefineUAView, sid));
BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
offsetof(SVGA3dCmdDXDefineDepthStencilView_v2, sid));
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_so.c |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright 2021 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_devcaps.h"
#include "vmwgfx_drv.h"
struct svga_3d_compat_cap {
SVGA3dFifoCapsRecordHeader header;
SVGA3dFifoCapPair pairs[SVGA3D_DEVCAP_MAX];
};
static u32 vmw_mask_legacy_multisample(unsigned int cap, u32 fmt_value)
{
/*
	 * A version of user-space exists which uses MULTISAMPLE_MASKABLESAMPLES
	 * to check the sample count supported by the virtual device. Since
	 * there never was support for multisample counts for backing MOBs,
	 * return 0.
	 *
	 * The MULTISAMPLE_MASKABLESAMPLES devcap is marked as deprecated by
	 * the virtual device.
*/
if (cap == SVGA3D_DEVCAP_DEAD5)
return 0;
return fmt_value;
}
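/**
 * vmw_fill_compat_cap - Build an SVGA3D_FIFO_CAPS_RECORD_DEVCAPS record
 * from the cached device capabilities.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bounce: Destination buffer for the capability record.
 * @size: Size of @bounce in bytes.
 *
 * Used for user-space that is not guest-backed-object aware. Returns 0 on
 * success or -EINVAL if @bounce is too small to hold the record header.
 */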
static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
size_t size)
{
struct svga_3d_compat_cap *compat_cap =
(struct svga_3d_compat_cap *) bounce;
unsigned int i;
size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
unsigned int max_size;
if (size < pair_offset)
return -EINVAL;
max_size = (size - pair_offset) / sizeof(SVGA3dFifoCapPair);
if (max_size > SVGA3D_DEVCAP_MAX)
max_size = SVGA3D_DEVCAP_MAX;
compat_cap->header.length =
(pair_offset + max_size * sizeof(SVGA3dFifoCapPair)) / sizeof(u32);
compat_cap->header.type = SVGA3D_FIFO_CAPS_RECORD_DEVCAPS;
for (i = 0; i < max_size; ++i) {
compat_cap->pairs[i][0] = i;
compat_cap->pairs[i][1] = vmw_mask_legacy_multisample
(i, dev_priv->devcaps[i]);
}
return 0;
}
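/**
 * vmw_devcaps_create - Cache the device capability registers.
 *
 * @vmw: Pointer to the device private structure.
 *
 * On devices with guest-backed objects, reads all SVGA_REG_DEV_CAP values
 * into a vzalloc'ed array for later lookup. Returns 0 on success or
 * -ENOMEM on allocation failure.
 */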
int vmw_devcaps_create(struct vmw_private *vmw)
{
bool gb_objects = !!(vmw->capabilities & SVGA_CAP_GBOBJECTS);
uint32_t i;
if (gb_objects) {
vmw->devcaps = vzalloc(sizeof(uint32_t) * SVGA3D_DEVCAP_MAX);
if (!vmw->devcaps)
return -ENOMEM;
for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) {
vmw_write(vmw, SVGA_REG_DEV_CAP, i);
vmw->devcaps[i] = vmw_read(vmw, SVGA_REG_DEV_CAP);
}
}
return 0;
}
void vmw_devcaps_destroy(struct vmw_private *vmw)
{
vfree(vmw->devcaps);
vmw->devcaps = NULL;
}
uint32 vmw_devcaps_size(const struct vmw_private *vmw,
bool gb_aware)
{
bool gb_objects = !!(vmw->capabilities & SVGA_CAP_GBOBJECTS);
if (gb_objects && gb_aware)
return SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
else if (gb_objects)
return sizeof(struct svga_3d_compat_cap) +
sizeof(uint32_t);
else if (vmw->fifo_mem != NULL)
return (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
sizeof(uint32_t);
else
return 0;
}
int vmw_devcaps_copy(struct vmw_private *vmw, bool gb_aware,
void *dst, uint32_t dst_size)
{
int ret;
bool gb_objects = !!(vmw->capabilities & SVGA_CAP_GBOBJECTS);
if (gb_objects && gb_aware) {
memcpy(dst, vmw->devcaps, dst_size);
} else if (gb_objects) {
ret = vmw_fill_compat_cap(vmw, dst, dst_size);
if (unlikely(ret != 0))
return ret;
} else if (vmw->fifo_mem) {
u32 *fifo_mem = vmw->fifo_mem;
memcpy(dst, &fifo_mem[SVGA_FIFO_3D_CAPS], dst_size);
} else
return -EINVAL;
return 0;
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_kms.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#define vmw_crtc_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.crtc)
#define vmw_encoder_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.encoder)
#define vmw_connector_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.connector)
struct vmw_legacy_display {
struct list_head active;
unsigned num_active;
unsigned last_num_active;
struct vmw_framebuffer *fb;
};
/*
* Display unit using the legacy register interface.
*/
struct vmw_legacy_display_unit {
struct vmw_display_unit base;
struct list_head active;
};
static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
{
list_del_init(&ldu->active);
vmw_du_cleanup(&ldu->base);
kfree(ldu);
}
/*
* Legacy Display Unit CRTC functions
*/
static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
{
vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
}
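/**
 * vmw_ldu_commit_list - Program the legacy display layout.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Writes the SVGA framebuffer layout and the position and size of every
 * active legacy display unit to the device registers. If the device lacks
 * SVGA_CAP_DISPLAY_TOPOLOGY, only the bounding framebuffer dimensions are
 * written.
 */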
static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
int i;
/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
*/
if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
int w = 0, h = 0;
list_for_each_entry(entry, &lds->active, active) {
crtc = &entry->base.crtc;
w = max(w, crtc->x + crtc->mode.hdisplay);
h = max(h, crtc->y + crtc->mode.vdisplay);
}
if (crtc == NULL)
return 0;
fb = crtc->primary->state->fb;
return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
fb->format->cpp[0] * 8,
fb->format->depth);
}
if (!list_empty(&lds->active)) {
entry = list_entry(lds->active.next, typeof(*entry), active);
fb = entry->base.crtc.primary->state->fb;
vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
fb->format->cpp[0] * 8, fb->format->depth);
}
/* Make sure we always show something. */
vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
lds->num_active ? lds->num_active : 1);
i = 0;
list_for_each_entry(entry, &lds->active, active) {
crtc = &entry->base.crtc;
vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
i++;
}
BUG_ON(i != lds->num_active);
lds->last_num_active = lds->num_active;
return 0;
}
/*
* Pin the buffer in a location suitable for access by the
* display system.
*/
static int vmw_ldu_fb_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_bo *buf;
int ret;
buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
if (!buf)
return 0;
WARN_ON(dev_priv->active_display_unit != vmw_du_legacy);
if (dev_priv->active_display_unit == vmw_du_legacy) {
vmw_overlay_pause_all(dev_priv);
ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
vmw_overlay_resume_all(dev_priv);
} else
ret = -EINVAL;
return ret;
}
static int vmw_ldu_fb_unpin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_bo *buf;
buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
if (WARN_ON(!buf))
return 0;
return vmw_bo_unpin(dev_priv, buf, false);
}
static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
struct vmw_legacy_display_unit *ldu)
{
struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
if (list_empty(&ldu->active))
return 0;
	/* Must use list_del_init(), otherwise list_empty(&ldu->active) will not work. */
list_del_init(&ldu->active);
if (--(ld->num_active) == 0) {
BUG_ON(!ld->fb);
WARN_ON(vmw_ldu_fb_unpin(ld->fb));
ld->fb = NULL;
}
return 0;
}
static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
struct vmw_legacy_display_unit *ldu,
struct vmw_framebuffer *vfb)
{
struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
struct list_head *at;
BUG_ON(!ld->num_active && ld->fb);
if (vfb != ld->fb) {
if (ld->fb)
WARN_ON(vmw_ldu_fb_unpin(ld->fb));
vmw_svga_enable(vmw_priv);
WARN_ON(vmw_ldu_fb_pin(vfb));
ld->fb = vfb;
}
if (!list_empty(&ldu->active))
return 0;
at = &ld->active;
list_for_each_entry(entry, &ld->active, active) {
if (entry->base.unit > ldu->base.unit)
break;
at = &entry->active;
}
list_add(&ldu->active, at);
ld->num_active++;
return 0;
}
/**
 * vmw_ldu_crtc_mode_set_nofb - Noop
 *
 * @crtc: CRTC associated with the new screen
 *
 * For LDU this is a noop; the SVGA device is enabled and the display layout
 * is programmed from the primary plane update path instead.
 */
static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
}
/**
* vmw_ldu_crtc_atomic_enable - Noop
*
* @crtc: CRTC associated with the new screen
* @state: Unused
*
 * This is called after a mode set has been completed. It would usually be
 * a good place to call vmw_ldu_add_active/vmw_ldu_del_active, but since
 * for LDU the display plane is closely tied to the CRTC, it makes more
 * sense to do those at plane update time.
*/
static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
}
/**
* vmw_ldu_crtc_atomic_disable - Turns off CRTC
*
* @crtc: CRTC to be turned off
* @state: Unused
*/
static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
}
static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_ldu_crtc_destroy,
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
};
/*
* Legacy Display Unit encoder functions
*/
static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
{
vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
}
static const struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
.destroy = vmw_ldu_encoder_destroy,
};
/*
* Legacy Display Unit connector functions
*/
static void vmw_ldu_connector_destroy(struct drm_connector *connector)
{
vmw_ldu_destroy(vmw_connector_to_ldu(connector));
}
static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
.destroy = vmw_ldu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
.atomic_destroy_state = vmw_du_connector_destroy_state,
};
static const struct
drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
};
static int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
unsigned int flags, unsigned int color,
struct drm_mode_rect *clips,
unsigned int num_clips);
/*
* Legacy Display Plane Functions
*/
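/**
 * vmw_ldu_primary_plane_atomic_update - Commit a primary plane update.
 *
 * @plane: The primary plane.
 * @state: The atomic state containing the old and new plane state.
 *
 * Adds or removes the display unit from the list of active units depending
 * on whether a framebuffer is bound, reprograms the legacy display layout
 * and, if the device supports command submission, marks the damaged regions
 * of the framebuffer dirty.
 */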
static void
vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct vmw_private *dev_priv;
struct vmw_legacy_display_unit *ldu;
struct vmw_framebuffer *vfb;
struct drm_framebuffer *fb;
struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
ldu = vmw_crtc_to_ldu(crtc);
dev_priv = vmw_priv(plane->dev);
fb = new_state->fb;
vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
if (vfb)
vmw_ldu_add_active(dev_priv, ldu, vfb);
else
vmw_ldu_del_active(dev_priv, ldu);
vmw_ldu_commit_list(dev_priv);
if (vfb && vmw_cmd_supported(dev_priv)) {
struct drm_mode_rect fb_rect = {
.x1 = 0,
.y1 = 0,
.x2 = vfb->base.width,
.y2 = vfb->base.height
};
struct drm_mode_rect *damage_rects = drm_plane_get_damage_clips(new_state);
u32 rect_count = drm_plane_get_damage_clips_count(new_state);
int ret;
if (!damage_rects) {
damage_rects = &fb_rect;
rect_count = 1;
}
ret = vmw_kms_ldu_do_bo_dirty(dev_priv, vfb, 0, 0, damage_rects, rect_count);
drm_WARN_ONCE(plane->dev, ret,
"vmw_kms_ldu_do_bo_dirty failed with: ret=%d\n", ret);
vmw_cmd_flush(dev_priv, false);
}
}
static const struct drm_plane_funcs vmw_ldu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = vmw_du_primary_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
};
static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = vmw_du_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
};
/*
* Atomic Helpers
*/
static const struct
drm_plane_helper_funcs vmw_ldu_cursor_plane_helper_funcs = {
.atomic_check = vmw_du_cursor_plane_atomic_check,
.atomic_update = vmw_du_cursor_plane_atomic_update,
.prepare_fb = vmw_du_cursor_plane_prepare_fb,
.cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
};
static const struct
drm_plane_helper_funcs vmw_ldu_primary_plane_helper_funcs = {
.atomic_check = vmw_du_primary_plane_atomic_check,
.atomic_update = vmw_ldu_primary_plane_atomic_update,
};
static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
.mode_set_nofb = vmw_ldu_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
.atomic_flush = vmw_du_crtc_atomic_flush,
.atomic_enable = vmw_ldu_crtc_atomic_enable,
.atomic_disable = vmw_ldu_crtc_atomic_disable,
};
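/*
* vmw_ldu_init - Allocate and initialize one legacy display unit: its primary
* plane, an optional cursor plane when the device accepts commands, and the
* connector, encoder and CRTC, including the properties attached to the
* connector.
*/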
static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_legacy_display_unit *ldu;
struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_plane *primary;
struct vmw_cursor_plane *cursor;
struct drm_crtc *crtc;
int ret;
ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
if (!ldu)
return -ENOMEM;
ldu->base.unit = unit;
crtc = &ldu->base.crtc;
encoder = &ldu->base.encoder;
connector = &ldu->base.connector;
primary = &ldu->base.primary;
cursor = &ldu->base.cursor;
INIT_LIST_HEAD(&ldu->active);
ldu->base.pref_active = (unit == 0);
ldu->base.pref_width = dev_priv->initial_width;
ldu->base.pref_height = dev_priv->initial_height;
ldu->base.pref_mode = NULL;
/*
* Remove this after enabling atomic because property values can
* only exist in a state object
*/
ldu->base.is_implicit = true;
/* Initialize primary plane */
ret = drm_universal_plane_init(dev, primary,
0, &vmw_ldu_plane_funcs,
vmw_primary_plane_formats,
ARRAY_SIZE(vmw_primary_plane_formats),
NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
DRM_ERROR("Failed to initialize primary plane");
goto err_free;
}
drm_plane_helper_add(primary, &vmw_ldu_primary_plane_helper_funcs);
/*
* We're going to be using traces and software cursors
*/
if (vmw_cmd_supported(dev_priv)) {
/* Initialize cursor plane */
ret = drm_universal_plane_init(dev, &cursor->base,
0, &vmw_ldu_cursor_funcs,
vmw_cursor_plane_formats,
ARRAY_SIZE(vmw_cursor_plane_formats),
NULL, DRM_PLANE_TYPE_CURSOR, NULL);
if (ret) {
DRM_ERROR("Failed to initialize cursor plane");
drm_plane_cleanup(&ldu->base.primary);
goto err_free;
}
drm_plane_helper_add(&cursor->base, &vmw_ldu_cursor_plane_helper_funcs);
}
ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) {
DRM_ERROR("Failed to initialize connector\n");
goto err_free;
}
drm_connector_helper_add(connector, &vmw_ldu_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, true);
ret = drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret) {
DRM_ERROR("Failed to initialize encoder\n");
goto err_free_connector;
}
(void) drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
ret = drm_connector_register(connector);
if (ret) {
DRM_ERROR("Failed to register connector\n");
goto err_free_encoder;
}
ret = drm_crtc_init_with_planes(dev, crtc, primary,
vmw_cmd_supported(dev_priv) ? &cursor->base : NULL,
&vmw_legacy_crtc_funcs, NULL);
if (ret) {
DRM_ERROR("Failed to initialize CRTC\n");
goto err_free_unregister;
}
drm_crtc_helper_add(crtc, &vmw_ldu_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(crtc, 256);
drm_object_attach_property(&connector->base,
dev_priv->hotplug_mode_update_property, 1);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
if (dev_priv->implicit_placement_property)
drm_object_attach_property
(&connector->base,
dev_priv->implicit_placement_property,
1);
return 0;
err_free_unregister:
drm_connector_unregister(connector);
err_free_encoder:
drm_encoder_cleanup(encoder);
err_free_connector:
drm_connector_cleanup(connector);
err_free:
kfree(ldu);
return ret;
}
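/*
* vmw_kms_ldu_init_display - Set up the legacy display system: allocate the
* shared vmw_legacy_display state and create one display unit per supported
* output (VMWGFX_NUM_DISPLAY_UNITS with SVGA_CAP_MULTIMON, otherwise one).
*/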
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
int i, ret;
int num_display_units = (dev_priv->capabilities & SVGA_CAP_MULTIMON) ?
VMWGFX_NUM_DISPLAY_UNITS : 1;
if (unlikely(dev_priv->ldu_priv)) {
return -EINVAL;
}
dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
if (!dev_priv->ldu_priv)
return -ENOMEM;
INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
dev_priv->ldu_priv->num_active = 0;
dev_priv->ldu_priv->last_num_active = 0;
dev_priv->ldu_priv->fb = NULL;
vmw_kms_create_implicit_placement_property(dev_priv);
for (i = 0; i < num_display_units; ++i) {
ret = vmw_ldu_init(dev_priv, i);
if (ret != 0)
goto err_free;
}
dev_priv->active_display_unit = vmw_du_legacy;
drm_mode_config_reset(dev);
return 0;
err_free:
kfree(dev_priv->ldu_priv);
dev_priv->ldu_priv = NULL;
return ret;
}
int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
{
if (!dev_priv->ldu_priv)
return -ENOSYS;
BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
kfree(dev_priv->ldu_priv);
return 0;
}
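/*
* vmw_kms_ldu_do_bo_dirty - Flush dirty regions of the framebuffer by
* emitting one SVGA_CMD_UPDATE command per clip rectangle.
*/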
static int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
unsigned int flags, unsigned int color,
struct drm_mode_rect *clips,
unsigned int num_clips)
{
size_t fifo_size;
int i;
struct {
uint32_t header;
SVGAFifoCmdUpdate body;
} *cmd;
fifo_size = sizeof(*cmd) * num_clips;
cmd = VMW_CMD_RESERVE(dev_priv, fifo_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
memset(cmd, 0, fifo_size);
for (i = 0; i < num_clips; i++, clips++) {
cmd[i].header = SVGA_CMD_UPDATE;
cmd[i].body.x = clips->x1;
cmd[i].body.y = clips->y1;
cmd[i].body.width = clips->x2 - clips->x1;
cmd[i].body.height = clips->y2 - clips->y1;
}
vmw_cmd_commit(dev_priv, fifo_size);
return 0;
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_kms.h"
#include "vmwgfx_bo.h"
#include "vmw_surface_cache.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
void vmw_du_cleanup(struct vmw_display_unit *du)
{
struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
drm_plane_cleanup(&du->primary);
if (vmw_cmd_supported(dev_priv))
drm_plane_cleanup(&du->cursor.base);
drm_connector_unregister(&du->connector);
drm_crtc_cleanup(&du->crtc);
drm_encoder_cleanup(&du->encoder);
drm_connector_cleanup(&du->connector);
}
/*
* Display Unit Cursor functions
*/
static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
struct vmw_plane_state *vps,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
struct vmw_svga_fifo_cmd_define_cursor {
u32 cmd;
SVGAFifoCmdDefineAlphaCursor cursor;
};
/**
* vmw_send_define_cursor_cmd - queue a define cursor command
* @dev_priv: the private driver struct
* @image: buffer which holds the cursor image
* @width: width of the mouse cursor image
* @height: height of the mouse cursor image
* @hotspotX: the horizontal position of mouse hotspot
* @hotspotY: the vertical position of mouse hotspot
*/
static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY)
{
struct vmw_svga_fifo_cmd_define_cursor *cmd;
const u32 image_size = width * height * sizeof(*image);
const u32 cmd_size = sizeof(*cmd) + image_size;
/*
* Try to reserve fifocmd space and swallow any failures;
* such reservations cannot be left unconsumed for long
* under the risk of clogging other fifocmd users, so
* we treat reservations separately from the way we treat
* other fallible KMS-atomic resources at prepare_fb.
*/
cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
if (unlikely(!cmd))
return;
memset(cmd, 0, sizeof(*cmd));
memcpy(&cmd[1], image, image_size);
cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
cmd->cursor.id = 0;
cmd->cursor.width = width;
cmd->cursor.height = height;
cmd->cursor.hotspotX = hotspotX;
cmd->cursor.hotspotY = hotspotY;
vmw_cmd_commit_flush(dev_priv, cmd_size);
}
/**
* vmw_cursor_update_image - update the cursor image on the provided plane
* @dev_priv: the private driver struct
* @vps: the plane state of the cursor plane
* @image: buffer which holds the cursor image
* @width: width of the mouse cursor image
* @height: height of the mouse cursor image
* @hotspotX: the horizontal position of mouse hotspot
* @hotspotY: the vertical position of mouse hotspot
*/
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
struct vmw_plane_state *vps,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY)
{
if (vps->cursor.bo)
vmw_cursor_update_mob(dev_priv, vps, image,
vps->base.crtc_w, vps->base.crtc_h,
hotspotX, hotspotY);
else
vmw_send_define_cursor_cmd(dev_priv, image, width, height,
hotspotX, hotspotY);
}
/**
* vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
*
* Called from inside vmw_du_cursor_plane_atomic_update to actually
* make the cursor-image live.
*
* @dev_priv: device to work with
* @vps: the plane state of the cursor plane
* @image: cursor source data to fill the MOB with
* @width: source data width
* @height: source data height
* @hotspotX: cursor hotspot x
* @hotspotY: cursor hotspot Y
*/
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
struct vmw_plane_state *vps,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY)
{
SVGAGBCursorHeader *header;
SVGAGBAlphaCursorHeader *alpha_header;
const u32 image_size = width * height * sizeof(*image);
header = vmw_bo_map_and_cache(vps->cursor.bo);
alpha_header = &header->header.alphaHeader;
memset(header, 0, sizeof(*header));
header->type = SVGA_ALPHA_CURSOR;
header->sizeInBytes = image_size;
alpha_header->hotspotX = hotspotX;
alpha_header->hotspotY = hotspotY;
alpha_header->width = width;
alpha_header->height = height;
memcpy(header + 1, image, image_size);
vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
vps->cursor.bo->tbo.resource->start);
}
static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
{
return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
}
/**
* vmw_du_cursor_plane_acquire_image - Acquire the image data
* @vps: cursor plane state
*/
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
bool is_iomem;
if (vps->surf) {
if (vps->surf_mapped)
return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
return vps->surf->snooper.image;
} else if (vps->bo)
return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
return NULL;
}
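/*
* vmw_du_cursor_plane_has_changed - Return true if the cursor size, hotspot
* or image contents differ between the old and new plane state.
*/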
static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
struct vmw_plane_state *new_vps)
{
void *old_image;
void *new_image;
u32 size;
bool changed;
if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
old_vps->base.crtc_h != new_vps->base.crtc_h)
return true;
if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
return true;
size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
old_image = vmw_du_cursor_plane_acquire_image(old_vps);
new_image = vmw_du_cursor_plane_acquire_image(new_vps);
changed = false;
if (old_image && new_image)
changed = memcmp(old_image, new_image, size) != 0;
return changed;
}
static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{
if (!(*vbo))
return;
ttm_bo_unpin(&(*vbo)->tbo);
vmw_bo_unreference(vbo);
}
static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
struct vmw_plane_state *vps)
{
u32 i;
if (!vps->cursor.bo)
return;
vmw_du_cursor_plane_unmap_cm(vps);
/* Look for a free slot to return this mob to the cache. */
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
if (!vcp->cursor_mobs[i]) {
vcp->cursor_mobs[i] = vps->cursor.bo;
vps->cursor.bo = NULL;
return;
}
}
/* Cache is full: See if this mob is bigger than an existing mob. */
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
if (vcp->cursor_mobs[i]->tbo.base.size <
vps->cursor.bo->tbo.base.size) {
vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
vcp->cursor_mobs[i] = vps->cursor.bo;
vps->cursor.bo = NULL;
return;
}
}
/* Destroy it if it's not worth caching. */
vmw_du_destroy_cursor_mob(&vps->cursor.bo);
}
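/*
* vmw_du_get_cursor_mob - Provide a cursor MOB large enough for the current
* cursor size, reusing a buffer from the per-plane cache when possible and
* otherwise creating, populating and fencing a new one.
*/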
static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
struct vmw_plane_state *vps)
{
struct vmw_private *dev_priv = vcp->base.dev->dev_private;
u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
u32 i;
u32 cursor_max_dim, mob_max_size;
int ret;
if (!dev_priv->has_mob ||
(dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
return -EINVAL;
mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
vps->base.crtc_h > cursor_max_dim)
return -EINVAL;
if (vps->cursor.bo) {
if (vps->cursor.bo->tbo.base.size >= size)
return 0;
vmw_du_put_cursor_mob(vcp, vps);
}
/* Look for an unused mob in the cache. */
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
if (vcp->cursor_mobs[i] &&
vcp->cursor_mobs[i]->tbo.base.size >= size) {
vps->cursor.bo = vcp->cursor_mobs[i];
vcp->cursor_mobs[i] = NULL;
return 0;
}
}
/* Create a new mob if we can't find an existing one. */
ret = vmw_bo_create_and_populate(dev_priv, size,
VMW_BO_DOMAIN_MOB,
&vps->cursor.bo);
if (ret != 0)
return ret;
/* Fence the mob creation so we are guaranteed to have the mob */
ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
if (ret != 0)
goto teardown;
vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
ttm_bo_unreserve(&vps->cursor.bo->tbo);
return 0;
teardown:
vmw_du_destroy_cursor_mob(&vps->cursor.bo);
return ret;
}
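/*
* vmw_cursor_update_position - Update cursor visibility and position using
* the CURSOR4 registers, the FIFO cursor bypass 3 fields or the legacy cursor
* registers, depending on the device capabilities.
*/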
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
: SVGA_CURSOR_ON_HIDE;
uint32_t count;
spin_lock(&dev_priv->cursor_lock);
if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
} else {
vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
}
spin_unlock(&dev_priv->cursor_lock);
}
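/*
* vmw_kms_cursor_snoop - Copy the cursor image carried by a surface DMA
* command into the surface snooper so it can later be used to redefine the
* host cursor. Bails out on DMA layouts it cannot handle.
*/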
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
SVGA3dCmdHeader *header)
{
struct ttm_bo_kmap_obj map;
unsigned long kmap_offset;
unsigned long kmap_num;
SVGA3dCopyBox *box;
unsigned box_count;
void *virtual;
bool is_iomem;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int i, ret;
const struct SVGA3dSurfaceDesc *desc =
vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
cmd = container_of(header, struct vmw_dma_cmd, header);
/* No snooper installed, nothing to copy */
if (!srf->snooper.image)
return;
if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
DRM_ERROR("face and mipmap for cursors should never != 0\n");
return;
}
if (cmd->header.size < 64) {
DRM_ERROR("at least one full copy box must be given\n");
return;
}
box = (SVGA3dCopyBox *)&cmd[1];
box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
sizeof(SVGA3dCopyBox);
if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
box->x != 0 || box->y != 0 || box->z != 0 ||
box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
box->d != 1 || box_count != 1 ||
box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
/* TODO handle non page aligned offsets */
/* TODO handle more dst & src != 0 */
/* TODO handle more than one copy */
DRM_ERROR("Can't snoop dma request for cursor!\n");
DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
box->srcx, box->srcy, box->srcz,
box->x, box->y, box->z,
box->w, box->h, box->d, box_count,
cmd->dma.guest.ptr.offset);
return;
}
kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
ret = ttm_bo_reserve(bo, true, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return;
}
ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
if (unlikely(ret != 0))
goto err_unreserve;
virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
memcpy(srf->snooper.image, virtual,
VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
} else {
/* Image is unsigned pointer. */
for (i = 0; i < box->h; i++)
memcpy(srf->snooper.image + i * image_pitch,
virtual + i * cmd->dma.guest.pitch,
box->w * desc->pitchBytesPerBlock);
}
srf->snooper.age++;
ttm_bo_kunmap(&map);
err_unreserve:
ttm_bo_unreserve(bo);
}
/**
* vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
*
* @dev_priv: Pointer to the device private struct.
*
* Clears all legacy hotspots.
*/
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct vmw_display_unit *du;
struct drm_crtc *crtc;
drm_modeset_lock_all(dev);
drm_for_each_crtc(crtc, dev) {
du = vmw_crtc_to_du(crtc);
du->hotspot_x = 0;
du->hotspot_y = 0;
}
drm_modeset_unlock_all(dev);
}
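/*
* vmw_kms_cursor_post_execbuf - After command submission, re-issue a define
* cursor command for every display unit whose snooped cursor image has
* changed since it was last sent to the device.
*/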
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct vmw_display_unit *du;
struct drm_crtc *crtc;
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
du = vmw_crtc_to_du(crtc);
if (!du->cursor_surface ||
du->cursor_age == du->cursor_surface->snooper.age ||
!du->cursor_surface->snooper.image)
continue;
du->cursor_age = du->cursor_surface->snooper.age;
vmw_send_define_cursor_cmd(dev_priv,
du->cursor_surface->snooper.image,
VMW_CURSOR_SNOOP_WIDTH,
VMW_CURSOR_SNOOP_HEIGHT,
du->hotspot_x + du->core_hotspot_x,
du->hotspot_y + du->core_hotspot_y);
}
mutex_unlock(&dev->mode_config.mutex);
}
void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
u32 i;
vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
drm_plane_cleanup(plane);
}
void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
drm_plane_cleanup(plane);
/* Planes are static in our case so we don't free them */
}
/**
* vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
*
* @vps: plane state associated with the display surface
* @unreference: true if we also want to drop the reference to the display surface.
*/
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
bool unreference)
{
if (vps->surf) {
if (vps->pinned) {
vmw_resource_unpin(&vps->surf->res);
vps->pinned--;
}
if (unreference) {
if (vps->pinned)
DRM_ERROR("Surface still pinned\n");
vmw_surface_unreference(&vps->surf);
}
}
}
/**
* vmw_du_plane_cleanup_fb - Unpins the plane surface
*
* @plane: display plane
* @old_state: Contains the FB to clean up
*
* Unpins the framebuffer surface.
*/
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
vmw_du_plane_unpin_surf(vps, false);
}
/**
* vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
*
* @vps: plane_state
*
* Returns 0 on success
*/
static int
vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{
int ret;
u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
struct ttm_buffer_object *bo;
if (!vps->cursor.bo)
return -EINVAL;
bo = &vps->cursor.bo->tbo;
if (bo->base.size < size)
return -EINVAL;
if (vps->cursor.bo->map.virtual)
return 0;
ret = ttm_bo_reserve(bo, false, false, NULL);
if (unlikely(ret != 0))
return -ENOMEM;
vmw_bo_map_and_cache(vps->cursor.bo);
ttm_bo_unreserve(bo);
return 0;
}
/**
* vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
*
* @vps: state of the cursor plane
*
* Returns 0 on success
*/
static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{
int ret = 0;
struct vmw_bo *vbo = vps->cursor.bo;
if (!vbo || !vbo->map.virtual)
return 0;
ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
if (likely(ret == 0)) {
vmw_bo_unmap(vbo);
ttm_bo_unreserve(&vbo->tbo);
}
return ret;
}
/**
* vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
*
* @plane: cursor plane
* @old_state: contains the state to clean up
*
* Unmaps all cursor bo mappings and unpins the cursor surface.
*/
void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
bool is_iomem;
if (vps->surf_mapped) {
vmw_bo_unmap(vps->surf->res.guest_memory_bo);
vps->surf_mapped = false;
}
if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
if (likely(ret == 0)) {
ttm_bo_kunmap(&vps->bo->map);
ttm_bo_unreserve(&vps->bo->tbo);
}
}
vmw_du_cursor_plane_unmap_cm(vps);
vmw_du_put_cursor_mob(vcp, vps);
vmw_du_plane_unpin_surf(vps, false);
if (vps->surf) {
vmw_surface_unreference(&vps->surf);
vps->surf = NULL;
}
if (vps->bo) {
vmw_bo_unreference(&vps->bo);
vps->bo = NULL;
}
}
/**
* vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
*
* @plane: display plane
* @new_state: info on the new plane state, including the FB
*
* Returns 0 on success
*/
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct drm_framebuffer *fb = new_state->fb;
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
int ret = 0;
if (vps->surf) {
vmw_surface_unreference(&vps->surf);
vps->surf = NULL;
}
if (vps->bo) {
vmw_bo_unreference(&vps->bo);
vps->bo = NULL;
}
if (fb) {
if (vmw_framebuffer_to_vfb(fb)->bo) {
vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
vmw_bo_reference(vps->bo);
} else {
vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
vmw_surface_reference(vps->surf);
}
}
if (!vps->surf && vps->bo) {
const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
/*
* Not using vmw_bo_map_and_cache() helper here as we need to
* reserve the ttm_buffer_object first which
* vmw_bo_map_and_cache() omits.
*/
ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
if (unlikely(ret != 0))
return -ENOMEM;
ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
ttm_bo_unreserve(&vps->bo->tbo);
if (unlikely(ret != 0))
return -ENOMEM;
} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
WARN_ON(vps->surf->snooper.image);
ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
NULL);
if (unlikely(ret != 0))
return -ENOMEM;
vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
vps->surf_mapped = true;
}
if (vps->surf || vps->bo) {
vmw_du_get_cursor_mob(vcp, vps);
vmw_du_cursor_plane_map_cm(vps);
}
return 0;
}
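/*
* vmw_du_cursor_plane_atomic_update - Push the new cursor state to the
* device: hide the cursor if no surface or buffer is bound, otherwise update
* the cursor image only when it actually changed and reposition it with the
* combined hotspot applied.
*/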
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
s32 hotspot_x, hotspot_y;
hotspot_x = du->hotspot_x;
hotspot_y = du->hotspot_y;
if (new_state->fb) {
hotspot_x += new_state->fb->hot_x;
hotspot_y += new_state->fb->hot_y;
}
du->cursor_surface = vps->surf;
du->cursor_bo = vps->bo;
if (!vps->surf && !vps->bo) {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return;
}
vps->cursor.hotspot_x = hotspot_x;
vps->cursor.hotspot_y = hotspot_y;
if (vps->surf) {
du->cursor_age = du->cursor_surface->snooper.age;
}
if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
/*
* If it hasn't changed, avoid making the device do extra
* work by keeping the old cursor active.
*/
struct vmw_cursor_plane_state tmp = old_vps->cursor;
old_vps->cursor = vps->cursor;
vps->cursor = tmp;
} else {
void *image = vmw_du_cursor_plane_acquire_image(vps);
if (image)
vmw_cursor_update_image(dev_priv, vps, image,
new_state->crtc_w,
new_state->crtc_h,
hotspot_x, hotspot_y);
}
du->cursor_x = new_state->crtc_x + du->set_gui_x;
du->cursor_y = new_state->crtc_y + du->set_gui_y;
vmw_cursor_update_position(dev_priv, true,
du->cursor_x + hotspot_x,
du->cursor_y + hotspot_y);
du->core_hotspot_x = hotspot_x - du->hotspot_x;
du->core_hotspot_y = hotspot_y - du->hotspot_y;
}
/**
* vmw_du_primary_plane_atomic_check - check if the new state is okay
*
* @plane: display plane
* @state: info on the new plane state, including the FB
*
* Check if the new state is settable given the current state. Other
* than what the atomic helper checks, we care about crtc fitting
* the FB and maintaining one active framebuffer.
*
* Returns 0 on success
*/
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_crtc_state *crtc_state = NULL;
struct drm_framebuffer *new_fb = new_state->fb;
int ret;
if (new_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
new_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, true);
if (!ret && new_fb) {
struct drm_crtc *crtc = new_state->crtc;
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
vmw_connector_state_to_vcs(du->connector.state);
}
return ret;
}
/**
* vmw_du_cursor_plane_atomic_check - check if the new state is okay
*
* @plane: cursor plane
* @state: info on the new plane state
*
* This is a chance to fail if the new cursor state does not fit
* our requirements.
*
* Returns 0 on success
*/
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
int ret = 0;
struct drm_crtc_state *crtc_state = NULL;
struct vmw_surface *surface = NULL;
struct drm_framebuffer *fb = new_state->fb;
if (new_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
new_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
true, true);
if (ret)
return ret;
/* Turning off */
if (!fb)
return 0;
/* A lot of the code assumes this */
if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
new_state->crtc_w, new_state->crtc_h);
return -EINVAL;
}
if (!vmw_framebuffer_to_vfb(fb)->bo) {
surface = vmw_framebuffer_to_vfbs(fb)->surface;
WARN_ON(!surface);
if (!surface ||
(!surface->snooper.image && !surface->res.guest_memory_bo)) {
DRM_ERROR("surface not suitable for cursor\n");
return -EINVAL;
}
}
return 0;
}
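/*
* vmw_du_crtc_atomic_check - Require an enabled CRTC to have an active
* primary plane and its own connector, and derive a crtc_clock for modes
* without a dot clock.
*/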
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
int connector_mask = drm_connector_mask(&du->connector);
bool has_primary = new_state->plane_mask &
drm_plane_mask(crtc->primary);
/* We always want to have an active plane with an active CRTC */
if (has_primary != new_state->enable)
return -EINVAL;
if (new_state->connector_mask != connector_mask &&
new_state->connector_mask != 0) {
DRM_ERROR("Invalid connectors configuration\n");
return -EINVAL;
}
/*
* Our virtual device does not have a dot clock, so use the logical
* clock value as the dot clock.
*/
if (new_state->mode.crtc_clock == 0)
new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
return 0;
}
void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
}
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
}
/**
* vmw_du_crtc_duplicate_state - duplicate crtc state
* @crtc: DRM crtc
*
* Allocates and returns a copy of the crtc state (both common and
* vmw-specific) for the specified crtc.
*
* Returns: The newly allocated crtc state, or NULL on failure.
*/
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct drm_crtc_state *state;
struct vmw_crtc_state *vcs;
if (WARN_ON(!crtc->state))
return NULL;
vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
if (!vcs)
return NULL;
state = &vcs->base;
__drm_atomic_helper_crtc_duplicate_state(crtc, state);
return state;
}
/**
* vmw_du_crtc_reset - creates a blank vmw crtc state
* @crtc: DRM crtc
*
* Resets the atomic state for @crtc by freeing the state pointer (which
* might be NULL, e.g. at driver load time) and allocating a new empty state
* object.
*/
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
struct vmw_crtc_state *vcs;
if (crtc->state) {
__drm_atomic_helper_crtc_destroy_state(crtc->state);
kfree(vmw_crtc_state_to_vcs(crtc->state));
}
vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
if (!vcs) {
DRM_ERROR("Cannot allocate vmw_crtc_state\n");
return;
}
__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}
/**
* vmw_du_crtc_destroy_state - destroy crtc state
* @crtc: DRM crtc
* @state: state object to destroy
*
* Destroys the crtc state (both common and vmw-specific) for the
* specified crtc.
*/
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
drm_atomic_helper_crtc_destroy_state(crtc, state);
}
/**
* vmw_du_plane_duplicate_state - duplicate plane state
* @plane: drm plane
*
* Allocates and returns a copy of the plane state (both common and
* vmw-specific) for the specified plane.
*
* Returns: The newly allocated plane state, or NULL on failure.
*/
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
struct drm_plane_state *state;
struct vmw_plane_state *vps;
vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
if (!vps)
return NULL;
vps->pinned = 0;
vps->cpp = 0;
memset(&vps->cursor, 0, sizeof(vps->cursor));
/* Each ref counted resource needs to be acquired again */
if (vps->surf)
(void) vmw_surface_reference(vps->surf);
if (vps->bo)
(void) vmw_bo_reference(vps->bo);
state = &vps->base;
__drm_atomic_helper_plane_duplicate_state(plane, state);
return state;
}
/**
* vmw_du_plane_reset - creates a blank vmw plane state
* @plane: drm plane
*
* Resets the atomic state for @plane by freeing the state pointer (which might
* be NULL, e.g. at driver load time) and allocating a new empty state object.
*/
void vmw_du_plane_reset(struct drm_plane *plane)
{
struct vmw_plane_state *vps;
if (plane->state)
vmw_du_plane_destroy_state(plane, plane->state);
vps = kzalloc(sizeof(*vps), GFP_KERNEL);
if (!vps) {
DRM_ERROR("Cannot allocate vmw_plane_state\n");
return;
}
__drm_atomic_helper_plane_reset(plane, &vps->base);
}
/**
* vmw_du_plane_destroy_state - destroy plane state
* @plane: DRM plane
* @state: state object to destroy
*
* Destroys the plane state (both common and vmw-specific) for the
* specified plane.
*/
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
/* Should have been freed by cleanup_fb */
if (vps->surf)
vmw_surface_unreference(&vps->surf);
if (vps->bo)
vmw_bo_unreference(&vps->bo);
drm_atomic_helper_plane_destroy_state(plane, state);
}
/**
* vmw_du_connector_duplicate_state - duplicate connector state
* @connector: DRM connector
*
* Allocates and returns a copy of the connector state (both common and
* vmw-specific) for the specified connector.
*
* Returns: The newly allocated connector state, or NULL on failure.
*/
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
struct drm_connector_state *state;
struct vmw_connector_state *vcs;
if (WARN_ON(!connector->state))
return NULL;
vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
if (!vcs)
return NULL;
state = &vcs->base;
__drm_atomic_helper_connector_duplicate_state(connector, state);
return state;
}
/**
* vmw_du_connector_reset - creates a blank vmw connector state
* @connector: DRM connector
*
* Resets the atomic state for @connector by freeing the state pointer (which
* might be NULL, e.g. at driver load time) and allocating a new empty state
* object.
*/
void vmw_du_connector_reset(struct drm_connector *connector)
{
struct vmw_connector_state *vcs;
if (connector->state) {
__drm_atomic_helper_connector_destroy_state(connector->state);
kfree(vmw_connector_state_to_vcs(connector->state));
}
vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
if (!vcs) {
DRM_ERROR("Cannot allocate vmw_connector_state\n");
return;
}
__drm_atomic_helper_connector_reset(connector, &vcs->base);
}
/**
* vmw_du_connector_destroy_state - destroy connector state
* @connector: DRM connector
* @state: state object to destroy
*
* Destroys the connector state (both common and vmw-specific) for the
* specified connector.
*/
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
drm_atomic_helper_connector_destroy_state(connector, state);
}
/*
* Generic framebuffer code
*/
/*
* Surface framebuffer code
*/
static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
drm_framebuffer_cleanup(framebuffer);
vmw_surface_unreference(&vfbs->surface);
kfree(vfbs);
}
/**
* vmw_kms_readback - Perform a readback from the screen system to
* a buffer-object backed framebuffer.
*
* @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm_file identifying the caller.
* Must be set to NULL if @user_fence_rep is NULL.
* @vfb: Pointer to the buffer-object backed framebuffer.
* @user_fence_rep: User-space provided structure for fence information.
* Must be set to non-NULL if @file_priv is non-NULL.
* @vclips: Array of clip rects.
* @num_clips: Number of clip rects in @vclips.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
int vmw_kms_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_vmw_rect *vclips,
uint32_t num_clips)
{
switch (dev_priv->active_display_unit) {
case vmw_du_screen_object:
return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
user_fence_rep, vclips, num_clips,
NULL);
case vmw_du_screen_target:
return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
user_fence_rep, NULL, vclips, num_clips,
1, NULL);
default:
WARN_ONCE(true,
"Readback called with invalid display system.\n");
}
return -ENOSYS;
}
static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
.dirty = drm_atomic_helper_dirtyfb,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_surface *surface,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd2
*mode_cmd,
bool is_bo_proxy)
{
struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
int ret;
/* 3D is only supported on HWv8 and newer hosts */
if (dev_priv->active_display_unit == vmw_du_legacy)
return -ENOSYS;
/*
* Sanity checks.
*/
if (!drm_any_plane_has_format(&dev_priv->drm,
mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
drm_dbg(&dev_priv->drm,
"unsupported pixel format %p4cc / modifier 0x%llx\n",
&mode_cmd->pixel_format, mode_cmd->modifier[0]);
return -EINVAL;
}
/* Surface must be marked as a scanout. */
if (unlikely(!surface->metadata.scanout))
return -EINVAL;
if (unlikely(surface->metadata.mip_levels[0] != 1 ||
surface->metadata.num_sizes != 1 ||
surface->metadata.base_size.width < mode_cmd->width ||
surface->metadata.base_size.height < mode_cmd->height ||
surface->metadata.base_size.depth != 1)) {
DRM_ERROR("Incompatible surface dimensions "
"for requested mode.\n");
return -EINVAL;
}
switch (mode_cmd->pixel_format) {
case DRM_FORMAT_ARGB8888:
format = SVGA3D_A8R8G8B8;
break;
case DRM_FORMAT_XRGB8888:
format = SVGA3D_X8R8G8B8;
break;
case DRM_FORMAT_RGB565:
format = SVGA3D_R5G6B5;
break;
case DRM_FORMAT_XRGB1555:
format = SVGA3D_A1R5G5B5;
break;
default:
DRM_ERROR("Invalid pixel format: %p4cc\n",
&mode_cmd->pixel_format);
return -EINVAL;
}
/*
* For DX, surface format validation is done when surface->scanout
* is set.
*/
if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
DRM_ERROR("Invalid surface format for requested mode.\n");
return -EINVAL;
}
vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
if (!vfbs) {
ret = -ENOMEM;
goto out_err1;
}
drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
vfbs->surface = vmw_surface_reference(surface);
vfbs->base.user_handle = mode_cmd->handles[0];
vfbs->is_bo_proxy = is_bo_proxy;
*out = &vfbs->base;
ret = drm_framebuffer_init(dev, &vfbs->base.base,
&vmw_framebuffer_surface_funcs);
if (ret)
goto out_err2;
return 0;
out_err2:
vmw_surface_unreference(&surface);
kfree(vfbs);
out_err1:
return ret;
}
/*
* Buffer-object framebuffer code
*/
static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(fb);
return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}
static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
drm_framebuffer_cleanup(framebuffer);
vmw_bo_unreference(&vfbd->buffer);
kfree(vfbd);
}
static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
.create_handle = vmw_framebuffer_bo_create_handle,
.destroy = vmw_framebuffer_bo_destroy,
.dirty = drm_atomic_helper_dirtyfb,
};
/**
* vmw_create_bo_proxy - create a proxy surface for the buffer object
*
* @dev: DRM device
* @mode_cmd: parameters for the new surface
* @bo_mob: MOB backing the buffer object
* @srf_out: newly created surface
*
* When the content FB is a buffer object, we create a surface as a proxy to the
* same buffer. This way we can do a surface copy rather than a surface DMA.
* This is a more efficient approach.
*
* RETURNS:
* 0 on success, error code otherwise
*/
static int vmw_create_bo_proxy(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct vmw_bo *bo_mob,
struct vmw_surface **srf_out)
{
struct vmw_surface_metadata metadata = {0};
uint32_t format;
struct vmw_resource *res;
unsigned int bytes_pp;
int ret;
switch (mode_cmd->pixel_format) {
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
format = SVGA3D_X8R8G8B8;
bytes_pp = 4;
break;
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB1555:
format = SVGA3D_R5G6B5;
bytes_pp = 2;
break;
case 8:
format = SVGA3D_P8;
bytes_pp = 1;
break;
default:
DRM_ERROR("Invalid framebuffer format %p4cc\n",
&mode_cmd->pixel_format);
return -EINVAL;
}
metadata.format = format;
metadata.mip_levels[0] = 1;
metadata.num_sizes = 1;
metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
metadata.base_size.height = mode_cmd->height;
metadata.base_size.depth = 1;
metadata.scanout = true;
ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
if (ret) {
DRM_ERROR("Failed to allocate proxy content buffer\n");
return ret;
}
res = &(*srf_out)->res;
/* Reserve and switch the backing mob. */
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void) vmw_resource_reserve(res, false, true);
vmw_bo_unreference(&res->guest_memory_bo);
res->guest_memory_bo = vmw_bo_reference(bo_mob);
res->guest_memory_offset = 0;
vmw_resource_unreserve(res, false, false, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
return 0;
}
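/*
* vmw_kms_new_framebuffer_bo - Wrap a buffer object in a struct
* vmw_framebuffer after validating that the buffer is large enough for the
* requested mode and that the pixel format is supported.
*/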
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
struct vmw_bo *bo,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd2
*mode_cmd)
{
struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_bo *vfbd;
unsigned int requested_size;
int ret;
requested_size = mode_cmd->height * mode_cmd->pitches[0];
if (unlikely(requested_size > bo->tbo.base.size)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
return -EINVAL;
}
if (!drm_any_plane_has_format(&dev_priv->drm,
mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
drm_dbg(&dev_priv->drm,
"unsupported pixel format %p4cc / modifier 0x%llx\n",
&mode_cmd->pixel_format, mode_cmd->modifier[0]);
return -EINVAL;
}
vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
if (!vfbd) {
ret = -ENOMEM;
goto out_err1;
}
vfbd->base.base.obj[0] = &bo->tbo.base;
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
vfbd->base.user_handle = mode_cmd->handles[0];
*out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base,
&vmw_framebuffer_bo_funcs);
if (ret)
goto out_err2;
return 0;
out_err2:
vmw_bo_unreference(&bo);
kfree(vfbd);
out_err1:
return ret;
}
/**
* vmw_kms_srf_ok - check if a surface can be created
*
* @dev_priv: Pointer to device private struct.
* @width: requested width
* @height: requested height
*
* Surfaces must not exceed the maximum texture size.
*/
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
if (width > dev_priv->texture_max_width ||
height > dev_priv->texture_max_height)
return false;
return true;
}
/**
* vmw_kms_new_framebuffer - Create a new framebuffer.
*
* @dev_priv: Pointer to device private struct.
* @bo: Pointer to buffer object to wrap the kms framebuffer around.
* Either @bo or @surface must be NULL.
* @surface: Pointer to a surface to wrap the kms framebuffer around.
* Either @bo or @surface must be NULL.
* @only_2d: No presents will occur to this buffer object based framebuffer.
* This helps the code to do some important optimizations.
* @mode_cmd: Frame-buffer metadata.
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_bo *bo,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
bool is_bo_proxy = false;
int ret;
/*
* We cannot use the SurfaceDMA command in a non-accelerated VM;
* therefore, wrap the buffer object in a surface so we can use the
* SurfaceCopy command.
*/
if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
bo && only_2d &&
mode_cmd->width > 64 && /* Don't create a proxy for cursor */
dev_priv->active_display_unit == vmw_du_screen_target) {
ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
bo, &surface);
if (ret)
return ERR_PTR(ret);
is_bo_proxy = true;
}
/* Create the new framebuffer depending on what we have */
if (surface) {
ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
mode_cmd,
is_bo_proxy);
/*
* vmw_create_bo_proxy() adds a reference that is no longer
* needed
*/
if (is_bo_proxy)
vmw_surface_unreference(&surface);
} else if (bo) {
ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
mode_cmd);
} else {
BUG();
}
if (ret)
return ERR_PTR(ret);
return vfb;
}
/*
* Generic Kernel modesetting functions
*/
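/*
* vmw_kms_fb_create - .fb_create implementation: look up the user handle as
* either a surface or a buffer object and wrap it in a vmw_framebuffer.
*/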
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
struct vmw_bo *bo = NULL;
int ret;
/* returns either a bo or surface */
ret = vmw_user_lookup_handle(dev_priv, file_priv,
mode_cmd->handles[0],
&surface, &bo);
if (ret) {
DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
mode_cmd->handles[0], mode_cmd->handles[0]);
goto err_out;
}
if (!bo &&
!vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
DRM_ERROR("Surface size cannot exceed %dx%d\n",
dev_priv->texture_max_width,
dev_priv->texture_max_height);
goto err_out;
}
vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
!(dev_priv->capabilities & SVGA_CAP_3D),
mode_cmd);
if (IS_ERR(vfb)) {
ret = PTR_ERR(vfb);
goto err_out;
}
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo)
vmw_user_bo_unref(bo);
if (surface)
vmw_surface_unreference(&surface);
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
return ERR_PTR(ret);
}
return &vfb->base;
}
/**
* vmw_kms_check_display_memory - Validates display memory required for a
* topology
* @dev: DRM device
* @num_rects: number of drm_rect in rects
* @rects: array of drm_rect representing the topology to validate indexed by
* crtc index.
*
* Returns:
* 0 on success otherwise negative error code
*/
static int vmw_kms_check_display_memory(struct drm_device *dev,
uint32_t num_rects,
struct drm_rect *rects)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_rect bounding_box = {0};
u64 total_pixels = 0, pixel_mem, bb_mem;
int i;
for (i = 0; i < num_rects; i++) {
/*
* For STDU, only the individual screen (screen target) is limited by
* the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
*/
if (dev_priv->active_display_unit == vmw_du_screen_target &&
(drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
VMW_DEBUG_KMS("Screen size not supported.\n");
return -EINVAL;
}
/* Bounding box upper left is at (0,0). */
if (rects[i].x2 > bounding_box.x2)
bounding_box.x2 = rects[i].x2;
if (rects[i].y2 > bounding_box.y2)
bounding_box.y2 = rects[i].y2;
total_pixels += (u64) drm_rect_width(&rects[i]) *
(u64) drm_rect_height(&rects[i]);
}
/* Virtual svga device primary limits are always in 32-bpp. */
pixel_mem = total_pixels * 4;
/*
* For HV10 and below prim_bb_mem is the vram size. When
* SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram size
* is the limit on the primary bounding box.
*/
if (pixel_mem > dev_priv->max_primary_mem) {
VMW_DEBUG_KMS("Combined output size too large.\n");
return -EINVAL;
}
/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
if (dev_priv->active_display_unit != vmw_du_screen_target ||
!(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
if (bb_mem > dev_priv->max_primary_mem) {
VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
return -EINVAL;
}
}
return 0;
}
/**
* vmw_crtc_state_and_lock - Return new or current crtc state with locked
* crtc mutex
* @state: The atomic state pointer containing the new atomic state
* @crtc: The crtc
*
* This function returns the new crtc state if it's part of the state update.
* Otherwise returns the current crtc state. It also makes sure that the
* crtc mutex is locked.
*
* Returns: A valid crtc state pointer or NULL. It may also return a
* pointer error, in particular -EDEADLK if locking needs to be rerun.
*/
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_crtc_state *crtc_state;
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (crtc_state) {
lockdep_assert_held(&crtc->mutex.mutex.base);
} else {
int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
if (ret != 0 && ret != -EALREADY)
return ERR_PTR(ret);
crtc_state = crtc->state;
}
return crtc_state;
}
/**
* vmw_kms_check_implicit - Verify that all implicit display units scan out
* from the same fb after the new state is committed.
* @dev: The drm_device.
* @state: The new state to be checked.
*
* Returns:
* Zero on success,
* -EINVAL on invalid state,
* -EDEADLK if modeset locking needs to be rerun.
*/
static int vmw_kms_check_implicit(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_framebuffer *implicit_fb = NULL;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_plane_state *plane_state;
drm_for_each_crtc(crtc, dev) {
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
if (!du->is_implicit)
continue;
crtc_state = vmw_crtc_state_and_lock(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
if (!crtc_state || !crtc_state->enable)
continue;
/*
* Can't move primary planes across crtcs, so this is OK.
* It also means we don't need to take the plane mutex.
*/
plane_state = du->primary.state;
if (plane_state->crtc != crtc)
continue;
if (!implicit_fb)
implicit_fb = plane_state->fb;
else if (implicit_fb != plane_state->fb)
return -EINVAL;
}
return 0;
}
/**
* vmw_kms_check_topology - Validates topology in drm_atomic_state
* @dev: DRM device
* @state: the driver state object
*
* Returns:
* 0 on success otherwise negative error code
*/
static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_rect *rects;
struct drm_crtc *crtc;
uint32_t i;
int ret = 0;
rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
GFP_KERNEL);
if (!rects)
return -ENOMEM;
drm_for_each_crtc(crtc, dev) {
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct drm_crtc_state *crtc_state;
i = drm_crtc_index(crtc);
crtc_state = vmw_crtc_state_and_lock(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto clean;
}
if (!crtc_state)
continue;
if (crtc_state->enable) {
rects[i].x1 = du->gui_x;
rects[i].y1 = du->gui_y;
rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
} else {
rects[i].x1 = 0;
rects[i].y1 = 0;
rects[i].x2 = 0;
rects[i].y2 = 0;
}
}
/* Determine change to topology due to new atomic state */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct vmw_connector_state *vmw_conn_state;
if (!du->pref_active && new_crtc_state->enable) {
VMW_DEBUG_KMS("Enabling a disabled display unit\n");
ret = -EINVAL;
goto clean;
}
/*
* For vmwgfx each crtc has only one connector attached and it
* is not changed so don't really need to check the
* crtc->connector_mask and iterate over it.
*/
connector = &du->connector;
conn_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(conn_state)) {
ret = PTR_ERR(conn_state);
goto clean;
}
vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
vmw_conn_state->gui_x = du->gui_x;
vmw_conn_state->gui_y = du->gui_y;
}
ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
rects);
clean:
kfree(rects);
return ret;
}
/**
* vmw_kms_atomic_check_modeset - validate state object for modeset changes
*
* @dev: DRM device
* @state: the driver state object
*
* This is a simple wrapper around drm_atomic_helper_check() that also
* lets us assign a value to mode->crtc_clock so that
* drm_calc_timestamping_constants() won't throw an error message.
*
* Returns:
* Zero for success or -errno
*/
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
bool need_modeset = false;
int i, ret;
ret = drm_atomic_helper_check(dev, state);
if (ret)
return ret;
ret = vmw_kms_check_implicit(dev, state);
if (ret) {
VMW_DEBUG_KMS("Invalid implicit state\n");
return ret;
}
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state))
need_modeset = true;
}
if (need_modeset)
return vmw_kms_check_topology(dev, state);
return ret;
}
static const struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
.atomic_check = vmw_kms_atomic_check_modeset,
.atomic_commit = drm_atomic_helper_commit,
};
static int vmw_kms_generic_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct vmw_surface *surface,
uint32_t sid,
int32_t destX, int32_t destY,
struct drm_vmw_rect *clips,
uint32_t num_clips)
{
return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
&surface->res, destX, destY,
num_clips, 1, NULL, NULL);
}
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct vmw_surface *surface,
uint32_t sid,
int32_t destX, int32_t destY,
struct drm_vmw_rect *clips,
uint32_t num_clips)
{
int ret;
switch (dev_priv->active_display_unit) {
case vmw_du_screen_target:
ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
&surface->res, destX, destY,
num_clips, 1, NULL, NULL);
break;
case vmw_du_screen_object:
ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
sid, destX, destY, clips,
num_clips);
break;
default:
WARN_ONCE(true,
"Present called with invalid display system.\n");
ret = -ENOSYS;
break;
}
if (ret)
return ret;
vmw_cmd_flush(dev_priv, false);
return 0;
}
static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
if (dev_priv->hotplug_mode_update_property)
return;
dev_priv->hotplug_mode_update_property =
drm_property_create_range(&dev_priv->drm,
DRM_MODE_PROP_IMMUTABLE,
"hotplug_mode_update", 0, 1);
}
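/*
* vmw_kms_init - Initialize the mode config limits and properties, then probe
* the display systems in order of preference: Screen Target, Screen Object
* and finally Legacy.
*/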
int vmw_kms_init(struct vmw_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
int ret;
static const char *display_unit_names[] = {
"Invalid",
"Legacy",
"Screen Object",
"Screen Target",
"Invalid (max)"
};
drm_mode_config_init(dev);
dev->mode_config.funcs = &vmw_kms_funcs;
dev->mode_config.min_width = 1;
dev->mode_config.min_height = 1;
dev->mode_config.max_width = dev_priv->texture_max_width;
dev->mode_config.max_height = dev_priv->texture_max_height;
dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
drm_mode_create_suggested_offset_properties(dev);
vmw_kms_create_hotplug_mode_update_property(dev_priv);
ret = vmw_kms_stdu_init_display(dev_priv);
if (ret) {
ret = vmw_kms_sou_init_display(dev_priv);
if (ret) /* Fallback */
ret = vmw_kms_ldu_init_display(dev_priv);
}
BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
drm_info(&dev_priv->drm, "%s display unit initialized\n",
display_unit_names[dev_priv->active_display_unit]);
return ret;
}
int vmw_kms_close(struct vmw_private *dev_priv)
{
int ret = 0;
/*
* The docs say we should take the lock before calling this function,
* but since it destroys encoders and our destructor calls
* drm_encoder_cleanup(), which takes the lock, we would deadlock.
*/
drm_mode_config_cleanup(&dev_priv->drm);
if (dev_priv->active_display_unit == vmw_du_legacy)
ret = vmw_kms_ldu_close_display(dev_priv);
return ret;
}
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_cursor_bypass_arg *arg = data;
struct vmw_display_unit *du;
struct drm_crtc *crtc;
int ret = 0;
mutex_lock(&dev->mode_config.mutex);
if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
du = vmw_crtc_to_du(crtc);
du->hotspot_x = arg->xhot;
du->hotspot_y = arg->yhot;
}
mutex_unlock(&dev->mode_config.mutex);
return 0;
}
crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
if (!crtc) {
ret = -ENOENT;
goto out;
}
du = vmw_crtc_to_du(crtc);
du->hotspot_x = arg->xhot;
du->hotspot_y = arg->yhot;
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth)
{
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
else if (vmw_fifo_have_pitchlock(vmw_priv))
vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
return -EINVAL;
}
return 0;
}
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height)
{
return ((u64) pitch * (u64) height) < (u64)
((dev_priv->active_display_unit == vmw_du_screen_target) ?
dev_priv->max_primary_mem : dev_priv->vram_size);
}
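/*
* Worked example of the check above (values are illustrative, not from the
* device): for a 1920x1080 mode at 32 bpp the callers pass
* pitch = hdisplay * 4, so the memory needed is
*
*   1920 * 4 = 7680 bytes per line
*   7680 * 1080 = 8294400 bytes (roughly 7.9 MiB)
*
* which must be strictly less than max_primary_mem for screen targets or
* vram_size for the other display units.
*/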
/**
* vmw_du_update_layout - Update the display unit with topology from resolution
* plugin and generate DRM uevent
* @dev_priv: device private
* @num_rects: number of drm_rect in rects
* @rects: topology to update
*/
static int vmw_du_update_layout(struct vmw_private *dev_priv,
unsigned int num_rects, struct drm_rect *rects)
{
struct drm_device *dev = &dev_priv->drm;
struct vmw_display_unit *du;
struct drm_connector *con;
struct drm_connector_list_iter conn_iter;
struct drm_modeset_acquire_ctx ctx;
struct drm_crtc *crtc;
int ret;
/* Currently gui_x/y is protected with the crtc mutex */
mutex_lock(&dev->mode_config.mutex);
drm_modeset_acquire_init(&ctx, 0);
retry:
drm_for_each_crtc(crtc, dev) {
ret = drm_modeset_lock(&crtc->mutex, &ctx);
if (ret < 0) {
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
}
goto out_fini;
}
}
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(con, &conn_iter) {
du = vmw_connector_to_du(con);
if (num_rects > du->unit) {
du->pref_width = drm_rect_width(&rects[du->unit]);
du->pref_height = drm_rect_height(&rects[du->unit]);
du->pref_active = true;
du->gui_x = rects[du->unit].x1;
du->gui_y = rects[du->unit].y1;
} else {
du->pref_width = VMWGFX_MIN_INITIAL_WIDTH;
du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
du->pref_active = false;
du->gui_x = 0;
du->gui_y = 0;
}
}
drm_connector_list_iter_end(&conn_iter);
list_for_each_entry(con, &dev->mode_config.connector_list, head) {
du = vmw_connector_to_du(con);
if (num_rects > du->unit) {
drm_object_property_set_value
(&con->base, dev->mode_config.suggested_x_property,
du->gui_x);
drm_object_property_set_value
(&con->base, dev->mode_config.suggested_y_property,
du->gui_y);
} else {
drm_object_property_set_value
(&con->base, dev->mode_config.suggested_x_property,
0);
drm_object_property_set_value
(&con->base, dev->mode_config.suggested_y_property,
0);
}
con->status = vmw_du_connector_detect(con, true);
}
out_fini:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
drm_sysfs_hotplug_event(dev);
return 0;
}
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
int i;
for (i = 0; i < size; i++) {
DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
r[i], g[i], b[i]);
vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
}
return 0;
}
int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
return 0;
}
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
uint32_t num_displays;
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_display_unit *du = vmw_connector_to_du(connector);
num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
return ((vmw_connector_to_du(connector)->unit < num_displays &&
du->pref_active) ?
connector_status_connected : connector_status_disconnected);
}
static struct drm_display_mode vmw_kms_connector_builtin[] = {
/* 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 489, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
1472, 1664, 0, 720, 723, 728, 748, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1440@60Hz */
{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2880x1800@60Hz */
{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 3840x2160@60Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 3840x2400@60Hz */
{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* Terminate */
{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};
/**
* vmw_guess_mode_timing - Provide fake timings for a
* 60Hz vrefresh mode.
*
* @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
* members filled in.
*/
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
mode->hsync_start = mode->hdisplay + 50;
mode->hsync_end = mode->hsync_start + 50;
mode->htotal = mode->hsync_end + 50;
mode->vsync_start = mode->vdisplay + 50;
mode->vsync_end = mode->vsync_start + 50;
mode->vtotal = mode->vsync_end + 50;
mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
}
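/*
* Worked example (illustrative values): for an assumed 1920x1080 mode the
* function above produces htotal = 1920 + 150 = 2070 and
* vtotal = 1080 + 150 = 1230, so
*
*   clock = 2070 * 1230 / 100 * 6 = 152766 kHz
*
* and 152766000 / (2070 * 1230) = 60, i.e. the fake timings are consistent
* with the intended 60Hz vrefresh.
*/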
int vmw_du_connector_fill_modes(struct drm_connector *connector,
uint32_t max_width, uint32_t max_height)
{
struct vmw_display_unit *du = vmw_connector_to_du(connector);
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_display_mode *mode = NULL;
struct drm_display_mode *bmode;
struct drm_display_mode prefmode = { DRM_MODE("preferred",
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
int i;
u32 assumed_bpp = 4;
if (dev_priv->assume_16bpp)
assumed_bpp = 2;
max_width = min(max_width, dev_priv->texture_max_width);
max_height = min(max_height, dev_priv->texture_max_height);
/*
* For STDU, a mode is additionally limited by the
* SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
*/
if (dev_priv->active_display_unit == vmw_du_screen_target) {
max_width = min(max_width, dev_priv->stdu_max_width);
max_height = min(max_height, dev_priv->stdu_max_height);
}
/* Add preferred mode */
mode = drm_mode_duplicate(dev, &prefmode);
if (!mode)
return 0;
mode->hdisplay = du->pref_width;
mode->vdisplay = du->pref_height;
vmw_guess_mode_timing(mode);
drm_mode_set_name(mode);
if (vmw_kms_validate_mode_vram(dev_priv,
mode->hdisplay * assumed_bpp,
mode->vdisplay)) {
drm_mode_probed_add(connector, mode);
} else {
drm_mode_destroy(dev, mode);
mode = NULL;
}
if (du->pref_mode) {
list_del_init(&du->pref_mode->head);
drm_mode_destroy(dev, du->pref_mode);
}
/* mode might be null here, this is intended */
du->pref_mode = mode;
for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
bmode = &vmw_kms_connector_builtin[i];
if (bmode->hdisplay > max_width ||
bmode->vdisplay > max_height)
continue;
if (!vmw_kms_validate_mode_vram(dev_priv,
bmode->hdisplay * assumed_bpp,
bmode->vdisplay))
continue;
mode = drm_mode_duplicate(dev, bmode);
if (!mode)
return 0;
drm_mode_probed_add(connector, mode);
}
drm_connector_list_update(connector);
/* Move the preferred mode first, to help apps pick the right mode. */
drm_mode_sort(&connector->modes);
return 1;
}
/**
* vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Update preferred topology of display unit as per ioctl request. The topology
* is expressed as array of drm_vmw_rect.
* e.g.
* [0 0 640 480] [640 0 800 600] [0 480 640 480]
*
* NOTE:
* The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
* Besides the device limit on topology, x + w and y + h (lower right) cannot
* be greater than INT_MAX. A topology beyond these limits will return an error.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data;
void __user *user_rects;
struct drm_vmw_rect *rects;
struct drm_rect *drm_rects;
unsigned rects_size;
int ret, i;
if (!arg->num_outputs) {
struct drm_rect def_rect = {0, 0,
VMWGFX_MIN_INITIAL_WIDTH,
VMWGFX_MIN_INITIAL_HEIGHT};
vmw_du_update_layout(dev_priv, 1, &def_rect);
return 0;
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
GFP_KERNEL);
if (unlikely(!rects))
return -ENOMEM;
user_rects = (void __user *)(unsigned long)arg->rects;
ret = copy_from_user(rects, user_rects, rects_size);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to get rects.\n");
ret = -EFAULT;
goto out_free;
}
drm_rects = (struct drm_rect *)rects;
VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
for (i = 0; i < arg->num_outputs; i++) {
struct drm_vmw_rect curr_rect;
/* Verify user-space rects for overflow, as the kernel uses drm_rect */
if ((rects[i].x + rects[i].w > INT_MAX) ||
(rects[i].y + rects[i].h > INT_MAX)) {
ret = -ERANGE;
goto out_free;
}
curr_rect = rects[i];
drm_rects[i].x1 = curr_rect.x;
drm_rects[i].y1 = curr_rect.y;
drm_rects[i].x2 = curr_rect.x + curr_rect.w;
drm_rects[i].y2 = curr_rect.y + curr_rect.h;
VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
drm_rects[i].x1, drm_rects[i].y1,
drm_rects[i].x2, drm_rects[i].y2);
/*
* Currently this check limits the topology to mode_config->max
* (which is actually the maximum texture size supported by the
* virtual device). This limit is here to address window managers
* that create one big framebuffer for the whole topology.
*/
if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
drm_rects[i].x2 > mode_config->max_width ||
drm_rects[i].y2 > mode_config->max_height) {
VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
drm_rects[i].x1, drm_rects[i].y1,
drm_rects[i].x2, drm_rects[i].y2);
ret = -EINVAL;
goto out_free;
}
}
ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
if (ret == 0)
vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
out_free:
kfree(rects);
return ret;
}
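/*
* Illustrative example (hypothetical layout): two 1920x1080 outputs placed
* side by side would be passed to this ioctl as
*
*   struct drm_vmw_rect rects[] = {
*           { .x = 0,    .y = 0, .w = 1920, .h = 1080 },
*           { .x = 1920, .y = 0, .w = 1920, .h = 1080 },
*   };
*
* which the loop above converts to the drm_rect spans [0, 1920)x[0, 1080)
* and [1920, 3840)x[0, 1080) before the display memory check.
*/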
/**
* vmw_kms_helper_dirty - Helper to build commands and perform actions based
* on a set of cliprects and a set of display units.
*
* @dev_priv: Pointer to a device private structure.
* @framebuffer: Pointer to the framebuffer on which to perform the actions.
* @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
* Cliprects are given in framebuffer coordinates.
* @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
* be NULL. Cliprects are given in source coordinates.
* @dest_x: X coordinate offset for the crtc / destination clip rects.
* @dest_y: Y coordinate offset for the crtc / destination clip rects.
* @num_clips: Number of cliprects in the @clips or @vclips array.
* @increment: Integer with which to increment the clip counter when looping.
* Used to skip a predetermined number of clip rects.
* @dirty: Closure structure. See the description of struct vmw_kms_dirty.
*/
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
const struct drm_clip_rect *clips,
const struct drm_vmw_rect *vclips,
s32 dest_x, s32 dest_y,
int num_clips,
int increment,
struct vmw_kms_dirty *dirty)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_crtc *crtc;
u32 num_units = 0;
u32 i, k;
dirty->dev_priv = dev_priv;
/* If crtc is passed, no need to iterate over other display units */
if (dirty->crtc) {
units[num_units++] = vmw_crtc_to_du(dirty->crtc);
} else {
list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
head) {
struct drm_plane *plane = crtc->primary;
if (plane->state->fb == &framebuffer->base)
units[num_units++] = vmw_crtc_to_du(crtc);
}
}
for (k = 0; k < num_units; k++) {
struct vmw_display_unit *unit = units[k];
s32 crtc_x = unit->crtc.x;
s32 crtc_y = unit->crtc.y;
s32 crtc_width = unit->crtc.mode.hdisplay;
s32 crtc_height = unit->crtc.mode.vdisplay;
const struct drm_clip_rect *clips_ptr = clips;
const struct drm_vmw_rect *vclips_ptr = vclips;
dirty->unit = unit;
if (dirty->fifo_reserve_size > 0) {
dirty->cmd = VMW_CMD_RESERVE(dev_priv,
dirty->fifo_reserve_size);
if (!dirty->cmd)
return -ENOMEM;
memset(dirty->cmd, 0, dirty->fifo_reserve_size);
}
dirty->num_hits = 0;
for (i = 0; i < num_clips; i++, clips_ptr += increment,
vclips_ptr += increment) {
s32 clip_left;
s32 clip_top;
/*
* Select clip array type. Note that integer type
* in @clips is unsigned short, whereas in @vclips
* it's 32-bit.
*/
if (clips) {
dirty->fb_x = (s32) clips_ptr->x1;
dirty->fb_y = (s32) clips_ptr->y1;
dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
crtc_x;
dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
crtc_y;
} else {
dirty->fb_x = vclips_ptr->x;
dirty->fb_y = vclips_ptr->y;
dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
dest_x - crtc_x;
dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
dest_y - crtc_y;
}
dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
/* Skip this clip if it's outside the crtc region */
if (dirty->unit_x1 >= crtc_width ||
dirty->unit_y1 >= crtc_height ||
dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
continue;
/* Clip right and bottom to crtc limits */
dirty->unit_x2 = min_t(s32, dirty->unit_x2,
crtc_width);
dirty->unit_y2 = min_t(s32, dirty->unit_y2,
crtc_height);
/* Clip left and top to crtc limits */
clip_left = min_t(s32, dirty->unit_x1, 0);
clip_top = min_t(s32, dirty->unit_y1, 0);
dirty->unit_x1 -= clip_left;
dirty->unit_y1 -= clip_top;
dirty->fb_x -= clip_left;
dirty->fb_y -= clip_top;
dirty->clip(dirty);
}
dirty->fifo_commit(dirty);
}
return 0;
}
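/*
* Minimal usage sketch for the helper above (illustrative only; the names
* example_clip/example_commit/blit_size are hypothetical and the callback
* signatures are assumed to match struct vmw_kms_dirty in vmwgfx_kms.h):
*
*   static void example_clip(struct vmw_kms_dirty *dirty)
*   {
*           // encode one blit for dirty->unit_x1/y1/x2/y2 into dirty->cmd
*           dirty->num_hits++;
*   }
*
*   static void example_commit(struct vmw_kms_dirty *dirty)
*   {
*           vmw_cmd_commit(dirty->dev_priv, dirty->num_hits * blit_size);
*   }
*
*   struct vmw_kms_dirty dirty = {
*           .clip = example_clip,
*           .fifo_commit = example_commit,
*           .fifo_reserve_size = num_clips * blit_size,
*   };
*
*   vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips, 0, 0, num_clips, 1,
*                        &dirty);
*/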
/**
* vmw_kms_helper_validation_finish - Helper for post KMS command submission
* cleanup and fencing
* @dev_priv: Pointer to the device-private struct
* @file_priv: Pointer identifying the client when user-space fencing is used
* @ctx: Pointer to the validation context
* @out_fence: If non-NULL, returned refcounted fence-pointer
* @user_fence_rep: If non-NULL, pointer to user-space address area
* in which to copy user-space fence info
*/
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_validation_context *ctx,
struct vmw_fence_obj **out_fence,
struct drm_vmw_fence_rep __user *
user_fence_rep)
{
struct vmw_fence_obj *fence = NULL;
uint32_t handle = 0;
int ret = 0;
if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
out_fence)
ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
file_priv ? &handle : NULL);
vmw_validation_done(ctx, fence);
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
handle, -1);
if (out_fence)
*out_fence = fence;
else
vmw_fence_obj_unreference(&fence);
}
/**
* vmw_kms_update_proxy - Helper function to update a proxy surface from
* its backing MOB.
*
* @res: Pointer to the surface resource
* @clips: Clip rects in framebuffer (surface) space.
* @num_clips: Number of clips in @clips.
* @increment: Integer with which to increment the clip counter when looping.
* Used to skip a predetermined number of clip rects.
*
* This function makes sure the proxy surface is updated from its backing MOB
* using the region given by @clips. The surface resource @res and its backing
* MOB need to be reserved and validated on call.
*/
int vmw_kms_update_proxy(struct vmw_resource *res,
const struct drm_clip_rect *clips,
unsigned num_clips,
int increment)
{
struct vmw_private *dev_priv = res->dev_priv;
struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdUpdateGBImage body;
} *cmd;
SVGA3dBox *box;
size_t copy_size = 0;
int i;
if (!clips)
return 0;
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
if (!cmd)
return -ENOMEM;
for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
box = &cmd->body.box;
cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
cmd->header.size = sizeof(cmd->body);
cmd->body.image.sid = res->id;
cmd->body.image.face = 0;
cmd->body.image.mipmap = 0;
if (clips->x1 > size->width || clips->x2 > size->width ||
clips->y1 > size->height || clips->y2 > size->height) {
DRM_ERROR("Invalid clips outsize of framebuffer.\n");
return -EINVAL;
}
box->x = clips->x1;
box->y = clips->y1;
box->z = 0;
box->w = clips->x2 - clips->x1;
box->h = clips->y2 - clips->y1;
box->d = 1;
copy_size += sizeof(*cmd);
}
vmw_cmd_commit(dev_priv, copy_size);
return 0;
}
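/*
* Worked example of the clip-to-box conversion above (illustrative values):
* a clip rect { .x1 = 10, .y1 = 20, .x2 = 110, .y2 = 220 } becomes the box
* { .x = 10, .y = 20, .z = 0, .w = 100, .h = 200, .d = 1 }, i.e. one
* SVGA_3D_CMD_UPDATE_GB_IMAGE is emitted per clip rect.
*/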
/**
* vmw_kms_create_implicit_placement_property - Set up the implicit placement
* property.
*
* @dev_priv: Pointer to a device private struct.
*
* Sets up the implicit placement property unless it's already set up.
*/
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
if (dev_priv->implicit_placement_property)
return;
dev_priv->implicit_placement_property =
drm_property_create_range(&dev_priv->drm,
DRM_MODE_PROP_IMMUTABLE,
"implicit_placement", 0, 1);
}
/**
* vmw_kms_suspend - Save modesetting state and turn modesetting off.
*
* @dev: Pointer to the drm device
* Return: 0 on success. Negative error code on failure.
*/
int vmw_kms_suspend(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
if (IS_ERR(dev_priv->suspend_state)) {
int ret = PTR_ERR(dev_priv->suspend_state);
DRM_ERROR("Failed kms suspend: %d\n", ret);
dev_priv->suspend_state = NULL;
return ret;
}
return 0;
}
/**
* vmw_kms_resume - Re-enable modesetting and restore state
*
* @dev: Pointer to the drm device
* Return: 0 on success. Negative error code on failure.
*
* State is resumed from a previous vmw_kms_suspend(). It's illegal
* to call this function without a previous vmw_kms_suspend().
*/
int vmw_kms_resume(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
int ret;
if (WARN_ON(!dev_priv->suspend_state))
return 0;
ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
dev_priv->suspend_state = NULL;
return ret;
}
/**
* vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
*
* @dev: Pointer to the drm device
*/
void vmw_kms_lost_device(struct drm_device *dev)
{
drm_atomic_helper_shutdown(dev);
}
/**
* vmw_du_helper_plane_update - Helper to do plane update on a display unit.
* @update: The closure structure.
*
* Call this helper after setting callbacks in &vmw_du_update_plane to do plane
* update on display unit.
*
* Return: 0 on success or a negative error code on failure.
*/
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
{
struct drm_plane_state *state = update->plane->state;
struct drm_plane_state *old_state = update->old_state;
struct drm_atomic_helper_damage_iter iter;
struct drm_rect clip;
struct drm_rect bb;
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
uint32_t reserved_size = 0;
uint32_t submit_size = 0;
uint32_t curr_size = 0;
uint32_t num_hits = 0;
void *cmd_start;
char *cmd_next;
int ret;
/*
* Iterate in advance to check whether a plane update is really needed and
* to find the number of clips that actually fall within the plane src, for
* fifo allocation.
*/
drm_atomic_helper_damage_iter_init(&iter, old_state, state);
drm_atomic_for_each_plane_damage(&iter, &clip)
num_hits++;
if (num_hits == 0)
return 0;
if (update->vfb->bo) {
struct vmw_framebuffer_bo *vfbbo =
container_of(update->vfb, typeof(*vfbbo), base);
/*
* For screen targets we want a mappable bo; for everything else we want an
* accelerated, i.e. host backed (vram or gmr), bo. If the display unit
* is not a screen target then MOBs shouldn't be available.
*/
if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
vmw_bo_placement_set(vfbbo->buffer,
VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
} else {
WARN_ON(update->dev_priv->has_mob);
vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
}
ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
} else {
struct vmw_framebuffer_surface *vfbs =
container_of(update->vfb, typeof(*vfbs), base);
ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
0, VMW_RES_DIRTY_NONE, NULL,
NULL);
}
if (ret)
return ret;
ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
if (ret)
goto out_unref;
reserved_size = update->calc_fifo_size(update, num_hits);
cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
if (!cmd_start) {
ret = -ENOMEM;
goto out_revert;
}
cmd_next = cmd_start;
if (update->post_prepare) {
curr_size = update->post_prepare(update, cmd_next);
cmd_next += curr_size;
submit_size += curr_size;
}
if (update->pre_clip) {
curr_size = update->pre_clip(update, cmd_next, num_hits);
cmd_next += curr_size;
submit_size += curr_size;
}
bb.x1 = INT_MAX;
bb.y1 = INT_MAX;
bb.x2 = INT_MIN;
bb.y2 = INT_MIN;
drm_atomic_helper_damage_iter_init(&iter, old_state, state);
drm_atomic_for_each_plane_damage(&iter, &clip) {
uint32_t fb_x = clip.x1;
uint32_t fb_y = clip.y1;
vmw_du_translate_to_crtc(state, &clip);
if (update->clip) {
curr_size = update->clip(update, cmd_next, &clip, fb_x,
fb_y);
cmd_next += curr_size;
submit_size += curr_size;
}
bb.x1 = min_t(int, bb.x1, clip.x1);
bb.y1 = min_t(int, bb.y1, clip.y1);
bb.x2 = max_t(int, bb.x2, clip.x2);
bb.y2 = max_t(int, bb.y2, clip.y2);
}
curr_size = update->post_clip(update, cmd_next, &bb);
submit_size += curr_size;
if (reserved_size < submit_size)
submit_size = 0;
vmw_cmd_commit(update->dev_priv, submit_size);
vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
update->out_fence, NULL);
return ret;
out_revert:
vmw_validation_revert(&val_ctx);
out_unref:
vmw_validation_unref_lists(&val_ctx);
return ret;
}
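/*
* Minimal setup sketch for the helper above (illustrative; only fields the
* helper itself reads are shown, the my_* callbacks are hypothetical and
* their implementations are omitted):
*
*   struct vmw_du_update_plane update = {};
*
*   update.plane = plane;
*   update.old_state = old_state;
*   update.dev_priv = dev_priv;
*   update.vfb = vfb;
*   update.mutex = NULL;
*   update.intr = true;
*   update.calc_fifo_size = my_calc_fifo_size;
*   update.pre_clip = my_pre_clip;
*   update.clip = my_clip;
*   update.post_clip = my_post_clip;
*
*   ret = vmw_du_helper_plane_update(&update);
*/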
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_kms.c |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright 2021 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
#include <linux/slab.h>
static int vmw_sys_man_alloc(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res)
{
*res = kzalloc(sizeof(**res), GFP_KERNEL);
if (!*res)
return -ENOMEM;
ttm_resource_init(bo, place, *res);
return 0;
}
static void vmw_sys_man_free(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
ttm_resource_fini(man, res);
kfree(res);
}
static const struct ttm_resource_manager_func vmw_sys_manager_func = {
.alloc = vmw_sys_man_alloc,
.free = vmw_sys_man_free,
};
int vmw_sys_man_init(struct vmw_private *dev_priv)
{
struct ttm_device *bdev = &dev_priv->bdev;
struct ttm_resource_manager *man =
kzalloc(sizeof(*man), GFP_KERNEL);
if (!man)
return -ENOMEM;
man->use_tt = true;
man->func = &vmw_sys_manager_func;
ttm_resource_manager_init(man, bdev, 0);
ttm_set_driver_manager(bdev, VMW_PL_SYSTEM, man);
ttm_resource_manager_set_used(man, true);
return 0;
}
void vmw_sys_man_fini(struct vmw_private *dev_priv)
{
struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev,
VMW_PL_SYSTEM);
ttm_resource_manager_evict_all(&dev_priv->bdev, man);
ttm_resource_manager_set_used(man, false);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&dev_priv->bdev, VMW_PL_SYSTEM, NULL);
kfree(man);
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_devcaps.h"
#include <drm/ttm/ttm_placement.h>
#include <linux/sched/signal.h>
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = dev_priv->fifo;
if (!(dev_priv->capabilities & SVGA_CAP_3D))
return false;
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
uint32_t result;
if (!dev_priv->has_mob)
return false;
result = vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_3D);
return (result != 0);
}
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
BUG_ON(vmw_is_svga_v3(dev_priv));
fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
hwversion = vmw_fifo_mem_read(dev_priv,
((fifo->capabilities &
SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
SVGA_FIFO_3D_HWVERSION_REVISED :
SVGA_FIFO_3D_HWVERSION));
if (hwversion == 0)
return false;
if (hwversion < SVGA3D_HWVERSION_WS8_B1)
return false;
/* Legacy Display Unit does not support surfaces */
if (dev_priv->active_display_unit == vmw_du_legacy)
return false;
return true;
}
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
uint32_t caps;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
if (caps & SVGA_FIFO_CAP_PITCHLOCK)
return true;
return false;
}
struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
{
struct vmw_fifo_state *fifo;
uint32_t max;
uint32_t min;
if (!dev_priv->fifo_mem)
return NULL;
fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
if (!fifo)
return ERR_PTR(-ENOMEM);
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL)) {
kfree(fifo);
return ERR_PTR(-ENOMEM);
}
fifo->dynamic_buffer = NULL;
fifo->reserved_size = 0;
fifo->using_bounce_buffer = false;
mutex_init(&fifo->fifo_mutex);
init_rwsem(&fifo->rwsem);
min = 4;
if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
min <<= 2;
if (min < PAGE_SIZE)
min = PAGE_SIZE;
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
wmb();
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
mb();
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
drm_info(&dev_priv->drm,
"Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
(unsigned int) max,
(unsigned int) min,
(unsigned int) fifo->capabilities);
if (unlikely(min >= max)) {
drm_warn(&dev_priv->drm,
"FIFO memory is not usable. Driver failed to initialize.");
return ERR_PTR(-ENXIO);
}
return fifo;
}
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
u32 *fifo_mem = dev_priv->fifo_mem;
if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}
void vmw_fifo_destroy(struct vmw_private *dev_priv)
{
struct vmw_fifo_state *fifo = dev_priv->fifo;
if (!fifo)
return;
if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer);
fifo->static_buffer = NULL;
}
if (likely(fifo->dynamic_buffer != NULL)) {
vfree(fifo->dynamic_buffer);
fifo->dynamic_buffer = NULL;
}
kfree(fifo);
dev_priv->fifo = NULL;
}
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
return ((max - next_cmd) + (stop - min) <= bytes);
}
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
uint32_t bytes, bool interruptible,
unsigned long timeout)
{
int ret = 0;
unsigned long end_jiffies = jiffies + timeout;
DEFINE_WAIT(__wait);
DRM_INFO("Fifo wait noirq.\n");
for (;;) {
prepare_to_wait(&dev_priv->fifo_queue, &__wait,
(interruptible) ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (!vmw_fifo_is_full(dev_priv, bytes))
break;
if (time_after_eq(jiffies, end_jiffies)) {
ret = -EBUSY;
DRM_ERROR("SVGA device lockup.\n");
break;
}
schedule_timeout(1);
if (interruptible && signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
finish_wait(&dev_priv->fifo_queue, &__wait);
wake_up_all(&dev_priv->fifo_queue);
DRM_INFO("Fifo noirq exit.\n");
return ret;
}
static int vmw_fifo_wait(struct vmw_private *dev_priv,
uint32_t bytes, bool interruptible,
unsigned long timeout)
{
long ret = 1L;
if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
return 0;
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return vmw_fifo_wait_noirq(dev_priv, bytes,
interruptible, timeout);
vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
&dev_priv->fifo_queue_waiters);
if (interruptible)
ret = wait_event_interruptible_timeout
(dev_priv->fifo_queue,
!vmw_fifo_is_full(dev_priv, bytes), timeout);
else
ret = wait_event_timeout
(dev_priv->fifo_queue,
!vmw_fifo_is_full(dev_priv, bytes), timeout);
if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;
vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
&dev_priv->fifo_queue_waiters);
return ret;
}
/*
* Reserve @bytes number of bytes in the fifo.
*
* This function will return NULL (error) on two conditions:
* If it times out waiting for fifo space, or if @bytes is larger than the
* available fifo space.
*
* Returns:
* Pointer to the fifo, or null on error (possible hardware hang).
*/
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = dev_priv->fifo;
u32 *fifo_mem = dev_priv->fifo_mem;
uint32_t max;
uint32_t min;
uint32_t next_cmd;
uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
int ret;
mutex_lock(&fifo_state->fifo_mutex);
max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
if (unlikely(bytes >= (max - min)))
goto out_err;
BUG_ON(fifo_state->reserved_size != 0);
BUG_ON(fifo_state->dynamic_buffer != NULL);
fifo_state->reserved_size = bytes;
while (1) {
uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
bool need_bounce = false;
bool reserve_in_place = false;
if (next_cmd >= stop) {
if (likely((next_cmd + bytes < max ||
(next_cmd + bytes == max && stop > min))))
reserve_in_place = true;
else if (vmw_fifo_is_full(dev_priv, bytes)) {
ret = vmw_fifo_wait(dev_priv, bytes,
false, 3 * HZ);
if (unlikely(ret != 0))
goto out_err;
} else
need_bounce = true;
} else {
if (likely((next_cmd + bytes < stop)))
reserve_in_place = true;
else {
ret = vmw_fifo_wait(dev_priv, bytes,
false, 3 * HZ);
if (unlikely(ret != 0))
goto out_err;
}
}
if (reserve_in_place) {
if (reserveable || bytes <= sizeof(uint32_t)) {
fifo_state->using_bounce_buffer = false;
if (reserveable)
vmw_fifo_mem_write(dev_priv,
SVGA_FIFO_RESERVED,
bytes);
return (void __force *) (fifo_mem +
(next_cmd >> 2));
} else {
need_bounce = true;
}
}
if (need_bounce) {
fifo_state->using_bounce_buffer = true;
if (bytes < fifo_state->static_buffer_size)
return fifo_state->static_buffer;
else {
fifo_state->dynamic_buffer = vmalloc(bytes);
if (!fifo_state->dynamic_buffer)
goto out_err;
return fifo_state->dynamic_buffer;
}
}
}
out_err:
fifo_state->reserved_size = 0;
mutex_unlock(&fifo_state->fifo_mutex);
return NULL;
}
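/*
* Typical reserve / fill / commit pattern built on top of the function
* above (illustrative sketch; it mirrors the surface destroy encoding used
* elsewhere in the driver and assumes a valid surface id "sid"). Callers
* normally go through the VMW_CMD_RESERVE() / vmw_cmd_commit() wrappers
* rather than the local fifo functions directly:
*
*   struct {
*           SVGA3dCmdHeader header;
*           SVGA3dCmdDestroySurface body;
*   } *cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
*
*   if (unlikely(cmd == NULL))
*           return -ENOMEM;
*   cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
*   cmd->header.size = sizeof(cmd->body);
*   cmd->body.sid = sid;
*   vmw_cmd_commit(dev_priv, sizeof(*cmd));
*/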
void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
int ctx_id)
{
void *ret;
if (dev_priv->cman)
ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
ctx_id, false, NULL);
else if (ctx_id == SVGA3D_INVALID_ID)
ret = vmw_local_fifo_reserve(dev_priv, bytes);
else {
WARN(1, "Command buffer has not been allocated.\n");
ret = NULL;
}
if (IS_ERR_OR_NULL(ret))
return NULL;
return ret;
}
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
struct vmw_private *vmw,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
u32 *fifo_mem = vmw->fifo_mem;
uint32_t chunk_size = max - next_cmd;
uint32_t rest;
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
if (bytes < chunk_size)
chunk_size = bytes;
vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
mb();
memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
rest = bytes - chunk_size;
if (rest)
memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
struct vmw_private *vmw,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
while (bytes > 0) {
vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
next_cmd += sizeof(uint32_t);
if (unlikely(next_cmd == max))
next_cmd = min;
mb();
vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
mb();
bytes -= sizeof(uint32_t);
}
}
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = dev_priv->fifo;
uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);
fifo_state->reserved_size = 0;
if (fifo_state->using_bounce_buffer) {
if (reserveable)
vmw_fifo_res_copy(fifo_state, dev_priv,
next_cmd, max, min, bytes);
else
vmw_fifo_slow_copy(fifo_state, dev_priv,
next_cmd, max, min, bytes);
if (fifo_state->dynamic_buffer) {
vfree(fifo_state->dynamic_buffer);
fifo_state->dynamic_buffer = NULL;
}
}
down_write(&fifo_state->rwsem);
if (fifo_state->using_bounce_buffer || reserveable) {
next_cmd += bytes;
if (next_cmd >= max)
next_cmd -= max - min;
mb();
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
}
if (reserveable)
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
mb();
up_write(&fifo_state->rwsem);
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
mutex_unlock(&fifo_state->fifo_mutex);
}
void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
else
vmw_local_fifo_commit(dev_priv, bytes);
}
/**
* vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands.
*
* @dev_priv: Pointer to device private structure.
* @bytes: Number of bytes to commit.
*/
void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
else
vmw_local_fifo_commit(dev_priv, bytes);
}
/**
* vmw_cmd_flush - Flush any buffered commands and make sure command processing
* starts.
*
* @dev_priv: Pointer to device private structure.
* @interruptible: Whether to wait interruptible if function needs to sleep.
*/
int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
{
might_sleep();
if (dev_priv->cman)
return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
else
return 0;
}
int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct svga_fifo_cmd_fence *cmd_fence;
u32 *fm;
int ret = 0;
uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
fm = VMW_CMD_RESERVE(dev_priv, bytes);
if (unlikely(fm == NULL)) {
*seqno = atomic_read(&dev_priv->marker_seq);
ret = -ENOMEM;
(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
false, 3*HZ);
goto out_err;
}
do {
*seqno = atomic_add_return(1, &dev_priv->marker_seq);
} while (*seqno == 0);
if (!vmw_has_fences(dev_priv)) {
/*
* Don't request hardware to send a fence. The
* waiting code in vmwgfx_irq.c will emulate this.
*/
vmw_cmd_commit(dev_priv, 0);
return 0;
}
*fm++ = SVGA_CMD_FENCE;
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
vmw_cmd_commit_flush(dev_priv, bytes);
vmw_update_seqno(dev_priv);
out_err:
return ret;
}
/**
* vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using
* legacy query commands.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
* See the vmw_cmd_emit_dummy_query documentation.
*/
static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
/*
* A query wait without a preceding query end will
* actually finish all queries for this cid
* without writing to the query result structure.
*/
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery body;
} *cmd;
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
if (bo->resource->mem_type == TTM_PL_VRAM) {
cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
} else {
cmd->body.guestResult.gmrId = bo->resource->start;
cmd->body.guestResult.offset = 0;
}
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
/**
* vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using
* guest-backed resource query commands.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
* See the vmw_cmd_emit_dummy_query documentation.
*/
static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
uint32_t cid)
{
/*
* A query wait without a preceding query end will
* actually finish all queries for this cid
* without writing to the query result structure.
*/
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery body;
} *cmd;
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd->body.mobid = bo->resource->start;
cmd->body.offset = 0;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
/**
* vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using
* appropriate resource query commands.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
* This function is used to emit a dummy occlusion query with
* no primitives rendered between query begin and query end.
* It's used to provide a query barrier, in order to know that when
* this query is finished, all preceding queries are also finished.
*
* A query result structure should have been initialized at the start
* of the dev_priv->dummy_query_bo buffer object, and that buffer object
* must also be either reserved or pinned when this function is called.
*
* Returns -ENOMEM on failure to reserve fifo space.
*/
int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
if (dev_priv->has_mob)
return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);
return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
}
/**
* vmw_cmd_supported - returns true if the given device supports
* command queues.
*
* @vmw: The device private structure.
*
* Returns true if we can issue commands.
*/
bool vmw_cmd_supported(struct vmw_private *vmw)
{
bool has_cmdbufs =
(vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
SVGA_CAP_CMD_BUFFERS_2)) != 0;
if (vmw_is_svga_v3(vmw))
return (has_cmdbufs &&
(vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
/*
* We have FIFO commands.
*/
return has_cmdbufs || vmw->fifo_mem != NULL;
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#define VMW_PPN_SIZE (sizeof(unsigned long))
/* A future safe maximum remap size. */
#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
#define DMA_ADDR_INVALID ((dma_addr_t) 0)
#define DMA_PAGE_INVALID 0UL
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
struct vmw_piter *iter,
unsigned long num_pages,
int gmr_id)
{
SVGAFifoCmdDefineGMR2 define_cmd;
SVGAFifoCmdRemapGMR2 remap_cmd;
uint32_t *cmd;
uint32_t *cmd_orig;
uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
uint32_t remap_pos = 0;
uint32_t cmd_size = define_size + remap_size;
uint32_t i;
cmd_orig = cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
define_cmd.gmrId = gmr_id;
define_cmd.numPages = num_pages;
*cmd++ = SVGA_CMD_DEFINE_GMR2;
memcpy(cmd, &define_cmd, sizeof(define_cmd));
cmd += sizeof(define_cmd) / sizeof(*cmd);
/*
* Need to split the command if there are too many
* pages that go into the GMR.
*/
remap_cmd.gmrId = gmr_id;
remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
while (num_pages > 0) {
unsigned long nr = min_t(unsigned long, num_pages, VMW_PPN_PER_REMAP);
remap_cmd.offsetPages = remap_pos;
remap_cmd.numPages = nr;
*cmd++ = SVGA_CMD_REMAP_GMR2;
memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
cmd += sizeof(remap_cmd) / sizeof(*cmd);
for (i = 0; i < nr; ++i) {
if (VMW_PPN_SIZE <= 4)
*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
else
*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
PAGE_SHIFT;
cmd += VMW_PPN_SIZE / sizeof(*cmd);
vmw_piter_next(iter);
}
num_pages -= nr;
remap_pos += nr;
}
BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
vmw_cmd_commit(dev_priv, cmd_size);
return 0;
}
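/*
* Worked example (illustrative): on a 64-bit build VMW_PPN_SIZE is 8, so
* VMW_PPN_PER_REMAP = (31 * 1024) / 8 = 3968 pages per SVGA_CMD_REMAP_GMR2.
* Binding e.g. 10000 pages therefore needs
*
*   remap_num = 10000 / 3968 + 1 = 3
*
* remap commands: two full chunks of 3968 pages and one final chunk of
* 2064 pages.
*/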
static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
int gmr_id)
{
SVGAFifoCmdDefineGMR2 define_cmd;
uint32_t define_size = sizeof(define_cmd) + 4;
uint32_t *cmd;
cmd = VMW_CMD_RESERVE(dev_priv, define_size);
if (unlikely(cmd == NULL))
return;
define_cmd.gmrId = gmr_id;
define_cmd.numPages = 0;
*cmd++ = SVGA_CMD_DEFINE_GMR2;
memcpy(cmd, &define_cmd, sizeof(define_cmd));
vmw_cmd_commit(dev_priv, define_size);
}
int vmw_gmr_bind(struct vmw_private *dev_priv,
const struct vmw_sg_table *vsgt,
unsigned long num_pages,
int gmr_id)
{
struct vmw_piter data_iter;
vmw_piter_start(&data_iter, vsgt, 0);
if (unlikely(!vmw_piter_next(&data_iter)))
return 0;
if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
return -EINVAL;
return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
}
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
vmw_gmr2_unbind(dev_priv, gmr_id);
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "vmw_surface_cache.h"
#include "device_include/svga3d_surfacedefs.h"
#include <drm/ttm/ttm_placement.h>
#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
(svga3d_flags & ((uint64_t)U32_MAX))
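/*
* Example of the flag packing above (illustrative values):
*
*   SVGA3D_FLAGS_64(0x1, 0x2)             == 0x0000000100000002ULL
*   SVGA3D_FLAGS_UPPER_32(0x100000002ULL) == 0x1
*   SVGA3D_FLAGS_LOWER_32(0x100000002ULL) == 0x2
*/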
/**
* struct vmw_user_surface - User-space visible surface resource
*
* @prime: The TTM prime object.
* @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
* @master: Master of the creating client. Used for security check.
*/
struct vmw_user_surface {
struct ttm_prime_object prime;
struct vmw_surface srf;
struct drm_master *master;
};
/**
* struct vmw_surface_offset - Backing store mip level offset info
*
* @face: Surface face.
* @mip: Mip level.
* @bo_offset: Offset into backing store of this mip level.
*
*/
struct vmw_surface_offset {
uint32_t face;
uint32_t mip;
uint32_t bo_offset;
};
/**
* struct vmw_surface_dirty - Surface dirty-tracker
* @cache: Cached layout information of the surface.
* @num_subres: Number of subresources.
* @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
*/
struct vmw_surface_dirty {
struct vmw_surface_cache cache;
u32 num_subres;
SVGA3dBox boxes[];
};
static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
struct drm_vmw_gb_surface_create_ext_req *req,
struct drm_vmw_gb_surface_create_rep *rep,
struct drm_file *file_priv);
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
struct drm_vmw_surface_arg *req,
struct drm_vmw_gb_surface_ref_ext_rep *rep,
struct drm_file *file_priv);
static void vmw_surface_dirty_free(struct vmw_resource *res);
static int vmw_surface_dirty_alloc(struct vmw_resource *res);
static int vmw_surface_dirty_sync(struct vmw_resource *res);
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
size_t end);
static int vmw_surface_clean(struct vmw_resource *res);
static const struct vmw_user_resource_conv user_surface_conv = {
.object_type = VMW_RES_SURFACE,
.base_obj_to_res = vmw_user_surface_base_to_res,
.res_free = vmw_user_surface_free
};
const struct vmw_user_resource_conv *user_surface_converter =
&user_surface_conv;
static const struct vmw_res_func vmw_legacy_surface_func = {
.res_type = vmw_res_surface,
.needs_guest_memory = false,
.may_evict = true,
.prio = 1,
.dirty_prio = 1,
.type_name = "legacy surfaces",
.domain = VMW_BO_DOMAIN_GMR,
.busy_domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
.create = &vmw_legacy_srf_create,
.destroy = &vmw_legacy_srf_destroy,
.bind = &vmw_legacy_srf_bind,
.unbind = &vmw_legacy_srf_unbind
};
static const struct vmw_res_func vmw_gb_surface_func = {
.res_type = vmw_res_surface,
.needs_guest_memory = true,
.may_evict = true,
.prio = 1,
.dirty_prio = 2,
.type_name = "guest backed surfaces",
.domain = VMW_BO_DOMAIN_MOB,
.busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_surface_create,
.destroy = vmw_gb_surface_destroy,
.bind = vmw_gb_surface_bind,
.unbind = vmw_gb_surface_unbind,
.dirty_alloc = vmw_surface_dirty_alloc,
.dirty_free = vmw_surface_dirty_free,
.dirty_sync = vmw_surface_dirty_sync,
.dirty_range_add = vmw_surface_dirty_range_add,
.clean = vmw_surface_clean,
};
/*
* struct vmw_surface_dma - SVGA3D DMA command
*/
struct vmw_surface_dma {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA body;
SVGA3dCopyBox cb;
SVGA3dCmdSurfaceDMASuffix suffix;
};
/*
* struct vmw_surface_define - SVGA3D Surface Define command
*/
struct vmw_surface_define {
SVGA3dCmdHeader header;
SVGA3dCmdDefineSurface body;
};
/*
* struct vmw_surface_destroy - SVGA3D Surface Destroy command
*/
struct vmw_surface_destroy {
SVGA3dCmdHeader header;
SVGA3dCmdDestroySurface body;
};
/**
* vmw_surface_dma_size - Compute fifo size for a dma command.
*
* @srf: Pointer to a struct vmw_surface
*
* Computes the required size for a surface dma command for backup or
* restoration of the surface represented by @srf.
*/
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
}
/**
* vmw_surface_define_size - Compute fifo size for a surface define command.
*
* @srf: Pointer to a struct vmw_surface
*
* Computes the required size for a surface define command for the definition
* of the surface represented by @srf.
*/
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes *
sizeof(SVGA3dSize);
}
/**
* vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
*
* Computes the required size for a surface destroy command for the destruction
* of a hw surface.
*/
static inline uint32_t vmw_surface_destroy_size(void)
{
return sizeof(struct vmw_surface_destroy);
}
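/*
 * Editorial illustration (not part of the driver): the three size helpers
 * above scale with metadata.num_sizes, which vmw_surface_define_ioctl()
 * below computes as the total image count, i.e. the sum of the per-face mip
 * level counts. A stand-alone sketch of that accumulation, assuming the
 * usual six cube faces; vmw_example_num_sizes is a hypothetical name.
 */
static inline u32 vmw_example_num_sizes(const u32 mip_levels[6])
{
	u32 num_sizes = 0;
	u32 i;

	for (i = 0; i < 6; ++i)
		num_sizes += mip_levels[i];	/* one drm_vmw_size per image */

	return num_sizes;
}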
/**
* vmw_surface_destroy_encode - Encode a surface_destroy command.
*
* @id: The surface id
* @cmd_space: Pointer to memory area in which the commands should be encoded.
*/
static void vmw_surface_destroy_encode(uint32_t id,
void *cmd_space)
{
struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
cmd_space;
cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
cmd->header.size = sizeof(cmd->body);
cmd->body.sid = id;
}
/**
* vmw_surface_define_encode - Encode a surface_define command.
*
* @srf: Pointer to a struct vmw_surface object.
* @cmd_space: Pointer to memory area in which the commands should be encoded.
*/
static void vmw_surface_define_encode(const struct vmw_surface *srf,
void *cmd_space)
{
struct vmw_surface_define *cmd = (struct vmw_surface_define *)
cmd_space;
struct drm_vmw_size *src_size;
SVGA3dSize *cmd_size;
uint32_t cmd_len;
int i;
cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes *
sizeof(SVGA3dSize);
cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
/*
* Downcast of surfaceFlags, which was upcast when received from user-space,
* since the driver internally stores them as 64 bit.
* For the legacy surface define command, only 32-bit flags are supported.
*/
cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
cmd->body.format = srf->metadata.format;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];
cmd += 1;
cmd_size = (SVGA3dSize *) cmd;
src_size = srf->metadata.sizes;
for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
cmd_size->width = src_size->width;
cmd_size->height = src_size->height;
cmd_size->depth = src_size->depth;
}
}
/**
* vmw_surface_dma_encode - Encode a surface_dma command.
*
* @srf: Pointer to a struct vmw_surface object.
* @cmd_space: Pointer to memory area in which the commands should be encoded.
* @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
* should be placed or read from.
* @to_surface: Boolean whether to DMA to the surface or from the surface.
*/
static void vmw_surface_dma_encode(struct vmw_surface *srf,
void *cmd_space,
const SVGAGuestPtr *ptr,
bool to_surface)
{
uint32_t i;
struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
const struct SVGA3dSurfaceDesc *desc =
vmw_surface_get_desc(srf->metadata.format);
for (i = 0; i < srf->metadata.num_sizes; ++i) {
SVGA3dCmdHeader *header = &cmd->header;
SVGA3dCmdSurfaceDMA *body = &cmd->body;
SVGA3dCopyBox *cb = &cmd->cb;
SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i];
header->id = SVGA_3D_CMD_SURFACE_DMA;
header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
body->guest.ptr = *ptr;
body->guest.ptr.offset += cur_offset->bo_offset;
body->guest.pitch = vmw_surface_calculate_pitch(desc, cur_size);
body->host.sid = srf->res.id;
body->host.face = cur_offset->face;
body->host.mipmap = cur_offset->mip;
body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
SVGA3D_READ_HOST_VRAM);
cb->x = 0;
cb->y = 0;
cb->z = 0;
cb->srcx = 0;
cb->srcy = 0;
cb->srcz = 0;
cb->w = cur_size->width;
cb->h = cur_size->height;
cb->d = cur_size->depth;
suffix->suffixSize = sizeof(*suffix);
suffix->maximumOffset =
vmw_surface_get_image_buffer_size(desc, cur_size,
body->guest.pitch);
suffix->flags.discard = 0;
suffix->flags.unsynchronized = 0;
suffix->flags.reserved = 0;
++cmd;
}
};
/**
* vmw_hw_surface_destroy - destroy a Device surface
*
* @res: Pointer to a struct vmw_resource embedded in a struct
* vmw_surface.
*
* Destroys the device surface associated with a struct vmw_surface, if
* any, and adjusts the resource count accordingly.
*/
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
void *cmd;
if (res->func->destroy == vmw_gb_surface_destroy) {
(void) vmw_gb_surface_destroy(res);
return;
}
if (res->id != -1) {
cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size());
if (unlikely(!cmd))
return;
vmw_surface_destroy_encode(res->id, cmd);
vmw_cmd_commit(dev_priv, vmw_surface_destroy_size());
/*
* TODO: Make used_memory_size atomic, or use a separate lock, to avoid
* taking dev_priv::cmdbuf_mutex in the destroy path.
*/
mutex_lock(&dev_priv->cmdbuf_mutex);
dev_priv->used_memory_size -= res->guest_memory_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
}
/**
* vmw_legacy_srf_create - Create a device surface as part of the
* resource validation process.
*
* @res: Pointer to a struct vmw_surface.
*
* If the surface doesn't have a hw id, allocate one and create the device surface.
*
* Returns -EBUSY if there wasn't sufficient device resources to
* complete the validation. Retry after freeing up resources.
*
* May return other errors if the kernel is out of guest resources.
*/
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_surface *srf;
uint32_t submit_size;
uint8_t *cmd;
int ret;
if (likely(res->id != -1))
return 0;
srf = vmw_res_to_srf(res);
if (unlikely(dev_priv->used_memory_size + res->guest_memory_size >=
dev_priv->memory_size))
return -EBUSY;
/*
* Alloc id for the resource.
*/
ret = vmw_resource_alloc_id(res);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a surface id.\n");
goto out_no_id;
}
if (unlikely(res->id >= SVGA3D_HB_MAX_SURFACE_IDS)) {
ret = -EBUSY;
goto out_no_fifo;
}
/*
* Encode the surface define command.
*/
submit_size = vmw_surface_define_size(srf);
cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd)) {
ret = -ENOMEM;
goto out_no_fifo;
}
vmw_surface_define_encode(srf, cmd);
vmw_cmd_commit(dev_priv, submit_size);
vmw_fifo_resource_inc(dev_priv);
/*
* Surface memory usage accounting.
*/
dev_priv->used_memory_size += res->guest_memory_size;
return 0;
out_no_fifo:
vmw_resource_release_id(res);
out_no_id:
return ret;
}
/**
* vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
*
* @res: Pointer to a struct vmw_resource embedded in a struct
* vmw_surface.
* @val_buf: Pointer to a struct ttm_validate_buffer containing
* information about the backup buffer.
* @bind: Boolean whether to DMA to the surface.
*
* Transfer backup data to or from a legacy surface as part of the
* validation process.
* May return other errors if the kernel is out of guest resources.
* The backup buffer will be fenced or idle upon successful completion,
* and if the surface needs persistent backup storage, the backup buffer
* will also be returned reserved iff @bind is true.
*/
static int vmw_legacy_srf_dma(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf,
bool bind)
{
SVGAGuestPtr ptr;
struct vmw_fence_obj *fence;
uint32_t submit_size;
struct vmw_surface *srf = vmw_res_to_srf(res);
uint8_t *cmd;
struct vmw_private *dev_priv = res->dev_priv;
BUG_ON(!val_buf->bo);
submit_size = vmw_surface_dma_size(srf);
cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd))
return -ENOMEM;
vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
vmw_surface_dma_encode(srf, cmd, &ptr, bind);
vmw_cmd_commit(dev_priv, submit_size);
/*
* Create a fence object and fence the backup buffer.
*/
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
return 0;
}
/**
* vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
* surface validation process.
*
* @res: Pointer to a struct vmw_resource embedded in a struct
* vmw_surface.
* @val_buf: Pointer to a struct ttm_validate_buffer containing
* information about the backup buffer.
*
* This function will copy backup data to the surface if the
* backup buffer is dirty.
*/
static int vmw_legacy_srf_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
if (!res->guest_memory_dirty)
return 0;
return vmw_legacy_srf_dma(res, val_buf, true);
}
/**
* vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
* surface eviction process.
*
* @res: Pointer to a struct vmw_resource embedded in a struct
* vmw_surface.
* @readback: Whether to read the surface contents back to the backup buffer;
* only true if the surface is dirty.
* @val_buf: Pointer to a struct ttm_validate_buffer containing
* information about the backup buffer.
*
* This function will copy backup data from the surface.
*/
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf)
{
if (unlikely(readback))
return vmw_legacy_srf_dma(res, val_buf, false);
return 0;
}
/**
* vmw_legacy_srf_destroy - Destroy a device surface as part of a
* resource eviction process.
*
* @res: Pointer to a struct vmw_res embedded in a struct
* vmw_surface.
*/
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
uint32_t submit_size;
uint8_t *cmd;
BUG_ON(res->id == -1);
/*
* Encode the surface destroy command.
*/
submit_size = vmw_surface_destroy_size();
cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd))
return -ENOMEM;
vmw_surface_destroy_encode(res->id, cmd);
vmw_cmd_commit(dev_priv, submit_size);
/*
* Surface memory usage accounting.
*/
dev_priv->used_memory_size -= res->guest_memory_size;
/*
* Release the surface ID.
*/
vmw_resource_release_id(res);
vmw_fifo_resource_dec(dev_priv);
return 0;
}
/**
* vmw_surface_init - initialize a struct vmw_surface
*
* @dev_priv: Pointer to a device private struct.
* @srf: Pointer to the struct vmw_surface to initialize.
* @res_free: Pointer to a resource destructor used to free
* the object.
*/
static int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_surface *srf,
void (*res_free) (struct vmw_resource *res))
{
int ret;
struct vmw_resource *res = &srf->res;
BUG_ON(!res_free);
ret = vmw_resource_init(dev_priv, res, true, res_free,
(dev_priv->has_mob) ? &vmw_gb_surface_func :
&vmw_legacy_surface_func);
if (unlikely(ret != 0)) {
res_free(res);
return ret;
}
/*
* The surface won't be visible to hardware until a
* surface validate.
*/
INIT_LIST_HEAD(&srf->view_list);
res->hw_destroy = vmw_hw_surface_destroy;
return ret;
}
/**
* vmw_user_surface_base_to_res - TTM base object to resource converter for
* user visible surfaces
*
* @base: Pointer to a TTM base object
*
* Returns the struct vmw_resource embedded in a struct vmw_surface
* for the user-visible object identified by the TTM base object @base.
*/
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
return &(container_of(base, struct vmw_user_surface,
prime.base)->srf.res);
}
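/*
 * Editorial illustration (not part of the driver): the conversion above
 * relies on the container_of() pattern - given a pointer to a member that
 * is embedded by value (here prime.base), recover the enclosing object by
 * subtracting the member offset. A self-contained sketch with hypothetical
 * vmw_example_* types:
 */
struct vmw_example_inner {
	int dummy;
};

struct vmw_example_outer {
	int other_state;
	struct vmw_example_inner inner;	/* embedded, like prime.base above */
};

static inline struct vmw_example_outer *
vmw_example_inner_to_outer(struct vmw_example_inner *inner)
{
	return container_of(inner, struct vmw_example_outer, inner);
}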
/**
* vmw_user_surface_free - User visible surface resource destructor
*
* @res: A struct vmw_resource embedded in a struct vmw_surface.
*/
static void vmw_user_surface_free(struct vmw_resource *res)
{
struct vmw_surface *srf = vmw_res_to_srf(res);
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
WARN_ON_ONCE(res->dirty);
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
kfree(srf->metadata.sizes);
kfree(srf->snooper.image);
ttm_prime_object_kfree(user_srf, prime);
}
/**
* vmw_user_surface_base_release - User visible surface TTM base object destructor
*
* @p_base: Pointer to a pointer to a TTM base object
* embedded in a struct vmw_user_surface.
*
* Drops the base object's reference to its resource, and sets the
* pointer pointed to by @p_base to NULL.
*/
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_user_surface *user_srf =
container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
if (res->guest_memory_bo)
drm_gem_object_put(&res->guest_memory_bo->tbo.base);
*p_base = NULL;
vmw_resource_unreference(&res);
}
/**
* vmw_surface_destroy_ioctl - Ioctl function implementing
* the user surface destroy functionality.
*
* @dev: Pointer to a struct drm_device.
* @data: Pointer to data copied from / to user-space.
* @file_priv: Pointer to a drm file private structure.
*/
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
return ttm_ref_object_base_unref(tfile, arg->sid);
}
/**
* vmw_surface_define_ioctl - Ioctl function implementing
* the user surface define functionality.
*
* @dev: Pointer to a struct drm_device.
* @data: Pointer to data copied from / to user-space.
* @file_priv: Pointer to a drm file private structure.
*/
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
struct vmw_surface_metadata *metadata;
struct vmw_resource *res;
struct vmw_resource *tmp;
union drm_vmw_surface_create_arg *arg =
(union drm_vmw_surface_create_arg *)data;
struct drm_vmw_surface_create_req *req = &arg->req;
struct drm_vmw_surface_arg *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
int i, j;
uint32_t cur_bo_offset;
struct drm_vmw_size *cur_size;
struct vmw_surface_offset *cur_offset;
uint32_t num_sizes;
const SVGA3dSurfaceDesc *desc;
num_sizes = 0;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
return -EINVAL;
num_sizes += req->mip_levels[i];
}
if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
num_sizes == 0)
return -EINVAL;
desc = vmw_surface_get_desc(req->format);
if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) {
VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
req->format);
return -EINVAL;
}
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_unlock;
}
srf = &user_srf->srf;
metadata = &srf->metadata;
res = &srf->res;
/* Driver internally stores as 64-bit flags */
metadata->flags = (SVGA3dSurfaceAllFlags)req->flags;
metadata->format = req->format;
metadata->scanout = req->scanout;
memcpy(metadata->mip_levels, req->mip_levels,
sizeof(metadata->mip_levels));
metadata->num_sizes = num_sizes;
metadata->sizes =
memdup_user((struct drm_vmw_size __user *)(unsigned long)
req->size_addr,
sizeof(*metadata->sizes) * metadata->num_sizes);
if (IS_ERR(metadata->sizes)) {
ret = PTR_ERR(metadata->sizes);
goto out_no_sizes;
}
srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets),
GFP_KERNEL);
if (unlikely(!srf->offsets)) {
ret = -ENOMEM;
goto out_no_offsets;
}
metadata->base_size = *srf->metadata.sizes;
metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE;
metadata->multisample_count = 0;
metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
metadata->quality_level = SVGA3D_MS_QUALITY_NONE;
cur_bo_offset = 0;
cur_offset = srf->offsets;
cur_size = metadata->sizes;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
for (j = 0; j < metadata->mip_levels[i]; ++j) {
uint32_t stride = vmw_surface_calculate_pitch(
desc, cur_size);
cur_offset->face = i;
cur_offset->mip = j;
cur_offset->bo_offset = cur_bo_offset;
cur_bo_offset += vmw_surface_get_image_buffer_size
(desc, cur_size, stride);
++cur_offset;
++cur_size;
}
}
res->guest_memory_size = cur_bo_offset;
if (metadata->scanout &&
metadata->num_sizes == 1 &&
metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
const struct SVGA3dSurfaceDesc *desc =
vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
VMW_CURSOR_SNOOP_HEIGHT *
desc->pitchBytesPerBlock;
srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL);
if (!srf->snooper.image) {
DRM_ERROR("Failed to allocate cursor_image\n");
ret = -ENOMEM;
goto out_no_copy;
}
} else {
srf->snooper.image = NULL;
}
user_srf->prime.base.shareable = false;
user_srf->prime.base.tfile = NULL;
if (drm_is_primary_client(file_priv))
user_srf->master = drm_file_get_master(file_priv);
/*
* From this point, the generic resource management functions
* destroy the object on failure.
*/
ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
if (unlikely(ret != 0))
goto out_unlock;
/*
* A gb-aware client referencing a shared surface will
* expect a backup buffer to be present.
*/
if (dev_priv->has_mob && req->shareable) {
uint32_t backup_handle;
ret = vmw_gem_object_create_with_handle(dev_priv,
file_priv,
res->guest_memory_size,
&backup_handle,
&res->guest_memory_bo);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
vmw_bo_reference(res->guest_memory_bo);
/*
* We don't expose the handle to user-space, and the surface
* already holds a gem reference.
*/
drm_gem_handle_delete(file_priv, backup_handle);
}
tmp = vmw_resource_reference(&srf->res);
ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,
&vmw_user_surface_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
vmw_resource_unreference(&res);
goto out_unlock;
}
rep->sid = user_srf->prime.base.handle;
vmw_resource_unreference(&res);
return 0;
out_no_copy:
kfree(srf->offsets);
out_no_offsets:
kfree(metadata->sizes);
out_no_sizes:
ttm_prime_object_kfree(user_srf, prime);
out_unlock:
return ret;
}
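/*
 * Editorial illustration (not part of the driver): a stand-alone sketch of
 * the backing-store layout built in vmw_surface_define_ioctl() above.
 * Images are packed back to back, face-major then mip-minor, and each one
 * records the running byte offset at which it starts. For brevity this
 * sketch assumes a uniform mip count per face and takes precomputed
 * per-image byte sizes instead of the driver's format helpers; all
 * vmw_example_* names are hypothetical.
 */
struct vmw_example_img_offset {
	u32 face;
	u32 mip;
	u32 bo_offset;
};

static inline u32
vmw_example_layout_offsets(const u32 *img_bytes, u32 num_faces, u32 num_mips,
			   struct vmw_example_img_offset *out)
{
	u32 offset = 0, i, j, k = 0;

	for (i = 0; i < num_faces; ++i) {
		for (j = 0; j < num_mips; ++j, ++k) {
			out[k].face = i;
			out[k].mip = j;
			out[k].bo_offset = offset;
			offset += img_bytes[k];
		}
	}

	/* Total backing-store size, cf. res->guest_memory_size. */
	return offset;
}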
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
struct drm_file *file_priv,
uint32_t u_handle,
enum drm_vmw_handle_type handle_type,
struct ttm_base_object **base_p)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_user_surface *user_srf;
uint32_t handle;
struct ttm_base_object *base;
int ret;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
if (unlikely(ret != 0))
return ret;
} else {
handle = u_handle;
}
ret = -EINVAL;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
if (unlikely(!base)) {
VMW_DEBUG_USER("Could not find surface to reference.\n");
goto out_no_lookup;
}
if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
VMW_DEBUG_USER("Referenced object is not a surface.\n");
goto out_bad_resource;
}
if (handle_type != DRM_VMW_HANDLE_PRIME) {
bool require_exist = false;
user_srf = container_of(base, struct vmw_user_surface,
prime.base);
/* Error out if we are an unauthenticated primary client */
if (drm_is_primary_client(file_priv) &&
!file_priv->authenticated) {
ret = -EACCES;
goto out_bad_resource;
}
/*
* Make sure the surface creator has the same
* authenticating master, or is already registered with us.
*/
if (drm_is_primary_client(file_priv) &&
user_srf->master != file_priv->master)
require_exist = true;
if (unlikely(drm_is_render_client(file_priv)))
require_exist = true;
ret = ttm_ref_object_add(tfile, base, NULL, require_exist);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource;
}
}
*base_p = base;
return 0;
out_bad_resource:
ttm_base_object_unref(&base);
out_no_lookup:
if (handle_type == DRM_VMW_HANDLE_PRIME)
(void) ttm_ref_object_base_unref(tfile, handle);
return ret;
}
/**
* vmw_surface_reference_ioctl - Ioctl function implementing
* the user surface reference functionality.
*
* @dev: Pointer to a struct drm_device.
* @data: Pointer to data copied from / to user-space.
* @file_priv: Pointer to a drm file private structure.
*/
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_surface_reference_arg *arg =
(union drm_vmw_surface_reference_arg *)data;
struct drm_vmw_surface_arg *req = &arg->req;
struct drm_vmw_surface_create_req *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
struct drm_vmw_size __user *user_sizes;
struct ttm_base_object *base;
int ret;
ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
req->handle_type, &base);
if (unlikely(ret != 0))
return ret;
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
/* Downcast of flags when sending back to user space */
rep->flags = (uint32_t)srf->metadata.flags;
rep->format = srf->metadata.format;
memcpy(rep->mip_levels, srf->metadata.mip_levels,
sizeof(srf->metadata.mip_levels));
user_sizes = (struct drm_vmw_size __user *)(unsigned long)
rep->size_addr;
if (user_sizes)
ret = copy_to_user(user_sizes, &srf->metadata.base_size,
sizeof(srf->metadata.base_size));
if (unlikely(ret != 0)) {
VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
srf->metadata.num_sizes);
ttm_ref_object_base_unref(tfile, base->handle);
ret = -EFAULT;
}
ttm_base_object_unref(&base);
return ret;
}
/**
* vmw_gb_surface_create - Encode a surface_define command.
*
* @res: Pointer to a struct vmw_resource embedded in a struct
* vmw_surface.
*/
static int vmw_gb_surface_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_surface *srf = vmw_res_to_srf(res);
struct vmw_surface_metadata *metadata = &srf->metadata;
uint32_t cmd_len, cmd_id, submit_len;
int ret;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBSurface body;
} *cmd;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBSurface_v2 body;
} *cmd2;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBSurface_v3 body;
} *cmd3;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBSurface_v4 body;
} *cmd4;
if (likely(res->id != -1))
return 0;
vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_alloc_id(res);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a surface id.\n");
goto out_no_id;
}
if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
ret = -EBUSY;
goto out_no_fifo;
}
if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
cmd_len = sizeof(cmd4->body);
submit_len = sizeof(*cmd4);
} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
cmd_len = sizeof(cmd3->body);
submit_len = sizeof(*cmd3);
} else if (metadata->array_size > 0) {
/* VMW_SM_4 support verified at creation time. */
cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
cmd_len = sizeof(cmd2->body);
submit_len = sizeof(*cmd2);
} else {
cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
cmd_len = sizeof(cmd->body);
submit_len = sizeof(*cmd);
}
cmd = VMW_CMD_RESERVE(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
cmd3 = (typeof(cmd3))cmd;
cmd4 = (typeof(cmd4))cmd;
if (unlikely(!cmd)) {
ret = -ENOMEM;
goto out_no_fifo;
}
if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
cmd4->header.id = cmd_id;
cmd4->header.size = cmd_len;
cmd4->body.sid = srf->res.id;
cmd4->body.surfaceFlags = metadata->flags;
cmd4->body.format = metadata->format;
cmd4->body.numMipLevels = metadata->mip_levels[0];
cmd4->body.multisampleCount = metadata->multisample_count;
cmd4->body.multisamplePattern = metadata->multisample_pattern;
cmd4->body.qualityLevel = metadata->quality_level;
cmd4->body.autogenFilter = metadata->autogen_filter;
cmd4->body.size.width = metadata->base_size.width;
cmd4->body.size.height = metadata->base_size.height;
cmd4->body.size.depth = metadata->base_size.depth;
cmd4->body.arraySize = metadata->array_size;
cmd4->body.bufferByteStride = metadata->buffer_byte_stride;
} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
cmd3->header.id = cmd_id;
cmd3->header.size = cmd_len;
cmd3->body.sid = srf->res.id;
cmd3->body.surfaceFlags = metadata->flags;
cmd3->body.format = metadata->format;
cmd3->body.numMipLevels = metadata->mip_levels[0];
cmd3->body.multisampleCount = metadata->multisample_count;
cmd3->body.multisamplePattern = metadata->multisample_pattern;
cmd3->body.qualityLevel = metadata->quality_level;
cmd3->body.autogenFilter = metadata->autogen_filter;
cmd3->body.size.width = metadata->base_size.width;
cmd3->body.size.height = metadata->base_size.height;
cmd3->body.size.depth = metadata->base_size.depth;
cmd3->body.arraySize = metadata->array_size;
} else if (metadata->array_size > 0) {
cmd2->header.id = cmd_id;
cmd2->header.size = cmd_len;
cmd2->body.sid = srf->res.id;
cmd2->body.surfaceFlags = metadata->flags;
cmd2->body.format = metadata->format;
cmd2->body.numMipLevels = metadata->mip_levels[0];
cmd2->body.multisampleCount = metadata->multisample_count;
cmd2->body.autogenFilter = metadata->autogen_filter;
cmd2->body.size.width = metadata->base_size.width;
cmd2->body.size.height = metadata->base_size.height;
cmd2->body.size.depth = metadata->base_size.depth;
cmd2->body.arraySize = metadata->array_size;
} else {
cmd->header.id = cmd_id;
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
cmd->body.surfaceFlags = metadata->flags;
cmd->body.format = metadata->format;
cmd->body.numMipLevels = metadata->mip_levels[0];
cmd->body.multisampleCount = metadata->multisample_count;
cmd->body.autogenFilter = metadata->autogen_filter;
cmd->body.size.width = metadata->base_size.width;
cmd->body.size.height = metadata->base_size.height;
cmd->body.size.depth = metadata->base_size.depth;
}
vmw_cmd_commit(dev_priv, submit_len);
return 0;
out_no_fifo:
vmw_resource_release_id(res);
out_no_id:
vmw_fifo_resource_dec(dev_priv);
return ret;
}
static int vmw_gb_surface_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBindGBSurface body;
} *cmd1;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdUpdateGBSurface body;
} *cmd2;
uint32_t submit_size;
struct ttm_buffer_object *bo = val_buf->bo;
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd1) + (res->guest_memory_dirty ? sizeof(*cmd2) : 0);
cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd1))
return -ENOMEM;
cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.sid = res->id;
cmd1->body.mobid = bo->resource->start;
if (res->guest_memory_dirty) {
cmd2 = (void *) &cmd1[1];
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.sid = res->id;
}
vmw_cmd_commit(dev_priv, submit_size);
if (res->guest_memory_bo->dirty && res->guest_memory_dirty) {
/* We've just made a full upload. Clear dirty regions. */
vmw_bo_dirty_clear_res(res);
}
res->guest_memory_dirty = false;
return 0;
}
static int vmw_gb_surface_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdReadbackGBSurface body;
} *cmd1;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdInvalidateGBSurface body;
} *cmd2;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBindGBSurface body;
} *cmd3;
uint32_t submit_size;
uint8_t *cmd;
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd))
return -ENOMEM;
if (readback) {
cmd1 = (void *) cmd;
cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.sid = res->id;
cmd3 = (void *) &cmd1[1];
} else {
cmd2 = (void *) cmd;
cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.sid = res->id;
cmd3 = (void *) &cmd2[1];
}
cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
cmd3->header.size = sizeof(cmd3->body);
cmd3->body.sid = res->id;
cmd3->body.mobid = SVGA3D_INVALID_ID;
vmw_cmd_commit(dev_priv, submit_size);
/*
* Create a fence object and fence the backup buffer.
*/
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
return 0;
}
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_surface *srf = vmw_res_to_srf(res);
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyGBSurface body;
} *cmd;
if (likely(res->id == -1))
return 0;
mutex_lock(&dev_priv->binding_mutex);
vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
vmw_binding_res_list_scrub(&res->binding_head);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(!cmd)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
cmd->header.size = sizeof(cmd->body);
cmd->body.sid = res->id;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
vmw_fifo_resource_dec(dev_priv);
return 0;
}
/**
* vmw_gb_surface_define_ioctl - Ioctl function implementing
* the user surface define functionality.
*
* @dev: Pointer to a struct drm_device.
* @data: Pointer to data copied from / to user-space.
* @file_priv: Pointer to a drm file private structure.
*/
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
union drm_vmw_gb_surface_create_arg *arg =
(union drm_vmw_gb_surface_create_arg *)data;
struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
struct drm_vmw_gb_surface_create_ext_req req_ext;
req_ext.base = arg->req;
req_ext.version = drm_vmw_gb_surface_v1;
req_ext.svga3d_flags_upper_32_bits = 0;
req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
req_ext.buffer_byte_stride = 0;
req_ext.must_be_zero = 0;
return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
}
/**
* vmw_gb_surface_reference_ioctl - Ioctl function implementing
* the user surface reference functionality.
*
* @dev: Pointer to a struct drm_device.
* @data: Pointer to data copied from / to user-space.
* @file_priv: Pointer to a drm file private structure.
*/
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
union drm_vmw_gb_surface_reference_arg *arg =
(union drm_vmw_gb_surface_reference_arg *)data;
struct drm_vmw_surface_arg *req = &arg->req;
struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
int ret;
ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);
if (unlikely(ret != 0))
return ret;
rep->creq = rep_ext.creq.base;
rep->crep = rep_ext.crep;
return ret;
}
/**
* vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
* the user surface define functionality.
*
* @dev: Pointer to a struct drm_device.
* @data: Pointer to data copied from / to user-space.
* @file_priv: Pointer to a drm file private structure.
*/
int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
union drm_vmw_gb_surface_create_ext_arg *arg =
(union drm_vmw_gb_surface_create_ext_arg *)data;
struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
}
/**
* vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
* the user surface reference functionality.
*
* @dev: Pointer to a struct drm_device.
* @data: Pointer to data copied from / to user-space.
* @file_priv: Pointer to a drm file private structure.
*/
int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
union drm_vmw_gb_surface_reference_ext_arg *arg =
(union drm_vmw_gb_surface_reference_ext_arg *)data;
struct drm_vmw_surface_arg *req = &arg->req;
struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;
return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
}
/**
* vmw_gb_surface_define_internal - Ioctl function implementing
* the user surface define functionality.
*
* @dev: Pointer to a struct drm_device.
* @req: Request argument from user-space.
* @rep: Response argument to user-space.
* @file_priv: Pointer to a drm file private structure.
*/
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
struct drm_vmw_gb_surface_create_ext_req *req,
struct drm_vmw_gb_surface_create_rep *rep,
struct drm_file *file_priv)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_surface *user_srf;
struct vmw_surface_metadata metadata = {0};
struct vmw_surface *srf;
struct vmw_resource *res;
struct vmw_resource *tmp;
int ret = 0;
uint32_t backup_handle = 0;
SVGA3dSurfaceAllFlags svga3d_flags_64 =
SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
req->base.svga3d_flags);
/* array_size must be zero for non-GL3 hosts. */
if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) {
VMW_DEBUG_USER("SM4 surface not supported.\n");
return -EINVAL;
}
if (!has_sm4_1_context(dev_priv)) {
if (req->svga3d_flags_upper_32_bits != 0)
ret = -EINVAL;
if (req->base.multisample_count != 0)
ret = -EINVAL;
if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
ret = -EINVAL;
if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
ret = -EINVAL;
if (ret) {
VMW_DEBUG_USER("SM4.1 surface not supported.\n");
return ret;
}
}
if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) {
VMW_DEBUG_USER("SM5 surface not supported.\n");
return -EINVAL;
}
if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
req->base.multisample_count == 0) {
VMW_DEBUG_USER("Invalid sample count.\n");
return -EINVAL;
}
if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) {
VMW_DEBUG_USER("Invalid mip level.\n");
return -EINVAL;
}
metadata.flags = svga3d_flags_64;
metadata.format = req->base.format;
metadata.mip_levels[0] = req->base.mip_levels;
metadata.multisample_count = req->base.multisample_count;
metadata.multisample_pattern = req->multisample_pattern;
metadata.quality_level = req->quality_level;
metadata.array_size = req->base.array_size;
metadata.buffer_byte_stride = req->buffer_byte_stride;
metadata.num_sizes = 1;
metadata.base_size = req->base.base_size;
metadata.scanout = req->base.drm_surface_flags &
drm_vmw_surface_flag_scanout;
/* Define a surface based on the parameters. */
ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
if (ret != 0) {
VMW_DEBUG_USER("Failed to define surface.\n");
return ret;
}
user_srf = container_of(srf, struct vmw_user_surface, srf);
if (drm_is_primary_client(file_priv))
user_srf->master = drm_file_get_master(file_priv);
res = &user_srf->srf.res;
if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
&res->guest_memory_bo);
if (ret == 0) {
if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
vmw_bo_unreference(&res->guest_memory_bo);
ret = -EINVAL;
goto out_unlock;
} else {
backup_handle = req->base.buffer_handle;
}
}
} else if (req->base.drm_surface_flags &
(drm_vmw_surface_flag_create_buffer |
drm_vmw_surface_flag_coherent)) {
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
res->guest_memory_size,
&backup_handle,
&res->guest_memory_bo);
if (ret == 0)
vmw_bo_reference(res->guest_memory_bo);
}
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
struct vmw_bo *backup = res->guest_memory_bo;
ttm_bo_reserve(&backup->tbo, false, false, NULL);
if (!res->func->dirty_alloc)
ret = -EINVAL;
if (!ret)
ret = vmw_bo_dirty_add(backup);
if (!ret) {
res->coherent = true;
ret = res->func->dirty_alloc(res);
}
ttm_bo_unreserve(&backup->tbo);
if (ret) {
vmw_resource_unreference(&res);
goto out_unlock;
}
}
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
req->base.drm_surface_flags &
drm_vmw_surface_flag_shareable,
VMW_RES_SURFACE,
&vmw_user_surface_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
vmw_resource_unreference(&res);
goto out_unlock;
}
rep->handle = user_srf->prime.base.handle;
rep->backup_size = res->guest_memory_size;
if (res->guest_memory_bo) {
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
rep->buffer_size = res->guest_memory_bo->tbo.base.size;
rep->buffer_handle = backup_handle;
} else {
rep->buffer_map_handle = 0;
rep->buffer_size = 0;
rep->buffer_handle = SVGA3D_INVALID_ID;
}
vmw_resource_unreference(&res);
out_unlock:
return ret;
}
/**
* vmw_gb_surface_reference_internal - Ioctl function implementing
* the user surface reference functionality.
*
* @dev: Pointer to a struct drm_device.
* @req: Pointer to user-space request surface arg.
* @rep: Pointer to response to user-space.
* @file_priv: Pointer to a drm file private structure.
*/
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
struct drm_vmw_surface_arg *req,
struct drm_vmw_gb_surface_ref_ext_rep *rep,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
struct vmw_surface_metadata *metadata;
struct ttm_base_object *base;
u32 backup_handle;
int ret;
ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
req->handle_type, &base);
if (unlikely(ret != 0))
return ret;
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
if (!srf->res.guest_memory_bo) {
DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
goto out_bad_resource;
}
metadata = &srf->metadata;
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->guest_memory_bo */
ret = drm_gem_handle_create(file_priv, &srf->res.guest_memory_bo->tbo.base,
&backup_handle);
mutex_unlock(&dev_priv->cmdbuf_mutex);
if (ret != 0) {
drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n",
req->sid);
goto out_bad_resource;
}
rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags);
rep->creq.base.format = metadata->format;
rep->creq.base.mip_levels = metadata->mip_levels[0];
rep->creq.base.drm_surface_flags = 0;
rep->creq.base.multisample_count = metadata->multisample_count;
rep->creq.base.autogen_filter = metadata->autogen_filter;
rep->creq.base.array_size = metadata->array_size;
rep->creq.base.buffer_handle = backup_handle;
rep->creq.base.base_size = metadata->base_size;
rep->crep.handle = user_srf->prime.base.handle;
rep->crep.backup_size = srf->res.guest_memory_size;
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =
drm_vma_node_offset_addr(&srf->res.guest_memory_bo->tbo.base.vma_node);
rep->crep.buffer_size = srf->res.guest_memory_bo->tbo.base.size;
rep->creq.version = drm_vmw_gb_surface_v1;
rep->creq.svga3d_flags_upper_32_bits =
SVGA3D_FLAGS_UPPER_32(metadata->flags);
rep->creq.multisample_pattern = metadata->multisample_pattern;
rep->creq.quality_level = metadata->quality_level;
rep->creq.must_be_zero = 0;
out_bad_resource:
ttm_base_object_unref(&base);
return ret;
}
/**
* vmw_subres_dirty_add - Add a dirty region to a subresource
* @dirty: The surface's dirty tracker.
* @loc_start: The location corresponding to the start of the region.
* @loc_end: The location corresponding to the end of the region.
*
* As we are assuming that @loc_start and @loc_end represent a sequential
* range of backing store memory, if the region spans multiple lines then
* regardless of the x coordinate, the full lines are dirtied.
* Correspondingly if the region spans multiple z slices, then full rather
* than partial z slices are dirtied.
*/
static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
const struct vmw_surface_loc *loc_start,
const struct vmw_surface_loc *loc_end)
{
const struct vmw_surface_cache *cache = &dirty->cache;
SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
u32 mip = loc_start->sub_resource % cache->num_mip_levels;
const struct drm_vmw_size *size = &cache->mip[mip].size;
u32 box_c2 = box->z + box->d;
if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
return;
if (box->d == 0 || box->z > loc_start->z)
box->z = loc_start->z;
if (box_c2 < loc_end->z)
box->d = loc_end->z - box->z;
if (loc_start->z + 1 == loc_end->z) {
box_c2 = box->y + box->h;
if (box->h == 0 || box->y > loc_start->y)
box->y = loc_start->y;
if (box_c2 < loc_end->y)
box->h = loc_end->y - box->y;
if (loc_start->y + 1 == loc_end->y) {
box_c2 = box->x + box->w;
if (box->w == 0 || box->x > loc_start->x)
box->x = loc_start->x;
if (box_c2 < loc_end->x)
box->w = loc_end->x - box->x;
} else {
box->x = 0;
box->w = size->width;
}
} else {
box->y = 0;
box->h = size->height;
box->x = 0;
box->w = size->width;
}
}
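/*
 * Editorial illustration (not part of the driver): per axis,
 * vmw_subres_dirty_add() above grows an existing [start, start + extent)
 * interval so that it also covers a newly dirtied interval, with
 * extent == 0 meaning "empty so far". A one-dimensional, stand-alone
 * version of that grow-to-cover step, written with both endpoints kept
 * explicit; vmw_example_grow_interval is a hypothetical name.
 */
static inline void vmw_example_grow_interval(u32 *start, u32 *extent,
					     u32 new_start, u32 new_end)
{
	u32 cur_end = *start + *extent;

	if (*extent == 0) {
		*start = new_start;
		*extent = new_end - new_start;
		return;
	}

	if (new_start < *start)
		*start = new_start;
	if (new_end > cur_end)
		cur_end = new_end;

	*extent = cur_end - *start;
}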
/**
* vmw_subres_dirty_full - Mark a full subresource as dirty
* @dirty: The surface's dirty tracker.
* @subres: The subresource
*/
static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
{
const struct vmw_surface_cache *cache = &dirty->cache;
u32 mip = subres % cache->num_mip_levels;
const struct drm_vmw_size *size = &cache->mip[mip].size;
SVGA3dBox *box = &dirty->boxes[subres];
box->x = 0;
box->y = 0;
box->z = 0;
box->w = size->width;
box->h = size->height;
box->d = size->depth;
}
/*
* vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture
* surfaces.
*/
static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
size_t start, size_t end)
{
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
size_t backup_end = res->guest_memory_offset + res->guest_memory_size;
struct vmw_surface_loc loc1, loc2;
const struct vmw_surface_cache *cache;
start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
end = min(end, backup_end) - res->guest_memory_offset;
cache = &dirty->cache;
vmw_surface_get_loc(cache, &loc1, start);
vmw_surface_get_loc(cache, &loc2, end - 1);
vmw_surface_inc_loc(cache, &loc2);
if (loc1.sheet != loc2.sheet) {
u32 sub_res;
/*
* The dirty range spans multiple multisample sheets. Doing this
* optimally would require computing the dirty region for each sheet
* and taking the union, but since this is not a common case, just
* dirty the whole surface.
*/
for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
vmw_subres_dirty_full(dirty, sub_res);
return;
}
if (loc1.sub_resource + 1 == loc2.sub_resource) {
/* Dirty range covers a single sub-resource */
vmw_subres_dirty_add(dirty, &loc1, &loc2);
} else {
/* Dirty range covers multiple sub-resources */
struct vmw_surface_loc loc_min, loc_max;
u32 sub_res;
vmw_surface_max_loc(cache, loc1.sub_resource, &loc_max);
vmw_subres_dirty_add(dirty, &loc1, &loc_max);
vmw_surface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
vmw_subres_dirty_add(dirty, &loc_min, &loc2);
for (sub_res = loc1.sub_resource + 1;
sub_res < loc2.sub_resource - 1; ++sub_res)
vmw_subres_dirty_full(dirty, sub_res);
}
}
/*
* vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
* surfaces.
*/
static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
size_t start, size_t end)
{
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
const struct vmw_surface_cache *cache = &dirty->cache;
size_t backup_end = res->guest_memory_offset + cache->mip_chain_bytes;
SVGA3dBox *box = &dirty->boxes[0];
u32 box_c2;
box->h = box->d = 1;
start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
end = min(end, backup_end) - res->guest_memory_offset;
box_c2 = box->x + box->w;
if (box->w == 0 || box->x > start)
box->x = start;
if (box_c2 < end)
box->w = end - box->x;
}
/*
* vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces
*/
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
size_t end)
{
struct vmw_surface *srf = vmw_res_to_srf(res);
if (WARN_ON(end <= res->guest_memory_offset ||
start >= res->guest_memory_offset + res->guest_memory_size))
return;
if (srf->metadata.format == SVGA3D_BUFFER)
vmw_surface_buf_dirty_range_add(res, start, end);
else
vmw_surface_tex_dirty_range_add(res, start, end);
}
/*
* vmw_surface_dirty_sync - The surface's dirty_sync callback.
*/
static int vmw_surface_dirty_sync(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
u32 i, num_dirty;
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
size_t alloc_size;
const struct vmw_surface_cache *cache = &dirty->cache;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXUpdateSubResource body;
} *cmd1;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdUpdateGBImage body;
} *cmd2;
void *cmd;
num_dirty = 0;
for (i = 0; i < dirty->num_subres; ++i) {
const SVGA3dBox *box = &dirty->boxes[i];
if (box->d)
num_dirty++;
}
if (!num_dirty)
goto out;
alloc_size = num_dirty * ((has_sm4_context(dev_priv)) ? sizeof(*cmd1) : sizeof(*cmd2));
cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
if (!cmd)
return -ENOMEM;
cmd1 = cmd;
cmd2 = cmd;
for (i = 0; i < dirty->num_subres; ++i) {
const SVGA3dBox *box = &dirty->boxes[i];
if (!box->d)
continue;
/*
* DX_UPDATE_SUBRESOURCE is aware of array surfaces.
* UPDATE_GB_IMAGE is not.
*/
if (has_sm4_context(dev_priv)) {
cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.sid = res->id;
cmd1->body.subResource = i;
cmd1->body.box = *box;
cmd1++;
} else {
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.image.sid = res->id;
cmd2->body.image.face = i / cache->num_mip_levels;
cmd2->body.image.mipmap = i -
(cache->num_mip_levels * cmd2->body.image.face);
cmd2->body.box = *box;
cmd2++;
}
}
vmw_cmd_commit(dev_priv, alloc_size);
out:
memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
dirty->num_subres);
return 0;
}
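/*
 * Editorial illustration (not part of the driver): the dirty tracker
 * indexes subresources layer-major, so vmw_surface_dirty_sync() above
 * recovers (face/layer, mip) from a flat index with a divide and a modulo
 * by the mip-chain length. A stand-alone sketch of that decomposition;
 * vmw_example_decompose_subres is a hypothetical name.
 */
static inline void vmw_example_decompose_subres(u32 subres,
						u32 num_mip_levels,
						u32 *layer, u32 *mip)
{
	*layer = subres / num_mip_levels;
	*mip = subres % num_mip_levels;
}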
/*
* vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
*/
static int vmw_surface_dirty_alloc(struct vmw_resource *res)
{
struct vmw_surface *srf = vmw_res_to_srf(res);
const struct vmw_surface_metadata *metadata = &srf->metadata;
struct vmw_surface_dirty *dirty;
u32 num_layers = 1;
u32 num_mip;
u32 num_subres;
u32 num_samples;
size_t dirty_size;
int ret;
if (metadata->array_size)
num_layers = metadata->array_size;
else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
num_layers *= SVGA3D_MAX_SURFACE_FACES;
num_mip = metadata->mip_levels[0];
if (!num_mip)
num_mip = 1;
num_subres = num_layers * num_mip;
dirty_size = struct_size(dirty, boxes, num_subres);
dirty = kvzalloc(dirty_size, GFP_KERNEL);
if (!dirty) {
ret = -ENOMEM;
goto out_no_dirty;
}
num_samples = max_t(u32, 1, metadata->multisample_count);
ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
num_mip, num_layers, num_samples,
&dirty->cache);
if (ret)
goto out_no_cache;
dirty->num_subres = num_subres;
res->dirty = (struct vmw_resource_dirty *) dirty;
return 0;
out_no_cache:
kvfree(dirty);
out_no_dirty:
return ret;
}
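/*
 * Editorial illustration (not part of the driver): struct vmw_surface_dirty
 * ends in a flexible array of per-subresource boxes, so the allocation
 * above sizes it with struct_size(), the overflow-checked form of
 * sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]). A minimal
 * stand-alone sketch of the same idiom; the vmw_example_* names are
 * hypothetical.
 */
struct vmw_example_tracker {
	u32 num_items;
	u32 items[];		/* flexible array member, like boxes[] */
};

static inline struct vmw_example_tracker *
vmw_example_tracker_alloc(u32 num_items)
{
	struct vmw_example_tracker *t;

	/* struct_size() only uses sizeof(), so @t may be uninitialized here. */
	t = kvzalloc(struct_size(t, items, num_items), GFP_KERNEL);
	if (t)
		t->num_items = num_items;

	return t;
}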
/*
* vmw_surface_dirty_free - The surface's dirty_free callback
*/
static void vmw_surface_dirty_free(struct vmw_resource *res)
{
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
kvfree(dirty);
res->dirty = NULL;
}
/*
* vmw_surface_clean - The surface's clean callback
*/
static int vmw_surface_clean(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
size_t alloc_size;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdReadbackGBSurface body;
} *cmd;
alloc_size = sizeof(*cmd);
cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
if (!cmd)
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
cmd->header.size = sizeof(cmd->body);
cmd->body.sid = res->id;
vmw_cmd_commit(dev_priv, alloc_size);
return 0;
}
/*
* vmw_gb_surface_define - Define a private GB surface
*
* @dev_priv: Pointer to a device private.
* @req: Metadata representing the surface to create.
* @srf_out: Allocated surface. Set to NULL on failure.
*
* GB surfaces allocated by this function will not have a user mode handle, and
* thus will only be visible to vmwgfx. For optimization reasons the
* surface may later be given a user mode handle by another function to make
* it available to user mode drivers.
*/
int vmw_gb_surface_define(struct vmw_private *dev_priv,
const struct vmw_surface_metadata *req,
struct vmw_surface **srf_out)
{
struct vmw_surface_metadata *metadata;
struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
u32 sample_count = 1;
u32 num_layers = 1;
int ret;
*srf_out = NULL;
if (req->scanout) {
if (!vmw_surface_is_screen_target_format(req->format)) {
VMW_DEBUG_USER("Invalid Screen Target surface format.");
return -EINVAL;
}
if (req->base_size.width > dev_priv->texture_max_width ||
req->base_size.height > dev_priv->texture_max_height) {
VMW_DEBUG_USER("%ux%u\n, exceed max surface size %ux%u",
req->base_size.width,
req->base_size.height,
dev_priv->texture_max_width,
dev_priv->texture_max_height);
return -EINVAL;
}
} else {
const SVGA3dSurfaceDesc *desc =
vmw_surface_get_desc(req->format);
if (desc->blockDesc == SVGA3DBLOCKDESC_NONE) {
VMW_DEBUG_USER("Invalid surface format.\n");
return -EINVAL;
}
}
if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
return -EINVAL;
if (req->num_sizes != 1)
return -EINVAL;
if (req->sizes != NULL)
return -EINVAL;
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_unlock;
}
*srf_out = &user_srf->srf;
user_srf->prime.base.shareable = false;
user_srf->prime.base.tfile = NULL;
srf = &user_srf->srf;
srf->metadata = *req;
srf->offsets = NULL;
metadata = &srf->metadata;
if (metadata->array_size)
num_layers = req->array_size;
else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
num_layers = SVGA3D_MAX_SURFACE_FACES;
if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
sample_count = metadata->multisample_count;
srf->res.guest_memory_size =
vmw_surface_get_serialized_size_extended(
metadata->format,
metadata->base_size,
metadata->mip_levels[0],
num_layers,
sample_count);
if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
srf->res.guest_memory_size += sizeof(SVGA3dDXSOState);
/*
* Don't set the SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
* a size greater than the STDU max width/height. This is really a workaround
* to support the creation of a big framebuffer, requested by some user-space
* for the whole topology. That big framebuffer won't really be used for
* binding with a screen target, since during prepare_fb a separate surface is
* created, so it's safe to omit the SVGA3D_SURFACE_SCREENTARGET flag.
*/
if (dev_priv->active_display_unit == vmw_du_screen_target &&
metadata->scanout &&
metadata->base_size.width <= dev_priv->stdu_max_width &&
metadata->base_size.height <= dev_priv->stdu_max_height)
metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;
/*
* From this point, the generic resource management functions
* destroy the object on failure.
*/
ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
return ret;
out_unlock:
return ret;
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_surface.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
static const struct ttm_place vram_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_VRAM,
.flags = 0
};
static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
.flags = 0
};
static const struct ttm_place gmr_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_GMR,
.flags = 0
};
struct ttm_placement vmw_vram_placement = {
.num_placement = 1,
.placement = &vram_placement_flags,
.num_busy_placement = 1,
.busy_placement = &vram_placement_flags
};
static const struct ttm_place vram_gmr_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_VRAM,
.flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_GMR,
.flags = 0
}
};
struct ttm_placement vmw_vram_gmr_placement = {
.num_placement = 2,
.placement = vram_gmr_placement_flags,
.num_busy_placement = 1,
.busy_placement = &gmr_placement_flags
};
struct ttm_placement vmw_sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
.num_busy_placement = 1,
.busy_placement = &sys_placement_flags
};
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
* __vmw_piter_non_sg_next: Helper functions to advance
* a struct vmw_piter iterator.
*
* @viter: Pointer to the iterator.
*
* These functions return false if past the end of the list,
* true otherwise. Functions are selected depending on the current
* DMA mapping mode.
*/
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
return ++(viter->i) < viter->num_pages;
}
static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
bool ret = __vmw_piter_non_sg_next(viter);
return __sg_page_iter_dma_next(&viter->iter) && ret;
}
static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
return viter->addrs[viter->i];
}
static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
return sg_page_iter_dma_address(&viter->iter);
}
/**
* vmw_piter_start - Initialize a struct vmw_piter.
*
* @viter: Pointer to the iterator to initialize
* @vsgt: Pointer to a struct vmw_sg_table to initialize from
* @p_offset: Pointer offset used to update current array position
*
* Note that we're following the convention of __sg_page_iter_start, so that
* the iterator doesn't point to a valid page after initialization; it has
* to be advanced one step first.
*/
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
unsigned long p_offset)
{
viter->i = p_offset - 1;
viter->num_pages = vsgt->num_pages;
viter->pages = vsgt->pages;
switch (vsgt->mode) {
case vmw_dma_alloc_coherent:
viter->next = &__vmw_piter_non_sg_next;
viter->dma_address = &__vmw_piter_dma_addr;
viter->addrs = vsgt->addrs;
break;
case vmw_dma_map_populate:
case vmw_dma_map_bind:
viter->next = &__vmw_piter_sg_next;
viter->dma_address = &__vmw_piter_sg_addr;
__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
vsgt->sgt->orig_nents, p_offset);
break;
default:
BUG();
}
}
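/*
 * Editorial illustration (not part of the driver): a sketch of how a caller
 * typically walks the iterator initialized by vmw_piter_start(), assuming
 * the vmw_piter_next()/vmw_piter_dma_addr() wrappers declared in
 * vmwgfx_drv.h, which dispatch through the function pointers selected
 * above. The iterator starts one step before the first page and must be
 * advanced before each use; vmw_example_walk_piter is a hypothetical name.
 */
static inline void vmw_example_walk_piter(const struct vmw_sg_table *vsgt)
{
	struct vmw_piter viter;

	vmw_piter_start(&viter, vsgt, 0);
	while (vmw_piter_next(&viter)) {
		dma_addr_t addr = vmw_piter_dma_addr(&viter);

		(void)addr;	/* e.g. write into a GMR or MOB page table */
	}
}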
/**
* vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
* TTM pages
*
* @vmw_tt: Pointer to a struct vmw_ttm_backend
*
* Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
*/
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
struct device *dev = vmw_tt->dev_priv->drm.dev;
dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}
/**
* vmw_ttm_map_for_dma - map TTM pages to get device addresses
*
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
*
* This function is used to get device addresses from the kernel DMA layer.
* However, it's violating the DMA API in that when this operation has been
* performed, it's illegal for the CPU to write to the pages without first
* unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
* therefore only legal to call this function if we know that the function
* dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
* a CPU write buffer flush.
*/
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
struct device *dev = vmw_tt->dev_priv->drm.dev;
return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}
/**
* vmw_ttm_map_dma - Make sure TTM pages are visible to the device
*
* @vmw_tt: Pointer to a struct vmw_ttm_tt
*
 * Select the correct mapping function and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non-NULL, the function returns success.
*/
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
struct vmw_private *dev_priv = vmw_tt->dev_priv;
struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
int ret = 0;
if (vmw_tt->mapped)
return 0;
vsgt->mode = dev_priv->map_mode;
vsgt->pages = vmw_tt->dma_ttm.pages;
vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
vsgt->addrs = vmw_tt->dma_ttm.dma_address;
vsgt->sgt = NULL;
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
vsgt->sgt = &vmw_tt->sgt;
ret = sg_alloc_table_from_pages_segment(
&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
(unsigned long)vsgt->num_pages << PAGE_SHIFT,
dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
if (ret)
goto out_sg_alloc_fail;
ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))
goto out_map_fail;
break;
default:
break;
}
vmw_tt->mapped = true;
return 0;
out_map_fail:
sg_free_table(vmw_tt->vsgt.sgt);
vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
return ret;
}
/**
* vmw_ttm_unmap_dma - Tear down any TTM page device mappings
*
* @vmw_tt: Pointer to a struct vmw_ttm_tt
*
* Tear down any previously set up device DMA mappings and free
* any storage space allocated for them. If there are no mappings set up,
* this function is a NOP.
*/
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
struct vmw_private *dev_priv = vmw_tt->dev_priv;
if (!vmw_tt->vsgt.sgt)
return;
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
vmw_ttm_unmap_from_dma(vmw_tt);
sg_free_table(vmw_tt->vsgt.sgt);
vmw_tt->vsgt.sgt = NULL;
break;
default:
break;
}
vmw_tt->mapped = false;
}
/**
* vmw_bo_sg_table - Return a struct vmw_sg_table object for a
* TTM buffer object
*
* @bo: Pointer to a struct ttm_buffer_object
*
* Returns a pointer to a struct vmw_sg_table object. The object should
* not be freed after use.
* Note that for the device addresses to be valid, the buffer object must
* either be reserved or pinned.
*/
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
struct vmw_ttm_tt *vmw_tt =
container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
return &vmw_tt->vsgt;
}
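/**
 * vmw_ttm_bind - Bind TTM pages to the device
 *
 * @bdev: Pointer to the TTM device.
 * @ttm: Pointer to the struct ttm_tt to bind.
 * @bo_mem: The memory resource describing the destination placement.
 *
 * Sets up DMA mappings if needed and binds the pages to a GMR or MOB
 * depending on the destination memory type. System placements need no
 * device binding. Returns 0 on success or a negative error code.
 */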
static int vmw_ttm_bind(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm);
int ret = 0;
if (!bo_mem)
return -EINVAL;
if (vmw_be->bound)
return 0;
ret = vmw_ttm_map_dma(vmw_be);
if (unlikely(ret != 0))
return ret;
vmw_be->gmr_id = bo_mem->start;
vmw_be->mem_type = bo_mem->mem_type;
switch (bo_mem->mem_type) {
case VMW_PL_GMR:
ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
ttm->num_pages, vmw_be->gmr_id);
break;
case VMW_PL_MOB:
if (unlikely(vmw_be->mob == NULL)) {
vmw_be->mob =
vmw_mob_create(ttm->num_pages);
if (unlikely(vmw_be->mob == NULL))
return -ENOMEM;
}
ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
&vmw_be->vsgt, ttm->num_pages,
vmw_be->gmr_id);
break;
case VMW_PL_SYSTEM:
/* Nothing to be done for a system bind */
break;
default:
BUG();
}
vmw_be->bound = true;
return ret;
}
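/**
 * vmw_ttm_unbind - Unbind TTM pages from the device
 *
 * @bdev: Pointer to the TTM device.
 * @ttm: Pointer to the struct ttm_tt to unbind.
 *
 * Releases the GMR or MOB binding set up by vmw_ttm_bind() and, when the
 * driver uses the map_bind mode, also tears down the DMA mappings.
 */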
static void vmw_ttm_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm);
if (!vmw_be->bound)
return;
switch (vmw_be->mem_type) {
case VMW_PL_GMR:
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
break;
case VMW_PL_MOB:
vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
break;
case VMW_PL_SYSTEM:
break;
default:
BUG();
}
if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
vmw_ttm_unmap_dma(vmw_be);
vmw_be->bound = false;
}
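/**
 * vmw_ttm_destroy - Free a struct vmw_ttm_tt
 *
 * @bdev: Pointer to the TTM device.
 * @ttm: Pointer to the struct ttm_tt to destroy.
 *
 * Tears down any DMA mappings, destroys an attached MOB and frees the
 * backing structure.
 */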
static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm);
vmw_ttm_unmap_dma(vmw_be);
ttm_tt_fini(ttm);
if (vmw_be->mob)
vmw_mob_destroy(vmw_be->mob);
kfree(vmw_be);
}
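/**
 * vmw_ttm_populate - Allocate backing pages for a struct ttm_tt
 *
 * @bdev: Pointer to the TTM device.
 * @ttm: Pointer to the struct ttm_tt to populate.
 * @ctx: TTM operation context.
 *
 * Allocates pages from the TTM page pool unless the ttm is already
 * populated.
 */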
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
int ret;
/* TODO: maybe completely drop this ? */
if (ttm_tt_is_populated(ttm))
return 0;
ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
return ret;
}
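/**
 * vmw_ttm_unpopulate - Free the backing pages of a struct ttm_tt
 *
 * @bdev: Pointer to the TTM device.
 * @ttm: Pointer to the struct ttm_tt to unpopulate.
 *
 * Unbinds the ttm, destroys any attached MOB, tears down the DMA mappings
 * and returns the pages to the TTM page pool.
 */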
static void vmw_ttm_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
vmw_ttm_unbind(bdev, ttm);
if (vmw_tt->mob) {
vmw_mob_destroy(vmw_tt->mob);
vmw_tt->mob = NULL;
}
vmw_ttm_unmap_dma(vmw_tt);
ttm_pool_free(&bdev->pool, ttm);
}
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
struct vmw_ttm_tt *vmw_be;
int ret;
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
return NULL;
vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached);
else
ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached, 0);
if (unlikely(ret != 0))
goto out_no_init;
return &vmw_be->dma_ttm;
out_no_init:
kfree(vmw_be);
return NULL;
}
static void vmw_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
*placement = vmw_sys_placement;
}
static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
case VMW_PL_SYSTEM:
case VMW_PL_GMR:
case VMW_PL_MOB:
return 0;
case TTM_PL_VRAM:
mem->bus.offset = (mem->start << PAGE_SHIFT) +
dev_priv->vram_start;
mem->bus.is_iomem = true;
mem->bus.caching = ttm_cached;
break;
default:
return -EINVAL;
}
return 0;
}
/**
* vmw_move_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to move.
* @old_mem: The old memory where we move from
* @new_mem: The struct ttm_resource indicating to what memory
* region the move is taking place.
*
* Calls move_notify for all subsystems needing it.
* (currently only resources).
*/
static void vmw_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *old_mem,
struct ttm_resource *new_mem)
{
vmw_bo_move_notify(bo, new_mem);
vmw_query_move_notify(bo, old_mem, new_mem);
}
/**
 * vmw_swap_notify - TTM swap_notify callback
*
* @bo: The TTM buffer object about to be swapped out.
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
vmw_bo_swap_notify(bo);
(void) ttm_bo_wait(bo, false, false);
}
static bool vmw_memtype_is_system(uint32_t mem_type)
{
return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
}
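/**
 * vmw_move - TTM move callback
 *
 * @bo: The buffer object to move.
 * @evict: Whether this move is an eviction.
 * @ctx: TTM operation context.
 * @new_mem: The destination memory resource.
 * @hop: Intermediate placement hint (unused here).
 *
 * Binds the buffer to the new placement if needed, notifies the resource
 * and query code about the move and then either performs a lightweight
 * placement switch between TT-backed placements or falls back to a
 * memcpy move.
 */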
static int vmw_move(struct ttm_buffer_object *bo,
bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
struct ttm_resource_manager *new_man;
struct ttm_resource_manager *old_man = NULL;
int ret = 0;
new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
if (bo->resource)
old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
if (ret)
return ret;
}
if (!bo->resource || (bo->resource->mem_type == TTM_PL_SYSTEM &&
bo->ttm == NULL)) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
vmw_move_notify(bo, bo->resource, new_mem);
if (old_man && old_man->use_tt && new_man->use_tt) {
if (vmw_memtype_is_system(bo->resource->mem_type)) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
goto fail;
vmw_ttm_unbind(bo->bdev, bo->ttm);
ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
return 0;
} else {
ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
if (ret)
goto fail;
}
return 0;
fail:
vmw_move_notify(bo, new_mem, bo->resource);
return ret;
}
struct ttm_device_funcs vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate,
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
.ttm_tt_destroy = &vmw_ttm_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
.move = vmw_move,
.swap_notify = vmw_swap_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
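/**
 * vmw_bo_create_and_populate - Create a pinned kernel buffer object and
 * populate and DMA-map its pages.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bo_size: Size of the buffer object in bytes.
 * @domain: Placement domain for the buffer.
 * @bo_p: On success, set to point at the created buffer object.
 *
 * Returns 0 on success or a negative error code on failure.
 */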
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
size_t bo_size, u32 domain,
struct vmw_bo **bo_p)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
struct vmw_bo *vbo;
int ret;
struct vmw_bo_params bo_params = {
.domain = domain,
.busy_domain = domain,
.bo_type = ttm_bo_type_kernel,
.size = bo_size,
.pin = true
};
ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
BUG_ON(ret != 0);
ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt =
container_of(vbo->tbo.ttm, struct vmw_ttm_tt, dma_ttm);
ret = vmw_ttm_map_dma(vmw_tt);
}
ttm_bo_unreserve(&vbo->tbo);
if (likely(ret == 0))
*bo_p = vbo;
return ret;
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
/*
* Different methods for tracking dirty:
* VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
* VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
* accesses in the VM mkwrite() callback
*/
enum vmw_bo_dirty_method {
VMW_BO_DIRTY_PAGETABLE,
VMW_BO_DIRTY_MKWRITE,
};
/*
 * A scan that finds no dirtied pages triggers a transition to the _MKWRITE
 * method; similarly, a certain percentage of dirty pages triggers a
 * transition to the _PAGETABLE method. How many consecutive triggers should
 * we wait for before changing method?
*/
#define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2
/* Percentage to trigger a transition to the _PAGETABLE method */
#define VMW_DIRTY_PERCENTAGE 10
/**
* struct vmw_bo_dirty - Dirty information for buffer objects
* @start: First currently dirty bit
* @end: Last currently dirty bit + 1
* @method: The currently used dirty method
* @change_count: Number of consecutive method change triggers
* @ref_count: Reference count for this structure
* @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
* @bitmap: A bitmap where each bit represents a page. A set bit means a
* dirty page.
*/
struct vmw_bo_dirty {
unsigned long start;
unsigned long end;
enum vmw_bo_dirty_method method;
unsigned int change_count;
unsigned int ref_count;
unsigned long bitmap_size;
unsigned long bitmap[];
};
/**
* vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
* @vbo: The buffer object to scan
*
* Scans the pagetable for dirty bits. Clear those bits and modify the
* dirty structure with the results. This function may change the
* dirty-tracking method.
*/
static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
pgoff_t num_marked;
num_marked = clean_record_shared_mapping_range
(mapping,
offset, dirty->bitmap_size,
offset, &dirty->bitmap[0],
&dirty->start, &dirty->end);
if (num_marked == 0)
dirty->change_count++;
else
dirty->change_count = 0;
if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
dirty->change_count = 0;
dirty->method = VMW_BO_DIRTY_MKWRITE;
wp_shared_mapping_range(mapping,
offset, dirty->bitmap_size);
clean_record_shared_mapping_range(mapping,
offset, dirty->bitmap_size,
offset, &dirty->bitmap[0],
&dirty->start, &dirty->end);
}
}
/**
* vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
* @vbo: The buffer object to scan
*
* Write-protect pages written to so that consecutive write accesses will
* trigger a call to mkwrite.
*
* This function may change the dirty-tracking method.
*/
static void vmw_bo_dirty_scan_mkwrite(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
pgoff_t num_marked;
if (dirty->end <= dirty->start)
return;
num_marked = wp_shared_mapping_range(vbo->tbo.bdev->dev_mapping,
dirty->start + offset,
dirty->end - dirty->start);
if (100UL * num_marked / dirty->bitmap_size >
VMW_DIRTY_PERCENTAGE)
dirty->change_count++;
else
dirty->change_count = 0;
if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
pgoff_t start = 0;
pgoff_t end = dirty->bitmap_size;
dirty->method = VMW_BO_DIRTY_PAGETABLE;
clean_record_shared_mapping_range(mapping, offset, end, offset,
&dirty->bitmap[0],
&start, &end);
bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size);
if (dirty->start < dirty->end)
bitmap_set(&dirty->bitmap[0], dirty->start,
dirty->end - dirty->start);
dirty->change_count = 0;
}
}
/**
* vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
* tracking structure
* @vbo: The buffer object to scan
*
* This function may change the dirty tracking method.
*/
void vmw_bo_dirty_scan(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
vmw_bo_dirty_scan_pagetable(vbo);
else
vmw_bo_dirty_scan_mkwrite(vbo);
}
/**
* vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
* an unmap_mapping_range operation.
* @vbo: The buffer object,
* @start: First page of the range within the buffer object.
* @end: Last page of the range within the buffer object + 1.
*
* If we're using the _PAGETABLE scan method, we may leak dirty pages
* when calling unmap_mapping_range(). This function makes sure we pick
* up all dirty pages.
*/
static void vmw_bo_dirty_pre_unmap(struct vmw_bo *vbo,
pgoff_t start, pgoff_t end)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
return;
wp_shared_mapping_range(mapping, start + offset, end - start);
clean_record_shared_mapping_range(mapping, start + offset,
end - start, offset,
&dirty->bitmap[0], &dirty->start,
&dirty->end);
}
/**
* vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
* @vbo: The buffer object,
* @start: First page of the range within the buffer object.
* @end: Last page of the range within the buffer object + 1.
*
* This is similar to ttm_bo_unmap_virtual() except it takes a subrange.
*/
void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
pgoff_t start, pgoff_t end)
{
unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
vmw_bo_dirty_pre_unmap(vbo, start, end);
unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
(loff_t) (end - start) << PAGE_SHIFT);
}
/**
* vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
* @vbo: The buffer object
*
* This function registers a dirty-tracking user to a buffer object.
* A user can be for example a resource or a vma in a special user-space
* mapping.
*
* Return: Zero on success, -ENOMEM on memory allocation failure.
*/
int vmw_bo_dirty_add(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
pgoff_t num_pages = PFN_UP(vbo->tbo.resource->size);
size_t size;
int ret;
if (dirty) {
dirty->ref_count++;
return 0;
}
size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
dirty = kvzalloc(size, GFP_KERNEL);
if (!dirty) {
ret = -ENOMEM;
goto out_no_dirty;
}
dirty->bitmap_size = num_pages;
dirty->start = dirty->bitmap_size;
dirty->end = 0;
dirty->ref_count = 1;
if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
dirty->method = VMW_BO_DIRTY_PAGETABLE;
} else {
struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
dirty->method = VMW_BO_DIRTY_MKWRITE;
/* Write-protect and then pick up already dirty bits */
wp_shared_mapping_range(mapping, offset, num_pages);
clean_record_shared_mapping_range(mapping, offset, num_pages,
offset,
&dirty->bitmap[0],
&dirty->start, &dirty->end);
}
vbo->dirty = dirty;
return 0;
out_no_dirty:
return ret;
}
/**
* vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
* @vbo: The buffer object
*
* This function releases a dirty-tracking user from a buffer object.
* If the reference count reaches zero, then the dirty-tracking object is
* freed and the pointer to it cleared.
*/
void vmw_bo_dirty_release(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
if (dirty && --dirty->ref_count == 0) {
kvfree(dirty);
vbo->dirty = NULL;
}
}
/**
* vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
* its backing mob.
* @res: The resource
*
 * This function will pick up all dirty ranges affecting the resource from
 * its backing mob, and call vmw_resource_dirty_update() once for each
* range. The transferred ranges will be cleared from the backing mob's
* dirty tracking.
*/
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
{
struct vmw_bo *vbo = res->guest_memory_bo;
struct vmw_bo_dirty *dirty = vbo->dirty;
pgoff_t start, cur, end;
unsigned long res_start = res->guest_memory_offset;
unsigned long res_end = res->guest_memory_offset + res->guest_memory_size;
WARN_ON_ONCE(res_start & ~PAGE_MASK);
res_start >>= PAGE_SHIFT;
res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
if (res_start >= dirty->end || res_end <= dirty->start)
return;
cur = max(res_start, dirty->start);
	res_end = min(res_end, dirty->end);
while (cur < res_end) {
unsigned long num;
start = find_next_bit(&dirty->bitmap[0], res_end, cur);
if (start >= res_end)
break;
end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
cur = end + 1;
num = end - start;
bitmap_clear(&dirty->bitmap[0], start, num);
vmw_resource_dirty_update(res, start, end);
}
if (res_start <= dirty->start && res_end > dirty->start)
dirty->start = res_end;
if (res_start < dirty->end && res_end >= dirty->end)
dirty->end = res_start;
}
/**
* vmw_bo_dirty_clear_res - Clear a resource's dirty region from
* its backing mob.
* @res: The resource
*
* This function will clear all dirty ranges affecting the resource from
 * its backing mob's dirty tracking.
*/
void vmw_bo_dirty_clear_res(struct vmw_resource *res)
{
unsigned long res_start = res->guest_memory_offset;
unsigned long res_end = res->guest_memory_offset + res->guest_memory_size;
struct vmw_bo *vbo = res->guest_memory_bo;
struct vmw_bo_dirty *dirty = vbo->dirty;
res_start >>= PAGE_SHIFT;
res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
if (res_start >= dirty->end || res_end <= dirty->start)
return;
res_start = max(res_start, dirty->start);
res_end = min(res_end, dirty->end);
bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start);
if (res_start <= dirty->start && res_end > dirty->start)
dirty->start = res_end;
if (res_start < dirty->end && res_end >= dirty->end)
dirty->end = res_start;
}
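/**
 * vmw_bo_vm_mkwrite - The mkwrite vm operation callback for vmwgfx
 * buffer-object mappings.
 *
 * @vmf: Pointer to the fault information.
 *
 * Reserves the buffer object and, when the MKWRITE dirty-tracking method
 * is in use, records the written page in the dirty bitmap.
 */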
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
vm_fault_t ret;
unsigned long page_offset;
unsigned int save_flags;
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
/*
* mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly.
* So make sure the TTM helpers are aware.
*/
save_flags = vmf->flags;
vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
ret = ttm_bo_vm_reserve(bo, vmf);
vmf->flags = save_flags;
if (ret)
return ret;
page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
if (unlikely(page_offset >= PFN_UP(bo->resource->size))) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
!test_bit(page_offset, &vbo->dirty->bitmap[0])) {
struct vmw_bo_dirty *dirty = vbo->dirty;
__set_bit(page_offset, &dirty->bitmap[0]);
dirty->start = min(dirty->start, page_offset);
dirty->end = max(dirty->end, page_offset + 1);
}
out_unlock:
dma_resv_unlock(bo->base.resv);
return ret;
}
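/**
 * vmw_bo_vm_fault - The fault vm operation callback for vmwgfx
 * buffer-object mappings.
 *
 * @vmf: Pointer to the fault information.
 *
 * Reserves the buffer object, limits the number of prefaulted pages for
 * dirty-tracked buffers and inserts the pages write-protected when the
 * MKWRITE dirty-tracking method is in use, so that subsequent writes are
 * caught by mkwrite().
 */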
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
pgoff_t num_prefault;
pgprot_t prot;
vm_fault_t ret;
ret = ttm_bo_vm_reserve(bo, vmf);
if (ret)
return ret;
num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 :
TTM_BO_VM_NUM_PREFAULT;
if (vbo->dirty) {
pgoff_t allowed_prefault;
unsigned long page_offset;
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
if (page_offset >= PFN_UP(bo->resource->size) ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
num_prefault = min(num_prefault, allowed_prefault);
}
/*
* If we don't track dirty using the MKWRITE method, make sure
	 * the page protection is write-enabled so we don't get
* a lot of unnecessary write faults.
*/
if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
else
prot = vm_get_page_prot(vma->vm_flags);
ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
out_unlock:
dma_resv_unlock(bo->base.resv);
return ret;
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"
#include <drm/ttm/ttm_placement.h>
#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
struct vmw_stream {
struct vmw_bo *buf;
bool claimed;
bool paused;
struct drm_vmw_control_stream_arg saved;
};
/*
* Overlay control
*/
struct vmw_overlay {
/*
* Each stream is a single overlay. In Xv these are called ports.
*/
struct mutex mutex;
struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};
struct vmw_escape_header {
uint32_t cmd;
SVGAFifoCmdEscape body;
};
struct vmw_escape_video_flush {
struct vmw_escape_header escape;
SVGAEscapeVideoFlush flush;
};
static inline void fill_escape(struct vmw_escape_header *header,
uint32_t size)
{
header->cmd = SVGA_CMD_ESCAPE;
header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
header->body.size = size;
}
static inline void fill_flush(struct vmw_escape_video_flush *cmd,
uint32_t stream_id)
{
fill_escape(&cmd->escape, sizeof(cmd->flush));
cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
cmd->flush.streamId = stream_id;
}
/*
* Send put command to hw.
*
 * Returns
 * 0 on success, or -ENOMEM if the fifo command buffer reservation fails.
*/
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
struct vmw_bo *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
struct vmw_escape_video_flush *flush;
size_t fifo_size;
bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
int i, num_items;
SVGAGuestPtr ptr;
struct {
struct vmw_escape_header escape;
struct {
uint32_t cmdType;
uint32_t streamId;
} header;
} *cmds;
struct {
uint32_t registerId;
uint32_t value;
} *items;
	/* the register defines are indices, so the count is the highest index + 1 */
if (have_so)
num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
else
num_items = SVGA_VIDEO_PITCH_3 + 1;
fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
cmds = VMW_CMD_RESERVE(dev_priv, fifo_size);
/* hardware has hung, can't do anything here */
if (!cmds)
return -ENOMEM;
items = (typeof(items))&cmds[1];
flush = (struct vmw_escape_video_flush *)&items[num_items];
/* the size is header + number of items */
fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));
cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
cmds->header.streamId = arg->stream_id;
/* the IDs are neatly numbered */
for (i = 0; i < num_items; i++)
items[i].registerId = i;
vmw_bo_get_guest_ptr(&buf->tbo, &ptr);
ptr.offset += arg->offset;
items[SVGA_VIDEO_ENABLED].value = true;
items[SVGA_VIDEO_FLAGS].value = arg->flags;
items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
items[SVGA_VIDEO_FORMAT].value = arg->format;
items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
items[SVGA_VIDEO_SIZE].value = arg->size;
items[SVGA_VIDEO_WIDTH].value = arg->width;
items[SVGA_VIDEO_HEIGHT].value = arg->height;
items[SVGA_VIDEO_SRC_X].value = arg->src.x;
items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
items[SVGA_VIDEO_DST_X].value = arg->dst.x;
items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
if (have_so) {
items[SVGA_VIDEO_DATA_GMRID].value = ptr.gmrId;
items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
}
fill_flush(flush, arg->stream_id);
vmw_cmd_commit(dev_priv, fifo_size);
return 0;
}
/*
* Send stop command to hw.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
uint32_t stream_id,
bool interruptible)
{
struct {
struct vmw_escape_header escape;
SVGAEscapeVideoSetRegs body;
struct vmw_escape_video_flush flush;
} *cmds;
int ret;
for (;;) {
cmds = VMW_CMD_RESERVE(dev_priv, sizeof(*cmds));
if (cmds)
break;
ret = vmw_fallback_wait(dev_priv, false, true, 0,
interruptible, 3*HZ);
if (interruptible && ret == -ERESTARTSYS)
return ret;
else
BUG_ON(ret != 0);
}
fill_escape(&cmds->escape, sizeof(cmds->body));
cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
cmds->body.header.streamId = stream_id;
cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
cmds->body.items[0].value = false;
fill_flush(&cmds->flush, stream_id);
vmw_cmd_commit(dev_priv, sizeof(*cmds));
return 0;
}
/*
* Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
*
* With the introduction of screen objects buffers could now be
* used with GMRs instead of being locked to vram.
*/
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
struct vmw_bo *buf,
bool pin, bool inter)
{
if (!pin)
return vmw_bo_unpin(dev_priv, buf, inter);
if (dev_priv->active_display_unit == vmw_du_legacy)
return vmw_bo_pin_in_vram(dev_priv, buf, inter);
return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
}
/*
* Stop or pause a stream.
*
 * If the stream is paused, the no-evict flag is removed from the buffer,
 * but the buffer is left in vram. This allows, for instance, mode_set to
 * evict it should it need to.
*
* The caller must hold the overlay lock.
*
* @stream_id which stream to stop/pause.
* @pause true to pause, false to stop completely.
*/
static int vmw_overlay_stop(struct vmw_private *dev_priv,
uint32_t stream_id, bool pause,
bool interruptible)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
struct vmw_stream *stream = &overlay->stream[stream_id];
int ret;
	/* If no buffer is attached, the stream is already completely stopped */
if (!stream->buf)
return 0;
/* If the stream is paused this is already done */
if (!stream->paused) {
ret = vmw_overlay_send_stop(dev_priv, stream_id,
interruptible);
if (ret)
return ret;
/* We just remove the NO_EVICT flag so no -ENOMEM */
ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
interruptible);
if (interruptible && ret == -ERESTARTSYS)
return ret;
else
BUG_ON(ret != 0);
}
if (!pause) {
vmw_bo_unreference(&stream->buf);
stream->paused = false;
} else {
stream->paused = true;
}
return 0;
}
/*
* Update a stream and send any put or stop fifo commands needed.
*
* The caller must hold the overlay lock.
*
* Returns
* -ENOMEM if buffer doesn't fit in vram.
* -ERESTARTSYS if interrupted.
*/
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
struct vmw_bo *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
struct vmw_stream *stream = &overlay->stream[arg->stream_id];
int ret = 0;
if (!buf)
return -EINVAL;
DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
stream->buf, buf, stream->paused ? "" : "not ");
if (stream->buf != buf) {
ret = vmw_overlay_stop(dev_priv, arg->stream_id,
false, interruptible);
if (ret)
return ret;
} else if (!stream->paused) {
		/* If the buffers match and the stream is not paused, just
		 * send the put command; no need to do anything else.
		 */
ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
if (ret == 0)
stream->saved = *arg;
else
BUG_ON(!interruptible);
return ret;
}
/* We don't start the old stream if we are interrupted.
* Might return -ENOMEM if it can't fit the buffer in vram.
*/
ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
if (ret)
return ret;
ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
if (ret) {
/* This one needs to happen no matter what. We only remove
* the NO_EVICT flag so this is safe from -ENOMEM.
*/
BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
!= 0);
return ret;
}
if (stream->buf != buf)
stream->buf = vmw_bo_reference(buf);
stream->saved = *arg;
/* stream is no longer stopped/paused */
stream->paused = false;
return 0;
}
/*
* Try to resume all paused streams.
*
* Used by the kms code after moving a new scanout buffer to vram.
*
* Takes the overlay lock.
*/
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
int i, ret;
if (!overlay)
return 0;
mutex_lock(&overlay->mutex);
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
struct vmw_stream *stream = &overlay->stream[i];
if (!stream->paused)
continue;
ret = vmw_overlay_update_stream(dev_priv, stream->buf,
&stream->saved, false);
if (ret != 0)
DRM_INFO("%s: *warning* failed to resume stream %i\n",
__func__, i);
}
mutex_unlock(&overlay->mutex);
return 0;
}
/*
* Pauses all active streams.
*
* Used by the kms code when moving a new scanout buffer to vram.
*
* Takes the overlay lock.
*/
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
int i, ret;
if (!overlay)
return 0;
mutex_lock(&overlay->mutex);
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
if (overlay->stream[i].paused)
DRM_INFO("%s: *warning* stream %i already paused\n",
__func__, i);
ret = vmw_overlay_stop(dev_priv, i, true, false);
WARN_ON(ret != 0);
}
mutex_unlock(&overlay->mutex);
return 0;
}
static bool vmw_overlay_available(const struct vmw_private *dev_priv)
{
return (dev_priv->overlay_priv != NULL &&
((vmw_fifo_caps(dev_priv) & VMW_OVERLAY_CAP_MASK) ==
VMW_OVERLAY_CAP_MASK));
}
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_overlay *overlay = dev_priv->overlay_priv;
struct drm_vmw_control_stream_arg *arg =
(struct drm_vmw_control_stream_arg *)data;
struct vmw_bo *buf;
struct vmw_resource *res;
int ret;
if (!vmw_overlay_available(dev_priv))
return -ENOSYS;
ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
if (ret)
return ret;
mutex_lock(&overlay->mutex);
if (!arg->enabled) {
ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
goto out_unlock;
}
ret = vmw_user_bo_lookup(file_priv, arg->handle, &buf);
if (ret)
goto out_unlock;
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
vmw_user_bo_unref(buf);
out_unlock:
mutex_unlock(&overlay->mutex);
vmw_resource_unreference(&res);
return ret;
}
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
if (!vmw_overlay_available(dev_priv))
return 0;
return VMW_MAX_NUM_STREAMS;
}
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
int i, k;
if (!vmw_overlay_available(dev_priv))
return 0;
mutex_lock(&overlay->mutex);
for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
if (!overlay->stream[i].claimed)
k++;
mutex_unlock(&overlay->mutex);
return k;
}
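/*
 * Claim a free overlay stream for a client.
 *
 * Returns 0 and the stream id in @out on success, -ESRCH if all streams
 * are already claimed and -ENOSYS if overlays are not available.
 */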
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
int i;
if (!overlay)
return -ENOSYS;
mutex_lock(&overlay->mutex);
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
if (overlay->stream[i].claimed)
continue;
overlay->stream[i].claimed = true;
*out = i;
mutex_unlock(&overlay->mutex);
return 0;
}
mutex_unlock(&overlay->mutex);
return -ESRCH;
}
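/*
 * Release a previously claimed stream, stopping it if it is still running.
 *
 * Returns -ENOSYS if overlays are not available, 0 otherwise.
 */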
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);
if (!overlay)
return -ENOSYS;
mutex_lock(&overlay->mutex);
WARN_ON(!overlay->stream[stream_id].claimed);
vmw_overlay_stop(dev_priv, stream_id, false, false);
overlay->stream[stream_id].claimed = false;
mutex_unlock(&overlay->mutex);
return 0;
}
int vmw_overlay_init(struct vmw_private *dev_priv)
{
struct vmw_overlay *overlay;
int i;
if (dev_priv->overlay_priv)
return -EINVAL;
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return -ENOMEM;
mutex_init(&overlay->mutex);
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
overlay->stream[i].buf = NULL;
overlay->stream[i].paused = false;
overlay->stream[i].claimed = false;
}
dev_priv->overlay_priv = overlay;
return 0;
}
int vmw_overlay_close(struct vmw_private *dev_priv)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
bool forgotten_buffer = false;
int i;
if (!overlay)
return -ENOSYS;
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
if (overlay->stream[i].buf) {
forgotten_buffer = true;
vmw_overlay_stop(dev_priv, i, false, false);
}
}
WARN_ON(forgotten_buffer);
dev_priv->overlay_priv = NULL;
kfree(overlay);
return 0;
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
struct vmw_ctx_binding_state *cbs;
struct vmw_cmdbuf_res_manager *man;
struct vmw_resource *cotables[SVGA_COTABLE_MAX];
spinlock_t cotable_lock;
struct vmw_bo *dx_query_mob;
};
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);
static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);
static const struct vmw_user_resource_conv user_context_conv = {
.object_type = VMW_RES_CONTEXT,
.base_obj_to_res = vmw_user_context_base_to_res,
.res_free = vmw_user_context_free
};
const struct vmw_user_resource_conv *user_context_converter =
&user_context_conv;
static const struct vmw_res_func vmw_legacy_context_func = {
.res_type = vmw_res_context,
.needs_guest_memory = false,
.may_evict = false,
.type_name = "legacy contexts",
.domain = VMW_BO_DOMAIN_SYS,
.busy_domain = VMW_BO_DOMAIN_SYS,
.create = NULL,
.destroy = NULL,
.bind = NULL,
.unbind = NULL
};
static const struct vmw_res_func vmw_gb_context_func = {
.res_type = vmw_res_context,
.needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "guest backed contexts",
.domain = VMW_BO_DOMAIN_MOB,
.busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_context_create,
.destroy = vmw_gb_context_destroy,
.bind = vmw_gb_context_bind,
.unbind = vmw_gb_context_unbind
};
static const struct vmw_res_func vmw_dx_context_func = {
.res_type = vmw_res_dx_context,
.needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "dx contexts",
.domain = VMW_BO_DOMAIN_MOB,
.busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_dx_context_create,
.destroy = vmw_dx_context_destroy,
.bind = vmw_dx_context_bind,
.unbind = vmw_dx_context_unbind
};
/*
* Context management:
*/
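/**
 * vmw_context_cotables_unref - Drop a context's references to its cotables.
 *
 * @dev_priv: Pointer to the device private structure.
 * @uctx: Pointer to the user context.
 */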
static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
struct vmw_user_context *uctx)
{
struct vmw_resource *res;
int i;
u32 cotable_max = has_sm5_context(dev_priv) ?
SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
for (i = 0; i < cotable_max; ++i) {
spin_lock(&uctx->cotable_lock);
res = uctx->cotables[i];
uctx->cotables[i] = NULL;
spin_unlock(&uctx->cotable_lock);
if (res)
vmw_resource_unreference(&res);
}
}
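/**
 * vmw_hw_context_destroy - Destroy a hardware context.
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts the resource-specific destroy function
 * is called after scrubbing all bindings; for legacy contexts a context
 * destroy command is sent to the device.
 */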
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyContext body;
} *cmd;
if (res->func->destroy == vmw_gb_context_destroy ||
res->func->destroy == vmw_dx_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
vmw_cmdbuf_res_man_destroy(uctx->man);
mutex_lock(&dev_priv->binding_mutex);
vmw_binding_state_kill(uctx->cbs);
(void) res->func->destroy(res);
mutex_unlock(&dev_priv->binding_mutex);
if (dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
vmw_context_cotables_unref(dev_priv, uctx);
return;
}
vmw_execbuf_release_pinned_bo(dev_priv);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return;
cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_dec(dev_priv);
}
static int vmw_gb_context_init(struct vmw_private *dev_priv,
bool dx,
struct vmw_resource *res,
void (*res_free)(struct vmw_resource *res))
{
int ret, i;
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
res->guest_memory_size = (dx ? sizeof(SVGADXContextMobFormat) :
sizeof(SVGAGBContextData));
ret = vmw_resource_init(dev_priv, res, true,
res_free,
dx ? &vmw_dx_context_func :
&vmw_gb_context_func);
if (unlikely(ret != 0))
goto out_err;
if (dev_priv->has_mob) {
uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
if (IS_ERR(uctx->man)) {
ret = PTR_ERR(uctx->man);
uctx->man = NULL;
goto out_err;
}
}
uctx->cbs = vmw_binding_state_alloc(dev_priv);
if (IS_ERR(uctx->cbs)) {
ret = PTR_ERR(uctx->cbs);
goto out_err;
}
spin_lock_init(&uctx->cotable_lock);
if (dx) {
u32 cotable_max = has_sm5_context(dev_priv) ?
SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
for (i = 0; i < cotable_max; ++i) {
uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
&uctx->res, i);
if (IS_ERR(uctx->cotables[i])) {
ret = PTR_ERR(uctx->cotables[i]);
goto out_cotables;
}
}
}
res->hw_destroy = vmw_hw_context_destroy;
return 0;
out_cotables:
vmw_context_cotables_unref(dev_priv, uctx);
out_err:
if (res_free)
res_free(res);
else
kfree(res);
return ret;
}
static int vmw_context_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
void (*res_free)(struct vmw_resource *res),
bool dx)
{
int ret;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineContext body;
} *cmd;
if (dev_priv->has_mob)
return vmw_gb_context_init(dev_priv, dx, res, res_free);
ret = vmw_resource_init(dev_priv, res, false,
res_free, &vmw_legacy_context_func);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a resource id.\n");
goto out_early;
}
if (unlikely(res->id >= SVGA3D_HB_MAX_CONTEXT_IDS)) {
DRM_ERROR("Out of hw context ids.\n");
vmw_resource_unreference(&res);
return -ENOMEM;
}
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
vmw_resource_unreference(&res);
return -ENOMEM;
}
cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
res->hw_destroy = vmw_hw_context_destroy;
return 0;
out_early:
if (res_free == NULL)
kfree(res);
else
res_free(res);
return ret;
}
/*
* GB context.
*/
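/**
 * vmw_gb_context_create - Allocate a device id for a guest-backed context
 * and issue the define command.
 *
 * @res: Pointer to the context resource.
 */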
static int vmw_gb_context_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBContext body;
} *cmd;
if (likely(res->id != -1))
return 0;
ret = vmw_resource_alloc_id(res);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a context id.\n");
goto out_no_id;
}
if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
ret = -EBUSY;
goto out_no_fifo;
}
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
}
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
return 0;
out_no_fifo:
vmw_resource_release_id(res);
out_no_id:
return ret;
}
static int vmw_gb_context_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBindGBContext body;
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->guest_memory_dirty;
res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
static int vmw_gb_context_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
struct {
SVGA3dCmdHeader header;
SVGA3dCmdReadbackGBContext body;
} *cmd1;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBindGBContext body;
} *cmd2;
uint32_t submit_size;
uint8_t *cmd;
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
vmw_binding_state_scrub(uctx->cbs);
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
cmd2 = (void *) cmd;
if (readback) {
cmd1 = (void *) cmd;
cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.cid = res->id;
cmd2 = (void *) (&cmd1[1]);
}
cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.cid = res->id;
cmd2->body.mobid = SVGA3D_INVALID_ID;
vmw_cmd_commit(dev_priv, submit_size);
mutex_unlock(&dev_priv->binding_mutex);
/*
* Create a fence object and fence the backup buffer.
*/
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
vmw_bo_fence_single(bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
return 0;
}
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyGBContext body;
} *cmd;
if (likely(res->id == -1))
return 0;
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
vmw_fifo_resource_dec(dev_priv);
return 0;
}
/*
* DX context.
*/
static int vmw_dx_context_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXDefineContext body;
} *cmd;
if (likely(res->id != -1))
return 0;
ret = vmw_resource_alloc_id(res);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a context id.\n");
goto out_no_id;
}
if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
ret = -EBUSY;
goto out_no_fifo;
}
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
}
cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
return 0;
out_no_fifo:
vmw_resource_release_id(res);
out_no_id:
return ret;
}
static int vmw_dx_context_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindContext body;
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->guest_memory_dirty;
res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
/**
* vmw_dx_context_scrub_cotables - Scrub all bindings and
* cotables from a context
*
* @ctx: Pointer to the context resource
* @readback: Whether to save the otable contents on scrubbing.
*
* COtables must be unbound before their context, but unbinding requires
* the backup buffer being reserved, whereas scrubbing does not.
* This function scrubs all cotables of a context, potentially reading back
* the contents into their backup buffers. However, scrubbing cotables
* also makes the device context invalid, so scrub all bindings first so
* that doesn't have to be done later with an invalid context.
*/
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
bool readback)
{
struct vmw_user_context *uctx =
container_of(ctx, struct vmw_user_context, res);
u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
int i;
vmw_binding_state_scrub(uctx->cbs);
for (i = 0; i < cotable_max; ++i) {
struct vmw_resource *res;
/* Avoid racing with ongoing cotable destruction. */
spin_lock(&uctx->cotable_lock);
res = uctx->cotables[vmw_cotable_scrub_order[i]];
if (res)
res = vmw_resource_reference_unless_doomed(res);
spin_unlock(&uctx->cotable_lock);
if (!res)
continue;
WARN_ON(vmw_cotable_scrub(res, readback));
vmw_resource_unreference(&res);
}
}
static int vmw_dx_context_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXReadbackContext body;
} *cmd1;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindContext body;
} *cmd2;
uint32_t submit_size;
uint8_t *cmd;
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
vmw_dx_context_scrub_cotables(res, readback);
if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
readback) {
WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
if (vmw_query_readback_all(uctx->dx_query_mob))
DRM_ERROR("Failed to read back query states\n");
}
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
cmd2 = (void *) cmd;
if (readback) {
cmd1 = (void *) cmd;
cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.cid = res->id;
cmd2 = (void *) (&cmd1[1]);
}
cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.cid = res->id;
cmd2->body.mobid = SVGA3D_INVALID_ID;
vmw_cmd_commit(dev_priv, submit_size);
mutex_unlock(&dev_priv->binding_mutex);
/*
* Create a fence object and fence the backup buffer.
*/
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
vmw_bo_fence_single(bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
return 0;
}
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXDestroyContext body;
} *cmd;
if (likely(res->id == -1))
return 0;
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
vmw_fifo_resource_dec(dev_priv);
return 0;
}
/*
* User-space context management:
*/
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
return &(container_of(base, struct vmw_user_context, base)->res);
}
static void vmw_user_context_free(struct vmw_resource *res)
{
struct vmw_user_context *ctx =
container_of(res, struct vmw_user_context, res);
if (ctx->cbs)
vmw_binding_state_free(ctx->cbs);
(void) vmw_context_bind_dx_query(res, NULL);
ttm_base_object_kfree(ctx, base);
}
/*
* This function is called when user space has no more references on the
* base object. It releases the base-object's reference on the resource object.
*/
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_user_context *ctx =
container_of(base, struct vmw_user_context, base);
struct vmw_resource *res = &ctx->res;
*p_base = NULL;
vmw_resource_unreference(&res);
}
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
return ttm_ref_object_base_unref(tfile, arg->cid);
}
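/**
 * vmw_context_define - Create a new context resource and its user-space
 * handle.
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument.
 * @file_priv: Identifies the calling file.
 * @dx: Whether to create a DX context.
 *
 * Called from the context define ioctls. Returns 0 on success with the
 * new handle stored in the argument, or a negative error code on failure.
 */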
static int vmw_context_define(struct drm_device *dev, void *data,
struct drm_file *file_priv, bool dx)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_context *ctx;
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
if (!has_sm4_context(dev_priv) && dx) {
VMW_DEBUG_USER("DX contexts not supported by device.\n");
return -EINVAL;
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (unlikely(!ctx)) {
ret = -ENOMEM;
goto out_ret;
}
res = &ctx->res;
ctx->base.shareable = false;
ctx->base.tfile = NULL;
/*
* From here on, the destructor takes over resource freeing.
*/
ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
if (unlikely(ret != 0))
goto out_ret;
tmp = vmw_resource_reference(&ctx->res);
ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
&vmw_user_context_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
goto out_err;
}
arg->cid = ctx->base.handle;
out_err:
vmw_resource_unreference(&res);
out_ret:
return ret;
}
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return vmw_context_define(dev, data, file_priv, false);
}
int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
struct drm_vmw_context_arg *rep = &arg->rep;
switch (arg->req) {
case drm_vmw_context_legacy:
return vmw_context_define(dev, rep, file_priv, false);
case drm_vmw_context_dx:
return vmw_context_define(dev, rep, file_priv, true);
default:
break;
}
return -EINVAL;
}
/**
* vmw_context_binding_list - Return a list of context bindings
*
* @ctx: The context resource
*
* Returns the current list of bindings of the given context. Note that
* this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
struct vmw_user_context *uctx =
container_of(ctx, struct vmw_user_context, res);
return vmw_binding_state_list(uctx->cbs);
}
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
return container_of(ctx, struct vmw_user_context, res)->man;
}
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
SVGACOTableType cotable_type)
{
u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
if (cotable_type >= cotable_max)
return ERR_PTR(-EINVAL);
return container_of(ctx, struct vmw_user_context, res)->
cotables[cotable_type];
}
/**
* vmw_context_binding_state -
* Return a pointer to a context binding state structure
*
* @ctx: The context resource
*
* Returns the current state of bindings of the given context. Note that
* this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
return container_of(ctx, struct vmw_user_context, res)->cbs;
}
/**
* vmw_context_bind_dx_query -
* Sets query MOB for the context. If @mob is NULL, then this function will
* remove the association between the MOB and the context. This function
* assumes the binding_mutex is held.
*
* @ctx_res: The context resource
* @mob: a reference to the query MOB
*
* Returns -EINVAL if a MOB has already been set and does not match the one
* specified in the parameter. 0 otherwise.
*/
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
struct vmw_bo *mob)
{
struct vmw_user_context *uctx =
container_of(ctx_res, struct vmw_user_context, res);
if (mob == NULL) {
if (uctx->dx_query_mob) {
uctx->dx_query_mob->dx_query_ctx = NULL;
vmw_bo_unreference(&uctx->dx_query_mob);
uctx->dx_query_mob = NULL;
}
return 0;
}
/* Can only have one MOB per context for queries */
if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
return -EINVAL;
mob->dx_query_ctx = ctx_res;
if (!uctx->dx_query_mob)
uctx->dx_query_mob = vmw_bo_reference(mob);
return 0;
}
/**
* vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
*
* @ctx_res: The context resource
*/
struct vmw_bo *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
struct vmw_user_context *uctx =
container_of(ctx_res, struct vmw_user_context, res);
return uctx->dx_query_mob;
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_context.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/objtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cc_platform.h>
#include <asm/hypervisor.h>
#include <drm/drm_ioctl.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_msg_x86.h"
#include "vmwgfx_msg_arm64.h"
#include "vmwgfx_mksstat.h"
#define MESSAGE_STATUS_SUCCESS 0x0001
#define MESSAGE_STATUS_DORECV 0x0002
#define MESSAGE_STATUS_CPT 0x0010
#define MESSAGE_STATUS_HB 0x0080
#define RPCI_PROTOCOL_NUM 0x49435052
#define GUESTMSG_FLAG_COOKIE 0x80000000
#define RETRIES 3
#define VMW_HYPERVISOR_MAGIC 0x564D5868
#define VMW_PORT_CMD_MSG 30
#define VMW_PORT_CMD_HB_MSG 0
#define VMW_PORT_CMD_OPEN_CHANNEL (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_CLOSE_CHANNEL (MSG_TYPE_CLOSE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_SENDSIZE (MSG_TYPE_SENDSIZE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_RECVSIZE (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_MKS_GUEST_STATS 85
#define VMW_PORT_CMD_MKSGS_RESET (0 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_ADD_PPN (1 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_REMOVE_PPN (2 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)
#define MAX_USER_MSG_LENGTH PAGE_SIZE
static u32 vmw_msg_enabled = 1;
enum rpc_msg_type {
MSG_TYPE_OPEN,
MSG_TYPE_SENDSIZE,
MSG_TYPE_SENDPAYLOAD,
MSG_TYPE_RECVSIZE,
MSG_TYPE_RECVPAYLOAD,
MSG_TYPE_RECVSTATUS,
MSG_TYPE_CLOSE,
};
struct rpc_channel {
u16 channel_id;
u32 cookie_high;
u32 cookie_low;
};
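/*
* Overview of the RPC exchange implemented below: a channel is opened with
* VMW_PORT_CMD_OPEN_CHANNEL, a send consists of SENDSIZE followed by the
* SENDPAYLOAD data (through the high-bandwidth port when available,
* otherwise 4 bytes at a time through the backdoor), a receive consists of
* RECVSIZE, RECVPAYLOAD and a RECVSTATUS ack, and the channel is finally
* closed with VMW_PORT_CMD_CLOSE_CHANNEL.
*/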
#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
/* Kernel mksGuestStats counter names and descriptions; same order as enum mksstat_kern_stats_t */
static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
{
{ "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
{ "vmw_cotable_resize", "vmw_cotable_resize" },
};
#endif
/**
* vmw_open_channel
*
* @channel: RPC channel
* @protocol: RPC protocol number, e.g. RPCI_PROTOCOL_NUM.
*
* Returns: 0 on success
*/
static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
{
unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL,
(protocol | GUESTMSG_FLAG_COOKIE), si, di,
0,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
return -EINVAL;
channel->channel_id = HIGH_WORD(edx);
channel->cookie_high = si;
channel->cookie_low = di;
return 0;
}
/**
* vmw_close_channel
*
* @channel: RPC channel
*
* Returns: 0 on success
*/
static int vmw_close_channel(struct rpc_channel *channel)
{
unsigned long eax, ebx, ecx, edx, si, di;
/* Set up additional parameters */
si = channel->cookie_high;
di = channel->cookie_low;
VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL,
0, si, di,
channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
return -EINVAL;
return 0;
}
/**
* vmw_port_hb_out - Send the message payload either through the
* high-bandwidth port if available, or through the backdoor otherwise.
* @channel: The rpc channel.
* @msg: NULL-terminated message.
* @hb: Whether the high-bandwidth port is available.
*
* Return: The port status.
*/
static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
const char *msg, bool hb)
{
unsigned long si, di, eax, ebx, ecx, edx;
unsigned long msg_len = strlen(msg);
/* HB port can't access encrypted memory. */
if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
unsigned long bp = channel->cookie_high;
u32 channel_id = (channel->channel_id << 16);
si = (uintptr_t) msg;
di = channel->cookie_low;
VMW_PORT_HB_OUT(
(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
msg_len, si, di,
VMWARE_HYPERVISOR_HB | channel_id |
VMWARE_HYPERVISOR_OUT,
VMW_HYPERVISOR_MAGIC, bp,
eax, ebx, ecx, edx, si, di);
return ebx;
}
/* HB port not available. Send the message 4 bytes at a time. */
ecx = MESSAGE_STATUS_SUCCESS << 16;
while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
unsigned int bytes = min_t(size_t, msg_len, 4);
unsigned long word = 0;
memcpy(&word, msg, bytes);
msg_len -= bytes;
msg += bytes;
si = channel->cookie_high;
di = channel->cookie_low;
VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
word, si, di,
channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
}
return ecx;
}
/**
* vmw_port_hb_in - Receive the message payload either through the
* high-bandwidth port if available, or through the backdoor otherwise.
* @channel: The rpc channel.
* @reply: Pointer to buffer holding reply.
* @reply_len: Length of the reply.
* @hb: Whether the high-bandwidth port is available.
*
* Return: The port status.
*/
static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
unsigned long reply_len, bool hb)
{
unsigned long si, di, eax, ebx, ecx, edx;
/* HB port can't access encrypted memory */
if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
unsigned long bp = channel->cookie_low;
u32 channel_id = (channel->channel_id << 16);
si = channel->cookie_high;
di = (uintptr_t) reply;
VMW_PORT_HB_IN(
(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
reply_len, si, di,
VMWARE_HYPERVISOR_HB | channel_id,
VMW_HYPERVISOR_MAGIC, bp,
eax, ebx, ecx, edx, si, di);
return ebx;
}
/* HB port not available. Retrieve the message 4 bytes at a time. */
ecx = MESSAGE_STATUS_SUCCESS << 16;
while (reply_len) {
unsigned int bytes = min_t(unsigned long, reply_len, 4);
si = channel->cookie_high;
di = channel->cookie_low;
VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
MESSAGE_STATUS_SUCCESS, si, di,
channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
break;
memcpy(reply, &ebx, bytes);
reply_len -= bytes;
reply += bytes;
}
return ecx;
}
/**
* vmw_send_msg: Sends a message to the host
*
* @channel: RPC channel
* @msg: NULL terminated string
*
* Returns: 0 on success
*/
static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
{
unsigned long eax, ebx, ecx, edx, si, di;
size_t msg_len = strlen(msg);
int retries = 0;
while (retries < RETRIES) {
retries++;
/* Set up additional parameters */
si = channel->cookie_high;
di = channel->cookie_low;
VMW_PORT(VMW_PORT_CMD_SENDSIZE,
msg_len, si, di,
channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
/* The host did not accept the message size. Give up. */
return -EINVAL;
}
/* Send msg */
ebx = vmw_port_hb_out(channel, msg,
!!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
return 0;
} else if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;
} else {
break;
}
}
return -EINVAL;
}
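/*
* The high-bandwidth backdoor macros (VMW_PORT_HB_OUT/IN) pass an argument
* in %rbp on x86, which objtool cannot validate as a regular stack frame;
* hence the STACK_FRAME_NON_STANDARD annotations below and after
* vmw_recv_msg().
*/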
STACK_FRAME_NON_STANDARD(vmw_send_msg);
/**
* vmw_recv_msg: Receives a message from the host
*
* Note: It is the caller's responsibility to call kfree() on msg.
*
* @channel: channel opened by vmw_open_channel
* @msg: [OUT] message received from the host
* @msg_len: [OUT] length of the message received from the host
*
* Returns: 0 on success, negative error code otherwise.
*/
static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
size_t *msg_len)
{
unsigned long eax, ebx, ecx, edx, si, di;
char *reply;
size_t reply_len;
int retries = 0;
*msg_len = 0;
*msg = NULL;
while (retries < RETRIES) {
retries++;
/* Set up additional parameters */
si = channel->cookie_high;
di = channel->cookie_low;
VMW_PORT(VMW_PORT_CMD_RECVSIZE,
0, si, di,
channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
DRM_ERROR("Failed to get reply size for host message.\n");
return -EINVAL;
}
/* No reply available. This is okay. */
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_DORECV) == 0)
return 0;
reply_len = ebx;
reply = kzalloc(reply_len + 1, GFP_KERNEL);
if (!reply) {
DRM_ERROR("Cannot allocate memory for host message reply.\n");
return -ENOMEM;
}
/* Receive buffer */
ebx = vmw_port_hb_in(channel, reply, reply_len,
!!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
reply = NULL;
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;
}
return -EINVAL;
}
reply[reply_len] = '\0';
/* Ack buffer */
si = channel->cookie_high;
di = channel->cookie_low;
VMW_PORT(VMW_PORT_CMD_RECVSTATUS,
MESSAGE_STATUS_SUCCESS, si, di,
channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
reply = NULL;
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;
}
return -EINVAL;
}
break;
}
if (!reply)
return -EINVAL;
*msg_len = reply_len;
*msg = reply;
return 0;
}
STACK_FRAME_NON_STANDARD(vmw_recv_msg);
/**
* vmw_host_get_guestinfo: Gets a GuestInfo parameter
*
* Gets the value of a GuestInfo.* parameter. The value returned will be in
* a string, and it is up to the caller to post-process.
*
* @guest_info_param: Parameter to get, e.g. GuestInfo.svga.gl3
* @buffer: Buffer to receive the value. If NULL, *length is set to the reply size.
* @length: Size of the buffer pointed to by @buffer. Set to the size of the reply upon return.
*
* Returns: 0 on success
*/
int vmw_host_get_guestinfo(const char *guest_info_param,
char *buffer, size_t *length)
{
struct rpc_channel channel;
char *msg, *reply = NULL;
size_t reply_len = 0;
if (!vmw_msg_enabled)
return -ENODEV;
if (!guest_info_param || !length)
return -EINVAL;
msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
if (!msg) {
DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
guest_info_param);
return -ENOMEM;
}
if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
goto out_open;
if (vmw_send_msg(&channel, msg) ||
vmw_recv_msg(&channel, (void *) &reply, &reply_len))
goto out_msg;
vmw_close_channel(&channel);
if (buffer && reply && reply_len > 0) {
/* Remove the reply code, which is the first 2 characters of
* the reply
*/
reply_len = max(reply_len - 2, (size_t) 0);
reply_len = min(reply_len, *length);
if (reply_len > 0)
memcpy(buffer, reply + 2, reply_len);
}
*length = reply_len;
kfree(reply);
kfree(msg);
return 0;
out_msg:
vmw_close_channel(&channel);
kfree(reply);
out_open:
*length = 0;
kfree(msg);
DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);
return -EINVAL;
}
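/*
* Illustrative use (hypothetical caller, not part of this file): query the
* size first with a NULL buffer, then fetch the value; buf ends up
* NUL-terminated and len holds the value length. The size returned by the
* first call still includes the two-character status code, so it is a safe
* upper bound for the allocation.
*
*	size_t len = 0;
*	char *buf;
*
*	vmw_host_get_guestinfo("GuestInfo.svga.gl3", NULL, &len);
*	buf = kzalloc(len + 1, GFP_KERNEL);
*	if (buf && !vmw_host_get_guestinfo("GuestInfo.svga.gl3", buf, &len))
*		pr_info("gl3: %s\n", buf);
*	kfree(buf);
*/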
/**
* vmw_host_printf: Sends a log message to the host
*
* @fmt: Regular printf format string and arguments
*
* Returns: 0 on success
*/
__printf(1, 2)
int vmw_host_printf(const char *fmt, ...)
{
va_list ap;
struct rpc_channel channel;
char *msg;
char *log;
int ret = 0;
if (!vmw_msg_enabled)
return -ENODEV;
if (!fmt)
return ret;
va_start(ap, fmt);
log = kvasprintf(GFP_KERNEL, fmt, ap);
va_end(ap);
if (!log) {
DRM_ERROR("Cannot allocate memory for the log message.\n");
return -ENOMEM;
}
msg = kasprintf(GFP_KERNEL, "log %s", log);
if (!msg) {
DRM_ERROR("Cannot allocate memory for host log message.\n");
kfree(log);
return -ENOMEM;
}
if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
goto out_open;
if (vmw_send_msg(&channel, msg))
goto out_msg;
vmw_close_channel(&channel);
kfree(msg);
kfree(log);
return 0;
out_msg:
vmw_close_channel(&channel);
out_open:
kfree(msg);
kfree(log);
DRM_ERROR("Failed to send host log message.\n");
return -EINVAL;
}
/**
* vmw_msg_ioctl: Sends and receives a message to/from the host on behalf of user-space
*
* Sends a message from user-space to host.
* Can also receive a result from host and return that to user-space.
*
* @dev: Identifies the drm device.
* @data: Pointer to the ioctl argument.
* @file_priv: Identifies the caller.
* Return: Zero on success, negative error code on error.
*/
int vmw_msg_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_msg_arg *arg =
(struct drm_vmw_msg_arg *)data;
struct rpc_channel channel;
char *msg;
int length;
msg = kmalloc(MAX_USER_MSG_LENGTH, GFP_KERNEL);
if (!msg) {
DRM_ERROR("Cannot allocate memory for log message.\n");
return -ENOMEM;
}
length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send),
MAX_USER_MSG_LENGTH);
if (length < 0 || length >= MAX_USER_MSG_LENGTH) {
DRM_ERROR("Userspace message access failure.\n");
kfree(msg);
return -EINVAL;
}
if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
DRM_ERROR("Failed to open channel.\n");
goto out_open;
}
if (vmw_send_msg(&channel, msg)) {
DRM_ERROR("Failed to send message to host.\n");
goto out_msg;
}
if (!arg->send_only) {
char *reply = NULL;
size_t reply_len = 0;
if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) {
DRM_ERROR("Failed to receive message from host.\n");
goto out_msg;
}
if (reply && reply_len > 0) {
if (copy_to_user((void __user *)((unsigned long)arg->receive),
reply, reply_len)) {
DRM_ERROR("Failed to copy message to userspace.\n");
kfree(reply);
goto out_msg;
}
arg->receive_len = (__u32)reply_len;
}
kfree(reply);
}
vmw_close_channel(&channel);
kfree(msg);
return 0;
out_msg:
vmw_close_channel(&channel);
out_open:
kfree(msg);
return -EINVAL;
}
/**
* reset_ppn_array: Resets a PPN64 array to INVALID_PPN64 content
*
* @arr: Array to reset.
* @size: Array length.
*/
static inline void reset_ppn_array(PPN64 *arr, size_t size)
{
size_t i;
BUG_ON(!arr || size == 0);
for (i = 0; i < size; ++i)
arr[i] = INVALID_PPN64;
}
/**
* hypervisor_ppn_reset_all: Removes all mksGuestStat instance descriptors from
* the hypervisor. All related pages should be subsequently unpinned or freed.
*
*/
static inline void hypervisor_ppn_reset_all(void)
{
unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
VMW_PORT(VMW_PORT_CMD_MKSGS_RESET,
0, si, di,
0,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
}
/**
* hypervisor_ppn_add: Adds a single mksGuestStat instance descriptor to the
* hypervisor. Any related userspace pages should be pinned in advance.
*
* @pfn: Physical page number of the instance descriptor
*/
static inline void hypervisor_ppn_add(PPN64 pfn)
{
unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
VMW_PORT(VMW_PORT_CMD_MKSGS_ADD_PPN,
(unsigned long)pfn, si, di,
0,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
}
/**
* hypervisor_ppn_remove: Removes a single mksGuestStat instance descriptor from
* the hypervisor. All related pages should be subsequently unpinned or freed.
*
* @pfn: Physical page number of the instance descriptor
*/
static inline void hypervisor_ppn_remove(PPN64 pfn)
{
unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
VMW_PORT(VMW_PORT_CMD_MKSGS_REMOVE_PPN,
(unsigned long)pfn, si, di,
0,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
}
#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
/* Order of the total number of pages used for kernel-internal mksGuestStat; at least 2 */
#define MKSSTAT_KERNEL_PAGES_ORDER 2
/* Header to the text description of mksGuestStat instance descriptor */
#define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"
/**
* mksstat_init_record_time: Initializes an MKSGuestStatCounterTime-based record
* for the respective mksGuestStat index.
*
* @stat_idx: Index of the MKSGuestStatCounterTime-based mksGuestStat record.
* @pstat: Pointer to array of MKSGuestStatCounterTime.
* @pinfo: Pointer to array of MKSGuestStatInfoEntry.
* @pstrs: Pointer to current end of the name/description sequence.
* Return: Pointer to the new end of the names/description sequence.
*/
static inline char *mksstat_init_record_time(mksstat_kern_stats_t stat_idx,
MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
{
char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;
strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);
pinfo[stat_idx].name.s = pstrs;
pinfo[stat_idx].description.s = pstrd;
pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_TIME;
pinfo[stat_idx].stat.counterTime = &pstat[stat_idx];
return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
}
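/*
* The strings area built by the helper above is a packed sequence of
* NUL-terminated name/description pairs; each call appends one pair and
* returns the position just past it so that records can be chained back to
* back, as done in mksstat_init_kern_id() below.
*/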
/**
* mksstat_init_kern_id: Creates a single mksGuestStat instance descriptor and
* kernel-internal counters. Adds PFN mapping to the hypervisor.
*
* Create a single mksGuestStat instance descriptor and corresponding structures
* for all kernel-internal counters. The corresponding PFNs are mapped with the
* hypervisor.
*
* @ppage: Output pointer to page containing the instance descriptor.
* Return: Zero on success, negative error code on error.
*/
static int mksstat_init_kern_id(struct page **ppage)
{
MKSGuestStatInstanceDescriptor *pdesc;
MKSGuestStatCounterTime *pstat;
MKSGuestStatInfoEntry *pinfo;
char *pstrs, *pstrs_acc;
/* Allocate pages for the kernel-internal instance descriptor */
struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, MKSSTAT_KERNEL_PAGES_ORDER);
if (!page)
return -ENOMEM;
pdesc = page_address(page);
pstat = vmw_mksstat_get_kern_pstat(pdesc);
pinfo = vmw_mksstat_get_kern_pinfo(pdesc);
pstrs = vmw_mksstat_get_kern_pstrs(pdesc);
/* Set up all kernel-internal counters and corresponding structures */
pstrs_acc = pstrs;
pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc);
pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_COTABLE_RESIZE, pstat, pinfo, pstrs_acc);
/* Add new counters above, in their order of appearance in mksstat_kern_stats_t */
BUG_ON(pstrs_acc - pstrs > PAGE_SIZE);
/* Set up the kernel-internal instance descriptor */
pdesc->reservedMBZ = 0;
pdesc->statStartVA = (uintptr_t)pstat;
pdesc->strsStartVA = (uintptr_t)pstrs;
pdesc->statLength = sizeof(*pstat) * MKSSTAT_KERN_COUNT;
pdesc->infoLength = sizeof(*pinfo) * MKSSTAT_KERN_COUNT;
pdesc->strsLength = pstrs_acc - pstrs;
snprintf(pdesc->description, ARRAY_SIZE(pdesc->description) - 1, "%s pid=%d",
MKSSTAT_KERNEL_DESCRIPTION, current->pid);
pdesc->statPPNs[0] = page_to_pfn(virt_to_page(pstat));
reset_ppn_array(pdesc->statPPNs + 1, ARRAY_SIZE(pdesc->statPPNs) - 1);
pdesc->infoPPNs[0] = page_to_pfn(virt_to_page(pinfo));
reset_ppn_array(pdesc->infoPPNs + 1, ARRAY_SIZE(pdesc->infoPPNs) - 1);
pdesc->strsPPNs[0] = page_to_pfn(virt_to_page(pstrs));
reset_ppn_array(pdesc->strsPPNs + 1, ARRAY_SIZE(pdesc->strsPPNs) - 1);
*ppage = page;
hypervisor_ppn_add((PPN64)page_to_pfn(page));
return 0;
}
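/*
* Layout note: the instance descriptor sits at the start of the
* order-MKSSTAT_KERNEL_PAGES_ORDER allocation above, while the stat, info
* and string areas are located through the vmw_mksstat_get_kern_*() helpers
* from vmwgfx_mksstat.h; their page-frame numbers are recorded in the
* descriptor's PPN arrays so that the host can read them directly.
*/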
/**
* vmw_mksstat_get_kern_slot: Acquires a slot for a single kernel-internal
* mksGuestStat instance descriptor.
*
* Find a slot for a single kernel-internal mksGuestStat instance descriptor.
* In case no such was already present, allocate a new one and set up a kernel-
* internal mksGuestStat instance descriptor for the former.
*
* @pid: Process for which a slot is sought.
* @dev_priv: Identifies the drm private device.
* Return: Non-negative slot on success, negative error code on error.
*/
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv)
{
const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2);
size_t i;
for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
const size_t slot = (i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids);
/* Check if an instance descriptor for this pid is already present */
if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot]))
return (int)slot;
/* Set up a new instance descriptor for this pid */
if (!atomic_cmpxchg(&dev_priv->mksstat_kern_pids[slot], 0, MKSSTAT_PID_RESERVED)) {
const int ret = mksstat_init_kern_id(&dev_priv->mksstat_kern_pages[slot]);
if (!ret) {
/* Reset top-timer tracking for this slot */
dev_priv->mksstat_kern_top_timer[slot] = MKSSTAT_KERN_COUNT;
atomic_set(&dev_priv->mksstat_kern_pids[slot], pid);
return (int)slot;
}
atomic_set(&dev_priv->mksstat_kern_pids[slot], 0);
return ret;
}
}
return -ENOSPC;
}
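/*
* The slot search above is an open-addressed probe keyed on the pid hash; a
* free slot is claimed by atomically moving its pid entry from 0 to
* MKSSTAT_PID_RESERVED, which prevents concurrent callers from initializing
* the same slot twice.
*/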
#endif
/**
* vmw_mksstat_cleanup_descriptor: Frees a single userspace-originating
* mksGuestStat instance-descriptor page and unpins all related user pages.
*
* Unpin all user pages related to this instance descriptor and free
* the instance-descriptor page itself.
*
* @page: Page of the instance descriptor.
*/
static void vmw_mksstat_cleanup_descriptor(struct page *page)
{
MKSGuestStatInstanceDescriptor *pdesc = page_address(page);
size_t i;
for (i = 0; i < ARRAY_SIZE(pdesc->statPPNs) && pdesc->statPPNs[i] != INVALID_PPN64; ++i)
unpin_user_page(pfn_to_page(pdesc->statPPNs[i]));
for (i = 0; i < ARRAY_SIZE(pdesc->infoPPNs) && pdesc->infoPPNs[i] != INVALID_PPN64; ++i)
unpin_user_page(pfn_to_page(pdesc->infoPPNs[i]));
for (i = 0; i < ARRAY_SIZE(pdesc->strsPPNs) && pdesc->strsPPNs[i] != INVALID_PPN64; ++i)
unpin_user_page(pfn_to_page(pdesc->strsPPNs[i]));
__free_page(page);
}
/**
* vmw_mksstat_remove_all: Resets all mksGuestStat instance descriptors
* from the hypervisor.
*
* Discard all hypervisor PFN mappings containing active mksGuestStat instance
* descriptors, unpin the related userspace pages and free the related kernel pages.
*
* @dev_priv: Identifies the drm private device.
* Return: Zero on success, negative error code on error.
*/
int vmw_mksstat_remove_all(struct vmw_private *dev_priv)
{
int ret = 0;
size_t i;
/* Discard all PFN mappings with the hypervisor */
hypervisor_ppn_reset_all();
/* Discard all userspace-originating instance descriptors and unpin all related pages */
for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++i) {
const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_user_pids[i]);
if (!pid0)
continue;
if (pid0 != MKSSTAT_PID_RESERVED) {
const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_user_pids[i], pid0, MKSSTAT_PID_RESERVED);
if (!pid1)
continue;
if (pid1 == pid0) {
struct page *const page = dev_priv->mksstat_user_pages[i];
BUG_ON(!page);
dev_priv->mksstat_user_pages[i] = NULL;
atomic_set(&dev_priv->mksstat_user_pids[i], 0);
vmw_mksstat_cleanup_descriptor(page);
continue;
}
}
ret = -EAGAIN;
}
#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
/* Discard all kernel-internal instance descriptors and free all related pages */
for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[i]);
if (!pid0)
continue;
if (pid0 != MKSSTAT_PID_RESERVED) {
const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[i], pid0, MKSSTAT_PID_RESERVED);
if (!pid1)
continue;
if (pid1 == pid0) {
struct page *const page = dev_priv->mksstat_kern_pages[i];
BUG_ON(!page);
dev_priv->mksstat_kern_pages[i] = NULL;
atomic_set(&dev_priv->mksstat_kern_pids[i], 0);
__free_pages(page, MKSSTAT_KERNEL_PAGES_ORDER);
continue;
}
}
ret = -EAGAIN;
}
#endif
return ret;
}
/**
* vmw_mksstat_reset_ioctl: Resets all mksGuestStat instance descriptors
* from the hypervisor.
*
* Discard all hypervisor PFN mappings, containing active mksGuestStat instance
* descriptors, unpin the related userspace pages and free the related kernel pages.
*
* @dev: Identifies the drm device.
* @data: Pointer to the ioctl argument.
* @file_priv: Identifies the caller; unused.
* Return: Zero on success, negative error code on error.
*/
int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *const dev_priv = vmw_priv(dev);
return vmw_mksstat_remove_all(dev_priv);
}
/**
* vmw_mksstat_add_ioctl: Creates a single userspace-originating mksGuestStat
* instance descriptor and registers that with the hypervisor.
*
* Create a hypervisor PFN mapping, containing a single mksGuestStat instance
* descriptor and pin the corresponding userspace pages.
*
* @dev: Identifies the drm device.
* @data: Pointer to the ioctl argument.
* @file_priv: Identifies the caller; unused.
* Return: Zero on success, negative error code on error.
*/
int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_mksstat_add_arg *arg =
(struct drm_vmw_mksstat_add_arg *) data;
struct vmw_private *const dev_priv = vmw_priv(dev);
const size_t num_pages_stat = PFN_UP(arg->stat_len);
const size_t num_pages_info = PFN_UP(arg->info_len);
const size_t num_pages_strs = PFN_UP(arg->strs_len);
long desc_len;
long nr_pinned_stat;
long nr_pinned_info;
long nr_pinned_strs;
MKSGuestStatInstanceDescriptor *pdesc;
struct page *page = NULL;
struct page **pages_stat = NULL;
struct page **pages_info = NULL;
struct page **pages_strs = NULL;
size_t i, slot;
int ret_err = -ENOMEM;
arg->id = -1;
if (!arg->stat || !arg->info || !arg->strs)
return -EINVAL;
if (!arg->stat_len || !arg->info_len || !arg->strs_len)
return -EINVAL;
if (!arg->description)
return -EINVAL;
if (num_pages_stat > ARRAY_SIZE(pdesc->statPPNs) ||
num_pages_info > ARRAY_SIZE(pdesc->infoPPNs) ||
num_pages_strs > ARRAY_SIZE(pdesc->strsPPNs))
return -EINVAL;
/* Find an available slot in the mksGuestStats user array and reserve it */
for (slot = 0; slot < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++slot)
if (!atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], 0, MKSSTAT_PID_RESERVED))
break;
if (slot == ARRAY_SIZE(dev_priv->mksstat_user_pids))
return -ENOSPC;
BUG_ON(dev_priv->mksstat_user_pages[slot]);
/* Allocate statically-sized temp arrays for pages -- too big to keep in frame */
pages_stat = (struct page **)kmalloc_array(
ARRAY_SIZE(pdesc->statPPNs) +
ARRAY_SIZE(pdesc->infoPPNs) +
ARRAY_SIZE(pdesc->strsPPNs), sizeof(*pages_stat), GFP_KERNEL);
if (!pages_stat)
goto err_nomem;
pages_info = pages_stat + ARRAY_SIZE(pdesc->statPPNs);
pages_strs = pages_info + ARRAY_SIZE(pdesc->infoPPNs);
/* Allocate a page for the instance descriptor */
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
goto err_nomem;
/* Set up the instance descriptor */
pdesc = page_address(page);
pdesc->reservedMBZ = 0;
pdesc->statStartVA = arg->stat;
pdesc->strsStartVA = arg->strs;
pdesc->statLength = arg->stat_len;
pdesc->infoLength = arg->info_len;
pdesc->strsLength = arg->strs_len;
desc_len = strncpy_from_user(pdesc->description, u64_to_user_ptr(arg->description),
ARRAY_SIZE(pdesc->description) - 1);
if (desc_len < 0) {
ret_err = -EFAULT;
goto err_nomem;
}
reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs));
reset_ppn_array(pdesc->infoPPNs, ARRAY_SIZE(pdesc->infoPPNs));
reset_ppn_array(pdesc->strsPPNs, ARRAY_SIZE(pdesc->strsPPNs));
/* Pin mksGuestStat user pages and store those in the instance descriptor */
nr_pinned_stat = pin_user_pages_fast(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat);
if (num_pages_stat != nr_pinned_stat)
goto err_pin_stat;
for (i = 0; i < num_pages_stat; ++i)
pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]);
nr_pinned_info = pin_user_pages_fast(arg->info, num_pages_info, FOLL_LONGTERM, pages_info);
if (num_pages_info != nr_pinned_info)
goto err_pin_info;
for (i = 0; i < num_pages_info; ++i)
pdesc->infoPPNs[i] = page_to_pfn(pages_info[i]);
nr_pinned_strs = pin_user_pages_fast(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs);
if (num_pages_strs != nr_pinned_strs)
goto err_pin_strs;
for (i = 0; i < num_pages_strs; ++i)
pdesc->strsPPNs[i] = page_to_pfn(pages_strs[i]);
/* Send the descriptor to the host via a hypervisor call. The mksGuestStat
pages will remain in use until the user requests a matching remove stats
or a stats reset occurs. */
hypervisor_ppn_add((PPN64)page_to_pfn(page));
dev_priv->mksstat_user_pages[slot] = page;
atomic_set(&dev_priv->mksstat_user_pids[slot], task_pgrp_vnr(current));
arg->id = slot;
DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n", current->pid, (int)desc_len, pdesc->description, slot);
kfree(pages_stat);
return 0;
err_pin_strs:
if (nr_pinned_strs > 0)
unpin_user_pages(pages_strs, nr_pinned_strs);
err_pin_info:
if (nr_pinned_info > 0)
unpin_user_pages(pages_info, nr_pinned_info);
err_pin_stat:
if (nr_pinned_stat > 0)
unpin_user_pages(pages_stat, nr_pinned_stat);
err_nomem:
atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
if (page)
__free_page(page);
kfree(pages_stat);
return ret_err;
}
/**
* vmw_mksstat_remove_ioctl: Removes a single userspace-originating mksGuestStat
* instance descriptor from the hypervisor.
*
* Discard a hypervisor PFN mapping, containing a single mksGuestStat instance
* descriptor and unpin the corresponding userspace pages.
*
* @dev: Identifies the drm device.
* @data: Pointer to the ioctl argument.
* @file_priv: Identifies the caller; unused.
* Return: Zero on success, negative error code on error.
*/
int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_mksstat_remove_arg *arg =
(struct drm_vmw_mksstat_remove_arg *) data;
struct vmw_private *const dev_priv = vmw_priv(dev);
const size_t slot = arg->id;
pid_t pgid, pid;
if (slot >= ARRAY_SIZE(dev_priv->mksstat_user_pids))
return -EINVAL;
DRM_DEV_INFO(dev->dev, "pid=%d arg.id=%zu\n", current->pid, slot);
pgid = task_pgrp_vnr(current);
pid = atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], pgid, MKSSTAT_PID_RESERVED);
if (!pid)
return 0;
if (pid == pgid) {
struct page *const page = dev_priv->mksstat_user_pages[slot];
BUG_ON(!page);
dev_priv->mksstat_user_pages[slot] = NULL;
atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
hypervisor_ppn_remove((PPN64)page_to_pfn(page));
vmw_mksstat_cleanup_descriptor(page);
return 0;
}
return -EAGAIN;
}
/**
* vmw_disable_backdoor: Disables all backdoor communication
* with the hypervisor.
*/
void vmw_disable_backdoor(void)
{
vmw_msg_enabled = 0;
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_msg.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
static void vmw_bo_release(struct vmw_bo *vbo)
{
vmw_bo_unmap(vbo);
drm_gem_object_release(&vbo->tbo.base);
}
/**
* vmw_bo_free - vmw_bo destructor
*
* @bo: Pointer to the embedded struct ttm_buffer_object
*/
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
WARN_ON(vbo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_release(vbo);
kfree(vbo);
}
/**
* vmw_bo_pin_in_placement - Validate a buffer to placement.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @placement: The placement to pin it in.
* @interruptible: Use interruptible wait.
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
*/
static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
struct vmw_bo *buf,
struct ttm_placement *placement,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->tbo;
int ret;
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
ret = ttm_bo_validate(bo, placement, &ctx);
if (!ret)
vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
err:
return ret;
}
/**
* vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
*
* This function takes the reservation_sem in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @interruptible: Use interruptible wait.
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_bo *buf,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->tbo;
int ret;
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
vmw_bo_placement_set(buf,
VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_GMR);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto out_unreserve;
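/* GMR | VRAM placement failed; retry with VRAM only. */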
vmw_bo_placement_set(buf,
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
out_unreserve:
if (!ret)
vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
err:
return ret;
}
/**
* vmw_bo_pin_in_vram - Move a buffer to vram.
*
* This function takes the reservation_sem in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @interruptible: Use interruptible wait.
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
struct vmw_bo *buf,
bool interruptible)
{
return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
interruptible);
}
/**
* vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
*
* This function takes the reservation_sem in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to pin.
* @interruptible: Use interruptible wait.
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
struct vmw_bo *buf,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->tbo;
int ret = 0;
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err_unlock;
/*
* Is this buffer already in vram but not at the start of it?
* In that case, evict it first because TTM isn't good at handling
* that situation.
*/
if (bo->resource->mem_type == TTM_PL_VRAM &&
bo->resource->start < PFN_UP(bo->resource->size) &&
bo->resource->start > 0 &&
buf->tbo.pin_count == 0) {
ctx.interruptible = false;
vmw_bo_placement_set(buf,
VMW_BO_DOMAIN_SYS,
VMW_BO_DOMAIN_SYS);
(void)ttm_bo_validate(bo, &buf->placement, &ctx);
}
vmw_bo_placement_set(buf,
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM);
buf->places[0].lpfn = PFN_UP(bo->resource->size);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->resource->start != 0);
if (!ret)
vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
err_unlock:
return ret;
}
/**
* vmw_bo_unpin - Unpin the given buffer without moving it.
*
* This function takes the reservation_sem in write mode.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to unpin.
* @interruptible: Use interruptible wait.
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_unpin(struct vmw_private *dev_priv,
struct vmw_bo *buf,
bool interruptible)
{
struct ttm_buffer_object *bo = &buf->tbo;
int ret;
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
vmw_bo_pin_reserved(buf, false);
ttm_bo_unreserve(bo);
err:
return ret;
}
/**
* vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
* of a buffer.
*
* @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
* @ptr: SVGAGuestPtr returning the result.
*/
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
SVGAGuestPtr *ptr)
{
if (bo->resource->mem_type == TTM_PL_VRAM) {
ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
ptr->offset = bo->resource->start << PAGE_SHIFT;
} else {
ptr->gmrId = bo->resource->start;
ptr->offset = 0;
}
}
/**
* vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
*
* @vbo: The buffer object. Must be reserved.
* @pin: Whether to pin or unpin.
*
*/
void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
{
struct ttm_operation_ctx ctx = { false, true };
struct ttm_place pl;
struct ttm_placement placement;
struct ttm_buffer_object *bo = &vbo->tbo;
uint32_t old_mem_type = bo->resource->mem_type;
int ret;
dma_resv_assert_held(bo->base.resv);
if (pin == !!bo->pin_count)
return;
pl.fpfn = 0;
pl.lpfn = 0;
pl.mem_type = bo->resource->mem_type;
pl.flags = bo->resource->placement;
memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;
placement.placement = &pl;
ret = ttm_bo_validate(bo, &placement, &ctx);
BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);
if (pin)
ttm_bo_pin(bo);
else
ttm_bo_unpin(bo);
}
/**
* vmw_bo_map_and_cache - Map a buffer object and cache the map
*
* @vbo: The buffer object to map
* Return: A kernel virtual address or NULL if mapping failed.
*
* This function maps a buffer object into the kernel address space, or
* returns the virtual kernel address of an already existing map. The virtual
* address remains valid as long as the buffer object is pinned or reserved.
* The cached map is torn down on any of:
* 1) Buffer object move
* 2) Buffer object swapout
* 3) Buffer object destruction
*
*/
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
struct ttm_buffer_object *bo = &vbo->tbo;
bool not_used;
void *virtual;
int ret;
virtual = ttm_kmap_obj_virtual(&vbo->map, ¬_used);
if (virtual)
return virtual;
ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
if (ret)
DRM_ERROR("Buffer object map failed: %d.\n", ret);
return ttm_kmap_obj_virtual(&vbo->map, ¬_used);
}
/**
* vmw_bo_unmap - Tear down a cached buffer object map.
*
* @vbo: The buffer object whose map we are tearing down.
*
* This function tears down a cached map set up using
* vmw_bo_map_and_cache().
*/
void vmw_bo_unmap(struct vmw_bo *vbo)
{
if (vbo->map.bo == NULL)
return;
ttm_bo_kunmap(&vbo->map);
vbo->map.bo = NULL;
}
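/*
* Illustrative use of the cached map (hypothetical caller, buffer assumed
* reserved or pinned):
*
*	u32 *virt = vmw_bo_map_and_cache(vbo);
*
*	if (virt)
*		virt[0] = 0;
*
* No explicit vmw_bo_unmap() is needed afterwards; the cached map is torn
* down automatically on move, swapout or destruction.
*/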
/**
* vmw_bo_init - Initialize a vmw buffer object
*
* @dev_priv: Pointer to the device private struct
* @vmw_bo: Buffer object to initialize
* @params: Parameters used to initialize the buffer object
* @destroy: The function used to delete the buffer object
* Returns: Zero on success, negative error code on error.
*
*/
static int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_bo *vmw_bo,
struct vmw_bo_params *params,
void (*destroy)(struct ttm_buffer_object *))
{
struct ttm_operation_ctx ctx = {
.interruptible = params->bo_type != ttm_bo_type_kernel,
.no_wait_gpu = false
};
struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;
int ret;
memset(vmw_bo, 0, sizeof(*vmw_bo));
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->tbo.priority = 3;
vmw_bo->res_tree = RB_ROOT;
params->size = ALIGN(params->size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
&vmw_bo->placement, 0, &ctx, NULL,
NULL, destroy);
if (unlikely(ret))
return ret;
if (params->pin)
ttm_bo_pin(&vmw_bo->tbo);
ttm_bo_unreserve(&vmw_bo->tbo);
return 0;
}
int vmw_bo_create(struct vmw_private *vmw,
struct vmw_bo_params *params,
struct vmw_bo **p_bo)
{
int ret;
*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
if (unlikely(!*p_bo)) {
DRM_ERROR("Failed to allocate a buffer.\n");
return -ENOMEM;
}
/*
* vmw_bo_init will delete the *p_bo object if it fails
*/
ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
if (unlikely(ret != 0))
goto out_error;
return ret;
out_error:
*p_bo = NULL;
return ret;
}
/**
* vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
* access, idling previous GPU operations on the buffer and optionally
* blocking it for further command submissions.
*
* @vmw_bo: Pointer to the buffer object being grabbed for CPU access
* @flags: Flags indicating how the grab should be performed.
* Return: Zero on success, Negative error code on error. In particular,
* -EBUSY will be returned if a dontblock operation is requested and the
* buffer object is busy, and -ERESTARTSYS will be returned if a wait is
* interrupted by a signal.
*
* A blocking grab will be automatically released when the file that grabbed it is closed.
*/
static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
uint32_t flags)
{
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
struct ttm_buffer_object *bo = &vmw_bo->tbo;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
long lret;
lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
true, nonblock ? 0 :
MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
return lret;
return 0;
}
ret = ttm_bo_reserve(bo, true, nonblock, NULL);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_wait(bo, true, nonblock);
if (likely(ret == 0))
atomic_inc(&vmw_bo->cpu_writers);
ttm_bo_unreserve(bo);
if (unlikely(ret != 0))
return ret;
return ret;
}
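/*
* Note: with drm_vmw_synccpu_allow_cs the grab above only waits for idle and
* does not raise @cpu_writers, so later command submission is not blocked;
* without that flag, the non-zero cpu_writers count is what blocks
* submission (checked at validation time elsewhere in the driver) until the
* matching release.
*/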
/**
* vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
* and unblock command submission on the buffer if blocked.
*
* @filp: Identifying the caller.
* @handle: Handle identifying the buffer object.
* @flags: Flags indicating the type of release.
*/
static int vmw_user_bo_synccpu_release(struct drm_file *filp,
uint32_t handle,
uint32_t flags)
{
struct vmw_bo *vmw_bo;
int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);
if (!ret) {
if (!(flags & drm_vmw_synccpu_allow_cs)) {
atomic_dec(&vmw_bo->cpu_writers);
}
vmw_user_bo_unref(vmw_bo);
}
return ret;
}
/**
* vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
* functionality.
*
* @dev: Identifies the drm device.
* @data: Pointer to the ioctl argument.
* @file_priv: Identifies the caller.
* Return: Zero on success, negative error code on error.
*
* This function checks the ioctl arguments for validity and calls the
* relevant synccpu functions.
*/
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_synccpu_arg *arg =
(struct drm_vmw_synccpu_arg *) data;
struct vmw_bo *vbo;
int ret;
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
|| (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
drm_vmw_synccpu_dontblock |
drm_vmw_synccpu_allow_cs)) != 0) {
DRM_ERROR("Illegal synccpu flags.\n");
return -EINVAL;
}
switch (arg->op) {
case drm_vmw_synccpu_grab:
ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
if (unlikely(ret != 0))
return ret;
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_user_bo_unref(vbo);
if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
(unsigned int) arg->handle);
return ret;
}
break;
case drm_vmw_synccpu_release:
ret = vmw_user_bo_synccpu_release(file_priv,
arg->handle,
arg->flags);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
(unsigned int) arg->handle);
return ret;
}
break;
default:
DRM_ERROR("Invalid synccpu operation.\n");
return -EINVAL;
}
return 0;
}
/**
* vmw_bo_unref_ioctl - Generic handle close ioctl.
*
* @dev: Identifies the drm device.
* @data: Pointer to the ioctl argument.
* @file_priv: Identifies the caller.
* Return: Zero on success, negative error code on error.
*
* This function checks the ioctl arguments for validity and closes a
* handle to a TTM base object, optionally freeing the object.
*/
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_unref_dmabuf_arg *arg =
(struct drm_vmw_unref_dmabuf_arg *)data;
return drm_gem_handle_delete(file_priv, arg->handle);
}
/**
* vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
*
* @filp: The file the handle is registered with.
* @handle: The user buffer object handle
* @out: Pointer to a where a pointer to the embedded
* struct vmw_bo should be placed.
* Return: Zero on success, Negative error code on error.
*
* The vmw buffer object pointer will be refcounted (both ttm and gem)
*/
int vmw_user_bo_lookup(struct drm_file *filp,
u32 handle,
struct vmw_bo **out)
{
struct drm_gem_object *gobj;
gobj = drm_gem_object_lookup(filp, handle);
if (!gobj) {
DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -ESRCH;
}
*out = to_vmw_bo(gobj);
ttm_bo_get(&(*out)->tbo);
return 0;
}
/**
* vmw_bo_fence_single - Utility function to fence a single TTM buffer
* object without unreserving it.
*
* @bo: Pointer to the struct ttm_buffer_object to fence.
* @fence: Pointer to the fence. If NULL, this function will
* insert a fence into the command stream.
*
* Contrary to the ttm_eu version of this function, it takes only
* a single buffer object instead of a list, and it also doesn't
* unreserve the buffer object, which needs to be done separately.
*/
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence)
{
struct ttm_device *bdev = bo->bdev;
struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
int ret;
if (fence == NULL)
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
else
dma_fence_get(&fence->base);
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (!ret)
dma_resv_add_fence(bo->base.resv, &fence->base,
DMA_RESV_USAGE_KERNEL);
else
/* Last resort fallback when we are OOM */
dma_fence_wait(&fence->base, false);
dma_fence_put(&fence->base);
}
/**
* vmw_dumb_create - Create a dumb kms buffer
*
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @dev: Pointer to the drm device.
* @args: Pointer to a struct drm_mode_create_dumb structure
* Return: Zero on success, negative error code on failure.
*
* This is a driver callback for the core drm create_dumb functionality.
* Note that this is very similar to the vmw_bo_alloc ioctl, except
* that the arguments have a different format.
*/
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_bo *vbo;
int cpp = DIV_ROUND_UP(args->bpp, 8);
int ret;
switch (cpp) {
case 1: /* DRM_FORMAT_C8 */
case 2: /* DRM_FORMAT_RGB565 */
case 4: /* DRM_FORMAT_XRGB8888 */
break;
default:
/*
* Dumb buffers don't allow anything else.
* This is tested via IGT's dumb_buffers
*/
return -EINVAL;
}
args->pitch = args->width * cpp;
args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
args->size, &args->handle,
&vbo);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(&vbo->tbo.base);
return ret;
}
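/*
* Example of the sizing above (illustrative): a 1024x768 XRGB8888 dumb
* buffer has cpp = 4, pitch = 1024 * 4 = 4096 bytes and
* size = ALIGN(4096 * 768, PAGE_SIZE) = 3145728 bytes with 4 KiB pages.
*/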
/**
* vmw_bo_swap_notify - swapout notify callback.
*
* @bo: The buffer object to be swapped out.
*/
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
/* Kill any cached kernel maps before swapout */
vmw_bo_unmap(to_vmw_bo(&bo->base));
}
/**
* vmw_bo_move_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to move.
* @mem: The struct ttm_resource indicating to what memory
* region the move is taking place.
*
* Detaches cached maps and device bindings that require that the
* buffer doesn't move.
*/
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem)
{
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
/*
* Kill any cached kernel maps before move to or from VRAM.
* With other types of moves, the underlying pages stay the same,
* and the map can be kept.
*/
if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
vmw_bo_unmap(vbo);
/*
* If we're moving a backup MOB out of MOB placement, then make sure we
* read back all resource content first, and unbind the MOB from
* the resource.
*/
if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
static u32
set_placement_list(struct ttm_place *pl, u32 domain)
{
u32 n = 0;
/*
* The placements are ordered according to our preferences
*/
if (domain & VMW_BO_DOMAIN_MOB) {
pl[n].mem_type = VMW_PL_MOB;
pl[n].flags = 0;
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_GMR) {
pl[n].mem_type = VMW_PL_GMR;
pl[n].flags = 0;
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_VRAM) {
pl[n].mem_type = TTM_PL_VRAM;
pl[n].flags = 0;
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
pl[n].mem_type = VMW_PL_SYSTEM;
pl[n].flags = 0;
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_SYS) {
pl[n].mem_type = TTM_PL_SYSTEM;
pl[n].flags = 0;
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
WARN_ON(!n);
if (!n) {
pl[n].mem_type = TTM_PL_SYSTEM;
pl[n].flags = 0;
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
return n;
}
void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
{
struct ttm_device *bdev = bo->tbo.bdev;
struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
struct ttm_placement *pl = &bo->placement;
bool mem_compatible = false;
u32 i;
pl->placement = bo->places;
pl->num_placement = set_placement_list(bo->places, domain);
if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
for (i = 0; i < pl->num_placement; ++i) {
if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
bo->tbo.resource->mem_type == pl->placement[i].mem_type)
mem_compatible = true;
}
if (!mem_compatible)
drm_warn(&vmw->drm,
"%s: Incompatible transition from "
"bo->base.resource->mem_type = %u to domain = %u\n",
__func__, bo->tbo.resource->mem_type, domain);
}
pl->busy_placement = bo->busy_places;
pl->num_busy_placement = set_placement_list(bo->busy_places, busy_domain);
}
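/*
* Callers typically pair a preferred domain set with a narrower busy domain,
* e.g. vmw_bo_pin_in_vram_or_gmr() above uses
* vmw_bo_placement_set(buf, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
* VMW_BO_DOMAIN_GMR).
*/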
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
{
struct ttm_device *bdev = bo->tbo.bdev;
struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;
if (vmw->has_mob)
domain = VMW_BO_DOMAIN_MOB;
vmw_bo_placement_set(bo, domain, domain);
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_bo.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2015-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo.h>
#include <linux/dmapool.h>
#include <linux/pci.h>
/*
* Size of inline command buffers. Try to make sure that a page size is a
* multiple of the DMA pool allocation size.
*/
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
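/*
* With the definitions above a struct vmw_cmdbuf_dheader works out to 1024
* bytes (the 64-byte-aligned header plus the inline space), so four of them
* fit exactly in a 4 KiB page, which is what the comment about the DMA pool
* allocation size refers to.
*/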
/**
* struct vmw_cmdbuf_context - Command buffer context queues
*
* @submitted: List of command buffers that have been submitted to the
* manager but not yet submitted to hardware.
* @hw_submitted: List of command buffers submitted to hardware.
* @preempted: List of preempted command buffers.
* @num_hw_submitted: Number of buffers currently being processed by hardware
* @block_submission: Identifies a block command submission.
*/
struct vmw_cmdbuf_context {
struct list_head submitted;
struct list_head hw_submitted;
struct list_head preempted;
unsigned num_hw_submitted;
bool block_submission;
};
/**
* struct vmw_cmdbuf_man - Command buffer manager
*
* @cur_mutex: Mutex protecting the command buffer used for incremental small
* kernel command submissions, @cur.
* @space_mutex: Mutex to protect against starvation when we allocate
* main pool buffer space.
* @error_mutex: Mutex to serialize the work queue error handling.
* Note this is not needed if the same workqueue handler
* can't race with itself...
* @work: A struct work_struct implementing command buffer error handling.
* Immutable.
* @dev_priv: Pointer to the device private struct. Immutable.
* @ctx: Array of command buffer context queues. The queues and the context
* data is protected by @lock.
* @error: List of command buffers that have caused device errors.
* Protected by @lock.
* @mm: Range manager for the command buffer space. Manager allocations and
* frees are protected by @lock.
* @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
* @cur: Command buffer for small kernel command submissions. Protected by
* the @cur_mutex.
* @cur_pos: Space already used in @cur. Protected by @cur_mutex.
* @default_size: Default size for the @cur command buffer. Immutable.
* @max_hw_submitted: Max number of in-flight command buffers the device can
* handle. Immutable.
* @lock: Spinlock protecting command submission queues.
* @headers: Pool of DMA memory for device command buffer headers.
* Internal protection.
* @dheaders: Pool of DMA memory for device command buffer headers with trailing
* space for inline data. Internal protection.
* @alloc_queue: Wait queue for processes waiting to allocate command buffer
* space.
* @idle_queue: Wait queue for processes waiting for command buffer idle.
* @irq_on: Whether the process function has requested irq to be turned on.
* Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
* allocation. Immutable.
* @has_pool: Has a large pool of DMA memory which allows larger allocations.
* Typically this is false only during bootstrap.
* @handle: DMA address handle for the command buffer space if @using_mob is
* false. Immutable.
* @size: The size of the command buffer space. Immutable.
* @num_contexts: Number of contexts actually enabled.
*/
struct vmw_cmdbuf_man {
struct mutex cur_mutex;
struct mutex space_mutex;
struct mutex error_mutex;
struct work_struct work;
struct vmw_private *dev_priv;
struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
struct list_head error;
struct drm_mm mm;
struct vmw_bo *cmd_space;
u8 *map;
struct vmw_cmdbuf_header *cur;
size_t cur_pos;
size_t default_size;
unsigned max_hw_submitted;
spinlock_t lock;
struct dma_pool *headers;
struct dma_pool *dheaders;
wait_queue_head_t alloc_queue;
wait_queue_head_t idle_queue;
bool irq_on;
bool using_mob;
bool has_pool;
dma_addr_t handle;
size_t size;
u32 num_contexts;
};
/**
* struct vmw_cmdbuf_header - Command buffer metadata
*
* @man: The command buffer manager.
* @cb_header: Device command buffer header, allocated from a DMA pool.
* @cb_context: The device command buffer context.
* @list: List head for attaching to the manager lists.
* @node: The range manager node.
* @handle: The DMA address of @cb_header. Handed to the device on command
* buffer submission.
* @cmd: Pointer to the command buffer space of this buffer.
* @size: Size of the command buffer space of this buffer.
* @reserved: Reserved space of this buffer.
* @inline_space: Whether inline command buffer space is used.
*/
struct vmw_cmdbuf_header {
struct vmw_cmdbuf_man *man;
SVGACBHeader *cb_header;
SVGACBContext cb_context;
struct list_head list;
struct drm_mm_node node;
dma_addr_t handle;
u8 *cmd;
size_t size;
size_t reserved;
bool inline_space;
};
/**
* struct vmw_cmdbuf_dheader - Device command buffer header with inline
* command buffer space.
*
* @cb_header: Device command buffer header.
* @cmd: Inline command buffer space.
*/
struct vmw_cmdbuf_dheader {
SVGACBHeader cb_header;
u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};
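/*
 * Illustrative sketch, not part of the driver: the sizing comment above
 * intends that a whole number of struct vmw_cmdbuf_dheader objects fit in
 * a page, so the "vmwgfx inline cmdbuf" DMA pool wastes no space. A
 * compile-time check along those lines could look like this; the helper
 * name is made up for the example.
 */
static inline void example_check_dheader_packing(void)
{
	BUILD_BUG_ON(PAGE_SIZE % sizeof(struct vmw_cmdbuf_dheader) != 0);
}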
/**
* struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
*
* @page_size: Size of requested command buffer space in pages.
* @node: Pointer to the range manager node.
* @done: True if this allocation has succeeded.
*/
struct vmw_cmdbuf_alloc_info {
size_t page_size;
struct drm_mm_node *node;
bool done;
};
/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
++(_i), ++(_ctx))
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
/**
* vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
*
 * @man: The command buffer manager.
* @interruptible: Whether to wait interruptible when locking.
*/
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
if (interruptible) {
if (mutex_lock_interruptible(&man->cur_mutex))
return -ERESTARTSYS;
} else {
mutex_lock(&man->cur_mutex);
}
return 0;
}
/**
* vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
*
 * @man: The command buffer manager.
*/
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
mutex_unlock(&man->cur_mutex);
}
/**
* vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
* been used for the device context with inline command buffers.
* Need not be called locked.
*
* @header: Pointer to the header to free.
*/
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
struct vmw_cmdbuf_dheader *dheader;
if (WARN_ON_ONCE(!header->inline_space))
return;
dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
cb_header);
dma_pool_free(header->man->dheaders, dheader, header->handle);
kfree(header);
}
/**
* __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
* associated structures.
*
* @header: Pointer to the header to free.
*
* For internal use. Must be called with man::lock held.
*/
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
struct vmw_cmdbuf_man *man = header->man;
lockdep_assert_held_once(&man->lock);
if (header->inline_space) {
vmw_cmdbuf_header_inline_free(header);
return;
}
drm_mm_remove_node(&header->node);
wake_up_all(&man->alloc_queue);
if (header->cb_header)
dma_pool_free(man->headers, header->cb_header,
header->handle);
kfree(header);
}
/**
* vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
* associated structures.
*
* @header: Pointer to the header to free.
*/
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
struct vmw_cmdbuf_man *man = header->man;
/* Avoid locking if inline_space */
if (header->inline_space) {
vmw_cmdbuf_header_inline_free(header);
return;
}
spin_lock(&man->lock);
__vmw_cmdbuf_header_free(header);
spin_unlock(&man->lock);
}
/**
* vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
*
* @header: The header of the buffer to submit.
*/
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
struct vmw_cmdbuf_man *man = header->man;
u32 val;
val = upper_32_bits(header->handle);
vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
val = lower_32_bits(header->handle);
val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
return header->cb_header->status;
}
/**
* vmw_cmdbuf_ctx_init: Initialize a command buffer context.
*
* @ctx: The command buffer context to initialize
*/
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
INIT_LIST_HEAD(&ctx->hw_submitted);
INIT_LIST_HEAD(&ctx->submitted);
INIT_LIST_HEAD(&ctx->preempted);
ctx->num_hw_submitted = 0;
}
/**
* vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
* context.
*
* @man: The command buffer manager.
* @ctx: The command buffer context.
*
* Submits command buffers to hardware until there are no more command
* buffers to submit or the hardware can't handle more command buffers.
*/
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_context *ctx)
{
while (ctx->num_hw_submitted < man->max_hw_submitted &&
!list_empty(&ctx->submitted) &&
!ctx->block_submission) {
struct vmw_cmdbuf_header *entry;
SVGACBStatus status;
entry = list_first_entry(&ctx->submitted,
struct vmw_cmdbuf_header,
list);
status = vmw_cmdbuf_header_submit(entry);
/* This should never happen */
if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
entry->cb_header->status = SVGA_CB_STATUS_NONE;
break;
}
list_move_tail(&entry->list, &ctx->hw_submitted);
ctx->num_hw_submitted++;
}
}
/**
* vmw_cmdbuf_ctx_process - Process a command buffer context.
*
* @man: The command buffer manager.
* @ctx: The command buffer context.
 * @notempty: Incremented for each context that still has a non-empty submitted list.
*
* Submit command buffers to hardware if possible, and process finished
* buffers. Typically freeing them, but on preemption or error take
* appropriate action. Wake up waiters if appropriate.
*/
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_context *ctx,
int *notempty)
{
struct vmw_cmdbuf_header *entry, *next;
vmw_cmdbuf_ctx_submit(man, ctx);
list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
SVGACBStatus status = entry->cb_header->status;
if (status == SVGA_CB_STATUS_NONE)
break;
list_del(&entry->list);
wake_up_all(&man->idle_queue);
ctx->num_hw_submitted--;
switch (status) {
case SVGA_CB_STATUS_COMPLETED:
__vmw_cmdbuf_header_free(entry);
break;
case SVGA_CB_STATUS_COMMAND_ERROR:
WARN_ONCE(true, "Command buffer error.\n");
entry->cb_header->status = SVGA_CB_STATUS_NONE;
list_add_tail(&entry->list, &man->error);
schedule_work(&man->work);
break;
case SVGA_CB_STATUS_PREEMPTED:
entry->cb_header->status = SVGA_CB_STATUS_NONE;
list_add_tail(&entry->list, &ctx->preempted);
break;
case SVGA_CB_STATUS_CB_HEADER_ERROR:
WARN_ONCE(true, "Command buffer header error.\n");
__vmw_cmdbuf_header_free(entry);
break;
default:
WARN_ONCE(true, "Undefined command buffer status.\n");
__vmw_cmdbuf_header_free(entry);
break;
}
}
vmw_cmdbuf_ctx_submit(man, ctx);
if (!list_empty(&ctx->submitted))
(*notempty)++;
}
/**
* vmw_cmdbuf_man_process - Process all command buffer contexts and
* switch on and off irqs as appropriate.
*
* @man: The command buffer manager.
*
* Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
* IRQ handling is turned on. Otherwise, make sure it's turned off.
*/
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
int notempty;
struct vmw_cmdbuf_context *ctx;
int i;
retry:
notempty = 0;
for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
if (man->irq_on && !notempty) {
vmw_generic_waiter_remove(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
man->irq_on = false;
} else if (!man->irq_on && notempty) {
vmw_generic_waiter_add(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
man->irq_on = true;
/* Rerun in case we just missed an irq. */
goto retry;
}
}
/**
* vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
* command buffer context
*
* @man: The command buffer manager.
* @header: The header of the buffer to submit.
* @cb_context: The command buffer context to use.
*
* This function adds @header to the "submitted" queue of the command
* buffer context identified by @cb_context. It then calls the command buffer
* manager processing to potentially submit the buffer to hardware.
* @man->lock needs to be held when calling this function.
*/
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_header *header,
SVGACBContext cb_context)
{
if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
header->cb_header->dxContext = 0;
header->cb_context = cb_context;
list_add_tail(&header->list, &man->ctx[cb_context].submitted);
vmw_cmdbuf_man_process(man);
}
/**
* vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
* handler implemented as a threaded irq task.
*
* @man: Pointer to the command buffer manager.
*
* The bottom half of the interrupt handler simply calls into the
* command buffer processor to free finished buffers and submit any
* queued buffers to hardware.
*/
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
spin_lock(&man->lock);
vmw_cmdbuf_man_process(man);
spin_unlock(&man->lock);
}
/**
* vmw_cmdbuf_work_func - The deferred work function that handles
* command buffer errors.
*
* @work: The work func closure argument.
*
* Restarting the command buffer context after an error requires process
* context, so it is deferred to this work function.
*/
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
struct vmw_cmdbuf_man *man =
container_of(work, struct vmw_cmdbuf_man, work);
struct vmw_cmdbuf_header *entry, *next;
uint32_t dummy = 0;
bool send_fence = false;
struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
int i;
struct vmw_cmdbuf_context *ctx;
bool global_block = false;
for_each_cmdbuf_ctx(man, i, ctx)
INIT_LIST_HEAD(&restart_head[i]);
mutex_lock(&man->error_mutex);
spin_lock(&man->lock);
list_for_each_entry_safe(entry, next, &man->error, list) {
SVGACBHeader *cb_hdr = entry->cb_header;
SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
(entry->cmd + cb_hdr->errorOffset);
u32 error_cmd_size, new_start_offset;
const char *cmd_name;
list_del_init(&entry->list);
global_block = true;
if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
VMW_DEBUG_USER("Unknown command causing device error.\n");
VMW_DEBUG_USER("Command buffer offset is %lu\n",
(unsigned long) cb_hdr->errorOffset);
__vmw_cmdbuf_header_free(entry);
send_fence = true;
continue;
}
VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
cmd_name);
VMW_DEBUG_USER("Command buffer offset is %lu\n",
(unsigned long) cb_hdr->errorOffset);
VMW_DEBUG_USER("Command size is %lu\n",
(unsigned long) error_cmd_size);
new_start_offset = cb_hdr->errorOffset + error_cmd_size;
if (new_start_offset >= cb_hdr->length) {
__vmw_cmdbuf_header_free(entry);
send_fence = true;
continue;
}
if (man->using_mob)
cb_hdr->ptr.mob.mobOffset += new_start_offset;
else
cb_hdr->ptr.pa += (u64) new_start_offset;
entry->cmd += new_start_offset;
cb_hdr->length -= new_start_offset;
cb_hdr->errorOffset = 0;
cb_hdr->offset = 0;
list_add_tail(&entry->list, &restart_head[entry->cb_context]);
}
for_each_cmdbuf_ctx(man, i, ctx)
man->ctx[i].block_submission = true;
spin_unlock(&man->lock);
/* Preempt all contexts */
if (global_block && vmw_cmdbuf_preempt(man, 0))
DRM_ERROR("Failed preempting command buffer contexts\n");
spin_lock(&man->lock);
for_each_cmdbuf_ctx(man, i, ctx) {
/* Move preempted command buffers to the preempted queue. */
vmw_cmdbuf_ctx_process(man, ctx, &dummy);
/*
* Add the preempted queue after the command buffer
* that caused an error.
*/
list_splice_init(&ctx->preempted, restart_head[i].prev);
/*
* Finally add all command buffers first in the submitted
* queue, to rerun them.
*/
ctx->block_submission = false;
list_splice_init(&restart_head[i], &ctx->submitted);
}
vmw_cmdbuf_man_process(man);
spin_unlock(&man->lock);
if (global_block && vmw_cmdbuf_startstop(man, 0, true))
DRM_ERROR("Failed restarting command buffer contexts\n");
/* Send a new fence in case one was removed */
if (send_fence) {
vmw_cmd_send_fence(man->dev_priv, &dummy);
wake_up_all(&man->idle_queue);
}
mutex_unlock(&man->error_mutex);
}
/**
* vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
*
* @man: The command buffer manager.
* @check_preempted: Check also the preempted queue for pending command buffers.
*
*/
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
bool check_preempted)
{
struct vmw_cmdbuf_context *ctx;
bool idle = false;
int i;
spin_lock(&man->lock);
vmw_cmdbuf_man_process(man);
for_each_cmdbuf_ctx(man, i, ctx) {
if (!list_empty(&ctx->submitted) ||
!list_empty(&ctx->hw_submitted) ||
(check_preempted && !list_empty(&ctx->preempted)))
goto out_unlock;
}
idle = list_empty(&man->error);
out_unlock:
spin_unlock(&man->lock);
return idle;
}
/**
* __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
* command submissions
*
* @man: The command buffer manager.
*
* Flushes the current command buffer without allocating a new one. A new one
* is automatically allocated when needed. Call with @man->cur_mutex held.
*/
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
struct vmw_cmdbuf_header *cur = man->cur;
lockdep_assert_held_once(&man->cur_mutex);
if (!cur)
return;
spin_lock(&man->lock);
if (man->cur_pos == 0) {
__vmw_cmdbuf_header_free(cur);
goto out_unlock;
}
man->cur->cb_header->length = man->cur_pos;
vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
spin_unlock(&man->lock);
man->cur = NULL;
man->cur_pos = 0;
}
/**
* vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
* command submissions
*
* @man: The command buffer manager.
* @interruptible: Whether to sleep interruptible when sleeping.
*
* Flushes the current command buffer without allocating a new one. A new one
* is automatically allocated when needed.
*/
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
bool interruptible)
{
int ret = vmw_cmdbuf_cur_lock(man, interruptible);
if (ret)
return ret;
__vmw_cmdbuf_cur_flush(man);
vmw_cmdbuf_cur_unlock(man);
return 0;
}
/**
* vmw_cmdbuf_idle - Wait for command buffer manager idle.
*
* @man: The command buffer manager.
* @interruptible: Sleep interruptible while waiting.
* @timeout: Time out after this many ticks.
*
* Wait until the command buffer manager has processed all command buffers,
* or until a timeout occurs. If a timeout occurs, the function will return
* -EBUSY.
*/
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
unsigned long timeout)
{
int ret;
ret = vmw_cmdbuf_cur_flush(man, interruptible);
vmw_generic_waiter_add(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
if (interruptible) {
ret = wait_event_interruptible_timeout
(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
timeout);
} else {
ret = wait_event_timeout
(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
timeout);
}
vmw_generic_waiter_remove(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
if (ret == 0) {
if (!vmw_cmdbuf_man_idle(man, true))
ret = -EBUSY;
else
ret = 0;
}
if (ret > 0)
ret = 0;
return ret;
}
/**
* vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
*
* @man: The command buffer manager.
* @info: Allocation info. Will hold the size on entry and allocated mm node
* on successful return.
*
 * Try to allocate buffer space from the main pool. Returns true if the
 * allocation succeeded, in which case @info->done is set and the allocated
 * node is available in @info->node.
*/
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_alloc_info *info)
{
int ret;
if (info->done)
return true;
memset(info->node, 0, sizeof(*info->node));
spin_lock(&man->lock);
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
if (ret) {
vmw_cmdbuf_man_process(man);
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
}
spin_unlock(&man->lock);
info->done = !ret;
return info->done;
}
/**
* vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
*
* @man: The command buffer manager.
* @node: Pointer to pre-allocated range-manager node.
* @size: The size of the allocation.
* @interruptible: Whether to sleep interruptible while waiting for space.
*
* This function allocates buffer space from the main pool, and if there is
 * no space currently available, it turns on IRQ handling and sleeps waiting for it to
* become available.
*/
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
struct drm_mm_node *node,
size_t size,
bool interruptible)
{
struct vmw_cmdbuf_alloc_info info;
info.page_size = PFN_UP(size);
info.node = node;
info.done = false;
/*
* To prevent starvation of large requests, only one allocating call
* at a time waiting for space.
*/
if (interruptible) {
if (mutex_lock_interruptible(&man->space_mutex))
return -ERESTARTSYS;
} else {
mutex_lock(&man->space_mutex);
}
/* Try to allocate space without waiting. */
if (vmw_cmdbuf_try_alloc(man, &info))
goto out_unlock;
vmw_generic_waiter_add(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
if (interruptible) {
int ret;
ret = wait_event_interruptible
(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
if (ret) {
vmw_generic_waiter_remove
(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
mutex_unlock(&man->space_mutex);
return ret;
}
} else {
wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
}
vmw_generic_waiter_remove(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
out_unlock:
mutex_unlock(&man->space_mutex);
return 0;
}
/**
* vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
* space from the main pool.
*
* @man: The command buffer manager.
* @header: Pointer to the header to set up.
* @size: The requested size of the buffer space.
* @interruptible: Whether to sleep interruptible while waiting for space.
*/
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_header *header,
size_t size,
bool interruptible)
{
SVGACBHeader *cb_hdr;
size_t offset;
int ret;
if (!man->has_pool)
return -ENOMEM;
ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
if (ret)
return ret;
header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
&header->handle);
if (!header->cb_header) {
ret = -ENOMEM;
goto out_no_cb_header;
}
header->size = header->node.size << PAGE_SHIFT;
cb_hdr = header->cb_header;
offset = header->node.start << PAGE_SHIFT;
header->cmd = man->map + offset;
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start;
cb_hdr->ptr.mob.mobOffset = offset;
} else {
cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
}
return 0;
out_no_cb_header:
spin_lock(&man->lock);
drm_mm_remove_node(&header->node);
spin_unlock(&man->lock);
return ret;
}
/**
* vmw_cmdbuf_space_inline - Set up a command buffer header with
* inline command buffer space.
*
* @man: The command buffer manager.
* @header: Pointer to the header to set up.
* @size: The requested size of the buffer space.
*/
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_header *header,
int size)
{
struct vmw_cmdbuf_dheader *dheader;
SVGACBHeader *cb_hdr;
if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
return -ENOMEM;
dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
&header->handle);
if (!dheader)
return -ENOMEM;
header->inline_space = true;
header->size = VMW_CMDBUF_INLINE_SIZE;
cb_hdr = &dheader->cb_header;
header->cb_header = cb_hdr;
header->cmd = dheader->cmd;
cb_hdr->status = SVGA_CB_STATUS_NONE;
cb_hdr->flags = SVGA_CB_FLAG_NONE;
cb_hdr->ptr.pa = (u64)header->handle +
(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
return 0;
}
/**
* vmw_cmdbuf_alloc - Allocate a command buffer header complete with
* command buffer space.
*
* @man: The command buffer manager.
* @size: The requested size of the buffer space.
* @interruptible: Whether to sleep interruptible while waiting for space.
* @p_header: points to a header pointer to populate on successful return.
*
* Returns a pointer to command buffer space if successful. Otherwise
* returns an error pointer. The header pointer returned in @p_header should
* be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
*/
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
size_t size, bool interruptible,
struct vmw_cmdbuf_header **p_header)
{
struct vmw_cmdbuf_header *header;
int ret = 0;
*p_header = NULL;
header = kzalloc(sizeof(*header), GFP_KERNEL);
if (!header)
return ERR_PTR(-ENOMEM);
if (size <= VMW_CMDBUF_INLINE_SIZE)
ret = vmw_cmdbuf_space_inline(man, header, size);
else
ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
if (ret) {
kfree(header);
return ERR_PTR(ret);
}
header->man = man;
INIT_LIST_HEAD(&header->list);
header->cb_header->status = SVGA_CB_STATUS_NONE;
*p_header = header;
return header->cmd;
}
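/*
 * Illustrative sketch, not part of the driver: allocate a dedicated header
 * plus command space and copy commands into it. The helper name is made up
 * for the example; the returned header is later handed to hardware via
 * vmw_cmdbuf_reserve()/vmw_cmdbuf_commit() (see the sketch after
 * vmw_cmdbuf_commit() below) or, if it ends up unused, released again with
 * vmw_cmdbuf_header_free().
 */
static inline void *example_cmdbuf_copy(struct vmw_cmdbuf_man *man,
					const void *commands, size_t size,
					struct vmw_cmdbuf_header **p_header)
{
	void *cmd = vmw_cmdbuf_alloc(man, size, true, p_header);
	if (IS_ERR(cmd))
		return cmd;
	memcpy(cmd, commands, size);
	return cmd;
}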
/**
* vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
* command buffer.
*
* @man: The command buffer manager.
* @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
* @interruptible: Whether to sleep interruptible while waiting for space.
*
* Returns a pointer to command buffer space if successful. Otherwise
* returns an error pointer.
*/
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
size_t size,
int ctx_id,
bool interruptible)
{
struct vmw_cmdbuf_header *cur;
void *ret;
if (vmw_cmdbuf_cur_lock(man, interruptible))
return ERR_PTR(-ERESTARTSYS);
cur = man->cur;
if (cur && (size + man->cur_pos > cur->size ||
((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
ctx_id != cur->cb_header->dxContext)))
__vmw_cmdbuf_cur_flush(man);
if (!man->cur) {
ret = vmw_cmdbuf_alloc(man,
max_t(size_t, size, man->default_size),
interruptible, &man->cur);
if (IS_ERR(ret)) {
vmw_cmdbuf_cur_unlock(man);
return ret;
}
cur = man->cur;
}
if (ctx_id != SVGA3D_INVALID_ID) {
cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
cur->cb_header->dxContext = ctx_id;
}
cur->reserved = size;
return (void *) (man->cur->cmd + man->cur_pos);
}
/**
* vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
*
* @man: The command buffer manager.
* @size: The size of the commands actually written.
* @flush: Whether to flush the command buffer immediately.
*/
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
size_t size, bool flush)
{
struct vmw_cmdbuf_header *cur = man->cur;
lockdep_assert_held_once(&man->cur_mutex);
WARN_ON(size > cur->reserved);
man->cur_pos += size;
if (!size)
cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
if (flush)
__vmw_cmdbuf_cur_flush(man);
vmw_cmdbuf_cur_unlock(man);
}
/**
* vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
*
* @man: The command buffer manager.
* @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
* @interruptible: Whether to sleep interruptible while waiting for space.
* @header: Header of the command buffer. NULL if the current command buffer
* should be used.
*
* Returns a pointer to command buffer space if successful. Otherwise
* returns an error pointer.
*/
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
int ctx_id, bool interruptible,
struct vmw_cmdbuf_header *header)
{
if (!header)
return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
if (size > header->size)
return ERR_PTR(-EINVAL);
if (ctx_id != SVGA3D_INVALID_ID) {
header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
header->cb_header->dxContext = ctx_id;
}
header->reserved = size;
return header->cmd;
}
/**
* vmw_cmdbuf_commit - Commit commands in a command buffer.
*
* @man: The command buffer manager.
* @size: The size of the commands actually written.
* @header: Header of the command buffer. NULL if the current command buffer
* should be used.
* @flush: Whether to flush the command buffer immediately.
*/
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
struct vmw_cmdbuf_header *header, bool flush)
{
if (!header) {
vmw_cmdbuf_commit_cur(man, size, flush);
return;
}
(void) vmw_cmdbuf_cur_lock(man, false);
__vmw_cmdbuf_cur_flush(man);
WARN_ON(size > header->reserved);
man->cur = header;
man->cur_pos = size;
if (!size)
header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
if (flush)
__vmw_cmdbuf_cur_flush(man);
vmw_cmdbuf_cur_unlock(man);
}
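/*
 * Illustrative sketch, not part of the driver: the small kernel command
 * submission pattern using the manager's current buffer. The single made-up
 * 32-bit command word and the helper name are assumptions for the example;
 * real callers reserve the exact size of an SVGA command structure and fill
 * it in before committing.
 */
static inline int example_cmdbuf_submit_small(struct vmw_cmdbuf_man *man)
{
	u32 *cmd;
	cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd), SVGA3D_INVALID_ID,
				 true, NULL);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);
	*cmd = 0;	/* hypothetical command word */
	/* Commit to the current buffer and flush it to the device. */
	vmw_cmdbuf_commit(man, sizeof(*cmd), NULL, true);
	return 0;
}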
/**
* vmw_cmdbuf_send_device_command - Send a command through the device context.
*
* @man: The command buffer manager.
* @command: Pointer to the command to send.
* @size: Size of the command.
*
* Synchronously sends a device context command.
*/
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
const void *command,
size_t size)
{
struct vmw_cmdbuf_header *header;
int status;
void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
if (IS_ERR(cmd))
return PTR_ERR(cmd);
memcpy(cmd, command, size);
header->cb_header->length = size;
header->cb_context = SVGA_CB_CONTEXT_DEVICE;
spin_lock(&man->lock);
status = vmw_cmdbuf_header_submit(header);
spin_unlock(&man->lock);
vmw_cmdbuf_header_free(header);
if (status != SVGA_CB_STATUS_COMPLETED) {
DRM_ERROR("Device context command failed with status %d\n",
status);
return -EINVAL;
}
return 0;
}
/**
* vmw_cmdbuf_preempt - Send a preempt command through the device
* context.
*
* @man: The command buffer manager.
* @context: Device context to pass command through.
*
* Synchronously sends a preempt command.
*/
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
struct {
uint32 id;
SVGADCCmdPreempt body;
} __packed cmd;
cmd.id = SVGA_DC_CMD_PREEMPT;
cmd.body.context = SVGA_CB_CONTEXT_0 + context;
cmd.body.ignoreIDZero = 0;
return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}
/**
* vmw_cmdbuf_startstop - Send a start / stop command through the device
* context.
*
* @man: The command buffer manager.
* @context: Device context to start/stop.
* @enable: Whether to enable or disable the context.
*
* Synchronously sends a device start / stop context command.
*/
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
bool enable)
{
struct {
uint32 id;
SVGADCCmdStartStop body;
} __packed cmd;
cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
cmd.body.enable = (enable) ? 1 : 0;
cmd.body.context = SVGA_CB_CONTEXT_0 + context;
return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}
/**
* vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
*
* @man: The command buffer manager.
* @size: The size of the main space pool.
*
* Set the size and allocate the main command buffer space pool.
* If successful, this enables large command submissions.
* Note that this function requires that rudimentary command
* submission is already available and that the MOB memory manager is alive.
* Returns 0 on success. Negative error code on failure.
*/
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
struct vmw_private *dev_priv = man->dev_priv;
int ret;
if (man->has_pool)
return -EINVAL;
/* First, try to allocate a huge chunk of DMA memory */
size = PAGE_ALIGN(size);
man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
&man->handle, GFP_KERNEL);
if (man->map) {
man->using_mob = false;
} else {
struct vmw_bo_params bo_params = {
.domain = VMW_BO_DOMAIN_MOB,
.busy_domain = VMW_BO_DOMAIN_MOB,
.bo_type = ttm_bo_type_kernel,
.size = size,
.pin = true
};
/*
* DMA memory failed. If we can have command buffers in a
* MOB, try to use that instead. Note that this will
* actually call into the already enabled manager, when
* binding the MOB.
*/
if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
!dev_priv->has_mob)
return -ENOMEM;
ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
if (ret)
return ret;
man->map = vmw_bo_map_and_cache(man->cmd_space);
man->using_mob = man->map;
}
man->size = size;
drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
man->has_pool = true;
/*
* For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
* prevent deadlocks from happening when vmw_cmdbuf_space_pool()
* needs to wait for space and we block on further command
* submissions to be able to free up space.
*/
man->default_size = VMW_CMDBUF_INLINE_SIZE;
drm_info(&dev_priv->drm,
"Using command buffers with %s pool.\n",
(man->using_mob) ? "MOB" : "DMA");
return 0;
}
/**
* vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
* inline command buffer submissions only.
*
* @dev_priv: Pointer to device private structure.
*
 * Returns a pointer to a command buffer manager on success or an error pointer
* on failure. The command buffer manager will be enabled for submissions of
* size VMW_CMDBUF_INLINE_SIZE only.
*/
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
struct vmw_cmdbuf_man *man;
struct vmw_cmdbuf_context *ctx;
unsigned int i;
int ret;
if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
return ERR_PTR(-ENOSYS);
man = kzalloc(sizeof(*man), GFP_KERNEL);
if (!man)
return ERR_PTR(-ENOMEM);
man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
2 : 1;
man->headers = dma_pool_create("vmwgfx cmdbuf",
dev_priv->drm.dev,
sizeof(SVGACBHeader),
64, PAGE_SIZE);
if (!man->headers) {
ret = -ENOMEM;
goto out_no_pool;
}
man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
dev_priv->drm.dev,
sizeof(struct vmw_cmdbuf_dheader),
64, PAGE_SIZE);
if (!man->dheaders) {
ret = -ENOMEM;
goto out_no_dpool;
}
for_each_cmdbuf_ctx(man, i, ctx)
vmw_cmdbuf_ctx_init(ctx);
INIT_LIST_HEAD(&man->error);
spin_lock_init(&man->lock);
mutex_init(&man->cur_mutex);
mutex_init(&man->space_mutex);
mutex_init(&man->error_mutex);
man->default_size = VMW_CMDBUF_INLINE_SIZE;
init_waitqueue_head(&man->alloc_queue);
init_waitqueue_head(&man->idle_queue);
man->dev_priv = dev_priv;
man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
&dev_priv->error_waiters);
ret = vmw_cmdbuf_startstop(man, 0, true);
if (ret) {
DRM_ERROR("Failed starting command buffer contexts\n");
vmw_cmdbuf_man_destroy(man);
return ERR_PTR(ret);
}
return man;
out_no_dpool:
dma_pool_destroy(man->headers);
out_no_pool:
kfree(man);
return ERR_PTR(ret);
}
/**
* vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
*
* @man: Pointer to a command buffer manager.
*
* This function removes the main buffer space pool, and should be called
* before MOB memory management is removed. When this function has been called,
* only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
* less are allowed, and the default size of the command buffer for small kernel
* submissions is also set to this size.
*/
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
if (!man->has_pool)
return;
man->has_pool = false;
man->default_size = VMW_CMDBUF_INLINE_SIZE;
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
if (man->using_mob)
vmw_bo_unreference(&man->cmd_space);
else
dma_free_coherent(man->dev_priv->drm.dev,
man->size, man->map, man->handle);
}
/**
* vmw_cmdbuf_man_destroy - Take down a command buffer manager.
*
* @man: Pointer to a command buffer manager.
*
* This function idles and then destroys a command buffer manager.
*/
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
WARN_ON_ONCE(man->has_pool);
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
if (vmw_cmdbuf_startstop(man, 0, false))
DRM_ERROR("Failed stopping command buffer contexts.\n");
vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
&man->dev_priv->error_waiters);
(void) cancel_work_sync(&man->work);
dma_pool_destroy(man->dheaders);
dma_pool_destroy(man->headers);
mutex_destroy(&man->cur_mutex);
mutex_destroy(&man->space_mutex);
mutex_destroy(&man->error_mutex);
kfree(man);
}
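/*
 * Illustrative sketch, not part of the driver: the manager life cycle as it
 * would be used around device bring-up and teardown. The 4 MiB pool size and
 * the helper names are made up; the real driver wires these calls into its
 * probe, MOB enable and unload paths.
 */
static inline struct vmw_cmdbuf_man *
example_cmdbuf_bringup(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(man))
		return man;
	/* Optionally enable large submissions once MOB memory is available. */
	if (vmw_cmdbuf_set_pool_size(man, 4 * 1024 * 1024))
		drm_info(&dev_priv->drm,
			 "Continuing with inline command buffers only.\n");
	return man;
}
static inline void example_cmdbuf_teardown(struct vmw_cmdbuf_man *man)
{
	vmw_cmdbuf_remove_pool(man);	/* no-op if no pool was set up */
	vmw_cmdbuf_man_destroy(man);
}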
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2009-2022 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*
* While no substantial code is shared, the prime code is inspired by
* drm_prime.c, with
* Authors:
* Dave Airlie <[email protected]>
* Rob Clark <[email protected]>
*/
/** @file ttm_object.c
*
* Base- and reference object implementation for the various
* ttm objects. Implements reference counting, minimal security checks
* and release on file close.
*/
#define pr_fmt(fmt) "[TTM] " fmt
#include "ttm_object.h"
#include "vmwgfx_drv.h"
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/hashtable.h>
MODULE_IMPORT_NS(DMA_BUF);
#define VMW_TTM_OBJECT_REF_HT_ORDER 10
/**
* struct ttm_object_file
*
* @tdev: Pointer to the ttm_object_device.
*
* @lock: Lock that protects the ref_list list and the
 * ref_hash hash table.
*
* @ref_list: List of ttm_ref_objects to be destroyed at
* file release.
*
 * @ref_hash: Hash table of ref objects for fast lookup of a ref object
 * given a base object handle.
*
* @refcount: reference/usage count
*/
struct ttm_object_file {
struct ttm_object_device *tdev;
spinlock_t lock;
struct list_head ref_list;
DECLARE_HASHTABLE(ref_hash, VMW_TTM_OBJECT_REF_HT_ORDER);
struct kref refcount;
};
/*
* struct ttm_object_device
*
* @object_lock: lock that protects idr.
*
* @object_count: Per device object count.
*
* This is the per-device data structure needed for ttm object management.
*/
struct ttm_object_device {
spinlock_t object_lock;
atomic_t object_count;
struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf);
struct idr idr;
};
/*
* struct ttm_ref_object
*
* @hash: Hash entry for the per-file object reference hash.
*
* @head: List entry for the per-file list of ref-objects.
*
* @kref: Ref count.
*
* @obj: Base object this ref object is referencing.
*
* @ref_type: Type of ref object.
*
* This is similar to an idr object, but it also has a hash table entry
* that allows lookup with a pointer to the referenced object as a key. In
* that way, one can easily detect whether a base object is referenced by
* a particular ttm_object_file. It also carries a ref count to avoid creating
* multiple ref objects if a ttm_object_file references the same base
* object more than once.
*/
struct ttm_ref_object {
struct rcu_head rcu_head;
struct vmwgfx_hash_item hash;
struct list_head head;
struct kref kref;
struct ttm_base_object *obj;
struct ttm_object_file *tfile;
};
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
kref_get(&tfile->refcount);
return tfile;
}
static int ttm_tfile_find_ref_rcu(struct ttm_object_file *tfile,
uint64_t key,
struct vmwgfx_hash_item **p_hash)
{
struct vmwgfx_hash_item *hash;
hash_for_each_possible_rcu(tfile->ref_hash, hash, head, key) {
if (hash->key == key) {
*p_hash = hash;
return 0;
}
}
return -EINVAL;
}
static int ttm_tfile_find_ref(struct ttm_object_file *tfile,
uint64_t key,
struct vmwgfx_hash_item **p_hash)
{
struct vmwgfx_hash_item *hash;
hash_for_each_possible(tfile->ref_hash, hash, head, key) {
if (hash->key == key) {
*p_hash = hash;
return 0;
}
}
return -EINVAL;
}
static void ttm_object_file_destroy(struct kref *kref)
{
struct ttm_object_file *tfile =
container_of(kref, struct ttm_object_file, refcount);
kfree(tfile);
}
static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
struct ttm_object_file *tfile = *p_tfile;
*p_tfile = NULL;
kref_put(&tfile->refcount, ttm_object_file_destroy);
}
int ttm_base_object_init(struct ttm_object_file *tfile,
struct ttm_base_object *base,
bool shareable,
enum ttm_object_type object_type,
void (*refcount_release) (struct ttm_base_object **))
{
struct ttm_object_device *tdev = tfile->tdev;
int ret;
base->shareable = shareable;
base->tfile = ttm_object_file_ref(tfile);
base->refcount_release = refcount_release;
base->object_type = object_type;
kref_init(&base->refcount);
idr_preload(GFP_KERNEL);
spin_lock(&tdev->object_lock);
ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
spin_unlock(&tdev->object_lock);
idr_preload_end();
if (ret < 0)
return ret;
base->handle = ret;
ret = ttm_ref_object_add(tfile, base, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
ttm_base_object_unref(&base);
return 0;
out_err1:
spin_lock(&tdev->object_lock);
idr_remove(&tdev->idr, base->handle);
spin_unlock(&tdev->object_lock);
return ret;
}
static void ttm_release_base(struct kref *kref)
{
struct ttm_base_object *base =
container_of(kref, struct ttm_base_object, refcount);
struct ttm_object_device *tdev = base->tfile->tdev;
spin_lock(&tdev->object_lock);
idr_remove(&tdev->idr, base->handle);
spin_unlock(&tdev->object_lock);
/*
* Note: We don't use synchronize_rcu() here because it's far
* too slow. It's up to the user to free the object using
* call_rcu() or ttm_base_object_kfree().
*/
ttm_object_file_unref(&base->tfile);
if (base->refcount_release)
base->refcount_release(&base);
}
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
*p_base = NULL;
kref_put(&base->refcount, ttm_release_base);
}
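/*
 * Illustrative sketch, not part of the driver: a minimal user-visible object
 * type embedding a struct ttm_base_object. The container struct, the helpers
 * and the direct kfree() are assumptions for the example; real users
 * typically defer freeing with RCU, as noted in ttm_release_base() above.
 */
struct example_simple_object {
	struct ttm_base_object base;
};
static inline void example_simple_release(struct ttm_base_object **p_base)
{
	struct example_simple_object *obj =
		container_of(*p_base, struct example_simple_object, base);
	*p_base = NULL;
	kfree(obj);
}
static inline int example_simple_init(struct ttm_object_file *tfile,
				      struct example_simple_object *obj,
				      enum ttm_object_type type)
{
	/* Registers the object and adds a reference on behalf of @tfile. */
	return ttm_base_object_init(tfile, &obj->base, false, type,
				    example_simple_release);
}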
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint64_t key)
{
struct ttm_base_object *base = NULL;
struct vmwgfx_hash_item *hash;
int ret;
spin_lock(&tfile->lock);
ret = ttm_tfile_find_ref(tfile, key, &hash);
if (likely(ret == 0)) {
base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
if (!kref_get_unless_zero(&base->refcount))
base = NULL;
}
spin_unlock(&tfile->lock);
return base;
}
struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key)
{
struct ttm_base_object *base;
rcu_read_lock();
base = idr_find(&tdev->idr, key);
if (base && !kref_get_unless_zero(&base->refcount))
base = NULL;
rcu_read_unlock();
return base;
}
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
bool *existed,
bool require_existed)
{
struct ttm_ref_object *ref;
struct vmwgfx_hash_item *hash;
int ret = -EINVAL;
if (base->tfile != tfile && !base->shareable)
return -EPERM;
if (existed != NULL)
*existed = true;
while (ret == -EINVAL) {
rcu_read_lock();
ret = ttm_tfile_find_ref_rcu(tfile, base->handle, &hash);
if (ret == 0) {
ref = hlist_entry(hash, struct ttm_ref_object, hash);
if (kref_get_unless_zero(&ref->kref)) {
rcu_read_unlock();
break;
}
}
rcu_read_unlock();
if (require_existed)
return -EPERM;
ref = kmalloc(sizeof(*ref), GFP_KERNEL);
if (unlikely(ref == NULL)) {
return -ENOMEM;
}
ref->hash.key = base->handle;
ref->obj = base;
ref->tfile = tfile;
kref_init(&ref->kref);
spin_lock(&tfile->lock);
hash_add_rcu(tfile->ref_hash, &ref->hash.head, ref->hash.key);
ret = 0;
list_add_tail(&ref->head, &tfile->ref_list);
kref_get(&base->refcount);
spin_unlock(&tfile->lock);
if (existed != NULL)
*existed = false;
}
return ret;
}
static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
struct ttm_ref_object *ref =
container_of(kref, struct ttm_ref_object, kref);
struct ttm_object_file *tfile = ref->tfile;
hash_del_rcu(&ref->hash.head);
list_del(&ref->head);
spin_unlock(&tfile->lock);
ttm_base_object_unref(&ref->obj);
kfree_rcu(ref, rcu_head);
spin_lock(&tfile->lock);
}
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
unsigned long key)
{
struct ttm_ref_object *ref;
struct vmwgfx_hash_item *hash;
int ret;
spin_lock(&tfile->lock);
ret = ttm_tfile_find_ref(tfile, key, &hash);
if (unlikely(ret != 0)) {
spin_unlock(&tfile->lock);
return -EINVAL;
}
ref = hlist_entry(hash, struct ttm_ref_object, hash);
kref_put(&ref->kref, ttm_ref_object_release);
spin_unlock(&tfile->lock);
return 0;
}
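/*
 * Illustrative sketch, not part of the driver: how an ioctl path typically
 * resolves a user-space handle, pins the object to the calling file with a
 * ref object and later drops that reference again. The helper name is made
 * up for the example.
 */
static inline int example_ref_by_handle(struct ttm_object_file *tfile,
					uint64_t handle)
{
	struct ttm_base_object *base;
	int ret;
	base = ttm_base_object_lookup(tfile, handle);
	if (!base)
		return -ENOENT;
	/* Add (or bump) the per-file reference for this handle. */
	ret = ttm_ref_object_add(tfile, base, NULL, false);
	ttm_base_object_unref(&base);
	if (ret)
		return ret;
	/* ... use the object ..., then drop the per-file reference. */
	return ttm_ref_object_base_unref(tfile, handle);
}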
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
struct ttm_ref_object *ref;
struct list_head *list;
struct ttm_object_file *tfile = *p_tfile;
*p_tfile = NULL;
spin_lock(&tfile->lock);
/*
* Since we release the lock within the loop, we have to
* restart it from the beginning each time.
*/
while (!list_empty(&tfile->ref_list)) {
list = tfile->ref_list.next;
ref = list_entry(list, struct ttm_ref_object, head);
ttm_ref_object_release(&ref->kref);
}
spin_unlock(&tfile->lock);
ttm_object_file_unref(&tfile);
}
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev)
{
struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
if (unlikely(tfile == NULL))
return NULL;
spin_lock_init(&tfile->lock);
tfile->tdev = tdev;
kref_init(&tfile->refcount);
INIT_LIST_HEAD(&tfile->ref_list);
hash_init(tfile->ref_hash);
return tfile;
}
struct ttm_object_device *
ttm_object_device_init(const struct dma_buf_ops *ops)
{
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
if (unlikely(tdev == NULL))
return NULL;
spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
/*
* Our base is at VMWGFX_NUM_MOB + 1 because we want to create
 * a separate namespace for GEM handles (which are
* 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctl's
* can take either handle as an argument so we want to
* easily be able to tell whether the handle refers to a
* GEM buffer or a surface.
*/
idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1);
tdev->ops = *ops;
tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release;
return tdev;
}
void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
struct ttm_object_device *tdev = *p_tdev;
*p_tdev = NULL;
WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
idr_destroy(&tdev->idr);
kfree(tdev);
}
/**
* get_dma_buf_unless_doomed - get a dma_buf reference if possible.
*
* @dmabuf: Non-refcounted pointer to a struct dma-buf.
*
* Obtain a file reference from a lookup structure that doesn't refcount
* the file, but synchronizes with its release method to make sure it has
* not been freed yet. See for example kref_get_unless_zero documentation.
* Returns true if refcounting succeeds, false otherwise.
*
* Nobody really wants this as a public API yet, so let it mature here
* for some time...
*/
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}
/**
* ttm_prime_refcount_release - refcount release method for a prime object.
*
* @p_base: Pointer to ttm_base_object pointer.
*
 * This is a wrapper that calls the refcount_release function of the
* underlying object. At the same time it cleans up the prime object.
* This function is called when all references to the base object we
* derive from are gone.
*/
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct ttm_prime_object *prime;
*p_base = NULL;
prime = container_of(base, struct ttm_prime_object, base);
BUG_ON(prime->dma_buf != NULL);
mutex_destroy(&prime->mutex);
if (prime->refcount_release)
prime->refcount_release(&base);
}
/**
* ttm_prime_dmabuf_release - Release method for the dma-bufs we export
*
 * @dma_buf: The dma-buf being released.
*
* This function first calls the dma_buf release method the driver
* provides. Then it cleans up our dma_buf pointer used for lookup,
* and finally releases the reference the dma_buf has on our base
* object.
*/
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
struct ttm_prime_object *prime =
(struct ttm_prime_object *) dma_buf->priv;
struct ttm_base_object *base = &prime->base;
struct ttm_object_device *tdev = base->tfile->tdev;
if (tdev->dmabuf_release)
tdev->dmabuf_release(dma_buf);
mutex_lock(&prime->mutex);
if (prime->dma_buf == dma_buf)
prime->dma_buf = NULL;
mutex_unlock(&prime->mutex);
ttm_base_object_unref(&base);
}
/**
* ttm_prime_fd_to_handle - Get a base object handle from a prime fd
*
* @tfile: A struct ttm_object_file identifying the caller.
* @fd: The prime / dmabuf fd.
* @handle: The returned handle.
*
* This function returns a handle to an object that previously exported
* a dma-buf. Note that we don't handle imports yet, because we simply
* have no consumers of that implementation.
*/
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
int fd, u32 *handle)
{
struct ttm_object_device *tdev = tfile->tdev;
struct dma_buf *dma_buf;
struct ttm_prime_object *prime;
struct ttm_base_object *base;
int ret;
dma_buf = dma_buf_get(fd);
if (IS_ERR(dma_buf))
return PTR_ERR(dma_buf);
	if (dma_buf->ops != &tdev->ops) {
		dma_buf_put(dma_buf);
		return -ENOSYS;
	}
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
*handle = base->handle;
ret = ttm_ref_object_add(tfile, base, NULL, false);
dma_buf_put(dma_buf);
return ret;
}
/**
* ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
*
* @tfile: Struct ttm_object_file identifying the caller.
* @handle: Handle to the object we're exporting from.
* @flags: flags for dma-buf creation. We just pass them on.
* @prime_fd: The returned file descriptor.
*
*/
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
uint32_t handle, uint32_t flags,
int *prime_fd)
{
struct ttm_object_device *tdev = tfile->tdev;
struct ttm_base_object *base;
struct dma_buf *dma_buf;
struct ttm_prime_object *prime;
int ret;
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(base == NULL ||
base->object_type != ttm_prime_type)) {
ret = -ENOENT;
goto out_unref;
}
prime = container_of(base, struct ttm_prime_object, base);
if (unlikely(!base->shareable)) {
ret = -EPERM;
goto out_unref;
}
ret = mutex_lock_interruptible(&prime->mutex);
if (unlikely(ret != 0)) {
ret = -ERESTARTSYS;
goto out_unref;
}
dma_buf = prime->dma_buf;
if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.ops = &tdev->ops;
exp_info.size = prime->size;
exp_info.flags = flags;
exp_info.priv = prime;
/*
* Need to create a new dma_buf
*/
dma_buf = dma_buf_export(&exp_info);
if (IS_ERR(dma_buf)) {
ret = PTR_ERR(dma_buf);
mutex_unlock(&prime->mutex);
goto out_unref;
}
/*
* dma_buf has taken the base object reference
*/
base = NULL;
prime->dma_buf = dma_buf;
}
mutex_unlock(&prime->mutex);
ret = dma_buf_fd(dma_buf, flags);
if (ret >= 0) {
*prime_fd = ret;
ret = 0;
} else
dma_buf_put(dma_buf);
out_unref:
if (base)
ttm_base_object_unref(&base);
return ret;
}
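/*
 * Illustrative sketch, not part of the driver: export a prime object to a
 * dma-buf fd and resolve such an fd back to a handle, roughly what the
 * driver's prime ioctls do. The helper name and the round trip itself are
 * assumptions for the example; the returned fd is owned by the calling
 * process and is eventually closed from user space.
 */
static inline int example_prime_roundtrip(struct ttm_object_file *tfile,
					  uint32_t handle, uint32_t flags)
{
	u32 imported_handle;
	int fd;
	int ret;
	ret = ttm_prime_handle_to_fd(tfile, handle, flags, &fd);
	if (ret)
		return ret;
	/* Importing our own export hands back the original handle. */
	return ttm_prime_fd_to_handle(tfile, fd, &imported_handle);
}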
/**
* ttm_prime_object_init - Initialize a ttm_prime_object
*
* @tfile: struct ttm_object_file identifying the caller
* @size: The size of the dma_bufs we export.
* @prime: The object to be initialized.
* @shareable: See ttm_base_object_init
* @type: See ttm_base_object_init
* @refcount_release: See ttm_base_object_init
*
* Initializes an object which is compatible with the drm_prime model
* for data sharing between processes and devices.
*/
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
struct ttm_prime_object *prime, bool shareable,
enum ttm_object_type type,
void (*refcount_release) (struct ttm_base_object **))
{
mutex_init(&prime->mutex);
prime->size = PAGE_ALIGN(size);
prime->real_type = type;
prime->dma_buf = NULL;
prime->refcount_release = refcount_release;
return ttm_base_object_init(tfile, &prime->base, shareable,
ttm_prime_type,
ttm_prime_refcount_release);
}
| linux-master | drivers/gpu/drm/vmwgfx/ttm_object.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <linux/pci.h>
#include <linux/sched/signal.h>
#include "vmwgfx_drv.h"
#define VMW_FENCE_WRAP (1 << 24)
static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
{
if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
return SVGA_IRQFLAG_REG_FENCE_GOAL;
else
return SVGA_IRQFLAG_FENCE_GOAL;
}
/**
* vmw_thread_fn - Deferred (process context) irq handler
*
* @irq: irq number
* @arg: Closure argument. Pointer to a struct drm_device cast to void *
*
* This function implements the deferred part of irq processing.
* The function is guaranteed to run at least once after the
* vmw_irq_handler has returned with IRQ_WAKE_THREAD.
*
*/
static irqreturn_t vmw_thread_fn(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
irqreturn_t ret = IRQ_NONE;
if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
dev_priv->irqthread_pending)) {
vmw_fences_update(dev_priv->fman);
wake_up_all(&dev_priv->fence_queue);
ret = IRQ_HANDLED;
}
if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
dev_priv->irqthread_pending)) {
vmw_cmdbuf_irqthread(dev_priv->cman);
ret = IRQ_HANDLED;
}
return ret;
}
/**
* vmw_irq_handler: irq handler
*
* @irq: irq number
* @arg: Closure argument. Pointer to a struct drm_device cast to void *
*
* This function implements the quick part of irq processing.
* The function performs fast actions like clearing the device interrupt
* flags and also reasonably quick actions like waking processes waiting for
* FIFO space. Other IRQ actions are deferred to the IRQ thread.
*/
static irqreturn_t vmw_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status, masked_status;
irqreturn_t ret = IRQ_HANDLED;
status = vmw_irq_status_read(dev_priv);
masked_status = status & READ_ONCE(dev_priv->irq_mask);
if (likely(status))
vmw_irq_status_write(dev_priv, status);
if (!status)
return IRQ_NONE;
if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);
if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
vmw_irqflag_fence_goal(dev_priv))) &&
!test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
ret = IRQ_WAKE_THREAD;
if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
SVGA_IRQFLAG_ERROR)) &&
!test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
dev_priv->irqthread_pending))
ret = IRQ_WAKE_THREAD;
return ret;
}
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
void vmw_update_seqno(struct vmw_private *dev_priv)
{
uint32_t seqno = vmw_fence_read(dev_priv);
if (dev_priv->last_read_seqno != seqno) {
dev_priv->last_read_seqno = seqno;
vmw_fences_update(dev_priv->fman);
}
}
bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno)
{
bool ret;
if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
vmw_update_seqno(dev_priv);
if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
return true;
/**
* Then check if the seqno is higher than what we've actually
* emitted. Then the fence is stale and signaled.
*/
ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
> VMW_FENCE_WRAP);
return ret;
}
int vmw_fallback_wait(struct vmw_private *dev_priv,
bool lazy,
bool fifo_idle,
uint32_t seqno,
bool interruptible,
unsigned long timeout)
{
struct vmw_fifo_state *fifo_state = dev_priv->fifo;
bool fifo_down = false;
uint32_t count = 0;
uint32_t signal_seq;
int ret;
unsigned long end_jiffies = jiffies + timeout;
bool (*wait_condition)(struct vmw_private *, uint32_t);
DEFINE_WAIT(__wait);
wait_condition = (fifo_idle) ? &vmw_fifo_idle :
&vmw_seqno_passed;
/**
* Block command submission while waiting for idle.
*/
if (fifo_idle) {
if (dev_priv->cman) {
ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
10*HZ);
if (ret)
goto out_err;
} else if (fifo_state) {
down_read(&fifo_state->rwsem);
fifo_down = true;
}
}
signal_seq = atomic_read(&dev_priv->marker_seq);
ret = 0;
for (;;) {
prepare_to_wait(&dev_priv->fence_queue, &__wait,
(interruptible) ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (wait_condition(dev_priv, seqno))
break;
if (time_after_eq(jiffies, end_jiffies)) {
DRM_ERROR("SVGA device lockup.\n");
break;
}
if (lazy)
schedule_timeout(1);
else if ((++count & 0x0F) == 0) {
/**
* FIXME: Use schedule_hr_timeout here for
* newer kernels and lower CPU utilization.
*/
__set_current_state(TASK_RUNNING);
schedule();
__set_current_state((interruptible) ?
TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
}
if (interruptible && signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
finish_wait(&dev_priv->fence_queue, &__wait);
if (ret == 0 && fifo_idle && fifo_state)
vmw_fence_write(dev_priv, signal_seq);
wake_up_all(&dev_priv->fence_queue);
out_err:
if (fifo_down)
up_read(&fifo_state->rwsem);
return ret;
}
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
spin_lock_bh(&dev_priv->waiter_lock);
if ((*waiter_count)++ == 0) {
vmw_irq_status_write(dev_priv, flag);
dev_priv->irq_mask |= flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
}
spin_unlock_bh(&dev_priv->waiter_lock);
}
void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
spin_lock_bh(&dev_priv->waiter_lock);
if (--(*waiter_count) == 0) {
dev_priv->irq_mask &= ~flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
}
spin_unlock_bh(&dev_priv->waiter_lock);
}
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
&dev_priv->fence_queue_waiters);
}
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
&dev_priv->fence_queue_waiters);
}
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
&dev_priv->goal_queue_waiters);
}
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
&dev_priv->goal_queue_waiters);
}
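/*
* Illustrative sketch, not part of the original file: the waiter helpers
* above are meant to be used in matched add/remove pairs around a wait so
* that the corresponding IRQ flag is only unmasked while someone is
* actually waiting. The helper name, timeout and error code below are
* assumptions made for the example.
*/
static int __maybe_unused vmw_example_wait_seqno(struct vmw_private *dev_priv,
u32 seqno)
{
long timeout;
vmw_seqno_waiter_add(dev_priv);
timeout = wait_event_timeout(dev_priv->fence_queue,
vmw_seqno_passed(dev_priv, seqno),
msecs_to_jiffies(3000));
vmw_seqno_waiter_remove(dev_priv);
return timeout ? 0 : -EBUSY;
}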
static void vmw_irq_preinstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status;
status = vmw_irq_status_read(dev_priv);
vmw_irq_status_write(dev_priv, status);
}
void vmw_irq_uninstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t status;
u32 i;
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
status = vmw_irq_status_read(dev_priv);
vmw_irq_status_write(dev_priv, status);
for (i = 0; i < dev_priv->num_irq_vectors; ++i)
free_irq(dev_priv->irqs[i], dev);
pci_free_irq_vectors(pdev);
dev_priv->num_irq_vectors = 0;
}
/**
* vmw_irq_install - Install the irq handlers
*
* @dev_priv: Pointer to the vmw_private device.
* Return: Zero if successful. Negative number otherwise.
*/
int vmw_irq_install(struct vmw_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct drm_device *dev = &dev_priv->drm;
int ret;
int nvec;
int i = 0;
BUILD_BUG_ON((SVGA_IRQFLAG_MAX >> VMWGFX_MAX_NUM_IRQS) != 1);
BUG_ON(VMWGFX_MAX_NUM_IRQS != get_count_order(SVGA_IRQFLAG_MAX));
nvec = pci_alloc_irq_vectors(pdev, 1, VMWGFX_MAX_NUM_IRQS,
PCI_IRQ_ALL_TYPES);
if (nvec <= 0) {
drm_err(&dev_priv->drm,
"IRQ's are unavailable, nvec: %d\n", nvec);
ret = nvec;
goto done;
}
vmw_irq_preinstall(dev);
for (i = 0; i < nvec; ++i) {
ret = pci_irq_vector(pdev, i);
if (ret < 0) {
drm_err(&dev_priv->drm,
"failed getting irq vector: %d\n", ret);
goto done;
}
dev_priv->irqs[i] = ret;
ret = request_threaded_irq(dev_priv->irqs[i], vmw_irq_handler, vmw_thread_fn,
IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
if (ret != 0) {
drm_err(&dev_priv->drm,
"Failed installing irq(%d): %d\n",
dev_priv->irqs[i], ret);
goto done;
}
}
done:
dev_priv->num_irq_vectors = i;
return ret;
}
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_irq.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#define VMW_RES_EVICT_ERR_COUNT 10
/**
* vmw_resource_mob_attach - Mark a resource as attached to its backing mob
* @res: The resource
*/
void vmw_resource_mob_attach(struct vmw_resource *res)
{
struct vmw_bo *gbo = res->guest_memory_bo;
struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL;
dma_resv_assert_held(gbo->tbo.base.resv);
res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
res->func->prio;
while (*new) {
struct vmw_resource *this =
container_of(*new, struct vmw_resource, mob_node);
parent = *new;
new = (res->guest_memory_offset < this->guest_memory_offset) ?
&((*new)->rb_left) : &((*new)->rb_right);
}
rb_link_node(&res->mob_node, parent, new);
rb_insert_color(&res->mob_node, &gbo->res_tree);
vmw_bo_prio_add(gbo, res->used_prio);
}
/**
* vmw_resource_mob_detach - Mark a resource as detached from its backing mob
* @res: The resource
*/
void vmw_resource_mob_detach(struct vmw_resource *res)
{
struct vmw_bo *gbo = res->guest_memory_bo;
dma_resv_assert_held(gbo->tbo.base.resv);
if (vmw_resource_mob_attached(res)) {
rb_erase(&res->mob_node, &gbo->res_tree);
RB_CLEAR_NODE(&res->mob_node);
vmw_bo_prio_del(gbo, res->used_prio);
}
}
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
kref_get(&res->kref);
return res;
}
struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
return kref_get_unless_zero(&res->kref) ? res : NULL;
}
/**
* vmw_resource_release_id - release a resource id to the id manager.
*
* @res: Pointer to the resource.
*
* Release the resource id to the resource id manager and set it to -1
*/
void vmw_resource_release_id(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
spin_lock(&dev_priv->resource_lock);
if (res->id != -1)
idr_remove(idr, res->id);
res->id = -1;
spin_unlock(&dev_priv->resource_lock);
}
static void vmw_resource_release(struct kref *kref)
{
struct vmw_resource *res =
container_of(kref, struct vmw_resource, kref);
struct vmw_private *dev_priv = res->dev_priv;
int id;
int ret;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
spin_lock(&dev_priv->resource_lock);
list_del_init(&res->lru_head);
spin_unlock(&dev_priv->resource_lock);
if (res->guest_memory_bo) {
struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
ret = ttm_bo_reserve(bo, false, false, NULL);
BUG_ON(ret);
if (vmw_resource_mob_attached(res) &&
res->func->unbind != NULL) {
struct ttm_validate_buffer val_buf;
val_buf.bo = bo;
val_buf.num_shared = 0;
res->func->unbind(res, false, &val_buf);
}
res->guest_memory_dirty = false;
vmw_resource_mob_detach(res);
if (res->dirty)
res->func->dirty_free(res);
if (res->coherent)
vmw_bo_dirty_release(res->guest_memory_bo);
ttm_bo_unreserve(bo);
vmw_bo_unreference(&res->guest_memory_bo);
}
if (likely(res->hw_destroy != NULL)) {
mutex_lock(&dev_priv->binding_mutex);
vmw_binding_res_list_kill(&res->binding_head);
mutex_unlock(&dev_priv->binding_mutex);
res->hw_destroy(res);
}
id = res->id;
if (res->res_free != NULL)
res->res_free(res);
else
kfree(res);
spin_lock(&dev_priv->resource_lock);
if (id != -1)
idr_remove(idr, id);
spin_unlock(&dev_priv->resource_lock);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
struct vmw_resource *res = *p_res;
*p_res = NULL;
kref_put(&res->kref, vmw_resource_release);
}
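/*
* Illustrative sketch, not part of the original file: references taken with
* vmw_resource_reference() are expected to be balanced with
* vmw_resource_unreference(), which also clears the caller's pointer. The
* function name below is an assumption made for the example.
*/
static void __maybe_unused vmw_example_ref_cycle(struct vmw_resource *res)
{
struct vmw_resource *tmp = vmw_resource_reference(res);
/* ... use tmp while holding the reference ... */
vmw_resource_unreference(&tmp); /* tmp is NULL afterwards */
}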
/**
* vmw_resource_alloc_id - allocate a resource id from the id manager.
*
* @res: Pointer to the resource.
*
* Allocate the lowest free resource from the resource manager, and set
* @res->id to that id. Returns 0 on success and -ENOMEM on failure.
*/
int vmw_resource_alloc_id(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
BUG_ON(res->id != -1);
idr_preload(GFP_KERNEL);
spin_lock(&dev_priv->resource_lock);
ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
if (ret >= 0)
res->id = ret;
spin_unlock(&dev_priv->resource_lock);
idr_preload_end();
return ret < 0 ? ret : 0;
}
/**
* vmw_resource_init - initialize a struct vmw_resource
*
* @dev_priv: Pointer to a device private struct.
* @res: The struct vmw_resource to initialize.
* @delay_id: Boolean whether to defer device id allocation until
* the first validation.
* @res_free: Resource destructor.
* @func: Resource function table.
*/
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
bool delay_id,
void (*res_free) (struct vmw_resource *res),
const struct vmw_res_func *func)
{
kref_init(&res->kref);
res->hw_destroy = NULL;
res->res_free = res_free;
res->dev_priv = dev_priv;
res->func = func;
RB_CLEAR_NODE(&res->mob_node);
INIT_LIST_HEAD(&res->lru_head);
INIT_LIST_HEAD(&res->binding_head);
res->id = -1;
res->guest_memory_bo = NULL;
res->guest_memory_offset = 0;
res->guest_memory_dirty = false;
res->res_dirty = false;
res->coherent = false;
res->used_prio = 3;
res->dirty = NULL;
if (delay_id)
return 0;
else
return vmw_resource_alloc_id(res);
}
/**
* vmw_user_resource_lookup_handle - lookup a struct resource from a
* TTM user-space handle and perform basic type checks
*
* @dev_priv: Pointer to a device private struct
* @tfile: Pointer to a struct ttm_object_file identifying the caller
* @handle: The TTM user-space handle
* @converter: Pointer to an object describing the resource type
* @p_res: On successful return the location pointed to will contain
* a pointer to a refcounted struct vmw_resource.
*
* If the handle can't be found or is associated with an incorrect resource
* type, -EINVAL will be returned.
*/
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
const struct vmw_user_resource_conv
*converter,
struct vmw_resource **p_res)
{
struct ttm_base_object *base;
struct vmw_resource *res;
int ret = -EINVAL;
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(!base))
return -EINVAL;
if (unlikely(ttm_base_object_type(base) != converter->object_type))
goto out_bad_resource;
res = converter->base_obj_to_res(base);
kref_get(&res->kref);
*p_res = res;
ret = 0;
out_bad_resource:
ttm_base_object_unref(&base);
return ret;
}
/*
* Helper function that looks up either a surface or a buffer object.
*
* The pointers pointed at by out_surf and out_buf need to be NULL.
*/
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_bo **out_buf)
{
struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
struct vmw_resource *res;
int ret;
BUG_ON(*out_surf || *out_buf);
ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
user_surface_converter,
&res);
if (!ret) {
*out_surf = vmw_res_to_srf(res);
return 0;
}
*out_surf = NULL;
ret = vmw_user_bo_lookup(filp, handle, out_buf);
return ret;
}
/**
* vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource.
*
* @res: The resource for which to allocate a gbo buffer.
* @interruptible: Whether any sleeps during allocation should be
* performed while interruptible.
*/
static int vmw_resource_buf_alloc(struct vmw_resource *res,
bool interruptible)
{
unsigned long size = PFN_ALIGN(res->guest_memory_size);
struct vmw_bo *gbo;
struct vmw_bo_params bo_params = {
.domain = res->func->domain,
.busy_domain = res->func->busy_domain,
.bo_type = ttm_bo_type_device,
.size = res->guest_memory_size,
.pin = false
};
int ret;
if (likely(res->guest_memory_bo)) {
BUG_ON(res->guest_memory_bo->tbo.base.size < size);
return 0;
}
ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
if (unlikely(ret != 0))
goto out_no_bo;
res->guest_memory_bo = gbo;
out_no_bo:
return ret;
}
/**
* vmw_resource_do_validate - Make a resource up-to-date and visible
* to the device.
*
* @res: The resource to make visible to the device.
* @val_buf: Information about a buffer possibly
* containing backup data if a bind operation is needed.
* @dirtying: Transfer dirty regions.
*
* On hardware resource shortage, this function returns -EBUSY and
* should be retried once resources have been freed up.
*/
static int vmw_resource_do_validate(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf,
bool dirtying)
{
int ret = 0;
const struct vmw_res_func *func = res->func;
if (unlikely(res->id == -1)) {
ret = func->create(res);
if (unlikely(ret != 0))
return ret;
}
if (func->bind &&
((func->needs_guest_memory && !vmw_resource_mob_attached(res) &&
val_buf->bo) ||
(!func->needs_guest_memory && val_buf->bo))) {
ret = func->bind(res, val_buf);
if (unlikely(ret != 0))
goto out_bind_failed;
if (func->needs_guest_memory)
vmw_resource_mob_attach(res);
}
/*
* Handle the case where the backup mob is marked coherent but
* the resource isn't.
*/
if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
!res->coherent) {
if (res->guest_memory_bo->dirty && !res->dirty) {
ret = func->dirty_alloc(res);
if (ret)
return ret;
} else if (!res->guest_memory_bo->dirty && res->dirty) {
func->dirty_free(res);
}
}
/*
* Transfer the dirty regions to the resource and update
* the resource.
*/
if (res->dirty) {
if (dirtying && !res->res_dirty) {
pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
pgoff_t end = __KERNEL_DIV_ROUND_UP
(res->guest_memory_offset + res->guest_memory_size,
PAGE_SIZE);
vmw_bo_dirty_unmap(res->guest_memory_bo, start, end);
}
vmw_bo_dirty_transfer_to_res(res);
return func->dirty_sync(res);
}
return 0;
out_bind_failed:
func->destroy(res);
return ret;
}
/**
* vmw_resource_unreserve - Unreserve a resource previously reserved for
* command submission.
*
* @res: Pointer to the struct vmw_resource to unreserve.
* @dirty_set: Change dirty status of the resource.
* @dirty: When changing dirty status indicates the new status.
* @switch_guest_memory: Guest memory buffer has been switched.
* @new_guest_memory_bo: Pointer to new guest memory buffer if command submission
* switched. May be NULL.
* @new_guest_memory_offset: New gbo offset if @switch_guest_memory is true.
*
* Currently unreserving a resource means putting it back on the device's
* resource lru list, so that it can be evicted if necessary.
*/
void vmw_resource_unreserve(struct vmw_resource *res,
bool dirty_set,
bool dirty,
bool switch_guest_memory,
struct vmw_bo *new_guest_memory_bo,
unsigned long new_guest_memory_offset)
{
struct vmw_private *dev_priv = res->dev_priv;
if (!list_empty(&res->lru_head))
return;
if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
if (res->guest_memory_bo) {
vmw_resource_mob_detach(res);
if (res->coherent)
vmw_bo_dirty_release(res->guest_memory_bo);
vmw_bo_unreference(&res->guest_memory_bo);
}
if (new_guest_memory_bo) {
res->guest_memory_bo = vmw_bo_reference(new_guest_memory_bo);
/*
* The validation code should already have added a
* dirty tracker here.
*/
WARN_ON(res->coherent && !new_guest_memory_bo->dirty);
vmw_resource_mob_attach(res);
} else {
res->guest_memory_bo = NULL;
}
} else if (switch_guest_memory && res->coherent) {
vmw_bo_dirty_release(res->guest_memory_bo);
}
if (switch_guest_memory)
res->guest_memory_offset = new_guest_memory_offset;
if (dirty_set)
res->res_dirty = dirty;
if (!res->func->may_evict || res->id == -1 || res->pin_count)
return;
spin_lock(&dev_priv->resource_lock);
list_add_tail(&res->lru_head,
&res->dev_priv->res_lru[res->func->res_type]);
spin_unlock(&dev_priv->resource_lock);
}
/**
* vmw_resource_check_buffer - Check whether a backup buffer is needed
* for a resource and in that case, allocate
* one, reserve and validate it.
*
* @ticket: The ww acquire context to use, or NULL if trylocking.
* @res: The resource for which to allocate a backup buffer.
* @interruptible: Whether any sleeps during allocation should be
* performed while interruptible.
* @val_buf: On successful return contains data about the
* reserved and validated backup buffer.
*/
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
struct vmw_resource *res,
bool interruptible,
struct ttm_validate_buffer *val_buf)
{
struct ttm_operation_ctx ctx = { true, false };
struct list_head val_list;
bool guest_memory_dirty = false;
int ret;
if (unlikely(!res->guest_memory_bo)) {
ret = vmw_resource_buf_alloc(res, interruptible);
if (unlikely(ret != 0))
return ret;
}
INIT_LIST_HEAD(&val_list);
ttm_bo_get(&res->guest_memory_bo->tbo);
val_buf->bo = &res->guest_memory_bo->tbo;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res))
return 0;
guest_memory_dirty = res->guest_memory_dirty;
vmw_bo_placement_set(res->guest_memory_bo, res->func->domain,
res->func->busy_domain);
ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
&res->guest_memory_bo->placement,
&ctx);
if (unlikely(ret != 0))
goto out_no_validate;
return 0;
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
ttm_bo_put(val_buf->bo);
val_buf->bo = NULL;
if (guest_memory_dirty)
vmw_bo_unreference(&res->guest_memory_bo);
return ret;
}
/*
* vmw_resource_reserve - Reserve a resource for command submission
*
* @res: The resource to reserve.
* @interruptible: Whether any sleeps during reservation should be
* performed while interruptible.
* @no_guest_memory: Whether to skip allocating a guest memory buffer.
*
* This function takes the resource off the LRU list and makes sure
* a guest memory buffer is present for guest-backed resources.
* However, the buffer may not be bound to the resource at this
* point.
*
*/
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_guest_memory)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
spin_lock(&dev_priv->resource_lock);
list_del_init(&res->lru_head);
spin_unlock(&dev_priv->resource_lock);
if (res->func->needs_guest_memory && !res->guest_memory_bo &&
!no_guest_memory) {
ret = vmw_resource_buf_alloc(res, interruptible);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a guest memory buffer "
"of size %lu. bytes\n",
(unsigned long) res->guest_memory_size);
return ret;
}
}
return 0;
}
/**
* vmw_resource_backoff_reservation - Unreserve and unreference a
* guest memory buffer
*
* @ticket: The ww acquire ctx used for reservation.
* @val_buf: Guest memory buffer information.
*/
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
struct ttm_validate_buffer *val_buf)
{
struct list_head val_list;
if (likely(val_buf->bo == NULL))
return;
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
ttm_bo_put(val_buf->bo);
val_buf->bo = NULL;
}
/**
* vmw_resource_do_evict - Evict a resource, and transfer its data
* to a backup buffer.
*
* @ticket: The ww acquire ticket to use, or NULL if trylocking.
* @res: The resource to evict.
* @interruptible: Whether to wait interruptible.
*/
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
struct vmw_resource *res, bool interruptible)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
int ret;
BUG_ON(!func->may_evict);
val_buf.bo = NULL;
val_buf.num_shared = 0;
ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
if (unlikely(func->unbind != NULL &&
(!func->needs_guest_memory || vmw_resource_mob_attached(res)))) {
ret = func->unbind(res, res->res_dirty, &val_buf);
if (unlikely(ret != 0))
goto out_no_unbind;
vmw_resource_mob_detach(res);
}
ret = func->destroy(res);
res->guest_memory_dirty = true;
res->res_dirty = false;
out_no_unbind:
vmw_resource_backoff_reservation(ticket, &val_buf);
return ret;
}
/**
* vmw_resource_validate - Make a resource up-to-date and visible
* to the device.
* @res: The resource to make visible to the device.
* @intr: Perform waits interruptible if possible.
* @dirtying: Pending GPU operation will dirty the resource
*
* On successful return, any guest memory buffer pointed to by @res->guest_memory_bo will
* be reserved and validated.
* On hardware resource shortage, this function will repeatedly evict
* resources of the same type until the validation succeeds.
*
* Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
* on failure.
*/
int vmw_resource_validate(struct vmw_resource *res, bool intr,
bool dirtying)
{
int ret;
struct vmw_resource *evict_res;
struct vmw_private *dev_priv = res->dev_priv;
struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
struct ttm_validate_buffer val_buf;
unsigned err_count = 0;
if (!res->func->create)
return 0;
val_buf.bo = NULL;
val_buf.num_shared = 0;
if (res->guest_memory_bo)
val_buf.bo = &res->guest_memory_bo->tbo;
do {
ret = vmw_resource_do_validate(res, &val_buf, dirtying);
if (likely(ret != -EBUSY))
break;
spin_lock(&dev_priv->resource_lock);
if (list_empty(lru_list) || !res->func->may_evict) {
DRM_ERROR("Out of device device resources "
"for %s.\n", res->func->type_name);
ret = -EBUSY;
spin_unlock(&dev_priv->resource_lock);
break;
}
evict_res = vmw_resource_reference
(list_first_entry(lru_list, struct vmw_resource,
lru_head));
list_del_init(&evict_res->lru_head);
spin_unlock(&dev_priv->resource_lock);
/* Trylock backup buffers with a NULL ticket. */
ret = vmw_resource_do_evict(NULL, evict_res, intr);
if (unlikely(ret != 0)) {
spin_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
spin_unlock(&dev_priv->resource_lock);
if (ret == -ERESTARTSYS ||
++err_count > VMW_RES_EVICT_ERR_COUNT) {
vmw_resource_unreference(&evict_res);
goto out_no_validate;
}
}
vmw_resource_unreference(&evict_res);
} while (1);
if (unlikely(ret != 0))
goto out_no_validate;
else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
WARN_ON_ONCE(vmw_resource_mob_attached(res));
vmw_bo_unreference(&res->guest_memory_bo);
}
return 0;
out_no_validate:
return ret;
}
/**
* vmw_resource_unbind_list - Unbind all resources attached to the given MOB
*
* @vbo: Pointer to the current backing MOB.
*
* Evicts the Guest Backed hardware resource if the backup
* buffer is being moved out of MOB memory.
* Note that this function will not race with the resource
* validation code, since resource validation and eviction
* both require the backup buffer to be reserved.
*/
void vmw_resource_unbind_list(struct vmw_bo *vbo)
{
struct ttm_validate_buffer val_buf = {
.bo = &vbo->tbo,
.num_shared = 0
};
dma_resv_assert_held(vbo->tbo.base.resv);
while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
struct rb_node *node = vbo->res_tree.rb_node;
struct vmw_resource *res =
container_of(node, struct vmw_resource, mob_node);
if (!WARN_ON_ONCE(!res->func->unbind))
(void) res->func->unbind(res, res->res_dirty, &val_buf);
res->guest_memory_dirty = true;
res->res_dirty = false;
vmw_resource_mob_detach(res);
}
(void) ttm_bo_wait(&vbo->tbo, false, false);
}
/**
* vmw_query_readback_all - Read back cached query states
*
* @dx_query_mob: Buffer containing the DX query MOB
*
* Read back cached states from the device if they exist. This function
* assumes binding_mutex is held.
*/
int vmw_query_readback_all(struct vmw_bo *dx_query_mob)
{
struct vmw_resource *dx_query_ctx;
struct vmw_private *dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXReadbackAllQuery body;
} *cmd;
/* No query bound, so do nothing */
if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
return 0;
dx_query_ctx = dx_query_mob->dx_query_ctx;
dev_priv = dx_query_ctx->dev_priv;
cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = dx_query_ctx->id;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
/* Triggers a rebind the next time affected context is bound */
dx_query_mob->dx_query_ctx = NULL;
return 0;
}
/**
* vmw_query_move_notify - Read back cached query states
*
* @bo: The TTM buffer object about to move.
* @old_mem: The memory region @bo is moving from.
* @new_mem: The memory region @bo is moving to.
*
* Called before the query MOB is swapped out to read back cached query
* states from the device.
*/
void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *old_mem,
struct ttm_resource *new_mem)
{
struct vmw_bo *dx_query_mob;
struct ttm_device *bdev = bo->bdev;
struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
mutex_lock(&dev_priv->binding_mutex);
/* If BO is being moved from MOB to system memory */
if (old_mem &&
new_mem->mem_type == TTM_PL_SYSTEM &&
old_mem->mem_type == VMW_PL_MOB) {
struct vmw_fence_obj *fence;
dx_query_mob = to_vmw_bo(&bo->base);
if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
mutex_unlock(&dev_priv->binding_mutex);
return;
}
(void) vmw_query_readback_all(dx_query_mob);
mutex_unlock(&dev_priv->binding_mutex);
/* Create a fence and attach the BO to it */
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
vmw_bo_fence_single(bo, fence);
if (fence != NULL)
vmw_fence_obj_unreference(&fence);
(void) ttm_bo_wait(bo, false, false);
} else
mutex_unlock(&dev_priv->binding_mutex);
}
/**
* vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
*
* @res: The resource being queried.
*/
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
return res->func->needs_guest_memory;
}
/**
* vmw_resource_evict_type - Evict all resources of a specific type
*
* @dev_priv: Pointer to a device private struct
* @type: The resource type to evict
*
* To avoid thrashing or starvation, or as part of the hibernation sequence,
* try to evict all evictable resources of a specific type.
*/
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
enum vmw_res_type type)
{
struct list_head *lru_list = &dev_priv->res_lru[type];
struct vmw_resource *evict_res;
unsigned err_count = 0;
int ret;
struct ww_acquire_ctx ticket;
do {
spin_lock(&dev_priv->resource_lock);
if (list_empty(lru_list))
goto out_unlock;
evict_res = vmw_resource_reference(
list_first_entry(lru_list, struct vmw_resource,
lru_head));
list_del_init(&evict_res->lru_head);
spin_unlock(&dev_priv->resource_lock);
/* Wait-lock backup buffers with a ticket. */
ret = vmw_resource_do_evict(&ticket, evict_res, false);
if (unlikely(ret != 0)) {
spin_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
spin_unlock(&dev_priv->resource_lock);
if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
vmw_resource_unreference(&evict_res);
return;
}
}
vmw_resource_unreference(&evict_res);
} while (1);
out_unlock:
spin_unlock(&dev_priv->resource_lock);
}
/**
* vmw_resource_evict_all - Evict all evictable resources
*
* @dev_priv: Pointer to a device private struct
*
* To avoid thrashing or starvation, or as part of the hibernation sequence,
* evict all evictable resources. In particular this means that all
* guest-backed resources that are registered with the device are
* evicted and the OTable becomes clean.
*/
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
enum vmw_res_type type;
mutex_lock(&dev_priv->cmdbuf_mutex);
for (type = 0; type < vmw_res_max; ++type)
vmw_resource_evict_type(dev_priv, type);
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
/*
* vmw_resource_pin - Add a pin reference on a resource
*
* @res: The resource to add a pin reference on
*
* This function adds a pin reference, and if needed validates the resource.
* Having a pin reference means that the resource can never be evicted, and
* its id will never change as long as there is a pin reference.
* This function returns 0 on success and a negative error code on failure.
*/
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
struct ttm_operation_ctx ctx = { interruptible, false };
struct vmw_private *dev_priv = res->dev_priv;
int ret;
mutex_lock(&dev_priv->cmdbuf_mutex);
ret = vmw_resource_reserve(res, interruptible, false);
if (ret)
goto out_no_reserve;
if (res->pin_count == 0) {
struct vmw_bo *vbo = NULL;
if (res->guest_memory_bo) {
vbo = res->guest_memory_bo;
ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
if (ret)
goto out_no_validate;
if (!vbo->tbo.pin_count) {
vmw_bo_placement_set(vbo,
res->func->domain,
res->func->busy_domain);
ret = ttm_bo_validate
(&vbo->tbo,
&vbo->placement,
&ctx);
if (ret) {
ttm_bo_unreserve(&vbo->tbo);
goto out_no_validate;
}
}
/* Do we really need to pin the MOB as well? */
vmw_bo_pin_reserved(vbo, true);
}
ret = vmw_resource_validate(res, interruptible, true);
if (vbo)
ttm_bo_unreserve(&vbo->tbo);
if (ret)
goto out_no_validate;
}
res->pin_count++;
out_no_validate:
vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
mutex_unlock(&dev_priv->cmdbuf_mutex);
return ret;
}
/**
* vmw_resource_unpin - Remove a pin reference from a resource
*
* @res: The resource to remove a pin reference from
*
* Having a pin reference means that the resource can never be evicted, and
* its id will never change as long as there is a pin reference.
*/
void vmw_resource_unpin(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
mutex_lock(&dev_priv->cmdbuf_mutex);
ret = vmw_resource_reserve(res, false, true);
WARN_ON(ret);
WARN_ON(res->pin_count == 0);
if (--res->pin_count == 0 && res->guest_memory_bo) {
struct vmw_bo *vbo = res->guest_memory_bo;
(void) ttm_bo_reserve(&vbo->tbo, false, false, NULL);
vmw_bo_pin_reserved(vbo, false);
ttm_bo_unreserve(&vbo->tbo);
}
vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
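/*
* Illustrative sketch, not part of the original file: pin references are
* expected to be balanced. While pinned, the resource cannot be evicted and
* its id stays stable. The function name is an assumption and error
* handling is trimmed for brevity.
*/
static int __maybe_unused vmw_example_use_pinned(struct vmw_resource *res)
{
int ret = vmw_resource_pin(res, true);
if (ret)
return ret;
/* ... emit commands that rely on res->id staying valid ... */
vmw_resource_unpin(res);
return 0;
}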
/**
* vmw_res_type - Return the resource type
*
* @res: Pointer to the resource
*/
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
return res->func->res_type;
}
/**
* vmw_resource_dirty_update - Update a resource's dirty tracker with a
* sequential range of touched backing store memory.
* @res: The resource.
* @start: The first page touched.
* @end: The last page touched + 1.
*/
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
pgoff_t end)
{
if (res->dirty)
res->func->dirty_range_add(res, start << PAGE_SHIFT,
end << PAGE_SHIFT);
}
/**
* vmw_resources_clean - Clean resources intersecting a mob range
* @vbo: The mob buffer object
* @start: The mob page offset starting the range
* @end: The mob page offset ending the range
* @num_prefault: Returns how many pages including the first have been
* cleaned and are ok to prefault
*/
int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
pgoff_t end, pgoff_t *num_prefault)
{
struct rb_node *cur = vbo->res_tree.rb_node;
struct vmw_resource *found = NULL;
unsigned long res_start = start << PAGE_SHIFT;
unsigned long res_end = end << PAGE_SHIFT;
unsigned long last_cleaned = 0;
/*
* Find the resource with the lowest guest_memory_offset that intersects the
* range.
*/
while (cur) {
struct vmw_resource *cur_res =
container_of(cur, struct vmw_resource, mob_node);
if (cur_res->guest_memory_offset >= res_end) {
cur = cur->rb_left;
} else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <=
res_start) {
cur = cur->rb_right;
} else {
found = cur_res;
cur = cur->rb_left;
/* Continue to look for resources with lower offsets */
}
}
/*
* In order of increasing guest_memory_offset, clean dirty resources
* intersecting the range.
*/
while (found) {
if (found->res_dirty) {
int ret;
if (!found->func->clean)
return -EINVAL;
ret = found->func->clean(found);
if (ret)
return ret;
found->res_dirty = false;
}
last_cleaned = found->guest_memory_offset + found->guest_memory_size;
cur = rb_next(&found->mob_node);
if (!cur)
break;
found = container_of(cur, struct vmw_resource, mob_node);
if (found->guest_memory_offset >= res_end)
break;
}
/*
* Set number of pages allowed prefaulting and fence the buffer object
*/
*num_prefault = 1;
if (last_cleaned > res_start) {
struct ttm_buffer_object *bo = &vbo->tbo;
*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
PAGE_SIZE);
vmw_bo_fence_single(bo, NULL);
}
return 0;
}
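/*
* Worked example, not part of the original file (values are assumptions):
* with start = 0, end = 10 and a single dirty resource at
* guest_memory_offset = 0 spanning 3 pages, last_cleaned ends up at
* 3 * PAGE_SIZE, so *num_prefault becomes
* __KERNEL_DIV_ROUND_UP(3 * PAGE_SIZE, PAGE_SIZE) = 3 and the buffer
* object is fenced.
*/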
| linux-master | drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* shmob_drm_crtc.c -- SH Mobile DRM CRTCs
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
*/
#include <linux/backlight.h>
#include <linux/clk.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include "shmob_drm_backlight.h"
#include "shmob_drm_crtc.h"
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
#include "shmob_drm_plane.h"
#include "shmob_drm_regs.h"
/*
* TODO: panel support
*/
/* -----------------------------------------------------------------------------
* Clock management
*/
static int shmob_drm_clk_on(struct shmob_drm_device *sdev)
{
int ret;
if (sdev->clock) {
ret = clk_prepare_enable(sdev->clock);
if (ret < 0)
return ret;
}
return 0;
}
static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
{
if (sdev->clock)
clk_disable_unprepare(sdev->clock);
}
/* -----------------------------------------------------------------------------
* CRTC
*/
static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc)
{
struct drm_crtc *crtc = &scrtc->crtc;
struct shmob_drm_device *sdev = crtc->dev->dev_private;
const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
const struct drm_display_mode *mode = &crtc->mode;
u32 value;
value = sdev->ldmt1r
| ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : LDMT1R_VPOL)
| ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : LDMT1R_HPOL)
| ((idata->flags & SHMOB_DRM_IFACE_FL_DWPOL) ? LDMT1R_DWPOL : 0)
| ((idata->flags & SHMOB_DRM_IFACE_FL_DIPOL) ? LDMT1R_DIPOL : 0)
| ((idata->flags & SHMOB_DRM_IFACE_FL_DAPOL) ? LDMT1R_DAPOL : 0)
| ((idata->flags & SHMOB_DRM_IFACE_FL_HSCNT) ? LDMT1R_HSCNT : 0)
| ((idata->flags & SHMOB_DRM_IFACE_FL_DWCNT) ? LDMT1R_DWCNT : 0);
lcdc_write(sdev, LDMT1R, value);
if (idata->interface >= SHMOB_DRM_IFACE_SYS8A &&
idata->interface <= SHMOB_DRM_IFACE_SYS24) {
/* Setup SYS bus. */
value = (idata->sys.cs_setup << LDMT2R_CSUP_SHIFT)
| (idata->sys.vsync_active_high ? LDMT2R_RSV : 0)
| (idata->sys.vsync_dir_input ? LDMT2R_VSEL : 0)
| (idata->sys.write_setup << LDMT2R_WCSC_SHIFT)
| (idata->sys.write_cycle << LDMT2R_WCEC_SHIFT)
| (idata->sys.write_strobe << LDMT2R_WCLW_SHIFT);
lcdc_write(sdev, LDMT2R, value);
value = (idata->sys.read_latch << LDMT3R_RDLC_SHIFT)
| (idata->sys.read_setup << LDMT3R_RCSC_SHIFT)
| (idata->sys.read_cycle << LDMT3R_RCEC_SHIFT)
| (idata->sys.read_strobe << LDMT3R_RCLW_SHIFT);
lcdc_write(sdev, LDMT3R, value);
}
value = ((mode->hdisplay / 8) << 16) /* HDCN */
| (mode->htotal / 8); /* HTCN */
lcdc_write(sdev, LDHCNR, value);
value = (((mode->hsync_end - mode->hsync_start) / 8) << 16) /* HSYNW */
| (mode->hsync_start / 8); /* HSYNP */
lcdc_write(sdev, LDHSYNR, value);
value = ((mode->hdisplay & 7) << 24) | ((mode->htotal & 7) << 16)
| (((mode->hsync_end - mode->hsync_start) & 7) << 8)
| (mode->hsync_start & 7);
lcdc_write(sdev, LDHAJR, value);
value = ((mode->vdisplay) << 16) /* VDLN */
| mode->vtotal; /* VTLN */
lcdc_write(sdev, LDVLNR, value);
value = ((mode->vsync_end - mode->vsync_start) << 16) /* VSYNW */
| mode->vsync_start; /* VSYNP */
lcdc_write(sdev, LDVSYNR, value);
}
static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start)
{
struct shmob_drm_device *sdev = scrtc->crtc.dev->dev_private;
u32 value;
value = lcdc_read(sdev, LDCNT2R);
if (start)
lcdc_write(sdev, LDCNT2R, value | LDCNT2R_DO);
else
lcdc_write(sdev, LDCNT2R, value & ~LDCNT2R_DO);
/* Wait until power is applied/stopped. */
while (1) {
value = lcdc_read(sdev, LDPMR) & LDPMR_LPS;
if ((start && value) || (!start && !value))
break;
cpu_relax();
}
if (!start) {
/* Stop the dot clock. */
lcdc_write(sdev, LDDCKSTPR, LDDCKSTPR_DCKSTP);
}
}
/*
* shmob_drm_crtc_start - Configure and start the LCDC
* @scrtc: the SH Mobile CRTC
*
* Configure and start the LCDC device. External devices (clocks, MERAM, panels,
* ...) are not touched by this function.
*/
static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
{
struct drm_crtc *crtc = &scrtc->crtc;
struct shmob_drm_device *sdev = crtc->dev->dev_private;
const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
const struct shmob_drm_format_info *format;
struct drm_device *dev = sdev->ddev;
struct drm_plane *plane;
u32 value;
int ret;
if (scrtc->started)
return;
format = shmob_drm_format_info(crtc->primary->fb->format->format);
if (WARN_ON(format == NULL))
return;
/* Enable clocks before accessing the hardware. */
ret = shmob_drm_clk_on(sdev);
if (ret < 0)
return;
/* Reset and enable the LCDC. */
lcdc_write(sdev, LDCNT2R, lcdc_read(sdev, LDCNT2R) | LDCNT2R_BR);
lcdc_wait_bit(sdev, LDCNT2R, LDCNT2R_BR, 0);
lcdc_write(sdev, LDCNT2R, LDCNT2R_ME);
/* Stop the LCDC first and disable all interrupts. */
shmob_drm_crtc_start_stop(scrtc, false);
lcdc_write(sdev, LDINTR, 0);
/* Configure power supply, dot clocks and start them. */
lcdc_write(sdev, LDPMR, 0);
value = sdev->lddckr;
if (idata->clk_div) {
/* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider
* denominator.
*/
lcdc_write(sdev, LDDCKPAT1R, 0);
lcdc_write(sdev, LDDCKPAT2R, (1 << (idata->clk_div / 2)) - 1);
if (idata->clk_div == 1)
value |= LDDCKR_MOSEL;
else
value |= idata->clk_div;
}
lcdc_write(sdev, LDDCKR, value);
lcdc_write(sdev, LDDCKSTPR, 0);
lcdc_wait_bit(sdev, LDDCKSTPR, ~0, 0);
/* TODO: Setup SYS panel */
/* Setup geometry, format, frame buffer memory and operation mode. */
shmob_drm_crtc_setup_geometry(scrtc);
/* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */
lcdc_write(sdev, LDDFR, format->lddfr | LDDFR_CF1);
lcdc_write(sdev, LDMLSR, scrtc->line_size);
lcdc_write(sdev, LDSA1R, scrtc->dma[0]);
if (format->yuv)
lcdc_write(sdev, LDSA2R, scrtc->dma[1]);
lcdc_write(sdev, LDSM1R, 0);
/* Word and long word swap. */
switch (format->fourcc) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV42:
value = LDDDSR_LS | LDDDSR_WS;
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV24:
value = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
break;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
default:
value = LDDDSR_LS;
break;
}
lcdc_write(sdev, LDDDSR, value);
/* Setup planes. */
drm_for_each_legacy_plane(plane, dev) {
if (plane->crtc == crtc)
shmob_drm_plane_setup(plane);
}
/* Enable the display output. */
lcdc_write(sdev, LDCNT1R, LDCNT1R_DE);
shmob_drm_crtc_start_stop(scrtc, true);
scrtc->started = true;
}
static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc)
{
struct drm_crtc *crtc = &scrtc->crtc;
struct shmob_drm_device *sdev = crtc->dev->dev_private;
if (!scrtc->started)
return;
/* Stop the LCDC. */
shmob_drm_crtc_start_stop(scrtc, false);
/* Disable the display output. */
lcdc_write(sdev, LDCNT1R, 0);
/* Stop clocks. */
shmob_drm_clk_off(sdev);
scrtc->started = false;
}
void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc)
{
shmob_drm_crtc_stop(scrtc);
}
void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc)
{
if (scrtc->dpms != DRM_MODE_DPMS_ON)
return;
shmob_drm_crtc_start(scrtc);
}
static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
int x, int y)
{
struct drm_crtc *crtc = &scrtc->crtc;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_gem_dma_object *gem;
unsigned int bpp;
bpp = scrtc->format->yuv ? 8 : scrtc->format->bpp;
gem = drm_fb_dma_get_gem_obj(fb, 0);
scrtc->dma[0] = gem->dma_addr + fb->offsets[0]
+ y * fb->pitches[0] + x * bpp / 8;
if (scrtc->format->yuv) {
bpp = scrtc->format->bpp - 8;
gem = drm_fb_dma_get_gem_obj(fb, 1);
scrtc->dma[1] = gem->dma_addr + fb->offsets[1]
+ y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+ x * (bpp == 16 ? 2 : 1);
}
}
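/*
* Worked example, not part of the original file (values are assumptions):
* for XRGB8888 (bpp = 32, no YUV) with fb->offsets[0] = 0 and
* fb->pitches[0] = 4096, panning to (x, y) = (16, 10) yields
* dma[0] = dma_addr + 10 * 4096 + 16 * 32 / 8 = dma_addr + 41024.
* For NV12 the luma plane uses 8 bpp and the chroma plane advances by
* y / 2 lines because of the 4:2:0 vertical subsampling.
*/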
static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc)
{
struct drm_crtc *crtc = &scrtc->crtc;
struct shmob_drm_device *sdev = crtc->dev->dev_private;
shmob_drm_crtc_compute_base(scrtc, crtc->x, crtc->y);
lcdc_write_mirror(sdev, LDSA1R, scrtc->dma[0]);
if (scrtc->format->yuv)
lcdc_write_mirror(sdev, LDSA2R, scrtc->dma[1]);
lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS);
}
#define to_shmob_crtc(c) container_of(c, struct shmob_drm_crtc, crtc)
static void shmob_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
if (scrtc->dpms == mode)
return;
if (mode == DRM_MODE_DPMS_ON)
shmob_drm_crtc_start(scrtc);
else
shmob_drm_crtc_stop(scrtc);
scrtc->dpms = mode;
}
static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc)
{
shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
struct shmob_drm_device *sdev = crtc->dev->dev_private;
const struct shmob_drm_format_info *format;
format = shmob_drm_format_info(crtc->primary->fb->format->format);
if (format == NULL) {
dev_dbg(sdev->dev, "mode_set: unsupported format %p4cc\n",
&crtc->primary->fb->format->format);
return -EINVAL;
}
scrtc->format = format;
scrtc->line_size = crtc->primary->fb->pitches[0];
shmob_drm_crtc_compute_base(scrtc, x, y);
return 0;
}
static void shmob_drm_crtc_mode_commit(struct drm_crtc *crtc)
{
shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
static int shmob_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
shmob_drm_crtc_update_base(to_shmob_crtc(crtc));
return 0;
}
static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
.dpms = shmob_drm_crtc_dpms,
.prepare = shmob_drm_crtc_mode_prepare,
.commit = shmob_drm_crtc_mode_commit,
.mode_set = shmob_drm_crtc_mode_set,
.mode_set_base = shmob_drm_crtc_mode_set_base,
};
void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
{
struct drm_pending_vblank_event *event;
struct drm_device *dev = scrtc->crtc.dev;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
event = scrtc->event;
scrtc->event = NULL;
if (event) {
drm_crtc_send_vblank_event(&scrtc->crtc, event);
drm_crtc_vblank_put(&scrtc->crtc);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags,
struct drm_modeset_acquire_ctx *ctx)
{
struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
struct drm_device *dev = scrtc->crtc.dev;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
if (scrtc->event != NULL) {
spin_unlock_irqrestore(&dev->event_lock, flags);
return -EBUSY;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
crtc->primary->fb = fb;
shmob_drm_crtc_update_base(scrtc);
if (event) {
event->pipe = 0;
drm_crtc_vblank_get(&scrtc->crtc);
spin_lock_irqsave(&dev->event_lock, flags);
scrtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
return 0;
}
static void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev,
bool enable)
{
unsigned long flags;
u32 ldintr;
/* Be careful not to acknowledge any pending interrupt. */
spin_lock_irqsave(&sdev->irq_lock, flags);
ldintr = lcdc_read(sdev, LDINTR) | LDINTR_STATUS_MASK;
if (enable)
ldintr |= LDINTR_VEE;
else
ldintr &= ~LDINTR_VEE;
lcdc_write(sdev, LDINTR, ldintr);
spin_unlock_irqrestore(&sdev->irq_lock, flags);
}
static int shmob_drm_enable_vblank(struct drm_crtc *crtc)
{
struct shmob_drm_device *sdev = crtc->dev->dev_private;
shmob_drm_crtc_enable_vblank(sdev, true);
return 0;
}
static void shmob_drm_disable_vblank(struct drm_crtc *crtc)
{
struct shmob_drm_device *sdev = crtc->dev->dev_private;
shmob_drm_crtc_enable_vblank(sdev, false);
}
static const struct drm_crtc_funcs crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_crtc_helper_set_config,
.page_flip = shmob_drm_crtc_page_flip,
.enable_vblank = shmob_drm_enable_vblank,
.disable_vblank = shmob_drm_disable_vblank,
};
static const uint32_t modeset_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
};
static const struct drm_plane_funcs primary_plane_funcs = {
DRM_PLANE_NON_ATOMIC_FUNCS,
};
int shmob_drm_crtc_create(struct shmob_drm_device *sdev)
{
struct drm_crtc *crtc = &sdev->crtc.crtc;
struct drm_plane *primary;
int ret;
sdev->crtc.dpms = DRM_MODE_DPMS_OFF;
primary = __drm_universal_plane_alloc(sdev->ddev, sizeof(*primary), 0,
0, &primary_plane_funcs,
modeset_formats,
ARRAY_SIZE(modeset_formats),
NULL, DRM_PLANE_TYPE_PRIMARY,
NULL);
if (IS_ERR(primary))
return PTR_ERR(primary);
ret = drm_crtc_init_with_planes(sdev->ddev, crtc, primary, NULL,
&crtc_funcs, NULL);
if (ret < 0) {
drm_plane_cleanup(primary);
kfree(primary);
return ret;
}
drm_crtc_helper_add(crtc, &crtc_helper_funcs);
return 0;
}
/* -----------------------------------------------------------------------------
* Encoder
*/
#define to_shmob_encoder(e) \
container_of(e, struct shmob_drm_encoder, encoder)
static void shmob_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct shmob_drm_encoder *senc = to_shmob_encoder(encoder);
struct shmob_drm_device *sdev = encoder->dev->dev_private;
struct shmob_drm_connector *scon = &sdev->connector;
if (senc->dpms == mode)
return;
shmob_drm_backlight_dpms(scon, mode);
senc->dpms = mode;
}
static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct shmob_drm_device *sdev = dev->dev_private;
struct drm_connector *connector = &sdev->connector.connector;
const struct drm_display_mode *panel_mode;
if (list_empty(&connector->modes)) {
dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
return false;
}
/* The flat panel mode is fixed, just copy it to the adjusted mode. */
panel_mode = list_first_entry(&connector->modes,
struct drm_display_mode, head);
drm_mode_copy(adjusted_mode, panel_mode);
return true;
}
static void shmob_drm_encoder_mode_prepare(struct drm_encoder *encoder)
{
/* No-op, everything is handled in the CRTC code. */
}
static void shmob_drm_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* No-op, everything is handled in the CRTC code. */
}
static void shmob_drm_encoder_mode_commit(struct drm_encoder *encoder)
{
/* No-op, everything is handled in the CRTC code. */
}
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
.dpms = shmob_drm_encoder_dpms,
.mode_fixup = shmob_drm_encoder_mode_fixup,
.prepare = shmob_drm_encoder_mode_prepare,
.commit = shmob_drm_encoder_mode_commit,
.mode_set = shmob_drm_encoder_mode_set,
};
int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
{
struct drm_encoder *encoder = &sdev->encoder.encoder;
int ret;
sdev->encoder.dpms = DRM_MODE_DPMS_OFF;
encoder->possible_crtcs = 1;
ret = drm_simple_encoder_init(sdev->ddev, encoder,
DRM_MODE_ENCODER_LVDS);
if (ret < 0)
return ret;
drm_encoder_helper_add(encoder, &encoder_helper_funcs);
return 0;
}
/* -----------------------------------------------------------------------------
* Connector
*/
#define to_shmob_connector(c) \
container_of(c, struct shmob_drm_connector, connector)
static int shmob_drm_connector_get_modes(struct drm_connector *connector)
{
struct shmob_drm_device *sdev = connector->dev->dev_private;
struct drm_display_mode *mode;
mode = drm_mode_create(connector->dev);
if (mode == NULL)
return 0;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
mode->clock = sdev->pdata->panel.mode.clock;
mode->hdisplay = sdev->pdata->panel.mode.hdisplay;
mode->hsync_start = sdev->pdata->panel.mode.hsync_start;
mode->hsync_end = sdev->pdata->panel.mode.hsync_end;
mode->htotal = sdev->pdata->panel.mode.htotal;
mode->vdisplay = sdev->pdata->panel.mode.vdisplay;
mode->vsync_start = sdev->pdata->panel.mode.vsync_start;
mode->vsync_end = sdev->pdata->panel.mode.vsync_end;
mode->vtotal = sdev->pdata->panel.mode.vtotal;
mode->flags = sdev->pdata->panel.mode.flags;
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
connector->display_info.width_mm = sdev->pdata->panel.width_mm;
connector->display_info.height_mm = sdev->pdata->panel.height_mm;
return 1;
}
static struct drm_encoder *
shmob_drm_connector_best_encoder(struct drm_connector *connector)
{
struct shmob_drm_connector *scon = to_shmob_connector(connector);
return scon->encoder;
}
static const struct drm_connector_helper_funcs connector_helper_funcs = {
.get_modes = shmob_drm_connector_get_modes,
.best_encoder = shmob_drm_connector_best_encoder,
};
static void shmob_drm_connector_destroy(struct drm_connector *connector)
{
struct shmob_drm_connector *scon = to_shmob_connector(connector);
shmob_drm_backlight_exit(scon);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
static const struct drm_connector_funcs connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = shmob_drm_connector_destroy,
};
int shmob_drm_connector_create(struct shmob_drm_device *sdev,
struct drm_encoder *encoder)
{
struct drm_connector *connector = &sdev->connector.connector;
int ret;
sdev->connector.encoder = encoder;
connector->display_info.width_mm = sdev->pdata->panel.width_mm;
connector->display_info.height_mm = sdev->pdata->panel.height_mm;
ret = drm_connector_init(sdev->ddev, connector, &connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
if (ret < 0)
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
ret = shmob_drm_backlight_init(&sdev->connector);
if (ret < 0)
goto err_cleanup;
ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0)
goto err_backlight;
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
drm_object_property_set_value(&connector->base,
sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
return 0;
err_backlight:
shmob_drm_backlight_exit(&sdev->connector);
err_cleanup:
drm_connector_cleanup(connector);
return ret;
}
| linux-master | drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* shmob_drm_plane.c -- SH Mobile DRM Planes
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
*/
#include <drm/drm_crtc.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
#include "shmob_drm_plane.h"
#include "shmob_drm_regs.h"
struct shmob_drm_plane {
struct drm_plane plane;
unsigned int index;
unsigned int alpha;
const struct shmob_drm_format_info *format;
unsigned long dma[2];
unsigned int src_x;
unsigned int src_y;
unsigned int crtc_x;
unsigned int crtc_y;
unsigned int crtc_w;
unsigned int crtc_h;
};
#define to_shmob_plane(p) container_of(p, struct shmob_drm_plane, plane)
static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane,
struct drm_framebuffer *fb,
int x, int y)
{
struct drm_gem_dma_object *gem;
unsigned int bpp;
bpp = splane->format->yuv ? 8 : splane->format->bpp;
gem = drm_fb_dma_get_gem_obj(fb, 0);
splane->dma[0] = gem->dma_addr + fb->offsets[0]
+ y * fb->pitches[0] + x * bpp / 8;
if (splane->format->yuv) {
bpp = splane->format->bpp - 8;
gem = drm_fb_dma_get_gem_obj(fb, 1);
splane->dma[1] = gem->dma_addr + fb->offsets[1]
+ y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+ x * (bpp == 16 ? 2 : 1);
}
}
static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
struct drm_framebuffer *fb)
{
struct shmob_drm_device *sdev = splane->plane.dev->dev_private;
u32 format;
/* TODO: Support ROP3 mode */
format = LDBBSIFR_EN | (splane->alpha << LDBBSIFR_LAY_SHIFT);
switch (splane->format->fourcc) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV42:
format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW;
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV24:
format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB;
break;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
default:
format |= LDBBSIFR_SWPL;
break;
}
switch (splane->format->fourcc) {
case DRM_FORMAT_RGB565:
format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16;
break;
case DRM_FORMAT_RGB888:
format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24;
break;
case DRM_FORMAT_ARGB8888:
format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDDFR_PKF_ARGB32;
break;
case DRM_FORMAT_XRGB8888:
format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDDFR_PKF_ARGB32;
break;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420;
break;
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422;
break;
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444;
break;
}
#define plane_reg_dump(sdev, splane, reg) \
dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \
splane->index, #reg, \
lcdc_read(sdev, reg(splane->index)), \
lcdc_read(sdev, reg(splane->index) + LCDC_SIDE_B_OFFSET))
plane_reg_dump(sdev, splane, LDBnBSIFR);
plane_reg_dump(sdev, splane, LDBnBSSZR);
plane_reg_dump(sdev, splane, LDBnBLOCR);
plane_reg_dump(sdev, splane, LDBnBSMWR);
plane_reg_dump(sdev, splane, LDBnBSAYR);
plane_reg_dump(sdev, splane, LDBnBSACR);
lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index));
dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
"LDBCR", lcdc_read(sdev, LDBCR));
lcdc_write(sdev, LDBnBSIFR(splane->index), format);
lcdc_write(sdev, LDBnBSSZR(splane->index),
(splane->crtc_h << LDBBSSZR_BVSS_SHIFT) |
(splane->crtc_w << LDBBSSZR_BHSS_SHIFT));
lcdc_write(sdev, LDBnBLOCR(splane->index),
(splane->crtc_y << LDBBLOCR_CVLC_SHIFT) |
(splane->crtc_x << LDBBLOCR_CHLC_SHIFT));
lcdc_write(sdev, LDBnBSMWR(splane->index),
fb->pitches[0] << LDBBSMWR_BSMW_SHIFT);
shmob_drm_plane_compute_base(splane, fb, splane->src_x, splane->src_y);
lcdc_write(sdev, LDBnBSAYR(splane->index), splane->dma[0]);
if (splane->format->yuv)
lcdc_write(sdev, LDBnBSACR(splane->index), splane->dma[1]);
lcdc_write(sdev, LDBCR,
LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index));
dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
"LDBCR", lcdc_read(sdev, LDBCR));
plane_reg_dump(sdev, splane, LDBnBSIFR);
plane_reg_dump(sdev, splane, LDBnBSSZR);
plane_reg_dump(sdev, splane, LDBnBLOCR);
plane_reg_dump(sdev, splane, LDBnBSMWR);
plane_reg_dump(sdev, splane, LDBnBSAYR);
plane_reg_dump(sdev, splane, LDBnBSACR);
}
void shmob_drm_plane_setup(struct drm_plane *plane)
{
struct shmob_drm_plane *splane = to_shmob_plane(plane);
if (plane->fb == NULL)
return;
__shmob_drm_plane_setup(splane, plane->fb);
}
static int
shmob_drm_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
struct shmob_drm_plane *splane = to_shmob_plane(plane);
struct shmob_drm_device *sdev = plane->dev->dev_private;
const struct shmob_drm_format_info *format;
format = shmob_drm_format_info(fb->format->format);
if (format == NULL) {
dev_dbg(sdev->dev, "update_plane: unsupported format %08x\n",
fb->format->format);
return -EINVAL;
}
if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
dev_dbg(sdev->dev, "%s: scaling not supported\n", __func__);
return -EINVAL;
}
splane->format = format;
splane->src_x = src_x >> 16;
splane->src_y = src_y >> 16;
splane->crtc_x = crtc_x;
splane->crtc_y = crtc_y;
splane->crtc_w = crtc_w;
splane->crtc_h = crtc_h;
__shmob_drm_plane_setup(splane, fb);
return 0;
}
static int shmob_drm_plane_disable(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx)
{
struct shmob_drm_plane *splane = to_shmob_plane(plane);
struct shmob_drm_device *sdev = plane->dev->dev_private;
splane->format = NULL;
lcdc_write(sdev, LDBnBSIFR(splane->index), 0);
return 0;
}
static void shmob_drm_plane_destroy(struct drm_plane *plane)
{
drm_plane_force_disable(plane);
drm_plane_cleanup(plane);
}
static const struct drm_plane_funcs shmob_drm_plane_funcs = {
.update_plane = shmob_drm_plane_update,
.disable_plane = shmob_drm_plane_disable,
.destroy = shmob_drm_plane_destroy,
};
static const uint32_t formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV16,
DRM_FORMAT_NV61,
DRM_FORMAT_NV24,
DRM_FORMAT_NV42,
};
int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
{
struct shmob_drm_plane *splane;
int ret;
splane = devm_kzalloc(sdev->dev, sizeof(*splane), GFP_KERNEL);
if (splane == NULL)
return -ENOMEM;
splane->index = index;
splane->alpha = 255;
ret = drm_universal_plane_init(sdev->ddev, &splane->plane, 1,
&shmob_drm_plane_funcs,
formats, ARRAY_SIZE(formats), NULL,
DRM_PLANE_TYPE_OVERLAY, NULL);
return ret;
}
| linux-master | drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* shmob_drm_kms.c -- SH Mobile DRM Mode Setting
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
*/
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include "shmob_drm_crtc.h"
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
#include "shmob_drm_regs.h"
/* -----------------------------------------------------------------------------
* Format helpers
*/
static const struct shmob_drm_format_info shmob_drm_format_infos[] = {
{
.fourcc = DRM_FORMAT_RGB565,
.bpp = 16,
.yuv = false,
.lddfr = LDDFR_PKF_RGB16,
}, {
.fourcc = DRM_FORMAT_RGB888,
.bpp = 24,
.yuv = false,
.lddfr = LDDFR_PKF_RGB24,
}, {
.fourcc = DRM_FORMAT_ARGB8888,
.bpp = 32,
.yuv = false,
.lddfr = LDDFR_PKF_ARGB32,
}, {
.fourcc = DRM_FORMAT_XRGB8888,
.bpp = 32,
.yuv = false,
.lddfr = LDDFR_PKF_ARGB32,
}, {
.fourcc = DRM_FORMAT_NV12,
.bpp = 12,
.yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_420,
}, {
.fourcc = DRM_FORMAT_NV21,
.bpp = 12,
.yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_420,
}, {
.fourcc = DRM_FORMAT_NV16,
.bpp = 16,
.yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_422,
}, {
.fourcc = DRM_FORMAT_NV61,
.bpp = 16,
.yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_422,
}, {
.fourcc = DRM_FORMAT_NV24,
.bpp = 24,
.yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_444,
}, {
.fourcc = DRM_FORMAT_NV42,
.bpp = 24,
.yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_444,
},
};
const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(shmob_drm_format_infos); ++i) {
if (shmob_drm_format_infos[i].fourcc == fourcc)
return &shmob_drm_format_infos[i];
}
return NULL;
}
/* -----------------------------------------------------------------------------
* Frame buffer
*/
static struct drm_framebuffer *
shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct shmob_drm_format_info *format;
format = shmob_drm_format_info(mode_cmd->pixel_format);
if (format == NULL) {
dev_dbg(dev->dev, "unsupported pixel format %p4cc\n",
&mode_cmd->pixel_format);
return ERR_PTR(-EINVAL);
}
if (mode_cmd->pitches[0] & 7 || mode_cmd->pitches[0] >= 65536) {
dev_dbg(dev->dev, "invalid pitch value %u\n",
mode_cmd->pitches[0]);
return ERR_PTR(-EINVAL);
}
if (format->yuv) {
unsigned int chroma_cpp = format->bpp == 24 ? 2 : 1;
if (mode_cmd->pitches[1] != mode_cmd->pitches[0] * chroma_cpp) {
dev_dbg(dev->dev,
"luma and chroma pitches do not match\n");
return ERR_PTR(-EINVAL);
}
}
return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
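/*
 * Illustrative sketch, not part of the original driver: the chroma pitch
 * rule enforced in shmob_drm_fb_create() above, factored into a standalone
 * helper (hypothetical name). NV24/NV42 carry full-resolution chroma in a
 * combined CbCr plane, so their chroma pitch must be twice the luma pitch;
 * the horizontally subsampled NV formats keep both pitches equal.
 */
static bool shmob_drm_chroma_pitch_ok(unsigned int bpp,
				      unsigned int luma_pitch,
				      unsigned int chroma_pitch)
{
	/* Only the 24bpp NV formats (NV24/NV42) use 2 bytes per chroma pixel. */
	unsigned int chroma_cpp = bpp == 24 ? 2 : 1;

	return chroma_pitch == luma_pitch * chroma_cpp;
}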
static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
.fb_create = shmob_drm_fb_create,
};
int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
{
int ret;
ret = drmm_mode_config_init(sdev->ddev);
if (ret)
return ret;
shmob_drm_crtc_create(sdev);
shmob_drm_encoder_create(sdev);
shmob_drm_connector_create(sdev, &sdev->encoder.encoder);
drm_kms_helper_poll_init(sdev->ddev);
sdev->ddev->mode_config.min_width = 0;
sdev->ddev->mode_config.min_height = 0;
sdev->ddev->mode_config.max_width = 4095;
sdev->ddev->mode_config.max_height = 4095;
sdev->ddev->mode_config.funcs = &shmob_drm_mode_config_funcs;
drm_helper_disable_unused_functions(sdev->ddev);
return 0;
}
| linux-master | drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* shmob_drm_drv.c -- SH Mobile DRM driver
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
#include "shmob_drm_plane.h"
#include "shmob_drm_regs.h"
/* -----------------------------------------------------------------------------
* Hardware initialization
*/
static int shmob_drm_init_interface(struct shmob_drm_device *sdev)
{
static const u32 ldmt1r[] = {
[SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
[SHMOB_DRM_IFACE_RGB9] = LDMT1R_MIFTYP_RGB9,
[SHMOB_DRM_IFACE_RGB12A] = LDMT1R_MIFTYP_RGB12A,
[SHMOB_DRM_IFACE_RGB12B] = LDMT1R_MIFTYP_RGB12B,
[SHMOB_DRM_IFACE_RGB16] = LDMT1R_MIFTYP_RGB16,
[SHMOB_DRM_IFACE_RGB18] = LDMT1R_MIFTYP_RGB18,
[SHMOB_DRM_IFACE_RGB24] = LDMT1R_MIFTYP_RGB24,
[SHMOB_DRM_IFACE_YUV422] = LDMT1R_MIFTYP_YCBCR,
[SHMOB_DRM_IFACE_SYS8A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8A,
[SHMOB_DRM_IFACE_SYS8B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8B,
[SHMOB_DRM_IFACE_SYS8C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8C,
[SHMOB_DRM_IFACE_SYS8D] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8D,
[SHMOB_DRM_IFACE_SYS9] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS9,
[SHMOB_DRM_IFACE_SYS12] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS12,
[SHMOB_DRM_IFACE_SYS16A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16A,
[SHMOB_DRM_IFACE_SYS16B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16B,
[SHMOB_DRM_IFACE_SYS16C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16C,
[SHMOB_DRM_IFACE_SYS18] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS18,
[SHMOB_DRM_IFACE_SYS24] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS24,
};
if (sdev->pdata->iface.interface >= ARRAY_SIZE(ldmt1r)) {
dev_err(sdev->dev, "invalid interface type %u\n",
sdev->pdata->iface.interface);
return -EINVAL;
}
sdev->ldmt1r = ldmt1r[sdev->pdata->iface.interface];
return 0;
}
static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
enum shmob_drm_clk_source clksrc)
{
struct clk *clk;
char *clkname;
switch (clksrc) {
case SHMOB_DRM_CLK_BUS:
clkname = "bus_clk";
sdev->lddckr = LDDCKR_ICKSEL_BUS;
break;
case SHMOB_DRM_CLK_PERIPHERAL:
clkname = "peripheral_clk";
sdev->lddckr = LDDCKR_ICKSEL_MIPI;
break;
case SHMOB_DRM_CLK_EXTERNAL:
clkname = NULL;
sdev->lddckr = LDDCKR_ICKSEL_HDMI;
break;
default:
return -EINVAL;
}
clk = devm_clk_get(sdev->dev, clkname);
if (IS_ERR(clk)) {
dev_err(sdev->dev, "cannot get dot clock %s\n", clkname);
return PTR_ERR(clk);
}
sdev->clock = clk;
return 0;
}
/* -----------------------------------------------------------------------------
* DRM operations
*/
static irqreturn_t shmob_drm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
struct shmob_drm_device *sdev = dev->dev_private;
unsigned long flags;
u32 status;
/* Acknowledge interrupts. Putting interrupt enable and interrupt flag
* bits in the same register is really brain-dead design and requires
* taking a spinlock.
*/
spin_lock_irqsave(&sdev->irq_lock, flags);
status = lcdc_read(sdev, LDINTR);
lcdc_write(sdev, LDINTR, status ^ LDINTR_STATUS_MASK);
spin_unlock_irqrestore(&sdev->irq_lock, flags);
if (status & LDINTR_VES) {
drm_handle_vblank(dev, 0);
shmob_drm_crtc_finish_page_flip(&sdev->crtc);
}
return IRQ_HANDLED;
}
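/*
 * Illustrative sketch, not part of the original driver: the arithmetic
 * behind the LDINTR write in shmob_drm_irq() above. XOR-ing the value read
 * back with LDINTR_STATUS_MASK writes 0 to every asserted status flag
 * while leaving the enable bits untouched, assuming (as the write above
 * implies) that flag bits are cleared by writing 0 and unaffected by
 * writing 1. The helper name is hypothetical.
 */
static u32 shmob_drm_ldintr_ack(u32 ldintr, u32 status_mask)
{
	/* Asserted flags become 0, idle flags become 1, enables pass through. */
	return ldintr ^ status_mask;
}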
DEFINE_DRM_GEM_DMA_FOPS(shmob_drm_fops);
static const struct drm_driver shmob_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
DRM_GEM_DMA_DRIVER_OPS,
.fops = &shmob_drm_fops,
.name = "shmob-drm",
.desc = "Renesas SH Mobile DRM",
.date = "20120424",
.major = 1,
.minor = 0,
};
/* -----------------------------------------------------------------------------
* Power management
*/
static int shmob_drm_pm_suspend(struct device *dev)
{
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
drm_kms_helper_poll_disable(sdev->ddev);
shmob_drm_crtc_suspend(&sdev->crtc);
return 0;
}
static int shmob_drm_pm_resume(struct device *dev)
{
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
drm_modeset_lock_all(sdev->ddev);
shmob_drm_crtc_resume(&sdev->crtc);
drm_modeset_unlock_all(sdev->ddev);
drm_kms_helper_poll_enable(sdev->ddev);
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(shmob_drm_pm_ops,
shmob_drm_pm_suspend, shmob_drm_pm_resume);
/* -----------------------------------------------------------------------------
* Platform driver
*/
static int shmob_drm_remove(struct platform_device *pdev)
{
struct shmob_drm_device *sdev = platform_get_drvdata(pdev);
struct drm_device *ddev = sdev->ddev;
drm_dev_unregister(ddev);
drm_kms_helper_poll_fini(ddev);
free_irq(sdev->irq, ddev);
drm_dev_put(ddev);
return 0;
}
static int shmob_drm_probe(struct platform_device *pdev)
{
struct shmob_drm_platform_data *pdata = pdev->dev.platform_data;
struct shmob_drm_device *sdev;
struct drm_device *ddev;
unsigned int i;
int ret;
if (pdata == NULL) {
dev_err(&pdev->dev, "no platform data\n");
return -EINVAL;
}
/*
* Allocate and initialize the driver private data, I/O resources and
* clocks.
*/
sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
if (sdev == NULL)
return -ENOMEM;
sdev->dev = &pdev->dev;
sdev->pdata = pdata;
spin_lock_init(&sdev->irq_lock);
platform_set_drvdata(pdev, sdev);
sdev->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdev->mmio))
return PTR_ERR(sdev->mmio);
ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
if (ret < 0)
return ret;
ret = shmob_drm_init_interface(sdev);
if (ret < 0)
return ret;
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&shmob_drm_driver, &pdev->dev);
if (IS_ERR(ddev))
return PTR_ERR(ddev);
sdev->ddev = ddev;
ddev->dev_private = sdev;
ret = shmob_drm_modeset_init(sdev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to initialize mode setting\n");
goto err_free_drm_dev;
}
for (i = 0; i < 4; ++i) {
ret = shmob_drm_plane_create(sdev, i);
if (ret < 0) {
dev_err(&pdev->dev, "failed to create plane %u\n", i);
goto err_modeset_cleanup;
}
}
ret = drm_vblank_init(ddev, 1);
if (ret < 0) {
dev_err(&pdev->dev, "failed to initialize vblank\n");
goto err_modeset_cleanup;
}
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto err_modeset_cleanup;
sdev->irq = ret;
ret = request_irq(sdev->irq, shmob_drm_irq, 0, ddev->driver->name,
ddev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to install IRQ handler\n");
goto err_modeset_cleanup;
}
/*
* Register the DRM device with the core and the connectors with
* sysfs.
*/
ret = drm_dev_register(ddev, 0);
if (ret < 0)
goto err_irq_uninstall;
drm_fbdev_generic_setup(ddev, 16);
return 0;
err_irq_uninstall:
free_irq(sdev->irq, ddev);
err_modeset_cleanup:
drm_kms_helper_poll_fini(ddev);
err_free_drm_dev:
drm_dev_put(ddev);
return ret;
}
static struct platform_driver shmob_drm_platform_driver = {
.probe = shmob_drm_probe,
.remove = shmob_drm_remove,
.driver = {
.name = "shmob-drm",
.pm = pm_sleep_ptr(&shmob_drm_pm_ops),
},
};
drm_module_platform_driver(shmob_drm_platform_driver);
MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
MODULE_DESCRIPTION("Renesas SH Mobile DRM Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* shmob_drm_backlight.c -- SH Mobile DRM Backlight
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
*/
#include <linux/backlight.h>
#include "shmob_drm_backlight.h"
#include "shmob_drm_crtc.h"
#include "shmob_drm_drv.h"
static int shmob_drm_backlight_update(struct backlight_device *bdev)
{
struct shmob_drm_connector *scon = bl_get_data(bdev);
struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
int brightness = backlight_get_brightness(bdev);
return bdata->set_brightness(brightness);
}
static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev)
{
struct shmob_drm_connector *scon = bl_get_data(bdev);
struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
return bdata->get_brightness();
}
static const struct backlight_ops shmob_drm_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = shmob_drm_backlight_update,
.get_brightness = shmob_drm_backlight_get_brightness,
};
void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode)
{
if (scon->backlight == NULL)
return;
scon->backlight->props.power = mode == DRM_MODE_DPMS_ON
? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
backlight_update_status(scon->backlight);
}
int shmob_drm_backlight_init(struct shmob_drm_connector *scon)
{
struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
struct drm_connector *connector = &scon->connector;
struct drm_device *dev = connector->dev;
struct backlight_device *backlight;
if (!bdata->max_brightness)
return 0;
backlight = backlight_device_register(bdata->name, dev->dev, scon,
&shmob_drm_backlight_ops, NULL);
if (IS_ERR(backlight)) {
dev_err(dev->dev, "unable to register backlight device: %ld\n",
PTR_ERR(backlight));
return PTR_ERR(backlight);
}
backlight->props.max_brightness = bdata->max_brightness;
backlight->props.brightness = bdata->max_brightness;
backlight->props.power = FB_BLANK_POWERDOWN;
backlight_update_status(backlight);
scon->backlight = backlight;
return 0;
}
void shmob_drm_backlight_exit(struct shmob_drm_connector *scon)
{
backlight_device_unregister(scon->backlight);
}
| linux-master | drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* R-Car Display Unit Planes
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_group.h"
#include "rcar_du_kms.h"
#include "rcar_du_plane.h"
#include "rcar_du_regs.h"
/* -----------------------------------------------------------------------------
* Atomic hardware plane allocator
*
* The hardware plane allocator is solely based on the atomic plane states
* without keeping any external state to avoid races between .atomic_check()
* and .atomic_commit().
*
* The core idea is to avoid using a free planes bitmask that would need to be
* shared between check and commit handlers with a collective knowledge based on
* the allocated hardware plane(s) for each KMS plane. The allocator then loops
* over all plane states to compute the free planes bitmask, allocates hardware
* planes based on that bitmask, and stores the result back in the plane states.
*
* For this to work we need to access the current state of planes not touched by
* the atomic update. To ensure that it won't be modified, we need to lock all
* planes using drm_atomic_get_plane_state(). This effectively serializes atomic
* updates from .atomic_check() up to completion (when swapping the states if
* the check step has succeeded) or rollback (when freeing the states if the
* check step has failed).
*
* Allocation is performed in the .atomic_check() handler and applied
* automatically when the core swaps the old and new states.
*/
static bool rcar_du_plane_needs_realloc(
const struct rcar_du_plane_state *old_state,
const struct rcar_du_plane_state *new_state)
{
/*
* Lowering the number of planes doesn't strictly require reallocation
* as the extra hardware plane will be freed when committing, but doing
* so could lead to more fragmentation.
*/
if (!old_state->format ||
old_state->format->planes != new_state->format->planes)
return true;
/* Reallocate hardware planes if the source has changed. */
if (old_state->source != new_state->source)
return true;
return false;
}
static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
{
unsigned int mask;
if (state->hwindex == -1)
return 0;
mask = 1 << state->hwindex;
if (state->format->planes == 2)
mask |= 1 << ((state->hwindex + 1) % 8);
return mask;
}
/*
* The R8A7790 DU can source frames directly from the VSP1 devices VSPD0 and
* VSPD1. VSPD0 feeds DU0/1 plane 0, and VSPD1 feeds either DU2 plane 0 or
* DU0/1 plane 1.
*
* Allocate the correct fixed plane when sourcing frames from VSPD0 or VSPD1,
* and allocate planes in reverse index order otherwise to ensure maximum
* availability of planes 0 and 1.
*
* The caller is responsible for ensuring that the requested source is
* compatible with the DU revision.
*/
static int rcar_du_plane_hwalloc(struct rcar_du_plane *plane,
struct rcar_du_plane_state *state,
unsigned int free)
{
unsigned int num_planes = state->format->planes;
int fixed = -1;
int i;
if (state->source == RCAR_DU_PLANE_VSPD0) {
/* VSPD0 feeds plane 0 on DU0/1. */
if (plane->group->index != 0)
return -EINVAL;
fixed = 0;
} else if (state->source == RCAR_DU_PLANE_VSPD1) {
/* VSPD1 feeds plane 1 on DU0/1 or plane 0 on DU2. */
fixed = plane->group->index == 0 ? 1 : 0;
}
if (fixed >= 0)
return free & (1 << fixed) ? fixed : -EBUSY;
for (i = RCAR_DU_NUM_HW_PLANES - 1; i >= 0; --i) {
if (!(free & (1 << i)))
continue;
if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
break;
}
return i < 0 ? -EBUSY : i;
}
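/*
 * Illustrative sketch, not part of the original driver: the reverse-order
 * search implemented in rcar_du_plane_hwalloc() above, applied to a
 * hypothetical free mask. With free = 0x3e (hardware planes 1-5 available)
 * a one-plane format is given plane 5, while a two-plane format settles on
 * plane 4 so that its chroma data can use plane 5 ((4 + 1) % 8 == 5). The
 * helper name is hypothetical.
 */
static int rcar_du_pick_hwplane(unsigned int free, unsigned int num_planes)
{
	int i;

	for (i = RCAR_DU_NUM_HW_PLANES - 1; i >= 0; --i) {
		if (!(free & (1 << i)))
			continue;
		/* A two-plane format also needs the next (wrapping) plane. */
		if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
			break;
	}

	return i < 0 ? -EBUSY : i;
}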
int rcar_du_atomic_check_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
bool needs_realloc = false;
unsigned int groups = 0;
unsigned int i;
struct drm_plane *drm_plane;
struct drm_plane_state *old_drm_plane_state;
struct drm_plane_state *new_drm_plane_state;
/* Check if hardware planes need to be reallocated. */
for_each_oldnew_plane_in_state(state, drm_plane, old_drm_plane_state,
new_drm_plane_state, i) {
struct rcar_du_plane_state *old_plane_state;
struct rcar_du_plane_state *new_plane_state;
struct rcar_du_plane *plane;
unsigned int index;
plane = to_rcar_plane(drm_plane);
old_plane_state = to_rcar_plane_state(old_drm_plane_state);
new_plane_state = to_rcar_plane_state(new_drm_plane_state);
dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__,
plane->group->index, plane - plane->group->planes);
/*
* If the plane is being disabled we don't need to go through
* the full reallocation procedure. Just mark the hardware
* plane(s) as freed.
*/
if (!new_plane_state->format) {
dev_dbg(rcdu->dev, "%s: plane is being disabled\n",
__func__);
index = plane - plane->group->planes;
group_freed_planes[plane->group->index] |= 1 << index;
new_plane_state->hwindex = -1;
continue;
}
/*
* If the plane needs to be reallocated mark it as such, and
* mark the hardware plane(s) as free.
*/
if (rcar_du_plane_needs_realloc(old_plane_state, new_plane_state)) {
dev_dbg(rcdu->dev, "%s: plane needs reallocation\n",
__func__);
groups |= 1 << plane->group->index;
needs_realloc = true;
index = plane - plane->group->planes;
group_freed_planes[plane->group->index] |= 1 << index;
new_plane_state->hwindex = -1;
}
}
if (!needs_realloc)
return 0;
/*
* Grab all plane states for the groups that need reallocation to ensure
* locking and avoid racy updates. This serializes the update operation,
* but there's not much we can do about it as that's the hardware
* design.
*
* Compute the used planes mask for each group at the same time to avoid
* looping over the planes separately later.
*/
while (groups) {
unsigned int index = ffs(groups) - 1;
struct rcar_du_group *group = &rcdu->groups[index];
unsigned int used_planes = 0;
dev_dbg(rcdu->dev, "%s: finding free planes for group %u\n",
__func__, index);
for (i = 0; i < group->num_planes; ++i) {
struct rcar_du_plane *plane = &group->planes[i];
struct rcar_du_plane_state *new_plane_state;
struct drm_plane_state *s;
s = drm_atomic_get_plane_state(state, &plane->plane);
if (IS_ERR(s))
return PTR_ERR(s);
/*
* If the plane has been freed in the above loop its
* hardware planes must not be added to the used planes
* bitmask. However, the current state doesn't reflect
* the free state yet, as we've modified the new state
* above. Use the local freed planes list to check for
* that condition instead.
*/
if (group_freed_planes[index] & (1 << i)) {
dev_dbg(rcdu->dev,
"%s: plane (%u,%tu) has been freed, skipping\n",
__func__, plane->group->index,
plane - plane->group->planes);
continue;
}
new_plane_state = to_rcar_plane_state(s);
used_planes |= rcar_du_plane_hwmask(new_plane_state);
dev_dbg(rcdu->dev,
"%s: plane (%u,%tu) uses %u hwplanes (index %d)\n",
__func__, plane->group->index,
plane - plane->group->planes,
new_plane_state->format ?
new_plane_state->format->planes : 0,
new_plane_state->hwindex);
}
group_free_planes[index] = 0xff & ~used_planes;
groups &= ~(1 << index);
dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
__func__, index, group_free_planes[index]);
}
/* Reallocate hardware planes for each plane that needs it. */
for_each_oldnew_plane_in_state(state, drm_plane, old_drm_plane_state,
new_drm_plane_state, i) {
struct rcar_du_plane_state *old_plane_state;
struct rcar_du_plane_state *new_plane_state;
struct rcar_du_plane *plane;
unsigned int crtc_planes;
unsigned int free;
int idx;
plane = to_rcar_plane(drm_plane);
old_plane_state = to_rcar_plane_state(old_drm_plane_state);
new_plane_state = to_rcar_plane_state(new_drm_plane_state);
dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__,
plane->group->index, plane - plane->group->planes);
/*
* Skip planes that are being disabled or don't need to be
* reallocated.
*/
if (!new_plane_state->format ||
!rcar_du_plane_needs_realloc(old_plane_state, new_plane_state))
continue;
/*
* Try to allocate the plane from the free planes currently
* associated with the target CRTC to avoid restarting the CRTC
* group and thus minimize flicker. If it fails fall back to
* allocating from all free planes.
*/
crtc_planes = to_rcar_crtc(new_plane_state->state.crtc)->index % 2
? plane->group->dptsr_planes
: ~plane->group->dptsr_planes;
free = group_free_planes[plane->group->index];
idx = rcar_du_plane_hwalloc(plane, new_plane_state,
free & crtc_planes);
if (idx < 0)
idx = rcar_du_plane_hwalloc(plane, new_plane_state,
free);
if (idx < 0) {
dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
__func__);
return idx;
}
dev_dbg(rcdu->dev, "%s: allocated %u hwplanes (index %u)\n",
__func__, new_plane_state->format->planes, idx);
new_plane_state->hwindex = idx;
group_free_planes[plane->group->index] &=
~rcar_du_plane_hwmask(new_plane_state);
dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
__func__, plane->group->index,
group_free_planes[plane->group->index]);
}
return 0;
}
/* -----------------------------------------------------------------------------
* Plane Setup
*/
#define RCAR_DU_COLORKEY_NONE (0 << 24)
#define RCAR_DU_COLORKEY_SOURCE (1 << 24)
#define RCAR_DU_COLORKEY_MASK (1 << 24)
static void rcar_du_plane_write(struct rcar_du_group *rgrp,
unsigned int index, u32 reg, u32 data)
{
rcar_du_write(rgrp->dev, rgrp->mmio_offset + index * PLANE_OFF + reg,
data);
}
static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp,
const struct rcar_du_plane_state *state)
{
unsigned int src_x = state->state.src.x1 >> 16;
unsigned int src_y = state->state.src.y1 >> 16;
unsigned int index = state->hwindex;
unsigned int pitch;
bool interlaced;
u32 dma[2];
interlaced = state->state.crtc->state->adjusted_mode.flags
& DRM_MODE_FLAG_INTERLACE;
if (state->source == RCAR_DU_PLANE_MEMORY) {
struct drm_framebuffer *fb = state->state.fb;
struct drm_gem_dma_object *gem;
unsigned int i;
if (state->format->planes == 2)
pitch = fb->pitches[0];
else
pitch = fb->pitches[0] * 8 / state->format->bpp;
for (i = 0; i < state->format->planes; ++i) {
gem = drm_fb_dma_get_gem_obj(fb, i);
dma[i] = gem->dma_addr + fb->offsets[i];
}
} else {
pitch = drm_rect_width(&state->state.src) >> 16;
dma[0] = 0;
dma[1] = 0;
}
/*
* Memory pitch (expressed in pixels). Must be doubled for interlaced
* operation with 32bpp formats.
*/
rcar_du_plane_write(rgrp, index, PnMWR,
(interlaced && state->format->bpp == 32) ?
pitch * 2 : pitch);
/*
* The Y position is expressed in raster line units and must be doubled
* for 32bpp formats, according to the R8A7790 datasheet. No mention of
* doubling the Y position is found in the R8A7779 datasheet, but the
* rule seems to apply there as well.
*
* Despite not being documented, doubling seem not to be needed when
* operating in interlaced mode.
*
* Similarly, for the second plane, NV12 and NV21 formats seem to
* require a halved Y position value, in both progressive and interlaced
* modes.
*/
rcar_du_plane_write(rgrp, index, PnSPXR, src_x);
rcar_du_plane_write(rgrp, index, PnSPYR, src_y *
(!interlaced && state->format->bpp == 32 ? 2 : 1));
rcar_du_plane_write(rgrp, index, PnDSA0R, dma[0]);
if (state->format->planes == 2) {
index = (index + 1) % 8;
rcar_du_plane_write(rgrp, index, PnMWR, pitch);
rcar_du_plane_write(rgrp, index, PnSPXR, src_x);
rcar_du_plane_write(rgrp, index, PnSPYR, src_y *
(state->format->bpp == 16 ? 2 : 1) / 2);
rcar_du_plane_write(rgrp, index, PnDSA0R, dma[1]);
}
}
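/*
 * Illustrative sketch, not part of the original driver: the PnMWR value
 * derived in rcar_du_plane_setup_scanout() above for single-plane formats,
 * as a standalone helper (hypothetical name). The register takes the pitch
 * in pixels, and 32bpp formats must double it in interlaced modes: a
 * 1920-pixel-wide XRGB8888 buffer (7680-byte pitch) programs 1920 when
 * progressive and 3840 when interlaced.
 */
static unsigned int rcar_du_single_plane_pnmwr(unsigned int pitch_bytes,
					       unsigned int bpp,
					       bool interlaced)
{
	unsigned int pitch = pitch_bytes * 8 / bpp;	/* bytes -> pixels */

	return (interlaced && bpp == 32) ? pitch * 2 : pitch;
}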
static void rcar_du_plane_setup_mode(struct rcar_du_group *rgrp,
unsigned int index,
const struct rcar_du_plane_state *state)
{
u32 colorkey;
u32 pnmr;
/*
* The PnALPHAR register controls alpha-blending in 16bpp formats
* (ARGB1555 and XRGB1555).
*
* For ARGB, set the alpha value to 0, and enable alpha-blending when
* the A bit is 0. This maps A=0 to alpha=0 and A=1 to alpha=255.
*
* For XRGB, set the alpha value to the plane-wide alpha value and
* enable alpha-blending regardless of the X bit value.
*/
if (state->format->fourcc != DRM_FORMAT_XRGB1555)
rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0);
else
rcar_du_plane_write(rgrp, index, PnALPHAR,
PnALPHAR_ABIT_X | state->state.alpha >> 8);
pnmr = PnMR_BM_MD | state->format->pnmr;
/*
* Disable color keying when requested. YUV formats have the
* PnMR_SPIM_TP_OFF bit set in their pnmr field, disabling color keying
* automatically.
*/
if ((state->colorkey & RCAR_DU_COLORKEY_MASK) == RCAR_DU_COLORKEY_NONE)
pnmr |= PnMR_SPIM_TP_OFF;
/* For packed YUV formats we need to select the U/V order. */
if (state->format->fourcc == DRM_FORMAT_YUYV)
pnmr |= PnMR_YCDF_YUYV;
rcar_du_plane_write(rgrp, index, PnMR, pnmr);
switch (state->format->fourcc) {
case DRM_FORMAT_RGB565:
colorkey = ((state->colorkey & 0xf80000) >> 8)
| ((state->colorkey & 0x00fc00) >> 5)
| ((state->colorkey & 0x0000f8) >> 3);
rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
break;
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
colorkey = ((state->colorkey & 0xf80000) >> 9)
| ((state->colorkey & 0x00f800) >> 6)
| ((state->colorkey & 0x0000f8) >> 3);
rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
rcar_du_plane_write(rgrp, index, PnTC3R,
PnTC3R_CODE | (state->colorkey & 0xffffff));
break;
}
}
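/*
 * Illustrative sketch, not part of the original driver: the RGB565
 * colorkey packing performed in the switch above, factored into a helper
 * (hypothetical name). The colorkey property stores an RGB888 triplet and
 * only the upper 5/6/5 bits of each component survive the conversion
 * programmed into PnTC2R.
 */
static u32 rcar_du_colorkey_to_rgb565(u32 colorkey)
{
	return ((colorkey & 0xf80000) >> 8)	/* R[7:3] -> bits 15:11 */
	     | ((colorkey & 0x00fc00) >> 5)	/* G[7:2] -> bits 10:5 */
	     | ((colorkey & 0x0000f8) >> 3);	/* B[7:3] -> bits 4:0 */
}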
static void rcar_du_plane_setup_format_gen2(struct rcar_du_group *rgrp,
unsigned int index,
const struct rcar_du_plane_state *state)
{
u32 ddcr2 = PnDDCR2_CODE;
u32 ddcr4;
/*
* Data format
*
* The data format is selected by the DDDF field in PnMR and the EDF
* field in DDCR4.
*/
rcar_du_plane_setup_mode(rgrp, index, state);
if (state->format->planes == 2) {
if (state->hwindex != index) {
if (state->format->fourcc == DRM_FORMAT_NV12 ||
state->format->fourcc == DRM_FORMAT_NV21)
ddcr2 |= PnDDCR2_Y420;
if (state->format->fourcc == DRM_FORMAT_NV21)
ddcr2 |= PnDDCR2_NV21;
ddcr2 |= PnDDCR2_DIVU;
} else {
ddcr2 |= PnDDCR2_DIVY;
}
}
rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
ddcr4 = state->format->edf | PnDDCR4_CODE;
if (state->source != RCAR_DU_PLANE_MEMORY)
ddcr4 |= PnDDCR4_VSPS;
rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
}
static void rcar_du_plane_setup_format_gen3(struct rcar_du_group *rgrp,
unsigned int index,
const struct rcar_du_plane_state *state)
{
struct rcar_du_device *rcdu = rgrp->dev;
u32 pnmr = state->format->pnmr | PnMR_SPIM_TP_OFF;
if (rcdu->info->features & RCAR_DU_FEATURE_NO_BLENDING) {
/* No blending. ALP and EOR are not supported. */
pnmr &= ~(PnMR_SPIM_ALP | PnMR_SPIM_EOR);
}
rcar_du_plane_write(rgrp, index, PnMR, pnmr);
rcar_du_plane_write(rgrp, index, PnDDCR4,
state->format->edf | PnDDCR4_CODE);
/*
* On Gen3, some DU channels have two planes, each being wired to a
* separate VSPD instance. The DU can then blend two planes. While
* this feature isn't used by the driver, issues related to alpha
* blending (such as incorrect colors or planes being invisible) may
* still occur if the PnALPHAR register has a stale value. Set the
* register to 0 to avoid this.
*/
rcar_du_plane_write(rgrp, index, PnALPHAR, 0);
}
static void rcar_du_plane_setup_format(struct rcar_du_group *rgrp,
unsigned int index,
const struct rcar_du_plane_state *state)
{
struct rcar_du_device *rcdu = rgrp->dev;
const struct drm_rect *dst = &state->state.dst;
if (rcdu->info->gen < 3)
rcar_du_plane_setup_format_gen2(rgrp, index, state);
else
rcar_du_plane_setup_format_gen3(rgrp, index, state);
/* Destination position and size */
rcar_du_plane_write(rgrp, index, PnDSXR, drm_rect_width(dst));
rcar_du_plane_write(rgrp, index, PnDSYR, drm_rect_height(dst));
rcar_du_plane_write(rgrp, index, PnDPXR, dst->x1);
rcar_du_plane_write(rgrp, index, PnDPYR, dst->y1);
if (rcdu->info->gen < 3) {
/* Wrap-around and blinking, disabled */
rcar_du_plane_write(rgrp, index, PnWASPR, 0);
rcar_du_plane_write(rgrp, index, PnWAMWR, 4095);
rcar_du_plane_write(rgrp, index, PnBTR, 0);
rcar_du_plane_write(rgrp, index, PnMLR, 0);
}
}
void __rcar_du_plane_setup(struct rcar_du_group *rgrp,
const struct rcar_du_plane_state *state)
{
struct rcar_du_device *rcdu = rgrp->dev;
rcar_du_plane_setup_format(rgrp, state->hwindex, state);
if (state->format->planes == 2)
rcar_du_plane_setup_format(rgrp, (state->hwindex + 1) % 8,
state);
if (rcdu->info->gen >= 3)
return;
rcar_du_plane_setup_scanout(rgrp, state);
if (state->source == RCAR_DU_PLANE_VSPD1) {
unsigned int vspd1_sink = rgrp->index ? 2 : 0;
if (rcdu->vspd1_sink != vspd1_sink) {
rcdu->vspd1_sink = vspd1_sink;
rcar_du_set_dpad0_vsp1_routing(rcdu);
/*
* Changes to the VSP1 sink take effect on DRES and thus
* need a restart of the group.
*/
rgrp->need_restart = true;
}
}
}
int __rcar_du_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state,
const struct rcar_du_format_info **format)
{
struct drm_device *dev = plane->dev;
struct drm_crtc_state *crtc_state;
int ret;
if (!state->crtc) {
/*
* The visible field is not reset by the DRM core but only
* updated by drm_atomic_helper_check_plane_state(), set it
* manually.
*/
state->visible = false;
*format = NULL;
return 0;
}
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
ret = drm_atomic_helper_check_plane_state(state, crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
true, true);
if (ret < 0)
return ret;
if (!state->visible) {
*format = NULL;
return 0;
}
*format = rcar_du_format_info(state->fb->format->format);
if (*format == NULL) {
dev_dbg(dev->dev, "%s: unsupported format %p4cc\n", __func__,
&state->fb->format->format);
return -EINVAL;
}
return 0;
}
static int rcar_du_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct rcar_du_plane_state *rstate = to_rcar_plane_state(new_plane_state);
return __rcar_du_plane_atomic_check(plane, new_plane_state,
&rstate->format);
}
static void rcar_du_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
struct rcar_du_plane *rplane = to_rcar_plane(plane);
struct rcar_du_plane_state *old_rstate;
struct rcar_du_plane_state *new_rstate;
if (!new_state->visible)
return;
rcar_du_plane_setup(rplane);
/*
* Check whether the source has changed from memory to live source or
* from live source to memory. The source has been configured by the
* VSPS bit in the PnDDCR4 register. Although the datasheet states that
* the bit is updated during vertical blanking, it seems that updates
* only occur when the DU group is held in reset through the DSYSR.DRES
* bit. We thus need to restart the group if the source changes.
*/
old_rstate = to_rcar_plane_state(old_state);
new_rstate = to_rcar_plane_state(new_state);
if ((old_rstate->source == RCAR_DU_PLANE_MEMORY) !=
(new_rstate->source == RCAR_DU_PLANE_MEMORY))
rplane->group->need_restart = true;
}
static const struct drm_plane_helper_funcs rcar_du_plane_helper_funcs = {
.atomic_check = rcar_du_plane_atomic_check,
.atomic_update = rcar_du_plane_atomic_update,
};
static struct drm_plane_state *
rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane)
{
struct rcar_du_plane_state *state;
struct rcar_du_plane_state *copy;
if (WARN_ON(!plane->state))
return NULL;
state = to_rcar_plane_state(plane->state);
copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
if (copy == NULL)
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &copy->state);
return &copy->state;
}
static void rcar_du_plane_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
__drm_atomic_helper_plane_destroy_state(state);
kfree(to_rcar_plane_state(state));
}
static void rcar_du_plane_reset(struct drm_plane *plane)
{
struct rcar_du_plane_state *state;
if (plane->state) {
rcar_du_plane_atomic_destroy_state(plane, plane->state);
plane->state = NULL;
}
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
return;
__drm_atomic_helper_plane_reset(plane, &state->state);
state->hwindex = -1;
state->source = RCAR_DU_PLANE_MEMORY;
state->colorkey = RCAR_DU_COLORKEY_NONE;
}
static int rcar_du_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val)
{
struct rcar_du_plane_state *rstate = to_rcar_plane_state(state);
struct rcar_du_device *rcdu = to_rcar_plane(plane)->group->dev;
if (property == rcdu->props.colorkey)
rstate->colorkey = val;
else
return -EINVAL;
return 0;
}
static int rcar_du_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state, struct drm_property *property,
uint64_t *val)
{
const struct rcar_du_plane_state *rstate =
container_of(state, const struct rcar_du_plane_state, state);
struct rcar_du_device *rcdu = to_rcar_plane(plane)->group->dev;
if (property == rcdu->props.colorkey)
*val = rstate->colorkey;
else
return -EINVAL;
return 0;
}
static const struct drm_plane_funcs rcar_du_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = rcar_du_plane_reset,
.destroy = drm_plane_cleanup,
.atomic_duplicate_state = rcar_du_plane_atomic_duplicate_state,
.atomic_destroy_state = rcar_du_plane_atomic_destroy_state,
.atomic_set_property = rcar_du_plane_atomic_set_property,
.atomic_get_property = rcar_du_plane_atomic_get_property,
};
static const uint32_t formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV16,
};
int rcar_du_planes_init(struct rcar_du_group *rgrp)
{
struct rcar_du_device *rcdu = rgrp->dev;
unsigned int crtcs;
unsigned int i;
int ret;
/*
* Create one primary plane per CRTC in this group and seven overlay
* planes.
*/
rgrp->num_planes = rgrp->num_crtcs + 7;
crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index));
for (i = 0; i < rgrp->num_planes; ++i) {
enum drm_plane_type type = i < rgrp->num_crtcs
? DRM_PLANE_TYPE_PRIMARY
: DRM_PLANE_TYPE_OVERLAY;
struct rcar_du_plane *plane = &rgrp->planes[i];
plane->group = rgrp;
ret = drm_universal_plane_init(&rcdu->ddev, &plane->plane,
crtcs, &rcar_du_plane_funcs,
formats, ARRAY_SIZE(formats),
NULL, type, NULL);
if (ret < 0)
return ret;
drm_plane_helper_add(&plane->plane,
&rcar_du_plane_helper_funcs);
drm_plane_create_alpha_property(&plane->plane);
if (type == DRM_PLANE_TYPE_PRIMARY) {
drm_plane_create_zpos_immutable_property(&plane->plane,
0);
} else {
drm_object_attach_property(&plane->plane.base,
rcdu->props.colorkey,
RCAR_DU_COLORKEY_NONE);
drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
}
}
return 0;
}
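/*
 * Illustrative sketch, not part of the original driver: the possible-CRTCs
 * mask computed in rcar_du_planes_init() above. Each group serves at most
 * two CRTCs, so group N covers bits 2N and 2N+1, clipped to the CRTCs that
 * actually exist: with three CRTCs, group 0 yields 0x3 and group 1 yields
 * 0x4. The helper name is hypothetical.
 */
static unsigned int rcar_du_group_possible_crtcs(unsigned int num_crtcs,
						 unsigned int group_index)
{
	return ((1 << num_crtcs) - 1) & (3 << (2 * group_index));
}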
| linux-master | drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* R-Car Display Unit Mode Setting
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
#include "rcar_du_regs.h"
#include "rcar_du_vsp.h"
#include "rcar_du_writeback.h"
/* -----------------------------------------------------------------------------
* Format helpers
*/
static const struct rcar_du_format_info rcar_du_format_infos[] = {
{
.fourcc = DRM_FORMAT_RGB565,
.v4l2 = V4L2_PIX_FMT_RGB565,
.bpp = 16,
.planes = 1,
.hsub = 1,
.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_ARGB1555,
.v4l2 = V4L2_PIX_FMT_ARGB555,
.bpp = 16,
.planes = 1,
.hsub = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_XRGB1555,
.v4l2 = V4L2_PIX_FMT_XRGB555,
.bpp = 16,
.planes = 1,
.hsub = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_XRGB8888,
.v4l2 = V4L2_PIX_FMT_XBGR32,
.bpp = 32,
.planes = 1,
.hsub = 1,
.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_RGB888,
}, {
.fourcc = DRM_FORMAT_ARGB8888,
.v4l2 = V4L2_PIX_FMT_ABGR32,
.bpp = 32,
.planes = 1,
.hsub = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_ARGB8888,
}, {
.fourcc = DRM_FORMAT_UYVY,
.v4l2 = V4L2_PIX_FMT_UYVY,
.bpp = 16,
.planes = 1,
.hsub = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_YUYV,
.v4l2 = V4L2_PIX_FMT_YUYV,
.bpp = 16,
.planes = 1,
.hsub = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV12,
.v4l2 = V4L2_PIX_FMT_NV12M,
.bpp = 12,
.planes = 2,
.hsub = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV21,
.v4l2 = V4L2_PIX_FMT_NV21M,
.bpp = 12,
.planes = 2,
.hsub = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV16,
.v4l2 = V4L2_PIX_FMT_NV16M,
.bpp = 16,
.planes = 2,
.hsub = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
},
/*
* The following formats are not supported on Gen2 and thus have no
* associated .pnmr or .edf settings.
*/
{
.fourcc = DRM_FORMAT_RGB332,
.v4l2 = V4L2_PIX_FMT_RGB332,
.bpp = 8,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_ARGB4444,
.v4l2 = V4L2_PIX_FMT_ARGB444,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_XRGB4444,
.v4l2 = V4L2_PIX_FMT_XRGB444,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBA4444,
.v4l2 = V4L2_PIX_FMT_RGBA444,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBX4444,
.v4l2 = V4L2_PIX_FMT_RGBX444,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_ABGR4444,
.v4l2 = V4L2_PIX_FMT_ABGR444,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_XBGR4444,
.v4l2 = V4L2_PIX_FMT_XBGR444,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGRA4444,
.v4l2 = V4L2_PIX_FMT_BGRA444,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGRX4444,
.v4l2 = V4L2_PIX_FMT_BGRX444,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBA5551,
.v4l2 = V4L2_PIX_FMT_RGBA555,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBX5551,
.v4l2 = V4L2_PIX_FMT_RGBX555,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_ABGR1555,
.v4l2 = V4L2_PIX_FMT_ABGR555,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_XBGR1555,
.v4l2 = V4L2_PIX_FMT_XBGR555,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGRA5551,
.v4l2 = V4L2_PIX_FMT_BGRA555,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGRX5551,
.v4l2 = V4L2_PIX_FMT_BGRX555,
.bpp = 16,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGR888,
.v4l2 = V4L2_PIX_FMT_RGB24,
.bpp = 24,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGB888,
.v4l2 = V4L2_PIX_FMT_BGR24,
.bpp = 24,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBA8888,
.v4l2 = V4L2_PIX_FMT_BGRA32,
.bpp = 32,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBX8888,
.v4l2 = V4L2_PIX_FMT_BGRX32,
.bpp = 32,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_ABGR8888,
.v4l2 = V4L2_PIX_FMT_RGBA32,
.bpp = 32,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_XBGR8888,
.v4l2 = V4L2_PIX_FMT_RGBX32,
.bpp = 32,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGRA8888,
.v4l2 = V4L2_PIX_FMT_ARGB32,
.bpp = 32,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGRX8888,
.v4l2 = V4L2_PIX_FMT_XRGB32,
.bpp = 32,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBX1010102,
.v4l2 = V4L2_PIX_FMT_RGBX1010102,
.bpp = 32,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBA1010102,
.v4l2 = V4L2_PIX_FMT_RGBA1010102,
.bpp = 32,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_ARGB2101010,
.v4l2 = V4L2_PIX_FMT_ARGB2101010,
.bpp = 32,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_YVYU,
.v4l2 = V4L2_PIX_FMT_YVYU,
.bpp = 16,
.planes = 1,
.hsub = 2,
}, {
.fourcc = DRM_FORMAT_NV61,
.v4l2 = V4L2_PIX_FMT_NV61M,
.bpp = 16,
.planes = 2,
.hsub = 2,
}, {
.fourcc = DRM_FORMAT_YUV420,
.v4l2 = V4L2_PIX_FMT_YUV420M,
.bpp = 12,
.planes = 3,
.hsub = 2,
}, {
.fourcc = DRM_FORMAT_YVU420,
.v4l2 = V4L2_PIX_FMT_YVU420M,
.bpp = 12,
.planes = 3,
.hsub = 2,
}, {
.fourcc = DRM_FORMAT_YUV422,
.v4l2 = V4L2_PIX_FMT_YUV422M,
.bpp = 16,
.planes = 3,
.hsub = 2,
}, {
.fourcc = DRM_FORMAT_YVU422,
.v4l2 = V4L2_PIX_FMT_YVU422M,
.bpp = 16,
.planes = 3,
.hsub = 2,
}, {
.fourcc = DRM_FORMAT_YUV444,
.v4l2 = V4L2_PIX_FMT_YUV444M,
.bpp = 24,
.planes = 3,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_YVU444,
.v4l2 = V4L2_PIX_FMT_YVU444M,
.bpp = 24,
.planes = 3,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_Y210,
.v4l2 = V4L2_PIX_FMT_Y210,
.bpp = 32,
.planes = 1,
.hsub = 2,
}, {
.fourcc = DRM_FORMAT_Y212,
.v4l2 = V4L2_PIX_FMT_Y212,
.bpp = 32,
.planes = 1,
.hsub = 2,
},
};
const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(rcar_du_format_infos); ++i) {
if (rcar_du_format_infos[i].fourcc == fourcc)
return &rcar_du_format_infos[i];
}
return NULL;
}
/* -----------------------------------------------------------------------------
* Frame buffer
*/
static const struct drm_gem_object_funcs rcar_du_gem_funcs = {
.free = drm_gem_dma_object_free,
.print_info = drm_gem_dma_object_print_info,
.get_sg_table = drm_gem_dma_object_get_sg_table,
.vmap = drm_gem_dma_object_vmap,
.mmap = drm_gem_dma_object_mmap,
.vm_ops = &drm_gem_dma_vm_ops,
};
struct drm_gem_object *rcar_du_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *gem_obj;
int ret;
if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
return drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
/* Create a DMA GEM buffer. */
dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
if (!dma_obj)
return ERR_PTR(-ENOMEM);
gem_obj = &dma_obj->base;
gem_obj->funcs = &rcar_du_gem_funcs;
drm_gem_private_object_init(dev, gem_obj, attach->dmabuf->size);
dma_obj->map_noncoherent = false;
ret = drm_gem_create_mmap_offset(gem_obj);
if (ret) {
drm_gem_object_release(gem_obj);
kfree(dma_obj);
return ERR_PTR(ret);
}
dma_obj->dma_addr = 0;
dma_obj->sgt = sgt;
return gem_obj;
}
int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
unsigned int align;
/*
* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
* but the R8A7790 DU seems to require a 128 bytes pitch alignment.
*/
if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
align = 128;
else
align = 16 * args->bpp / 8;
args->pitch = roundup(min_pitch, align);
return drm_gem_dma_dumb_create_internal(file, dev, args);
}
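/*
 * Illustrative sketch, not part of the original driver: the pitch
 * computation used in rcar_du_dumb_create() above, with the quirk test
 * replaced by a plain boolean (hypothetical helper). For a hypothetical
 * 1021-pixel-wide ARGB8888 buffer (bpp = 32) the minimum pitch is 4084
 * bytes, which both the 64-byte (16 pixels) and 128-byte alignment rules
 * round up to 4096.
 */
static unsigned int rcar_du_dumb_pitch(unsigned int width, unsigned int bpp,
				       bool align_128b)
{
	unsigned int min_pitch = DIV_ROUND_UP(width * bpp, 8);
	unsigned int align = align_128b ? 128 : 16 * bpp / 8;

	return roundup(min_pitch, align);
}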
static struct drm_framebuffer *
rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
const struct rcar_du_format_info *format;
unsigned int chroma_pitch;
unsigned int max_pitch;
unsigned int align;
unsigned int i;
format = rcar_du_format_info(mode_cmd->pixel_format);
if (format == NULL) {
dev_dbg(dev->dev, "unsupported pixel format %p4cc\n",
&mode_cmd->pixel_format);
return ERR_PTR(-EINVAL);
}
if (rcdu->info->gen < 3) {
/*
* On Gen2 the DU limits the pitch to 4095 pixels and requires
* buffers to be aligned to a 16 pixels boundary (or 128 bytes
* on some platforms).
*/
unsigned int bpp = format->planes == 1 ? format->bpp / 8 : 1;
max_pitch = 4095 * bpp;
if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
align = 128;
else
align = 16 * bpp;
} else {
/*
* On Gen3 the memory interface is handled by the VSP that
* limits the pitch to 65535 bytes and has no alignment
* constraint.
*/
max_pitch = 65535;
align = 1;
}
if (mode_cmd->pitches[0] & (align - 1) ||
mode_cmd->pitches[0] > max_pitch) {
dev_dbg(dev->dev, "invalid pitch value %u\n",
mode_cmd->pitches[0]);
return ERR_PTR(-EINVAL);
}
/*
* Calculate the chroma plane(s) pitch using the horizontal subsampling
* factor. For semi-planar formats, the U and V planes are combined, the
* pitch must thus be doubled.
*/
chroma_pitch = mode_cmd->pitches[0] / format->hsub;
if (format->planes == 2)
chroma_pitch *= 2;
for (i = 1; i < format->planes; ++i) {
if (mode_cmd->pitches[i] != chroma_pitch) {
dev_dbg(dev->dev,
"luma and chroma pitches are not compatible\n");
return ERR_PTR(-EINVAL);
}
}
return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
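/*
 * Illustrative sketch, not part of the original driver: the chroma pitch
 * expected by rcar_du_fb_create() above, as a standalone helper
 * (hypothetical name). For NV12 (hsub = 2, two planes) the combined CbCr
 * plane ends up with the same pitch as the luma plane, while for YUV420
 * (hsub = 2, three planes) each chroma plane uses half the luma pitch.
 */
static unsigned int rcar_du_expected_chroma_pitch(unsigned int luma_pitch,
						  unsigned int hsub,
						  unsigned int planes)
{
	unsigned int chroma_pitch = luma_pitch / hsub;

	/* Semi-planar formats interleave Cb and Cr in a single plane. */
	return planes == 2 ? chroma_pitch * 2 : chroma_pitch;
}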
/* -----------------------------------------------------------------------------
* Atomic Check and Update
*/
static int rcar_du_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
int ret;
ret = drm_atomic_helper_check(dev, state);
if (ret)
return ret;
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
return 0;
return rcar_du_atomic_check_planes(dev, state);
}
static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
unsigned int i;
/*
* Store RGB routing to DPAD0 and DPAD1, the hardware will be configured
* when starting the CRTCs.
*/
rcdu->dpad1_source = -1;
for_each_new_crtc_in_state(old_state, crtc, crtc_state, i) {
struct rcar_du_crtc_state *rcrtc_state =
to_rcar_crtc_state(crtc_state);
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
if (rcrtc_state->outputs & BIT(RCAR_DU_OUTPUT_DPAD0))
rcdu->dpad0_source = rcrtc->index;
if (rcrtc_state->outputs & BIT(RCAR_DU_OUTPUT_DPAD1))
rcdu->dpad1_source = rcrtc->index;
}
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
drm_atomic_helper_commit_planes(dev, old_state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_flip_done(dev, old_state);
drm_atomic_helper_cleanup_planes(dev, old_state);
}
/* -----------------------------------------------------------------------------
* Initialization
*/
static const struct drm_mode_config_helper_funcs rcar_du_mode_config_helper = {
.atomic_commit_tail = rcar_du_atomic_commit_tail,
};
static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
.fb_create = rcar_du_fb_create,
.atomic_check = rcar_du_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct of_endpoint *ep)
{
struct device_node *entity;
int ret;
/* Locate the connected entity and initialize the encoder. */
entity = of_graph_get_remote_port_parent(ep->local_node);
if (!entity) {
dev_dbg(rcdu->dev, "unconnected endpoint %pOF, skipping\n",
ep->local_node);
return -ENODEV;
}
if (!of_device_is_available(entity)) {
dev_dbg(rcdu->dev,
"connected entity %pOF is disabled, skipping\n",
entity);
of_node_put(entity);
return -ENODEV;
}
ret = rcar_du_encoder_init(rcdu, output, entity);
if (ret && ret != -EPROBE_DEFER && ret != -ENOLINK)
dev_warn(rcdu->dev,
"failed to initialize encoder %pOF on output %s (%d), skipping\n",
entity, rcar_du_output_name(output), ret);
of_node_put(entity);
return ret;
}
static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
{
struct device_node *np = rcdu->dev->of_node;
struct device_node *ep_node;
unsigned int num_encoders = 0;
/*
* Iterate over the endpoints and create one encoder for each output
* pipeline.
*/
for_each_endpoint_of_node(np, ep_node) {
enum rcar_du_output output;
struct of_endpoint ep;
unsigned int i;
int ret;
ret = of_graph_parse_endpoint(ep_node, &ep);
if (ret < 0) {
of_node_put(ep_node);
return ret;
}
/* Find the output route corresponding to the port number. */
for (i = 0; i < RCAR_DU_OUTPUT_MAX; ++i) {
if (rcdu->info->routes[i].possible_crtcs &&
rcdu->info->routes[i].port == ep.port) {
output = i;
break;
}
}
if (i == RCAR_DU_OUTPUT_MAX) {
dev_warn(rcdu->dev,
"port %u references unexisting output, skipping\n",
ep.port);
continue;
}
/* Process the output pipeline. */
ret = rcar_du_encoders_init_one(rcdu, output, &ep);
if (ret < 0) {
if (ret == -EPROBE_DEFER) {
of_node_put(ep_node);
return ret;
}
continue;
}
num_encoders++;
}
return num_encoders;
}
static int rcar_du_properties_init(struct rcar_du_device *rcdu)
{
/*
* The color key is expressed as an RGB888 triplet stored in a 32-bit
* integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
* or enable source color keying (1).
*/
rcdu->props.colorkey =
drm_property_create_range(&rcdu->ddev, 0, "colorkey",
0, 0x01ffffff);
if (rcdu->props.colorkey == NULL)
return -ENOMEM;
return 0;
}
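/*
 * Illustrative sketch, not part of the original driver: how a value for
 * the "colorkey" property created above would be composed (hypothetical
 * helper). The color occupies the low 24 bits in RGB888 layout and bit 24
 * enables source color keying; leaving bit 24 clear disables keying.
 */
static u64 rcar_du_make_colorkey(u8 r, u8 g, u8 b, bool enable)
{
	u64 val = ((u64)r << 16) | ((u64)g << 8) | b;

	return enable ? val | (1ULL << 24) : val;
}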
static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
{
const struct device_node *np = rcdu->dev->of_node;
const char *vsps_prop_name = "renesas,vsps";
struct of_phandle_args args;
struct {
struct device_node *np;
unsigned int crtcs_mask;
} vsps[RCAR_DU_MAX_VSPS] = { { NULL, }, };
unsigned int vsps_count = 0;
unsigned int cells;
unsigned int i;
int ret;
/*
* First parse the DT vsps property to populate the list of VSPs. Each
* entry contains a pointer to the VSP DT node and a bitmask of the
* connected DU CRTCs.
*/
ret = of_property_count_u32_elems(np, vsps_prop_name);
if (ret < 0) {
/* Backward compatibility with old DTBs. */
vsps_prop_name = "vsps";
ret = of_property_count_u32_elems(np, vsps_prop_name);
}
cells = ret / rcdu->num_crtcs - 1;
if (cells > 1)
return -EINVAL;
for (i = 0; i < rcdu->num_crtcs; ++i) {
unsigned int j;
ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name,
cells, i, &args);
if (ret < 0)
goto error;
/*
* Add the VSP to the list or update the corresponding existing
* entry if the VSP has already been added.
*/
for (j = 0; j < vsps_count; ++j) {
if (vsps[j].np == args.np)
break;
}
if (j < vsps_count)
of_node_put(args.np);
else
vsps[vsps_count++].np = args.np;
vsps[j].crtcs_mask |= BIT(i);
/*
* Store the VSP pointer and pipe index in the CRTC. If the
* second cell of the 'renesas,vsps' specifier isn't present,
* default to 0 to remain compatible with older DT bindings.
*/
rcdu->crtcs[i].vsp = &rcdu->vsps[j];
rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
}
/*
* Then initialize all the VSPs from the node pointers and CRTCs bitmask
* computed previously.
*/
for (i = 0; i < vsps_count; ++i) {
struct rcar_du_vsp *vsp = &rcdu->vsps[i];
vsp->index = i;
vsp->dev = rcdu;
ret = rcar_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask);
if (ret < 0)
goto error;
}
return 0;
error:
for (i = 0; i < ARRAY_SIZE(vsps); ++i)
of_node_put(vsps[i].np);
return ret;
}
static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
{
const struct device_node *np = rcdu->dev->of_node;
unsigned int i;
int cells;
cells = of_property_count_u32_elems(np, "renesas,cmms");
if (cells == -EINVAL)
return 0;
if (cells > rcdu->num_crtcs) {
dev_err(rcdu->dev,
"Invalid number of entries in 'renesas,cmms'\n");
return -EINVAL;
}
for (i = 0; i < cells; ++i) {
struct platform_device *pdev;
struct device_link *link;
struct device_node *cmm;
int ret;
cmm = of_parse_phandle(np, "renesas,cmms", i);
if (!cmm) {
dev_err(rcdu->dev,
"Failed to parse 'renesas,cmms' property\n");
return -EINVAL;
}
if (!of_device_is_available(cmm)) {
/* It's fine to have a phandle to a non-enabled CMM. */
of_node_put(cmm);
continue;
}
pdev = of_find_device_by_node(cmm);
if (!pdev) {
dev_err(rcdu->dev, "No device found for CMM%u\n", i);
of_node_put(cmm);
return -EINVAL;
}
of_node_put(cmm);
/*
* -ENODEV is used to report that the CMM config option is
* disabled: return 0 and let the DU continue probing.
*/
ret = rcar_cmm_init(pdev);
if (ret) {
platform_device_put(pdev);
return ret == -ENODEV ? 0 : ret;
}
rcdu->cmms[i] = pdev;
/*
* Enforce suspend/resume ordering by making the CMM a provider
* of the DU: CMM is suspended after and resumed before the DU.
*/
link = device_link_add(rcdu->dev, &pdev->dev, DL_FLAG_STATELESS);
if (!link) {
dev_err(rcdu->dev,
"Failed to create device link to CMM%u\n", i);
return -EINVAL;
}
}
return 0;
}
static void rcar_du_modeset_cleanup(struct drm_device *dev, void *res)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(rcdu->cmms); ++i)
platform_device_put(rcdu->cmms[i]);
}
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
static const unsigned int mmio_offsets[] = {
DU0_REG_OFFSET, DU2_REG_OFFSET
};
struct drm_device *dev = &rcdu->ddev;
struct drm_encoder *encoder;
unsigned int dpad0_sources;
unsigned int num_encoders;
unsigned int num_groups;
unsigned int swindex;
unsigned int hwindex;
unsigned int i;
int ret;
ret = drmm_mode_config_init(dev);
if (ret)
return ret;
ret = drmm_add_action(&rcdu->ddev, rcar_du_modeset_cleanup, NULL);
if (ret)
return ret;
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.normalize_zpos = true;
dev->mode_config.funcs = &rcar_du_mode_config_funcs;
dev->mode_config.helper_private = &rcar_du_mode_config_helper;
if (rcdu->info->gen < 3) {
dev->mode_config.max_width = 4095;
dev->mode_config.max_height = 2047;
} else {
/*
* The Gen3 DU uses the VSP1 for memory access, and is limited
* to frame sizes of 8190x8190.
*/
dev->mode_config.max_width = 8190;
dev->mode_config.max_height = 8190;
}
rcdu->num_crtcs = hweight8(rcdu->info->channels_mask);
ret = rcar_du_properties_init(rcdu);
if (ret < 0)
return ret;
/*
* Initialize vertical blanking interrupts handling. Start with vblank
* disabled for all CRTCs.
*/
ret = drm_vblank_init(dev, rcdu->num_crtcs);
if (ret < 0)
return ret;
/* Initialize the groups. */
num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
for (i = 0; i < num_groups; ++i) {
struct rcar_du_group *rgrp = &rcdu->groups[i];
mutex_init(&rgrp->lock);
rgrp->dev = rcdu;
rgrp->mmio_offset = mmio_offsets[i];
rgrp->index = i;
/* Extract the channel mask for this group only. */
rgrp->channels_mask = (rcdu->info->channels_mask >> (2 * i))
& GENMASK(1, 0);
rgrp->num_crtcs = hweight8(rgrp->channels_mask);
/*
* If we have more than one CRTC in this group, pre-associate
* the low-order planes with CRTC 0 and the high-order planes
* with CRTC 1 to minimize flicker occurring when the
* association is changed.
*/
rgrp->dptsr_planes = rgrp->num_crtcs > 1
? (rcdu->info->gen >= 3 ? 0x04 : 0xf0)
: 0;
if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
ret = rcar_du_planes_init(rgrp);
if (ret < 0)
return ret;
}
}
/* Initialize the compositors. */
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
ret = rcar_du_vsps_init(rcdu);
if (ret < 0)
return ret;
}
/* Initialize the Color Management Modules. */
ret = rcar_du_cmm_init(rcdu);
if (ret)
return dev_err_probe(rcdu->dev, ret,
"failed to initialize CMM\n");
/* Create the CRTCs. */
for (swindex = 0, hwindex = 0; swindex < rcdu->num_crtcs; ++hwindex) {
struct rcar_du_group *rgrp;
/* Skip unpopulated DU channels. */
if (!(rcdu->info->channels_mask & BIT(hwindex)))
continue;
rgrp = &rcdu->groups[hwindex / 2];
ret = rcar_du_crtc_create(rgrp, swindex++, hwindex);
if (ret < 0)
return ret;
}
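/*
* For illustration, with an assumed channels_mask of 0xb the loop above
* creates CRTCs for hardware indices 0, 1 and 3 with software indices
* 0, 1 and 2 respectively, and skips the unpopulated hardware index 2.
*/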
/* Initialize the encoders. */
ret = rcar_du_encoders_init(rcdu);
if (ret < 0)
return dev_err_probe(rcdu->dev, ret,
"failed to initialize encoders\n");
if (ret == 0) {
dev_err(rcdu->dev, "error: no encoder could be initialized\n");
return -EINVAL;
}
num_encoders = ret;
/*
* Set the possible CRTCs and possible clones. There's always at least
* one way for all encoders to clone each other, so set all bits in the
* possible clones field.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
const struct rcar_du_output_routing *route =
&rcdu->info->routes[renc->output];
encoder->possible_crtcs = route->possible_crtcs;
encoder->possible_clones = (1 << num_encoders) - 1;
}
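/*
* For illustration, if three encoders were initialized above,
* possible_clones = (1 << 3) - 1 = 0x7 for each of them, i.e. every
* encoder is reported as cloneable with every other encoder.
*/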
/* Create the writeback connectors. */
if (rcdu->info->gen >= 3) {
for (i = 0; i < rcdu->num_crtcs; ++i) {
struct rcar_du_crtc *rcrtc = &rcdu->crtcs[i];
ret = rcar_du_writeback_init(rcdu, rcrtc);
if (ret < 0)
return ret;
}
}
/*
* Initialize the default DPAD0 source to the index of the first DU
* channel that can be connected to DPAD0. The exact value doesn't
* matter as it should be overwritten by mode setting for the RGB
* output, but it is nonetheless required to ensure a valid initial
* hardware configuration on Gen3 where DU0 can't always be connected to
* DPAD0.
*/
dpad0_sources = rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs;
rcdu->dpad0_source = ffs(dpad0_sources) - 1;
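/*
* For illustration (assumed routing, not a real SoC entry): if DPAD0
* can be fed from DU1 or DU2, possible_crtcs = 0x6 and
* ffs(0x6) - 1 = 1 selects DU1 as the initial DPAD0 source.
*/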
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
return 0;
}
| linux-master | drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* R-Car Gen3 HDMI PHY
*
* Copyright (C) 2016 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
*/
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_modes.h>
#define RCAR_HDMI_PHY_OPMODE_PLLCFG 0x06 /* Mode of operation and PLL dividers */
#define RCAR_HDMI_PHY_PLLCURRGMPCTRL 0x10 /* PLL current and Gmp (conductance) */
#define RCAR_HDMI_PHY_PLLDIVCTRL 0x11 /* PLL dividers */
struct rcar_hdmi_phy_params {
unsigned long mpixelclock;
u16 opmode_div; /* Mode of operation and PLL dividers */
u16 curr_gmp; /* PLL current and Gmp (conductance) */
u16 div; /* PLL dividers */
};
static const struct rcar_hdmi_phy_params rcar_hdmi_phy_params[] = {
{ 35500000, 0x0003, 0x0344, 0x0328 },
{ 44900000, 0x0003, 0x0285, 0x0128 },
{ 71000000, 0x0002, 0x1184, 0x0314 },
{ 90000000, 0x0002, 0x1144, 0x0114 },
{ 140250000, 0x0001, 0x20c4, 0x030a },
{ 182750000, 0x0001, 0x2084, 0x010a },
{ 281250000, 0x0000, 0x0084, 0x0305 },
{ 297000000, 0x0000, 0x0084, 0x0105 },
{ ~0UL, 0x0000, 0x0000, 0x0000 },
};
static enum drm_mode_status
rcar_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
/*
* The maximum supported clock frequency is 297 MHz, as shown in the PHY
* parameters table.
*/
if (mode->clock > 297000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi, void *data,
unsigned long mpixelclock)
{
const struct rcar_hdmi_phy_params *params = rcar_hdmi_phy_params;
for (; params->mpixelclock != ~0UL; ++params) {
if (mpixelclock <= params->mpixelclock)
break;
}
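/*
* The table is sorted by increasing mpixelclock, so the loop above picks
* the first entry that can handle the requested rate. For illustration,
* a 100 MHz pixel clock skips the entries up to 90 MHz and selects the
* 140.25 MHz row, whose three parameter words are then written below.
*/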
if (params->mpixelclock == ~0UL)
return -EINVAL;
dw_hdmi_phy_i2c_write(hdmi, params->opmode_div,
RCAR_HDMI_PHY_OPMODE_PLLCFG);
dw_hdmi_phy_i2c_write(hdmi, params->curr_gmp,
RCAR_HDMI_PHY_PLLCURRGMPCTRL);
dw_hdmi_phy_i2c_write(hdmi, params->div, RCAR_HDMI_PHY_PLLDIVCTRL);
return 0;
}
static const struct dw_hdmi_plat_data rcar_dw_hdmi_plat_data = {
.output_port = 1,
.mode_valid = rcar_hdmi_mode_valid,
.configure_phy = rcar_hdmi_phy_configure,
};
static int rcar_dw_hdmi_probe(struct platform_device *pdev)
{
struct dw_hdmi *hdmi;
hdmi = dw_hdmi_probe(pdev, &rcar_dw_hdmi_plat_data);
if (IS_ERR(hdmi))
return PTR_ERR(hdmi);
platform_set_drvdata(pdev, hdmi);
return 0;
}
static void rcar_dw_hdmi_remove(struct platform_device *pdev)
{
struct dw_hdmi *hdmi = platform_get_drvdata(pdev);
dw_hdmi_remove(hdmi);
}
static const struct of_device_id rcar_dw_hdmi_of_table[] = {
{ .compatible = "renesas,rcar-gen3-hdmi" },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, rcar_dw_hdmi_of_table);
static struct platform_driver rcar_dw_hdmi_platform_driver = {
.probe = rcar_dw_hdmi_probe,
.remove_new = rcar_dw_hdmi_remove,
.driver = {
.name = "rcar-dw-hdmi",
.of_match_table = rcar_dw_hdmi_of_table,
},
};
module_platform_driver(rcar_dw_hdmi_platform_driver);
MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
MODULE_DESCRIPTION("Renesas R-Car Gen3 HDMI Encoder Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* R-Car LVDS Encoder
*
* Copyright (C) 2013-2018 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "rcar_lvds.h"
#include "rcar_lvds_regs.h"
struct rcar_lvds;
/* Keep in sync with the LVDCR0.LVMD hardware register values. */
enum rcar_lvds_mode {
RCAR_LVDS_MODE_JEIDA = 0,
RCAR_LVDS_MODE_MIRROR = 1,
RCAR_LVDS_MODE_VESA = 4,
};
enum rcar_lvds_link_type {
RCAR_LVDS_SINGLE_LINK = 0,
RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS = 1,
RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS = 2,
};
#define RCAR_LVDS_QUIRK_LANES BIT(0) /* LVDS lanes 1 and 3 inverted */
#define RCAR_LVDS_QUIRK_GEN3_LVEN BIT(1) /* LVEN bit needs to be set on R8A77970/R8A7799x */
#define RCAR_LVDS_QUIRK_PWD BIT(2) /* PWD bit available (all of Gen3 but E3) */
#define RCAR_LVDS_QUIRK_EXT_PLL BIT(3) /* Has extended PLL */
#define RCAR_LVDS_QUIRK_DUAL_LINK BIT(4) /* Supports dual-link operation */
struct rcar_lvds_device_info {
unsigned int gen;
unsigned int quirks;
void (*pll_setup)(struct rcar_lvds *lvds, unsigned int freq);
};
struct rcar_lvds {
struct device *dev;
const struct rcar_lvds_device_info *info;
struct reset_control *rstc;
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
struct drm_panel *panel;
void __iomem *mmio;
struct {
struct clk *mod; /* CPG module clock */
struct clk *extal; /* External clock */
struct clk *dotclkin[2]; /* External DU clocks */
} clocks;
struct drm_bridge *companion;
enum rcar_lvds_link_type link_type;
};
#define bridge_to_rcar_lvds(b) \
container_of(b, struct rcar_lvds, bridge)
static u32 rcar_lvds_read(struct rcar_lvds *lvds, u32 reg)
{
return ioread32(lvds->mmio + reg);
}
static void rcar_lvds_write(struct rcar_lvds *lvds, u32 reg, u32 data)
{
iowrite32(data, lvds->mmio + reg);
}
/* -----------------------------------------------------------------------------
* PLL Setup
*/
static void rcar_lvds_pll_setup_gen2(struct rcar_lvds *lvds, unsigned int freq)
{
u32 val;
if (freq < 39000000)
val = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
else if (freq < 61000000)
val = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
else if (freq < 121000000)
val = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
else
val = LVDPLLCR_PLLDLYCNT_150M;
rcar_lvds_write(lvds, LVDPLLCR, val);
}
static void rcar_lvds_pll_setup_gen3(struct rcar_lvds *lvds, unsigned int freq)
{
u32 val;
if (freq < 42000000)
val = LVDPLLCR_PLLDIVCNT_42M;
else if (freq < 85000000)
val = LVDPLLCR_PLLDIVCNT_85M;
else if (freq < 128000000)
val = LVDPLLCR_PLLDIVCNT_128M;
else
val = LVDPLLCR_PLLDIVCNT_148M;
rcar_lvds_write(lvds, LVDPLLCR, val);
}
struct pll_info {
unsigned long diff;
unsigned int pll_m;
unsigned int pll_n;
unsigned int pll_e;
unsigned int div;
u32 clksel;
};
static void rcar_lvds_d3_e3_pll_calc(struct rcar_lvds *lvds, struct clk *clk,
unsigned long target, struct pll_info *pll,
u32 clksel, bool dot_clock_only)
{
unsigned int div7 = dot_clock_only ? 1 : 7;
unsigned long output;
unsigned long fin;
unsigned int m_min;
unsigned int m_max;
unsigned int m;
int error;
if (!clk)
return;
/*
* The LVDS PLL is made of a pre-divider and a multiplier (strangely
* enough called M and N respectively), followed by a post-divider E.
*
* ,-----. ,-----. ,-----. ,-----.
* Fin --> | 1/M | -Fpdf-> | PFD | --> | VCO | -Fvco-> | 1/E | --> Fout
* `-----' ,-> | | `-----' | `-----'
* | `-----' |
* | ,-----. |
* `-------- | 1/N | <-------'
* `-----'
*
* The clock output by the PLL is then further divided by a programmable
* divider DIV to achieve the desired target frequency. Finally, an
* optional fixed /7 divider is used to convert the bit clock to a pixel
* clock (as LVDS transmits 7 bits per lane per clock sample).
*
* ,-------. ,-----. |\
* Fout --> | 1/DIV | --> | 1/7 | --> | |
* `-------' | `-----' | | --> dot clock
* `------------> | |
* |/
*
* The /7 divider is optional, it is enabled when the LVDS PLL is used
* to drive the LVDS encoder, and disabled when used to generate a dot
* clock for the DU RGB output, without using the LVDS encoder.
*
* The PLL allowed input frequency range is 12 MHz to 192 MHz.
*/
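/*
* For illustration only (assumed clock values, not tied to a specific
* board): with Fin = 24 MHz and a 108 MHz target dot clock, M = 1
* gives Fpfd = 24 MHz, N = 63 gives Fvco = 1512 MHz, E = 1 (/2) gives
* Fout = 756 MHz, and 756 MHz / DIV(1) / 7 = 108 MHz exactly.
*/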
fin = clk_get_rate(clk);
if (fin < 12000000 || fin > 192000000)
return;
/*
* The comparison frequency range is 12 MHz to 24 MHz, which limits the
* allowed values for the pre-divider M (normal range 1-8).
*
* Fpfd = Fin / M
*/
m_min = max_t(unsigned int, 1, DIV_ROUND_UP(fin, 24000000));
m_max = min_t(unsigned int, 8, fin / 12000000);
for (m = m_min; m <= m_max; ++m) {
unsigned long fpfd;
unsigned int n_min;
unsigned int n_max;
unsigned int n;
/*
* The VCO operating range is 900 MHz to 1800 MHz, which limits
* the allowed values for the multiplier N (normal range
* 60-120).
*
* Fvco = Fin * N / M
*/
fpfd = fin / m;
n_min = max_t(unsigned int, 60, DIV_ROUND_UP(900000000, fpfd));
n_max = min_t(unsigned int, 120, 1800000000 / fpfd);
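/*
* For illustration, with Fpfd = 24 MHz these bounds evaluate to
* n_min = max(60, DIV_ROUND_UP(900 MHz, 24 MHz)) = 60 and
* n_max = min(120, 1800 MHz / 24 MHz) = 75.
*/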
for (n = n_min; n < n_max; ++n) {
unsigned long fvco;
unsigned int e_min;
unsigned int e;
/*
* The output frequency is limited to 1039.5 MHz,
* limiting again the allowed values for the
* post-divider E (normal value 1, 2 or 4).
*
* Fout = Fvco / E
*/
fvco = fpfd * n;
e_min = fvco > 1039500000 ? 1 : 0;
for (e = e_min; e < 3; ++e) {
unsigned long fout;
unsigned long diff;
unsigned int div;
/*
* Finally, we have a programmable divider after
* the PLL, followed by an optional fixed /7
* divider.
*/
fout = fvco / (1 << e) / div7;
div = max(1UL, DIV_ROUND_CLOSEST(fout, target));
diff = abs(fout / div - target);
if (diff < pll->diff) {
pll->diff = diff;
pll->pll_m = m;
pll->pll_n = n;
pll->pll_e = e;
pll->div = div;
pll->clksel = clksel;
if (diff == 0)
goto done;
}
}
}
}
done:
output = fin * pll->pll_n / pll->pll_m / (1 << pll->pll_e)
/ div7 / pll->div;
error = (long)(output - target) * 10000 / (long)target;
dev_dbg(lvds->dev,
"%pC %lu Hz -> Fout %lu Hz (target %lu Hz, error %d.%02u%%), PLL M/N/E/DIV %u/%u/%u/%u\n",
clk, fin, output, target, error / 100,
error < 0 ? -error % 100 : error % 100,
pll->pll_m, pll->pll_n, pll->pll_e, pll->div);
}
static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds,
unsigned int freq, bool dot_clock_only)
{
struct pll_info pll = { .diff = (unsigned long)-1 };
u32 lvdpllcr;
rcar_lvds_d3_e3_pll_calc(lvds, lvds->clocks.dotclkin[0], freq, &pll,
LVDPLLCR_CKSEL_DU_DOTCLKIN(0), dot_clock_only);
rcar_lvds_d3_e3_pll_calc(lvds, lvds->clocks.dotclkin[1], freq, &pll,
LVDPLLCR_CKSEL_DU_DOTCLKIN(1), dot_clock_only);
rcar_lvds_d3_e3_pll_calc(lvds, lvds->clocks.extal, freq, &pll,
LVDPLLCR_CKSEL_EXTAL, dot_clock_only);
lvdpllcr = LVDPLLCR_PLLON | pll.clksel | LVDPLLCR_CLKOUT
| LVDPLLCR_PLLN(pll.pll_n - 1) | LVDPLLCR_PLLM(pll.pll_m - 1);
if (pll.pll_e > 0)
lvdpllcr |= LVDPLLCR_STP_CLKOUTE | LVDPLLCR_OUTCLKSEL
| LVDPLLCR_PLLE(pll.pll_e - 1);
if (dot_clock_only)
lvdpllcr |= LVDPLLCR_OCKSEL;
rcar_lvds_write(lvds, LVDPLLCR, lvdpllcr);
if (pll.div > 1)
/*
* The DIVRESET bit is a misnomer: setting it to 1 deasserts the
* divisor reset.
*/
rcar_lvds_write(lvds, LVDDIV, LVDDIV_DIVSEL |
LVDDIV_DIVRESET | LVDDIV_DIV(pll.div - 1));
else
rcar_lvds_write(lvds, LVDDIV, 0);
}
/* -----------------------------------------------------------------------------
* Enable/disable
*/
static enum rcar_lvds_mode rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds,
const struct drm_connector *connector)
{
const struct drm_display_info *info;
enum rcar_lvds_mode mode;
/*
* There is no API yet to retrieve LVDS mode from a bridge, only panels
* are supported.
*/
if (!lvds->panel)
return RCAR_LVDS_MODE_JEIDA;
info = &connector->display_info;
if (!info->num_bus_formats || !info->bus_formats) {
dev_warn(lvds->dev,
"no LVDS bus format reported, using JEIDA\n");
return RCAR_LVDS_MODE_JEIDA;
}
switch (info->bus_formats[0]) {
case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
mode = RCAR_LVDS_MODE_JEIDA;
break;
case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
mode = RCAR_LVDS_MODE_VESA;
break;
default:
dev_warn(lvds->dev,
"unsupported LVDS bus format 0x%04x, using JEIDA\n",
info->bus_formats[0]);
return RCAR_LVDS_MODE_JEIDA;
}
if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB)
mode |= RCAR_LVDS_MODE_MIRROR;
return mode;
}
static void rcar_lvds_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_connector *connector)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
u32 lvdhcr;
u32 lvdcr0;
int ret;
ret = pm_runtime_resume_and_get(lvds->dev);
if (ret)
return;
/* Enable the companion LVDS encoder in dual-link mode. */
if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
rcar_lvds_enable(lvds->companion, state, crtc, connector);
/*
* Hardcode the channels and control signals routing for now.
*
* HSYNC -> CTRL0
* VSYNC -> CTRL1
* DISP -> CTRL2
* 0 -> CTRL3
*/
rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO |
LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC |
LVDCTRCR_CTR0SEL_HSYNC);
if (lvds->info->quirks & RCAR_LVDS_QUIRK_LANES)
lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3)
| LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1);
else
lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 1)
| LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 3);
rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK) {
u32 lvdstripe = 0;
if (lvds->link_type != RCAR_LVDS_SINGLE_LINK) {
/*
* By default we generate even pixels from the primary
* encoder and odd pixels from the companion encoder.
* Swap pixels around if the sink requires odd pixels
* from the primary encoder and even pixels from the
* companion encoder.
*/
bool swap_pixels = lvds->link_type ==
RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
/*
* Configure vertical stripe since we are dealing with
* an LVDS dual-link connection.
*
* ST_SWAP is reserved for the companion encoder, only
* set it in the primary encoder.
*/
lvdstripe = LVDSTRIPE_ST_ON
| (lvds->companion && swap_pixels ?
LVDSTRIPE_ST_SWAP : 0);
}
rcar_lvds_write(lvds, LVDSTRIPE, lvdstripe);
}
/*
* PLL clock configuration on all instances but the companion in
* dual-link mode.
*
* The extended PLL has been turned on by an explicit call to
* rcar_lvds_pclk_enable() from the DU driver.
*/
if ((lvds->link_type == RCAR_LVDS_SINGLE_LINK || lvds->companion) &&
!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) {
const struct drm_crtc_state *crtc_state =
drm_atomic_get_new_crtc_state(state, crtc);
const struct drm_display_mode *mode =
&crtc_state->adjusted_mode;
lvds->info->pll_setup(lvds, mode->clock * 1000);
}
/* Set the LVDS mode and select the input. */
lvdcr0 = rcar_lvds_get_lvds_mode(lvds, connector) << LVDCR0_LVMD_SHIFT;
if (lvds->bridge.encoder) {
if (drm_crtc_index(crtc) == 2)
lvdcr0 |= LVDCR0_DUSEL;
}
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
/* Turn all the channels on. */
rcar_lvds_write(lvds, LVDCR1,
LVDCR1_CHSTBY(3) | LVDCR1_CHSTBY(2) |
LVDCR1_CHSTBY(1) | LVDCR1_CHSTBY(0) | LVDCR1_CLKSTBY);
if (lvds->info->gen < 3) {
/* Enable LVDS operation and turn the bias circuitry on. */
lvdcr0 |= LVDCR0_BEN | LVDCR0_LVEN;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
}
if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) {
/*
* Turn the PLL on (simple PLL only, extended PLL is fully
* controlled through LVDPLLCR).
*/
lvdcr0 |= LVDCR0_PLLON;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
}
if (lvds->info->quirks & RCAR_LVDS_QUIRK_PWD) {
/* Set LVDS normal mode. */
lvdcr0 |= LVDCR0_PWD;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
}
if (lvds->info->quirks & RCAR_LVDS_QUIRK_GEN3_LVEN) {
/*
* Turn on the LVDS PHY. On D3, the LVEN and LVRES bits must be
* set at the same time, so don't write the register yet.
*/
lvdcr0 |= LVDCR0_LVEN;
if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_PWD))
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
}
if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) {
/* Wait for the PLL startup delay (simple PLL only). */
usleep_range(100, 150);
}
/* Turn the output on. */
lvdcr0 |= LVDCR0_LVRES;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
}
static void rcar_lvds_disable(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
u32 lvdcr0;
/*
* Clear the LVDCR0 bits in the order specified by the hardware
* documentation, ending with a write of 0 to the full register to
* clear all remaining bits.
*/
lvdcr0 = rcar_lvds_read(lvds, LVDCR0);
lvdcr0 &= ~LVDCR0_LVRES;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
if (lvds->info->quirks & RCAR_LVDS_QUIRK_GEN3_LVEN) {
lvdcr0 &= ~LVDCR0_LVEN;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
}
if (lvds->info->quirks & RCAR_LVDS_QUIRK_PWD) {
lvdcr0 &= ~LVDCR0_PWD;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
}
if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) {
lvdcr0 &= ~LVDCR0_PLLON;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
}
rcar_lvds_write(lvds, LVDCR0, 0);
rcar_lvds_write(lvds, LVDCR1, 0);
/* The extended PLL is turned off in rcar_lvds_pclk_disable(). */
if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL))
rcar_lvds_write(lvds, LVDPLLCR, 0);
/* Disable the companion LVDS encoder in dual-link mode. */
if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
rcar_lvds_disable(lvds->companion);
pm_runtime_put_sync(lvds->dev);
}
/* -----------------------------------------------------------------------------
* Clock - D3/E3 only
*/
int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq,
bool dot_clk_only)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
int ret;
if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)))
return -ENODEV;
dev_dbg(lvds->dev, "enabling LVDS PLL, freq=%luHz\n", freq);
ret = pm_runtime_resume_and_get(lvds->dev);
if (ret)
return ret;
rcar_lvds_pll_setup_d3_e3(lvds, freq, dot_clk_only);
return 0;
}
EXPORT_SYMBOL_GPL(rcar_lvds_pclk_enable);
void rcar_lvds_pclk_disable(struct drm_bridge *bridge, bool dot_clk_only)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)))
return;
dev_dbg(lvds->dev, "disabling LVDS PLL\n");
if (!dot_clk_only)
rcar_lvds_disable(bridge);
rcar_lvds_write(lvds, LVDPLLCR, 0);
pm_runtime_put_sync(lvds->dev);
}
EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable);
/* -----------------------------------------------------------------------------
* Bridge
*/
static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_connector *connector;
struct drm_crtc *crtc;
connector = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
rcar_lvds_enable(bridge, state, crtc, connector);
}
static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
/*
* For D3 and E3, disabling the LVDS encoder before the DU would stall
* the DU, causing a vblank wait timeout when stopping the DU. This has
* been traced to clearing the LVEN bit, but the exact reason is
* unknown. Keep the encoder enabled, it will be disabled by an explicit
* call to rcar_lvds_pclk_disable() from the DU driver.
*
* We could clear the LVRES bit already to disable the LVDS output, but
* that's likely pointless.
*/
if (lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)
return;
rcar_lvds_disable(bridge);
}
static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
int min_freq;
/*
* The internal LVDS encoder has a restricted clock frequency operating
* range, from 5MHz to 148.5MHz on D3 and E3, and from 31MHz to
* 148.5MHz on all other platforms. Clamp the clock accordingly.
*/
min_freq = lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL ? 5000 : 31000;
adjusted_mode->clock = clamp(adjusted_mode->clock, min_freq, 148500);
return true;
}
static int rcar_lvds_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
if (!lvds->next_bridge)
return 0;
return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge,
flags);
}
static const struct drm_bridge_funcs rcar_lvds_bridge_ops = {
.attach = rcar_lvds_attach,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_enable = rcar_lvds_atomic_enable,
.atomic_disable = rcar_lvds_atomic_disable,
.mode_fixup = rcar_lvds_mode_fixup,
};
bool rcar_lvds_dual_link(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
return lvds->link_type != RCAR_LVDS_SINGLE_LINK;
}
EXPORT_SYMBOL_GPL(rcar_lvds_dual_link);
bool rcar_lvds_is_connected(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
return lvds->next_bridge != NULL;
}
EXPORT_SYMBOL_GPL(rcar_lvds_is_connected);
/* -----------------------------------------------------------------------------
* Probe & Remove
*/
static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
{
const struct of_device_id *match;
struct device_node *companion;
struct device_node *port0, *port1;
struct rcar_lvds *companion_lvds;
struct device *dev = lvds->dev;
int dual_link;
int ret = 0;
/* Locate the companion LVDS encoder for dual-link operation, if any. */
companion = of_parse_phandle(dev->of_node, "renesas,companion", 0);
if (!companion)
return 0;
/*
* Sanity check: the companion encoder must have the same compatible
* string.
*/
match = of_match_device(dev->driver->of_match_table, dev);
if (!of_device_is_compatible(companion, match->compatible)) {
dev_err(dev, "Companion LVDS encoder is invalid\n");
ret = -ENXIO;
goto done;
}
/*
* We need to work out if the sink expects us to operate in dual-link
* mode. We do this by looking at the DT port nodes we are connected
* to: if they are marked as expecting even pixels and odd pixels,
* then we need to enable vertical stripe output.
*/
port0 = of_graph_get_port_by_id(dev->of_node, 1);
port1 = of_graph_get_port_by_id(companion, 1);
dual_link = drm_of_lvds_get_dual_link_pixel_order(port0, port1);
of_node_put(port0);
of_node_put(port1);
switch (dual_link) {
case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS:
lvds->link_type = RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
break;
case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS:
lvds->link_type = RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS;
break;
default:
/*
* Early dual-link bridge specific implementations populate the
* timings field of drm_bridge. If the flag is set, we assume
* that we are expected to generate even pixels from the primary
* encoder, and odd pixels from the companion encoder.
*/
if (lvds->next_bridge->timings &&
lvds->next_bridge->timings->dual_link)
lvds->link_type = RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS;
else
lvds->link_type = RCAR_LVDS_SINGLE_LINK;
}
if (lvds->link_type == RCAR_LVDS_SINGLE_LINK) {
dev_dbg(dev, "Single-link configuration detected\n");
goto done;
}
lvds->companion = of_drm_find_bridge(companion);
if (!lvds->companion) {
ret = -EPROBE_DEFER;
goto done;
}
dev_dbg(dev,
"Dual-link configuration detected (companion encoder %pOF)\n",
companion);
if (lvds->link_type == RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS)
dev_dbg(dev, "Data swapping required\n");
/*
* FIXME: We should not be messing with the companion encoder private
* data from the primary encoder, we should rather let the companion
* encoder work things out on its own. However, the companion encoder
* doesn't hold a reference to the primary encoder, and
* drm_of_lvds_get_dual_link_pixel_order needs to be given references
* to the output ports of both encoders, therefore leave it like this
* for the time being.
*/
companion_lvds = bridge_to_rcar_lvds(lvds->companion);
companion_lvds->link_type = lvds->link_type;
done:
of_node_put(companion);
return ret;
}
static int rcar_lvds_parse_dt(struct rcar_lvds *lvds)
{
int ret;
ret = drm_of_find_panel_or_bridge(lvds->dev->of_node, 1, 0,
&lvds->panel, &lvds->next_bridge);
if (ret)
goto done;
if (lvds->panel) {
lvds->next_bridge = devm_drm_panel_bridge_add(lvds->dev,
lvds->panel);
if (IS_ERR_OR_NULL(lvds->next_bridge)) {
ret = -EINVAL;
goto done;
}
}
if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK)
ret = rcar_lvds_parse_dt_companion(lvds);
done:
/*
* On D3/E3 the LVDS encoder provides a clock to the DU, which can be
* used for the DPAD output even when the LVDS output is not connected.
* Don't fail probe in that case as the DU will need the bridge to
* control the clock.
*/
if (lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)
return ret == -ENODEV ? 0 : ret;
return ret;
}
static struct clk *rcar_lvds_get_clock(struct rcar_lvds *lvds, const char *name,
bool optional)
{
struct clk *clk;
clk = devm_clk_get(lvds->dev, name);
if (!IS_ERR(clk))
return clk;
if (PTR_ERR(clk) == -ENOENT && optional)
return NULL;
dev_err_probe(lvds->dev, PTR_ERR(clk), "failed to get %s clock\n",
name ? name : "module");
return clk;
}
static int rcar_lvds_get_clocks(struct rcar_lvds *lvds)
{
lvds->clocks.mod = rcar_lvds_get_clock(lvds, NULL, false);
if (IS_ERR(lvds->clocks.mod))
return PTR_ERR(lvds->clocks.mod);
/*
* LVDS encoders without an extended PLL have no external clock inputs.
*/
if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL))
return 0;
lvds->clocks.extal = rcar_lvds_get_clock(lvds, "extal", true);
if (IS_ERR(lvds->clocks.extal))
return PTR_ERR(lvds->clocks.extal);
lvds->clocks.dotclkin[0] = rcar_lvds_get_clock(lvds, "dclkin.0", true);
if (IS_ERR(lvds->clocks.dotclkin[0]))
return PTR_ERR(lvds->clocks.dotclkin[0]);
lvds->clocks.dotclkin[1] = rcar_lvds_get_clock(lvds, "dclkin.1", true);
if (IS_ERR(lvds->clocks.dotclkin[1]))
return PTR_ERR(lvds->clocks.dotclkin[1]);
/* At least one input to the PLL must be available. */
if (!lvds->clocks.extal && !lvds->clocks.dotclkin[0] &&
!lvds->clocks.dotclkin[1]) {
dev_err(lvds->dev,
"no input clock (extal, dclkin.0 or dclkin.1)\n");
return -EINVAL;
}
return 0;
}
static const struct rcar_lvds_device_info rcar_lvds_r8a7790es1_info = {
.gen = 2,
.quirks = RCAR_LVDS_QUIRK_LANES,
.pll_setup = rcar_lvds_pll_setup_gen2,
};
static const struct soc_device_attribute lvds_quirk_matches[] = {
{
.soc_id = "r8a7790", .revision = "ES1.*",
.data = &rcar_lvds_r8a7790es1_info,
},
{ /* sentinel */ }
};
static int rcar_lvds_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *attr;
struct rcar_lvds *lvds;
int ret;
lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
if (lvds == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, lvds);
lvds->dev = &pdev->dev;
lvds->info = of_device_get_match_data(&pdev->dev);
attr = soc_device_match(lvds_quirk_matches);
if (attr)
lvds->info = attr->data;
ret = rcar_lvds_parse_dt(lvds);
if (ret < 0)
return ret;
lvds->bridge.funcs = &rcar_lvds_bridge_ops;
lvds->bridge.of_node = pdev->dev.of_node;
lvds->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lvds->mmio))
return PTR_ERR(lvds->mmio);
ret = rcar_lvds_get_clocks(lvds);
if (ret < 0)
return ret;
lvds->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(lvds->rstc))
return dev_err_probe(&pdev->dev, PTR_ERR(lvds->rstc),
"failed to get cpg reset\n");
pm_runtime_enable(&pdev->dev);
drm_bridge_add(&lvds->bridge);
return 0;
}
static void rcar_lvds_remove(struct platform_device *pdev)
{
struct rcar_lvds *lvds = platform_get_drvdata(pdev);
drm_bridge_remove(&lvds->bridge);
pm_runtime_disable(&pdev->dev);
}
static const struct rcar_lvds_device_info rcar_lvds_gen2_info = {
.gen = 2,
.pll_setup = rcar_lvds_pll_setup_gen2,
};
static const struct rcar_lvds_device_info rcar_lvds_gen3_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_PWD,
.pll_setup = rcar_lvds_pll_setup_gen3,
};
static const struct rcar_lvds_device_info rcar_lvds_r8a77970_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_PWD | RCAR_LVDS_QUIRK_GEN3_LVEN,
.pll_setup = rcar_lvds_pll_setup_gen2,
};
static const struct rcar_lvds_device_info rcar_lvds_r8a77990_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_GEN3_LVEN | RCAR_LVDS_QUIRK_EXT_PLL
| RCAR_LVDS_QUIRK_DUAL_LINK,
};
static const struct rcar_lvds_device_info rcar_lvds_r8a77995_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_GEN3_LVEN | RCAR_LVDS_QUIRK_PWD
| RCAR_LVDS_QUIRK_EXT_PLL | RCAR_LVDS_QUIRK_DUAL_LINK,
};
static const struct of_device_id rcar_lvds_of_table[] = {
{ .compatible = "renesas,r8a7742-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a774b1-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
{ .compatible = "renesas,r8a774e1-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a7796-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a77961-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a77965-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a77970-lvds", .data = &rcar_lvds_r8a77970_info },
{ .compatible = "renesas,r8a77980-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a77990-lvds", .data = &rcar_lvds_r8a77990_info },
{ .compatible = "renesas,r8a77995-lvds", .data = &rcar_lvds_r8a77995_info },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_lvds_of_table);
static int rcar_lvds_runtime_suspend(struct device *dev)
{
struct rcar_lvds *lvds = dev_get_drvdata(dev);
clk_disable_unprepare(lvds->clocks.mod);
reset_control_assert(lvds->rstc);
return 0;
}
static int rcar_lvds_runtime_resume(struct device *dev)
{
struct rcar_lvds *lvds = dev_get_drvdata(dev);
int ret;
ret = reset_control_deassert(lvds->rstc);
if (ret)
return ret;
ret = clk_prepare_enable(lvds->clocks.mod);
if (ret < 0)
goto err_reset_assert;
return 0;
err_reset_assert:
reset_control_assert(lvds->rstc);
return ret;
}
static const struct dev_pm_ops rcar_lvds_pm_ops = {
SET_RUNTIME_PM_OPS(rcar_lvds_runtime_suspend, rcar_lvds_runtime_resume, NULL)
};
static struct platform_driver rcar_lvds_platform_driver = {
.probe = rcar_lvds_probe,
.remove_new = rcar_lvds_remove,
.driver = {
.name = "rcar-lvds",
.pm = &rcar_lvds_pm_ops,
.of_match_table = rcar_lvds_of_table,
},
};
module_platform_driver(rcar_lvds_platform_driver);
MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
MODULE_DESCRIPTION("Renesas R-Car LVDS Encoder Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c |
// SPDX-License-Identifier: GPL-2.0
/*
* R-Car Display Unit Writeback Support
*
* Copyright (C) 2019 Laurent Pinchart <[email protected]>
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_writeback.h>
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
#include "rcar_du_writeback.h"
/**
* struct rcar_du_wb_conn_state - Driver-specific writeback connector state
* @state: base DRM connector state
* @format: format of the writeback framebuffer
*/
struct rcar_du_wb_conn_state {
struct drm_connector_state state;
const struct rcar_du_format_info *format;
};
#define to_rcar_wb_conn_state(s) \
container_of(s, struct rcar_du_wb_conn_state, state)
/**
* struct rcar_du_wb_job - Driver-private data for writeback jobs
* @sg_tables: scatter-gather tables for the framebuffer memory
*/
struct rcar_du_wb_job {
struct sg_table sg_tables[3];
};
static int rcar_du_wb_conn_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
return drm_add_modes_noedid(connector, dev->mode_config.max_width,
dev->mode_config.max_height);
}
static int rcar_du_wb_prepare_job(struct drm_writeback_connector *connector,
struct drm_writeback_job *job)
{
struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector);
struct rcar_du_wb_job *rjob;
int ret;
if (!job->fb)
return 0;
rjob = kzalloc(sizeof(*rjob), GFP_KERNEL);
if (!rjob)
return -ENOMEM;
/* Map the framebuffer to the VSP. */
ret = rcar_du_vsp_map_fb(rcrtc->vsp, job->fb, rjob->sg_tables);
if (ret < 0) {
kfree(rjob);
return ret;
}
job->priv = rjob;
return 0;
}
static void rcar_du_wb_cleanup_job(struct drm_writeback_connector *connector,
struct drm_writeback_job *job)
{
struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector);
struct rcar_du_wb_job *rjob = job->priv;
if (!job->fb)
return;
rcar_du_vsp_unmap_fb(rcrtc->vsp, job->fb, rjob->sg_tables);
kfree(rjob);
}
static const struct drm_connector_helper_funcs rcar_du_wb_conn_helper_funcs = {
.get_modes = rcar_du_wb_conn_get_modes,
.prepare_writeback_job = rcar_du_wb_prepare_job,
.cleanup_writeback_job = rcar_du_wb_cleanup_job,
};
static struct drm_connector_state *
rcar_du_wb_conn_duplicate_state(struct drm_connector *connector)
{
struct rcar_du_wb_conn_state *copy;
if (WARN_ON(!connector->state))
return NULL;
copy = kzalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return NULL;
__drm_atomic_helper_connector_duplicate_state(connector, ©->state);
return ©->state;
}
static void rcar_du_wb_conn_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
__drm_atomic_helper_connector_destroy_state(state);
kfree(to_rcar_wb_conn_state(state));
}
static void rcar_du_wb_conn_reset(struct drm_connector *connector)
{
struct rcar_du_wb_conn_state *state;
if (connector->state) {
rcar_du_wb_conn_destroy_state(connector, connector->state);
connector->state = NULL;
}
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
return;
__drm_atomic_helper_connector_reset(connector, &state->state);
}
static const struct drm_connector_funcs rcar_du_wb_conn_funcs = {
.reset = rcar_du_wb_conn_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = rcar_du_wb_conn_duplicate_state,
.atomic_destroy_state = rcar_du_wb_conn_destroy_state,
};
static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct rcar_du_wb_conn_state *wb_state =
to_rcar_wb_conn_state(conn_state);
const struct drm_display_mode *mode = &crtc_state->mode;
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
if (!conn_state->writeback_job)
return 0;
fb = conn_state->writeback_job->fb;
/*
* Verify that the framebuffer format is supported and that its size
* matches the current mode.
*/
if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
dev_dbg(dev->dev, "%s: invalid framebuffer size %ux%u\n",
__func__, fb->width, fb->height);
return -EINVAL;
}
wb_state->format = rcar_du_format_info(fb->format->format);
if (wb_state->format == NULL) {
dev_dbg(dev->dev, "%s: unsupported format %p4cc\n", __func__,
&fb->format->format);
return -EINVAL;
}
return 0;
}
static const struct drm_encoder_helper_funcs rcar_du_wb_enc_helper_funcs = {
.atomic_check = rcar_du_wb_enc_atomic_check,
};
/*
* Only RGB formats are currently supported as the VSP outputs RGB to the DU
* and can't convert to YUV separately for writeback.
*/
static const u32 writeback_formats[] = {
DRM_FORMAT_RGB332,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
};
int rcar_du_writeback_init(struct rcar_du_device *rcdu,
struct rcar_du_crtc *rcrtc)
{
struct drm_writeback_connector *wb_conn = &rcrtc->writeback;
drm_connector_helper_add(&wb_conn->base,
&rcar_du_wb_conn_helper_funcs);
return drm_writeback_connector_init(&rcdu->ddev, wb_conn,
&rcar_du_wb_conn_funcs,
&rcar_du_wb_enc_helper_funcs,
writeback_formats,
ARRAY_SIZE(writeback_formats),
1 << drm_crtc_index(&rcrtc->crtc));
}
void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
struct vsp1_du_writeback_config *cfg)
{
struct rcar_du_wb_conn_state *wb_state;
struct drm_connector_state *state;
struct rcar_du_wb_job *rjob;
struct drm_framebuffer *fb;
unsigned int i;
state = rcrtc->writeback.base.state;
if (!state || !state->writeback_job)
return;
fb = state->writeback_job->fb;
rjob = state->writeback_job->priv;
wb_state = to_rcar_wb_conn_state(state);
cfg->pixelformat = wb_state->format->v4l2;
cfg->pitch = fb->pitches[0];
for (i = 0; i < wb_state->format->planes; ++i)
cfg->mem[i] = sg_dma_address(rjob->sg_tables[i].sgl)
+ fb->offsets[i];
drm_writeback_queue_job(&rcrtc->writeback, state);
}
void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc)
{
drm_writeback_signal_completion(&rcrtc->writeback, 0);
}
| linux-master | drivers/gpu/drm/renesas/rcar-du/rcar_du_writeback.c |
// SPDX-License-Identifier: GPL-2.0
/*
* R-Car MIPI DSI Encoder
*
* Copyright (C) 2020 Renesas Electronics Corporation
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include "rcar_mipi_dsi.h"
#include "rcar_mipi_dsi_regs.h"
#define MHZ(v) ((u32)((v) * 1000000U))
enum rcar_mipi_dsi_hw_model {
RCAR_DSI_V3U,
RCAR_DSI_V4H,
};
struct rcar_mipi_dsi_device_info {
enum rcar_mipi_dsi_hw_model model;
const struct dsi_clk_config *clk_cfg;
u8 clockset2_m_offset;
u8 n_min;
u8 n_max;
u8 n_mul;
unsigned long fpfd_min;
unsigned long fpfd_max;
u16 m_min;
u16 m_max;
unsigned long fout_min;
unsigned long fout_max;
};
struct rcar_mipi_dsi {
struct device *dev;
const struct rcar_mipi_dsi_device_info *info;
struct reset_control *rstc;
struct mipi_dsi_host host;
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
struct drm_connector connector;
void __iomem *mmio;
struct {
struct clk *mod;
struct clk *pll;
struct clk *dsi;
} clocks;
enum mipi_dsi_pixel_format format;
unsigned int num_data_lanes;
unsigned int lanes;
};
struct dsi_setup_info {
unsigned long hsfreq;
u16 hsfreqrange;
unsigned long fout;
u16 m;
u16 n;
u16 vclk_divider;
const struct dsi_clk_config *clkset;
};
static inline struct rcar_mipi_dsi *
bridge_to_rcar_mipi_dsi(struct drm_bridge *bridge)
{
return container_of(bridge, struct rcar_mipi_dsi, bridge);
}
static inline struct rcar_mipi_dsi *
host_to_rcar_mipi_dsi(struct mipi_dsi_host *host)
{
return container_of(host, struct rcar_mipi_dsi, host);
}
static const u32 hsfreqrange_table[][2] = {
{ MHZ(80), 0x00 }, { MHZ(90), 0x10 }, { MHZ(100), 0x20 },
{ MHZ(110), 0x30 }, { MHZ(120), 0x01 }, { MHZ(130), 0x11 },
{ MHZ(140), 0x21 }, { MHZ(150), 0x31 }, { MHZ(160), 0x02 },
{ MHZ(170), 0x12 }, { MHZ(180), 0x22 }, { MHZ(190), 0x32 },
{ MHZ(205), 0x03 }, { MHZ(220), 0x13 }, { MHZ(235), 0x23 },
{ MHZ(250), 0x33 }, { MHZ(275), 0x04 }, { MHZ(300), 0x14 },
{ MHZ(325), 0x25 }, { MHZ(350), 0x35 }, { MHZ(400), 0x05 },
{ MHZ(450), 0x16 }, { MHZ(500), 0x26 }, { MHZ(550), 0x37 },
{ MHZ(600), 0x07 }, { MHZ(650), 0x18 }, { MHZ(700), 0x28 },
{ MHZ(750), 0x39 }, { MHZ(800), 0x09 }, { MHZ(850), 0x19 },
{ MHZ(900), 0x29 }, { MHZ(950), 0x3a }, { MHZ(1000), 0x0a },
{ MHZ(1050), 0x1a }, { MHZ(1100), 0x2a }, { MHZ(1150), 0x3b },
{ MHZ(1200), 0x0b }, { MHZ(1250), 0x1b }, { MHZ(1300), 0x2b },
{ MHZ(1350), 0x3c }, { MHZ(1400), 0x0c }, { MHZ(1450), 0x1c },
{ MHZ(1500), 0x2c }, { MHZ(1550), 0x3d }, { MHZ(1600), 0x0d },
{ MHZ(1650), 0x1d }, { MHZ(1700), 0x2e }, { MHZ(1750), 0x3e },
{ MHZ(1800), 0x0e }, { MHZ(1850), 0x1e }, { MHZ(1900), 0x2f },
{ MHZ(1950), 0x3f }, { MHZ(2000), 0x0f }, { MHZ(2050), 0x40 },
{ MHZ(2100), 0x41 }, { MHZ(2150), 0x42 }, { MHZ(2200), 0x43 },
{ MHZ(2250), 0x44 }, { MHZ(2300), 0x45 }, { MHZ(2350), 0x46 },
{ MHZ(2400), 0x47 }, { MHZ(2450), 0x48 }, { MHZ(2500), 0x49 },
{ /* sentinel */ },
};
struct dsi_clk_config {
u32 min_freq;
u32 max_freq;
u8 vco_cntrl;
u8 cpbias_cntrl;
u8 gmp_cntrl;
u8 int_cntrl;
u8 prop_cntrl;
};
static const struct dsi_clk_config dsi_clk_cfg_v3u[] = {
{ MHZ(40), MHZ(55), 0x3f, 0x10, 0x01, 0x00, 0x0b },
{ MHZ(52.5), MHZ(80), 0x39, 0x10, 0x01, 0x00, 0x0b },
{ MHZ(80), MHZ(110), 0x2f, 0x10, 0x01, 0x00, 0x0b },
{ MHZ(105), MHZ(160), 0x29, 0x10, 0x01, 0x00, 0x0b },
{ MHZ(160), MHZ(220), 0x1f, 0x10, 0x01, 0x00, 0x0b },
{ MHZ(210), MHZ(320), 0x19, 0x10, 0x01, 0x00, 0x0b },
{ MHZ(320), MHZ(440), 0x0f, 0x10, 0x01, 0x00, 0x0b },
{ MHZ(420), MHZ(660), 0x09, 0x10, 0x01, 0x00, 0x0b },
{ MHZ(630), MHZ(1149), 0x03, 0x10, 0x01, 0x00, 0x0b },
{ MHZ(1100), MHZ(1152), 0x01, 0x10, 0x01, 0x00, 0x0b },
{ MHZ(1150), MHZ(1250), 0x01, 0x10, 0x01, 0x00, 0x0c },
{ /* sentinel */ },
};
static const struct dsi_clk_config dsi_clk_cfg_v4h[] = {
{ MHZ(40), MHZ(45.31), 0x2b, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(45.31), MHZ(54.66), 0x28, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(54.66), MHZ(62.5), 0x28, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(62.5), MHZ(75), 0x27, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(75), MHZ(90.63), 0x23, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(90.63), MHZ(109.37), 0x20, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(109.37), MHZ(125), 0x20, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(125), MHZ(150), 0x1f, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(150), MHZ(181.25), 0x1b, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(181.25), MHZ(218.75), 0x18, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(218.75), MHZ(250), 0x18, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(250), MHZ(300), 0x17, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(300), MHZ(362.5), 0x13, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(362.5), MHZ(455.48), 0x10, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(455.48), MHZ(500), 0x10, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(500), MHZ(600), 0x0f, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(600), MHZ(725), 0x0b, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(725), MHZ(875), 0x08, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(875), MHZ(1000), 0x08, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(1000), MHZ(1200), 0x07, 0x00, 0x00, 0x08, 0x0a },
{ MHZ(1200), MHZ(1250), 0x03, 0x00, 0x00, 0x08, 0x0a },
{ /* sentinel */ },
};
static void rcar_mipi_dsi_write(struct rcar_mipi_dsi *dsi, u32 reg, u32 data)
{
iowrite32(data, dsi->mmio + reg);
}
static u32 rcar_mipi_dsi_read(struct rcar_mipi_dsi *dsi, u32 reg)
{
return ioread32(dsi->mmio + reg);
}
static void rcar_mipi_dsi_clr(struct rcar_mipi_dsi *dsi, u32 reg, u32 clr)
{
rcar_mipi_dsi_write(dsi, reg, rcar_mipi_dsi_read(dsi, reg) & ~clr);
}
static void rcar_mipi_dsi_set(struct rcar_mipi_dsi *dsi, u32 reg, u32 set)
{
rcar_mipi_dsi_write(dsi, reg, rcar_mipi_dsi_read(dsi, reg) | set);
}
static int rcar_mipi_dsi_write_phtw(struct rcar_mipi_dsi *dsi, u32 phtw)
{
u32 status;
int ret;
rcar_mipi_dsi_write(dsi, PHTW, phtw);
ret = read_poll_timeout(rcar_mipi_dsi_read, status,
!(status & (PHTW_DWEN | PHTW_CWEN)),
2000, 10000, false, dsi, PHTW);
if (ret < 0) {
dev_err(dsi->dev, "PHY test interface write timeout (0x%08x)\n",
phtw);
return ret;
}
return ret;
}
static int rcar_mipi_dsi_write_phtw_arr(struct rcar_mipi_dsi *dsi,
const u32 *phtw, unsigned int size)
{
for (unsigned int i = 0; i < size; i++) {
int ret = rcar_mipi_dsi_write_phtw(dsi, phtw[i]);
if (ret < 0)
return ret;
}
return 0;
}
#define WRITE_PHTW(...) \
({ \
static const u32 phtw[] = { __VA_ARGS__ }; \
int ret; \
ret = rcar_mipi_dsi_write_phtw_arr(dsi, phtw, \
ARRAY_SIZE(phtw)); \
ret; \
})
static int rcar_mipi_dsi_init_phtw_v3u(struct rcar_mipi_dsi *dsi)
{
return WRITE_PHTW(0x01020114, 0x01600115, 0x01030116, 0x0102011d,
0x011101a4, 0x018601a4, 0x014201a0, 0x010001a3,
0x0101011f);
}
static int rcar_mipi_dsi_post_init_phtw_v3u(struct rcar_mipi_dsi *dsi)
{
return WRITE_PHTW(0x010c0130, 0x010c0140, 0x010c0150, 0x010c0180,
0x010c0190, 0x010a0160, 0x010a0170, 0x01800164,
0x01800174);
}
static int rcar_mipi_dsi_init_phtw_v4h(struct rcar_mipi_dsi *dsi,
const struct dsi_setup_info *setup_info)
{
int ret;
if (setup_info->hsfreq < MHZ(450)) {
ret = WRITE_PHTW(0x01010100, 0x011b01ac);
if (ret)
return ret;
}
ret = WRITE_PHTW(0x01010100, 0x01030173, 0x01000174, 0x01500175,
0x01030176, 0x01040166, 0x010201ad);
if (ret)
return ret;
if (setup_info->hsfreq <= MHZ(1000))
ret = WRITE_PHTW(0x01020100, 0x01910170, 0x01020171,
0x01110172);
else if (setup_info->hsfreq <= MHZ(1500))
ret = WRITE_PHTW(0x01020100, 0x01980170, 0x01030171,
0x01100172);
else if (setup_info->hsfreq <= MHZ(2500))
ret = WRITE_PHTW(0x01020100, 0x0144016b, 0x01000172);
else
return -EINVAL;
if (ret)
return ret;
if (dsi->lanes <= 1) {
ret = WRITE_PHTW(0x01070100, 0x010e010b);
if (ret)
return ret;
}
if (dsi->lanes <= 2) {
ret = WRITE_PHTW(0x01090100, 0x010e010b);
if (ret)
return ret;
}
if (dsi->lanes <= 3) {
ret = WRITE_PHTW(0x010b0100, 0x010e010b);
if (ret)
return ret;
}
if (setup_info->hsfreq <= MHZ(1500)) {
ret = WRITE_PHTW(0x01010100, 0x01c0016e);
if (ret)
return ret;
}
return 0;
}
static int
rcar_mipi_dsi_post_init_phtw_v4h(struct rcar_mipi_dsi *dsi,
const struct dsi_setup_info *setup_info)
{
u32 status;
int ret;
if (setup_info->hsfreq <= MHZ(1500)) {
WRITE_PHTW(0x01020100, 0x00000180);
ret = read_poll_timeout(rcar_mipi_dsi_read, status,
status & PHTR_TEST, 2000, 10000, false,
dsi, PHTR);
if (ret < 0) {
dev_err(dsi->dev, "failed to test PHTR\n");
return ret;
}
WRITE_PHTW(0x01010100, 0x0100016e);
}
return 0;
}
/* -----------------------------------------------------------------------------
* Hardware Setup
*/
static void rcar_mipi_dsi_pll_calc(struct rcar_mipi_dsi *dsi,
unsigned long fin_rate,
unsigned long fout_target,
struct dsi_setup_info *setup_info)
{
unsigned int best_err = -1;
const struct rcar_mipi_dsi_device_info *info = dsi->info;
for (unsigned int n = info->n_min; n <= info->n_max; n++) {
unsigned long fpfd;
fpfd = fin_rate / n;
if (fpfd < info->fpfd_min || fpfd > info->fpfd_max)
continue;
for (unsigned int m = info->m_min; m <= info->m_max; m++) {
unsigned int err;
u64 fout;
fout = div64_u64((u64)fpfd * m, dsi->info->n_mul);
if (fout < info->fout_min || fout > info->fout_max)
continue;
fout = div64_u64(fout, setup_info->vclk_divider);
if (fout < setup_info->clkset->min_freq ||
fout > setup_info->clkset->max_freq)
continue;
err = abs((long)(fout - fout_target) * 10000 /
(long)fout_target);
if (err < best_err) {
setup_info->m = m;
setup_info->n = n;
setup_info->fout = (unsigned long)fout;
best_err = err;
if (err == 0)
return;
}
}
}
}
static void rcar_mipi_dsi_parameters_calc(struct rcar_mipi_dsi *dsi,
struct clk *clk, unsigned long target,
struct dsi_setup_info *setup_info)
{
const struct dsi_clk_config *clk_cfg;
unsigned long fout_target;
unsigned long fin_rate;
unsigned int i;
unsigned int err;
/*
* Calculate Fout = dot clock * ColorDepth / (2 * Lane Count).
* The allowed range for Fout is [40 - 1250] MHz.
*/
fout_target = target * mipi_dsi_pixel_format_to_bpp(dsi->format)
/ (2 * dsi->lanes);
if (fout_target < MHZ(40) || fout_target > MHZ(1250))
return;
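/*
* For illustration (assumed configuration): a 148.5 MHz dot clock with
* RGB888 (24 bpp) over 4 lanes gives
* Fout = 148.5 MHz * 24 / (2 * 4) = 445.5 MHz, which falls in the
* MHZ(362.5)-MHZ(455.48) row of the V4H table (vco_cntrl = 0x10).
*/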
/* Find PLL settings */
for (clk_cfg = dsi->info->clk_cfg; clk_cfg->min_freq != 0; clk_cfg++) {
if (fout_target > clk_cfg->min_freq &&
fout_target <= clk_cfg->max_freq) {
setup_info->clkset = clk_cfg;
break;
}
}
fin_rate = clk_get_rate(clk);
switch (dsi->info->model) {
case RCAR_DSI_V3U:
default:
setup_info->vclk_divider = 1 << ((clk_cfg->vco_cntrl >> 4) & 0x3);
break;
case RCAR_DSI_V4H:
setup_info->vclk_divider = 1 << (((clk_cfg->vco_cntrl >> 3) & 0x7) + 1);
break;
}
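/*
* For illustration, evaluating the two decodings above with
* vco_cntrl = 0x10: V3U gives 1 << ((0x10 >> 4) & 0x3) = 2, while
* V4H gives 1 << (((0x10 >> 3) & 0x7) + 1) = 8.
*/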
rcar_mipi_dsi_pll_calc(dsi, fin_rate, fout_target, setup_info);
/* Find hsfreqrange */
setup_info->hsfreq = setup_info->fout * 2;
for (i = 0; i < ARRAY_SIZE(hsfreqrange_table); i++) {
if (hsfreqrange_table[i][0] >= setup_info->hsfreq) {
setup_info->hsfreqrange = hsfreqrange_table[i][1];
break;
}
}
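/*
* For illustration, continuing the assumed 445.5 MHz example above
* (and supposing the PLL search lands close to it), hsfreq is about
* 891 MHz and the first table threshold at or above it is MHZ(900),
* giving hsfreqrange = 0x29.
*/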
err = abs((long)(setup_info->fout - fout_target) * 10000 / (long)fout_target);
dev_dbg(dsi->dev,
"Fout = %u * %lu / (%u * %u * %u) = %lu (target %lu Hz, error %d.%02u%%)\n",
setup_info->m, fin_rate, dsi->info->n_mul, setup_info->n,
setup_info->vclk_divider, setup_info->fout, fout_target,
err / 100, err % 100);
dev_dbg(dsi->dev,
"vco_cntrl = 0x%x\tprop_cntrl = 0x%x\thsfreqrange = 0x%x\n",
clk_cfg->vco_cntrl, clk_cfg->prop_cntrl,
setup_info->hsfreqrange);
}
static void rcar_mipi_dsi_set_display_timing(struct rcar_mipi_dsi *dsi,
const struct drm_display_mode *mode)
{
u32 setr;
u32 vprmset0r;
u32 vprmset1r;
u32 vprmset2r;
u32 vprmset3r;
u32 vprmset4r;
/* Configuration for Pixel Stream and Packet Header */
if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 24)
rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB24);
else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 18)
rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB18);
else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 16)
rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB16);
else {
dev_warn(dsi->dev, "unsupported format");
return;
}
/* Configuration for Blanking sequence and Input Pixel */
setr = TXVMSETR_HSABPEN_EN | TXVMSETR_HBPBPEN_EN
| TXVMSETR_HFPBPEN_EN | TXVMSETR_SYNSEQ_PULSES
| TXVMSETR_PIXWDTH | TXVMSETR_VSTPM;
rcar_mipi_dsi_write(dsi, TXVMSETR, setr);
/* Configuration for Video Parameters */
vprmset0r = (mode->flags & DRM_MODE_FLAG_PVSYNC ?
TXVMVPRMSET0R_VSPOL_HIG : TXVMVPRMSET0R_VSPOL_LOW)
| (mode->flags & DRM_MODE_FLAG_PHSYNC ?
TXVMVPRMSET0R_HSPOL_HIG : TXVMVPRMSET0R_HSPOL_LOW)
| TXVMVPRMSET0R_CSPC_RGB | TXVMVPRMSET0R_BPP_24;
vprmset1r = TXVMVPRMSET1R_VACTIVE(mode->vdisplay)
| TXVMVPRMSET1R_VSA(mode->vsync_end - mode->vsync_start);
vprmset2r = TXVMVPRMSET2R_VFP(mode->vsync_start - mode->vdisplay)
| TXVMVPRMSET2R_VBP(mode->vtotal - mode->vsync_end);
vprmset3r = TXVMVPRMSET3R_HACTIVE(mode->hdisplay)
| TXVMVPRMSET3R_HSA(mode->hsync_end - mode->hsync_start);
vprmset4r = TXVMVPRMSET4R_HFP(mode->hsync_start - mode->hdisplay)
| TXVMVPRMSET4R_HBP(mode->htotal - mode->hsync_end);
rcar_mipi_dsi_write(dsi, TXVMVPRMSET0R, vprmset0r);
rcar_mipi_dsi_write(dsi, TXVMVPRMSET1R, vprmset1r);
rcar_mipi_dsi_write(dsi, TXVMVPRMSET2R, vprmset2r);
rcar_mipi_dsi_write(dsi, TXVMVPRMSET3R, vprmset3r);
rcar_mipi_dsi_write(dsi, TXVMVPRMSET4R, vprmset4r);
}
static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
const struct drm_display_mode *mode)
{
struct dsi_setup_info setup_info = {};
unsigned int timeout;
int ret;
int dsi_format;
u32 phy_setup;
u32 clockset2, clockset3;
u32 ppisetr;
u32 vclkset;
/* Check for a valid format. */
dsi_format = mipi_dsi_pixel_format_to_bpp(dsi->format);
if (dsi_format < 0) {
dev_warn(dsi->dev, "invalid format");
return -EINVAL;
}
/* Parameters Calculation */
rcar_mipi_dsi_parameters_calc(dsi, dsi->clocks.pll,
mode->clock * 1000, &setup_info);
/* LPCLK enable */
rcar_mipi_dsi_set(dsi, LPCLKSET, LPCLKSET_CKEN);
/* CFGCLK enabled */
rcar_mipi_dsi_set(dsi, CFGCLKSET, CFGCLKSET_CKEN);
rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ);
rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
rcar_mipi_dsi_set(dsi, PHTC, PHTC_TESTCLR);
rcar_mipi_dsi_clr(dsi, PHTC, PHTC_TESTCLR);
/* PHY setting */
phy_setup = rcar_mipi_dsi_read(dsi, PHYSETUP);
phy_setup &= ~PHYSETUP_HSFREQRANGE_MASK;
phy_setup |= PHYSETUP_HSFREQRANGE(setup_info.hsfreqrange);
rcar_mipi_dsi_write(dsi, PHYSETUP, phy_setup);
switch (dsi->info->model) {
case RCAR_DSI_V3U:
default:
ret = rcar_mipi_dsi_init_phtw_v3u(dsi);
if (ret < 0)
return ret;
break;
case RCAR_DSI_V4H:
ret = rcar_mipi_dsi_init_phtw_v4h(dsi, &setup_info);
if (ret < 0)
return ret;
break;
}
/* PLL Clock Setting */
rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR);
rcar_mipi_dsi_set(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR);
rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR);
clockset2 = CLOCKSET2_M(setup_info.m - dsi->info->clockset2_m_offset)
| CLOCKSET2_N(setup_info.n - 1)
| CLOCKSET2_VCO_CNTRL(setup_info.clkset->vco_cntrl);
clockset3 = CLOCKSET3_PROP_CNTRL(setup_info.clkset->prop_cntrl)
| CLOCKSET3_INT_CNTRL(setup_info.clkset->int_cntrl)
| CLOCKSET3_CPBIAS_CNTRL(setup_info.clkset->cpbias_cntrl)
| CLOCKSET3_GMP_CNTRL(setup_info.clkset->gmp_cntrl);
rcar_mipi_dsi_write(dsi, CLOCKSET2, clockset2);
rcar_mipi_dsi_write(dsi, CLOCKSET3, clockset3);
rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
rcar_mipi_dsi_set(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
udelay(10);
rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN;
rcar_mipi_dsi_write(dsi, PPISETR, ppisetr);
rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_RSTZ);
usleep_range(400, 500);
/* Checking PPI clock status register */
for (timeout = 10; timeout > 0; --timeout) {
if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) &&
(rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) &&
(rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK))
break;
usleep_range(1000, 2000);
}
if (!timeout) {
dev_err(dsi->dev, "failed to enable PPI clock\n");
return -ETIMEDOUT;
}
switch (dsi->info->model) {
case RCAR_DSI_V3U:
default:
ret = rcar_mipi_dsi_post_init_phtw_v3u(dsi);
if (ret < 0)
return ret;
break;
case RCAR_DSI_V4H:
ret = rcar_mipi_dsi_post_init_phtw_v4h(dsi, &setup_info);
if (ret < 0)
return ret;
break;
}
/* Enable DOT clock */
vclkset = VCLKSET_CKEN;
rcar_mipi_dsi_write(dsi, VCLKSET, vclkset);
if (dsi_format == 24)
vclkset |= VCLKSET_BPP_24;
else if (dsi_format == 18)
vclkset |= VCLKSET_BPP_18;
else if (dsi_format == 16)
vclkset |= VCLKSET_BPP_16;
else {
dev_warn(dsi->dev, "unsupported format");
return -EINVAL;
}
vclkset |= VCLKSET_COLOR_RGB | VCLKSET_LANE(dsi->lanes - 1);
switch (dsi->info->model) {
case RCAR_DSI_V3U:
default:
vclkset |= VCLKSET_DIV_V3U(__ffs(setup_info.vclk_divider));
break;
case RCAR_DSI_V4H:
vclkset |= VCLKSET_DIV_V4H(__ffs(setup_info.vclk_divider) - 1);
break;
}
rcar_mipi_dsi_write(dsi, VCLKSET, vclkset);
/* After setting VCLKSET register, enable VCLKEN */
rcar_mipi_dsi_set(dsi, VCLKEN, VCLKEN_CKEN);
dev_dbg(dsi->dev, "DSI device is started\n");
return 0;
}
static void rcar_mipi_dsi_shutdown(struct rcar_mipi_dsi *dsi)
{
/* Disable VCLKEN */
rcar_mipi_dsi_write(dsi, VCLKSET, 0);
/* Disable DOT clock */
rcar_mipi_dsi_write(dsi, VCLKSET, 0);
rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ);
rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
/* CFGCLK disable */
rcar_mipi_dsi_clr(dsi, CFGCLKSET, CFGCLKSET_CKEN);
/* LPCLK disable */
rcar_mipi_dsi_clr(dsi, LPCLKSET, LPCLKSET_CKEN);
dev_dbg(dsi->dev, "DSI device is shutdown\n");
}
static int rcar_mipi_dsi_clk_enable(struct rcar_mipi_dsi *dsi)
{
int ret;
reset_control_deassert(dsi->rstc);
ret = clk_prepare_enable(dsi->clocks.mod);
if (ret < 0)
goto err_reset;
ret = clk_prepare_enable(dsi->clocks.dsi);
if (ret < 0)
goto err_clock;
return 0;
err_clock:
clk_disable_unprepare(dsi->clocks.mod);
err_reset:
reset_control_assert(dsi->rstc);
return ret;
}
static void rcar_mipi_dsi_clk_disable(struct rcar_mipi_dsi *dsi)
{
clk_disable_unprepare(dsi->clocks.dsi);
clk_disable_unprepare(dsi->clocks.mod);
reset_control_assert(dsi->rstc);
}
static int rcar_mipi_dsi_start_hs_clock(struct rcar_mipi_dsi *dsi)
{
	/*
	 * The hardware manual requires checking that TxDDRClkHS-Q is stable,
	 * but does not describe how to perform the check, so it is skipped
	 * here.
	 */
u32 status;
int ret;
/* Start HS clock. */
rcar_mipi_dsi_set(dsi, PPICLCR, PPICLCR_TXREQHS);
ret = read_poll_timeout(rcar_mipi_dsi_read, status,
status & PPICLSR_TOHS,
2000, 10000, false, dsi, PPICLSR);
if (ret < 0) {
dev_err(dsi->dev, "failed to enable HS clock\n");
return ret;
}
rcar_mipi_dsi_set(dsi, PPICLSCR, PPICLSCR_TOHS);
return 0;
}
static int rcar_mipi_dsi_start_video(struct rcar_mipi_dsi *dsi)
{
u32 status;
int ret;
/* Wait for the link to be ready. */
ret = read_poll_timeout(rcar_mipi_dsi_read, status,
!(status & (LINKSR_LPBUSY | LINKSR_HSBUSY)),
2000, 10000, false, dsi, LINKSR);
if (ret < 0) {
dev_err(dsi->dev, "Link failed to become ready\n");
return ret;
}
/* De-assert video FIFO clear. */
rcar_mipi_dsi_clr(dsi, TXVMCR, TXVMCR_VFCLR);
ret = read_poll_timeout(rcar_mipi_dsi_read, status,
status & TXVMSR_VFRDY,
2000, 10000, false, dsi, TXVMSR);
if (ret < 0) {
dev_err(dsi->dev, "Failed to de-assert video FIFO clear\n");
return ret;
}
/* Enable transmission in video mode. */
rcar_mipi_dsi_set(dsi, TXVMCR, TXVMCR_EN_VIDEO);
ret = read_poll_timeout(rcar_mipi_dsi_read, status,
status & TXVMSR_RDY,
2000, 10000, false, dsi, TXVMSR);
if (ret < 0) {
dev_err(dsi->dev, "Failed to enable video transmission\n");
return ret;
}
return 0;
}
static void rcar_mipi_dsi_stop_video(struct rcar_mipi_dsi *dsi)
{
u32 status;
int ret;
/* Disable transmission in video mode. */
rcar_mipi_dsi_clr(dsi, TXVMCR, TXVMCR_EN_VIDEO);
ret = read_poll_timeout(rcar_mipi_dsi_read, status,
!(status & TXVMSR_ACT),
2000, 100000, false, dsi, TXVMSR);
if (ret < 0) {
dev_err(dsi->dev, "Failed to disable video transmission\n");
return;
}
/* Assert video FIFO clear. */
rcar_mipi_dsi_set(dsi, TXVMCR, TXVMCR_VFCLR);
ret = read_poll_timeout(rcar_mipi_dsi_read, status,
!(status & TXVMSR_VFRDY),
2000, 100000, false, dsi, TXVMSR);
if (ret < 0) {
dev_err(dsi->dev, "Failed to assert video FIFO clear\n");
return;
}
}
/* -----------------------------------------------------------------------------
* Bridge
*/
static int rcar_mipi_dsi_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
flags);
}
static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
rcar_mipi_dsi_start_video(dsi);
}
static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
rcar_mipi_dsi_stop_video(dsi);
}
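/*
 * Power up the DSI encoder for the mode selected on the CRTC feeding this
 * bridge: enable the clocks, configure the PHY and PLL, set the display
 * timings and start the HS clock. Called from the DU CRTC code
 * (rcar_du_crtc_atomic_enable()) before the CRTC is started, as the DSI
 * clock must be running by then.
 */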
void rcar_mipi_dsi_pclk_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
const struct drm_display_mode *mode;
struct drm_connector *connector;
struct drm_crtc *crtc;
int ret;
connector = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
mode = &drm_atomic_get_new_crtc_state(state, crtc)->adjusted_mode;
ret = rcar_mipi_dsi_clk_enable(dsi);
if (ret < 0) {
dev_err(dsi->dev, "failed to enable DSI clocks\n");
return;
}
ret = rcar_mipi_dsi_startup(dsi, mode);
if (ret < 0)
goto err_dsi_startup;
rcar_mipi_dsi_set_display_timing(dsi, mode);
ret = rcar_mipi_dsi_start_hs_clock(dsi);
if (ret < 0)
goto err_dsi_start_hs;
return;
err_dsi_start_hs:
rcar_mipi_dsi_shutdown(dsi);
err_dsi_startup:
rcar_mipi_dsi_clk_disable(dsi);
}
EXPORT_SYMBOL_GPL(rcar_mipi_dsi_pclk_enable);
void rcar_mipi_dsi_pclk_disable(struct drm_bridge *bridge)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
rcar_mipi_dsi_shutdown(dsi);
rcar_mipi_dsi_clk_disable(dsi);
}
EXPORT_SYMBOL_GPL(rcar_mipi_dsi_pclk_disable);
static enum drm_mode_status
rcar_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
if (mode->clock > 297000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static const struct drm_bridge_funcs rcar_mipi_dsi_bridge_ops = {
.attach = rcar_mipi_dsi_attach,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_enable = rcar_mipi_dsi_atomic_enable,
.atomic_disable = rcar_mipi_dsi_atomic_disable,
.mode_valid = rcar_mipi_dsi_bridge_mode_valid,
};
/* -----------------------------------------------------------------------------
* Host setting
*/
static int rcar_mipi_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
int ret;
if (device->lanes > dsi->num_data_lanes)
return -EINVAL;
dsi->lanes = device->lanes;
dsi->format = device->format;
dsi->next_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node,
1, 0);
if (IS_ERR(dsi->next_bridge)) {
ret = PTR_ERR(dsi->next_bridge);
dev_err(dsi->dev, "failed to get next bridge: %d\n", ret);
return ret;
}
/* Initialize the DRM bridge. */
dsi->bridge.funcs = &rcar_mipi_dsi_bridge_ops;
dsi->bridge.of_node = dsi->dev->of_node;
drm_bridge_add(&dsi->bridge);
return 0;
}
static int rcar_mipi_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
drm_bridge_remove(&dsi->bridge);
return 0;
}
static const struct mipi_dsi_host_ops rcar_mipi_dsi_host_ops = {
.attach = rcar_mipi_dsi_host_attach,
.detach = rcar_mipi_dsi_host_detach,
};
/* -----------------------------------------------------------------------------
* Probe & Remove
*/
static int rcar_mipi_dsi_parse_dt(struct rcar_mipi_dsi *dsi)
{
int ret;
ret = drm_of_get_data_lanes_count_ep(dsi->dev->of_node, 1, 0, 1, 4);
if (ret < 0) {
dev_err(dsi->dev, "missing or invalid data-lanes property\n");
return ret;
}
dsi->num_data_lanes = ret;
return 0;
}
static struct clk *rcar_mipi_dsi_get_clock(struct rcar_mipi_dsi *dsi,
const char *name,
bool optional)
{
struct clk *clk;
clk = devm_clk_get(dsi->dev, name);
if (!IS_ERR(clk))
return clk;
if (PTR_ERR(clk) == -ENOENT && optional)
return NULL;
dev_err_probe(dsi->dev, PTR_ERR(clk), "failed to get %s clock\n",
name ? name : "module");
return clk;
}
static int rcar_mipi_dsi_get_clocks(struct rcar_mipi_dsi *dsi)
{
dsi->clocks.mod = rcar_mipi_dsi_get_clock(dsi, NULL, false);
if (IS_ERR(dsi->clocks.mod))
return PTR_ERR(dsi->clocks.mod);
dsi->clocks.pll = rcar_mipi_dsi_get_clock(dsi, "pll", true);
if (IS_ERR(dsi->clocks.pll))
return PTR_ERR(dsi->clocks.pll);
dsi->clocks.dsi = rcar_mipi_dsi_get_clock(dsi, "dsi", true);
if (IS_ERR(dsi->clocks.dsi))
return PTR_ERR(dsi->clocks.dsi);
if (!dsi->clocks.pll && !dsi->clocks.dsi) {
dev_err(dsi->dev, "no input clock (pll, dsi)\n");
return -EINVAL;
}
return 0;
}
static int rcar_mipi_dsi_probe(struct platform_device *pdev)
{
struct rcar_mipi_dsi *dsi;
int ret;
dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
if (dsi == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, dsi);
dsi->dev = &pdev->dev;
dsi->info = of_device_get_match_data(&pdev->dev);
ret = rcar_mipi_dsi_parse_dt(dsi);
if (ret < 0)
return ret;
/* Acquire resources. */
dsi->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->mmio))
return PTR_ERR(dsi->mmio);
ret = rcar_mipi_dsi_get_clocks(dsi);
if (ret < 0)
return ret;
dsi->rstc = devm_reset_control_get(dsi->dev, NULL);
if (IS_ERR(dsi->rstc)) {
dev_err(dsi->dev, "failed to get cpg reset\n");
return PTR_ERR(dsi->rstc);
}
/* Initialize the DSI host. */
dsi->host.dev = dsi->dev;
dsi->host.ops = &rcar_mipi_dsi_host_ops;
ret = mipi_dsi_host_register(&dsi->host);
if (ret < 0)
return ret;
return 0;
}
static void rcar_mipi_dsi_remove(struct platform_device *pdev)
{
struct rcar_mipi_dsi *dsi = platform_get_drvdata(pdev);
mipi_dsi_host_unregister(&dsi->host);
}
static const struct rcar_mipi_dsi_device_info v3u_data = {
.model = RCAR_DSI_V3U,
.clk_cfg = dsi_clk_cfg_v3u,
.clockset2_m_offset = 2,
.n_min = 3,
.n_max = 8,
.n_mul = 1,
.fpfd_min = MHZ(2),
.fpfd_max = MHZ(8),
.m_min = 64,
.m_max = 625,
.fout_min = MHZ(320),
.fout_max = MHZ(1250),
};
static const struct rcar_mipi_dsi_device_info v4h_data = {
.model = RCAR_DSI_V4H,
.clk_cfg = dsi_clk_cfg_v4h,
.clockset2_m_offset = 0,
.n_min = 1,
.n_max = 8,
.n_mul = 2,
.fpfd_min = MHZ(8),
.fpfd_max = MHZ(24),
.m_min = 167,
.m_max = 1000,
.fout_min = MHZ(2000),
.fout_max = MHZ(4000),
};
static const struct of_device_id rcar_mipi_dsi_of_table[] = {
{ .compatible = "renesas,r8a779a0-dsi-csi2-tx", .data = &v3u_data },
{ .compatible = "renesas,r8a779g0-dsi-csi2-tx", .data = &v4h_data },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_mipi_dsi_of_table);
static struct platform_driver rcar_mipi_dsi_platform_driver = {
.probe = rcar_mipi_dsi_probe,
.remove_new = rcar_mipi_dsi_remove,
.driver = {
.name = "rcar-mipi-dsi",
.of_match_table = rcar_mipi_dsi_of_table,
},
};
module_platform_driver(rcar_mipi_dsi_platform_driver);
MODULE_DESCRIPTION("Renesas R-Car MIPI DSI Encoder Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* R-Car Display Unit VSP-Based Compositor
*
* Copyright (C) 2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_vblank.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/vsp1.h>
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
#include "rcar_du_vsp.h"
#include "rcar_du_writeback.h"
static void rcar_du_vsp_complete(void *private, unsigned int status, u32 crc)
{
struct rcar_du_crtc *crtc = private;
if (crtc->vblank_enable)
drm_crtc_handle_vblank(&crtc->crtc);
if (status & VSP1_DU_STATUS_COMPLETE)
rcar_du_crtc_finish_page_flip(crtc);
if (status & VSP1_DU_STATUS_WRITEBACK)
rcar_du_writeback_complete(crtc);
drm_crtc_add_crc_entry(&crtc->crtc, false, 0, &crc);
}
void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
{
const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
struct rcar_du_device *rcdu = crtc->dev;
struct vsp1_du_lif_config cfg = {
.width = mode->hdisplay,
.height = mode->vdisplay,
.interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE,
.callback = rcar_du_vsp_complete,
.callback_data = crtc,
};
struct rcar_du_plane_state state = {
.state = {
.alpha = DRM_BLEND_ALPHA_OPAQUE,
.crtc = &crtc->crtc,
.dst.x1 = 0,
.dst.y1 = 0,
.dst.x2 = mode->hdisplay,
.dst.y2 = mode->vdisplay,
.src.x1 = 0,
.src.y1 = 0,
.src.x2 = mode->hdisplay << 16,
.src.y2 = mode->vdisplay << 16,
.zpos = 0,
},
.format = rcar_du_format_info(DRM_FORMAT_XRGB8888),
.source = RCAR_DU_PLANE_VSPD1,
.colorkey = 0,
};
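	/*
	 * The VSP output is fed through a fixed hardware plane: plane 0 or 1
	 * on Gen2, plane 0 or 2 on Gen3 and later, matching the fixed
	 * assignment used in rcar_du_crtc_update_planes().
	 */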
if (rcdu->info->gen >= 3)
state.hwindex = (crtc->index % 2) ? 2 : 0;
else
state.hwindex = crtc->index % 2;
__rcar_du_plane_setup(crtc->group, &state);
vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
}
void rcar_du_vsp_disable(struct rcar_du_crtc *crtc)
{
vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, NULL);
}
void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc)
{
vsp1_du_atomic_begin(crtc->vsp->vsp, crtc->vsp_pipe);
}
void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc)
{
struct vsp1_du_atomic_pipe_config cfg = { { 0, } };
struct rcar_du_crtc_state *state;
state = to_rcar_crtc_state(crtc->crtc.state);
cfg.crc = state->crc;
rcar_du_writeback_setup(crtc, &cfg.writeback);
vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
}
static const u32 rcar_du_vsp_formats[] = {
DRM_FORMAT_RGB332,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV16,
DRM_FORMAT_NV61,
DRM_FORMAT_YUV420,
DRM_FORMAT_YVU420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YVU422,
DRM_FORMAT_YUV444,
DRM_FORMAT_YVU444,
};
/*
 * Gen4 supports the same formats as above, plus the 2-10-10-10 RGB formats
 * and the Y210 and Y212 formats.
 */
static const u32 rcar_du_vsp_formats_gen4[] = {
DRM_FORMAT_RGB332,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGBX1010102,
DRM_FORMAT_RGBA1010102,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV16,
DRM_FORMAT_NV61,
DRM_FORMAT_YUV420,
DRM_FORMAT_YVU420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YVU422,
DRM_FORMAT_YUV444,
DRM_FORMAT_YVU444,
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
};
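/*
 * When per-pixel alpha blending is disabled (DRM_MODE_BLEND_PIXEL_NONE),
 * make the VSP ignore the alpha channel by replacing the alpha-carrying
 * formats with their "X" (padding) equivalents.
 */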
static u32 rcar_du_vsp_state_get_format(struct rcar_du_vsp_plane_state *state)
{
u32 fourcc = state->format->fourcc;
if (state->state.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) {
switch (fourcc) {
case DRM_FORMAT_ARGB1555:
fourcc = DRM_FORMAT_XRGB1555;
break;
case DRM_FORMAT_ARGB4444:
fourcc = DRM_FORMAT_XRGB4444;
break;
case DRM_FORMAT_ARGB8888:
fourcc = DRM_FORMAT_XRGB8888;
break;
case DRM_FORMAT_ABGR8888:
fourcc = DRM_FORMAT_XBGR8888;
break;
case DRM_FORMAT_BGRA8888:
fourcc = DRM_FORMAT_BGRX8888;
break;
case DRM_FORMAT_RGBA1010102:
fourcc = DRM_FORMAT_RGBX1010102;
break;
}
}
return fourcc;
}
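/*
 * Translate the DRM plane state into a VSP1 atomic configuration (source and
 * destination rectangles, per-plane DMA addresses, pixel format and blending
 * parameters) and push it to the VSP pipeline associated with the CRTC.
 */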
static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
{
struct rcar_du_vsp_plane_state *state =
to_rcar_vsp_plane_state(plane->plane.state);
struct rcar_du_crtc *crtc = to_rcar_crtc(state->state.crtc);
struct drm_framebuffer *fb = plane->plane.state->fb;
const struct rcar_du_format_info *format;
struct vsp1_du_atomic_config cfg = {
.pixelformat = 0,
.pitch = fb->pitches[0],
.alpha = state->state.alpha >> 8,
.zpos = state->state.zpos,
};
u32 fourcc = rcar_du_vsp_state_get_format(state);
unsigned int i;
cfg.src.left = state->state.src.x1 >> 16;
cfg.src.top = state->state.src.y1 >> 16;
cfg.src.width = drm_rect_width(&state->state.src) >> 16;
cfg.src.height = drm_rect_height(&state->state.src) >> 16;
cfg.dst.left = state->state.dst.x1;
cfg.dst.top = state->state.dst.y1;
cfg.dst.width = drm_rect_width(&state->state.dst);
cfg.dst.height = drm_rect_height(&state->state.dst);
for (i = 0; i < state->format->planes; ++i)
cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl)
+ fb->offsets[i];
format = rcar_du_format_info(fourcc);
cfg.pixelformat = format->v4l2;
cfg.premult = state->state.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI;
vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe,
plane->index, &cfg);
}
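/*
 * Map all colour planes of the framebuffer for DMA by the VSP, building one
 * sg_table per plane. The mapping is undone by rcar_du_vsp_unmap_fb().
 */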
int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
struct sg_table sg_tables[3])
{
struct rcar_du_device *rcdu = vsp->dev;
unsigned int i, j;
int ret;
for (i = 0; i < fb->format->num_planes; ++i) {
struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, i);
struct sg_table *sgt = &sg_tables[i];
if (gem->sgt) {
struct scatterlist *src;
struct scatterlist *dst;
/*
* If the GEM buffer has a scatter gather table, it has
* been imported from a dma-buf and has no physical
* address as it might not be physically contiguous.
* Copy the original scatter gather table to map it to
* the VSP.
*/
ret = sg_alloc_table(sgt, gem->sgt->orig_nents,
GFP_KERNEL);
if (ret)
goto fail;
src = gem->sgt->sgl;
dst = sgt->sgl;
for (j = 0; j < gem->sgt->orig_nents; ++j) {
sg_set_page(dst, sg_page(src), src->length,
src->offset);
src = sg_next(src);
dst = sg_next(dst);
}
} else {
ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr,
gem->dma_addr, gem->base.size);
if (ret)
goto fail;
}
ret = vsp1_du_map_sg(vsp->vsp, sgt);
if (ret) {
sg_free_table(sgt);
goto fail;
}
}
return 0;
fail:
while (i--) {
struct sg_table *sgt = &sg_tables[i];
vsp1_du_unmap_sg(vsp->vsp, sgt);
sg_free_table(sgt);
}
return ret;
}
static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
int ret;
/*
* There's no need to prepare (and unprepare) the framebuffer when the
* plane is not visible, as it will not be displayed.
*/
if (!state->visible)
return 0;
ret = rcar_du_vsp_map_fb(vsp, state->fb, rstate->sg_tables);
if (ret < 0)
return ret;
return drm_gem_plane_helper_prepare_fb(plane, state);
}
void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
struct sg_table sg_tables[3])
{
unsigned int i;
for (i = 0; i < fb->format->num_planes; ++i) {
struct sg_table *sgt = &sg_tables[i];
vsp1_du_unmap_sg(vsp->vsp, sgt);
sg_free_table(sgt);
}
}
static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
if (!state->visible)
return;
rcar_du_vsp_unmap_fb(vsp, state->fb, rstate->sg_tables);
}
static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(new_plane_state);
return __rcar_du_plane_atomic_check(plane, new_plane_state,
&rstate->format);
}
static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane);
struct rcar_du_crtc *crtc = to_rcar_crtc(old_state->crtc);
if (new_state->visible)
rcar_du_vsp_plane_setup(rplane);
else if (old_state->crtc)
vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe,
rplane->index, NULL);
}
static const struct drm_plane_helper_funcs rcar_du_vsp_plane_helper_funcs = {
.prepare_fb = rcar_du_vsp_plane_prepare_fb,
.cleanup_fb = rcar_du_vsp_plane_cleanup_fb,
.atomic_check = rcar_du_vsp_plane_atomic_check,
.atomic_update = rcar_du_vsp_plane_atomic_update,
};
static struct drm_plane_state *
rcar_du_vsp_plane_atomic_duplicate_state(struct drm_plane *plane)
{
struct rcar_du_vsp_plane_state *copy;
if (WARN_ON(!plane->state))
return NULL;
copy = kzalloc(sizeof(*copy), GFP_KERNEL);
if (copy == NULL)
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, ©->state);
return ©->state;
}
static void rcar_du_vsp_plane_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
__drm_atomic_helper_plane_destroy_state(state);
kfree(to_rcar_vsp_plane_state(state));
}
static void rcar_du_vsp_plane_reset(struct drm_plane *plane)
{
struct rcar_du_vsp_plane_state *state;
if (plane->state) {
rcar_du_vsp_plane_atomic_destroy_state(plane, plane->state);
plane->state = NULL;
}
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
return;
__drm_atomic_helper_plane_reset(plane, &state->state);
}
static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = rcar_du_vsp_plane_reset,
.destroy = drm_plane_cleanup,
.atomic_duplicate_state = rcar_du_vsp_plane_atomic_duplicate_state,
.atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state,
};
static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res)
{
struct rcar_du_vsp *vsp = res;
unsigned int i;
for (i = 0; i < vsp->num_planes; ++i) {
struct rcar_du_vsp_plane *plane = &vsp->planes[i];
drm_plane_cleanup(&plane->plane);
}
kfree(vsp->planes);
put_device(vsp->vsp);
}
int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
unsigned int crtcs)
{
struct rcar_du_device *rcdu = vsp->dev;
struct platform_device *pdev;
unsigned int num_crtcs = hweight32(crtcs);
unsigned int num_planes;
unsigned int i;
int ret;
/* Find the VSP device and initialize it. */
pdev = of_find_device_by_node(np);
if (!pdev)
return -ENXIO;
vsp->vsp = &pdev->dev;
ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_vsp_cleanup, vsp);
if (ret < 0)
return ret;
ret = vsp1_du_init(vsp->vsp);
if (ret < 0)
return ret;
num_planes = rcdu->info->num_rpf;
vsp->planes = kcalloc(num_planes, sizeof(*vsp->planes), GFP_KERNEL);
if (!vsp->planes)
return -ENOMEM;
for (i = 0; i < num_planes; ++i) {
enum drm_plane_type type = i < num_crtcs
? DRM_PLANE_TYPE_PRIMARY
: DRM_PLANE_TYPE_OVERLAY;
struct rcar_du_vsp_plane *plane = &vsp->planes[i];
unsigned int num_formats;
const u32 *formats;
if (rcdu->info->gen < 4) {
num_formats = ARRAY_SIZE(rcar_du_vsp_formats);
formats = rcar_du_vsp_formats;
} else {
num_formats = ARRAY_SIZE(rcar_du_vsp_formats_gen4);
formats = rcar_du_vsp_formats_gen4;
}
plane->vsp = vsp;
plane->index = i;
ret = drm_universal_plane_init(&rcdu->ddev, &plane->plane,
crtcs, &rcar_du_vsp_plane_funcs,
formats, num_formats,
NULL, type, NULL);
if (ret < 0)
return ret;
drm_plane_helper_add(&plane->plane,
&rcar_du_vsp_plane_helper_funcs);
drm_plane_create_alpha_property(&plane->plane);
drm_plane_create_zpos_property(&plane->plane, i, 0,
num_planes - 1);
drm_plane_create_blend_mode_property(&plane->plane,
BIT(DRM_MODE_BLEND_PIXEL_NONE) |
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
vsp->num_planes++;
}
return 0;
}
| linux-master | drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* R-Car Display Unit CRTCs
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
*/
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vblank.h>
#include "rcar_cmm.h"
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
#include "rcar_du_plane.h"
#include "rcar_du_regs.h"
#include "rcar_du_vsp.h"
#include "rcar_lvds.h"
#include "rcar_mipi_dsi.h"
static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
struct rcar_du_device *rcdu = rcrtc->dev;
return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
}
static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
{
struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
}
static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
{
struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
}
static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
{
struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
}
void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set)
{
struct rcar_du_device *rcdu = rcrtc->dev;
rcrtc->dsysr = (rcrtc->dsysr & ~clr) | set;
rcar_du_write(rcdu, rcrtc->mmio_offset + DSYSR, rcrtc->dsysr);
}
/* -----------------------------------------------------------------------------
* Hardware Setup
*/
struct dpll_info {
unsigned int output;
unsigned int fdpll;
unsigned int n;
unsigned int m;
};
static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
struct dpll_info *dpll,
unsigned long input,
unsigned long target)
{
unsigned long best_diff = (unsigned long)-1;
unsigned long diff;
unsigned int fdpll;
unsigned int m;
unsigned int n;
/*
	 *   fin                                 fvco        fout       fclkout
	 * in --> [1/M] --> |PD| -> [LPF] -> [VCO] -> [1/P] -+-> [1/FDPLL] -> out
	 *              +-> |  |                             |
	 *              |                                    |
	 *              +---------------- [1/N] <------------+
*
* fclkout = fvco / P / FDPLL -- (1)
*
* fin/M = fvco/P/N
*
* fvco = fin * P * N / M -- (2)
*
* (1) + (2) indicates
*
* fclkout = fin * N / M / FDPLL
*
* NOTES
* N : (n + 1)
* M : (m + 1)
* FDPLL : (fdpll + 1)
* P : 2
* 2kHz < fvco < 4096MHz
*
* To minimize the jitter,
* N : as large as possible
* M : as small as possible
*/
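	/*
	 * Illustrative example with hypothetical clock rates: for a 33 MHz
	 * DOTCLKIN input and a 148.5 MHz target, the loop below selects
	 * n = 53, m = 0, fdpll = 11, i.e.
	 * fclkout = 33 MHz * 54 / 1 / 12 = 148.5 MHz, with
	 * fout = 33 MHz * 54 = 1782 MHz within the allowed range.
	 */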
for (m = 0; m < 4; m++) {
for (n = 119; n > 38; n--) {
/*
* This code only runs on 64-bit architectures, the
* unsigned long type can thus be used for 64-bit
* computation. It will still compile without any
* warning on 32-bit architectures.
*
* To optimize calculations, use fout instead of fvco
* to verify the VCO frequency constraint.
*/
unsigned long fout = input * (n + 1) / (m + 1);
if (fout < 1000 || fout > 2048 * 1000 * 1000U)
continue;
for (fdpll = 1; fdpll < 32; fdpll++) {
unsigned long output;
output = fout / (fdpll + 1);
if (output >= 400 * 1000 * 1000)
continue;
diff = abs((long)output - (long)target);
if (best_diff > diff) {
best_diff = diff;
dpll->n = n;
dpll->m = m;
dpll->fdpll = fdpll;
dpll->output = output;
}
if (diff == 0)
goto done;
}
}
}
done:
dev_dbg(rcrtc->dev->dev,
"output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n",
dpll->output, dpll->fdpll, dpll->n, dpll->m, best_diff);
}
struct du_clk_params {
struct clk *clk;
unsigned long rate;
unsigned long diff;
u32 escr;
};
static void rcar_du_escr_divider(struct clk *clk, unsigned long target,
u32 escr, struct du_clk_params *params)
{
unsigned long rate;
unsigned long diff;
u32 div;
/*
* If the target rate has already been achieved perfectly we can't do
* better.
*/
if (params->diff == 0)
return;
/*
* Compute the input clock rate and internal divisor values to obtain
* the clock rate closest to the target frequency.
*/
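	/*
	 * For example, with hypothetical rates: if clk_round_rate() returns
	 * 297 MHz for a 148.5 MHz target, div = 1 is selected and the
	 * resulting rate is 297 MHz / 2 = 148.5 MHz exactly.
	 */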
rate = clk_round_rate(clk, target);
div = clamp(DIV_ROUND_CLOSEST(rate, target), 1UL, 64UL) - 1;
diff = abs(rate / (div + 1) - target);
/*
* Store the parameters if the resulting frequency is better than any
* previously calculated value.
*/
if (diff < params->diff) {
params->clk = clk;
params->rate = rate;
params->diff = diff;
params->escr = escr | div;
}
}
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
struct rcar_du_device *rcdu = rcrtc->dev;
unsigned long mode_clock = mode->clock * 1000;
unsigned int hdse_offset;
u32 dsmr;
u32 escr;
if (rcdu->info->dpll_mask & (1 << rcrtc->index)) {
unsigned long target = mode_clock;
struct dpll_info dpll = { 0 };
unsigned long extclk;
u32 dpllcr;
u32 div = 0;
/*
* DU channels that have a display PLL can't use the internal
* system clock, and have no internal clock divider.
*/
extclk = clk_get_rate(rcrtc->extclock);
rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);
dpllcr = DPLLCR_CODE | DPLLCR_CLKE
| DPLLCR_FDPLL(dpll.fdpll)
| DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
| DPLLCR_STBY;
if (rcrtc->index == 1)
dpllcr |= DPLLCR_PLCS1
| DPLLCR_INCS_DOTCLKIN1;
else
dpllcr |= DPLLCR_PLCS0
| DPLLCR_INCS_DOTCLKIN0;
rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
escr = ESCR_DCLKSEL_DCLKIN | div;
} else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) ||
rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) {
/*
* Use the external LVDS or DSI PLL output as the dot clock when
* outputting to the LVDS or DSI encoder on an SoC that supports
* this clock routing option. We use the clock directly in that
* case, without any additional divider.
*/
escr = ESCR_DCLKSEL_DCLKIN;
} else {
struct du_clk_params params = { .diff = (unsigned long)-1 };
rcar_du_escr_divider(rcrtc->clock, mode_clock,
ESCR_DCLKSEL_CLKS, ¶ms);
if (rcrtc->extclock)
rcar_du_escr_divider(rcrtc->extclock, mode_clock,
ESCR_DCLKSEL_DCLKIN, ¶ms);
dev_dbg(rcrtc->dev->dev, "mode clock %lu %s rate %lu\n",
mode_clock, params.clk == rcrtc->clock ? "cpg" : "ext",
params.rate);
clk_set_rate(params.clk, params.rate);
escr = params.escr;
}
/*
* The ESCR register only exists in DU channels that can output to an
	 * LVDS or DPAD, and the OTAR register in DU channels that can output
* to a DPAD.
*/
if ((rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs |
rcdu->info->routes[RCAR_DU_OUTPUT_DPAD1].possible_crtcs |
rcdu->info->routes[RCAR_DU_OUTPUT_LVDS0].possible_crtcs |
rcdu->info->routes[RCAR_DU_OUTPUT_LVDS1].possible_crtcs) &
BIT(rcrtc->index)) {
dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
}
if ((rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs |
rcdu->info->routes[RCAR_DU_OUTPUT_DPAD1].possible_crtcs) &
BIT(rcrtc->index))
rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
/* Signal polarities */
dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
| ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
| ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? DSMR_ODEV : 0)
| DSMR_DIPM_DISP | DSMR_CSPM;
rcar_du_crtc_write(rcrtc, DSMR, dsmr);
/*
* When the CMM is enabled, an additional offset of 25 pixels must be
* subtracted from the HDS (horizontal display start) and HDE
* (horizontal display end) registers.
*/
hdse_offset = 19;
if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2))
hdse_offset += 25;
/* Display timings */
rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start -
hdse_offset);
rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
mode->hdisplay - hdse_offset);
rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
mode->hsync_start - 1);
rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1);
rcar_du_crtc_write(rcrtc, VDSR, mode->crtc_vtotal -
mode->crtc_vsync_end - 2);
rcar_du_crtc_write(rcrtc, VDER, mode->crtc_vtotal -
mode->crtc_vsync_end +
mode->crtc_vdisplay - 2);
rcar_du_crtc_write(rcrtc, VSPR, mode->crtc_vtotal -
mode->crtc_vsync_end +
mode->crtc_vsync_start - 1);
rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);
rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1);
rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
}
static unsigned int plane_zpos(struct rcar_du_plane *plane)
{
return plane->plane.state->normalized_zpos;
}
static const struct rcar_du_format_info *
plane_format(struct rcar_du_plane *plane)
{
return to_rcar_plane_state(plane->plane.state)->format;
}
static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
{
struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
struct rcar_du_device *rcdu = rcrtc->dev;
unsigned int num_planes = 0;
unsigned int dptsr_planes;
unsigned int hwplanes = 0;
unsigned int prio = 0;
unsigned int i;
u32 dspr = 0;
for (i = 0; i < rcrtc->group->num_planes; ++i) {
struct rcar_du_plane *plane = &rcrtc->group->planes[i];
unsigned int j;
if (plane->plane.state->crtc != &rcrtc->crtc ||
!plane->plane.state->visible)
continue;
/* Insert the plane in the sorted planes array. */
for (j = num_planes++; j > 0; --j) {
if (plane_zpos(planes[j-1]) <= plane_zpos(plane))
break;
planes[j] = planes[j-1];
}
planes[j] = plane;
prio += plane_format(plane)->planes * 4;
}
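	/*
	 * Build the DSnPR value: pack the 1-based hardware plane indices into
	 * consecutive 4-bit fields, ordered by normalized zpos. Formats that
	 * use two hardware planes occupy two consecutive fields.
	 */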
for (i = 0; i < num_planes; ++i) {
struct rcar_du_plane *plane = planes[i];
struct drm_plane_state *state = plane->plane.state;
unsigned int index = to_rcar_plane_state(state)->hwindex;
prio -= 4;
dspr |= (index + 1) << prio;
hwplanes |= 1 << index;
if (plane_format(plane)->planes == 2) {
index = (index + 1) % 8;
prio -= 4;
dspr |= (index + 1) << prio;
hwplanes |= 1 << index;
}
}
/* If VSP+DU integration is enabled the plane assignment is fixed. */
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
if (rcdu->info->gen < 3) {
dspr = (rcrtc->index % 2) + 1;
hwplanes = 1 << (rcrtc->index % 2);
} else {
dspr = (rcrtc->index % 2) ? 3 : 1;
hwplanes = 1 << ((rcrtc->index % 2) ? 2 : 0);
}
}
/*
* Update the planes to display timing and dot clock generator
* associations.
*
* Updating the DPTSR register requires restarting the CRTC group,
* resulting in visible flicker. To mitigate the issue only update the
* association if needed by enabled planes. Planes being disabled will
* keep their current association.
*/
mutex_lock(&rcrtc->group->lock);
dptsr_planes = rcrtc->index % 2 ? rcrtc->group->dptsr_planes | hwplanes
: rcrtc->group->dptsr_planes & ~hwplanes;
if (dptsr_planes != rcrtc->group->dptsr_planes) {
rcar_du_group_write(rcrtc->group, DPTSR,
(dptsr_planes << 16) | dptsr_planes);
rcrtc->group->dptsr_planes = dptsr_planes;
if (rcrtc->group->used_crtcs)
rcar_du_group_restart(rcrtc->group);
}
/* Restart the group if plane sources have changed. */
if (rcrtc->group->need_restart)
rcar_du_group_restart(rcrtc->group);
mutex_unlock(&rcrtc->group->lock);
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
dspr);
}
/* -----------------------------------------------------------------------------
* Page Flip
*/
void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
{
struct drm_pending_vblank_event *event;
struct drm_device *dev = rcrtc->crtc.dev;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
event = rcrtc->event;
rcrtc->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
if (event == NULL)
return;
spin_lock_irqsave(&dev->event_lock, flags);
drm_crtc_send_vblank_event(&rcrtc->crtc, event);
wake_up(&rcrtc->flip_wait);
spin_unlock_irqrestore(&dev->event_lock, flags);
drm_crtc_vblank_put(&rcrtc->crtc);
}
static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
{
struct drm_device *dev = rcrtc->crtc.dev;
unsigned long flags;
bool pending;
spin_lock_irqsave(&dev->event_lock, flags);
pending = rcrtc->event != NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
return pending;
}
static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
{
struct rcar_du_device *rcdu = rcrtc->dev;
if (wait_event_timeout(rcrtc->flip_wait,
!rcar_du_crtc_page_flip_pending(rcrtc),
msecs_to_jiffies(50)))
return;
dev_warn(rcdu->dev, "page flip timeout\n");
rcar_du_crtc_finish_page_flip(rcrtc);
}
/* -----------------------------------------------------------------------------
* Color Management Module (CMM)
*/
static int rcar_du_cmm_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct drm_property_blob *drm_lut = state->gamma_lut;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct device *dev = rcrtc->dev->dev;
if (!drm_lut)
return 0;
/* We only accept fully populated LUT tables. */
if (drm_color_lut_size(drm_lut) != CM2_LUT_SIZE) {
dev_err(dev, "invalid gamma lut size: %zu bytes\n",
drm_lut->length);
return -EINVAL;
}
return 0;
}
static void rcar_du_cmm_setup(struct drm_crtc *crtc)
{
struct drm_property_blob *drm_lut = crtc->state->gamma_lut;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_cmm_config cmm_config = {};
if (!rcrtc->cmm)
return;
if (drm_lut)
cmm_config.lut.table = (struct drm_color_lut *)drm_lut->data;
rcar_cmm_setup(rcrtc->cmm, &cmm_config);
}
/* -----------------------------------------------------------------------------
* Start/Stop and Suspend/Resume
*/
static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc)
{
/* Set display off and background to black */
rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));
/* Configure display timings and output routing */
rcar_du_crtc_set_display_timing(rcrtc);
rcar_du_group_set_routing(rcrtc->group);
/* Start with all planes disabled. */
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
/* Enable the VSP compositor. */
if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_enable(rcrtc);
/* Turn vertical blanking interrupt reporting on. */
drm_crtc_vblank_on(&rcrtc->crtc);
}
static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
{
int ret;
/*
* Guard against double-get, as the function is called from both the
* .atomic_enable() and .atomic_begin() handlers.
*/
if (rcrtc->initialized)
return 0;
ret = clk_prepare_enable(rcrtc->clock);
if (ret < 0)
return ret;
ret = clk_prepare_enable(rcrtc->extclock);
if (ret < 0)
goto error_clock;
ret = rcar_du_group_get(rcrtc->group);
if (ret < 0)
goto error_group;
rcar_du_crtc_setup(rcrtc);
rcrtc->initialized = true;
return 0;
error_group:
clk_disable_unprepare(rcrtc->extclock);
error_clock:
clk_disable_unprepare(rcrtc->clock);
return ret;
}
static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
{
rcar_du_group_put(rcrtc->group);
clk_disable_unprepare(rcrtc->extclock);
clk_disable_unprepare(rcrtc->clock);
rcrtc->initialized = false;
}
static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
bool interlaced;
/*
* Select master sync mode. This enables display operation in master
* sync mode (with the HSYNC and VSYNC signals configured as outputs and
* actively driven).
*/
interlaced = rcrtc->crtc.mode.flags & DRM_MODE_FLAG_INTERLACE;
rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK | DSYSR_SCM_MASK,
(interlaced ? DSYSR_SCM_INT_VIDEO : 0) |
DSYSR_TVM_MASTER);
rcar_du_group_start_stop(rcrtc->group, true);
}
static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
{
struct rcar_du_device *rcdu = rcrtc->dev;
struct drm_crtc *crtc = &rcrtc->crtc;
u32 status;
/* Make sure vblank interrupts are enabled. */
drm_crtc_vblank_get(crtc);
/*
* Disable planes and calculate how many vertical blanking interrupts we
* have to wait for. If a vertical blanking interrupt has been triggered
* but not processed yet, we don't know whether it occurred before or
* after the planes got disabled. We thus have to wait for two vblank
* interrupts in that case.
*/
spin_lock_irq(&rcrtc->vblank_lock);
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
status = rcar_du_crtc_read(rcrtc, DSSR);
rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1;
spin_unlock_irq(&rcrtc->vblank_lock);
if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
msecs_to_jiffies(100)))
dev_warn(rcdu->dev, "vertical blanking timeout\n");
drm_crtc_vblank_put(crtc);
}
static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
/*
* Disable all planes and wait for the change to take effect. This is
* required as the plane enable registers are updated on vblank, and no
* vblank will occur once the CRTC is stopped. Disabling planes when
* starting the CRTC thus wouldn't be enough as it would start scanning
* out immediately from old frame buffers until the next vblank.
*
* This increases the CRTC stop delay, especially when multiple CRTCs
* are stopped in one operation as we now wait for one vblank per CRTC.
* Whether this can be improved needs to be researched.
*/
rcar_du_crtc_disable_planes(rcrtc);
/*
* Disable vertical blanking interrupt reporting. We first need to wait
* for page flip completion before stopping the CRTC as userspace
* expects page flips to eventually complete.
*/
rcar_du_crtc_wait_page_flip(rcrtc);
drm_crtc_vblank_off(crtc);
/* Disable the VSP compositor. */
if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_disable(rcrtc);
if (rcrtc->cmm)
rcar_cmm_disable(rcrtc->cmm);
/*
* Select switch sync mode. This stops display operation and configures
* the HSYNC and VSYNC signals as inputs.
*
* TODO: Find another way to stop the display for DUs that don't support
* TVM sync.
*/
if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_TVM_SYNC))
rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK,
DSYSR_TVM_SWITCH);
rcar_du_group_start_stop(rcrtc->group, false);
}
/* -----------------------------------------------------------------------------
* CRTC Functions
*/
static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc_state);
struct drm_encoder *encoder;
int ret;
ret = rcar_du_cmm_check(crtc, crtc_state);
if (ret)
return ret;
/* Store the routes from the CRTC output to the DU outputs. */
rstate->outputs = 0;
drm_for_each_encoder_mask(encoder, crtc->dev,
crtc_state->encoder_mask) {
struct rcar_du_encoder *renc;
/* Skip the writeback encoder. */
if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
continue;
renc = to_rcar_encoder(encoder);
rstate->outputs |= BIT(renc->output);
}
return 0;
}
static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
struct rcar_du_device *rcdu = rcrtc->dev;
if (rcrtc->cmm)
rcar_cmm_enable(rcrtc->cmm);
rcar_du_crtc_get(rcrtc);
/*
* On D3/E3 the dot clock is provided by the LVDS encoder attached to
* the DU channel. We need to enable its clock output explicitly before
* starting the CRTC, as the bridge hasn't been enabled by the atomic
* helpers yet.
*/
if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
bool dot_clk_only = rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0);
struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
const struct drm_display_mode *mode =
&crtc->state->adjusted_mode;
rcar_lvds_pclk_enable(bridge, mode->clock * 1000, dot_clk_only);
}
/*
* Similarly to LVDS, on V3U the dot clock is provided by the DSI
* encoder, and we need to enable the DSI clocks before enabling the CRTC.
*/
if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) &&
(rstate->outputs &
(BIT(RCAR_DU_OUTPUT_DSI0) | BIT(RCAR_DU_OUTPUT_DSI1)))) {
struct drm_bridge *bridge = rcdu->dsi[rcrtc->index];
rcar_mipi_dsi_pclk_enable(bridge, state);
}
rcar_du_crtc_start(rcrtc);
/*
* TODO: The chip manual indicates that CMM tables should be written
* after the DU channel has been activated. Investigate the impact
* of this restriction on the first displayed frame.
*/
rcar_du_cmm_setup(crtc);
}
static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state);
struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_crtc_stop(rcrtc);
rcar_du_crtc_put(rcrtc);
if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
bool dot_clk_only = rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0);
struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
/*
* Disable the LVDS clock output, see
* rcar_du_crtc_atomic_enable(). When the LVDS output is used,
* this also disables the LVDS encoder.
*/
rcar_lvds_pclk_disable(bridge, dot_clk_only);
}
if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) &&
(rstate->outputs &
(BIT(RCAR_DU_OUTPUT_DSI0) | BIT(RCAR_DU_OUTPUT_DSI1)))) {
struct drm_bridge *bridge = rcdu->dsi[rcrtc->index];
/*
* Disable the DSI clock output, see
* rcar_du_crtc_atomic_enable().
*/
rcar_mipi_dsi_pclk_disable(bridge);
}
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
}
static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
WARN_ON(!crtc->state->enable);
/*
* If a mode set is in progress we can be called with the CRTC disabled.
* We thus need to first get and setup the CRTC in order to configure
* planes. We must *not* put the CRTC in .atomic_flush(), as it must be
* kept awake until the .atomic_enable() call that will follow. The get
* operation in .atomic_enable() will in that case be a no-op, and the
* CRTC will be put later in .atomic_disable().
*
* If a mode set is not in progress the CRTC is enabled, and the
* following get call will be a no-op. There is thus no need to balance
* it in .atomic_flush() either.
*/
rcar_du_crtc_get(rcrtc);
/* If the active state changed, we let .atomic_enable handle CMM. */
if (crtc->state->color_mgmt_changed && !crtc->state->active_changed)
rcar_du_cmm_setup(crtc);
if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_begin(rcrtc);
}
static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct drm_device *dev = rcrtc->crtc.dev;
unsigned long flags;
rcar_du_crtc_update_planes(rcrtc);
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
spin_lock_irqsave(&dev->event_lock, flags);
rcrtc->event = crtc->state->event;
crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_flush(rcrtc);
}
static enum drm_mode_status
rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_device *rcdu = rcrtc->dev;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
unsigned int min_sync_porch;
unsigned int vbp;
if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
return MODE_NO_INTERLACE;
/*
* The hardware requires a minimum combined horizontal sync and back
* porch of 20 pixels (when CMM isn't used) or 45 pixels (when CMM is
* used), and a minimum vertical back porch of 3 lines.
*/
min_sync_porch = 20;
if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2))
min_sync_porch += 25;
if (mode->htotal - mode->hsync_start < min_sync_porch)
return MODE_HBLANK_NARROW;
vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1);
if (vbp < 3)
return MODE_VBLANK_NARROW;
return MODE_OK;
}
static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
.atomic_check = rcar_du_crtc_atomic_check,
.atomic_begin = rcar_du_crtc_atomic_begin,
.atomic_flush = rcar_du_crtc_atomic_flush,
.atomic_enable = rcar_du_crtc_atomic_enable,
.atomic_disable = rcar_du_crtc_atomic_disable,
.mode_valid = rcar_du_crtc_mode_valid,
};
static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc)
{
struct rcar_du_device *rcdu = rcrtc->dev;
const char **sources;
unsigned int count;
int i = -1;
/* CRC available only on Gen3 HW. */
if (rcdu->info->gen < 3)
return;
/* Reserve 1 for "auto" source. */
count = rcrtc->vsp->num_planes + 1;
sources = kmalloc_array(count, sizeof(*sources), GFP_KERNEL);
if (!sources)
return;
sources[0] = kstrdup("auto", GFP_KERNEL);
if (!sources[0])
goto error;
for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
struct drm_plane *plane = &rcrtc->vsp->planes[i].plane;
char name[16];
sprintf(name, "plane%u", plane->base.id);
sources[i + 1] = kstrdup(name, GFP_KERNEL);
if (!sources[i + 1])
goto error;
}
rcrtc->sources = sources;
rcrtc->sources_count = count;
return;
error:
while (i >= 0) {
kfree(sources[i]);
i--;
}
kfree(sources);
}
static void rcar_du_crtc_crc_cleanup(struct rcar_du_crtc *rcrtc)
{
unsigned int i;
if (!rcrtc->sources)
return;
for (i = 0; i < rcrtc->sources_count; i++)
kfree(rcrtc->sources[i]);
kfree(rcrtc->sources);
rcrtc->sources = NULL;
rcrtc->sources_count = 0;
}
static struct drm_crtc_state *
rcar_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
struct rcar_du_crtc_state *state;
struct rcar_du_crtc_state *copy;
if (WARN_ON(!crtc->state))
return NULL;
state = to_rcar_crtc_state(crtc->state);
copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
if (copy == NULL)
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, ©->state);
return ©->state;
}
static void rcar_du_crtc_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
__drm_atomic_helper_crtc_destroy_state(state);
kfree(to_rcar_crtc_state(state));
}
static void rcar_du_crtc_cleanup(struct drm_crtc *crtc)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
rcar_du_crtc_crc_cleanup(rcrtc);
return drm_crtc_cleanup(crtc);
}
static void rcar_du_crtc_reset(struct drm_crtc *crtc)
{
struct rcar_du_crtc_state *state;
if (crtc->state) {
rcar_du_crtc_atomic_destroy_state(crtc, crtc->state);
crtc->state = NULL;
}
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
return;
state->crc.source = VSP1_DU_CRC_NONE;
state->crc.index = 0;
__drm_atomic_helper_crtc_reset(crtc, &state->state);
}
static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
rcrtc->vblank_enable = true;
return 0;
}
static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
rcrtc->vblank_enable = false;
}
static int rcar_du_crtc_parse_crc_source(struct rcar_du_crtc *rcrtc,
const char *source_name,
enum vsp1_du_crc_source *source)
{
unsigned int index;
int ret;
/*
* Parse the source name. Supported values are "plane%u" to compute the
* CRC on an input plane (%u is the plane ID), and "auto" to compute the
* CRC on the composer (VSP) output.
*/
if (!source_name) {
*source = VSP1_DU_CRC_NONE;
return 0;
} else if (!strcmp(source_name, "auto")) {
*source = VSP1_DU_CRC_OUTPUT;
return 0;
} else if (strstarts(source_name, "plane")) {
unsigned int i;
*source = VSP1_DU_CRC_PLANE;
ret = kstrtouint(source_name + strlen("plane"), 10, &index);
if (ret < 0)
return ret;
for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
if (index == rcrtc->vsp->planes[i].plane.base.id)
return i;
}
}
return -EINVAL;
}
static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *source_name,
size_t *values_cnt)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
enum vsp1_du_crc_source source;
if (rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source) < 0) {
DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
return -EINVAL;
}
*values_cnt = 1;
return 0;
}
static const char *const *
rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc, size_t *count)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
*count = rcrtc->sources_count;
return rcrtc->sources;
}
static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
const char *source_name)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct drm_modeset_acquire_ctx ctx;
struct drm_crtc_state *crtc_state;
struct drm_atomic_state *state;
enum vsp1_du_crc_source source;
unsigned int index;
int ret;
ret = rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source);
if (ret < 0)
return ret;
index = ret;
/* Perform an atomic commit to set the CRC source. */
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(crtc->dev);
if (!state) {
ret = -ENOMEM;
goto unlock;
}
state->acquire_ctx = &ctx;
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (!IS_ERR(crtc_state)) {
struct rcar_du_crtc_state *rcrtc_state;
rcrtc_state = to_rcar_crtc_state(crtc_state);
rcrtc_state->crc.source = source;
rcrtc_state->crc.index = index;
ret = drm_atomic_commit(state);
} else {
ret = PTR_ERR(crtc_state);
}
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
drm_atomic_state_put(state);
unlock:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
static const struct drm_crtc_funcs crtc_funcs_gen2 = {
.reset = rcar_du_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
.atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
.enable_vblank = rcar_du_crtc_enable_vblank,
.disable_vblank = rcar_du_crtc_disable_vblank,
};
static const struct drm_crtc_funcs crtc_funcs_gen3 = {
.reset = rcar_du_crtc_reset,
.destroy = rcar_du_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
.atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
.enable_vblank = rcar_du_crtc_enable_vblank,
.disable_vblank = rcar_du_crtc_disable_vblank,
.set_crc_source = rcar_du_crtc_set_crc_source,
.verify_crc_source = rcar_du_crtc_verify_crc_source,
.get_crc_sources = rcar_du_crtc_get_crc_sources,
};
/* -----------------------------------------------------------------------------
* Interrupt Handling
*/
static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
struct rcar_du_crtc *rcrtc = arg;
struct rcar_du_device *rcdu = rcrtc->dev;
irqreturn_t ret = IRQ_NONE;
u32 status;
spin_lock(&rcrtc->vblank_lock);
status = rcar_du_crtc_read(rcrtc, DSSR);
rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
if (status & DSSR_VBK) {
/*
* Wake up the vblank wait if the counter reaches 0. This must
* be protected by the vblank_lock to avoid races in
* rcar_du_crtc_disable_planes().
*/
if (rcrtc->vblank_count) {
if (--rcrtc->vblank_count == 0)
wake_up(&rcrtc->vblank_wait);
}
}
spin_unlock(&rcrtc->vblank_lock);
if (status & DSSR_VBK) {
if (rcdu->info->gen < 3) {
drm_crtc_handle_vblank(&rcrtc->crtc);
rcar_du_crtc_finish_page_flip(rcrtc);
}
ret = IRQ_HANDLED;
}
return ret;
}
/* -----------------------------------------------------------------------------
* Initialization
*/
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
unsigned int hwindex)
{
static const unsigned int mmio_offsets[] = {
DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET, DU3_REG_OFFSET
};
struct rcar_du_device *rcdu = rgrp->dev;
struct platform_device *pdev = to_platform_device(rcdu->dev);
struct rcar_du_crtc *rcrtc = &rcdu->crtcs[swindex];
struct drm_crtc *crtc = &rcrtc->crtc;
struct drm_plane *primary;
unsigned int irqflags;
struct clk *clk;
char clk_name[9];
char *name;
int irq;
int ret;
/* Get the CRTC clock and the optional external clock. */
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_CLOCK)) {
sprintf(clk_name, "du.%u", hwindex);
name = clk_name;
} else {
name = NULL;
}
rcrtc->clock = devm_clk_get(rcdu->dev, name);
if (IS_ERR(rcrtc->clock)) {
dev_err(rcdu->dev, "no clock for DU channel %u\n", hwindex);
return PTR_ERR(rcrtc->clock);
}
sprintf(clk_name, "dclkin.%u", hwindex);
clk = devm_clk_get(rcdu->dev, clk_name);
if (!IS_ERR(clk)) {
rcrtc->extclock = clk;
} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (rcdu->info->dpll_mask & BIT(hwindex)) {
/*
* DU channels that have a display PLL can't use the internal
* system clock and thus require an external clock.
*/
ret = PTR_ERR(clk);
dev_err(rcdu->dev, "can't get dclkin.%u: %d\n", hwindex, ret);
return ret;
}
init_waitqueue_head(&rcrtc->flip_wait);
init_waitqueue_head(&rcrtc->vblank_wait);
spin_lock_init(&rcrtc->vblank_lock);
rcrtc->dev = rcdu;
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[hwindex];
rcrtc->index = hwindex;
rcrtc->dsysr = rcrtc->index % 2 ? 0 : DSYSR_DRES;
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_TVM_SYNC))
rcrtc->dsysr |= DSYSR_TVM_TVSYNC;
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane;
else
primary = &rgrp->planes[swindex % 2].plane;
ret = drm_crtc_init_with_planes(&rcdu->ddev, crtc, primary, NULL,
rcdu->info->gen <= 2 ?
&crtc_funcs_gen2 : &crtc_funcs_gen3,
NULL);
if (ret < 0)
return ret;
/* CMM might be disabled for this CRTC. */
if (rcdu->cmms[swindex]) {
rcrtc->cmm = rcdu->cmms[swindex];
rgrp->cmms_mask |= BIT(hwindex % 2);
drm_mode_crtc_set_gamma_size(crtc, CM2_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, CM2_LUT_SIZE);
}
drm_crtc_helper_add(crtc, &crtc_helper_funcs);
/* Register the interrupt handler. */
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ)) {
		/* The IRQs are associated with the CRTC (sw)index. */
irq = platform_get_irq(pdev, swindex);
irqflags = 0;
} else {
irq = platform_get_irq(pdev, 0);
irqflags = IRQF_SHARED;
}
if (irq < 0) {
dev_err(rcdu->dev, "no IRQ for CRTC %u\n", swindex);
return irq;
}
ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
dev_name(rcdu->dev), rcrtc);
if (ret < 0) {
dev_err(rcdu->dev,
"failed to register IRQ for CRTC %u\n", swindex);
return ret;
}
rcar_du_crtc_crc_init(rcrtc);
return 0;
}
| linux-master | drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* RZ/G2L MIPI DSI Encoder Driver
*
* Copyright (C) 2022 Renesas Electronics Corporation
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include "rzg2l_mipi_dsi_regs.h"
struct rzg2l_mipi_dsi {
struct device *dev;
void __iomem *mmio;
struct reset_control *rstc;
struct reset_control *arstc;
struct reset_control *prstc;
struct mipi_dsi_host host;
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
struct clk *vclk;
enum mipi_dsi_pixel_format format;
unsigned int num_data_lanes;
unsigned int lanes;
unsigned long mode_flags;
};
static inline struct rzg2l_mipi_dsi *
bridge_to_rzg2l_mipi_dsi(struct drm_bridge *bridge)
{
return container_of(bridge, struct rzg2l_mipi_dsi, bridge);
}
static inline struct rzg2l_mipi_dsi *
host_to_rzg2l_mipi_dsi(struct mipi_dsi_host *host)
{
return container_of(host, struct rzg2l_mipi_dsi, host);
}
struct rzg2l_mipi_dsi_timings {
unsigned long hsfreq_max;
u32 t_init;
u32 tclk_prepare;
u32 ths_prepare;
u32 tclk_zero;
u32 tclk_pre;
u32 tclk_post;
u32 tclk_trail;
u32 ths_zero;
u32 ths_trail;
u32 ths_exit;
u32 tlpx;
};
static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
{
.hsfreq_max = 80000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 13,
.tclk_zero = 33,
.tclk_pre = 24,
.tclk_post = 94,
.tclk_trail = 10,
.ths_zero = 23,
.ths_trail = 17,
.ths_exit = 13,
.tlpx = 6,
},
{
.hsfreq_max = 125000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 12,
.tclk_zero = 33,
.tclk_pre = 15,
.tclk_post = 94,
.tclk_trail = 10,
.ths_zero = 23,
.ths_trail = 17,
.ths_exit = 13,
.tlpx = 6,
},
{
.hsfreq_max = 250000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 12,
.tclk_zero = 33,
.tclk_pre = 13,
.tclk_post = 94,
.tclk_trail = 10,
.ths_zero = 23,
.ths_trail = 16,
.ths_exit = 13,
.tlpx = 6,
},
{
.hsfreq_max = 360000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 10,
.tclk_zero = 33,
.tclk_pre = 4,
.tclk_post = 35,
.tclk_trail = 7,
.ths_zero = 16,
.ths_trail = 9,
.ths_exit = 13,
.tlpx = 6,
},
{
.hsfreq_max = 720000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 9,
.tclk_zero = 33,
.tclk_pre = 4,
.tclk_post = 35,
.tclk_trail = 7,
.ths_zero = 16,
.ths_trail = 9,
.ths_exit = 13,
.tlpx = 6,
},
{
.hsfreq_max = 1500000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 9,
.tclk_zero = 33,
.tclk_pre = 4,
.tclk_post = 35,
.tclk_trail = 7,
.ths_zero = 16,
.ths_trail = 9,
.ths_exit = 13,
.tlpx = 6,
},
};
static void rzg2l_mipi_dsi_phy_write(struct rzg2l_mipi_dsi *dsi, u32 reg, u32 data)
{
iowrite32(data, dsi->mmio + reg);
}
static void rzg2l_mipi_dsi_link_write(struct rzg2l_mipi_dsi *dsi, u32 reg, u32 data)
{
iowrite32(data, dsi->mmio + LINK_REG_OFFSET + reg);
}
static u32 rzg2l_mipi_dsi_phy_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
{
return ioread32(dsi->mmio + reg);
}
static u32 rzg2l_mipi_dsi_link_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
{
return ioread32(dsi->mmio + LINK_REG_OFFSET + reg);
}
/* -----------------------------------------------------------------------------
* Hardware Setup
*/
static int rzg2l_mipi_dsi_dphy_init(struct rzg2l_mipi_dsi *dsi,
unsigned long hsfreq)
{
const struct rzg2l_mipi_dsi_timings *dphy_timings;
unsigned int i;
u32 dphyctrl0;
u32 dphytim0;
u32 dphytim1;
u32 dphytim2;
u32 dphytim3;
int ret;
/* All DSI global operation timings are set to the recommended settings */
for (i = 0; i < ARRAY_SIZE(rzg2l_mipi_dsi_global_timings); ++i) {
dphy_timings = &rzg2l_mipi_dsi_global_timings[i];
if (hsfreq <= dphy_timings->hsfreq_max)
break;
}
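/*
 * The first entry whose hsfreq_max covers the requested rate is used;
 * if hsfreq exceeds every hsfreq_max, the loop leaves dphy_timings
 * pointing at the last (fastest) entry as a fallback.
 */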
/* Initializing DPHY before accessing LINK */
dphyctrl0 = DSIDPHYCTRL0_CAL_EN_HSRX_OFS | DSIDPHYCTRL0_CMN_MASTER_EN |
DSIDPHYCTRL0_RE_VDD_DETVCCQLV18 | DSIDPHYCTRL0_EN_BGR;
rzg2l_mipi_dsi_phy_write(dsi, DSIDPHYCTRL0, dphyctrl0);
usleep_range(20, 30);
dphyctrl0 |= DSIDPHYCTRL0_EN_LDO1200;
rzg2l_mipi_dsi_phy_write(dsi, DSIDPHYCTRL0, dphyctrl0);
usleep_range(10, 20);
dphytim0 = DSIDPHYTIM0_TCLK_MISS(0) |
DSIDPHYTIM0_T_INIT(dphy_timings->t_init);
dphytim1 = DSIDPHYTIM1_THS_PREPARE(dphy_timings->ths_prepare) |
DSIDPHYTIM1_TCLK_PREPARE(dphy_timings->tclk_prepare) |
DSIDPHYTIM1_THS_SETTLE(0) |
DSIDPHYTIM1_TCLK_SETTLE(0);
dphytim2 = DSIDPHYTIM2_TCLK_TRAIL(dphy_timings->tclk_trail) |
DSIDPHYTIM2_TCLK_POST(dphy_timings->tclk_post) |
DSIDPHYTIM2_TCLK_PRE(dphy_timings->tclk_pre) |
DSIDPHYTIM2_TCLK_ZERO(dphy_timings->tclk_zero);
dphytim3 = DSIDPHYTIM3_TLPX(dphy_timings->tlpx) |
DSIDPHYTIM3_THS_EXIT(dphy_timings->ths_exit) |
DSIDPHYTIM3_THS_TRAIL(dphy_timings->ths_trail) |
DSIDPHYTIM3_THS_ZERO(dphy_timings->ths_zero);
rzg2l_mipi_dsi_phy_write(dsi, DSIDPHYTIM0, dphytim0);
rzg2l_mipi_dsi_phy_write(dsi, DSIDPHYTIM1, dphytim1);
rzg2l_mipi_dsi_phy_write(dsi, DSIDPHYTIM2, dphytim2);
rzg2l_mipi_dsi_phy_write(dsi, DSIDPHYTIM3, dphytim3);
ret = reset_control_deassert(dsi->rstc);
if (ret < 0)
return ret;
udelay(1);
return 0;
}
static void rzg2l_mipi_dsi_dphy_exit(struct rzg2l_mipi_dsi *dsi)
{
u32 dphyctrl0;
dphyctrl0 = rzg2l_mipi_dsi_phy_read(dsi, DSIDPHYCTRL0);
dphyctrl0 &= ~(DSIDPHYCTRL0_EN_LDO1200 | DSIDPHYCTRL0_EN_BGR);
rzg2l_mipi_dsi_phy_write(dsi, DSIDPHYCTRL0, dphyctrl0);
reset_control_assert(dsi->rstc);
}
static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
const struct drm_display_mode *mode)
{
unsigned long hsfreq;
unsigned int bpp;
u32 txsetr;
u32 clstptsetr;
u32 lptrnstsetr;
u32 clkkpt;
u32 clkbfht;
u32 clkstpt;
u32 golpbkt;
int ret;
/*
* Relationship between hsclk and vclk must follow
* vclk * bpp = hsclk * 8 * lanes
* where vclk: video clock (Hz)
* bpp: video pixel bit depth
* hsclk: DSI HS Byte clock frequency (Hz)
* lanes: number of data lanes
*
* hsclk(bit) = hsclk(byte) * 8
*/
bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
hsfreq = (mode->clock * bpp * 8) / (8 * dsi->lanes);
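/*
 * For illustration (values not taken from the datasheet): with a
 * 148.5 MHz pixel clock, 24 bpp and 4 data lanes, the formula above
 * gives hsfreq = 148500 * 24 * 8 / (8 * 4) = 891000 kHz, i.e.
 * 891 Mbps per lane, matching the maximum rate noted further below.
 */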
ret = pm_runtime_resume_and_get(dsi->dev);
if (ret < 0)
return ret;
clk_set_rate(dsi->vclk, mode->clock * 1000);
ret = rzg2l_mipi_dsi_dphy_init(dsi, hsfreq);
if (ret < 0)
goto err_phy;
/* Enable Data lanes and Clock lanes */
txsetr = TXSETR_DLEN | TXSETR_NUMLANEUSE(dsi->lanes - 1) | TXSETR_CLEN;
rzg2l_mipi_dsi_link_write(dsi, TXSETR, txsetr);
/*
* Global timing characteristics depend on the high speed clock
* frequency. The MIPI DSI-IF currently supports at most FHD@60 with:
* - video clock: 148.5 MHz
* - bpp: up to 24 bpp
* - data lanes: up to 4 lanes
* Therefore the maximum hsclk is 891 Mbps.
*/
if (hsfreq > 445500) {
clkkpt = 12;
clkbfht = 15;
clkstpt = 48;
golpbkt = 75;
} else if (hsfreq > 250000) {
clkkpt = 7;
clkbfht = 8;
clkstpt = 27;
golpbkt = 40;
} else {
clkkpt = 8;
clkbfht = 6;
clkstpt = 24;
golpbkt = 29;
}
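/*
 * Illustrative: for the maximum 891 Mbps (891000 kHz) case noted above
 * the first branch applies; slower configurations fall through to one
 * of the other two parameter sets.
 */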
clstptsetr = CLSTPTSETR_CLKKPT(clkkpt) | CLSTPTSETR_CLKBFHT(clkbfht) |
CLSTPTSETR_CLKSTPT(clkstpt);
rzg2l_mipi_dsi_link_write(dsi, CLSTPTSETR, clstptsetr);
lptrnstsetr = LPTRNSTSETR_GOLPBKT(golpbkt);
rzg2l_mipi_dsi_link_write(dsi, LPTRNSTSETR, lptrnstsetr);
return 0;
err_phy:
rzg2l_mipi_dsi_dphy_exit(dsi);
pm_runtime_put(dsi->dev);
return ret;
}
static void rzg2l_mipi_dsi_stop(struct rzg2l_mipi_dsi *dsi)
{
rzg2l_mipi_dsi_dphy_exit(dsi);
pm_runtime_put(dsi->dev);
}
static void rzg2l_mipi_dsi_set_display_timing(struct rzg2l_mipi_dsi *dsi,
const struct drm_display_mode *mode)
{
u32 vich1ppsetr;
u32 vich1vssetr;
u32 vich1vpsetr;
u32 vich1hssetr;
u32 vich1hpsetr;
int dsi_format;
u32 delay[2];
u8 index;
/* Configuration for Pixel Packet */
dsi_format = mipi_dsi_pixel_format_to_bpp(dsi->format);
switch (dsi_format) {
case 24:
vich1ppsetr = VICH1PPSETR_DT_RGB24;
break;
case 18:
vich1ppsetr = VICH1PPSETR_DT_RGB18;
break;
}
if ((dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) &&
!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST))
vich1ppsetr |= VICH1PPSETR_TXESYNC_PULSE;
rzg2l_mipi_dsi_link_write(dsi, VICH1PPSETR, vich1ppsetr);
/* Configuration for Video Parameters */
vich1vssetr = VICH1VSSETR_VACTIVE(mode->vdisplay) |
VICH1VSSETR_VSA(mode->vsync_end - mode->vsync_start);
vich1vssetr |= (mode->flags & DRM_MODE_FLAG_PVSYNC) ?
VICH1VSSETR_VSPOL_HIGH : VICH1VSSETR_VSPOL_LOW;
vich1vpsetr = VICH1VPSETR_VFP(mode->vsync_start - mode->vdisplay) |
VICH1VPSETR_VBP(mode->vtotal - mode->vsync_end);
vich1hssetr = VICH1HSSETR_HACTIVE(mode->hdisplay) |
VICH1HSSETR_HSA(mode->hsync_end - mode->hsync_start);
vich1hssetr |= (mode->flags & DRM_MODE_FLAG_PHSYNC) ?
VICH1HSSETR_HSPOL_HIGH : VICH1HSSETR_HSPOL_LOW;
vich1hpsetr = VICH1HPSETR_HFP(mode->hsync_start - mode->hdisplay) |
VICH1HPSETR_HBP(mode->htotal - mode->hsync_end);
rzg2l_mipi_dsi_link_write(dsi, VICH1VSSETR, vich1vssetr);
rzg2l_mipi_dsi_link_write(dsi, VICH1VPSETR, vich1vpsetr);
rzg2l_mipi_dsi_link_write(dsi, VICH1HSSETR, vich1hssetr);
rzg2l_mipi_dsi_link_write(dsi, VICH1HPSETR, vich1hpsetr);
/*
* Configuration of the delay value.
* The delay is selected from two ranges of video clock;
* 74.25 MHz is the video clock of HD@60p or FHD@30p.
*/
if (mode->clock > 74250) {
delay[0] = 231;
delay[1] = 216;
} else {
delay[0] = 220;
delay[1] = 212;
}
if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
index = 0;
else
index = 1;
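/*
 * Example derived from the values above: FHD@60 (148.5 MHz > 74.25 MHz)
 * with a continuous DSI clock selects delay[1] = 216, while the same
 * mode with a non-continuous clock selects delay[0] = 231.
 */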
rzg2l_mipi_dsi_link_write(dsi, VICH1SET1R,
VICH1SET1R_DLY(delay[index]));
}
static int rzg2l_mipi_dsi_start_hs_clock(struct rzg2l_mipi_dsi *dsi)
{
bool is_clk_cont;
u32 hsclksetr;
u32 status;
int ret;
is_clk_cont = !(dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS);
/* Start HS clock */
hsclksetr = HSCLKSETR_HSCLKRUN_HS | (is_clk_cont ?
HSCLKSETR_HSCLKMODE_CONT :
HSCLKSETR_HSCLKMODE_NON_CONT);
rzg2l_mipi_dsi_link_write(dsi, HSCLKSETR, hsclksetr);
if (is_clk_cont) {
ret = read_poll_timeout(rzg2l_mipi_dsi_link_read, status,
status & PLSR_CLLP2HS,
2000, 20000, false, dsi, PLSR);
if (ret < 0) {
dev_err(dsi->dev, "failed to start HS clock\n");
return ret;
}
}
dev_dbg(dsi->dev, "Start High Speed Clock with %s clock mode\n",
is_clk_cont ? "continuous" : "non-continuous");
return 0;
}
static int rzg2l_mipi_dsi_stop_hs_clock(struct rzg2l_mipi_dsi *dsi)
{
bool is_clk_cont;
u32 status;
int ret;
is_clk_cont = !(dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS);
/* Stop HS clock */
rzg2l_mipi_dsi_link_write(dsi, HSCLKSETR,
is_clk_cont ? HSCLKSETR_HSCLKMODE_CONT :
HSCLKSETR_HSCLKMODE_NON_CONT);
if (is_clk_cont) {
ret = read_poll_timeout(rzg2l_mipi_dsi_link_read, status,
status & PLSR_CLHS2LP,
2000, 20000, false, dsi, PLSR);
if (ret < 0) {
dev_err(dsi->dev, "failed to stop HS clock\n");
return ret;
}
}
return 0;
}
static int rzg2l_mipi_dsi_start_video(struct rzg2l_mipi_dsi *dsi)
{
u32 vich1set0r;
u32 status;
int ret;
/* Configure the blanking sequence and start video input. */
vich1set0r = VICH1SET0R_HFPNOLP | VICH1SET0R_HBPNOLP |
VICH1SET0R_HSANOLP | VICH1SET0R_VSTART;
rzg2l_mipi_dsi_link_write(dsi, VICH1SET0R, vich1set0r);
ret = read_poll_timeout(rzg2l_mipi_dsi_link_read, status,
status & VICH1SR_VIRDY,
2000, 20000, false, dsi, VICH1SR);
if (ret < 0)
dev_err(dsi->dev, "Failed to start video signal input\n");
return ret;
}
static int rzg2l_mipi_dsi_stop_video(struct rzg2l_mipi_dsi *dsi)
{
u32 status;
int ret;
rzg2l_mipi_dsi_link_write(dsi, VICH1SET0R, VICH1SET0R_VSTPAFT);
ret = read_poll_timeout(rzg2l_mipi_dsi_link_read, status,
(status & VICH1SR_STOP) && (!(status & VICH1SR_RUNNING)),
2000, 20000, false, dsi, VICH1SR);
if (ret < 0)
goto err;
ret = read_poll_timeout(rzg2l_mipi_dsi_link_read, status,
!(status & LINKSR_HSBUSY),
2000, 20000, false, dsi, LINKSR);
if (ret < 0)
goto err;
return 0;
err:
dev_err(dsi->dev, "Failed to stop video signal input\n");
return ret;
}
/* -----------------------------------------------------------------------------
* Bridge
*/
static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
flags);
}
static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct drm_atomic_state *state = old_bridge_state->base.state;
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
const struct drm_display_mode *mode;
struct drm_connector *connector;
struct drm_crtc *crtc;
int ret;
connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
mode = &drm_atomic_get_new_crtc_state(state, crtc)->adjusted_mode;
ret = rzg2l_mipi_dsi_startup(dsi, mode);
if (ret < 0)
return;
rzg2l_mipi_dsi_set_display_timing(dsi, mode);
ret = rzg2l_mipi_dsi_start_hs_clock(dsi);
if (ret < 0)
goto err_stop;
ret = rzg2l_mipi_dsi_start_video(dsi);
if (ret < 0)
goto err_stop_clock;
return;
err_stop_clock:
rzg2l_mipi_dsi_stop_hs_clock(dsi);
err_stop:
rzg2l_mipi_dsi_stop(dsi);
}
static void rzg2l_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
rzg2l_mipi_dsi_stop_video(dsi);
rzg2l_mipi_dsi_stop_hs_clock(dsi);
rzg2l_mipi_dsi_stop(dsi);
}
static enum drm_mode_status
rzg2l_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
if (mode->clock > 148500)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static const struct drm_bridge_funcs rzg2l_mipi_dsi_bridge_ops = {
.attach = rzg2l_mipi_dsi_attach,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_enable = rzg2l_mipi_dsi_atomic_enable,
.atomic_disable = rzg2l_mipi_dsi_atomic_disable,
.mode_valid = rzg2l_mipi_dsi_bridge_mode_valid,
};
/* -----------------------------------------------------------------------------
* Host setting
*/
static int rzg2l_mipi_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct rzg2l_mipi_dsi *dsi = host_to_rzg2l_mipi_dsi(host);
int ret;
if (device->lanes > dsi->num_data_lanes) {
dev_err(dsi->dev,
"Number of lines of device (%u) exceeds host (%u)\n",
device->lanes, dsi->num_data_lanes);
return -EINVAL;
}
switch (mipi_dsi_pixel_format_to_bpp(device->format)) {
case 24:
case 18:
break;
default:
dev_err(dsi->dev, "Unsupported format 0x%04x\n", device->format);
return -EINVAL;
}
dsi->lanes = device->lanes;
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
dsi->next_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node,
1, 0);
if (IS_ERR(dsi->next_bridge)) {
ret = PTR_ERR(dsi->next_bridge);
dev_err(dsi->dev, "failed to get next bridge: %d\n", ret);
return ret;
}
drm_bridge_add(&dsi->bridge);
return 0;
}
static int rzg2l_mipi_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct rzg2l_mipi_dsi *dsi = host_to_rzg2l_mipi_dsi(host);
drm_bridge_remove(&dsi->bridge);
return 0;
}
static const struct mipi_dsi_host_ops rzg2l_mipi_dsi_host_ops = {
.attach = rzg2l_mipi_dsi_host_attach,
.detach = rzg2l_mipi_dsi_host_detach,
};
/* -----------------------------------------------------------------------------
* Power Management
*/
static int __maybe_unused rzg2l_mipi_pm_runtime_suspend(struct device *dev)
{
struct rzg2l_mipi_dsi *dsi = dev_get_drvdata(dev);
reset_control_assert(dsi->prstc);
reset_control_assert(dsi->arstc);
return 0;
}
static int __maybe_unused rzg2l_mipi_pm_runtime_resume(struct device *dev)
{
struct rzg2l_mipi_dsi *dsi = dev_get_drvdata(dev);
int ret;
ret = reset_control_deassert(dsi->arstc);
if (ret < 0)
return ret;
ret = reset_control_deassert(dsi->prstc);
if (ret < 0)
reset_control_assert(dsi->arstc);
return ret;
}
static const struct dev_pm_ops rzg2l_mipi_pm_ops = {
SET_RUNTIME_PM_OPS(rzg2l_mipi_pm_runtime_suspend, rzg2l_mipi_pm_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
* Probe & Remove
*/
static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
{
unsigned int num_data_lanes;
struct rzg2l_mipi_dsi *dsi;
u32 txsetr;
int ret;
dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
platform_set_drvdata(pdev, dsi);
dsi->dev = &pdev->dev;
ret = drm_of_get_data_lanes_count_ep(dsi->dev->of_node, 1, 0, 1, 4);
if (ret < 0)
return dev_err_probe(dsi->dev, ret,
"missing or invalid data-lanes property\n");
num_data_lanes = ret;
dsi->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->mmio))
return PTR_ERR(dsi->mmio);
dsi->vclk = devm_clk_get(dsi->dev, "vclk");
if (IS_ERR(dsi->vclk))
return PTR_ERR(dsi->vclk);
dsi->rstc = devm_reset_control_get_exclusive(dsi->dev, "rst");
if (IS_ERR(dsi->rstc))
return dev_err_probe(dsi->dev, PTR_ERR(dsi->rstc),
"failed to get rst\n");
dsi->arstc = devm_reset_control_get_exclusive(dsi->dev, "arst");
if (IS_ERR(dsi->arstc))
return dev_err_probe(&pdev->dev, PTR_ERR(dsi->arstc),
"failed to get arst\n");
dsi->prstc = devm_reset_control_get_exclusive(dsi->dev, "prst");
if (IS_ERR(dsi->prstc))
return dev_err_probe(dsi->dev, PTR_ERR(dsi->prstc),
"failed to get prst\n");
platform_set_drvdata(pdev, dsi);
pm_runtime_enable(dsi->dev);
ret = pm_runtime_resume_and_get(dsi->dev);
if (ret < 0)
goto err_pm_disable;
/*
* The TXSETR register can only be read after DPHY initialization, but
* during probe mode->clock and the format are not yet available, so
* initialize the DPHY with the timing parameters for 80 Mbps.
*/
ret = rzg2l_mipi_dsi_dphy_init(dsi, 80000);
if (ret < 0)
goto err_phy;
txsetr = rzg2l_mipi_dsi_link_read(dsi, TXSETR);
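/*
 * Bits [17:16] of TXSETR, as decoded below, appear to report the number
 * of data lanes available in this DSI instance minus one; the
 * DT-provided lane count is clamped to that value.
 */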
dsi->num_data_lanes = min(((txsetr >> 16) & 3) + 1, num_data_lanes);
rzg2l_mipi_dsi_dphy_exit(dsi);
pm_runtime_put(dsi->dev);
/* Initialize the DRM bridge. */
dsi->bridge.funcs = &rzg2l_mipi_dsi_bridge_ops;
dsi->bridge.of_node = dsi->dev->of_node;
/* Init host device */
dsi->host.dev = dsi->dev;
dsi->host.ops = &rzg2l_mipi_dsi_host_ops;
ret = mipi_dsi_host_register(&dsi->host);
if (ret < 0)
goto err_pm_disable;
return 0;
err_phy:
rzg2l_mipi_dsi_dphy_exit(dsi);
pm_runtime_put(dsi->dev);
err_pm_disable:
pm_runtime_disable(dsi->dev);
return ret;
}
static void rzg2l_mipi_dsi_remove(struct platform_device *pdev)
{
struct rzg2l_mipi_dsi *dsi = platform_get_drvdata(pdev);
mipi_dsi_host_unregister(&dsi->host);
pm_runtime_disable(&pdev->dev);
}
static const struct of_device_id rzg2l_mipi_dsi_of_table[] = {
{ .compatible = "renesas,rzg2l-mipi-dsi" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzg2l_mipi_dsi_of_table);
static struct platform_driver rzg2l_mipi_dsi_platform_driver = {
.probe = rzg2l_mipi_dsi_probe,
.remove_new = rzg2l_mipi_dsi_remove,
.driver = {
.name = "rzg2l-mipi-dsi",
.pm = &rzg2l_mipi_pm_ops,
.of_match_table = rzg2l_mipi_dsi_of_table,
},
};
module_platform_driver(rzg2l_mipi_dsi_platform_driver);
MODULE_AUTHOR("Biju Das <[email protected]>");
MODULE_DESCRIPTION("Renesas RZ/G2L MIPI DSI Encoder Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* R-Car Display Unit Color Management Module
*
* Copyright (C) 2019 Jacopo Mondi <[email protected]>
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_color_mgmt.h>
#include "rcar_cmm.h"
#define CM2_LUT_CTRL 0x0000
#define CM2_LUT_CTRL_LUT_EN BIT(0)
#define CM2_LUT_TBL_BASE 0x0600
#define CM2_LUT_TBL(__i) (CM2_LUT_TBL_BASE + (__i) * 4)
struct rcar_cmm {
void __iomem *base;
/*
* @lut: 1D-LUT state
* @lut.enabled: 1D-LUT enabled flag
*/
struct {
bool enabled;
} lut;
};
static inline int rcar_cmm_read(struct rcar_cmm *rcmm, u32 reg)
{
return ioread32(rcmm->base + reg);
}
static inline void rcar_cmm_write(struct rcar_cmm *rcmm, u32 reg, u32 data)
{
iowrite32(data, rcmm->base + reg);
}
/*
* rcar_cmm_lut_write() - Scale the DRM LUT table entries to hardware precision
* and write to the CMM registers
* @rcmm: Pointer to the CMM device
* @drm_lut: Pointer to the DRM LUT table
*/
static void rcar_cmm_lut_write(struct rcar_cmm *rcmm,
const struct drm_color_lut *drm_lut)
{
unsigned int i;
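/*
 * For illustration: drm_color_lut_extract() scales each 16-bit DRM
 * LUT component down to the 8 bits handled by the CM2 LUT, so a
 * full-scale entry of {0xffff, 0xffff, 0xffff} is written as
 * 0x00ffffff (R in bits [23:16], G in [15:8], B in [7:0]).
 */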
for (i = 0; i < CM2_LUT_SIZE; ++i) {
u32 entry = drm_color_lut_extract(drm_lut[i].red, 8) << 16
| drm_color_lut_extract(drm_lut[i].green, 8) << 8
| drm_color_lut_extract(drm_lut[i].blue, 8);
rcar_cmm_write(rcmm, CM2_LUT_TBL(i), entry);
}
}
/*
* rcar_cmm_setup() - Configure the CMM unit
* @pdev: The platform device associated with the CMM instance
* @config: The CMM unit configuration
*
* Configure the CMM unit with the given configuration. Currently enabling,
* disabling and programming of the 1-D LUT unit is supported.
*
* As rcar_cmm_setup() accesses the CMM registers the unit should be powered
* and its functional clock enabled. To guarantee this, before any call to
* this function is made, the CMM unit has to be enabled by calling
* rcar_cmm_enable() first.
*
* TODO: Add support for LUT double buffer operations to avoid updating the
* LUT table entries while a frame is being displayed.
*/
int rcar_cmm_setup(struct platform_device *pdev,
const struct rcar_cmm_config *config)
{
struct rcar_cmm *rcmm = platform_get_drvdata(pdev);
/* Disable LUT if no table is provided. */
if (!config->lut.table) {
if (rcmm->lut.enabled) {
rcar_cmm_write(rcmm, CM2_LUT_CTRL, 0);
rcmm->lut.enabled = false;
}
return 0;
}
/* Enable LUT and program the new gamma table values. */
if (!rcmm->lut.enabled) {
rcar_cmm_write(rcmm, CM2_LUT_CTRL, CM2_LUT_CTRL_LUT_EN);
rcmm->lut.enabled = true;
}
rcar_cmm_lut_write(rcmm, config->lut.table);
return 0;
}
EXPORT_SYMBOL_GPL(rcar_cmm_setup);
/*
* rcar_cmm_enable() - Enable the CMM unit
* @pdev: The platform device associated with the CMM instance
*
* When the output of the corresponding DU channel is routed to the CMM unit,
* the unit shall be enabled before the DU channel is started, and remain
* enabled until the channel is stopped. The CMM unit shall be disabled with
* rcar_cmm_disable().
*
* Calls to rcar_cmm_enable() and rcar_cmm_disable() are not reference-counted.
* It is an error to attempt to enable an already enabled CMM unit, or to
* attempt to disable a disabled unit.
*/
int rcar_cmm_enable(struct platform_device *pdev)
{
int ret;
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(rcar_cmm_enable);
/*
* rcar_cmm_disable() - Disable the CMM unit
* @pdev: The platform device associated with the CMM instance
*
* See rcar_cmm_enable() for usage information.
*
* Disabling the CMM unit disables all the internal processing blocks. The CMM
* state shall thus be restored with rcar_cmm_setup() when re-enabling the CMM
* unit after the next rcar_cmm_enable() call.
*/
void rcar_cmm_disable(struct platform_device *pdev)
{
struct rcar_cmm *rcmm = platform_get_drvdata(pdev);
rcar_cmm_write(rcmm, CM2_LUT_CTRL, 0);
rcmm->lut.enabled = false;
pm_runtime_put(&pdev->dev);
}
EXPORT_SYMBOL_GPL(rcar_cmm_disable);
/*
* rcar_cmm_init() - Initialize the CMM unit
* @pdev: The platform device associated with the CMM instance
*
* Return: 0 on success, -EPROBE_DEFER if the CMM is not available yet,
* -ENODEV if the DRM_RCAR_CMM config option is disabled
*/
int rcar_cmm_init(struct platform_device *pdev)
{
struct rcar_cmm *rcmm = platform_get_drvdata(pdev);
if (!rcmm)
return -EPROBE_DEFER;
return 0;
}
EXPORT_SYMBOL_GPL(rcar_cmm_init);
static int rcar_cmm_probe(struct platform_device *pdev)
{
struct rcar_cmm *rcmm;
rcmm = devm_kzalloc(&pdev->dev, sizeof(*rcmm), GFP_KERNEL);
if (!rcmm)
return -ENOMEM;
platform_set_drvdata(pdev, rcmm);
rcmm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rcmm->base))
return PTR_ERR(rcmm->base);
pm_runtime_enable(&pdev->dev);
return 0;
}
static void rcar_cmm_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
}
static const struct of_device_id rcar_cmm_of_table[] = {
{ .compatible = "renesas,rcar-gen3-cmm", },
{ .compatible = "renesas,rcar-gen2-cmm", },
{ },
};
MODULE_DEVICE_TABLE(of, rcar_cmm_of_table);
static struct platform_driver rcar_cmm_platform_driver = {
.probe = rcar_cmm_probe,
.remove_new = rcar_cmm_remove,
.driver = {
.name = "rcar-cmm",
.of_match_table = rcar_cmm_of_table,
},
};
module_platform_driver(rcar_cmm_platform_driver);
MODULE_AUTHOR("Jacopo Mondi <[email protected]>");
MODULE_DESCRIPTION("Renesas R-Car CMM Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* R-Car Display Unit DRM driver
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
/* -----------------------------------------------------------------------------
* Device Information
*/
static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
* R8A774[34] has one RGB output and one LVDS output
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(1) | BIT(0),
.port = 0,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 1,
},
},
.num_lvds = 1,
.num_rpf = 4,
};
static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
* R8A7745 has two RGB outputs
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
.port = 0,
},
[RCAR_DU_OUTPUT_DPAD1] = {
.possible_crtcs = BIT(1),
.port = 1,
},
},
.num_rpf = 4,
};
static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
* R8A77470 has two RGB outputs, one LVDS output, and
* one (currently unsupported) analog video output
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
.port = 0,
},
[RCAR_DU_OUTPUT_DPAD1] = {
.possible_crtcs = BIT(1),
.port = 1,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0) | BIT(1),
.port = 2,
},
},
.num_rpf = 4,
};
static const struct rcar_du_device_info rcar_du_r8a774a1_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(2) | BIT(1) | BIT(0),
.routes = {
/*
* R8A774A1 has one RGB output, one LVDS output and one HDMI
* output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(2),
.port = 0,
},
[RCAR_DU_OUTPUT_HDMI0] = {
.possible_crtcs = BIT(1),
.port = 1,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 2,
},
},
.num_lvds = 1,
.num_rpf = 5,
.dpll_mask = BIT(1),
};
static const struct rcar_du_device_info rcar_du_r8a774b1_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(3) | BIT(1) | BIT(0),
.routes = {
/*
* R8A774B1 has one RGB output, one LVDS output and one HDMI
* output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(2),
.port = 0,
},
[RCAR_DU_OUTPUT_HDMI0] = {
.possible_crtcs = BIT(1),
.port = 1,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 2,
},
},
.num_lvds = 1,
.num_rpf = 5,
.dpll_mask = BIT(1),
};
static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
* R8A774C0 has one RGB output and two LVDS outputs
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0) | BIT(1),
.port = 0,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 1,
},
[RCAR_DU_OUTPUT_LVDS1] = {
.possible_crtcs = BIT(1),
.port = 2,
},
},
.num_lvds = 2,
.num_rpf = 4,
.lvds_clk_mask = BIT(1) | BIT(0),
};
static const struct rcar_du_device_info rcar_du_r8a774e1_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(3) | BIT(1) | BIT(0),
.routes = {
/*
* R8A774E1 has one RGB output, one LVDS output and one HDMI
* output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(2),
.port = 0,
},
[RCAR_DU_OUTPUT_HDMI0] = {
.possible_crtcs = BIT(1),
.port = 1,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 2,
},
},
.num_lvds = 1,
.num_rpf = 5,
.dpll_mask = BIT(1),
};
static const struct rcar_du_device_info rcar_du_r8a7779_info = {
.gen = 1,
.features = RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
* R8A7779 has two RGB outputs and one (currently unsupported)
* TCON output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
.port = 0,
},
[RCAR_DU_OUTPUT_DPAD1] = {
.possible_crtcs = BIT(1) | BIT(0),
.port = 1,
},
},
};
static const struct rcar_du_device_info rcar_du_r8a7790_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.quirks = RCAR_DU_QUIRK_ALIGN_128B,
.channels_mask = BIT(2) | BIT(1) | BIT(0),
.routes = {
/*
* R8A7742 and R8A7790 each have one RGB output and two LVDS
* outputs. Additionally R8A7790 supports one TCON output
* (currently unsupported by the driver).
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(2) | BIT(1) | BIT(0),
.port = 0,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 1,
},
[RCAR_DU_OUTPUT_LVDS1] = {
.possible_crtcs = BIT(2) | BIT(1),
.port = 2,
},
},
.num_lvds = 2,
.num_rpf = 4,
};
/* M2-W (r8a7791) and M2-N (r8a7793) are identical */
static const struct rcar_du_device_info rcar_du_r8a7791_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
* R8A779[13] has one RGB output, one LVDS output and one
* (currently unsupported) TCON output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(1) | BIT(0),
.port = 0,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 1,
},
},
.num_lvds = 1,
.num_rpf = 4,
};
static const struct rcar_du_device_info rcar_du_r8a7792_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/* R8A7792 has two RGB outputs. */
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
.port = 0,
},
[RCAR_DU_OUTPUT_DPAD1] = {
.possible_crtcs = BIT(1),
.port = 1,
},
},
.num_rpf = 4,
};
static const struct rcar_du_device_info rcar_du_r8a7794_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
* R8A7794 has two RGB outputs and one (currently unsupported)
* TCON output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
.port = 0,
},
[RCAR_DU_OUTPUT_DPAD1] = {
.possible_crtcs = BIT(1),
.port = 1,
},
},
.num_rpf = 4,
};
static const struct rcar_du_device_info rcar_du_r8a7795_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
.routes = {
/*
* R8A7795 has one RGB output, two HDMI outputs and one
* LVDS output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(3),
.port = 0,
},
[RCAR_DU_OUTPUT_HDMI0] = {
.possible_crtcs = BIT(1),
.port = 1,
},
[RCAR_DU_OUTPUT_HDMI1] = {
.possible_crtcs = BIT(2),
.port = 2,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 3,
},
},
.num_lvds = 1,
.num_rpf = 5,
.dpll_mask = BIT(2) | BIT(1),
};
static const struct rcar_du_device_info rcar_du_r8a7796_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(2) | BIT(1) | BIT(0),
.routes = {
/*
* R8A7796 has one RGB output, one LVDS output and one HDMI
* output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(2),
.port = 0,
},
[RCAR_DU_OUTPUT_HDMI0] = {
.possible_crtcs = BIT(1),
.port = 1,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 2,
},
},
.num_lvds = 1,
.num_rpf = 5,
.dpll_mask = BIT(1),
};
static const struct rcar_du_device_info rcar_du_r8a77965_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(3) | BIT(1) | BIT(0),
.routes = {
/*
* R8A77965 has one RGB output, one LVDS output and one HDMI
* output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(2),
.port = 0,
},
[RCAR_DU_OUTPUT_HDMI0] = {
.possible_crtcs = BIT(1),
.port = 1,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 2,
},
},
.num_lvds = 1,
.num_rpf = 5,
.dpll_mask = BIT(1),
};
static const struct rcar_du_device_info rcar_du_r8a77970_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(0),
.routes = {
/*
* R8A77970 and R8A77980 have one RGB output and one LVDS
* output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
.port = 0,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 1,
},
},
.num_lvds = 1,
.num_rpf = 5,
};
static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
* R8A77990 and R8A77995 have one RGB output and two LVDS
* outputs.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0) | BIT(1),
.port = 0,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 1,
},
[RCAR_DU_OUTPUT_LVDS1] = {
.possible_crtcs = BIT(1),
.port = 2,
},
},
.num_lvds = 2,
.num_rpf = 5,
.lvds_clk_mask = BIT(1) | BIT(0),
};
static const struct rcar_du_device_info rcar_du_r8a779a0_info = {
.gen = 4,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_NO_BLENDING,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/* R8A779A0 has two MIPI DSI outputs. */
[RCAR_DU_OUTPUT_DSI0] = {
.possible_crtcs = BIT(0),
.port = 0,
},
[RCAR_DU_OUTPUT_DSI1] = {
.possible_crtcs = BIT(1),
.port = 1,
},
},
.num_rpf = 5,
.dsi_clk_mask = BIT(1) | BIT(0),
};
static const struct rcar_du_device_info rcar_du_r8a779g0_info = {
.gen = 4,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_NO_BLENDING,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/* R8A779G0 has two MIPI DSI outputs. */
[RCAR_DU_OUTPUT_DSI0] = {
.possible_crtcs = BIT(0),
.port = 0,
},
[RCAR_DU_OUTPUT_DSI1] = {
.possible_crtcs = BIT(1),
.port = 1,
},
},
.num_rpf = 5,
.dsi_clk_mask = BIT(1) | BIT(0),
};
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7742", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
{ .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info },
{ .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
{ .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
{ .compatible = "renesas,du-r8a774a1", .data = &rcar_du_r8a774a1_info },
{ .compatible = "renesas,du-r8a774b1", .data = &rcar_du_r8a774b1_info },
{ .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info },
{ .compatible = "renesas,du-r8a774e1", .data = &rcar_du_r8a774e1_info },
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
{ .compatible = "renesas,du-r8a7792", .data = &rcar_du_r8a7792_info },
{ .compatible = "renesas,du-r8a7793", .data = &rcar_du_r8a7791_info },
{ .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info },
{ .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info },
{ .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info },
{ .compatible = "renesas,du-r8a77961", .data = &rcar_du_r8a7796_info },
{ .compatible = "renesas,du-r8a77965", .data = &rcar_du_r8a77965_info },
{ .compatible = "renesas,du-r8a77970", .data = &rcar_du_r8a77970_info },
{ .compatible = "renesas,du-r8a77980", .data = &rcar_du_r8a77970_info },
{ .compatible = "renesas,du-r8a77990", .data = &rcar_du_r8a7799x_info },
{ .compatible = "renesas,du-r8a77995", .data = &rcar_du_r8a7799x_info },
{ .compatible = "renesas,du-r8a779a0", .data = &rcar_du_r8a779a0_info },
{ .compatible = "renesas,du-r8a779g0", .data = &rcar_du_r8a779g0_info },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_du_of_table);
const char *rcar_du_output_name(enum rcar_du_output output)
{
static const char * const names[] = {
[RCAR_DU_OUTPUT_DPAD0] = "DPAD0",
[RCAR_DU_OUTPUT_DPAD1] = "DPAD1",
[RCAR_DU_OUTPUT_DSI0] = "DSI0",
[RCAR_DU_OUTPUT_DSI1] = "DSI1",
[RCAR_DU_OUTPUT_HDMI0] = "HDMI0",
[RCAR_DU_OUTPUT_HDMI1] = "HDMI1",
[RCAR_DU_OUTPUT_LVDS0] = "LVDS0",
[RCAR_DU_OUTPUT_LVDS1] = "LVDS1",
[RCAR_DU_OUTPUT_TCON] = "TCON",
};
if (output >= ARRAY_SIZE(names) || !names[output])
return "UNKNOWN";
return names[output];
}
/* -----------------------------------------------------------------------------
* DRM operations
*/
DEFINE_DRM_GEM_DMA_FOPS(rcar_du_fops);
static const struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.dumb_create = rcar_du_dumb_create,
.gem_prime_import_sg_table = rcar_du_gem_prime_import_sg_table,
.fops = &rcar_du_fops,
.name = "rcar-du",
.desc = "Renesas R-Car Display Unit",
.date = "20130110",
.major = 1,
.minor = 0,
};
/* -----------------------------------------------------------------------------
* Power management
*/
static int rcar_du_pm_suspend(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(&rcdu->ddev);
}
static int rcar_du_pm_resume(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
return drm_mode_config_helper_resume(&rcdu->ddev);
}
static DEFINE_SIMPLE_DEV_PM_OPS(rcar_du_pm_ops,
rcar_du_pm_suspend, rcar_du_pm_resume);
/* -----------------------------------------------------------------------------
* Platform driver
*/
static void rcar_du_remove(struct platform_device *pdev)
{
struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
struct drm_device *ddev = &rcdu->ddev;
drm_dev_unregister(ddev);
drm_atomic_helper_shutdown(ddev);
drm_kms_helper_poll_fini(ddev);
}
static void rcar_du_shutdown(struct platform_device *pdev)
{
struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
drm_atomic_helper_shutdown(&rcdu->ddev);
}
static int rcar_du_probe(struct platform_device *pdev)
{
struct rcar_du_device *rcdu;
unsigned int mask;
int ret;
if (drm_firmware_drivers_only())
return -ENODEV;
/* Allocate and initialize the R-Car device structure. */
rcdu = devm_drm_dev_alloc(&pdev->dev, &rcar_du_driver,
struct rcar_du_device, ddev);
if (IS_ERR(rcdu))
return PTR_ERR(rcdu);
rcdu->dev = &pdev->dev;
rcdu->info = of_device_get_match_data(rcdu->dev);
platform_set_drvdata(pdev, rcdu);
/* I/O resources */
rcdu->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rcdu->mmio))
return PTR_ERR(rcdu->mmio);
/*
* Set the DMA coherent mask to reflect the DU 32-bit DMA address space
* limitations. When sourcing frames from a VSP the DU doesn't perform
* any memory access so set the mask to 40 bits to accept all buffers.
*/
mask = rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE) ? 40 : 32;
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(mask));
if (ret)
return ret;
/* DRM/KMS objects */
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
/*
* Don't use dev_err_probe(), as it would overwrite the probe
* deferral reason recorded in rcar_du_modeset_init().
*/
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev,
"failed to initialize DRM/KMS (%d)\n", ret);
goto error;
}
/*
* Register the DRM device with the core and the connectors with
* sysfs.
*/
ret = drm_dev_register(&rcdu->ddev, 0);
if (ret)
goto error;
drm_info(&rcdu->ddev, "Device %s probed\n", dev_name(&pdev->dev));
drm_fbdev_generic_setup(&rcdu->ddev, 32);
return 0;
error:
drm_kms_helper_poll_fini(&rcdu->ddev);
return ret;
}
static struct platform_driver rcar_du_platform_driver = {
.probe = rcar_du_probe,
.remove_new = rcar_du_remove,
.shutdown = rcar_du_shutdown,
.driver = {
.name = "rcar-du",
.pm = pm_sleep_ptr(&rcar_du_pm_ops),
.of_match_table = rcar_du_of_table,
},
};
module_platform_driver(rcar_du_platform_driver);
MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
MODULE_DESCRIPTION("Renesas R-Car Display Unit DRM Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* R-Car Display Unit Encoder
*
* Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
*/
#include <linux/export.h>
#include <linux/of.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_panel.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_lvds.h"
/* -----------------------------------------------------------------------------
* Encoder
*/
static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
{
struct device_node *ports;
struct device_node *port;
unsigned int num_ports = 0;
ports = of_get_child_by_name(node, "ports");
if (!ports)
ports = of_node_get(node);
for_each_child_of_node(ports, port) {
if (of_node_name_eq(port, "port"))
num_ports++;
}
of_node_put(ports);
return num_ports;
}
static const struct drm_encoder_funcs rcar_du_encoder_funcs = {
};
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node)
{
struct rcar_du_encoder *renc;
struct drm_connector *connector;
struct drm_bridge *bridge;
int ret;
/*
* Locate the DRM bridge from the DT node. For the DPAD outputs, if the
* DT node has a single port, assume that it describes a panel and
* create a panel bridge.
*/
if ((output == RCAR_DU_OUTPUT_DPAD0 ||
output == RCAR_DU_OUTPUT_DPAD1) &&
rcar_du_encoder_count_ports(enc_node) == 1) {
struct drm_panel *panel = of_drm_find_panel(enc_node);
if (IS_ERR(panel))
return PTR_ERR(panel);
bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
} else {
bridge = of_drm_find_bridge(enc_node);
if (!bridge)
return -EPROBE_DEFER;
if (output == RCAR_DU_OUTPUT_LVDS0 ||
output == RCAR_DU_OUTPUT_LVDS1)
rcdu->lvds[output - RCAR_DU_OUTPUT_LVDS0] = bridge;
if (output == RCAR_DU_OUTPUT_DSI0 ||
output == RCAR_DU_OUTPUT_DSI1)
rcdu->dsi[output - RCAR_DU_OUTPUT_DSI0] = bridge;
}
/*
* Create and initialize the encoder. On Gen3, skip the LVDS1 output if
* the LVDS1 encoder is used as a companion for LVDS0 in dual-link
* mode, or any LVDS output if it isn't connected. The latter may happen
* on D3 or E3 as the LVDS encoders are needed to provide the pixel
* clock to the DU, even when the LVDS outputs are not used.
*/
if (rcdu->info->gen >= 3) {
if (output == RCAR_DU_OUTPUT_LVDS1 &&
rcar_lvds_dual_link(bridge))
return -ENOLINK;
if ((output == RCAR_DU_OUTPUT_LVDS0 ||
output == RCAR_DU_OUTPUT_LVDS1) &&
!rcar_lvds_is_connected(bridge))
return -ENOLINK;
}
dev_dbg(rcdu->dev, "initializing encoder %pOF for output %s\n",
enc_node, rcar_du_output_name(output));
renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base,
&rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
NULL);
if (IS_ERR(renc))
return PTR_ERR(renc);
renc->output = output;
/* Attach the bridge to the encoder. */
ret = drm_bridge_attach(&renc->base, bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
dev_err(rcdu->dev,
"failed to attach bridge %pOF for output %s (%d)\n",
bridge->of_node, rcar_du_output_name(output), ret);
return ret;
}
/* Create the connector for the chain of bridges. */
connector = drm_bridge_connector_init(&rcdu->ddev, &renc->base);
if (IS_ERR(connector)) {
dev_err(rcdu->dev,
"failed to created connector for output %s (%ld)\n",
rcar_du_output_name(output), PTR_ERR(connector));
return PTR_ERR(connector);
}
return drm_connector_attach_encoder(connector, &renc->base);
}
| linux-master | drivers/gpu/drm/renesas/rcar-du/rcar_du_encoder.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* R-Car Display Unit Channels Pair
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
*/
/*
* The R8A7779 DU is split in per-CRTC resources (scan-out engine, blending
* unit, timings generator, ...) and device-global resources (start/stop
* control, planes, ...) shared between the two CRTCs.
*
* The R8A7790 introduced a third CRTC with its own set of global resources.
* This would be modeled as two separate DU device instances if it wasn't for
* a handful of resources that are shared between the three CRTCs (mostly
* related to input and output routing). For this reason the R8A7790 DU must be
* modeled as a single device with three CRTCs, two sets of "semi-global"
* resources, and a few device-global resources.
*
* The rcar_du_group object is a driver specific object, without any real
* counterpart in the DU documentation, that models those semi-global resources.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include "rcar_du_drv.h"
#include "rcar_du_group.h"
#include "rcar_du_regs.h"
u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg)
{
return rcar_du_read(rgrp->dev, rgrp->mmio_offset + reg);
}
void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data)
{
rcar_du_write(rgrp->dev, rgrp->mmio_offset + reg, data);
}
static void rcar_du_group_setup_pins(struct rcar_du_group *rgrp)
{
u32 defr6 = DEFR6_CODE;
if (rgrp->channels_mask & BIT(0))
defr6 |= DEFR6_ODPM02_DISP;
if (rgrp->channels_mask & BIT(1))
defr6 |= DEFR6_ODPM12_DISP;
rcar_du_group_write(rgrp, DEFR6, defr6);
}
static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
{
struct rcar_du_device *rcdu = rgrp->dev;
u32 defr8 = DEFR8_CODE;
if (rcdu->info->gen < 3) {
defr8 |= DEFR8_DEFE8;
/*
* On Gen2 the DEFR8 register for the first group also controls
* RGB output routing to DPAD0 and VSPD1 routing to DU0/1/2 for
* DU instances that support it.
*/
if (rgrp->index == 0) {
defr8 |= DEFR8_DRGBS_DU(rcdu->dpad0_source);
if (rgrp->dev->vspd1_sink == 2)
defr8 |= DEFR8_VSCS;
}
} else {
/*
* On Gen3 VSPD routing can't be configured, and DPAD routing
* is set in the group corresponding to the DPAD output (no Gen3
* SoC has multiple DPAD sources belonging to separate groups).
*/
if (rgrp->index == rcdu->dpad0_source / 2)
defr8 |= DEFR8_DRGBS_DU(rcdu->dpad0_source);
}
rcar_du_group_write(rgrp, DEFR8, defr8);
}
static void rcar_du_group_setup_didsr(struct rcar_du_group *rgrp)
{
struct rcar_du_device *rcdu = rgrp->dev;
struct rcar_du_crtc *rcrtc;
unsigned int num_crtcs = 0;
unsigned int i;
u32 didsr;
/*
* Configure input dot clock routing with a hardcoded configuration. If
* the DU channel can use the LVDS encoder output clock as the dot
* clock, do so. Otherwise route DU_DOTCLKINn signal to DUn.
*
* Each channel can then select between the dot clock configured here
* and the clock provided by the CPG through the ESCR register.
*/
if (rcdu->info->gen < 3 && rgrp->index == 0) {
/*
* On Gen2 a single register in the first group controls dot
* clock selection for all channels.
*/
rcrtc = rcdu->crtcs;
num_crtcs = rcdu->num_crtcs;
} else if (rcdu->info->gen >= 3 && rgrp->num_crtcs > 1) {
/*
* On Gen3 dot clocks are setup through per-group registers,
* only available when the group has two channels.
*/
rcrtc = &rcdu->crtcs[rgrp->index * 2];
num_crtcs = rgrp->num_crtcs;
}
if (!num_crtcs)
return;
didsr = DIDSR_CODE;
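/*
 * For instance, R8A77990/R8A77995 set lvds_clk_mask = BIT(1) | BIT(0)
 * in their device info, so both channels take their dot clock from
 * the LVDS encoders, while R8A779A0/R8A779G0 set dsi_clk_mask and use
 * the DSI clocks; channels matching neither mask fall back to
 * DU_DOTCLKINn.
 */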
for (i = 0; i < num_crtcs; ++i, ++rcrtc) {
if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index))
didsr |= DIDSR_LDCS_LVDS0(i)
| DIDSR_PDCS_CLK(i, 0);
else if (rcdu->info->dsi_clk_mask & BIT(rcrtc->index))
didsr |= DIDSR_LDCS_DSI(i);
else
didsr |= DIDSR_LDCS_DCLKIN(i)
| DIDSR_PDCS_CLK(i, 0);
}
rcar_du_group_write(rgrp, DIDSR, didsr);
}
static void rcar_du_group_setup(struct rcar_du_group *rgrp)
{
struct rcar_du_device *rcdu = rgrp->dev;
u32 defr7 = DEFR7_CODE;
u32 dorcr;
/* Enable extended features */
rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
if (rcdu->info->gen < 3) {
rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE);
}
rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
if (rcdu->info->gen < 4)
rcar_du_group_setup_pins(rgrp);
if (rcdu->info->gen < 4) {
/*
* TODO: Handle routing of the DU output to CMM dynamically, as
* we should bypass CMM completely when no color management
* feature is used.
*/
defr7 |= (rgrp->cmms_mask & BIT(1) ? DEFR7_CMME1 : 0) |
(rgrp->cmms_mask & BIT(0) ? DEFR7_CMME0 : 0);
rcar_du_group_write(rgrp, DEFR7, defr7);
}
if (rcdu->info->gen >= 2) {
if (rcdu->info->gen < 4)
rcar_du_group_setup_defr8(rgrp);
rcar_du_group_setup_didsr(rgrp);
}
if (rcdu->info->gen >= 3)
rcar_du_group_write(rgrp, DEFR10, DEFR10_CODE | DEFR10_DEFE10);
/*
* Use DS1PR and DS2PR to configure plane priorities and connect
* superposition 0 to the DU0 pins. The DU1 pins will be configured dynamically.
*
* Groups that have a single channel have a hardcoded configuration. On
* Gen3 and newer, the documentation requires PG1T, DK1S and PG1D_DS1 to
* always be set in this case.
*/
dorcr = DORCR_PG0D_DS0 | DORCR_DPRS;
if (rcdu->info->gen >= 3 && rgrp->num_crtcs == 1)
dorcr |= DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_DS1;
rcar_du_group_write(rgrp, DORCR, dorcr);
/* Apply planes to CRTCs association. */
mutex_lock(&rgrp->lock);
rcar_du_group_write(rgrp, DPTSR, (rgrp->dptsr_planes << 16) |
rgrp->dptsr_planes);
mutex_unlock(&rgrp->lock);
}
/*
* rcar_du_group_get - Acquire a reference to the DU channels group
*
* Acquiring the first reference sets up the core registers. A reference must be held
* before accessing any hardware registers.
*
* This function must be called with the DRM mode_config lock held.
*
* Return 0 in case of success or a negative error code otherwise.
*/
int rcar_du_group_get(struct rcar_du_group *rgrp)
{
if (rgrp->use_count)
goto done;
rcar_du_group_setup(rgrp);
done:
rgrp->use_count++;
return 0;
}
/*
* rcar_du_group_put - Release a reference to the DU
*
* This function must be called with the DRM mode_config lock held.
*/
void rcar_du_group_put(struct rcar_du_group *rgrp)
{
--rgrp->use_count;
}
static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
{
struct rcar_du_device *rcdu = rgrp->dev;
/*
* Group start/stop is controlled by the DRES and DEN bits of DSYSR0
* for the first group and DSYSR2 for the second group. On most DU
* instances, this maps to the first CRTC of the group, and we can just
* use rcar_du_crtc_dsysr_clr_set() to access the correct DSYSR. On
* M3-N, however, DU2 doesn't exist, but DSYSR2 does. We thus need to
* access the register directly using group read/write.
*/
if (rcdu->info->channels_mask & BIT(rgrp->index * 2)) {
struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2];
rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN,
start ? DSYSR_DEN : DSYSR_DRES);
} else {
rcar_du_group_write(rgrp, DSYSR,
start ? DSYSR_DEN : DSYSR_DRES);
}
}
void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
{
/*
* Many of the configuration bits are only updated when the display
* reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
* of those bits could be pre-configured, but others (especially the
* bits related to plane assignment to display timing controllers) need
* to be modified at runtime.
*
* Restart the display controller if a start is requested. Sorry for the
* flicker. It should be possible to move most of the "DRES-update" bits
* setup to driver initialization time and minimize the number of cases
* when the display controller will have to be restarted.
*/
if (start) {
if (rgrp->used_crtcs++ != 0)
__rcar_du_group_start_stop(rgrp, false);
__rcar_du_group_start_stop(rgrp, true);
} else {
if (--rgrp->used_crtcs == 0)
__rcar_du_group_start_stop(rgrp, false);
}
}
void rcar_du_group_restart(struct rcar_du_group *rgrp)
{
rgrp->need_restart = false;
__rcar_du_group_start_stop(rgrp, false);
__rcar_du_group_start_stop(rgrp, true);
}
int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu)
{
struct rcar_du_group *rgrp;
struct rcar_du_crtc *crtc;
unsigned int index;
int ret;
if (rcdu->info->gen < 2)
return 0;
/*
* RGB output routing to DPAD0 and VSP1D routing to DU0/1/2 are
* configured in the DEFR8 register of the first group on Gen2 and the
* last group on Gen3. As this function can be called with the DU
* channels of the corresponding CRTCs disabled, we need to enable the
* group clock before accessing the register.
*/
index = rcdu->info->gen < 3 ? 0 : DIV_ROUND_UP(rcdu->num_crtcs, 2) - 1;
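/*
 * For example, a Gen3 SoC with four CRTCs such as R8A7795 yields
 * DIV_ROUND_UP(4, 2) - 1 = 1, i.e. the second (last) group.
 */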
rgrp = &rcdu->groups[index];
crtc = &rcdu->crtcs[index * 2];
ret = clk_prepare_enable(crtc->clock);
if (ret < 0)
return ret;
rcar_du_group_setup_defr8(rgrp);
clk_disable_unprepare(crtc->clock);
return 0;
}
static void rcar_du_group_set_dpad_levels(struct rcar_du_group *rgrp)
{
static const u32 doflr_values[2] = {
DOFLR_HSYCFL0 | DOFLR_VSYCFL0 | DOFLR_ODDFL0 |
DOFLR_DISPFL0 | DOFLR_CDEFL0 | DOFLR_RGBFL0,
DOFLR_HSYCFL1 | DOFLR_VSYCFL1 | DOFLR_ODDFL1 |
DOFLR_DISPFL1 | DOFLR_CDEFL1 | DOFLR_RGBFL1,
};
static const u32 dpad_mask = BIT(RCAR_DU_OUTPUT_DPAD1)
| BIT(RCAR_DU_OUTPUT_DPAD0);
struct rcar_du_device *rcdu = rgrp->dev;
u32 doflr = DOFLR_CODE;
unsigned int i;
if (rcdu->info->gen < 2)
return;
/*
* The DPAD outputs can't be controlled directly. However, the parallel
* output of the DU channels routed to DPAD can be set to fixed levels
* through the DOFLR group register. Use this to turn the DPAD on or off
* by driving fixed low-level signals at the output of any DU channel
* not routed to a DPAD output. This doesn't affect the DU output
* signals going to other outputs, such as the internal LVDS and HDMI
* encoders.
*/
for (i = 0; i < rgrp->num_crtcs; ++i) {
struct rcar_du_crtc_state *rstate;
struct rcar_du_crtc *rcrtc;
rcrtc = &rcdu->crtcs[rgrp->index * 2 + i];
rstate = to_rcar_crtc_state(rcrtc->crtc.state);
if (!(rstate->outputs & dpad_mask))
doflr |= doflr_values[i];
}
rcar_du_group_write(rgrp, DOFLR, doflr);
}
int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
{
struct rcar_du_device *rcdu = rgrp->dev;
u32 dorcr = rcar_du_group_read(rgrp, DORCR);
dorcr &= ~(DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_MASK);
/*
* Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and
* CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1
* by default.
*/
if (rcdu->dpad1_source == rgrp->index * 2)
dorcr |= DORCR_PG1D_DS0;
else
dorcr |= DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_DS1;
rcar_du_group_write(rgrp, DORCR, dorcr);
rcar_du_group_set_dpad_levels(rgrp);
return rcar_du_set_dpad0_vsp1_routing(rgrp->dev);
}
| linux-master | drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <[email protected]>
* Fabien Dessenne <[email protected]>
* for STMicroelectronics.
*/
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <drm/drm_print.h>
#include "sti_compositor.h"
#include "sti_mixer.h"
#include "sti_vtg.h"
/* Module parameter to set the background color of the mixer */
static unsigned int bkg_color = 0x000000;
MODULE_PARM_DESC(bkgcolor, "Value of the background color 0xRRGGBB");
module_param_named(bkgcolor, bkg_color, int, 0644);
/* regs offset */
#define GAM_MIXER_CTL 0x00
#define GAM_MIXER_BKC 0x04
#define GAM_MIXER_BCO 0x0C
#define GAM_MIXER_BCS 0x10
#define GAM_MIXER_AVO 0x28
#define GAM_MIXER_AVS 0x2C
#define GAM_MIXER_CRB 0x34
#define GAM_MIXER_ACT 0x38
#define GAM_MIXER_MBP 0x3C
#define GAM_MIXER_MX0 0x80
/* id for depth of CRB reg */
#define GAM_DEPTH_VID0_ID 1
#define GAM_DEPTH_VID1_ID 2
#define GAM_DEPTH_GDP0_ID 3
#define GAM_DEPTH_GDP1_ID 4
#define GAM_DEPTH_GDP2_ID 5
#define GAM_DEPTH_GDP3_ID 6
#define GAM_DEPTH_MASK_ID 7
/* mask in CTL reg */
#define GAM_CTL_BACK_MASK BIT(0)
#define GAM_CTL_VID0_MASK BIT(1)
#define GAM_CTL_VID1_MASK BIT(2)
#define GAM_CTL_GDP0_MASK BIT(3)
#define GAM_CTL_GDP1_MASK BIT(4)
#define GAM_CTL_GDP2_MASK BIT(5)
#define GAM_CTL_GDP3_MASK BIT(6)
#define GAM_CTL_CURSOR_MASK BIT(9)
const char *sti_mixer_to_str(struct sti_mixer *mixer)
{
switch (mixer->id) {
case STI_MIXER_MAIN:
return "MAIN_MIXER";
case STI_MIXER_AUX:
return "AUX_MIXER";
default:
return "<UNKNOWN MIXER>";
}
}
static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id)
{
return readl(mixer->regs + reg_id);
}
static inline void sti_mixer_reg_write(struct sti_mixer *mixer,
u32 reg_id, u32 val)
{
writel(val, mixer->regs + reg_id);
}
#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
sti_mixer_reg_read(mixer, reg))
static void mixer_dbg_ctl(struct seq_file *s, int val)
{
unsigned int i;
int count = 0;
char *const disp_layer[] = {"BKG", "VID0", "VID1", "GDP0",
"GDP1", "GDP2", "GDP3"};
seq_puts(s, "\tEnabled: ");
for (i = 0; i < 7; i++) {
if (val & 1) {
seq_printf(s, "%s ", disp_layer[i]);
count++;
}
val = val >> 1;
}
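/*
* Bits 7 and 8 are skipped so that the next bit tested is the cursor
* enable bit (bit 9, GAM_CTL_CURSOR_MASK).
*/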
val = val >> 2;
if (val & 1) {
seq_puts(s, "CURS ");
count++;
}
if (!count)
seq_puts(s, "Nothing");
}
static void mixer_dbg_crb(struct seq_file *s, int val)
{
int i;
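/*
* GAM_MIXER_CRB holds one 3-bit plane ID per depth level. For example a
* value of 0x00000B (0b001011) decodes to GDP0 (0b011) for the first
* level printed and VID0 (0b001) for the second one.
*/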
seq_puts(s, "\tDepth: ");
for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
switch (val & GAM_DEPTH_MASK_ID) {
case GAM_DEPTH_VID0_ID:
seq_puts(s, "VID0");
break;
case GAM_DEPTH_VID1_ID:
seq_puts(s, "VID1");
break;
case GAM_DEPTH_GDP0_ID:
seq_puts(s, "GDP0");
break;
case GAM_DEPTH_GDP1_ID:
seq_puts(s, "GDP1");
break;
case GAM_DEPTH_GDP2_ID:
seq_puts(s, "GDP2");
break;
case GAM_DEPTH_GDP3_ID:
seq_puts(s, "GDP3");
break;
default:
seq_puts(s, "---");
}
if (i < GAM_MIXER_NB_DEPTH_LEVEL - 1)
seq_puts(s, " < ");
val = val >> 3;
}
}
static void mixer_dbg_mxn(struct seq_file *s, void *addr)
{
int i;
for (i = 1; i < 8; i++)
seq_printf(s, "-0x%08X", (int)readl(addr + i * 4));
}
static int mixer_dbg_show(struct seq_file *s, void *arg)
{
struct drm_info_node *node = s->private;
struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_mixer_to_str(mixer), mixer->regs);
DBGFS_DUMP(GAM_MIXER_CTL);
mixer_dbg_ctl(s, sti_mixer_reg_read(mixer, GAM_MIXER_CTL));
DBGFS_DUMP(GAM_MIXER_BKC);
DBGFS_DUMP(GAM_MIXER_BCO);
DBGFS_DUMP(GAM_MIXER_BCS);
DBGFS_DUMP(GAM_MIXER_AVO);
DBGFS_DUMP(GAM_MIXER_AVS);
DBGFS_DUMP(GAM_MIXER_CRB);
mixer_dbg_crb(s, sti_mixer_reg_read(mixer, GAM_MIXER_CRB));
DBGFS_DUMP(GAM_MIXER_ACT);
DBGFS_DUMP(GAM_MIXER_MBP);
DBGFS_DUMP(GAM_MIXER_MX0);
mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0);
seq_putc(s, '\n');
return 0;
}
static struct drm_info_list mixer0_debugfs_files[] = {
{ "mixer_main", mixer_dbg_show, 0, NULL },
};
static struct drm_info_list mixer1_debugfs_files[] = {
{ "mixer_aux", mixer_dbg_show, 0, NULL },
};
void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
{
unsigned int i;
struct drm_info_list *mixer_debugfs_files;
int nb_files;
switch (mixer->id) {
case STI_MIXER_MAIN:
mixer_debugfs_files = mixer0_debugfs_files;
nb_files = ARRAY_SIZE(mixer0_debugfs_files);
break;
case STI_MIXER_AUX:
mixer_debugfs_files = mixer1_debugfs_files;
nb_files = ARRAY_SIZE(mixer1_debugfs_files);
break;
default:
return;
}
for (i = 0; i < nb_files; i++)
mixer_debugfs_files[i].data = mixer;
drm_debugfs_create_files(mixer_debugfs_files,
nb_files,
minor->debugfs_root, minor);
}
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
{
u32 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
val &= ~GAM_CTL_BACK_MASK;
val |= enable;
sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
}
static void sti_mixer_set_background_color(struct sti_mixer *mixer,
unsigned int rgb)
{
sti_mixer_reg_write(mixer, GAM_MIXER_BKC, rgb);
}
static void sti_mixer_set_background_area(struct sti_mixer *mixer,
struct drm_display_mode *mode)
{
u32 ydo, xdo, yds, xds;
ydo = sti_vtg_get_line_number(*mode, 0);
yds = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
xdo = sti_vtg_get_pixel_number(*mode, 0);
xds = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
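/*
* BCO/BCS pack the line number in bits 31:16 and the pixel number in
* bits 15:0, giving the start and stop corners of the background area.
*/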
sti_mixer_reg_write(mixer, GAM_MIXER_BCO, ydo << 16 | xdo);
sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
}
int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
{
int plane_id, depth = plane->drm_plane.state->normalized_zpos;
unsigned int i;
u32 mask, val;
switch (plane->desc) {
case STI_GDP_0:
plane_id = GAM_DEPTH_GDP0_ID;
break;
case STI_GDP_1:
plane_id = GAM_DEPTH_GDP1_ID;
break;
case STI_GDP_2:
plane_id = GAM_DEPTH_GDP2_ID;
break;
case STI_GDP_3:
plane_id = GAM_DEPTH_GDP3_ID;
break;
case STI_HQVDP_0:
plane_id = GAM_DEPTH_VID0_ID;
break;
case STI_CURSOR:
/* no need to set depth for cursor */
return 0;
default:
DRM_ERROR("Unknown plane %d\n", plane->desc);
return 1;
}
/* Check whether a depth was previously assigned to this plane */
val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
mask = GAM_DEPTH_MASK_ID << (3 * i);
if ((val & mask) == plane_id << (3 * i))
break;
}
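/*
* The loop leaves mask pointing at the slot where the plane was found;
* it is then extended with the slot for the requested depth so that the
* previous entry is cleared when the register is rewritten below.
*/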
mask |= GAM_DEPTH_MASK_ID << (3 * depth);
plane_id = plane_id << (3 * depth);
DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
sti_plane_to_str(plane), depth);
dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
plane_id, mask);
val &= ~mask;
val |= plane_id;
sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
sti_mixer_reg_read(mixer, GAM_MIXER_CRB));
return 0;
}
int sti_mixer_active_video_area(struct sti_mixer *mixer,
struct drm_display_mode *mode)
{
u32 ydo, xdo, yds, xds;
ydo = sti_vtg_get_line_number(*mode, 0);
yds = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
xdo = sti_vtg_get_pixel_number(*mode, 0);
xds = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
DRM_DEBUG_DRIVER("%s active video area xdo:%d ydo:%d xds:%d yds:%d\n",
sti_mixer_to_str(mixer), xdo, ydo, xds, yds);
sti_mixer_reg_write(mixer, GAM_MIXER_AVO, ydo << 16 | xdo);
sti_mixer_reg_write(mixer, GAM_MIXER_AVS, yds << 16 | xds);
sti_mixer_set_background_color(mixer, bkg_color);
sti_mixer_set_background_area(mixer, mode);
sti_mixer_set_background_status(mixer, true);
return 0;
}
static u32 sti_mixer_get_plane_mask(struct sti_plane *plane)
{
switch (plane->desc) {
case STI_BACK:
return GAM_CTL_BACK_MASK;
case STI_GDP_0:
return GAM_CTL_GDP0_MASK;
case STI_GDP_1:
return GAM_CTL_GDP1_MASK;
case STI_GDP_2:
return GAM_CTL_GDP2_MASK;
case STI_GDP_3:
return GAM_CTL_GDP3_MASK;
case STI_HQVDP_0:
return GAM_CTL_VID0_MASK;
case STI_CURSOR:
return GAM_CTL_CURSOR_MASK;
default:
return 0;
}
}
int sti_mixer_set_plane_status(struct sti_mixer *mixer,
struct sti_plane *plane, bool status)
{
u32 mask, val;
DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
sti_mixer_to_str(mixer), sti_plane_to_str(plane));
mask = sti_mixer_get_plane_mask(plane);
if (!mask) {
DRM_ERROR("Can't find layer mask\n");
return -EINVAL;
}
val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
val &= ~mask;
val |= status ? mask : 0;
sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
return 0;
}
struct sti_mixer *sti_mixer_create(struct device *dev,
struct drm_device *drm_dev,
int id,
void __iomem *baseaddr)
{
struct sti_mixer *mixer = devm_kzalloc(dev, sizeof(*mixer), GFP_KERNEL);
dev_dbg(dev, "%s\n", __func__);
if (!mixer) {
DRM_ERROR("Failed to allocated memory for mixer\n");
return NULL;
}
mixer->regs = baseaddr;
mixer->dev = dev;
mixer->id = id;
DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
sti_mixer_to_str(mixer), mixer->regs);
return mixer;
}
| linux-master | drivers/gpu/drm/sti/sti_mixer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Vincent Abriou <[email protected]> for STMicroelectronics.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/debugfs.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <sound/hdmi-codec.h>
#include "sti_hdmi.h"
#include "sti_hdmi_tx3g4c28phy.h"
#include "sti_vtg.h"
#define HDMI_CFG 0x0000
#define HDMI_INT_EN 0x0004
#define HDMI_INT_STA 0x0008
#define HDMI_INT_CLR 0x000C
#define HDMI_STA 0x0010
#define HDMI_ACTIVE_VID_XMIN 0x0100
#define HDMI_ACTIVE_VID_XMAX 0x0104
#define HDMI_ACTIVE_VID_YMIN 0x0108
#define HDMI_ACTIVE_VID_YMAX 0x010C
#define HDMI_DFLT_CHL0_DAT 0x0110
#define HDMI_DFLT_CHL1_DAT 0x0114
#define HDMI_DFLT_CHL2_DAT 0x0118
#define HDMI_AUDIO_CFG 0x0200
#define HDMI_SPDIF_FIFO_STATUS 0x0204
#define HDMI_SW_DI_1_HEAD_WORD 0x0210
#define HDMI_SW_DI_1_PKT_WORD0 0x0214
#define HDMI_SW_DI_1_PKT_WORD1 0x0218
#define HDMI_SW_DI_1_PKT_WORD2 0x021C
#define HDMI_SW_DI_1_PKT_WORD3 0x0220
#define HDMI_SW_DI_1_PKT_WORD4 0x0224
#define HDMI_SW_DI_1_PKT_WORD5 0x0228
#define HDMI_SW_DI_1_PKT_WORD6 0x022C
#define HDMI_SW_DI_CFG 0x0230
#define HDMI_SAMPLE_FLAT_MASK 0x0244
#define HDMI_AUDN 0x0400
#define HDMI_AUD_CTS 0x0404
#define HDMI_SW_DI_2_HEAD_WORD 0x0600
#define HDMI_SW_DI_2_PKT_WORD0 0x0604
#define HDMI_SW_DI_2_PKT_WORD1 0x0608
#define HDMI_SW_DI_2_PKT_WORD2 0x060C
#define HDMI_SW_DI_2_PKT_WORD3 0x0610
#define HDMI_SW_DI_2_PKT_WORD4 0x0614
#define HDMI_SW_DI_2_PKT_WORD5 0x0618
#define HDMI_SW_DI_2_PKT_WORD6 0x061C
#define HDMI_SW_DI_3_HEAD_WORD 0x0620
#define HDMI_SW_DI_3_PKT_WORD0 0x0624
#define HDMI_SW_DI_3_PKT_WORD1 0x0628
#define HDMI_SW_DI_3_PKT_WORD2 0x062C
#define HDMI_SW_DI_3_PKT_WORD3 0x0630
#define HDMI_SW_DI_3_PKT_WORD4 0x0634
#define HDMI_SW_DI_3_PKT_WORD5 0x0638
#define HDMI_SW_DI_3_PKT_WORD6 0x063C
#define HDMI_IFRAME_SLOT_AVI 1
#define HDMI_IFRAME_SLOT_AUDIO 2
#define HDMI_IFRAME_SLOT_VENDOR 3
#define XCAT(prefix, x, suffix) prefix ## x ## suffix
#define HDMI_SW_DI_N_HEAD_WORD(x) XCAT(HDMI_SW_DI_, x, _HEAD_WORD)
#define HDMI_SW_DI_N_PKT_WORD0(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD0)
#define HDMI_SW_DI_N_PKT_WORD1(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD1)
#define HDMI_SW_DI_N_PKT_WORD2(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD2)
#define HDMI_SW_DI_N_PKT_WORD3(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD3)
#define HDMI_SW_DI_N_PKT_WORD4(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD4)
#define HDMI_SW_DI_N_PKT_WORD5(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD5)
#define HDMI_SW_DI_N_PKT_WORD6(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD6)
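/*
* XCAT provides an extra level of indirection so that a slot macro is
* expanded before token pasting, e.g. HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI)
* becomes HDMI_SW_DI_1_HEAD_WORD (0x0210).
*/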
#define HDMI_SW_DI_MAX_WORD 7
#define HDMI_IFRAME_DISABLED 0x0
#define HDMI_IFRAME_SINGLE_SHOT 0x1
#define HDMI_IFRAME_FIELD 0x2
#define HDMI_IFRAME_FRAME 0x3
#define HDMI_IFRAME_MASK 0x3
#define HDMI_IFRAME_CFG_DI_N(x, n) ((x) << ((n-1)*4)) /* n from 1 to 6 */
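/*
* For example HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, HDMI_IFRAME_SLOT_AVI)
* yields 0x2 (slot 1, no shift), while
* HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FRAME, HDMI_IFRAME_SLOT_AUDIO) yields
* 0x30 (slot 2, shifted by 4 bits).
*/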
#define HDMI_CFG_DEVICE_EN BIT(0)
#define HDMI_CFG_HDMI_NOT_DVI BIT(1)
#define HDMI_CFG_HDCP_EN BIT(2)
#define HDMI_CFG_ESS_NOT_OESS BIT(3)
#define HDMI_CFG_H_SYNC_POL_NEG BIT(4)
#define HDMI_CFG_V_SYNC_POL_NEG BIT(6)
#define HDMI_CFG_422_EN BIT(8)
#define HDMI_CFG_FIFO_OVERRUN_CLR BIT(12)
#define HDMI_CFG_FIFO_UNDERRUN_CLR BIT(13)
#define HDMI_CFG_SW_RST_EN BIT(31)
#define HDMI_INT_GLOBAL BIT(0)
#define HDMI_INT_SW_RST BIT(1)
#define HDMI_INT_PIX_CAP BIT(3)
#define HDMI_INT_HOT_PLUG BIT(4)
#define HDMI_INT_DLL_LCK BIT(5)
#define HDMI_INT_NEW_FRAME BIT(6)
#define HDMI_INT_GENCTRL_PKT BIT(7)
#define HDMI_INT_AUDIO_FIFO_XRUN BIT(8)
#define HDMI_INT_SINK_TERM_PRESENT BIT(11)
#define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \
| HDMI_INT_DLL_LCK \
| HDMI_INT_HOT_PLUG \
| HDMI_INT_GLOBAL)
#define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \
| HDMI_INT_AUDIO_FIFO_XRUN \
| HDMI_INT_GENCTRL_PKT \
| HDMI_INT_NEW_FRAME \
| HDMI_INT_DLL_LCK \
| HDMI_INT_HOT_PLUG \
| HDMI_INT_PIX_CAP \
| HDMI_INT_SW_RST \
| HDMI_INT_GLOBAL)
#define HDMI_STA_SW_RST BIT(1)
#define HDMI_AUD_CFG_8CH BIT(0)
#define HDMI_AUD_CFG_SPDIF_DIV_2 BIT(1)
#define HDMI_AUD_CFG_SPDIF_DIV_3 BIT(2)
#define HDMI_AUD_CFG_SPDIF_CLK_DIV_4 (BIT(1) | BIT(2))
#define HDMI_AUD_CFG_CTS_CLK_256FS BIT(12)
#define HDMI_AUD_CFG_DTS_INVALID BIT(16)
#define HDMI_AUD_CFG_ONE_BIT_INVALID (BIT(18) | BIT(19) | BIT(20) | BIT(21))
#define HDMI_AUD_CFG_CH12_VALID BIT(28)
#define HDMI_AUD_CFG_CH34_VALID BIT(29)
#define HDMI_AUD_CFG_CH56_VALID BIT(30)
#define HDMI_AUD_CFG_CH78_VALID BIT(31)
/* sample flat mask */
#define HDMI_SAMPLE_FLAT_NO 0
#define HDMI_SAMPLE_FLAT_SP0 BIT(0)
#define HDMI_SAMPLE_FLAT_SP1 BIT(1)
#define HDMI_SAMPLE_FLAT_SP2 BIT(2)
#define HDMI_SAMPLE_FLAT_SP3 BIT(3)
#define HDMI_SAMPLE_FLAT_ALL (HDMI_SAMPLE_FLAT_SP0 | HDMI_SAMPLE_FLAT_SP1 |\
HDMI_SAMPLE_FLAT_SP2 | HDMI_SAMPLE_FLAT_SP3)
#define HDMI_INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
#define HDMI_INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
#define HDMI_INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16)
struct sti_hdmi_connector {
struct drm_connector drm_connector;
struct drm_encoder *encoder;
struct sti_hdmi *hdmi;
struct drm_property *colorspace_property;
};
#define to_sti_hdmi_connector(x) \
container_of(x, struct sti_hdmi_connector, drm_connector)
static const struct drm_prop_enum_list colorspace_mode_names[] = {
{ HDMI_COLORSPACE_RGB, "rgb" },
{ HDMI_COLORSPACE_YUV422, "yuv422" },
{ HDMI_COLORSPACE_YUV444, "yuv444" },
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset)
{
return readl(hdmi->regs + offset);
}
void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset)
{
writel(val, hdmi->regs + offset);
}
/*
* Threaded (bottom half) HDMI interrupt handler
*
* @irq: irq number
* @arg: pointer to the sti_hdmi structure
*/
static irqreturn_t hdmi_irq_thread(int irq, void *arg)
{
struct sti_hdmi *hdmi = arg;
/* Hot plug/unplug IRQ */
if (hdmi->irq_status & HDMI_INT_HOT_PLUG) {
hdmi->hpd = readl(hdmi->regs + HDMI_STA) & HDMI_STA_HOT_PLUG;
if (hdmi->drm_dev)
drm_helper_hpd_irq_event(hdmi->drm_dev);
}
/* Sw reset and PLL lock are exclusive so we can use the same
* event to signal them
*/
if (hdmi->irq_status & (HDMI_INT_SW_RST | HDMI_INT_DLL_LCK)) {
hdmi->event_received = true;
wake_up_interruptible(&hdmi->wait_event);
}
/* Audio FIFO underrun IRQ */
if (hdmi->irq_status & HDMI_INT_AUDIO_FIFO_XRUN)
DRM_INFO("Warning: audio FIFO underrun occurs!\n");
return IRQ_HANDLED;
}
/*
* HDMI interrupt handler (top half)
*
* @irq: irq number
* @arg: pointer to the sti_hdmi structure
*/
static irqreturn_t hdmi_irq(int irq, void *arg)
{
struct sti_hdmi *hdmi = arg;
/* read interrupt status */
hdmi->irq_status = hdmi_read(hdmi, HDMI_INT_STA);
/* clear interrupt status */
hdmi_write(hdmi, hdmi->irq_status, HDMI_INT_CLR);
/* force sync bus write */
hdmi_read(hdmi, HDMI_INT_STA);
return IRQ_WAKE_THREAD;
}
/*
* Set the hdmi active area depending on the selected drm display mode
*
* @hdmi: pointer to the hdmi internal structure
*/
static void hdmi_active_area(struct sti_hdmi *hdmi)
{
u32 xmin, xmax;
u32 ymin, ymax;
xmin = sti_vtg_get_pixel_number(hdmi->mode, 1);
xmax = sti_vtg_get_pixel_number(hdmi->mode, hdmi->mode.hdisplay);
ymin = sti_vtg_get_line_number(hdmi->mode, 0);
ymax = sti_vtg_get_line_number(hdmi->mode, hdmi->mode.vdisplay - 1);
hdmi_write(hdmi, xmin, HDMI_ACTIVE_VID_XMIN);
hdmi_write(hdmi, xmax, HDMI_ACTIVE_VID_XMAX);
hdmi_write(hdmi, ymin, HDMI_ACTIVE_VID_YMIN);
hdmi_write(hdmi, ymax, HDMI_ACTIVE_VID_YMAX);
}
/*
* Overall hdmi configuration
*
* @hdmi: pointer to the hdmi internal structure
*/
static void hdmi_config(struct sti_hdmi *hdmi)
{
struct drm_connector *connector = hdmi->drm_connector;
u32 conf;
DRM_DEBUG_DRIVER("\n");
/* Clear overrun and underrun fifo */
conf = HDMI_CFG_FIFO_OVERRUN_CLR | HDMI_CFG_FIFO_UNDERRUN_CLR;
/* Select encryption type and the framing mode */
conf |= HDMI_CFG_ESS_NOT_OESS;
if (connector->display_info.is_hdmi)
conf |= HDMI_CFG_HDMI_NOT_DVI;
/* Set Hsync polarity */
if (hdmi->mode.flags & DRM_MODE_FLAG_NHSYNC) {
DRM_DEBUG_DRIVER("H Sync Negative\n");
conf |= HDMI_CFG_H_SYNC_POL_NEG;
}
/* Set Vsync polarity */
if (hdmi->mode.flags & DRM_MODE_FLAG_NVSYNC) {
DRM_DEBUG_DRIVER("V Sync Negative\n");
conf |= HDMI_CFG_V_SYNC_POL_NEG;
}
/* Enable HDMI */
conf |= HDMI_CFG_DEVICE_EN;
hdmi_write(hdmi, conf, HDMI_CFG);
}
/*
* Helper to reset an infoframe slot
*
* @hdmi: pointer to the hdmi internal structure
* @slot: infoframe slot to reset
*/
static void hdmi_infoframe_reset(struct sti_hdmi *hdmi,
u32 slot)
{
u32 val, i;
u32 head_offset, pack_offset;
switch (slot) {
case HDMI_IFRAME_SLOT_AVI:
head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI);
pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI);
break;
case HDMI_IFRAME_SLOT_AUDIO:
head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AUDIO);
pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AUDIO);
break;
case HDMI_IFRAME_SLOT_VENDOR:
head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_VENDOR);
pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_VENDOR);
break;
default:
DRM_ERROR("unsupported infoframe slot: %#x\n", slot);
return;
}
/* Disable transmission for the selected slot */
val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, slot);
hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
/* Reset info frame registers */
hdmi_write(hdmi, 0x0, head_offset);
for (i = 0; i < HDMI_SW_DI_MAX_WORD * sizeof(u32); i += sizeof(u32))
hdmi_write(hdmi, 0x0, pack_offset + i);
}
/*
* Helper to pack infoframe bytes into a 32-bit word
*
* @ptr: pointer to the infoframe data bytes
* @size: number of bytes to pack (at most 4)
*/
static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size)
{
unsigned long value = 0;
size_t i;
for (i = size; i > 0; i--)
value = (value << 8) | ptr[i - 1];
return value;
}
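/*
* The bytes are packed little-endian into the returned word: for example
* the sequence { 0x11, 0x22, 0x33, 0x44 } is returned as 0x44332211.
*/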
/*
* Helper to write an infoframe
*
* @hdmi: pointer to the hdmi internal structure
* @data: infoframe to write
* @size: size of the infoframe to write
*/
static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi,
const u8 *data,
size_t size)
{
const u8 *ptr = data;
u32 val, slot, mode, i;
u32 head_offset, pack_offset;
switch (*ptr) {
case HDMI_INFOFRAME_TYPE_AVI:
slot = HDMI_IFRAME_SLOT_AVI;
mode = HDMI_IFRAME_FIELD;
head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI);
pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI);
break;
case HDMI_INFOFRAME_TYPE_AUDIO:
slot = HDMI_IFRAME_SLOT_AUDIO;
mode = HDMI_IFRAME_FRAME;
head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AUDIO);
pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AUDIO);
break;
case HDMI_INFOFRAME_TYPE_VENDOR:
slot = HDMI_IFRAME_SLOT_VENDOR;
mode = HDMI_IFRAME_FRAME;
head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_VENDOR);
pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_VENDOR);
break;
default:
DRM_ERROR("unsupported infoframe type: %#x\n", *ptr);
return;
}
/* Disable transmission slot for updated infoframe */
val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, slot);
hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
val = HDMI_INFOFRAME_HEADER_TYPE(*ptr++);
val |= HDMI_INFOFRAME_HEADER_VERSION(*ptr++);
val |= HDMI_INFOFRAME_HEADER_LEN(*ptr++);
writel(val, hdmi->regs + head_offset);
/*
* Each subpack contains 4 bytes.
* The first byte of the first subpack must contain the checksum,
* so the number of bytes to send is increased by one.
*/
size = size - HDMI_INFOFRAME_HEADER_SIZE + 1;
for (i = 0; i < size; i += sizeof(u32)) {
size_t num;
num = min_t(size_t, size - i, sizeof(u32));
val = hdmi_infoframe_subpack(ptr, num);
ptr += sizeof(u32);
writel(val, hdmi->regs + pack_offset + i);
}
/* Enable transmission slot for updated infoframe */
val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
val |= HDMI_IFRAME_CFG_DI_N(mode, slot);
hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
}
/*
* Prepare and configure the AVI infoframe
*
* AVI infoframes are transmitted at least once every two video fields and
* contain information about the HDMI transmission mode such as color space,
* colorimetry, ...
*
* @hdmi: pointer to the hdmi internal structure
*
* Return a negative value if an error occurs
*/
static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
{
struct drm_display_mode *mode = &hdmi->mode;
struct hdmi_avi_infoframe infoframe;
u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
int ret;
DRM_DEBUG_DRIVER("\n");
ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe,
hdmi->drm_connector, mode);
if (ret < 0) {
DRM_ERROR("failed to setup AVI infoframe: %d\n", ret);
return ret;
}
/* fixed infoframe configuration not linked to the mode */
infoframe.colorspace = hdmi->colorspace;
infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
infoframe.colorimetry = HDMI_COLORIMETRY_NONE;
ret = hdmi_avi_infoframe_pack(&infoframe, buffer, sizeof(buffer));
if (ret < 0) {
DRM_ERROR("failed to pack AVI infoframe: %d\n", ret);
return ret;
}
hdmi_infoframe_write_infopack(hdmi, buffer, ret);
return 0;
}
/*
* Prepare and configure the AUDIO infoframe
*
* AUDIO infoframes are transmitted once per frame and
* contain information about the audio stream such as the audio codec,
* sample size, ...
*
* @hdmi: pointer to the hdmi internal structure
*
* Return a negative value if an error occurs
*/
static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi)
{
struct hdmi_audio_params *audio = &hdmi->audio;
u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
int ret, val;
DRM_DEBUG_DRIVER("enter %s, AIF %s\n", __func__,
audio->enabled ? "enable" : "disable");
if (audio->enabled) {
/* pack the stored audio parameters */
ret = hdmi_audio_infoframe_pack(&audio->cea, buffer,
sizeof(buffer));
if (ret < 0) {
DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
return ret;
}
hdmi_infoframe_write_infopack(hdmi, buffer, ret);
} else {
/* disable audio infoframe transmission */
val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK,
HDMI_IFRAME_SLOT_AUDIO);
hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
}
return 0;
}
/*
* Prepare and configure the VS infoframe
*
* Vendor Specific infoframes are transmitted once per frame and
* contain vendor specific information.
*
* @hdmi: pointer to the hdmi internal structure
*
* Return a negative value if an error occurs
*/
#define HDMI_VENDOR_INFOFRAME_MAX_SIZE 6
static int hdmi_vendor_infoframe_config(struct sti_hdmi *hdmi)
{
struct drm_display_mode *mode = &hdmi->mode;
struct hdmi_vendor_infoframe infoframe;
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_VENDOR_INFOFRAME_MAX_SIZE];
int ret;
DRM_DEBUG_DRIVER("\n");
ret = drm_hdmi_vendor_infoframe_from_display_mode(&infoframe,
hdmi->drm_connector,
mode);
if (ret < 0) {
/*
* Reaching this branch does not mean the vendor infoframe
* setup failed. It simply indicates that a vendor infoframe is
* not needed for the selected mode: only 4k and stereoscopic 3D
* modes require one, so just return 0.
*/
return 0;
}
ret = hdmi_vendor_infoframe_pack(&infoframe, buffer, sizeof(buffer));
if (ret < 0) {
DRM_ERROR("failed to pack VS infoframe: %d\n", ret);
return ret;
}
hdmi_infoframe_write_infopack(hdmi, buffer, ret);
return 0;
}
#define HDMI_TIMEOUT_SWRESET 100 /* milliseconds */
/*
* Software reset of the hdmi subsystem
*
* @hdmi: pointer to the hdmi internal structure
*/
static void hdmi_swreset(struct sti_hdmi *hdmi)
{
u32 val;
DRM_DEBUG_DRIVER("\n");
/* Enable hdmi_audio clock only during hdmi reset */
if (clk_prepare_enable(hdmi->clk_audio))
DRM_INFO("Failed to prepare/enable hdmi_audio clk\n");
/* Sw reset */
hdmi->event_received = false;
val = hdmi_read(hdmi, HDMI_CFG);
val |= HDMI_CFG_SW_RST_EN;
hdmi_write(hdmi, val, HDMI_CFG);
/* Wait for the reset to complete */
wait_event_interruptible_timeout(hdmi->wait_event,
hdmi->event_received,
msecs_to_jiffies
(HDMI_TIMEOUT_SWRESET));
/*
* HDMI_STA_SW_RST bit is set to '1' when SW_RST bit in HDMI_CFG is
* set to '1' and clk_audio is running.
*/
if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_SW_RST) == 0)
DRM_DEBUG_DRIVER("Warning: HDMI sw reset timeout occurs\n");
val = hdmi_read(hdmi, HDMI_CFG);
val &= ~HDMI_CFG_SW_RST_EN;
hdmi_write(hdmi, val, HDMI_CFG);
/* Disable hdmi_audio clock; it is no longer needed for DRM purposes */
clk_disable_unprepare(hdmi->clk_audio);
}
#define DBGFS_PRINT_STR(str1, str2) seq_printf(s, "%-24s %s\n", str1, str2)
#define DBGFS_PRINT_INT(str1, int2) seq_printf(s, "%-24s %d\n", str1, int2)
#define DBGFS_DUMP(str, reg) seq_printf(s, "%s %-25s 0x%08X", str, #reg, \
hdmi_read(hdmi, reg))
#define DBGFS_DUMP_DI(reg, slot) DBGFS_DUMP("\n", reg(slot))
static void hdmi_dbg_cfg(struct seq_file *s, int val)
{
int tmp;
seq_putc(s, '\t');
tmp = val & HDMI_CFG_HDMI_NOT_DVI;
DBGFS_PRINT_STR("mode:", tmp ? "HDMI" : "DVI");
seq_puts(s, "\t\t\t\t\t");
tmp = val & HDMI_CFG_HDCP_EN;
DBGFS_PRINT_STR("HDCP:", tmp ? "enable" : "disable");
seq_puts(s, "\t\t\t\t\t");
tmp = val & HDMI_CFG_ESS_NOT_OESS;
DBGFS_PRINT_STR("HDCP mode:", tmp ? "ESS enable" : "OESS enable");
seq_puts(s, "\t\t\t\t\t");
tmp = val & HDMI_CFG_H_SYNC_POL_NEG;
DBGFS_PRINT_STR("Hsync polarity:", tmp ? "inverted" : "normal");
seq_puts(s, "\t\t\t\t\t");
tmp = val & HDMI_CFG_V_SYNC_POL_NEG;
DBGFS_PRINT_STR("Vsync polarity:", tmp ? "inverted" : "normal");
seq_puts(s, "\t\t\t\t\t");
tmp = val & HDMI_CFG_422_EN;
DBGFS_PRINT_STR("YUV422 format:", tmp ? "enable" : "disable");
}
static void hdmi_dbg_sta(struct seq_file *s, int val)
{
int tmp;
seq_putc(s, '\t');
tmp = (val & HDMI_STA_DLL_LCK);
DBGFS_PRINT_STR("pll:", tmp ? "locked" : "not locked");
seq_puts(s, "\t\t\t\t\t");
tmp = (val & HDMI_STA_HOT_PLUG);
DBGFS_PRINT_STR("hdmi cable:", tmp ? "connected" : "not connected");
}
static void hdmi_dbg_sw_di_cfg(struct seq_file *s, int val)
{
int tmp;
char *const en_di[] = {"no transmission",
"single transmission",
"once every field",
"once every frame"};
seq_putc(s, '\t');
tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 1));
DBGFS_PRINT_STR("Data island 1:", en_di[tmp]);
seq_puts(s, "\t\t\t\t\t");
tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 2)) >> 4;
DBGFS_PRINT_STR("Data island 2:", en_di[tmp]);
seq_puts(s, "\t\t\t\t\t");
tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 3)) >> 8;
DBGFS_PRINT_STR("Data island 3:", en_di[tmp]);
seq_puts(s, "\t\t\t\t\t");
tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 4)) >> 12;
DBGFS_PRINT_STR("Data island 4:", en_di[tmp]);
seq_puts(s, "\t\t\t\t\t");
tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 5)) >> 16;
DBGFS_PRINT_STR("Data island 5:", en_di[tmp]);
seq_puts(s, "\t\t\t\t\t");
tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 6)) >> 20;
DBGFS_PRINT_STR("Data island 6:", en_di[tmp]);
}
static int hdmi_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data;
seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs);
DBGFS_DUMP("\n", HDMI_CFG);
hdmi_dbg_cfg(s, hdmi_read(hdmi, HDMI_CFG));
DBGFS_DUMP("", HDMI_INT_EN);
DBGFS_DUMP("\n", HDMI_STA);
hdmi_dbg_sta(s, hdmi_read(hdmi, HDMI_STA));
DBGFS_DUMP("", HDMI_ACTIVE_VID_XMIN);
seq_putc(s, '\t');
DBGFS_PRINT_INT("Xmin:", hdmi_read(hdmi, HDMI_ACTIVE_VID_XMIN));
DBGFS_DUMP("", HDMI_ACTIVE_VID_XMAX);
seq_putc(s, '\t');
DBGFS_PRINT_INT("Xmax:", hdmi_read(hdmi, HDMI_ACTIVE_VID_XMAX));
DBGFS_DUMP("", HDMI_ACTIVE_VID_YMIN);
seq_putc(s, '\t');
DBGFS_PRINT_INT("Ymin:", hdmi_read(hdmi, HDMI_ACTIVE_VID_YMIN));
DBGFS_DUMP("", HDMI_ACTIVE_VID_YMAX);
seq_putc(s, '\t');
DBGFS_PRINT_INT("Ymax:", hdmi_read(hdmi, HDMI_ACTIVE_VID_YMAX));
DBGFS_DUMP("", HDMI_SW_DI_CFG);
hdmi_dbg_sw_di_cfg(s, hdmi_read(hdmi, HDMI_SW_DI_CFG));
DBGFS_DUMP("\n", HDMI_AUDIO_CFG);
DBGFS_DUMP("\n", HDMI_SPDIF_FIFO_STATUS);
DBGFS_DUMP("\n", HDMI_AUDN);
seq_printf(s, "\n AVI Infoframe (Data Island slot N=%d):",
HDMI_IFRAME_SLOT_AVI);
DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AVI);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD0, HDMI_IFRAME_SLOT_AVI);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD1, HDMI_IFRAME_SLOT_AVI);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD2, HDMI_IFRAME_SLOT_AVI);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD3, HDMI_IFRAME_SLOT_AVI);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD4, HDMI_IFRAME_SLOT_AVI);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD5, HDMI_IFRAME_SLOT_AVI);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_AVI);
seq_printf(s, "\n\n AUDIO Infoframe (Data Island slot N=%d):",
HDMI_IFRAME_SLOT_AUDIO);
DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AUDIO);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD0, HDMI_IFRAME_SLOT_AUDIO);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD1, HDMI_IFRAME_SLOT_AUDIO);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD2, HDMI_IFRAME_SLOT_AUDIO);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD3, HDMI_IFRAME_SLOT_AUDIO);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD4, HDMI_IFRAME_SLOT_AUDIO);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD5, HDMI_IFRAME_SLOT_AUDIO);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_AUDIO);
seq_printf(s, "\n\n VENDOR SPECIFIC Infoframe (Data Island slot N=%d):",
HDMI_IFRAME_SLOT_VENDOR);
DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_VENDOR);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD0, HDMI_IFRAME_SLOT_VENDOR);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD1, HDMI_IFRAME_SLOT_VENDOR);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD2, HDMI_IFRAME_SLOT_VENDOR);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD3, HDMI_IFRAME_SLOT_VENDOR);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD4, HDMI_IFRAME_SLOT_VENDOR);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD5, HDMI_IFRAME_SLOT_VENDOR);
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR);
seq_putc(s, '\n');
return 0;
}
static struct drm_info_list hdmi_debugfs_files[] = {
{ "hdmi", hdmi_dbg_show, 0, NULL },
};
static void hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_files); i++)
hdmi_debugfs_files[i].data = hdmi;
drm_debugfs_create_files(hdmi_debugfs_files,
ARRAY_SIZE(hdmi_debugfs_files),
minor->debugfs_root, minor);
}
static void sti_hdmi_disable(struct drm_bridge *bridge)
{
struct sti_hdmi *hdmi = bridge->driver_private;
u32 val = hdmi_read(hdmi, HDMI_CFG);
if (!hdmi->enabled)
return;
DRM_DEBUG_DRIVER("\n");
/* Disable HDMI */
val &= ~HDMI_CFG_DEVICE_EN;
hdmi_write(hdmi, val, HDMI_CFG);
hdmi_write(hdmi, 0xffffffff, HDMI_INT_CLR);
/* Stop the phy */
hdmi->phy_ops->stop(hdmi);
/* Reset info frame transmission */
hdmi_infoframe_reset(hdmi, HDMI_IFRAME_SLOT_AVI);
hdmi_infoframe_reset(hdmi, HDMI_IFRAME_SLOT_AUDIO);
hdmi_infoframe_reset(hdmi, HDMI_IFRAME_SLOT_VENDOR);
/* Set the default channel data to be a dark red */
hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL0_DAT);
hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL1_DAT);
hdmi_write(hdmi, 0x0060, HDMI_DFLT_CHL2_DAT);
/* Disable/unprepare hdmi clock */
clk_disable_unprepare(hdmi->clk_phy);
clk_disable_unprepare(hdmi->clk_tmds);
clk_disable_unprepare(hdmi->clk_pix);
hdmi->enabled = false;
cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
}
/*
* sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
* clocks. Non-coherent clocks means that the audio and TMDS clocks do not
* share the same source, so they drift relative to each other. In this case
* the assumption is that CTS is calculated automatically by the hardware.
*
* @audio_fs: audio sample (frame clock) frequency in Hz
*
* The computed values are based on the table described in the HDMI 1.4b
* specification.
*
* Returns the N value.
*/
static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs)
{
unsigned int n;
switch (audio_fs) {
case 32000:
n = 4096;
break;
case 44100:
n = 6272;
break;
case 48000:
n = 6144;
break;
case 88200:
n = 6272 * 2;
break;
case 96000:
n = 6144 * 2;
break;
case 176400:
n = 6272 * 4;
break;
case 192000:
n = 6144 * 4;
break;
default:
/* Not pre-defined, recommended value: 128 * fs / 1000 */
n = (audio_fs * 128) / 1000;
}
return n;
}
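/*
* For example a 44.1 kHz stream uses N = 6272, while a rate that is not in
* the table, such as 22.05 kHz, falls back to 128 * 22050 / 1000 = 2822.
*/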
static int hdmi_audio_configure(struct sti_hdmi *hdmi)
{
int audio_cfg, n;
struct hdmi_audio_params *params = &hdmi->audio;
struct hdmi_audio_infoframe *info = ¶ms->cea;
DRM_DEBUG_DRIVER("\n");
if (!hdmi->enabled)
return 0;
/* update N parameter */
n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate);
DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n",
params->sample_rate, hdmi->mode.clock * 1000, n);
hdmi_write(hdmi, n, HDMI_AUDN);
/* update HDMI registers according to configuration */
audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
HDMI_AUD_CFG_ONE_BIT_INVALID;
switch (info->channels) {
case 8:
audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
fallthrough;
case 6:
audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
fallthrough;
case 4:
audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
fallthrough;
case 2:
audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
break;
default:
DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n",
info->channels);
return -EINVAL;
}
hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
return hdmi_audio_infoframe_config(hdmi);
}
static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
{
struct sti_hdmi *hdmi = bridge->driver_private;
DRM_DEBUG_DRIVER("\n");
if (hdmi->enabled)
return;
/* Prepare/enable clocks */
if (clk_prepare_enable(hdmi->clk_pix))
DRM_ERROR("Failed to prepare/enable hdmi_pix clk\n");
if (clk_prepare_enable(hdmi->clk_tmds))
DRM_ERROR("Failed to prepare/enable hdmi_tmds clk\n");
if (clk_prepare_enable(hdmi->clk_phy))
DRM_ERROR("Failed to prepare/enable hdmi_rejection_pll clk\n");
hdmi->enabled = true;
/* Program hdmi serializer and start phy */
if (!hdmi->phy_ops->start(hdmi)) {
DRM_ERROR("Unable to start hdmi phy\n");
return;
}
/* Program hdmi active area */
hdmi_active_area(hdmi);
/* Enable working interrupts */
hdmi_write(hdmi, HDMI_WORKING_INT, HDMI_INT_EN);
/* Program hdmi config */
hdmi_config(hdmi);
/* Program AVI infoframe */
if (hdmi_avi_infoframe_config(hdmi))
DRM_ERROR("Unable to configure AVI infoframe\n");
if (hdmi->audio.enabled) {
if (hdmi_audio_configure(hdmi))
DRM_ERROR("Unable to configure audio\n");
} else {
hdmi_audio_infoframe_config(hdmi);
}
/* Program VS infoframe */
if (hdmi_vendor_infoframe_config(hdmi))
DRM_ERROR("Unable to configure VS infoframe\n");
/* Sw reset */
hdmi_swreset(hdmi);
}
static void sti_hdmi_set_mode(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct sti_hdmi *hdmi = bridge->driver_private;
int ret;
DRM_DEBUG_DRIVER("\n");
/* Copy the drm display mode into the local hdmi structure */
drm_mode_copy(&hdmi->mode, mode);
/* Update the pixel clock rate according to the selected mode */
ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000);
if (ret < 0) {
DRM_ERROR("Cannot set rate (%dHz) for hdmi_pix clk\n",
mode->clock * 1000);
return;
}
ret = clk_set_rate(hdmi->clk_phy, mode->clock * 1000);
if (ret < 0) {
DRM_ERROR("Cannot set rate (%dHz) for hdmi_rejection_pll clk\n",
mode->clock * 1000);
return;
}
}
static void sti_hdmi_bridge_nope(struct drm_bridge *bridge)
{
/* do nothing */
}
static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = {
.pre_enable = sti_hdmi_pre_enable,
.enable = sti_hdmi_bridge_nope,
.disable = sti_hdmi_disable,
.post_disable = sti_hdmi_bridge_nope,
.mode_set = sti_hdmi_set_mode,
};
static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct sti_hdmi_connector *hdmi_connector
= to_sti_hdmi_connector(connector);
struct sti_hdmi *hdmi = hdmi_connector->hdmi;
struct edid *edid;
int count;
DRM_DEBUG_DRIVER("\n");
edid = drm_get_edid(connector, hdmi->ddc_adapt);
if (!edid)
goto fail;
cec_notifier_set_phys_addr_from_edid(hdmi->notifier, edid);
count = drm_add_edid_modes(connector, edid);
drm_connector_update_edid_property(connector, edid);
DRM_DEBUG_KMS("%s : %dx%d cm\n",
(connector->display_info.is_hdmi ? "hdmi monitor" : "dvi monitor"),
edid->width_cm, edid->height_cm);
kfree(edid);
return count;
fail:
DRM_ERROR("Can't read HDMI EDID\n");
return 0;
}
#define CLK_TOLERANCE_HZ 50
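/*
* A mode is accepted only if the pixel clock can be generated within
* +/- CLK_TOLERANCE_HZ of the requested rate, e.g. 148500000 Hz +/- 50 Hz
* for a 148.5 MHz mode.
*/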
static enum drm_mode_status
sti_hdmi_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int target = mode->clock * 1000;
int target_min = target - CLK_TOLERANCE_HZ;
int target_max = target + CLK_TOLERANCE_HZ;
int result;
struct sti_hdmi_connector *hdmi_connector
= to_sti_hdmi_connector(connector);
struct sti_hdmi *hdmi = hdmi_connector->hdmi;
result = clk_round_rate(hdmi->clk_pix, target);
DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n",
target, result);
if ((result < target_min) || (result > target_max)) {
DRM_DEBUG_DRIVER("hdmi pixclk=%d not supported\n", target);
return MODE_BAD;
}
return MODE_OK;
}
static const
struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = {
.get_modes = sti_hdmi_connector_get_modes,
.mode_valid = sti_hdmi_connector_mode_valid,
};
/* get detection status of display device */
static enum drm_connector_status
sti_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
struct sti_hdmi_connector *hdmi_connector
= to_sti_hdmi_connector(connector);
struct sti_hdmi *hdmi = hdmi_connector->hdmi;
DRM_DEBUG_DRIVER("\n");
if (hdmi->hpd) {
DRM_DEBUG_DRIVER("hdmi cable connected\n");
return connector_status_connected;
}
DRM_DEBUG_DRIVER("hdmi cable disconnected\n");
cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
return connector_status_disconnected;
}
static void sti_hdmi_connector_init_property(struct drm_device *drm_dev,
struct drm_connector *connector)
{
struct sti_hdmi_connector *hdmi_connector
= to_sti_hdmi_connector(connector);
struct sti_hdmi *hdmi = hdmi_connector->hdmi;
struct drm_property *prop;
/* colorspace property */
hdmi->colorspace = DEFAULT_COLORSPACE_MODE;
prop = drm_property_create_enum(drm_dev, 0, "colorspace",
colorspace_mode_names,
ARRAY_SIZE(colorspace_mode_names));
if (!prop) {
DRM_ERROR("fails to create colorspace property\n");
return;
}
hdmi_connector->colorspace_property = prop;
drm_object_attach_property(&connector->base, prop, hdmi->colorspace);
}
static int
sti_hdmi_connector_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val)
{
struct sti_hdmi_connector *hdmi_connector
= to_sti_hdmi_connector(connector);
struct sti_hdmi *hdmi = hdmi_connector->hdmi;
if (property == hdmi_connector->colorspace_property) {
hdmi->colorspace = val;
return 0;
}
DRM_ERROR("failed to set hdmi connector property\n");
return -EINVAL;
}
static int
sti_hdmi_connector_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val)
{
struct sti_hdmi_connector *hdmi_connector
= to_sti_hdmi_connector(connector);
struct sti_hdmi *hdmi = hdmi_connector->hdmi;
if (property == hdmi_connector->colorspace_property) {
*val = hdmi->colorspace;
return 0;
}
DRM_ERROR("failed to get hdmi connector property\n");
return -EINVAL;
}
static int sti_hdmi_late_register(struct drm_connector *connector)
{
struct sti_hdmi_connector *hdmi_connector
= to_sti_hdmi_connector(connector);
struct sti_hdmi *hdmi = hdmi_connector->hdmi;
hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary);
return 0;
}
static const struct drm_connector_funcs sti_hdmi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_hdmi_connector_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_set_property = sti_hdmi_connector_set_property,
.atomic_get_property = sti_hdmi_connector_get_property,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.late_register = sti_hdmi_late_register,
};
static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
{
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
}
return NULL;
}
static void hdmi_audio_shutdown(struct device *dev, void *data)
{
struct sti_hdmi *hdmi = dev_get_drvdata(dev);
int audio_cfg;
DRM_DEBUG_DRIVER("\n");
/* disable audio */
audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
HDMI_AUD_CFG_ONE_BIT_INVALID;
hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
hdmi->audio.enabled = false;
hdmi_audio_infoframe_config(hdmi);
}
static int hdmi_audio_hw_params(struct device *dev,
void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
struct sti_hdmi *hdmi = dev_get_drvdata(dev);
int ret;
DRM_DEBUG_DRIVER("\n");
if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
daifmt->frame_clk_inv || daifmt->bit_clk_provider ||
daifmt->frame_clk_provider) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
daifmt->bit_clk_provider,
daifmt->frame_clk_provider);
return -EINVAL;
}
hdmi->audio.sample_width = params->sample_width;
hdmi->audio.sample_rate = params->sample_rate;
hdmi->audio.cea = params->cea;
hdmi->audio.enabled = true;
ret = hdmi_audio_configure(hdmi);
if (ret < 0)
return ret;
return 0;
}
static int hdmi_audio_mute(struct device *dev, void *data,
bool enable, int direction)
{
struct sti_hdmi *hdmi = dev_get_drvdata(dev);
DRM_DEBUG_DRIVER("%s\n", enable ? "enable" : "disable");
if (enable)
hdmi_write(hdmi, HDMI_SAMPLE_FLAT_ALL, HDMI_SAMPLE_FLAT_MASK);
else
hdmi_write(hdmi, HDMI_SAMPLE_FLAT_NO, HDMI_SAMPLE_FLAT_MASK);
return 0;
}
static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len)
{
struct sti_hdmi *hdmi = dev_get_drvdata(dev);
struct drm_connector *connector = hdmi->drm_connector;
DRM_DEBUG_DRIVER("\n");
memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
return 0;
}
static const struct hdmi_codec_ops audio_codec_ops = {
.hw_params = hdmi_audio_hw_params,
.audio_shutdown = hdmi_audio_shutdown,
.mute_stream = hdmi_audio_mute,
.get_eld = hdmi_audio_get_eld,
.no_capture_mute = 1,
};
static int sti_hdmi_register_audio_driver(struct device *dev,
struct sti_hdmi *hdmi)
{
struct hdmi_codec_pdata codec_data = {
.ops = &audio_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
};
DRM_DEBUG_DRIVER("\n");
hdmi->audio.enabled = false;
hdmi->audio_pdev = platform_device_register_data(
dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
&codec_data, sizeof(codec_data));
if (IS_ERR(hdmi->audio_pdev))
return PTR_ERR(hdmi->audio_pdev);
DRM_INFO("%s Driver bound %s\n", HDMI_CODEC_DRV_NAME, dev_name(dev));
return 0;
}
static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct sti_hdmi *hdmi = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct drm_encoder *encoder;
struct sti_hdmi_connector *connector;
struct cec_connector_info conn_info;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
hdmi->drm_dev = drm_dev;
encoder = sti_hdmi_find_encoder(drm_dev);
if (!encoder)
return -EINVAL;
connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
if (!connector)
return -EINVAL;
connector->hdmi = hdmi;
bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return -EINVAL;
bridge->driver_private = hdmi;
bridge->funcs = &sti_hdmi_bridge_funcs;
drm_bridge_attach(encoder, bridge, NULL, 0);
connector->encoder = encoder;
drm_connector = (struct drm_connector *)connector;
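/*
* struct drm_connector is the first member of struct sti_hdmi_connector,
* so this cast is equivalent to &connector->drm_connector.
*/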
drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_connector_init_with_ddc(drm_dev, drm_connector,
&sti_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA,
hdmi->ddc_adapt);
drm_connector_helper_add(drm_connector,
&sti_hdmi_connector_helper_funcs);
/* initialise property */
sti_hdmi_connector_init_property(drm_dev, drm_connector);
hdmi->drm_connector = drm_connector;
err = drm_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
}
err = sti_hdmi_register_audio_driver(dev, hdmi);
if (err) {
DRM_ERROR("Failed to attach an audio codec\n");
goto err_sysfs;
}
/* Initialize audio infoframe */
err = hdmi_audio_infoframe_init(&hdmi->audio.cea);
if (err) {
DRM_ERROR("Failed to init audio infoframe\n");
goto err_sysfs;
}
cec_fill_conn_info_from_drm(&conn_info, drm_connector);
hdmi->notifier = cec_notifier_conn_register(&hdmi->dev, NULL,
&conn_info);
if (!hdmi->notifier) {
hdmi->drm_connector = NULL;
return -ENOMEM;
}
/* Enable default interrupts */
hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
return 0;
err_sysfs:
hdmi->drm_connector = NULL;
return -EINVAL;
}
static void sti_hdmi_unbind(struct device *dev,
struct device *master, void *data)
{
struct sti_hdmi *hdmi = dev_get_drvdata(dev);
cec_notifier_conn_unregister(hdmi->notifier);
}
static const struct component_ops sti_hdmi_ops = {
.bind = sti_hdmi_bind,
.unbind = sti_hdmi_unbind,
};
static const struct of_device_id hdmi_of_match[] = {
{
.compatible = "st,stih407-hdmi",
.data = &tx3g4c28phy_ops,
}, {
/* end node */
}
};
MODULE_DEVICE_TABLE(of, hdmi_of_match);
static int sti_hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_hdmi *hdmi;
struct device_node *np = dev->of_node;
struct resource *res;
struct device_node *ddc;
int ret;
DRM_INFO("%s\n", __func__);
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);
if (ddc) {
hdmi->ddc_adapt = of_get_i2c_adapter_by_node(ddc);
of_node_put(ddc);
if (!hdmi->ddc_adapt)
return -EPROBE_DEFER;
}
hdmi->dev = pdev->dev;
/* Get resources */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi-reg");
if (!res) {
DRM_ERROR("Invalid hdmi resource\n");
ret = -ENOMEM;
goto release_adapter;
}
hdmi->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!hdmi->regs) {
ret = -ENOMEM;
goto release_adapter;
}
hdmi->phy_ops = (struct hdmi_phy_ops *)
of_match_node(hdmi_of_match, np)->data;
/* Get clock resources */
hdmi->clk_pix = devm_clk_get(dev, "pix");
if (IS_ERR(hdmi->clk_pix)) {
DRM_ERROR("Cannot get hdmi_pix clock\n");
ret = PTR_ERR(hdmi->clk_pix);
goto release_adapter;
}
hdmi->clk_tmds = devm_clk_get(dev, "tmds");
if (IS_ERR(hdmi->clk_tmds)) {
DRM_ERROR("Cannot get hdmi_tmds clock\n");
ret = PTR_ERR(hdmi->clk_tmds);
goto release_adapter;
}
hdmi->clk_phy = devm_clk_get(dev, "phy");
if (IS_ERR(hdmi->clk_phy)) {
DRM_ERROR("Cannot get hdmi_phy clock\n");
ret = PTR_ERR(hdmi->clk_phy);
goto release_adapter;
}
hdmi->clk_audio = devm_clk_get(dev, "audio");
if (IS_ERR(hdmi->clk_audio)) {
DRM_ERROR("Cannot get hdmi_audio clock\n");
ret = PTR_ERR(hdmi->clk_audio);
goto release_adapter;
}
hdmi->hpd = readl(hdmi->regs + HDMI_STA) & HDMI_STA_HOT_PLUG;
init_waitqueue_head(&hdmi->wait_event);
hdmi->irq = platform_get_irq_byname(pdev, "irq");
if (hdmi->irq < 0) {
DRM_ERROR("Cannot get HDMI irq\n");
ret = hdmi->irq;
goto release_adapter;
}
ret = devm_request_threaded_irq(dev, hdmi->irq, hdmi_irq,
hdmi_irq_thread, IRQF_ONESHOT, dev_name(dev), hdmi);
if (ret) {
DRM_ERROR("Failed to register HDMI interrupt\n");
goto release_adapter;
}
hdmi->reset = devm_reset_control_get(dev, "hdmi");
/* Take hdmi out of reset */
if (!IS_ERR(hdmi->reset))
reset_control_deassert(hdmi->reset);
platform_set_drvdata(pdev, hdmi);
return component_add(&pdev->dev, &sti_hdmi_ops);
release_adapter:
i2c_put_adapter(hdmi->ddc_adapt);
return ret;
}
static void sti_hdmi_remove(struct platform_device *pdev)
{
struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
i2c_put_adapter(hdmi->ddc_adapt);
if (hdmi->audio_pdev)
platform_device_unregister(hdmi->audio_pdev);
component_del(&pdev->dev, &sti_hdmi_ops);
}
struct platform_driver sti_hdmi_driver = {
.driver = {
.name = "sti-hdmi",
.owner = THIS_MODULE,
.of_match_table = hdmi_of_match,
},
.probe = sti_hdmi_probe,
.remove_new = sti_hdmi_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sti/sti_hdmi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <[email protected]>
* Fabien Dessenne <[email protected]>
* for STMicroelectronics.
*/
#include <linux/component.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "sti_compositor.h"
#include "sti_crtc.h"
#include "sti_cursor.h"
#include "sti_drv.h"
#include "sti_gdp.h"
#include "sti_plane.h"
#include "sti_vid.h"
#include "sti_vtg.h"
/*
* stiH407 compositor properties
*/
static const struct sti_compositor_data stih407_compositor_data = {
.nb_subdev = 8,
.subdev_desc = {
{STI_CURSOR_SUBDEV, (int)STI_CURSOR, 0x000},
{STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
{STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
{STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
{STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
{STI_VID_SUBDEV, (int)STI_HQVDP_0, 0x700},
{STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
{STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
},
};
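/*
* Each entry describes one subdevice: its type, its plane/mixer id and the
* offset of its registers within the compositor aperture, e.g. the GDP_1
* registers are mapped at compo->regs + 0x200.
*/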
void sti_compositor_debugfs_init(struct sti_compositor *compo,
struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < STI_MAX_VID; i++)
if (compo->vid[i])
vid_debugfs_init(compo->vid[i], minor);
for (i = 0; i < STI_MAX_MIXER; i++)
if (compo->mixer[i])
sti_mixer_debugfs_init(compo->mixer[i], minor);
}
static int sti_compositor_bind(struct device *dev,
struct device *master,
void *data)
{
struct sti_compositor *compo = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0;
struct sti_private *dev_priv = drm_dev->dev_private;
struct drm_plane *cursor = NULL;
struct drm_plane *primary = NULL;
struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc;
unsigned int array_size = compo->data.nb_subdev;
dev_priv->compo = compo;
/* Register mixer subdev and video subdev first */
for (i = 0; i < array_size; i++) {
switch (desc[i].type) {
case STI_VID_SUBDEV:
compo->vid[vid_id++] =
sti_vid_create(compo->dev, drm_dev, desc[i].id,
compo->regs + desc[i].offset);
break;
case STI_MIXER_MAIN_SUBDEV:
case STI_MIXER_AUX_SUBDEV:
compo->mixer[mixer_id++] =
sti_mixer_create(compo->dev, drm_dev, desc[i].id,
compo->regs + desc[i].offset);
break;
case STI_GPD_SUBDEV:
case STI_CURSOR_SUBDEV:
/* Nothing to do, wait for the second round */
break;
default:
DRM_ERROR("Unknown subdev component type\n");
return 1;
}
}
/* Register the other subdevs, create crtc and planes */
for (i = 0; i < array_size; i++) {
enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
if (crtc_id < mixer_id)
plane_type = DRM_PLANE_TYPE_PRIMARY;
switch (desc[i].type) {
case STI_MIXER_MAIN_SUBDEV:
case STI_MIXER_AUX_SUBDEV:
case STI_VID_SUBDEV:
/* Nothing to do, already done at the first round */
break;
case STI_CURSOR_SUBDEV:
cursor = sti_cursor_create(drm_dev, compo->dev,
desc[i].id,
compo->regs + desc[i].offset,
1);
if (!cursor) {
DRM_ERROR("Can't create CURSOR plane\n");
break;
}
break;
case STI_GPD_SUBDEV:
primary = sti_gdp_create(drm_dev, compo->dev,
desc[i].id,
compo->regs + desc[i].offset,
(1 << mixer_id) - 1,
plane_type);
if (!primary) {
DRM_ERROR("Can't create GDP plane\n");
break;
}
break;
default:
DRM_ERROR("Unknown subdev component type\n");
return 1;
}
/* The first planes are reserved for primary planes */
if (crtc_id < mixer_id && primary) {
sti_crtc_init(drm_dev, compo->mixer[crtc_id],
primary, cursor);
crtc_id++;
cursor = NULL;
primary = NULL;
}
}
drm_vblank_init(drm_dev, crtc_id);
return 0;
}
static void sti_compositor_unbind(struct device *dev, struct device *master,
void *data)
{
/* do nothing */
}
static const struct component_ops sti_compositor_ops = {
.bind = sti_compositor_bind,
.unbind = sti_compositor_unbind,
};
static const struct of_device_id compositor_of_match[] = {
{
.compatible = "st,stih407-compositor",
.data = &stih407_compositor_data,
}, {
/* end node */
}
};
MODULE_DEVICE_TABLE(of, compositor_of_match);
static int sti_compositor_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *vtg_np;
struct sti_compositor *compo;
struct resource *res;
unsigned int i;
compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
if (!compo) {
DRM_ERROR("Failed to allocate compositor context\n");
return -ENOMEM;
}
compo->dev = dev;
for (i = 0; i < STI_MAX_MIXER; i++)
compo->vtg_vblank_nb[i].notifier_call = sti_crtc_vblank_cb;
/* populate data structure depending on compatibility */
BUG_ON(!of_match_node(compositor_of_match, np)->data);
memcpy(&compo->data, of_match_node(compositor_of_match, np)->data,
sizeof(struct sti_compositor_data));
/* Get memory resources */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
DRM_ERROR("Get memory resource failed\n");
return -ENXIO;
}
compo->regs = devm_ioremap(dev, res->start, resource_size(res));
if (compo->regs == NULL) {
DRM_ERROR("Register mapping failed\n");
return -ENXIO;
}
/* Get clock resources */
compo->clk_compo_main = devm_clk_get(dev, "compo_main");
if (IS_ERR(compo->clk_compo_main)) {
DRM_ERROR("Cannot get compo_main clock\n");
return PTR_ERR(compo->clk_compo_main);
}
compo->clk_compo_aux = devm_clk_get(dev, "compo_aux");
if (IS_ERR(compo->clk_compo_aux)) {
DRM_ERROR("Cannot get compo_aux clock\n");
return PTR_ERR(compo->clk_compo_aux);
}
compo->clk_pix_main = devm_clk_get(dev, "pix_main");
if (IS_ERR(compo->clk_pix_main)) {
DRM_ERROR("Cannot get pix_main clock\n");
return PTR_ERR(compo->clk_pix_main);
}
compo->clk_pix_aux = devm_clk_get(dev, "pix_aux");
if (IS_ERR(compo->clk_pix_aux)) {
DRM_ERROR("Cannot get pix_aux clock\n");
return PTR_ERR(compo->clk_pix_aux);
}
/* Get reset resources */
compo->rst_main = devm_reset_control_get_shared(dev, "compo-main");
/* Take compo main out of reset */
if (!IS_ERR(compo->rst_main))
reset_control_deassert(compo->rst_main);
compo->rst_aux = devm_reset_control_get_shared(dev, "compo-aux");
/* Take compo aux out of reset */
if (!IS_ERR(compo->rst_aux))
reset_control_deassert(compo->rst_aux);
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
if (vtg_np)
compo->vtg[STI_MIXER_MAIN] = of_vtg_find(vtg_np);
of_node_put(vtg_np);
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
if (vtg_np)
compo->vtg[STI_MIXER_AUX] = of_vtg_find(vtg_np);
of_node_put(vtg_np);
platform_set_drvdata(pdev, compo);
return component_add(&pdev->dev, &sti_compositor_ops);
}
static void sti_compositor_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sti_compositor_ops);
}
struct platform_driver sti_compositor_driver = {
.driver = {
.name = "sti-compositor",
.of_match_table = compositor_of_match,
},
.probe = sti_compositor_probe,
.remove_new = sti_compositor_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sti/sti_compositor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Fabien Dessenne <[email protected]> for STMicroelectronics.
*/
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/seq_file.h>
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_drv.h"
#include "sti_hqvdp_lut.h"
#include "sti_plane.h"
#include "sti_vtg.h"
/* Firmware name */
#define HQVDP_FMW_NAME "hqvdp-stih407.bin"
/* Regs address */
#define HQVDP_DMEM 0x00000000 /* 0x00000000 */
#define HQVDP_PMEM 0x00040000 /* 0x00040000 */
#define HQVDP_RD_PLUG 0x000E0000 /* 0x000E0000 */
#define HQVDP_RD_PLUG_CONTROL (HQVDP_RD_PLUG + 0x1000) /* 0x000E1000 */
#define HQVDP_RD_PLUG_PAGE_SIZE (HQVDP_RD_PLUG + 0x1004) /* 0x000E1004 */
#define HQVDP_RD_PLUG_MIN_OPC (HQVDP_RD_PLUG + 0x1008) /* 0x000E1008 */
#define HQVDP_RD_PLUG_MAX_OPC (HQVDP_RD_PLUG + 0x100C) /* 0x000E100C */
#define HQVDP_RD_PLUG_MAX_CHK (HQVDP_RD_PLUG + 0x1010) /* 0x000E1010 */
#define HQVDP_RD_PLUG_MAX_MSG (HQVDP_RD_PLUG + 0x1014) /* 0x000E1014 */
#define HQVDP_RD_PLUG_MIN_SPACE (HQVDP_RD_PLUG + 0x1018) /* 0x000E1018 */
#define HQVDP_WR_PLUG 0x000E2000 /* 0x000E2000 */
#define HQVDP_WR_PLUG_CONTROL (HQVDP_WR_PLUG + 0x1000) /* 0x000E3000 */
#define HQVDP_WR_PLUG_PAGE_SIZE (HQVDP_WR_PLUG + 0x1004) /* 0x000E3004 */
#define HQVDP_WR_PLUG_MIN_OPC (HQVDP_WR_PLUG + 0x1008) /* 0x000E3008 */
#define HQVDP_WR_PLUG_MAX_OPC (HQVDP_WR_PLUG + 0x100C) /* 0x000E300C */
#define HQVDP_WR_PLUG_MAX_CHK (HQVDP_WR_PLUG + 0x1010) /* 0x000E3010 */
#define HQVDP_WR_PLUG_MAX_MSG (HQVDP_WR_PLUG + 0x1014) /* 0x000E3014 */
#define HQVDP_WR_PLUG_MIN_SPACE (HQVDP_WR_PLUG + 0x1018) /* 0x000E3018 */
#define HQVDP_MBX 0x000E4000 /* 0x000E4000 */
#define HQVDP_MBX_IRQ_TO_XP70 (HQVDP_MBX + 0x0000) /* 0x000E4000 */
#define HQVDP_MBX_INFO_HOST (HQVDP_MBX + 0x0004) /* 0x000E4004 */
#define HQVDP_MBX_IRQ_TO_HOST (HQVDP_MBX + 0x0008) /* 0x000E4008 */
#define HQVDP_MBX_INFO_XP70 (HQVDP_MBX + 0x000C) /* 0x000E400C */
#define HQVDP_MBX_SW_RESET_CTRL (HQVDP_MBX + 0x0010) /* 0x000E4010 */
#define HQVDP_MBX_STARTUP_CTRL1 (HQVDP_MBX + 0x0014) /* 0x000E4014 */
#define HQVDP_MBX_STARTUP_CTRL2 (HQVDP_MBX + 0x0018) /* 0x000E4018 */
#define HQVDP_MBX_GP_STATUS (HQVDP_MBX + 0x001C) /* 0x000E401C */
#define HQVDP_MBX_NEXT_CMD (HQVDP_MBX + 0x0020) /* 0x000E4020 */
#define HQVDP_MBX_CURRENT_CMD (HQVDP_MBX + 0x0024) /* 0x000E4024 */
#define HQVDP_MBX_SOFT_VSYNC (HQVDP_MBX + 0x0028) /* 0x000E4028 */
/* Plugs config */
#define PLUG_CONTROL_ENABLE 0x00000001
#define PLUG_PAGE_SIZE_256 0x00000002
#define PLUG_MIN_OPC_8 0x00000003
#define PLUG_MAX_OPC_64 0x00000006
#define PLUG_MAX_CHK_2X 0x00000001
#define PLUG_MAX_MSG_1X 0x00000000
#define PLUG_MIN_SPACE_1 0x00000000
/* SW reset CTRL */
#define SW_RESET_CTRL_FULL BIT(0)
#define SW_RESET_CTRL_CORE BIT(1)
/* Startup ctrl 1 */
#define STARTUP_CTRL1_RST_DONE BIT(0)
#define STARTUP_CTRL1_AUTH_IDLE BIT(2)
/* Startup ctrl 2 */
#define STARTUP_CTRL2_FETCH_EN BIT(1)
/* Info xP70 */
#define INFO_XP70_FW_READY BIT(15)
#define INFO_XP70_FW_PROCESSING BIT(14)
#define INFO_XP70_FW_INITQUEUES BIT(13)
/* SOFT_VSYNC */
#define SOFT_VSYNC_HW 0x00000000
#define SOFT_VSYNC_SW_CMD 0x00000001
#define SOFT_VSYNC_SW_CTRL_IRQ 0x00000003
/* Reset & boot poll config */
#define POLL_MAX_ATTEMPT 50
#define POLL_DELAY_MS 20
#define SCALE_FACTOR 8192
#define SCALE_MAX_FOR_LEG_LUT_F 4096
#define SCALE_MAX_FOR_LEG_LUT_E 4915
#define SCALE_MAX_FOR_LEG_LUT_D 6654
#define SCALE_MAX_FOR_LEG_LUT_C 8192
enum sti_hvsrc_orient {
HVSRC_HORI,
HVSRC_VERT
};
/* Command structures */
struct sti_hqvdp_top {
u32 config;
u32 mem_format;
u32 current_luma;
u32 current_enh_luma;
u32 current_right_luma;
u32 current_enh_right_luma;
u32 current_chroma;
u32 current_enh_chroma;
u32 current_right_chroma;
u32 current_enh_right_chroma;
u32 output_luma;
u32 output_chroma;
u32 luma_src_pitch;
u32 luma_enh_src_pitch;
u32 luma_right_src_pitch;
u32 luma_enh_right_src_pitch;
u32 chroma_src_pitch;
u32 chroma_enh_src_pitch;
u32 chroma_right_src_pitch;
u32 chroma_enh_right_src_pitch;
u32 luma_processed_pitch;
u32 chroma_processed_pitch;
u32 input_frame_size;
u32 input_viewport_ori;
u32 input_viewport_ori_right;
u32 input_viewport_size;
u32 left_view_border_width;
u32 right_view_border_width;
u32 left_view_3d_offset_width;
u32 right_view_3d_offset_width;
u32 side_stripe_color;
u32 crc_reset_ctrl;
};
/* Configs for interlaced : no IT, no pass thru, 3 fields */
#define TOP_CONFIG_INTER_BTM 0x00000000
#define TOP_CONFIG_INTER_TOP 0x00000002
/* Config for progressive : no IT, no pass thru, 3 fields */
#define TOP_CONFIG_PROGRESSIVE 0x00000001
/* Default MemFormat: in=420_raster_dual out=444_raster;opaque Mem2Tv mode */
#define TOP_MEM_FORMAT_DFLT 0x00018060
/* Min/Max size */
#define MAX_WIDTH 0x1FFF
#define MAX_HEIGHT 0x0FFF
#define MIN_WIDTH 0x0030
#define MIN_HEIGHT 0x0010
struct sti_hqvdp_vc1re {
u32 ctrl_prv_csdi;
u32 ctrl_cur_csdi;
u32 ctrl_nxt_csdi;
u32 ctrl_cur_fmd;
u32 ctrl_nxt_fmd;
};
struct sti_hqvdp_fmd {
u32 config;
u32 viewport_ori;
u32 viewport_size;
u32 next_next_luma;
u32 next_next_right_luma;
u32 next_next_next_luma;
u32 next_next_next_right_luma;
u32 threshold_scd;
u32 threshold_rfd;
u32 threshold_move;
u32 threshold_cfd;
};
struct sti_hqvdp_csdi {
u32 config;
u32 config2;
u32 dcdi_config;
u32 prev_luma;
u32 prev_enh_luma;
u32 prev_right_luma;
u32 prev_enh_right_luma;
u32 next_luma;
u32 next_enh_luma;
u32 next_right_luma;
u32 next_enh_right_luma;
u32 prev_chroma;
u32 prev_enh_chroma;
u32 prev_right_chroma;
u32 prev_enh_right_chroma;
u32 next_chroma;
u32 next_enh_chroma;
u32 next_right_chroma;
u32 next_enh_right_chroma;
u32 prev_motion;
u32 prev_right_motion;
u32 cur_motion;
u32 cur_right_motion;
u32 next_motion;
u32 next_right_motion;
};
/* Config for progressive: by pass */
#define CSDI_CONFIG_PROG 0x00000000
/* Config for directional deinterlacing without motion */
#define CSDI_CONFIG_INTER_DIR 0x00000016
/* Additional configs for fader, blender, motion,... deinterlace algorithms */
#define CSDI_CONFIG2_DFLT 0x000001B3
#define CSDI_DCDI_CONFIG_DFLT 0x00203803
struct sti_hqvdp_hvsrc {
u32 hor_panoramic_ctrl;
u32 output_picture_size;
u32 init_horizontal;
u32 init_vertical;
u32 param_ctrl;
u32 yh_coef[NB_COEF];
u32 ch_coef[NB_COEF];
u32 yv_coef[NB_COEF];
u32 cv_coef[NB_COEF];
u32 hori_shift;
u32 vert_shift;
};
/* Default ParamCtrl: all controls enabled */
#define HVSRC_PARAM_CTRL_DFLT 0xFFFFFFFF
struct sti_hqvdp_iqi {
u32 config;
u32 demo_wind_size;
u32 pk_config;
u32 coeff0_coeff1;
u32 coeff2_coeff3;
u32 coeff4;
u32 pk_lut;
u32 pk_gain;
u32 pk_coring_level;
u32 cti_config;
u32 le_config;
u32 le_lut[64];
u32 con_bri;
u32 sat_gain;
u32 pxf_conf;
u32 default_color;
};
/* Default Config : IQI bypassed */
#define IQI_CONFIG_DFLT 0x00000001
/* Default Contrast & Brightness gain = 256 */
#define IQI_CON_BRI_DFLT 0x00000100
/* Default Saturation gain = 256 */
#define IQI_SAT_GAIN_DFLT 0x00000100
/* Default PxfConf : P2I bypassed */
#define IQI_PXF_CONF_DFLT 0x00000001
struct sti_hqvdp_top_status {
u32 processing_time;
u32 input_y_crc;
u32 input_uv_crc;
};
struct sti_hqvdp_fmd_status {
u32 fmd_repeat_move_status;
u32 fmd_scene_count_status;
u32 cfd_sum;
u32 field_sum;
u32 next_y_fmd_crc;
u32 next_next_y_fmd_crc;
u32 next_next_next_y_fmd_crc;
};
struct sti_hqvdp_csdi_status {
u32 prev_y_csdi_crc;
u32 cur_y_csdi_crc;
u32 next_y_csdi_crc;
u32 prev_uv_csdi_crc;
u32 cur_uv_csdi_crc;
u32 next_uv_csdi_crc;
u32 y_csdi_crc;
u32 uv_csdi_crc;
u32 uv_cup_crc;
u32 mot_csdi_crc;
u32 mot_cur_csdi_crc;
u32 mot_prev_csdi_crc;
};
struct sti_hqvdp_hvsrc_status {
u32 y_hvsrc_crc;
u32 u_hvsrc_crc;
u32 v_hvsrc_crc;
};
struct sti_hqvdp_iqi_status {
u32 pxf_it_status;
u32 y_iqi_crc;
u32 u_iqi_crc;
u32 v_iqi_crc;
};
/* Main commands. We use 2 commands: one being processed by the firmware and
 * one ready to be fetched upon the next Vsync.
 */
#define NB_VDP_CMD 2
struct sti_hqvdp_cmd {
struct sti_hqvdp_top top;
struct sti_hqvdp_vc1re vc1re;
struct sti_hqvdp_fmd fmd;
struct sti_hqvdp_csdi csdi;
struct sti_hqvdp_hvsrc hvsrc;
struct sti_hqvdp_iqi iqi;
struct sti_hqvdp_top_status top_status;
struct sti_hqvdp_fmd_status fmd_status;
struct sti_hqvdp_csdi_status csdi_status;
struct sti_hqvdp_hvsrc_status hvsrc_status;
struct sti_hqvdp_iqi_status iqi_status;
};
/*
* STI HQVDP structure
*
* @dev: driver device
* @drm_dev: the drm device
* @regs: registers
 * @plane: plane structure for hqvdp itself
* @clk: IP clock
* @clk_pix_main: pix main clock
* @reset: reset control
* @vtg_nb: notifier to handle VTG Vsync
* @btm_field_pending: is there any bottom field (interlaced frame) to display
* @hqvdp_cmd: buffer of commands
* @hqvdp_cmd_paddr: physical address of hqvdp_cmd
* @vtg: vtg for main data path
* @xp70_initialized: true if xp70 is already initialized
* @vtg_registered: true if registered to VTG
*/
struct sti_hqvdp {
struct device *dev;
struct drm_device *drm_dev;
void __iomem *regs;
struct sti_plane plane;
struct clk *clk;
struct clk *clk_pix_main;
struct reset_control *reset;
struct notifier_block vtg_nb;
bool btm_field_pending;
void *hqvdp_cmd;
u32 hqvdp_cmd_paddr;
struct sti_vtg *vtg;
bool xp70_initialized;
bool vtg_registered;
};
#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
static const uint32_t hqvdp_supported_formats[] = {
DRM_FORMAT_NV12,
};
/**
* sti_hqvdp_get_free_cmd
* @hqvdp: hqvdp structure
*
* Look for a hqvdp_cmd that is not being used (or about to be used) by the FW.
*
* RETURNS:
* the offset of the command to be used.
* -1 in error cases
*/
static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
{
u32 curr_cmd, next_cmd;
u32 cmd = hqvdp->hqvdp_cmd_paddr;
int i;
curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
for (i = 0; i < NB_VDP_CMD; i++) {
if ((cmd != curr_cmd) && (cmd != next_cmd))
return i * sizeof(struct sti_hqvdp_cmd);
cmd += sizeof(struct sti_hqvdp_cmd);
}
return -1;
}
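/*
 * Illustrative note (not part of the original driver): with NB_VDP_CMD == 2,
 * the command buffer holds two slots, at hqvdp_cmd_paddr and at
 * hqvdp_cmd_paddr + sizeof(struct sti_hqvdp_cmd). A slot is considered free
 * only when its physical address matches neither HQVDP_MBX_CURRENT_CMD (the
 * command being processed by the firmware) nor HQVDP_MBX_NEXT_CMD (the
 * command queued for the next Vsync), so the driver ping-pongs between the
 * two slots.
 */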
/**
* sti_hqvdp_get_curr_cmd
* @hqvdp: hqvdp structure
*
* Look for the hqvdp_cmd that is being used by the FW.
*
* RETURNS:
 * the offset of the command currently used by the firmware.
* -1 in error cases
*/
static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
{
u32 curr_cmd;
u32 cmd = hqvdp->hqvdp_cmd_paddr;
unsigned int i;
curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
for (i = 0; i < NB_VDP_CMD; i++) {
if (cmd == curr_cmd)
return i * sizeof(struct sti_hqvdp_cmd);
cmd += sizeof(struct sti_hqvdp_cmd);
}
return -1;
}
/**
* sti_hqvdp_get_next_cmd
* @hqvdp: hqvdp structure
*
* Look for the next hqvdp_cmd that will be used by the FW.
*
* RETURNS:
* the offset of the next command that will be used.
* -1 in error cases
*/
static int sti_hqvdp_get_next_cmd(struct sti_hqvdp *hqvdp)
{
int next_cmd;
dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
unsigned int i;
next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
for (i = 0; i < NB_VDP_CMD; i++) {
if (cmd == next_cmd)
return i * sizeof(struct sti_hqvdp_cmd);
cmd += sizeof(struct sti_hqvdp_cmd);
}
return -1;
}
#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
readl(hqvdp->regs + reg))
static const char *hqvdp_dbg_get_lut(u32 *coef)
{
if (!memcmp(coef, coef_lut_a_legacy, 16))
return "LUT A";
if (!memcmp(coef, coef_lut_b, 16))
return "LUT B";
if (!memcmp(coef, coef_lut_c_y_legacy, 16))
return "LUT C Y";
if (!memcmp(coef, coef_lut_c_c_legacy, 16))
return "LUT C C";
if (!memcmp(coef, coef_lut_d_y_legacy, 16))
return "LUT D Y";
if (!memcmp(coef, coef_lut_d_c_legacy, 16))
return "LUT D C";
if (!memcmp(coef, coef_lut_e_y_legacy, 16))
return "LUT E Y";
if (!memcmp(coef, coef_lut_e_c_legacy, 16))
return "LUT E C";
if (!memcmp(coef, coef_lut_f_y_legacy, 16))
return "LUT F Y";
if (!memcmp(coef, coef_lut_f_c_legacy, 16))
return "LUT F C";
return "<UNKNOWN>";
}
static void hqvdp_dbg_dump_cmd(struct seq_file *s, struct sti_hqvdp_cmd *c)
{
int src_w, src_h, dst_w, dst_h;
seq_puts(s, "\n\tTOP:");
seq_printf(s, "\n\t %-20s 0x%08X", "Config", c->top.config);
switch (c->top.config) {
case TOP_CONFIG_PROGRESSIVE:
seq_puts(s, "\tProgressive");
break;
case TOP_CONFIG_INTER_TOP:
seq_puts(s, "\tInterlaced, top field");
break;
case TOP_CONFIG_INTER_BTM:
seq_puts(s, "\tInterlaced, bottom field");
break;
default:
seq_puts(s, "\t<UNKNOWN>");
break;
}
seq_printf(s, "\n\t %-20s 0x%08X", "MemFormat", c->top.mem_format);
seq_printf(s, "\n\t %-20s 0x%08X", "CurrentY", c->top.current_luma);
seq_printf(s, "\n\t %-20s 0x%08X", "CurrentC", c->top.current_chroma);
seq_printf(s, "\n\t %-20s 0x%08X", "YSrcPitch", c->top.luma_src_pitch);
seq_printf(s, "\n\t %-20s 0x%08X", "CSrcPitch",
c->top.chroma_src_pitch);
seq_printf(s, "\n\t %-20s 0x%08X", "InputFrameSize",
c->top.input_frame_size);
seq_printf(s, "\t%dx%d",
c->top.input_frame_size & 0x0000FFFF,
c->top.input_frame_size >> 16);
seq_printf(s, "\n\t %-20s 0x%08X", "InputViewportSize",
c->top.input_viewport_size);
src_w = c->top.input_viewport_size & 0x0000FFFF;
src_h = c->top.input_viewport_size >> 16;
seq_printf(s, "\t%dx%d", src_w, src_h);
seq_puts(s, "\n\tHVSRC:");
seq_printf(s, "\n\t %-20s 0x%08X", "OutputPictureSize",
c->hvsrc.output_picture_size);
dst_w = c->hvsrc.output_picture_size & 0x0000FFFF;
dst_h = c->hvsrc.output_picture_size >> 16;
seq_printf(s, "\t%dx%d", dst_w, dst_h);
seq_printf(s, "\n\t %-20s 0x%08X", "ParamCtrl", c->hvsrc.param_ctrl);
seq_printf(s, "\n\t %-20s %s", "yh_coef",
hqvdp_dbg_get_lut(c->hvsrc.yh_coef));
seq_printf(s, "\n\t %-20s %s", "ch_coef",
hqvdp_dbg_get_lut(c->hvsrc.ch_coef));
seq_printf(s, "\n\t %-20s %s", "yv_coef",
hqvdp_dbg_get_lut(c->hvsrc.yv_coef));
seq_printf(s, "\n\t %-20s %s", "cv_coef",
hqvdp_dbg_get_lut(c->hvsrc.cv_coef));
seq_printf(s, "\n\t %-20s", "ScaleH");
if (dst_w > src_w)
seq_printf(s, " %d/1", dst_w / src_w);
else
seq_printf(s, " 1/%d", src_w / dst_w);
seq_printf(s, "\n\t %-20s", "tScaleV");
if (dst_h > src_h)
seq_printf(s, " %d/1", dst_h / src_h);
else
seq_printf(s, " 1/%d", src_h / dst_h);
seq_puts(s, "\n\tCSDI:");
seq_printf(s, "\n\t %-20s 0x%08X\t", "Config", c->csdi.config);
switch (c->csdi.config) {
case CSDI_CONFIG_PROG:
seq_puts(s, "Bypass");
break;
case CSDI_CONFIG_INTER_DIR:
seq_puts(s, "Deinterlace, directional");
break;
default:
seq_puts(s, "<UNKNOWN>");
break;
}
seq_printf(s, "\n\t %-20s 0x%08X", "Config2", c->csdi.config2);
seq_printf(s, "\n\t %-20s 0x%08X", "DcdiConfig", c->csdi.dcdi_config);
}
static int hqvdp_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
int cmd, cmd_offset, infoxp70;
void *virt;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&hqvdp->plane), hqvdp->regs);
DBGFS_DUMP(HQVDP_MBX_IRQ_TO_XP70);
DBGFS_DUMP(HQVDP_MBX_INFO_HOST);
DBGFS_DUMP(HQVDP_MBX_IRQ_TO_HOST);
DBGFS_DUMP(HQVDP_MBX_INFO_XP70);
infoxp70 = readl(hqvdp->regs + HQVDP_MBX_INFO_XP70);
seq_puts(s, "\tFirmware state: ");
if (infoxp70 & INFO_XP70_FW_READY)
seq_puts(s, "idle and ready");
else if (infoxp70 & INFO_XP70_FW_PROCESSING)
seq_puts(s, "processing a picture");
else if (infoxp70 & INFO_XP70_FW_INITQUEUES)
seq_puts(s, "programming queues");
else
seq_puts(s, "NOT READY");
DBGFS_DUMP(HQVDP_MBX_SW_RESET_CTRL);
DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL1);
if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
& STARTUP_CTRL1_RST_DONE)
seq_puts(s, "\tReset is done");
else
seq_puts(s, "\tReset is NOT done");
DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL2);
if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2)
& STARTUP_CTRL2_FETCH_EN)
seq_puts(s, "\tFetch is enabled");
else
seq_puts(s, "\tFetch is NOT enabled");
DBGFS_DUMP(HQVDP_MBX_GP_STATUS);
DBGFS_DUMP(HQVDP_MBX_NEXT_CMD);
DBGFS_DUMP(HQVDP_MBX_CURRENT_CMD);
DBGFS_DUMP(HQVDP_MBX_SOFT_VSYNC);
if (!(readl(hqvdp->regs + HQVDP_MBX_SOFT_VSYNC) & 3))
seq_puts(s, "\tHW Vsync");
else
seq_puts(s, "\tSW Vsync ?!?!");
/* Last command */
cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp);
if (cmd_offset == -1) {
seq_puts(s, "\n\n Last command: unknown");
} else {
virt = hqvdp->hqvdp_cmd + cmd_offset;
seq_printf(s, "\n\n Last command: address @ 0x%x (0x%p)",
cmd, virt);
hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
}
/* Next command */
cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
cmd_offset = sti_hqvdp_get_next_cmd(hqvdp);
if (cmd_offset == -1) {
seq_puts(s, "\n\n Next command: unknown");
} else {
virt = hqvdp->hqvdp_cmd + cmd_offset;
seq_printf(s, "\n\n Next command address: @ 0x%x (0x%p)",
cmd, virt);
hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
}
seq_putc(s, '\n');
return 0;
}
static struct drm_info_list hqvdp_debugfs_files[] = {
{ "hqvdp", hqvdp_dbg_show, 0, NULL },
};
static void hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
hqvdp_debugfs_files[i].data = hqvdp;
drm_debugfs_create_files(hqvdp_debugfs_files,
ARRAY_SIZE(hqvdp_debugfs_files),
minor->debugfs_root, minor);
}
/**
* sti_hqvdp_update_hvsrc
* @orient: horizontal or vertical
* @scale: scaling/zoom factor
* @hvsrc: the structure containing the LUT coef
*
* Update the Y and C Lut coef, as well as the shift param
*
* RETURNS:
* None.
*/
static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
struct sti_hqvdp_hvsrc *hvsrc)
{
const int *coef_c, *coef_y;
int shift_c, shift_y;
/* Get the appropriate coef tables */
if (scale < SCALE_MAX_FOR_LEG_LUT_F) {
coef_y = coef_lut_f_y_legacy;
coef_c = coef_lut_f_c_legacy;
shift_y = SHIFT_LUT_F_Y_LEGACY;
shift_c = SHIFT_LUT_F_C_LEGACY;
} else if (scale < SCALE_MAX_FOR_LEG_LUT_E) {
coef_y = coef_lut_e_y_legacy;
coef_c = coef_lut_e_c_legacy;
shift_y = SHIFT_LUT_E_Y_LEGACY;
shift_c = SHIFT_LUT_E_C_LEGACY;
} else if (scale < SCALE_MAX_FOR_LEG_LUT_D) {
coef_y = coef_lut_d_y_legacy;
coef_c = coef_lut_d_c_legacy;
shift_y = SHIFT_LUT_D_Y_LEGACY;
shift_c = SHIFT_LUT_D_C_LEGACY;
} else if (scale < SCALE_MAX_FOR_LEG_LUT_C) {
coef_y = coef_lut_c_y_legacy;
coef_c = coef_lut_c_c_legacy;
shift_y = SHIFT_LUT_C_Y_LEGACY;
shift_c = SHIFT_LUT_C_C_LEGACY;
} else if (scale == SCALE_MAX_FOR_LEG_LUT_C) {
coef_y = coef_c = coef_lut_b;
shift_y = shift_c = SHIFT_LUT_B;
} else {
coef_y = coef_c = coef_lut_a_legacy;
shift_y = shift_c = SHIFT_LUT_A_LEGACY;
}
if (orient == HVSRC_HORI) {
hvsrc->hori_shift = (shift_c << 16) | shift_y;
memcpy(hvsrc->yh_coef, coef_y, sizeof(hvsrc->yh_coef));
memcpy(hvsrc->ch_coef, coef_c, sizeof(hvsrc->ch_coef));
} else {
hvsrc->vert_shift = (shift_c << 16) | shift_y;
memcpy(hvsrc->yv_coef, coef_y, sizeof(hvsrc->yv_coef));
memcpy(hvsrc->cv_coef, coef_c, sizeof(hvsrc->cv_coef));
}
}
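/*
 * Illustrative note (not part of the original driver), showing how the scale
 * factor computed in sti_hqvdp_atomic_update() selects a LUT, given the
 * SCALE_FACTOR / SCALE_MAX_FOR_LEG_LUT_x values defined above:
 *   upscaling, e.g. 960 -> 1920:   scale = 8192 * 1920 / 960 = 16384
 *     -> scale > SCALE_MAX_FOR_LEG_LUT_C  -> LUT A
 *   no scaling:                    scale = 8192
 *     -> scale == SCALE_MAX_FOR_LEG_LUT_C -> LUT B
 *   2x downscaling, e.g. 1920 -> 960: scale = 8192 * 960 / 1920 = 4096
 *     -> SCALE_MAX_FOR_LEG_LUT_F <= scale < SCALE_MAX_FOR_LEG_LUT_E -> LUT E
 */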
/**
* sti_hqvdp_check_hw_scaling
* @hqvdp: hqvdp pointer
* @mode: display mode with timing constraints
* @src_w: source width
* @src_h: source height
* @dst_w: destination width
* @dst_h: destination height
*
* Check if the HW is able to perform the scaling request
* The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
* Zy = OutputHeight / InputHeight
* LFW = (Tx * IPClock) / (MaxNbCycles * Cp)
 * Tx: Total video mode horizontal resolution
 * IPClock: HQVDP IP clock (MHz)
 * MaxNbCycles: max(InputWidth, OutputWidth)
 * Cp: Video mode pixel clock (MHz)
*
* RETURNS:
* True if the HW can scale.
*/
static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
struct drm_display_mode *mode,
int src_w, int src_h,
int dst_w, int dst_h)
{
unsigned long lfw;
unsigned int inv_zy;
lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
lfw /= max(src_w, dst_w) * mode->clock / 1000;
inv_zy = DIV_ROUND_UP(src_h, dst_h);
	return inv_zy <= lfw;
}
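/*
 * Illustrative note (not part of the original driver): worked example of the
 * check above, assuming a 1920x1080p60 mode (htotal = 2200, pixel clock =
 * 148500 kHz), an assumed 400 MHz HQVDP IP clock and a 1080 -> 270 line
 * downscale of a 1920 pixel wide source:
 *   lfw    = 2200 * (400000000 / 1000000) = 880000
 *   lfw   /= max(1920, 1920) * 148500 / 1000 = 285120  ->  lfw = 3
 *   inv_zy = DIV_ROUND_UP(1080, 270) = 4
 * 4 > 3, so this downscale would be rejected as beyond the HW capability.
 */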
/**
* sti_hqvdp_disable
* @hqvdp: hqvdp pointer
*
* Disables the HQVDP plane
*/
static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
{
int i;
DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane));
/* Unregister VTG Vsync callback */
if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
/* Set next cmd to NULL */
writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
& INFO_XP70_FW_READY)
break;
msleep(POLL_DELAY_MS);
}
/* VTG can stop now */
clk_disable_unprepare(hqvdp->clk_pix_main);
if (i == POLL_MAX_ATTEMPT)
DRM_ERROR("XP70 could not revert to idle\n");
hqvdp->plane.status = STI_PLANE_DISABLED;
hqvdp->vtg_registered = false;
}
/**
* sti_hqvdp_vtg_cb
* @nb: notifier block
* @evt: event message
* @data: private data
*
* Handle VTG Vsync event, display pending bottom field
*
* RETURNS:
* 0 on success.
*/
static int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
{
struct sti_hqvdp *hqvdp = container_of(nb, struct sti_hqvdp, vtg_nb);
	int btm_cmd_offset, top_cmd_offset;
struct sti_hqvdp_cmd *btm_cmd, *top_cmd;
if ((evt != VTG_TOP_FIELD_EVENT) && (evt != VTG_BOTTOM_FIELD_EVENT)) {
DRM_DEBUG_DRIVER("Unknown event\n");
return 0;
}
if (hqvdp->plane.status == STI_PLANE_FLUSHING) {
		/* The disable must be synchronized on a vsync event */
DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
sti_plane_to_str(&hqvdp->plane));
sti_hqvdp_disable(hqvdp);
}
if (hqvdp->btm_field_pending) {
/* Create the btm field command from the current one */
btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
		top_cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp);
		if ((btm_cmd_offset == -1) || (top_cmd_offset == -1)) {
DRM_DEBUG_DRIVER("Warning: no cmd, will skip field\n");
return -EBUSY;
}
btm_cmd = hqvdp->hqvdp_cmd + btm_cmd_offset;
		top_cmd = hqvdp->hqvdp_cmd + top_cmd_offset;
memcpy(btm_cmd, top_cmd, sizeof(*btm_cmd));
btm_cmd->top.config = TOP_CONFIG_INTER_BTM;
btm_cmd->top.current_luma +=
btm_cmd->top.luma_src_pitch / 2;
btm_cmd->top.current_chroma +=
btm_cmd->top.chroma_src_pitch / 2;
/* Post the command to mailbox */
writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset,
hqvdp->regs + HQVDP_MBX_NEXT_CMD);
hqvdp->btm_field_pending = false;
dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
__func__, hqvdp->hqvdp_cmd_paddr);
sti_plane_update_fps(&hqvdp->plane, false, true);
}
return 0;
}
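/*
 * Illustrative note (not part of the original driver): for interlaced frame
 * buffers, sti_hqvdp_atomic_update() posts the top field with the luma and
 * chroma pitches doubled, so adding luma_src_pitch / 2 (resp.
 * chroma_src_pitch / 2) to the buffer addresses above shifts them by one
 * original line, i.e. to the first bottom-field line of the same frame.
 */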
static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
{
int size;
dma_addr_t dma_addr;
hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
/* Allocate memory for the VDP commands */
size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
hqvdp->hqvdp_cmd = dma_alloc_wc(hqvdp->dev, size,
&dma_addr,
GFP_KERNEL | GFP_DMA);
if (!hqvdp->hqvdp_cmd) {
DRM_ERROR("Failed to allocate memory for VDP cmd\n");
return;
}
hqvdp->hqvdp_cmd_paddr = (u32)dma_addr;
memset(hqvdp->hqvdp_cmd, 0, size);
}
static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
{
/* Configure Plugs (same for RD & WR) */
writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_RD_PLUG_PAGE_SIZE);
writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_RD_PLUG_MIN_OPC);
writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_RD_PLUG_MAX_OPC);
writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_RD_PLUG_MAX_CHK);
writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_RD_PLUG_MAX_MSG);
writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_RD_PLUG_MIN_SPACE);
writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_RD_PLUG_CONTROL);
writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_WR_PLUG_PAGE_SIZE);
writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_WR_PLUG_MIN_OPC);
writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_WR_PLUG_MAX_OPC);
writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_WR_PLUG_MAX_CHK);
writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_WR_PLUG_MAX_MSG);
writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_WR_PLUG_MIN_SPACE);
writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_WR_PLUG_CONTROL);
}
/**
* sti_hqvdp_start_xp70
* @hqvdp: hqvdp pointer
*
* Run the xP70 initialization sequence
*/
static void sti_hqvdp_start_xp70(struct sti_hqvdp *hqvdp)
{
const struct firmware *firmware;
u32 *fw_rd_plug, *fw_wr_plug, *fw_pmem, *fw_dmem;
u8 *data;
int i;
struct fw_header {
int rd_size;
int wr_size;
int pmem_size;
int dmem_size;
} *header;
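	/*
	 * Illustrative note (not part of the original driver): the firmware
	 * blob is expected to be laid out as the header above followed by
	 * four sections in this order:
	 *   [fw_header][rd plug code][wr plug code][PMEM code][DMEM data]
	 * with the section lengths (in bytes) given by rd_size, wr_size,
	 * pmem_size and dmem_size; the size check below enforces this layout.
	 */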
DRM_DEBUG_DRIVER("\n");
if (hqvdp->xp70_initialized) {
DRM_DEBUG_DRIVER("HQVDP XP70 already initialized\n");
return;
}
/* Request firmware */
if (request_firmware(&firmware, HQVDP_FMW_NAME, hqvdp->dev)) {
DRM_ERROR("Can't get HQVDP firmware\n");
return;
}
/* Check firmware parts */
if (!firmware) {
DRM_ERROR("Firmware not available\n");
return;
}
header = (struct fw_header *)firmware->data;
if (firmware->size < sizeof(*header)) {
DRM_ERROR("Invalid firmware size (%zu)\n", firmware->size);
goto out;
}
if ((sizeof(*header) + header->rd_size + header->wr_size +
header->pmem_size + header->dmem_size) != firmware->size) {
DRM_ERROR("Invalid fmw structure (%zu+%d+%d+%d+%d != %zu)\n",
sizeof(*header), header->rd_size, header->wr_size,
header->pmem_size, header->dmem_size,
firmware->size);
goto out;
}
data = (u8 *)firmware->data;
data += sizeof(*header);
fw_rd_plug = (void *)data;
data += header->rd_size;
fw_wr_plug = (void *)data;
data += header->wr_size;
fw_pmem = (void *)data;
data += header->pmem_size;
fw_dmem = (void *)data;
/* Enable clock */
if (clk_prepare_enable(hqvdp->clk))
DRM_ERROR("Failed to prepare/enable HQVDP clk\n");
/* Reset */
writel(SW_RESET_CTRL_FULL, hqvdp->regs + HQVDP_MBX_SW_RESET_CTRL);
for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
& STARTUP_CTRL1_RST_DONE)
break;
msleep(POLL_DELAY_MS);
}
if (i == POLL_MAX_ATTEMPT) {
DRM_ERROR("Could not reset\n");
clk_disable_unprepare(hqvdp->clk);
goto out;
}
/* Init Read & Write plugs */
for (i = 0; i < header->rd_size / 4; i++)
writel(fw_rd_plug[i], hqvdp->regs + HQVDP_RD_PLUG + i * 4);
for (i = 0; i < header->wr_size / 4; i++)
writel(fw_wr_plug[i], hqvdp->regs + HQVDP_WR_PLUG + i * 4);
sti_hqvdp_init_plugs(hqvdp);
/* Authorize Idle Mode */
writel(STARTUP_CTRL1_AUTH_IDLE, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1);
/* Prevent VTG interruption during the boot */
writel(SOFT_VSYNC_SW_CTRL_IRQ, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
/* Download PMEM & DMEM */
for (i = 0; i < header->pmem_size / 4; i++)
writel(fw_pmem[i], hqvdp->regs + HQVDP_PMEM + i * 4);
for (i = 0; i < header->dmem_size / 4; i++)
writel(fw_dmem[i], hqvdp->regs + HQVDP_DMEM + i * 4);
/* Enable fetch */
writel(STARTUP_CTRL2_FETCH_EN, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2);
/* Wait end of boot */
for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
& INFO_XP70_FW_READY)
break;
msleep(POLL_DELAY_MS);
}
if (i == POLL_MAX_ATTEMPT) {
DRM_ERROR("Could not boot\n");
clk_disable_unprepare(hqvdp->clk);
goto out;
}
/* Launch Vsync */
writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
DRM_INFO("HQVDP XP70 initialized\n");
hqvdp->xp70_initialized = true;
out:
release_firmware(firmware);
}
static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc_state *crtc_state;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
/* no need for further checks if the plane is being disabled */
if (!crtc || !fb)
return 0;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
mode = &crtc_state->mode;
dst_x = new_plane_state->crtc_x;
dst_y = new_plane_state->crtc_y;
dst_w = clamp_val(new_plane_state->crtc_w, 0, mode->hdisplay - dst_x);
dst_h = clamp_val(new_plane_state->crtc_h, 0, mode->vdisplay - dst_y);
	/* src_* values are in 16.16 fixed-point format */
src_x = new_plane_state->src_x >> 16;
src_y = new_plane_state->src_y >> 16;
src_w = new_plane_state->src_w >> 16;
src_h = new_plane_state->src_h >> 16;
if (mode->clock && !sti_hqvdp_check_hw_scaling(hqvdp, mode,
src_w, src_h,
dst_w, dst_h)) {
DRM_ERROR("Scaling beyond HW capabilities\n");
return -EINVAL;
}
if (!drm_fb_dma_get_gem_obj(fb, 0)) {
DRM_ERROR("Can't get DMA GEM object for fb\n");
return -EINVAL;
}
/*
* Input / output size
* Align to upper even value
*/
dst_w = ALIGN(dst_w, 2);
dst_h = ALIGN(dst_h, 2);
if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
(src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
(dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
(dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
src_w, src_h,
dst_w, dst_h);
return -EINVAL;
}
if (!hqvdp->xp70_initialized)
/* Start HQVDP XP70 coprocessor */
sti_hqvdp_start_xp70(hqvdp);
if (!hqvdp->vtg_registered) {
/* Prevent VTG shutdown */
if (clk_prepare_enable(hqvdp->clk_pix_main)) {
DRM_ERROR("Failed to prepare/enable pix main clk\n");
return -EINVAL;
}
/* Register VTG Vsync callback to handle bottom fields */
if (sti_vtg_register_client(hqvdp->vtg,
&hqvdp->vtg_nb,
crtc)) {
DRM_ERROR("Cannot register VTG notifier\n");
clk_disable_unprepare(hqvdp->clk_pix_main);
return -EINVAL;
}
hqvdp->vtg_registered = true;
}
DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
sti_plane_to_str(plane),
dst_w, dst_h, dst_x, dst_y,
src_w, src_h, src_x, src_y);
return 0;
}
static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
drm_plane);
struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
struct drm_crtc *crtc = newstate->crtc;
struct drm_framebuffer *fb = newstate->fb;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
struct drm_gem_dma_object *dma_obj;
struct sti_hqvdp_cmd *cmd;
int scale_h, scale_v;
int cmd_offset;
if (!crtc || !fb)
return;
if ((oldstate->fb == newstate->fb) &&
(oldstate->crtc_x == newstate->crtc_x) &&
(oldstate->crtc_y == newstate->crtc_y) &&
(oldstate->crtc_w == newstate->crtc_w) &&
(oldstate->crtc_h == newstate->crtc_h) &&
(oldstate->src_x == newstate->src_x) &&
(oldstate->src_y == newstate->src_y) &&
(oldstate->src_w == newstate->src_w) &&
(oldstate->src_h == newstate->src_h)) {
/* No change since last update, do not post cmd */
DRM_DEBUG_DRIVER("No change, not posting cmd\n");
plane->status = STI_PLANE_UPDATED;
return;
}
mode = &crtc->mode;
dst_x = newstate->crtc_x;
dst_y = newstate->crtc_y;
dst_w = clamp_val(newstate->crtc_w, 0, mode->hdisplay - dst_x);
dst_h = clamp_val(newstate->crtc_h, 0, mode->vdisplay - dst_y);
	/* src_* values are in 16.16 fixed-point format */
src_x = newstate->src_x >> 16;
src_y = newstate->src_y >> 16;
src_w = newstate->src_w >> 16;
src_h = newstate->src_h >> 16;
cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
if (cmd_offset == -1) {
DRM_DEBUG_DRIVER("Warning: no cmd, will skip frame\n");
return;
}
cmd = hqvdp->hqvdp_cmd + cmd_offset;
/* Static parameters, defaulting to progressive mode */
cmd->top.config = TOP_CONFIG_PROGRESSIVE;
cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
cmd->csdi.config = CSDI_CONFIG_PROG;
	/* VC1RE and FMD bypassed: keep everything set to 0.
	 * IQI/P2I bypassed.
	 */
cmd->iqi.config = IQI_CONFIG_DFLT;
cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->format->format,
(unsigned long) dma_obj->dma_addr);
/* Buffer planes address */
cmd->top.current_luma = (u32) dma_obj->dma_addr + fb->offsets[0];
cmd->top.current_chroma = (u32) dma_obj->dma_addr + fb->offsets[1];
/* Pitches */
cmd->top.luma_processed_pitch = fb->pitches[0];
cmd->top.luma_src_pitch = fb->pitches[0];
cmd->top.chroma_processed_pitch = fb->pitches[1];
cmd->top.chroma_src_pitch = fb->pitches[1];
/* Input / output size
* Align to upper even value */
dst_w = ALIGN(dst_w, 2);
dst_h = ALIGN(dst_h, 2);
cmd->top.input_viewport_size = src_h << 16 | src_w;
cmd->top.input_frame_size = src_h << 16 | src_w;
cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
cmd->top.input_viewport_ori = src_y << 16 | src_x;
/* Handle interlaced */
if (fb->flags & DRM_MODE_FB_INTERLACED) {
/* Top field to display */
cmd->top.config = TOP_CONFIG_INTER_TOP;
/* Update pitches and vert size */
cmd->top.input_frame_size = (src_h / 2) << 16 | src_w;
cmd->top.luma_processed_pitch *= 2;
cmd->top.luma_src_pitch *= 2;
cmd->top.chroma_processed_pitch *= 2;
cmd->top.chroma_src_pitch *= 2;
/* Enable directional deinterlacing processing */
cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
}
/* Update hvsrc lut coef */
scale_h = SCALE_FACTOR * dst_w / src_w;
sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
scale_v = SCALE_FACTOR * dst_h / src_h;
sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
hqvdp->regs + HQVDP_MBX_NEXT_CMD);
/* Interlaced : get ready to display the bottom field at next Vsync */
if (fb->flags & DRM_MODE_FB_INTERLACED)
hqvdp->btm_field_pending = true;
dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
sti_plane_update_fps(plane, true, true);
plane->status = STI_PLANE_UPDATED;
}
static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
if (!oldstate->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
drm_plane->base.id);
return;
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
oldstate->crtc->base.id,
sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
}
static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
.atomic_check = sti_hqvdp_atomic_check,
.atomic_update = sti_hqvdp_atomic_update,
.atomic_disable = sti_hqvdp_atomic_disable,
};
static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
return 0;
}
static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.late_register = sti_hqvdp_late_register,
};
static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
struct device *dev, int desc)
{
struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
int res;
hqvdp->plane.desc = desc;
hqvdp->plane.status = STI_PLANE_DISABLED;
sti_hqvdp_init(hqvdp);
res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
&sti_hqvdp_plane_helpers_funcs,
hqvdp_supported_formats,
ARRAY_SIZE(hqvdp_supported_formats),
NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
if (res) {
DRM_ERROR("Failed to initialize universal plane\n");
return NULL;
}
drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs);
sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
return &hqvdp->plane.drm_plane;
}
static int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
{
struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct drm_plane *plane;
DRM_DEBUG_DRIVER("\n");
hqvdp->drm_dev = drm_dev;
/* Create HQVDP plane once xp70 is initialized */
plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0);
if (!plane)
DRM_ERROR("Can't create HQVDP plane\n");
return 0;
}
static void sti_hqvdp_unbind(struct device *dev,
struct device *master, void *data)
{
/* do nothing */
}
static const struct component_ops sti_hqvdp_ops = {
.bind = sti_hqvdp_bind,
.unbind = sti_hqvdp_unbind,
};
static int sti_hqvdp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *vtg_np;
struct sti_hqvdp *hqvdp;
struct resource *res;
DRM_DEBUG_DRIVER("\n");
hqvdp = devm_kzalloc(dev, sizeof(*hqvdp), GFP_KERNEL);
if (!hqvdp) {
DRM_ERROR("Failed to allocate HQVDP context\n");
return -ENOMEM;
}
hqvdp->dev = dev;
/* Get Memory resources */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
DRM_ERROR("Get memory resource failed\n");
return -ENXIO;
}
hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!hqvdp->regs) {
DRM_ERROR("Register mapping failed\n");
return -ENXIO;
}
/* Get clock resources */
hqvdp->clk = devm_clk_get(dev, "hqvdp");
hqvdp->clk_pix_main = devm_clk_get(dev, "pix_main");
if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk_pix_main)) {
DRM_ERROR("Cannot get clocks\n");
return -ENXIO;
}
/* Get reset resources */
hqvdp->reset = devm_reset_control_get(dev, "hqvdp");
if (!IS_ERR(hqvdp->reset))
reset_control_deassert(hqvdp->reset);
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
if (vtg_np)
hqvdp->vtg = of_vtg_find(vtg_np);
of_node_put(vtg_np);
platform_set_drvdata(pdev, hqvdp);
return component_add(&pdev->dev, &sti_hqvdp_ops);
}
static void sti_hqvdp_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sti_hqvdp_ops);
}
static const struct of_device_id hqvdp_of_match[] = {
{ .compatible = "st,stih407-hqvdp", },
{ /* end node */ }
};
MODULE_DEVICE_TABLE(of, hqvdp_of_match);
struct platform_driver sti_hqvdp_driver = {
.driver = {
.name = "sti-hqvdp",
.owner = THIS_MODULE,
.of_match_table = hqvdp_of_match,
},
.probe = sti_hqvdp_probe,
.remove_new = sti_hqvdp_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sti/sti_hqvdp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <[email protected]>
* Fabien Dessenne <[email protected]>
* Vincent Abriou <[email protected]>
* for STMicroelectronics.
*/
#include <linux/module.h>
#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <drm/drm_modes.h>
#include <drm/drm_print.h>
#include "sti_drv.h"
#include "sti_vtg.h"
#define VTG_MODE_MASTER 0
/* registers offset */
#define VTG_MODE 0x0000
#define VTG_CLKLN 0x0008
#define VTG_HLFLN 0x000C
#define VTG_DRST_AUTOC 0x0010
#define VTG_VID_TFO 0x0040
#define VTG_VID_TFS 0x0044
#define VTG_VID_BFO 0x0048
#define VTG_VID_BFS 0x004C
#define VTG_HOST_ITS 0x0078
#define VTG_HOST_ITS_BCLR 0x007C
#define VTG_HOST_ITM_BCLR 0x0088
#define VTG_HOST_ITM_BSET 0x008C
#define VTG_H_HD_1 0x00C0
#define VTG_TOP_V_VD_1 0x00C4
#define VTG_BOT_V_VD_1 0x00C8
#define VTG_TOP_V_HD_1 0x00CC
#define VTG_BOT_V_HD_1 0x00D0
#define VTG_H_HD_2 0x00E0
#define VTG_TOP_V_VD_2 0x00E4
#define VTG_BOT_V_VD_2 0x00E8
#define VTG_TOP_V_HD_2 0x00EC
#define VTG_BOT_V_HD_2 0x00F0
#define VTG_H_HD_3 0x0100
#define VTG_TOP_V_VD_3 0x0104
#define VTG_BOT_V_VD_3 0x0108
#define VTG_TOP_V_HD_3 0x010C
#define VTG_BOT_V_HD_3 0x0110
#define VTG_H_HD_4 0x0120
#define VTG_TOP_V_VD_4 0x0124
#define VTG_BOT_V_VD_4 0x0128
#define VTG_TOP_V_HD_4 0x012c
#define VTG_BOT_V_HD_4 0x0130
#define VTG_IRQ_BOTTOM BIT(0)
#define VTG_IRQ_TOP BIT(1)
#define VTG_IRQ_MASK (VTG_IRQ_TOP | VTG_IRQ_BOTTOM)
/* Delay introduced by the HDMI, in number of pixels */
#define HDMI_DELAY (5)
/* Delay introduced by the DVO, in number of pixels */
#define DVO_DELAY (7)
/* Delay introduced by the Arbitrary Waveform Generator, in number of pixels */
#define AWG_DELAY_HD (-9)
#define AWG_DELAY_ED (-8)
#define AWG_DELAY_SD (-7)
/*
* STI VTG register offset structure
*
 * @h_hd: stores the VTG_H_HD_x register offset
 * @top_v_vd: stores the VTG_TOP_V_VD_x register offset
 * @bot_v_vd: stores the VTG_BOT_V_VD_x register offset
 * @top_v_hd: stores the VTG_TOP_V_HD_x register offset
 * @bot_v_hd: stores the VTG_BOT_V_HD_x register offset
*/
struct sti_vtg_regs_offs {
u32 h_hd;
u32 top_v_vd;
u32 bot_v_vd;
u32 top_v_hd;
u32 bot_v_hd;
};
#define VTG_MAX_SYNC_OUTPUT 4
static const struct sti_vtg_regs_offs vtg_regs_offs[VTG_MAX_SYNC_OUTPUT] = {
{ VTG_H_HD_1,
VTG_TOP_V_VD_1, VTG_BOT_V_VD_1, VTG_TOP_V_HD_1, VTG_BOT_V_HD_1 },
{ VTG_H_HD_2,
VTG_TOP_V_VD_2, VTG_BOT_V_VD_2, VTG_TOP_V_HD_2, VTG_BOT_V_HD_2 },
{ VTG_H_HD_3,
VTG_TOP_V_VD_3, VTG_BOT_V_VD_3, VTG_TOP_V_HD_3, VTG_BOT_V_HD_3 },
{ VTG_H_HD_4,
VTG_TOP_V_VD_4, VTG_BOT_V_VD_4, VTG_TOP_V_HD_4, VTG_BOT_V_HD_4 }
};
/*
* STI VTG synchronisation parameters structure
*
 * @hsync: sample number falling and rising edge
 * @vsync_line_top: vertical top field line number falling and rising edge
 * @vsync_line_bot: vertical bottom field line number falling and rising edge
 * @vsync_off_top: vertical top field sample number rising and falling edge
 * @vsync_off_bot: vertical bottom field sample number rising and falling edge
*/
struct sti_vtg_sync_params {
u32 hsync;
u32 vsync_line_top;
u32 vsync_line_bot;
u32 vsync_off_top;
u32 vsync_off_bot;
};
/*
* STI VTG structure
*
* @regs: register mapping
* @sync_params: synchronisation parameters used to generate timings
* @irq: VTG irq
* @irq_status: store the IRQ status value
* @notifier_list: notifier callback
* @crtc: the CRTC for vblank event
*/
struct sti_vtg {
void __iomem *regs;
struct sti_vtg_sync_params sync_params[VTG_MAX_SYNC_OUTPUT];
int irq;
u32 irq_status;
struct raw_notifier_head notifier_list;
struct drm_crtc *crtc;
};
struct sti_vtg *of_vtg_find(struct device_node *np)
{
struct platform_device *pdev;
pdev = of_find_device_by_node(np);
if (!pdev)
return NULL;
return (struct sti_vtg *)platform_get_drvdata(pdev);
}
static void vtg_reset(struct sti_vtg *vtg)
{
writel(1, vtg->regs + VTG_DRST_AUTOC);
}
static void vtg_set_output_window(void __iomem *regs,
const struct drm_display_mode *mode)
{
u32 video_top_field_start;
u32 video_top_field_stop;
u32 video_bottom_field_start;
u32 video_bottom_field_stop;
u32 xstart = sti_vtg_get_pixel_number(*mode, 0);
u32 ystart = sti_vtg_get_line_number(*mode, 0);
u32 xstop = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
u32 ystop = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
/* Set output window to fit the display mode selected */
video_top_field_start = (ystart << 16) | xstart;
video_top_field_stop = (ystop << 16) | xstop;
/* Only progressive supported for now */
video_bottom_field_start = video_top_field_start;
video_bottom_field_stop = video_top_field_stop;
writel(video_top_field_start, regs + VTG_VID_TFO);
writel(video_top_field_stop, regs + VTG_VID_TFS);
writel(video_bottom_field_start, regs + VTG_VID_BFO);
writel(video_bottom_field_stop, regs + VTG_VID_BFS);
}
static void vtg_set_hsync_vsync_pos(struct sti_vtg_sync_params *sync,
int delay,
const struct drm_display_mode *mode)
{
long clocksperline, start, stop;
u32 risesync_top, fallsync_top;
u32 risesync_offs_top, fallsync_offs_top;
clocksperline = mode->htotal;
/* Get the hsync position */
start = 0;
stop = mode->hsync_end - mode->hsync_start;
start += delay;
stop += delay;
if (start < 0)
start += clocksperline;
else if (start >= clocksperline)
start -= clocksperline;
if (stop < 0)
stop += clocksperline;
else if (stop >= clocksperline)
stop -= clocksperline;
sync->hsync = (stop << 16) | start;
/* Get the vsync position */
if (delay >= 0) {
risesync_top = 1;
fallsync_top = risesync_top;
fallsync_top += mode->vsync_end - mode->vsync_start;
fallsync_offs_top = (u32)delay;
risesync_offs_top = (u32)delay;
} else {
risesync_top = mode->vtotal;
fallsync_top = mode->vsync_end - mode->vsync_start;
fallsync_offs_top = clocksperline + delay;
risesync_offs_top = clocksperline + delay;
}
sync->vsync_line_top = (fallsync_top << 16) | risesync_top;
sync->vsync_off_top = (fallsync_offs_top << 16) | risesync_offs_top;
/* Only progressive supported for now */
sync->vsync_line_bot = sync->vsync_line_top;
sync->vsync_off_bot = sync->vsync_off_top;
}
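/*
 * Illustrative note (not part of the original driver): worked example of the
 * wrap-around above for the HDF output (AWG_DELAY_HD = -9), assuming a
 * standard 1920x1080p60 mode (htotal = 2200, hsync width = 44 pixels):
 *   start = 0 - 9 = -9   -> start + clocksperline = 2191
 *   stop  = 44 - 9 = 35
 *   hsync = (35 << 16) | 2191
 * and, since delay < 0, the vsync rising edge moves to the last line
 * (risesync_top = vtotal) with an offset of clocksperline - 9 = 2191 pixels.
 */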
static void vtg_set_mode(struct sti_vtg *vtg,
int type,
struct sti_vtg_sync_params *sync,
const struct drm_display_mode *mode)
{
unsigned int i;
/* Set the number of clock cycles per line */
writel(mode->htotal, vtg->regs + VTG_CLKLN);
/* Set Half Line Per Field (only progressive supported for now) */
writel(mode->vtotal * 2, vtg->regs + VTG_HLFLN);
/* Program output window */
vtg_set_output_window(vtg->regs, mode);
/* Set hsync and vsync position for HDMI */
vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_HDMI - 1], HDMI_DELAY, mode);
/* Set hsync and vsync position for HD DCS */
vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_HDDCS - 1], 0, mode);
/* Set hsync and vsync position for HDF */
vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_HDF - 1], AWG_DELAY_HD, mode);
/* Set hsync and vsync position for DVO */
vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_DVO - 1], DVO_DELAY, mode);
	/* Program the sync outputs */
for (i = 0; i < VTG_MAX_SYNC_OUTPUT ; i++) {
writel(sync[i].hsync,
vtg->regs + vtg_regs_offs[i].h_hd);
writel(sync[i].vsync_line_top,
vtg->regs + vtg_regs_offs[i].top_v_vd);
writel(sync[i].vsync_line_bot,
vtg->regs + vtg_regs_offs[i].bot_v_vd);
writel(sync[i].vsync_off_top,
vtg->regs + vtg_regs_offs[i].top_v_hd);
writel(sync[i].vsync_off_bot,
vtg->regs + vtg_regs_offs[i].bot_v_hd);
}
/* mode */
writel(type, vtg->regs + VTG_MODE);
}
static void vtg_enable_irq(struct sti_vtg *vtg)
{
/* clear interrupt status and mask */
writel(0xFFFF, vtg->regs + VTG_HOST_ITS_BCLR);
writel(0xFFFF, vtg->regs + VTG_HOST_ITM_BCLR);
writel(VTG_IRQ_MASK, vtg->regs + VTG_HOST_ITM_BSET);
}
void sti_vtg_set_config(struct sti_vtg *vtg,
const struct drm_display_mode *mode)
{
/* write configuration */
vtg_set_mode(vtg, VTG_MODE_MASTER, vtg->sync_params, mode);
vtg_reset(vtg);
vtg_enable_irq(vtg);
}
/**
* sti_vtg_get_line_number
*
* @mode: display mode to be used
* @y: line
*
* Return the line number according to the display mode taking
* into account the Sync and Back Porch information.
* Video frame line numbers start at 1, y starts at 0.
* In interlaced modes the start line is the field line number of the odd
* field, but y is still defined as a progressive frame.
*/
u32 sti_vtg_get_line_number(struct drm_display_mode mode, int y)
{
u32 start_line = mode.vtotal - mode.vsync_start + 1;
if (mode.flags & DRM_MODE_FLAG_INTERLACE)
start_line *= 2;
return start_line + y;
}
/**
* sti_vtg_get_pixel_number
*
* @mode: display mode to be used
 * @x: horizontal pixel position
*
* Return the pixel number according to the display mode taking
* into account the Sync and Back Porch information.
* Pixels are counted from 0.
*/
u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x)
{
return mode.htotal - mode.hsync_start + x;
}
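/*
 * Illustrative note (not part of the original driver): worked example for a
 * standard 1920x1080p60 mode (vtotal = 1125, vsync_start = 1084,
 * htotal = 2200, hsync_start = 2008):
 *   sti_vtg_get_line_number(mode, 0)  = 1125 - 1084 + 1 = 42
 *   sti_vtg_get_pixel_number(mode, 0) = 2200 - 2008     = 192
 * i.e. the first active line/pixel of the frame sits after the sync and
 * back porch periods counted by the VTG.
 */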
int sti_vtg_register_client(struct sti_vtg *vtg, struct notifier_block *nb,
struct drm_crtc *crtc)
{
vtg->crtc = crtc;
return raw_notifier_chain_register(&vtg->notifier_list, nb);
}
int sti_vtg_unregister_client(struct sti_vtg *vtg, struct notifier_block *nb)
{
return raw_notifier_chain_unregister(&vtg->notifier_list, nb);
}
static irqreturn_t vtg_irq_thread(int irq, void *arg)
{
struct sti_vtg *vtg = arg;
u32 event;
event = (vtg->irq_status & VTG_IRQ_TOP) ?
VTG_TOP_FIELD_EVENT : VTG_BOTTOM_FIELD_EVENT;
raw_notifier_call_chain(&vtg->notifier_list, event, vtg->crtc);
return IRQ_HANDLED;
}
static irqreturn_t vtg_irq(int irq, void *arg)
{
struct sti_vtg *vtg = arg;
vtg->irq_status = readl(vtg->regs + VTG_HOST_ITS);
writel(vtg->irq_status, vtg->regs + VTG_HOST_ITS_BCLR);
/* force sync bus write */
readl(vtg->regs + VTG_HOST_ITS);
return IRQ_WAKE_THREAD;
}
static int vtg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_vtg *vtg;
struct resource *res;
int ret;
vtg = devm_kzalloc(dev, sizeof(*vtg), GFP_KERNEL);
if (!vtg)
return -ENOMEM;
	/* Get memory resources */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
DRM_ERROR("Get memory resource failed\n");
return -ENOMEM;
}
vtg->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!vtg->regs) {
DRM_ERROR("failed to remap I/O memory\n");
return -ENOMEM;
}
vtg->irq = platform_get_irq(pdev, 0);
if (vtg->irq < 0) {
DRM_ERROR("Failed to get VTG interrupt\n");
return vtg->irq;
}
RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list);
ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
vtg_irq_thread, IRQF_ONESHOT,
dev_name(dev), vtg);
if (ret < 0) {
DRM_ERROR("Failed to register VTG interrupt\n");
return ret;
}
platform_set_drvdata(pdev, vtg);
DRM_INFO("%s %s\n", __func__, dev_name(dev));
return 0;
}
static const struct of_device_id vtg_of_match[] = {
{ .compatible = "st,vtg", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, vtg_of_match);
struct platform_driver sti_vtg_driver = {
.driver = {
.name = "sti-vtg",
.owner = THIS_MODULE,
.of_match_table = vtg_of_match,
},
.probe = vtg_probe,
};
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sti/sti_vtg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Vincent Abriou <[email protected]> for STMicroelectronics.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sti_awg_utils.h"
#include "sti_drv.h"
#include "sti_mixer.h"
/* DVO registers */
#define DVO_AWG_DIGSYNC_CTRL 0x0000
#define DVO_DOF_CFG 0x0004
#define DVO_LUT_PROG_LOW 0x0008
#define DVO_LUT_PROG_MID 0x000C
#define DVO_LUT_PROG_HIGH 0x0010
#define DVO_DIGSYNC_INSTR_I 0x0100
#define DVO_AWG_CTRL_EN BIT(0)
#define DVO_AWG_FRAME_BASED_SYNC BIT(2)
#define DVO_DOF_EN_LOWBYTE BIT(0)
#define DVO_DOF_EN_MIDBYTE BIT(1)
#define DVO_DOF_EN_HIGHBYTE BIT(2)
#define DVO_DOF_EN BIT(6)
#define DVO_DOF_MOD_COUNT_SHIFT 8
#define DVO_LUT_ZERO 0
#define DVO_LUT_Y_G 1
#define DVO_LUT_Y_G_DEL 2
#define DVO_LUT_CB_B 3
#define DVO_LUT_CB_B_DEL 4
#define DVO_LUT_CR_R 5
#define DVO_LUT_CR_R_DEL 6
#define DVO_LUT_HOLD 7
struct dvo_config {
u32 flags;
u32 lowbyte;
u32 midbyte;
u32 highbyte;
int (*awg_fwgen_fct)(
struct awg_code_generation_params *fw_gen_params,
struct awg_timing *timing);
};
static struct dvo_config rgb_24bit_de_cfg = {
.flags = (0L << DVO_DOF_MOD_COUNT_SHIFT),
.lowbyte = DVO_LUT_CR_R,
.midbyte = DVO_LUT_Y_G,
.highbyte = DVO_LUT_CB_B,
.awg_fwgen_fct = sti_awg_generate_code_data_enable_mode,
};
/*
* STI digital video output structure
*
* @dev: driver device
* @drm_dev: pointer to drm device
* @mode: current display mode selected
* @regs: dvo registers
* @clk_pix: pixel clock for dvo
* @clk: clock for dvo
* @clk_main_parent: dvo parent clock if main path used
* @clk_aux_parent: dvo parent clock if aux path used
* @panel_node: panel node reference from device tree
* @panel: reference to the panel connected to the dvo
 * @config: current DVO configuration (LUT byte mapping and AWG code generator)
 * @enabled: true if dvo is enabled else false
 * @encoder: drm_encoder to which it is bound
 * @bridge: drm_bridge used to attach to the encoder
*/
struct sti_dvo {
struct device dev;
struct drm_device *drm_dev;
struct drm_display_mode mode;
void __iomem *regs;
struct clk *clk_pix;
struct clk *clk;
struct clk *clk_main_parent;
struct clk *clk_aux_parent;
struct device_node *panel_node;
struct drm_panel *panel;
struct dvo_config *config;
bool enabled;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
};
struct sti_dvo_connector {
struct drm_connector drm_connector;
struct drm_encoder *encoder;
struct sti_dvo *dvo;
};
#define to_sti_dvo_connector(x) \
container_of(x, struct sti_dvo_connector, drm_connector)
#define BLANKING_LEVEL 16
static int dvo_awg_generate_code(struct sti_dvo *dvo, u8 *ram_size, u32 *ram_code)
{
struct drm_display_mode *mode = &dvo->mode;
struct dvo_config *config = dvo->config;
struct awg_code_generation_params fw_gen_params;
struct awg_timing timing;
fw_gen_params.ram_code = ram_code;
fw_gen_params.instruction_offset = 0;
timing.total_lines = mode->vtotal;
timing.active_lines = mode->vdisplay;
timing.blanking_lines = mode->vsync_start - mode->vdisplay;
timing.trailing_lines = mode->vtotal - mode->vsync_start;
timing.total_pixels = mode->htotal;
timing.active_pixels = mode->hdisplay;
timing.blanking_pixels = mode->hsync_start - mode->hdisplay;
timing.trailing_pixels = mode->htotal - mode->hsync_start;
timing.blanking_level = BLANKING_LEVEL;
if (config->awg_fwgen_fct(&fw_gen_params, &timing)) {
DRM_ERROR("AWG firmware not properly generated\n");
return -EINVAL;
}
*ram_size = fw_gen_params.instruction_offset;
return 0;
}
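/*
 * Illustrative note (not part of the original driver): with a standard
 * 1920x1080p60 mode, the timing fields above resolve to:
 *   blanking_lines  = 1084 - 1080 = 4      trailing_lines  = 1125 - 1084 = 41
 *   blanking_pixels = 2008 - 1920 = 88     trailing_pixels = 2200 - 2008 = 192
 * which is what the AWG code generator uses to build the sync waveform.
 */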
/* Configure AWG, writing instructions
*
* @dvo: pointer to DVO structure
* @awg_ram_code: pointer to AWG instructions table
* @nb: nb of AWG instructions
*/
static void dvo_awg_configure(struct sti_dvo *dvo, u32 *awg_ram_code, int nb)
{
int i;
DRM_DEBUG_DRIVER("\n");
for (i = 0; i < nb; i++)
writel(awg_ram_code[i],
dvo->regs + DVO_DIGSYNC_INSTR_I + i * 4);
for (i = nb; i < AWG_MAX_INST; i++)
writel(0, dvo->regs + DVO_DIGSYNC_INSTR_I + i * 4);
writel(DVO_AWG_CTRL_EN, dvo->regs + DVO_AWG_DIGSYNC_CTRL);
}
#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
readl(dvo->regs + reg))
static void dvo_dbg_awg_microcode(struct seq_file *s, void __iomem *reg)
{
unsigned int i;
seq_puts(s, "\n\n");
seq_puts(s, " DVO AWG microcode:");
for (i = 0; i < AWG_MAX_INST; i++) {
if (i % 8 == 0)
seq_printf(s, "\n %04X:", i);
seq_printf(s, " %04X", readl(reg + i * 4));
}
}
static int dvo_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data;
seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs);
DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL);
DBGFS_DUMP(DVO_DOF_CFG);
DBGFS_DUMP(DVO_LUT_PROG_LOW);
DBGFS_DUMP(DVO_LUT_PROG_MID);
DBGFS_DUMP(DVO_LUT_PROG_HIGH);
dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I);
seq_putc(s, '\n');
return 0;
}
static struct drm_info_list dvo_debugfs_files[] = {
{ "dvo", dvo_dbg_show, 0, NULL },
};
static void dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(dvo_debugfs_files); i++)
dvo_debugfs_files[i].data = dvo;
drm_debugfs_create_files(dvo_debugfs_files,
ARRAY_SIZE(dvo_debugfs_files),
minor->debugfs_root, minor);
}
static void sti_dvo_disable(struct drm_bridge *bridge)
{
struct sti_dvo *dvo = bridge->driver_private;
if (!dvo->enabled)
return;
DRM_DEBUG_DRIVER("\n");
if (dvo->config->awg_fwgen_fct)
writel(0x00000000, dvo->regs + DVO_AWG_DIGSYNC_CTRL);
writel(0x00000000, dvo->regs + DVO_DOF_CFG);
drm_panel_disable(dvo->panel);
/* Disable/unprepare dvo clock */
clk_disable_unprepare(dvo->clk_pix);
clk_disable_unprepare(dvo->clk);
dvo->enabled = false;
}
static void sti_dvo_pre_enable(struct drm_bridge *bridge)
{
struct sti_dvo *dvo = bridge->driver_private;
struct dvo_config *config = dvo->config;
u32 val;
DRM_DEBUG_DRIVER("\n");
if (dvo->enabled)
return;
/* Make sure DVO is disabled */
writel(0x00000000, dvo->regs + DVO_DOF_CFG);
writel(0x00000000, dvo->regs + DVO_AWG_DIGSYNC_CTRL);
if (config->awg_fwgen_fct) {
u8 nb_instr;
u32 awg_ram_code[AWG_MAX_INST];
/* Configure AWG */
if (!dvo_awg_generate_code(dvo, &nb_instr, awg_ram_code))
dvo_awg_configure(dvo, awg_ram_code, nb_instr);
else
return;
}
/* Prepare/enable clocks */
if (clk_prepare_enable(dvo->clk_pix))
DRM_ERROR("Failed to prepare/enable dvo_pix clk\n");
if (clk_prepare_enable(dvo->clk))
DRM_ERROR("Failed to prepare/enable dvo clk\n");
drm_panel_enable(dvo->panel);
/* Set LUT */
writel(config->lowbyte, dvo->regs + DVO_LUT_PROG_LOW);
writel(config->midbyte, dvo->regs + DVO_LUT_PROG_MID);
writel(config->highbyte, dvo->regs + DVO_LUT_PROG_HIGH);
/* Digital output formatter config */
val = (config->flags | DVO_DOF_EN);
writel(val, dvo->regs + DVO_DOF_CFG);
dvo->enabled = true;
}
static void sti_dvo_set_mode(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct sti_dvo *dvo = bridge->driver_private;
struct sti_mixer *mixer = to_sti_mixer(dvo->encoder->crtc);
int rate = mode->clock * 1000;
struct clk *clkp;
int ret;
DRM_DEBUG_DRIVER("\n");
drm_mode_copy(&dvo->mode, mode);
/* According to the path used (main or aux), the dvo clocks should
* have a different parent clock. */
if (mixer->id == STI_MIXER_MAIN)
clkp = dvo->clk_main_parent;
else
clkp = dvo->clk_aux_parent;
if (clkp) {
clk_set_parent(dvo->clk_pix, clkp);
clk_set_parent(dvo->clk, clkp);
}
/* DVO clocks = compositor clock */
ret = clk_set_rate(dvo->clk_pix, rate);
if (ret < 0) {
DRM_ERROR("Cannot set rate (%dHz) for dvo_pix clk\n", rate);
return;
}
ret = clk_set_rate(dvo->clk, rate);
if (ret < 0) {
DRM_ERROR("Cannot set rate (%dHz) for dvo clk\n", rate);
return;
}
/* For now, we only support 24bit data enable (DE) synchro format */
dvo->config = &rgb_24bit_de_cfg;
}
static void sti_dvo_bridge_nope(struct drm_bridge *bridge)
{
/* do nothing */
}
static const struct drm_bridge_funcs sti_dvo_bridge_funcs = {
.pre_enable = sti_dvo_pre_enable,
.enable = sti_dvo_bridge_nope,
.disable = sti_dvo_disable,
.post_disable = sti_dvo_bridge_nope,
.mode_set = sti_dvo_set_mode,
};
static int sti_dvo_connector_get_modes(struct drm_connector *connector)
{
struct sti_dvo_connector *dvo_connector
= to_sti_dvo_connector(connector);
struct sti_dvo *dvo = dvo_connector->dvo;
if (dvo->panel)
return drm_panel_get_modes(dvo->panel, connector);
return 0;
}
#define CLK_TOLERANCE_HZ 50
static enum drm_mode_status
sti_dvo_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int target = mode->clock * 1000;
int target_min = target - CLK_TOLERANCE_HZ;
int target_max = target + CLK_TOLERANCE_HZ;
int result;
struct sti_dvo_connector *dvo_connector
= to_sti_dvo_connector(connector);
struct sti_dvo *dvo = dvo_connector->dvo;
result = clk_round_rate(dvo->clk_pix, target);
DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n",
target, result);
if ((result < target_min) || (result > target_max)) {
DRM_DEBUG_DRIVER("dvo pixclk=%d not supported\n", target);
return MODE_BAD;
}
return MODE_OK;
}
static const
struct drm_connector_helper_funcs sti_dvo_connector_helper_funcs = {
.get_modes = sti_dvo_connector_get_modes,
.mode_valid = sti_dvo_connector_mode_valid,
};
static enum drm_connector_status
sti_dvo_connector_detect(struct drm_connector *connector, bool force)
{
struct sti_dvo_connector *dvo_connector
= to_sti_dvo_connector(connector);
struct sti_dvo *dvo = dvo_connector->dvo;
DRM_DEBUG_DRIVER("\n");
if (!dvo->panel) {
dvo->panel = of_drm_find_panel(dvo->panel_node);
if (IS_ERR(dvo->panel))
dvo->panel = NULL;
}
if (dvo->panel)
return connector_status_connected;
return connector_status_disconnected;
}
static int sti_dvo_late_register(struct drm_connector *connector)
{
struct sti_dvo_connector *dvo_connector
= to_sti_dvo_connector(connector);
struct sti_dvo *dvo = dvo_connector->dvo;
dvo_debugfs_init(dvo, dvo->drm_dev->primary);
return 0;
}
static const struct drm_connector_funcs sti_dvo_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_dvo_connector_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.late_register = sti_dvo_late_register,
};
static struct drm_encoder *sti_dvo_find_encoder(struct drm_device *dev)
{
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
return encoder;
}
return NULL;
}
static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
{
struct sti_dvo *dvo = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct drm_encoder *encoder;
struct sti_dvo_connector *connector;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
dvo->drm_dev = drm_dev;
encoder = sti_dvo_find_encoder(drm_dev);
if (!encoder)
return -ENOMEM;
connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
if (!connector)
return -ENOMEM;
connector->dvo = dvo;
bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return -ENOMEM;
bridge->driver_private = dvo;
bridge->funcs = &sti_dvo_bridge_funcs;
bridge->of_node = dvo->dev.of_node;
drm_bridge_add(bridge);
err = drm_bridge_attach(encoder, bridge, NULL, 0);
if (err)
return err;
dvo->bridge = bridge;
connector->encoder = encoder;
dvo->encoder = encoder;
drm_connector = (struct drm_connector *)connector;
drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_connector_init(drm_dev, drm_connector,
&sti_dvo_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
drm_connector_helper_add(drm_connector,
&sti_dvo_connector_helper_funcs);
err = drm_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
}
return 0;
err_sysfs:
drm_bridge_remove(bridge);
return -EINVAL;
}
static void sti_dvo_unbind(struct device *dev,
struct device *master, void *data)
{
struct sti_dvo *dvo = dev_get_drvdata(dev);
drm_bridge_remove(dvo->bridge);
}
static const struct component_ops sti_dvo_ops = {
.bind = sti_dvo_bind,
.unbind = sti_dvo_unbind,
};
static int sti_dvo_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_dvo *dvo;
struct resource *res;
struct device_node *np = dev->of_node;
DRM_INFO("%s\n", __func__);
dvo = devm_kzalloc(dev, sizeof(*dvo), GFP_KERNEL);
if (!dvo) {
DRM_ERROR("Failed to allocate memory for DVO\n");
return -ENOMEM;
}
dvo->dev = pdev->dev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvo-reg");
if (!res) {
DRM_ERROR("Invalid dvo resource\n");
return -ENOMEM;
}
dvo->regs = devm_ioremap(dev, res->start,
resource_size(res));
if (!dvo->regs)
return -ENOMEM;
dvo->clk_pix = devm_clk_get(dev, "dvo_pix");
if (IS_ERR(dvo->clk_pix)) {
DRM_ERROR("Cannot get dvo_pix clock\n");
return PTR_ERR(dvo->clk_pix);
}
dvo->clk = devm_clk_get(dev, "dvo");
if (IS_ERR(dvo->clk)) {
DRM_ERROR("Cannot get dvo clock\n");
return PTR_ERR(dvo->clk);
}
dvo->clk_main_parent = devm_clk_get(dev, "main_parent");
if (IS_ERR(dvo->clk_main_parent)) {
DRM_DEBUG_DRIVER("Cannot get main_parent clock\n");
dvo->clk_main_parent = NULL;
}
dvo->clk_aux_parent = devm_clk_get(dev, "aux_parent");
if (IS_ERR(dvo->clk_aux_parent)) {
DRM_DEBUG_DRIVER("Cannot get aux_parent clock\n");
dvo->clk_aux_parent = NULL;
}
dvo->panel_node = of_parse_phandle(np, "sti,panel", 0);
if (!dvo->panel_node)
DRM_ERROR("No panel associated to the dvo output\n");
of_node_put(dvo->panel_node);
platform_set_drvdata(pdev, dvo);
return component_add(&pdev->dev, &sti_dvo_ops);
}
static void sti_dvo_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sti_dvo_ops);
}
static const struct of_device_id dvo_of_match[] = {
{ .compatible = "st,stih407-dvo", },
{ /* end node */ }
};
MODULE_DEVICE_TABLE(of, dvo_of_match);
struct platform_driver sti_dvo_driver = {
.driver = {
.name = "sti-dvo",
.owner = THIS_MODULE,
.of_match_table = dvo_of_match,
},
.probe = sti_dvo_probe,
.remove_new = sti_dvo_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sti/sti_dvo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <[email protected]>
* Fabien Dessenne <[email protected]>
* for STMicroelectronics.
*/
#include <linux/clk.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "sti_compositor.h"
#include "sti_crtc.h"
#include "sti_drv.h"
#include "sti_vid.h"
#include "sti_vtg.h"
static void sti_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
DRM_DEBUG_DRIVER("\n");
mixer->status = STI_MIXER_READY;
drm_crtc_vblank_on(crtc);
}
static void sti_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
DRM_DEBUG_DRIVER("\n");
mixer->status = STI_MIXER_DISABLING;
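/* The mixer is shut down from sti_crtc_vblank_cb() once all its planes are disabled */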
drm_crtc_wait_one_vblank(crtc);
}
static int
sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
struct clk *compo_clk, *pix_clk;
int rate = mode->clock * 1000;
DRM_DEBUG_KMS("CRTC:%d (%s) mode: (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer), mode->name);
DRM_DEBUG_KMS(DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
if (mixer->id == STI_MIXER_MAIN) {
compo_clk = compo->clk_compo_main;
pix_clk = compo->clk_pix_main;
} else {
compo_clk = compo->clk_compo_aux;
pix_clk = compo->clk_pix_aux;
}
/* Prepare and enable the compo IP clock */
if (clk_prepare_enable(compo_clk)) {
DRM_INFO("Failed to prepare/enable compositor clk\n");
goto compo_error;
}
/* Set rate and prepare/enable pixel clock */
if (clk_set_rate(pix_clk, rate) < 0) {
DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
goto pix_error;
}
if (clk_prepare_enable(pix_clk)) {
DRM_ERROR("Failed to prepare/enable pix clk\n");
goto pix_error;
}
sti_vtg_set_config(compo->vtg[mixer->id], &crtc->mode);
if (sti_mixer_active_video_area(mixer, &crtc->mode)) {
DRM_ERROR("Can't set active video area\n");
goto mixer_error;
}
return 0;
mixer_error:
clk_disable_unprepare(pix_clk);
pix_error:
clk_disable_unprepare(compo_clk);
compo_error:
return -EINVAL;
}
static void sti_crtc_disable(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
/* Disable Background */
sti_mixer_set_background_status(mixer, false);
drm_crtc_vblank_off(crtc);
/* Disable pixel clock and compo IP clocks */
if (mixer->id == STI_MIXER_MAIN) {
clk_disable_unprepare(compo->clk_pix_main);
clk_disable_unprepare(compo->clk_compo_main);
} else {
clk_disable_unprepare(compo->clk_pix_aux);
clk_disable_unprepare(compo->clk_compo_aux);
}
mixer->status = STI_MIXER_DISABLED;
}
static void
sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
}
static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_device *drm_dev = crtc->dev;
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
struct drm_plane *p;
struct drm_pending_vblank_event *event;
unsigned long flags;
DRM_DEBUG_DRIVER("\n");
/* perform plane actions */
list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
struct sti_plane *plane = to_sti_plane(p);
switch (plane->status) {
case STI_PLANE_UPDATED:
/* ignore update for other CRTC */
if (p->state->crtc != crtc)
continue;
/* update planes tagged as updated */
DRM_DEBUG_DRIVER("update plane %s\n",
sti_plane_to_str(plane));
if (sti_mixer_set_plane_depth(mixer, plane)) {
DRM_ERROR("Cannot set plane %s depth\n",
sti_plane_to_str(plane));
break;
}
if (sti_mixer_set_plane_status(mixer, plane, true)) {
DRM_ERROR("Cannot enable plane %s at mixer\n",
sti_plane_to_str(plane));
break;
}
/* if plane is HQVDP_0 then commit the vid[0] */
if (plane->desc == STI_HQVDP_0)
sti_vid_commit(compo->vid[0], p->state);
plane->status = STI_PLANE_READY;
break;
case STI_PLANE_DISABLING:
/* disabling sequence for planes tagged as disabling */
DRM_DEBUG_DRIVER("disable plane %s from mixer\n",
sti_plane_to_str(plane));
if (sti_mixer_set_plane_status(mixer, plane, false)) {
DRM_ERROR("Cannot disable plane %s at mixer\n",
sti_plane_to_str(plane));
continue;
}
if (plane->desc == STI_CURSOR)
/* tag plane status as disabled */
plane->status = STI_PLANE_DISABLED;
else
/* tag plane status as flushing */
plane->status = STI_PLANE_FLUSHING;
/* if plane is HQVDP_0 then disable the vid[0] */
if (plane->desc == STI_HQVDP_0)
sti_vid_disable(compo->vid[0]);
break;
default:
/* Other status case are not handled */
break;
}
}
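/* Deliver any pending page flip event: arm it on the next vblank when a
 * vblank reference can be taken, otherwise send it immediately */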
event = crtc->state->event;
if (event) {
crtc->state->event = NULL;
spin_lock_irqsave(&crtc->dev->event_lock, flags);
if (drm_crtc_vblank_get(crtc) == 0)
drm_crtc_arm_vblank_event(crtc, event);
else
drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
}
static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
.mode_set_nofb = sti_crtc_mode_set_nofb,
.atomic_flush = sti_crtc_atomic_flush,
.atomic_enable = sti_crtc_atomic_enable,
.atomic_disable = sti_crtc_atomic_disable,
};
static void sti_crtc_destroy(struct drm_crtc *crtc)
{
DRM_DEBUG_KMS("\n");
drm_crtc_cleanup(crtc);
}
static int sti_crtc_set_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t val)
{
DRM_DEBUG_KMS("\n");
return 0;
}
int sti_crtc_vblank_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
struct sti_compositor *compo;
struct drm_crtc *crtc = data;
struct sti_mixer *mixer;
unsigned int pipe;
pipe = drm_crtc_index(crtc);
compo = container_of(nb, struct sti_compositor, vtg_vblank_nb[pipe]);
mixer = compo->mixer[pipe];
if ((event != VTG_TOP_FIELD_EVENT) &&
(event != VTG_BOTTOM_FIELD_EVENT)) {
DRM_ERROR("unknown event: %lu\n", event);
return -EINVAL;
}
drm_crtc_handle_vblank(crtc);
if (mixer->status == STI_MIXER_DISABLING) {
struct drm_plane *p;
/* Disable mixer only if all overlay planes (GDP and VDP)
* are disabled */
list_for_each_entry(p, &crtc->dev->mode_config.plane_list,
head) {
struct sti_plane *plane = to_sti_plane(p);
if ((plane->desc & STI_PLANE_TYPE_MASK) <= STI_VDP)
if (plane->status != STI_PLANE_DISABLED)
return 0;
}
sti_crtc_disable(crtc);
}
return 0;
}
static int sti_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
struct sti_private *dev_priv = dev->dev_private;
struct sti_compositor *compo = dev_priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
struct sti_vtg *vtg = compo->vtg[pipe];
DRM_DEBUG_DRIVER("\n");
if (sti_vtg_register_client(vtg, vtg_vblank_nb, crtc)) {
DRM_ERROR("Cannot register VTG notifier\n");
return -EINVAL;
}
return 0;
}
static void sti_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *drm_dev = crtc->dev;
unsigned int pipe = crtc->index;
struct sti_private *priv = drm_dev->dev_private;
struct sti_compositor *compo = priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
struct sti_vtg *vtg = compo->vtg[pipe];
DRM_DEBUG_DRIVER("\n");
if (sti_vtg_unregister_client(vtg, vtg_vblank_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
}
static int sti_crtc_late_register(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
if (drm_crtc_index(crtc) == 0)
sti_compositor_debugfs_init(compo, crtc->dev->primary);
return 0;
}
static const struct drm_crtc_funcs sti_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.destroy = sti_crtc_destroy,
.set_property = sti_crtc_set_property,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.late_register = sti_crtc_late_register,
.enable_vblank = sti_crtc_enable_vblank,
.disable_vblank = sti_crtc_disable_vblank,
};
bool sti_crtc_is_main(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
if (mixer->id == STI_MIXER_MAIN)
return true;
return false;
}
int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
struct drm_plane *primary, struct drm_plane *cursor)
{
struct drm_crtc *crtc = &mixer->drm_crtc;
int res;
res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
&sti_crtc_funcs, NULL);
if (res) {
DRM_ERROR("Can't initialize CRTC\n");
return -EINVAL;
}
drm_crtc_helper_add(crtc, &sti_crtc_helper_funcs);
DRM_DEBUG_DRIVER("drm CRTC:%d mapped to %s\n",
crtc->base.id, sti_mixer_to_str(mixer));
return 0;
}
| linux-master | drivers/gpu/drm/sti/sti_crtc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Fabien Dessenne <[email protected]> for STMicroelectronics.
*/
#include <linux/seq_file.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "sti_plane.h"
#include "sti_vid.h"
#include "sti_vtg.h"
/* Registers */
#define VID_CTL 0x00
#define VID_ALP 0x04
#define VID_CLF 0x08
#define VID_VPO 0x0C
#define VID_VPS 0x10
#define VID_KEY1 0x28
#define VID_KEY2 0x2C
#define VID_MPR0 0x30
#define VID_MPR1 0x34
#define VID_MPR2 0x38
#define VID_MPR3 0x3C
#define VID_MST 0x68
#define VID_BC 0x70
#define VID_TINT 0x74
#define VID_CSAT 0x78
/* Registers values */
#define VID_CTL_IGNORE (BIT(31) | BIT(30))
#define VID_CTL_PSI_ENABLE (BIT(2) | BIT(1) | BIT(0))
#define VID_ALP_OPAQUE 0x00000080
#define VID_BC_DFLT 0x00008000
#define VID_TINT_DFLT 0x00000000
#define VID_CSAT_DFLT 0x00000080
/* YCbCr to RGB BT709:
 * R = Y+1.5391Cr
 * G = Y-0.4590Cr-0.1826Cb
 * B = Y+1.8125Cb
 */
#define VID_MPR0_BT709 0x0A800000
#define VID_MPR1_BT709 0x0AC50000
#define VID_MPR2_BT709 0x07150545
#define VID_MPR3_BT709 0x00000AE8
/* YCbCr to RGB BT601:
* R = Y+1.3711Cr
* G = Y-0.6992Cr-0.3359Cb
* B = Y+1.7344Cb
*/
#define VID_MPR0_BT601 0x0A800000
#define VID_MPR1_BT601 0x0AAF0000
#define VID_MPR2_BT601 0x094E0754
#define VID_MPR3_BT601 0x00000ADD
#define VID_MIN_HD_HEIGHT 720
#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
readl(vid->regs + reg))
static void vid_dbg_ctl(struct seq_file *s, int val)
{
val = val >> 30;
seq_putc(s, '\t');
if (!(val & 1))
seq_puts(s, "NOT ");
seq_puts(s, "ignored on main mixer - ");
if (!(val & 2))
seq_puts(s, "NOT ");
seq_puts(s, "ignored on aux mixer");
}
static void vid_dbg_vpo(struct seq_file *s, int val)
{
seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0x0FFF, (val >> 16) & 0x0FFF);
}
static void vid_dbg_vps(struct seq_file *s, int val)
{
seq_printf(s, "\txds:%4d\tyds:%4d", val & 0x0FFF, (val >> 16) & 0x0FFF);
}
static void vid_dbg_mst(struct seq_file *s, int val)
{
if (val & 1)
seq_puts(s, "\tBUFFER UNDERFLOW!");
}
static int vid_dbg_show(struct seq_file *s, void *arg)
{
struct drm_info_node *node = s->private;
struct sti_vid *vid = (struct sti_vid *)node->info_ent->data;
seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs);
DBGFS_DUMP(VID_CTL);
vid_dbg_ctl(s, readl(vid->regs + VID_CTL));
DBGFS_DUMP(VID_ALP);
DBGFS_DUMP(VID_CLF);
DBGFS_DUMP(VID_VPO);
vid_dbg_vpo(s, readl(vid->regs + VID_VPO));
DBGFS_DUMP(VID_VPS);
vid_dbg_vps(s, readl(vid->regs + VID_VPS));
DBGFS_DUMP(VID_KEY1);
DBGFS_DUMP(VID_KEY2);
DBGFS_DUMP(VID_MPR0);
DBGFS_DUMP(VID_MPR1);
DBGFS_DUMP(VID_MPR2);
DBGFS_DUMP(VID_MPR3);
DBGFS_DUMP(VID_MST);
vid_dbg_mst(s, readl(vid->regs + VID_MST));
DBGFS_DUMP(VID_BC);
DBGFS_DUMP(VID_TINT);
DBGFS_DUMP(VID_CSAT);
seq_putc(s, '\n');
return 0;
}
static struct drm_info_list vid_debugfs_files[] = {
{ "vid", vid_dbg_show, 0, NULL },
};
void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(vid_debugfs_files); i++)
vid_debugfs_files[i].data = vid;
drm_debugfs_create_files(vid_debugfs_files,
ARRAY_SIZE(vid_debugfs_files),
minor->debugfs_root, minor);
}
void sti_vid_commit(struct sti_vid *vid,
struct drm_plane_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_display_mode *mode = &crtc->mode;
int dst_x = state->crtc_x;
int dst_y = state->crtc_y;
int dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
int dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
int src_h = state->src_h >> 16;
u32 val, ydo, xdo, yds, xds;
/* Input / output size
* Align to upper even value */
dst_w = ALIGN(dst_w, 2);
dst_h = ALIGN(dst_h, 2);
/* Unmask */
val = readl(vid->regs + VID_CTL);
val &= ~VID_CTL_IGNORE;
writel(val, vid->regs + VID_CTL);
ydo = sti_vtg_get_line_number(*mode, dst_y);
yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
xdo = sti_vtg_get_pixel_number(*mode, dst_x);
xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
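/* Output viewport: VPO holds the origin, VPS the stop point */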
writel((ydo << 16) | xdo, vid->regs + VID_VPO);
writel((yds << 16) | xds, vid->regs + VID_VPS);
/* Color conversion parameters */
if (src_h >= VID_MIN_HD_HEIGHT) {
writel(VID_MPR0_BT709, vid->regs + VID_MPR0);
writel(VID_MPR1_BT709, vid->regs + VID_MPR1);
writel(VID_MPR2_BT709, vid->regs + VID_MPR2);
writel(VID_MPR3_BT709, vid->regs + VID_MPR3);
} else {
writel(VID_MPR0_BT601, vid->regs + VID_MPR0);
writel(VID_MPR1_BT601, vid->regs + VID_MPR1);
writel(VID_MPR2_BT601, vid->regs + VID_MPR2);
writel(VID_MPR3_BT601, vid->regs + VID_MPR3);
}
}
void sti_vid_disable(struct sti_vid *vid)
{
u32 val;
/* Mask */
val = readl(vid->regs + VID_CTL);
val |= VID_CTL_IGNORE;
writel(val, vid->regs + VID_CTL);
}
static void sti_vid_init(struct sti_vid *vid)
{
/* Enable PSI, Mask layer */
writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
/* Opaque */
writel(VID_ALP_OPAQUE, vid->regs + VID_ALP);
/* Brightness, contrast, tint, saturation */
writel(VID_BC_DFLT, vid->regs + VID_BC);
writel(VID_TINT_DFLT, vid->regs + VID_TINT);
writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
}
struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
int id, void __iomem *baseaddr)
{
struct sti_vid *vid;
vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
if (!vid) {
DRM_ERROR("Failed to allocate memory for VID\n");
return NULL;
}
vid->dev = dev;
vid->regs = baseaddr;
vid->id = id;
sti_vid_init(vid);
return vid;
}
| linux-master | drivers/gpu/drm/sti/sti_vid.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <[email protected]>
* Vincent Abriou <[email protected]>
* for STMicroelectronics.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/seq_file.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "sti_crtc.h"
#include "sti_drv.h"
#include "sti_vtg.h"
/* glue registers */
#define TVO_CSC_MAIN_M0 0x000
#define TVO_CSC_MAIN_M1 0x004
#define TVO_CSC_MAIN_M2 0x008
#define TVO_CSC_MAIN_M3 0x00c
#define TVO_CSC_MAIN_M4 0x010
#define TVO_CSC_MAIN_M5 0x014
#define TVO_CSC_MAIN_M6 0x018
#define TVO_CSC_MAIN_M7 0x01c
#define TVO_MAIN_IN_VID_FORMAT 0x030
#define TVO_CSC_AUX_M0 0x100
#define TVO_CSC_AUX_M1 0x104
#define TVO_CSC_AUX_M2 0x108
#define TVO_CSC_AUX_M3 0x10c
#define TVO_CSC_AUX_M4 0x110
#define TVO_CSC_AUX_M5 0x114
#define TVO_CSC_AUX_M6 0x118
#define TVO_CSC_AUX_M7 0x11c
#define TVO_AUX_IN_VID_FORMAT 0x130
#define TVO_VIP_HDF 0x400
#define TVO_HD_SYNC_SEL 0x418
#define TVO_HD_DAC_CFG_OFF 0x420
#define TVO_VIP_HDMI 0x500
#define TVO_HDMI_FORCE_COLOR_0 0x504
#define TVO_HDMI_FORCE_COLOR_1 0x508
#define TVO_HDMI_CLIP_VALUE_B_CB 0x50c
#define TVO_HDMI_CLIP_VALUE_Y_G 0x510
#define TVO_HDMI_CLIP_VALUE_R_CR 0x514
#define TVO_HDMI_SYNC_SEL 0x518
#define TVO_HDMI_DFV_OBS 0x540
#define TVO_VIP_DVO 0x600
#define TVO_DVO_SYNC_SEL 0x618
#define TVO_DVO_CONFIG 0x620
#define TVO_IN_FMT_SIGNED BIT(0)
#define TVO_SYNC_EXT BIT(4)
#define TVO_VIP_REORDER_R_SHIFT 24
#define TVO_VIP_REORDER_G_SHIFT 20
#define TVO_VIP_REORDER_B_SHIFT 16
#define TVO_VIP_REORDER_MASK 0x3
#define TVO_VIP_REORDER_Y_G_SEL 0
#define TVO_VIP_REORDER_CB_B_SEL 1
#define TVO_VIP_REORDER_CR_R_SEL 2
#define TVO_VIP_CLIP_SHIFT 8
#define TVO_VIP_CLIP_MASK 0x7
#define TVO_VIP_CLIP_DISABLED 0
#define TVO_VIP_CLIP_EAV_SAV 1
#define TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y 2
#define TVO_VIP_CLIP_LIMITED_RANGE_CB_CR 3
#define TVO_VIP_CLIP_PROG_RANGE 4
#define TVO_VIP_RND_SHIFT 4
#define TVO_VIP_RND_MASK 0x3
#define TVO_VIP_RND_8BIT_ROUNDED 0
#define TVO_VIP_RND_10BIT_ROUNDED 1
#define TVO_VIP_RND_12BIT_ROUNDED 2
#define TVO_VIP_SEL_INPUT_MASK 0xf
#define TVO_VIP_SEL_INPUT_MAIN 0x0
#define TVO_VIP_SEL_INPUT_AUX 0x8
#define TVO_VIP_SEL_INPUT_FORCE_COLOR 0xf
#define TVO_VIP_SEL_INPUT_BYPASS_MASK 0x1
#define TVO_VIP_SEL_INPUT_BYPASSED 1
#define TVO_SYNC_MAIN_VTG_SET_REF 0x00
#define TVO_SYNC_AUX_VTG_SET_REF 0x10
#define TVO_SYNC_HD_DCS_SHIFT 8
#define TVO_SYNC_DVO_PAD_HSYNC_SHIFT 8
#define TVO_SYNC_DVO_PAD_VSYNC_SHIFT 16
#define ENCODER_CRTC_MASK (BIT(0) | BIT(1))
#define TVO_MIN_HD_HEIGHT 720
/* enum listing the supported output data format */
enum sti_tvout_video_out_type {
STI_TVOUT_VIDEO_OUT_RGB,
STI_TVOUT_VIDEO_OUT_YUV,
};
struct sti_tvout {
struct device *dev;
struct drm_device *drm_dev;
void __iomem *regs;
struct reset_control *reset;
struct drm_encoder *hdmi;
struct drm_encoder *hda;
struct drm_encoder *dvo;
bool debugfs_registered;
};
struct sti_tvout_encoder {
struct drm_encoder encoder;
struct sti_tvout *tvout;
};
#define to_sti_tvout_encoder(x) \
container_of(x, struct sti_tvout_encoder, encoder)
#define to_sti_tvout(x) to_sti_tvout_encoder(x)->tvout
/* preformatter conversion matrix */
static const u32 rgb_to_ycbcr_601[8] = {
0xF927082E, 0x04C9FEAB, 0x01D30964, 0xFA95FD3D,
0x0000082E, 0x00002000, 0x00002000, 0x00000000
};
/* 709 RGB to YCbCr */
static const u32 rgb_to_ycbcr_709[8] = {
0xF891082F, 0x0367FF40, 0x01280B71, 0xF9B1FE20,
0x0000082F, 0x00002000, 0x00002000, 0x00000000
};
static u32 tvout_read(struct sti_tvout *tvout, int offset)
{
return readl(tvout->regs + offset);
}
static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
{
writel(val, tvout->regs + offset);
}
/**
* tvout_vip_set_color_order - Set the color order of a VIP
*
* @tvout: tvout structure
* @reg: register to set
* @cr_r: red chroma or red order
* @y_g: y or green order
* @cb_b: blue chroma or blue order
*/
static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
u32 cr_r, u32 y_g, u32 cb_b)
{
u32 val = tvout_read(tvout, reg);
val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT);
val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT);
val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_B_SHIFT);
val |= cr_r << TVO_VIP_REORDER_R_SHIFT;
val |= y_g << TVO_VIP_REORDER_G_SHIFT;
val |= cb_b << TVO_VIP_REORDER_B_SHIFT;
tvout_write(tvout, val, reg);
}
/**
* tvout_vip_set_clip_mode - Set the clipping mode of a VIP
*
* @tvout: tvout structure
* @reg: register to set
* @range: clipping range
*/
static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg, u32 range)
{
u32 val = tvout_read(tvout, reg);
val &= ~(TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT);
val |= range << TVO_VIP_CLIP_SHIFT;
tvout_write(tvout, val, reg);
}
/**
* tvout_vip_set_rnd - Set the rounded value of a VIP
*
* @tvout: tvout structure
* @reg: register to set
* @rnd: rounded val per component
*/
static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
{
u32 val = tvout_read(tvout, reg);
val &= ~(TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT);
val |= rnd << TVO_VIP_RND_SHIFT;
tvout_write(tvout, val, reg);
}
/**
* tvout_vip_set_sel_input - Select the VIP input
*
* @tvout: tvout structure
* @reg: register to set
* @main_path: main or auxiliary path
* @video_out: selected_input (main/aux + conv)
*/
static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
int reg,
bool main_path,
enum sti_tvout_video_out_type video_out)
{
u32 sel_input;
u32 val = tvout_read(tvout, reg);
if (main_path)
sel_input = TVO_VIP_SEL_INPUT_MAIN;
else
sel_input = TVO_VIP_SEL_INPUT_AUX;
switch (video_out) {
case STI_TVOUT_VIDEO_OUT_RGB:
sel_input |= TVO_VIP_SEL_INPUT_BYPASSED;
break;
case STI_TVOUT_VIDEO_OUT_YUV:
sel_input &= ~TVO_VIP_SEL_INPUT_BYPASSED;
break;
}
/* on the stih407 chip, the sel_input bypass mode logic is inverted */
sel_input = sel_input ^ TVO_VIP_SEL_INPUT_BYPASS_MASK;
val &= ~TVO_VIP_SEL_INPUT_MASK;
val |= sel_input;
tvout_write(tvout, val, reg);
}
/**
* tvout_vip_set_in_vid_fmt - Select the input video signed or unsigned
*
* @tvout: tvout structure
* @reg: register to set
* @in_vid_fmt: used video input format
*/
static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
int reg, u32 in_vid_fmt)
{
u32 val = tvout_read(tvout, reg);
val &= ~TVO_IN_FMT_SIGNED;
val |= in_vid_fmt;
tvout_write(tvout, val, reg);
}
/**
* tvout_preformatter_set_matrix - Set preformatter matrix
*
* @tvout: tvout structure
* @mode: display mode structure
*/
static void tvout_preformatter_set_matrix(struct sti_tvout *tvout,
struct drm_display_mode *mode)
{
unsigned int i;
const u32 *pf_matrix;
if (mode->vdisplay >= TVO_MIN_HD_HEIGHT)
pf_matrix = rgb_to_ycbcr_709;
else
pf_matrix = rgb_to_ycbcr_601;
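/* Program the same matrix on both the main and the aux CSC register banks */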
for (i = 0; i < 8; i++) {
tvout_write(tvout, *(pf_matrix + i),
TVO_CSC_MAIN_M0 + (i * 4));
tvout_write(tvout, *(pf_matrix + i),
TVO_CSC_AUX_M0 + (i * 4));
}
}
/**
* tvout_dvo_start - Start VIP block for DVO output
*
* @tvout: pointer to the tvout structure
* @main_path: true if main path has to be used in the vip configuration
* else aux path is used.
*/
static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
{
u32 tvo_in_vid_format;
int val, tmp;
dev_dbg(tvout->dev, "%s\n", __func__);
if (main_path) {
DRM_DEBUG_DRIVER("main vip for DVO\n");
/* Select the input sync for dvo */
tmp = TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_DVO;
val = tmp << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
val |= tmp << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
val |= tmp;
tvout_write(tvout, val, TVO_DVO_SYNC_SEL);
tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
} else {
DRM_DEBUG_DRIVER("aux vip for DVO\n");
/* Select the input sync for dvo */
tmp = TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_DVO;
val = tmp << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
val |= tmp << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
val |= tmp;
tvout_write(tvout, val, TVO_DVO_SYNC_SEL);
tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
/* Set color channel order */
tvout_vip_set_color_order(tvout, TVO_VIP_DVO,
TVO_VIP_REORDER_CR_R_SEL,
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
/* Set clipping mode */
tvout_vip_set_clip_mode(tvout, TVO_VIP_DVO, TVO_VIP_CLIP_DISABLED);
/* Set round mode (rounded to 8-bit per component) */
tvout_vip_set_rnd(tvout, TVO_VIP_DVO, TVO_VIP_RND_8BIT_ROUNDED);
/* Set input video format */
tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format, TVO_IN_FMT_SIGNED);
/* Input selection */
tvout_vip_set_sel_input(tvout, TVO_VIP_DVO, main_path,
STI_TVOUT_VIDEO_OUT_RGB);
}
/**
* tvout_hdmi_start - Start VIP block for HDMI output
*
* @tvout: pointer to the tvout structure
* @main_path: true if main path has to be used in the vip configuration
* else aux path is used.
*/
static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
{
u32 tvo_in_vid_format;
dev_dbg(tvout->dev, "%s\n", __func__);
if (main_path) {
DRM_DEBUG_DRIVER("main vip for hdmi\n");
/* select the input sync for hdmi */
tvout_write(tvout,
TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_HDMI,
TVO_HDMI_SYNC_SEL);
tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
} else {
DRM_DEBUG_DRIVER("aux vip for hdmi\n");
/* select the input sync for hdmi */
tvout_write(tvout,
TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_HDMI,
TVO_HDMI_SYNC_SEL);
tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
/* set color channel order */
tvout_vip_set_color_order(tvout, TVO_VIP_HDMI,
TVO_VIP_REORDER_CR_R_SEL,
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
/* set clipping mode */
tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI, TVO_VIP_CLIP_DISABLED);
/* set round mode (rounded to 8-bit per component) */
tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
/* set input video format */
tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format, TVO_IN_FMT_SIGNED);
/* input selection */
tvout_vip_set_sel_input(tvout, TVO_VIP_HDMI, main_path,
STI_TVOUT_VIDEO_OUT_RGB);
}
/**
* tvout_hda_start - Start HDF VIP and HD DAC
*
* @tvout: pointer to the tvout structure
* @main_path: true if main path has to be used in the vip configuration
* else aux path is used.
*/
static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
{
u32 tvo_in_vid_format;
int val;
dev_dbg(tvout->dev, "%s\n", __func__);
if (main_path) {
DRM_DEBUG_DRIVER("main vip for HDF\n");
/* Select the input sync for HD analog and HD DCS */
val = TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_HDDCS;
val = val << TVO_SYNC_HD_DCS_SHIFT;
val |= TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_HDF;
tvout_write(tvout, val, TVO_HD_SYNC_SEL);
tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
} else {
DRM_DEBUG_DRIVER("aux vip for HDF\n");
/* Select the input sync for HD analog and HD DCS */
val = TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_HDDCS;
val = val << TVO_SYNC_HD_DCS_SHIFT;
val |= TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_HDF;
tvout_write(tvout, val, TVO_HD_SYNC_SEL);
tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
/* set color channel order */
tvout_vip_set_color_order(tvout, TVO_VIP_HDF,
TVO_VIP_REORDER_CR_R_SEL,
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
/* set clipping mode */
tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF, TVO_VIP_CLIP_DISABLED);
/* set round mode (rounded to 10-bit per component) */
tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
/* Set input video format */
tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format, TVO_IN_FMT_SIGNED);
/* Input selection */
tvout_vip_set_sel_input(tvout, TVO_VIP_HDF, main_path,
STI_TVOUT_VIDEO_OUT_YUV);
/* power up HD DAC */
tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF);
}
#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
readl(tvout->regs + reg))
static void tvout_dbg_vip(struct seq_file *s, int val)
{
int r, g, b, tmp, mask;
char *const reorder[] = {"Y_G", "Cb_B", "Cr_R"};
char *const clipping[] = {"No", "EAV/SAV", "Limited range RGB/Y",
"Limited range Cb/Cr", "decided by register"};
char *const round[] = {"8-bit", "10-bit", "12-bit"};
char *const input_sel[] = {"Main (color matrix enabled)",
"Main (color matrix by-passed)",
"", "", "", "", "", "",
"Aux (color matrix enabled)",
"Aux (color matrix by-passed)",
"", "", "", "", "", "Force value"};
seq_putc(s, '\t');
mask = TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT;
r = (val & mask) >> TVO_VIP_REORDER_R_SHIFT;
mask = TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT;
g = (val & mask) >> TVO_VIP_REORDER_G_SHIFT;
mask = TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_B_SHIFT;
b = (val & mask) >> TVO_VIP_REORDER_B_SHIFT;
seq_printf(s, "%-24s %s->%s %s->%s %s->%s\n", "Reorder:",
reorder[r], reorder[TVO_VIP_REORDER_CR_R_SEL],
reorder[g], reorder[TVO_VIP_REORDER_Y_G_SEL],
reorder[b], reorder[TVO_VIP_REORDER_CB_B_SEL]);
seq_puts(s, "\t\t\t\t\t");
mask = TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT;
tmp = (val & mask) >> TVO_VIP_CLIP_SHIFT;
seq_printf(s, "%-24s %s\n", "Clipping:", clipping[tmp]);
seq_puts(s, "\t\t\t\t\t");
mask = TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT;
tmp = (val & mask) >> TVO_VIP_RND_SHIFT;
seq_printf(s, "%-24s input data rounded to %s per component\n",
"Round:", round[tmp]);
seq_puts(s, "\t\t\t\t\t");
tmp = (val & TVO_VIP_SEL_INPUT_MASK);
seq_printf(s, "%-24s %s", "Input selection:", input_sel[tmp]);
}
static void tvout_dbg_hd_dac_cfg(struct seq_file *s, int val)
{
seq_printf(s, "\t%-24s %s", "HD DAC:",
val & 1 ? "disabled" : "enabled");
}
static int tvout_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data;
struct drm_crtc *crtc;
seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs);
seq_puts(s, "\n\n HDMI encoder: ");
crtc = tvout->hdmi->crtc;
if (crtc) {
seq_printf(s, "connected to %s path",
sti_crtc_is_main(crtc) ? "main" : "aux");
DBGFS_DUMP(TVO_HDMI_SYNC_SEL);
DBGFS_DUMP(TVO_VIP_HDMI);
tvout_dbg_vip(s, readl(tvout->regs + TVO_VIP_HDMI));
} else {
seq_puts(s, "disabled");
}
seq_puts(s, "\n\n DVO encoder: ");
crtc = tvout->dvo->crtc;
if (crtc) {
seq_printf(s, "connected to %s path",
sti_crtc_is_main(crtc) ? "main" : "aux");
DBGFS_DUMP(TVO_DVO_SYNC_SEL);
DBGFS_DUMP(TVO_DVO_CONFIG);
DBGFS_DUMP(TVO_VIP_DVO);
tvout_dbg_vip(s, readl(tvout->regs + TVO_VIP_DVO));
} else {
seq_puts(s, "disabled");
}
seq_puts(s, "\n\n HDA encoder: ");
crtc = tvout->hda->crtc;
if (crtc) {
seq_printf(s, "connected to %s path",
sti_crtc_is_main(crtc) ? "main" : "aux");
DBGFS_DUMP(TVO_HD_SYNC_SEL);
DBGFS_DUMP(TVO_HD_DAC_CFG_OFF);
tvout_dbg_hd_dac_cfg(s,
readl(tvout->regs + TVO_HD_DAC_CFG_OFF));
DBGFS_DUMP(TVO_VIP_HDF);
tvout_dbg_vip(s, readl(tvout->regs + TVO_VIP_HDF));
} else {
seq_puts(s, "disabled");
}
seq_puts(s, "\n\n main path configuration");
DBGFS_DUMP(TVO_CSC_MAIN_M0);
DBGFS_DUMP(TVO_CSC_MAIN_M1);
DBGFS_DUMP(TVO_CSC_MAIN_M2);
DBGFS_DUMP(TVO_CSC_MAIN_M3);
DBGFS_DUMP(TVO_CSC_MAIN_M4);
DBGFS_DUMP(TVO_CSC_MAIN_M5);
DBGFS_DUMP(TVO_CSC_MAIN_M6);
DBGFS_DUMP(TVO_CSC_MAIN_M7);
DBGFS_DUMP(TVO_MAIN_IN_VID_FORMAT);
seq_puts(s, "\n\n auxiliary path configuration");
DBGFS_DUMP(TVO_CSC_AUX_M0);
DBGFS_DUMP(TVO_CSC_AUX_M2);
DBGFS_DUMP(TVO_CSC_AUX_M3);
DBGFS_DUMP(TVO_CSC_AUX_M4);
DBGFS_DUMP(TVO_CSC_AUX_M5);
DBGFS_DUMP(TVO_CSC_AUX_M6);
DBGFS_DUMP(TVO_CSC_AUX_M7);
DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT);
seq_putc(s, '\n');
return 0;
}
static struct drm_info_list tvout_debugfs_files[] = {
{ "tvout", tvout_dbg_show, 0, NULL },
};
static void tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(tvout_debugfs_files); i++)
tvout_debugfs_files[i].data = tvout;
drm_debugfs_create_files(tvout_debugfs_files,
ARRAY_SIZE(tvout_debugfs_files),
minor->debugfs_root, minor);
}
static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
{
}
static void sti_tvout_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
}
static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
{
struct sti_tvout_encoder *sti_encoder = to_sti_tvout_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(sti_encoder);
}
static int sti_tvout_late_register(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
if (tvout->debugfs_registered)
return 0;
tvout_debugfs_init(tvout, encoder->dev->primary);
tvout->debugfs_registered = true;
return 0;
}
static void sti_tvout_early_unregister(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
if (!tvout->debugfs_registered)
return;
tvout->debugfs_registered = false;
}
static const struct drm_encoder_funcs sti_tvout_encoder_funcs = {
.destroy = sti_tvout_encoder_destroy,
.late_register = sti_tvout_late_register,
.early_unregister = sti_tvout_early_unregister,
};
static void sti_dvo_encoder_enable(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
tvout_preformatter_set_matrix(tvout, &encoder->crtc->mode);
tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc));
}
static void sti_dvo_encoder_disable(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
/* Reset VIP register */
tvout_write(tvout, 0x0, TVO_VIP_DVO);
}
static const struct drm_encoder_helper_funcs sti_dvo_encoder_helper_funcs = {
.dpms = sti_tvout_encoder_dpms,
.mode_set = sti_tvout_encoder_mode_set,
.enable = sti_dvo_encoder_enable,
.disable = sti_dvo_encoder_disable,
};
static struct drm_encoder *
sti_tvout_create_dvo_encoder(struct drm_device *dev,
struct sti_tvout *tvout)
{
struct sti_tvout_encoder *encoder;
struct drm_encoder *drm_encoder;
encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL);
if (!encoder)
return NULL;
encoder->tvout = tvout;
drm_encoder = &encoder->encoder;
drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
drm_encoder_init(dev, drm_encoder,
&sti_tvout_encoder_funcs, DRM_MODE_ENCODER_LVDS,
NULL);
drm_encoder_helper_add(drm_encoder, &sti_dvo_encoder_helper_funcs);
return drm_encoder;
}
static void sti_hda_encoder_enable(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
tvout_preformatter_set_matrix(tvout, &encoder->crtc->mode);
tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc));
}
static void sti_hda_encoder_disable(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
/* reset VIP register */
tvout_write(tvout, 0x0, TVO_VIP_HDF);
/* power down HD DAC */
tvout_write(tvout, 1, TVO_HD_DAC_CFG_OFF);
}
static const struct drm_encoder_helper_funcs sti_hda_encoder_helper_funcs = {
.dpms = sti_tvout_encoder_dpms,
.mode_set = sti_tvout_encoder_mode_set,
.commit = sti_hda_encoder_enable,
.disable = sti_hda_encoder_disable,
};
static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
struct sti_tvout *tvout)
{
struct sti_tvout_encoder *encoder;
struct drm_encoder *drm_encoder;
encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL);
if (!encoder)
return NULL;
encoder->tvout = tvout;
drm_encoder = &encoder->encoder;
drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
drm_encoder_init(dev, drm_encoder,
&sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(drm_encoder, &sti_hda_encoder_helper_funcs);
return drm_encoder;
}
static void sti_hdmi_encoder_enable(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
tvout_preformatter_set_matrix(tvout, &encoder->crtc->mode);
tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc));
}
static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
/* reset VIP register */
tvout_write(tvout, 0x0, TVO_VIP_HDMI);
}
static const struct drm_encoder_helper_funcs sti_hdmi_encoder_helper_funcs = {
.dpms = sti_tvout_encoder_dpms,
.mode_set = sti_tvout_encoder_mode_set,
.commit = sti_hdmi_encoder_enable,
.disable = sti_hdmi_encoder_disable,
};
static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
struct sti_tvout *tvout)
{
struct sti_tvout_encoder *encoder;
struct drm_encoder *drm_encoder;
encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL);
if (!encoder)
return NULL;
encoder->tvout = tvout;
drm_encoder = &encoder->encoder;
drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
drm_encoder_init(dev, drm_encoder,
&sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(drm_encoder, &sti_hdmi_encoder_helper_funcs);
return drm_encoder;
}
static void sti_tvout_create_encoders(struct drm_device *dev,
struct sti_tvout *tvout)
{
tvout->hdmi = sti_tvout_create_hdmi_encoder(dev, tvout);
tvout->hda = sti_tvout_create_hda_encoder(dev, tvout);
tvout->dvo = sti_tvout_create_dvo_encoder(dev, tvout);
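/* All three TVout encoders share the same possible CRTCs and can be
 * cloned with one another */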
tvout->hdmi->possible_clones = drm_encoder_mask(tvout->hdmi) |
drm_encoder_mask(tvout->hda) | drm_encoder_mask(tvout->dvo);
tvout->hda->possible_clones = drm_encoder_mask(tvout->hdmi) |
drm_encoder_mask(tvout->hda) | drm_encoder_mask(tvout->dvo);
tvout->dvo->possible_clones = drm_encoder_mask(tvout->hdmi) |
drm_encoder_mask(tvout->hda) | drm_encoder_mask(tvout->dvo);
}
static void sti_tvout_destroy_encoders(struct sti_tvout *tvout)
{
if (tvout->hdmi)
drm_encoder_cleanup(tvout->hdmi);
tvout->hdmi = NULL;
if (tvout->hda)
drm_encoder_cleanup(tvout->hda);
tvout->hda = NULL;
if (tvout->dvo)
drm_encoder_cleanup(tvout->dvo);
tvout->dvo = NULL;
}
static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
{
struct sti_tvout *tvout = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
tvout->drm_dev = drm_dev;
sti_tvout_create_encoders(drm_dev, tvout);
return 0;
}
static void sti_tvout_unbind(struct device *dev, struct device *master,
void *data)
{
struct sti_tvout *tvout = dev_get_drvdata(dev);
sti_tvout_destroy_encoders(tvout);
}
static const struct component_ops sti_tvout_ops = {
.bind = sti_tvout_bind,
.unbind = sti_tvout_unbind,
};
static int sti_tvout_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct sti_tvout *tvout;
struct resource *res;
DRM_INFO("%s\n", __func__);
if (!node)
return -ENODEV;
tvout = devm_kzalloc(dev, sizeof(*tvout), GFP_KERNEL);
if (!tvout)
return -ENOMEM;
tvout->dev = dev;
/* get memory resources */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg");
if (!res) {
DRM_ERROR("Invalid glue resource\n");
return -ENOMEM;
}
tvout->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!tvout->regs)
return -ENOMEM;
/* get reset resources */
tvout->reset = devm_reset_control_get(dev, "tvout");
/* take tvout out of reset */
if (!IS_ERR(tvout->reset))
reset_control_deassert(tvout->reset);
platform_set_drvdata(pdev, tvout);
return component_add(dev, &sti_tvout_ops);
}
static void sti_tvout_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sti_tvout_ops);
}
static const struct of_device_id tvout_of_match[] = {
{ .compatible = "st,stih407-tvout", },
{ /* end node */ }
};
MODULE_DEVICE_TABLE(of, tvout_of_match);
struct platform_driver sti_tvout_driver = {
.driver = {
.name = "sti-tvout",
.owner = THIS_MODULE,
.of_match_table = tvout_of_match,
},
.probe = sti_tvout_probe,
.remove_new = sti_tvout_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sti/sti_tvout.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <[email protected]>
* Fabien Dessenne <[email protected]>
* for STMicroelectronics.
*/
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_gdp.h"
#include "sti_plane.h"
#include "sti_vtg.h"
#define ALPHASWITCH BIT(6)
#define ENA_COLOR_FILL BIT(8)
#define BIGNOTLITTLE BIT(23)
#define WAIT_NEXT_VSYNC BIT(31)
/* GDP color formats */
#define GDP_RGB565 0x00
#define GDP_RGB888 0x01
#define GDP_RGB888_32 0x02
#define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB8565 0x04
#define GDP_ARGB8888 0x05
#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB1555 0x06
#define GDP_ARGB4444 0x07
#define GDP2STR(fmt) { GDP_ ## fmt, #fmt }
static struct gdp_format_to_str {
int format;
char name[20];
} gdp_format_to_str[] = {
GDP2STR(RGB565),
GDP2STR(RGB888),
GDP2STR(RGB888_32),
GDP2STR(XBGR8888),
GDP2STR(ARGB8565),
GDP2STR(ARGB8888),
GDP2STR(ABGR8888),
GDP2STR(ARGB1555),
GDP2STR(ARGB4444)
};
#define GAM_GDP_CTL_OFFSET 0x00
#define GAM_GDP_AGC_OFFSET 0x04
#define GAM_GDP_VPO_OFFSET 0x0C
#define GAM_GDP_VPS_OFFSET 0x10
#define GAM_GDP_PML_OFFSET 0x14
#define GAM_GDP_PMP_OFFSET 0x18
#define GAM_GDP_SIZE_OFFSET 0x1C
#define GAM_GDP_NVN_OFFSET 0x24
#define GAM_GDP_KEY1_OFFSET 0x28
#define GAM_GDP_KEY2_OFFSET 0x2C
#define GAM_GDP_PPT_OFFSET 0x34
#define GAM_GDP_CML_OFFSET 0x3C
#define GAM_GDP_MST_OFFSET 0x68
#define GAM_GDP_ALPHARANGE_255 BIT(5)
#define GAM_GDP_AGC_FULL_RANGE 0x00808080
#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
#define GAM_GDP_SIZE_MAX_WIDTH 3840
#define GAM_GDP_SIZE_MAX_HEIGHT 2160
#define GDP_NODE_NB_BANK 2
#define GDP_NODE_PER_FIELD 2
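/* The GDP fetches its parameters from node lists in memory: each bank holds
 * one node per field (top and bottom), and two banks are allocated so that
 * software can build one list while the hardware reads the other (see
 * sti_gdp_get_free_nodes() and sti_gdp_get_current_nodes()).
 */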
struct sti_gdp_node {
u32 gam_gdp_ctl;
u32 gam_gdp_agc;
u32 reserved1;
u32 gam_gdp_vpo;
u32 gam_gdp_vps;
u32 gam_gdp_pml;
u32 gam_gdp_pmp;
u32 gam_gdp_size;
u32 reserved2;
u32 gam_gdp_nvn;
u32 gam_gdp_key1;
u32 gam_gdp_key2;
u32 reserved3;
u32 gam_gdp_ppt;
u32 reserved4;
u32 gam_gdp_cml;
};
struct sti_gdp_node_list {
struct sti_gdp_node *top_field;
dma_addr_t top_field_paddr;
struct sti_gdp_node *btm_field;
dma_addr_t btm_field_paddr;
};
/*
* STI GDP structure
*
* @plane: sti_plane structure
* @dev: driver device
* @regs: gdp registers
* @clk_pix: pixel clock for the current gdp
* @clk_main_parent: gdp parent clock if main path used
* @clk_aux_parent: gdp parent clock if aux path used
* @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
* @is_curr_top: true if the current node processed is the top field
* @node_list: array of node list
* @vtg: registered vtg
*/
struct sti_gdp {
struct sti_plane plane;
struct device *dev;
void __iomem *regs;
struct clk *clk_pix;
struct clk *clk_main_parent;
struct clk *clk_aux_parent;
struct notifier_block vtg_field_nb;
bool is_curr_top;
struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
struct sti_vtg *vtg;
};
#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
static const uint32_t gdp_supported_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
};
#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
readl(gdp->regs + reg ## _OFFSET))
static void gdp_dbg_ctl(struct seq_file *s, int val)
{
int i;
seq_puts(s, "\tColor:");
for (i = 0; i < ARRAY_SIZE(gdp_format_to_str); i++) {
if (gdp_format_to_str[i].format == (val & 0x1F)) {
seq_puts(s, gdp_format_to_str[i].name);
break;
}
}
if (i == ARRAY_SIZE(gdp_format_to_str))
seq_puts(s, "<UNKNOWN>");
seq_printf(s, "\tWaitNextVsync:%d", val & WAIT_NEXT_VSYNC ? 1 : 0);
}
static void gdp_dbg_vpo(struct seq_file *s, int val)
{
seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
}
static void gdp_dbg_vps(struct seq_file *s, int val)
{
seq_printf(s, "\txds:%4d\tyds:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
}
static void gdp_dbg_size(struct seq_file *s, int val)
{
seq_printf(s, "\t%d x %d", val & 0xFFFF, (val >> 16) & 0xFFFF);
}
static void gdp_dbg_nvn(struct seq_file *s, struct sti_gdp *gdp, int val)
{
void *base = NULL;
unsigned int i;
for (i = 0; i < GDP_NODE_NB_BANK; i++) {
if (gdp->node_list[i].top_field_paddr == val) {
base = gdp->node_list[i].top_field;
break;
}
if (gdp->node_list[i].btm_field_paddr == val) {
base = gdp->node_list[i].btm_field;
break;
}
}
if (base)
seq_printf(s, "\tVirt @: %p", base);
}
static void gdp_dbg_ppt(struct seq_file *s, int val)
{
if (val & GAM_GDP_PPT_IGNORE)
seq_puts(s, "\tNot displayed on mixer!");
}
static void gdp_dbg_mst(struct seq_file *s, int val)
{
if (val & 1)
seq_puts(s, "\tBUFFER UNDERFLOW!");
}
static int gdp_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
struct drm_plane *drm_plane = &gdp->plane.drm_plane;
struct drm_crtc *crtc;
drm_modeset_lock(&drm_plane->mutex, NULL);
crtc = drm_plane->state->crtc;
drm_modeset_unlock(&drm_plane->mutex);
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&gdp->plane), gdp->regs);
DBGFS_DUMP(GAM_GDP_CTL);
gdp_dbg_ctl(s, readl(gdp->regs + GAM_GDP_CTL_OFFSET));
DBGFS_DUMP(GAM_GDP_AGC);
DBGFS_DUMP(GAM_GDP_VPO);
gdp_dbg_vpo(s, readl(gdp->regs + GAM_GDP_VPO_OFFSET));
DBGFS_DUMP(GAM_GDP_VPS);
gdp_dbg_vps(s, readl(gdp->regs + GAM_GDP_VPS_OFFSET));
DBGFS_DUMP(GAM_GDP_PML);
DBGFS_DUMP(GAM_GDP_PMP);
DBGFS_DUMP(GAM_GDP_SIZE);
gdp_dbg_size(s, readl(gdp->regs + GAM_GDP_SIZE_OFFSET));
DBGFS_DUMP(GAM_GDP_NVN);
gdp_dbg_nvn(s, gdp, readl(gdp->regs + GAM_GDP_NVN_OFFSET));
DBGFS_DUMP(GAM_GDP_KEY1);
DBGFS_DUMP(GAM_GDP_KEY2);
DBGFS_DUMP(GAM_GDP_PPT);
gdp_dbg_ppt(s, readl(gdp->regs + GAM_GDP_PPT_OFFSET));
DBGFS_DUMP(GAM_GDP_CML);
DBGFS_DUMP(GAM_GDP_MST);
gdp_dbg_mst(s, readl(gdp->regs + GAM_GDP_MST_OFFSET));
seq_puts(s, "\n\n");
if (!crtc)
seq_puts(s, " Not connected to any DRM CRTC\n");
else
seq_printf(s, " Connected to DRM CRTC #%d (%s)\n",
crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
return 0;
}
static void gdp_node_dump_node(struct seq_file *s, struct sti_gdp_node *node)
{
seq_printf(s, "\t@:0x%p", node);
seq_printf(s, "\n\tCTL 0x%08X", node->gam_gdp_ctl);
gdp_dbg_ctl(s, node->gam_gdp_ctl);
seq_printf(s, "\n\tAGC 0x%08X", node->gam_gdp_agc);
seq_printf(s, "\n\tVPO 0x%08X", node->gam_gdp_vpo);
gdp_dbg_vpo(s, node->gam_gdp_vpo);
seq_printf(s, "\n\tVPS 0x%08X", node->gam_gdp_vps);
gdp_dbg_vps(s, node->gam_gdp_vps);
seq_printf(s, "\n\tPML 0x%08X", node->gam_gdp_pml);
seq_printf(s, "\n\tPMP 0x%08X", node->gam_gdp_pmp);
seq_printf(s, "\n\tSIZE 0x%08X", node->gam_gdp_size);
gdp_dbg_size(s, node->gam_gdp_size);
seq_printf(s, "\n\tNVN 0x%08X", node->gam_gdp_nvn);
seq_printf(s, "\n\tKEY1 0x%08X", node->gam_gdp_key1);
seq_printf(s, "\n\tKEY2 0x%08X", node->gam_gdp_key2);
seq_printf(s, "\n\tPPT 0x%08X", node->gam_gdp_ppt);
gdp_dbg_ppt(s, node->gam_gdp_ppt);
seq_printf(s, "\n\tCML 0x%08X\n", node->gam_gdp_cml);
}
static int gdp_node_dbg_show(struct seq_file *s, void *arg)
{
struct drm_info_node *node = s->private;
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
unsigned int b;
for (b = 0; b < GDP_NODE_NB_BANK; b++) {
seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
gdp_node_dump_node(s, gdp->node_list[b].top_field);
seq_printf(s, "\n%s[%d].btm", sti_plane_to_str(&gdp->plane), b);
gdp_node_dump_node(s, gdp->node_list[b].btm_field);
}
return 0;
}
static struct drm_info_list gdp0_debugfs_files[] = {
{ "gdp0", gdp_dbg_show, 0, NULL },
{ "gdp0_node", gdp_node_dbg_show, 0, NULL },
};
static struct drm_info_list gdp1_debugfs_files[] = {
{ "gdp1", gdp_dbg_show, 0, NULL },
{ "gdp1_node", gdp_node_dbg_show, 0, NULL },
};
static struct drm_info_list gdp2_debugfs_files[] = {
{ "gdp2", gdp_dbg_show, 0, NULL },
{ "gdp2_node", gdp_node_dbg_show, 0, NULL },
};
static struct drm_info_list gdp3_debugfs_files[] = {
{ "gdp3", gdp_dbg_show, 0, NULL },
{ "gdp3_node", gdp_node_dbg_show, 0, NULL },
};
static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
{
unsigned int i;
struct drm_info_list *gdp_debugfs_files;
int nb_files;
switch (gdp->plane.desc) {
case STI_GDP_0:
gdp_debugfs_files = gdp0_debugfs_files;
nb_files = ARRAY_SIZE(gdp0_debugfs_files);
break;
case STI_GDP_1:
gdp_debugfs_files = gdp1_debugfs_files;
nb_files = ARRAY_SIZE(gdp1_debugfs_files);
break;
case STI_GDP_2:
gdp_debugfs_files = gdp2_debugfs_files;
nb_files = ARRAY_SIZE(gdp2_debugfs_files);
break;
case STI_GDP_3:
gdp_debugfs_files = gdp3_debugfs_files;
nb_files = ARRAY_SIZE(gdp3_debugfs_files);
break;
default:
return -EINVAL;
}
for (i = 0; i < nb_files; i++)
gdp_debugfs_files[i].data = gdp;
drm_debugfs_create_files(gdp_debugfs_files,
nb_files,
minor->debugfs_root, minor);
return 0;
}
static int sti_gdp_fourcc2format(int fourcc)
{
switch (fourcc) {
case DRM_FORMAT_XRGB8888:
return GDP_RGB888_32;
case DRM_FORMAT_XBGR8888:
return GDP_XBGR8888;
case DRM_FORMAT_ARGB8888:
return GDP_ARGB8888;
case DRM_FORMAT_ABGR8888:
return GDP_ABGR8888;
case DRM_FORMAT_ARGB4444:
return GDP_ARGB4444;
case DRM_FORMAT_ARGB1555:
return GDP_ARGB1555;
case DRM_FORMAT_RGB565:
return GDP_RGB565;
case DRM_FORMAT_RGB888:
return GDP_RGB888;
}
return -1;
}
static int sti_gdp_get_alpharange(int format)
{
switch (format) {
case GDP_ARGB8565:
case GDP_ARGB8888:
case GDP_ABGR8888:
return GAM_GDP_ALPHARANGE_255;
}
return 0;
}
/**
* sti_gdp_get_free_nodes
* @gdp: gdp pointer
*
* Look for a GDP node list that is not currently read by the HW.
*
* RETURNS:
* Pointer to the free GDP node list
*/
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
{
int hw_nvn;
unsigned int i;
hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
if (!hw_nvn)
goto end;
for (i = 0; i < GDP_NODE_NB_BANK; i++)
if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
(hw_nvn != gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i];
/* in case of an inconsistent NVN, restart with the first node */
DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
sti_plane_to_str(&gdp->plane), hw_nvn);
end:
return &gdp->node_list[0];
}
/**
* sti_gdp_get_current_nodes
* @gdp: gdp pointer
*
* Look for GDP nodes that are currently read by the HW.
*
* RETURNS:
* Pointer to the current GDP node list
*/
static
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
{
int hw_nvn;
unsigned int i;
hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
if (!hw_nvn)
goto end;
for (i = 0; i < GDP_NODE_NB_BANK; i++)
if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
(hw_nvn == gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i];
end:
DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
hw_nvn, sti_plane_to_str(&gdp->plane));
return NULL;
}
/**
* sti_gdp_disable
* @gdp: gdp pointer
*
* Disable a GDP.
*/
static void sti_gdp_disable(struct sti_gdp *gdp)
{
unsigned int i;
DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
/* Set the nodes as 'to be ignored on mixer' */
for (i = 0; i < GDP_NODE_NB_BANK; i++) {
gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
}
if (sti_vtg_unregister_client(gdp->vtg, &gdp->vtg_field_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
if (gdp->clk_pix)
clk_disable_unprepare(gdp->clk_pix);
gdp->plane.status = STI_PLANE_DISABLED;
gdp->vtg = NULL;
}
/**
* sti_gdp_field_cb
* @nb: notifier block
* @event: event message
* @data: private data
*
* Handle VTG top field and bottom field event.
*
* RETURNS:
* 0 on success.
*/
static int sti_gdp_field_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
if (gdp->plane.status == STI_PLANE_FLUSHING) {
/* the disable must be synchronized with the vsync event */
DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
sti_plane_to_str(&gdp->plane));
sti_gdp_disable(gdp);
}
switch (event) {
case VTG_TOP_FIELD_EVENT:
gdp->is_curr_top = true;
break;
case VTG_BOTTOM_FIELD_EVENT:
gdp->is_curr_top = false;
break;
default:
DRM_ERROR("unsupported event: %lu\n", event);
break;
}
return 0;
}
static void sti_gdp_init(struct sti_gdp *gdp)
{
struct device_node *np = gdp->dev->of_node;
dma_addr_t dma_addr;
void *base;
unsigned int i, size;
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL);
if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n");
return;
}
memset(base, 0, size);
for (i = 0; i < GDP_NODE_NB_BANK; i++) {
if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n");
return;
}
gdp->node_list[i].top_field = base;
gdp->node_list[i].top_field_paddr = dma_addr;
DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node);
dma_addr += sizeof(struct sti_gdp_node);
if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n");
return;
}
gdp->node_list[i].btm_field = base;
gdp->node_list[i].btm_field_paddr = dma_addr;
DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node);
dma_addr += sizeof(struct sti_gdp_node);
}
if (of_device_is_compatible(np, "st,stih407-compositor")) {
/* Each GDP of the STiH407 chip has its own pixel clock */
char *clk_name;
switch (gdp->plane.desc) {
case STI_GDP_0:
clk_name = "pix_gdp1";
break;
case STI_GDP_1:
clk_name = "pix_gdp2";
break;
case STI_GDP_2:
clk_name = "pix_gdp3";
break;
case STI_GDP_3:
clk_name = "pix_gdp4";
break;
default:
DRM_ERROR("GDP id not recognized\n");
return;
}
gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
if (IS_ERR(gdp->clk_pix))
DRM_ERROR("Cannot get %s clock\n", clk_name);
gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
if (IS_ERR(gdp->clk_main_parent))
DRM_ERROR("Cannot get main_parent clock\n");
gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
if (IS_ERR(gdp->clk_aux_parent))
DRM_ERROR("Cannot get aux_parent clock\n");
}
}
/**
* sti_gdp_get_dst
* @dev: device
* @dst: requested destination size
* @src: source size
*
* Return the cropped / clamped destination size
*
* RETURNS:
* cropped / clamped destination size
*/
static int sti_gdp_get_dst(struct device *dev, int dst, int src)
{
if (dst == src)
return dst;
if (dst < src) {
dev_dbg(dev, "WARNING: GDP scale not supported, will crop\n");
return dst;
}
dev_dbg(dev, "WARNING: GDP scale not supported, will clamp\n");
return src;
}
static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_gdp *gdp = to_sti_gdp(plane);
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc_state *crtc_state;
struct sti_mixer *mixer;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
int format;
/* no need for further checks if the plane is being disabled */
if (!crtc || !fb)
return 0;
mixer = to_sti_mixer(crtc);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
mode = &crtc_state->mode;
dst_x = new_plane_state->crtc_x;
dst_y = new_plane_state->crtc_y;
dst_w = clamp_val(new_plane_state->crtc_w, 0, mode->hdisplay - dst_x);
dst_h = clamp_val(new_plane_state->crtc_h, 0, mode->vdisplay - dst_y);
	/* src coordinates are in 16.16 fixed-point format */
src_x = new_plane_state->src_x >> 16;
src_y = new_plane_state->src_y >> 16;
src_w = clamp_val(new_plane_state->src_w >> 16, 0,
GAM_GDP_SIZE_MAX_WIDTH);
src_h = clamp_val(new_plane_state->src_h >> 16, 0,
GAM_GDP_SIZE_MAX_HEIGHT);
format = sti_gdp_fourcc2format(fb->format->format);
if (format == -1) {
DRM_ERROR("Format not supported by GDP %.4s\n",
(char *)&fb->format->format);
return -EINVAL;
}
if (!drm_fb_dma_get_gem_obj(fb, 0)) {
DRM_ERROR("Can't get DMA GEM object for fb\n");
return -EINVAL;
}
/* Set gdp clock */
if (mode->clock && gdp->clk_pix) {
struct clk *clkp;
int rate = mode->clock * 1000;
int res;
/*
* According to the mixer used, the gdp pixel clock
* should have a different parent clock.
*/
if (mixer->id == STI_MIXER_MAIN)
clkp = gdp->clk_main_parent;
else
clkp = gdp->clk_aux_parent;
if (clkp)
clk_set_parent(gdp->clk_pix, clkp);
res = clk_set_rate(gdp->clk_pix, rate);
if (res < 0) {
DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
rate);
return -EINVAL;
}
}
DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer),
drm_plane->base.id, sti_plane_to_str(plane));
DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
sti_plane_to_str(plane),
dst_w, dst_h, dst_x, dst_y,
src_w, src_h, src_x, src_y);
return 0;
}
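/*
 * sti_gdp_atomic_update - Build and queue a new pair of GDP nodes
 *
 * Fills the top and bottom field nodes of a free bank with the new
 * framebuffer parameters and links the updated list into the hardware
 * NVN chain so that it is taken into account at the next VSYNC.
 */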
static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
drm_plane);
struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_gdp *gdp = to_sti_gdp(plane);
struct drm_crtc *crtc = newstate->crtc;
struct drm_framebuffer *fb = newstate->fb;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
struct drm_gem_dma_object *dma_obj;
struct sti_gdp_node_list *list;
struct sti_gdp_node_list *curr_list;
struct sti_gdp_node *top_field, *btm_field;
u32 dma_updated_top;
u32 dma_updated_btm;
int format;
unsigned int bpp;
u32 ydo, xdo, yds, xds;
if (!crtc || !fb)
return;
if ((oldstate->fb == newstate->fb) &&
(oldstate->crtc_x == newstate->crtc_x) &&
(oldstate->crtc_y == newstate->crtc_y) &&
(oldstate->crtc_w == newstate->crtc_w) &&
(oldstate->crtc_h == newstate->crtc_h) &&
(oldstate->src_x == newstate->src_x) &&
(oldstate->src_y == newstate->src_y) &&
(oldstate->src_w == newstate->src_w) &&
(oldstate->src_h == newstate->src_h)) {
/* No change since last update, do not post cmd */
DRM_DEBUG_DRIVER("No change, not posting cmd\n");
plane->status = STI_PLANE_UPDATED;
return;
}
if (!gdp->vtg) {
struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
struct sti_mixer *mixer = to_sti_mixer(crtc);
/* Register gdp callback */
gdp->vtg = compo->vtg[mixer->id];
sti_vtg_register_client(gdp->vtg, &gdp->vtg_field_nb, crtc);
clk_prepare_enable(gdp->clk_pix);
}
mode = &crtc->mode;
dst_x = newstate->crtc_x;
dst_y = newstate->crtc_y;
dst_w = clamp_val(newstate->crtc_w, 0, mode->hdisplay - dst_x);
dst_h = clamp_val(newstate->crtc_h, 0, mode->vdisplay - dst_y);
	/* src coordinates are in 16.16 fixed-point format */
src_x = newstate->src_x >> 16;
src_y = newstate->src_y >> 16;
src_w = clamp_val(newstate->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
src_h = clamp_val(newstate->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
list = sti_gdp_get_free_nodes(gdp);
top_field = list->top_field;
btm_field = list->btm_field;
dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
sti_plane_to_str(plane), top_field, btm_field);
/* build the top field */
top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
format = sti_gdp_fourcc2format(fb->format->format);
top_field->gam_gdp_ctl |= format;
top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->format->format,
(unsigned long) dma_obj->dma_addr);
/* pixel memory location */
bpp = fb->format->cpp[0];
top_field->gam_gdp_pml = (u32) dma_obj->dma_addr + fb->offsets[0];
top_field->gam_gdp_pml += src_x * bpp;
top_field->gam_gdp_pml += src_y * fb->pitches[0];
/* output parameters (clamped / cropped) */
dst_w = sti_gdp_get_dst(gdp->dev, dst_w, src_w);
dst_h = sti_gdp_get_dst(gdp->dev, dst_h, src_h);
ydo = sti_vtg_get_line_number(*mode, dst_y);
yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
xdo = sti_vtg_get_pixel_number(*mode, dst_x);
xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
top_field->gam_gdp_vpo = (ydo << 16) | xdo;
top_field->gam_gdp_vps = (yds << 16) | xds;
/* input parameters */
src_w = dst_w;
top_field->gam_gdp_pmp = fb->pitches[0];
top_field->gam_gdp_size = src_h << 16 | src_w;
/* Same content and chained together */
memcpy(btm_field, top_field, sizeof(*btm_field));
top_field->gam_gdp_nvn = list->btm_field_paddr;
btm_field->gam_gdp_nvn = list->top_field_paddr;
/* Interlaced mode */
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
fb->pitches[0];
/* Update the NVN field of the 'right' field of the current GDP node
* (being used by the HW) with the address of the updated ('free') top
* field GDP node.
* - In interlaced mode the 'right' field is the bottom field as we
* update frames starting from their top field
* - In progressive mode, we update both bottom and top fields which
* are equal nodes.
* At the next VSYNC, the updated node list will be used by the HW.
*/
curr_list = sti_gdp_get_current_nodes(gdp);
dma_updated_top = list->top_field_paddr;
dma_updated_btm = list->btm_field_paddr;
dev_dbg(gdp->dev, "Current NVN:0x%X\n",
readl(gdp->regs + GAM_GDP_NVN_OFFSET));
dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
(unsigned long) dma_obj->dma_addr,
readl(gdp->regs + GAM_GDP_PML_OFFSET));
if (!curr_list) {
		/* First update, or invalid current node: write directly into
		 * the hw register */
DRM_DEBUG_DRIVER("%s first update (or invalid node)\n",
sti_plane_to_str(plane));
writel(gdp->is_curr_top ?
dma_updated_btm : dma_updated_top,
gdp->regs + GAM_GDP_NVN_OFFSET);
goto end;
}
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (gdp->is_curr_top) {
			/* Do not update in the middle of the frame, but
			 * postpone the update until after the bottom field
			 * has been displayed */
curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
} else {
/* Direct update to avoid one frame delay */
writel(dma_updated_top,
gdp->regs + GAM_GDP_NVN_OFFSET);
}
} else {
/* Direct update for progressive to avoid one frame delay */
writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
}
end:
sti_plane_update_fps(plane, true, false);
plane->status = STI_PLANE_UPDATED;
}
static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
if (!oldstate->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
drm_plane->base.id);
return;
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
oldstate->crtc->base.id,
sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
}
static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
.atomic_check = sti_gdp_atomic_check,
.atomic_update = sti_gdp_atomic_update,
.atomic_disable = sti_gdp_atomic_disable,
};
static int sti_gdp_late_register(struct drm_plane *drm_plane)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_gdp *gdp = to_sti_gdp(plane);
return gdp_debugfs_init(gdp, drm_plane->dev->primary);
}
static const struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.late_register = sti_gdp_late_register,
};
struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
struct device *dev, int desc,
void __iomem *baseaddr,
unsigned int possible_crtcs,
enum drm_plane_type type)
{
struct sti_gdp *gdp;
int res;
gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
if (!gdp) {
DRM_ERROR("Failed to allocate memory for GDP\n");
return NULL;
}
gdp->dev = dev;
gdp->regs = baseaddr;
gdp->plane.desc = desc;
gdp->plane.status = STI_PLANE_DISABLED;
gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
sti_gdp_init(gdp);
res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
possible_crtcs,
&sti_gdp_plane_helpers_funcs,
gdp_supported_formats,
ARRAY_SIZE(gdp_supported_formats),
NULL, type, NULL);
if (res) {
DRM_ERROR("Failed to initialize universal plane\n");
goto err;
}
drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);
sti_plane_init_property(&gdp->plane, type);
return &gdp->plane.drm_plane;
err:
devm_kfree(dev, gdp);
return NULL;
}
| linux-master | drivers/gpu/drm/sti/sti_gdp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Vincent Abriou <[email protected]>
* Fabien Dessenne <[email protected]>
* for STMicroelectronics.
*/
#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_cursor.h"
#include "sti_plane.h"
#include "sti_vtg.h"
/* Registers */
#define CUR_CTL 0x00
#define CUR_VPO 0x0C
#define CUR_PML 0x14
#define CUR_PMP 0x18
#define CUR_SIZE 0x1C
#define CUR_CML 0x20
#define CUR_AWS 0x28
#define CUR_AWE 0x2C
#define CUR_CTL_CLUT_UPDATE BIT(1)
#define STI_CURS_MIN_SIZE 1
#define STI_CURS_MAX_SIZE 128
/*
* pixmap dma buffer structure
*
* @paddr: physical address
* @size: buffer size
* @base: virtual address
*/
struct dma_pixmap {
dma_addr_t paddr;
size_t size;
void *base;
};
/*
* STI Cursor structure
*
 * @plane: sti_plane structure
* @dev: driver device
* @regs: cursor registers
* @width: cursor width
* @height: cursor height
* @clut: color look up table
* @clut_paddr: color look up table physical address
* @pixmap: pixmap dma buffer (clut8-format cursor)
*/
struct sti_cursor {
struct sti_plane plane;
struct device *dev;
void __iomem *regs;
unsigned int width;
unsigned int height;
unsigned short *clut;
dma_addr_t clut_paddr;
struct dma_pixmap pixmap;
};
static const uint32_t cursor_supported_formats[] = {
DRM_FORMAT_ARGB8888,
};
#define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
readl(cursor->regs + reg))
static void cursor_dbg_vpo(struct seq_file *s, u32 val)
{
seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0x0FFF, (val >> 16) & 0x0FFF);
}
static void cursor_dbg_size(struct seq_file *s, u32 val)
{
seq_printf(s, "\t%d x %d", val & 0x07FF, (val >> 16) & 0x07FF);
}
static void cursor_dbg_pml(struct seq_file *s,
struct sti_cursor *cursor, u32 val)
{
if (cursor->pixmap.paddr == val)
seq_printf(s, "\tVirt @: %p", cursor->pixmap.base);
}
static void cursor_dbg_cml(struct seq_file *s,
struct sti_cursor *cursor, u32 val)
{
if (cursor->clut_paddr == val)
seq_printf(s, "\tVirt @: %p", cursor->clut);
}
static int cursor_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&cursor->plane), cursor->regs);
DBGFS_DUMP(CUR_CTL);
DBGFS_DUMP(CUR_VPO);
cursor_dbg_vpo(s, readl(cursor->regs + CUR_VPO));
DBGFS_DUMP(CUR_PML);
cursor_dbg_pml(s, cursor, readl(cursor->regs + CUR_PML));
DBGFS_DUMP(CUR_PMP);
DBGFS_DUMP(CUR_SIZE);
cursor_dbg_size(s, readl(cursor->regs + CUR_SIZE));
DBGFS_DUMP(CUR_CML);
cursor_dbg_cml(s, cursor, readl(cursor->regs + CUR_CML));
DBGFS_DUMP(CUR_AWS);
DBGFS_DUMP(CUR_AWE);
seq_putc(s, '\n');
return 0;
}
static struct drm_info_list cursor_debugfs_files[] = {
{ "cursor", cursor_dbg_show, 0, NULL },
};
static void cursor_debugfs_init(struct sti_cursor *cursor,
struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++)
cursor_debugfs_files[i].data = cursor;
drm_debugfs_create_files(cursor_debugfs_files,
ARRAY_SIZE(cursor_debugfs_files),
minor->debugfs_root, minor);
}
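/*
 * sti_cursor_argb8888_to_clut8 - Convert the cursor framebuffer to CLUT8
 *
 * Each ARGB8888 pixel is reduced to an 8-bit index by keeping the two
 * most significant bits of every component (AARRGGBB layout).
 */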
static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
{
u8 *dst = cursor->pixmap.base;
unsigned int i, j;
u32 a, r, g, b;
for (i = 0; i < cursor->height; i++) {
for (j = 0; j < cursor->width; j++) {
			/* Pick the 2 highest bits of each component */
a = (*src >> 30) & 3;
r = (*src >> 22) & 3;
g = (*src >> 14) & 3;
b = (*src >> 6) & 3;
*dst = a << 6 | r << 4 | g << 2 | b;
src++;
dst++;
}
}
}
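/*
 * sti_cursor_init - Fill the cursor color look-up table
 *
 * Builds the 256-entry ARGB4444 CLUT: each 2-bit component (0-3) is
 * expanded to 4 bits by multiplying it by 5 (0x0, 0x5, 0xA, 0xF).
 */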
static void sti_cursor_init(struct sti_cursor *cursor)
{
unsigned short *base = cursor->clut;
unsigned int a, r, g, b;
	/* Assign CLUT values, ARGB4444 format */
for (a = 0; a < 4; a++)
for (r = 0; r < 4; r++)
for (g = 0; g < 4; g++)
for (b = 0; b < 4; b++)
*base++ = (a * 5) << 12 |
(r * 5) << 8 |
(g * 5) << 4 |
(b * 5);
}
static int sti_cursor_atomic_check(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_cursor *cursor = to_sti_cursor(plane);
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc_state *crtc_state;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_w, src_h;
/* no need for further checks if the plane is being disabled */
if (!crtc || !fb)
return 0;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
mode = &crtc_state->mode;
dst_x = new_plane_state->crtc_x;
dst_y = new_plane_state->crtc_y;
dst_w = clamp_val(new_plane_state->crtc_w, 0,
mode->crtc_hdisplay - dst_x);
dst_h = clamp_val(new_plane_state->crtc_h, 0,
mode->crtc_vdisplay - dst_y);
	/* src_w and src_h are in 16.16 fixed-point format */
src_w = new_plane_state->src_w >> 16;
src_h = new_plane_state->src_h >> 16;
if (src_w < STI_CURS_MIN_SIZE ||
src_h < STI_CURS_MIN_SIZE ||
src_w > STI_CURS_MAX_SIZE ||
src_h > STI_CURS_MAX_SIZE) {
DRM_ERROR("Invalid cursor size (%dx%d)\n",
src_w, src_h);
return -EINVAL;
}
	/* If the cursor size has changed, re-allocate the pixmap */
if (!cursor->pixmap.base ||
(cursor->width != src_w) ||
(cursor->height != src_h)) {
cursor->width = src_w;
cursor->height = src_h;
if (cursor->pixmap.base)
dma_free_wc(cursor->dev, cursor->pixmap.size,
cursor->pixmap.base, cursor->pixmap.paddr);
cursor->pixmap.size = cursor->width * cursor->height;
cursor->pixmap.base = dma_alloc_wc(cursor->dev,
cursor->pixmap.size,
&cursor->pixmap.paddr,
GFP_KERNEL | GFP_DMA);
if (!cursor->pixmap.base) {
DRM_ERROR("Failed to allocate memory for pixmap\n");
return -EINVAL;
}
}
if (!drm_fb_dma_get_gem_obj(fb, 0)) {
DRM_ERROR("Can't get DMA GEM object for fb\n");
return -EINVAL;
}
DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
return 0;
}
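/*
 * sti_cursor_atomic_update - Program the cursor hardware
 *
 * Converts the framebuffer content to CLUT8 into the pixmap buffer,
 * sets the active window (AWS/AWE) from the CRTC mode, then writes the
 * pixmap address, size and position and triggers a CLUT update.
 */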
static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_cursor *cursor = to_sti_cursor(plane);
struct drm_crtc *crtc = newstate->crtc;
struct drm_framebuffer *fb = newstate->fb;
struct drm_display_mode *mode;
int dst_x, dst_y;
struct drm_gem_dma_object *dma_obj;
u32 y, x;
u32 val;
if (!crtc || !fb)
return;
mode = &crtc->mode;
dst_x = newstate->crtc_x;
dst_y = newstate->crtc_y;
dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
/* Convert ARGB8888 to CLUT8 */
sti_cursor_argb8888_to_clut8(cursor, (u32 *)dma_obj->vaddr);
/* AWS and AWE depend on the mode */
y = sti_vtg_get_line_number(*mode, 0);
x = sti_vtg_get_pixel_number(*mode, 0);
val = y << 16 | x;
writel(val, cursor->regs + CUR_AWS);
y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
val = y << 16 | x;
writel(val, cursor->regs + CUR_AWE);
/* Set memory location, size, and position */
writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
writel(cursor->width, cursor->regs + CUR_PMP);
writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
y = sti_vtg_get_line_number(*mode, dst_y);
x = sti_vtg_get_pixel_number(*mode, dst_x);
writel((y << 16) | x, cursor->regs + CUR_VPO);
/* Set and fetch CLUT */
writel(cursor->clut_paddr, cursor->regs + CUR_CML);
writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
sti_plane_update_fps(plane, true, false);
plane->status = STI_PLANE_UPDATED;
}
static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
if (!oldstate->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
drm_plane->base.id);
return;
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
oldstate->crtc->base.id,
sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
}
static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
.atomic_check = sti_cursor_atomic_check,
.atomic_update = sti_cursor_atomic_update,
.atomic_disable = sti_cursor_atomic_disable,
};
static int sti_cursor_late_register(struct drm_plane *drm_plane)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_cursor *cursor = to_sti_cursor(plane);
cursor_debugfs_init(cursor, drm_plane->dev->primary);
return 0;
}
static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.late_register = sti_cursor_late_register,
};
struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
struct device *dev, int desc,
void __iomem *baseaddr,
unsigned int possible_crtcs)
{
struct sti_cursor *cursor;
size_t size;
int res;
cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL);
if (!cursor) {
DRM_ERROR("Failed to allocate memory for cursor\n");
return NULL;
}
/* Allocate clut buffer */
size = 0x100 * sizeof(unsigned short);
cursor->clut = dma_alloc_wc(dev, size, &cursor->clut_paddr,
GFP_KERNEL | GFP_DMA);
if (!cursor->clut) {
DRM_ERROR("Failed to allocate memory for cursor clut\n");
goto err_clut;
}
cursor->dev = dev;
cursor->regs = baseaddr;
cursor->plane.desc = desc;
cursor->plane.status = STI_PLANE_DISABLED;
sti_cursor_init(cursor);
res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
possible_crtcs,
&sti_cursor_plane_helpers_funcs,
cursor_supported_formats,
ARRAY_SIZE(cursor_supported_formats),
NULL, DRM_PLANE_TYPE_CURSOR, NULL);
if (res) {
DRM_ERROR("Failed to initialize universal plane\n");
goto err_plane;
}
drm_plane_helper_add(&cursor->plane.drm_plane,
&sti_cursor_helpers_funcs);
sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
return &cursor->plane.drm_plane;
err_plane:
dma_free_wc(dev, size, cursor->clut, cursor->clut_paddr);
err_clut:
devm_kfree(dev, cursor);
return NULL;
}
| linux-master | drivers/gpu/drm/sti/sti_cursor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Vincent Abriou <[email protected]> for STMicroelectronics.
*/
#include <drm/drm_print.h>
#include "sti_hdmi_tx3g4c28phy.h"
#define HDMI_SRZ_CFG 0x504
#define HDMI_SRZ_PLL_CFG 0x510
#define HDMI_SRZ_ICNTL 0x518
#define HDMI_SRZ_CALCODE_EXT 0x520
#define HDMI_SRZ_CFG_EN BIT(0)
#define HDMI_SRZ_CFG_DISABLE_BYPASS_SINK_CURRENT BIT(1)
#define HDMI_SRZ_CFG_EXTERNAL_DATA BIT(16)
#define HDMI_SRZ_CFG_RBIAS_EXT BIT(17)
#define HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION BIT(18)
#define HDMI_SRZ_CFG_EN_BIASRES_DETECTION BIT(19)
#define HDMI_SRZ_CFG_EN_SRC_TERMINATION BIT(24)
#define HDMI_SRZ_CFG_INTERNAL_MASK (HDMI_SRZ_CFG_EN | \
HDMI_SRZ_CFG_DISABLE_BYPASS_SINK_CURRENT | \
HDMI_SRZ_CFG_EXTERNAL_DATA | \
HDMI_SRZ_CFG_RBIAS_EXT | \
HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION | \
HDMI_SRZ_CFG_EN_BIASRES_DETECTION | \
HDMI_SRZ_CFG_EN_SRC_TERMINATION)
#define PLL_CFG_EN BIT(0)
#define PLL_CFG_NDIV_SHIFT (8)
#define PLL_CFG_IDF_SHIFT (16)
#define PLL_CFG_ODF_SHIFT (24)
#define ODF_DIV_1 (0)
#define ODF_DIV_2 (1)
#define ODF_DIV_4 (2)
#define ODF_DIV_8 (3)
#define HDMI_TIMEOUT_PLL_LOCK 50 /* milliseconds */
struct plldividers_s {
uint32_t min;
uint32_t max;
uint32_t idf;
uint32_t odf;
};
/*
* Functional specification recommended values
*/
#define NB_PLL_MODE 5
static struct plldividers_s plldividers[NB_PLL_MODE] = {
{0, 20000000, 1, ODF_DIV_8},
{20000000, 42500000, 2, ODF_DIV_8},
{42500000, 85000000, 4, ODF_DIV_4},
{85000000, 170000000, 8, ODF_DIV_2},
{170000000, 340000000, 16, ODF_DIV_1}
};
#define NB_HDMI_PHY_CONFIG 2
static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
{0, 250000000, {0x0, 0x0, 0x0, 0x0} },
{250000000, 300000000, {0x1110, 0x0, 0x0, 0x0} },
};
/**
* sti_hdmi_tx3g4c28phy_start - Start hdmi phy macro cell tx3g4c28
*
 * @hdmi: pointer to the hdmi internal structure
 *
 * Return false if an error occurs
*/
static bool sti_hdmi_tx3g4c28phy_start(struct sti_hdmi *hdmi)
{
u32 ckpxpll = hdmi->mode.clock * 1000;
u32 val, tmdsck, idf, odf, pllctrl = 0;
bool foundplldivides = false;
int i;
DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll);
for (i = 0; i < NB_PLL_MODE; i++) {
if (ckpxpll >= plldividers[i].min &&
ckpxpll < plldividers[i].max) {
idf = plldividers[i].idf;
odf = plldividers[i].odf;
foundplldivides = true;
break;
}
}
if (!foundplldivides) {
DRM_ERROR("input TMDS clock speed (%d) not supported\n",
ckpxpll);
goto err;
}
	/* Assuming no pixel repetition and 24-bit color */
tmdsck = ckpxpll;
pllctrl |= 40 << PLL_CFG_NDIV_SHIFT;
if (tmdsck > 340000000) {
DRM_ERROR("output TMDS clock (%d) out of range\n", tmdsck);
goto err;
}
pllctrl |= idf << PLL_CFG_IDF_SHIFT;
pllctrl |= odf << PLL_CFG_ODF_SHIFT;
/*
* Configure and power up the PHY PLL
*/
hdmi->event_received = false;
DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl);
hdmi_write(hdmi, (pllctrl | PLL_CFG_EN), HDMI_SRZ_PLL_CFG);
	/* wait for the PLL interrupt */
wait_event_interruptible_timeout(hdmi->wait_event,
hdmi->event_received == true,
msecs_to_jiffies
(HDMI_TIMEOUT_PLL_LOCK));
if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) {
DRM_ERROR("hdmi phy pll not locked\n");
goto err;
}
DRM_DEBUG_DRIVER("got PHY PLL Lock\n");
val = (HDMI_SRZ_CFG_EN |
HDMI_SRZ_CFG_EXTERNAL_DATA |
HDMI_SRZ_CFG_EN_BIASRES_DETECTION |
HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION);
if (tmdsck > 165000000)
val |= HDMI_SRZ_CFG_EN_SRC_TERMINATION;
/*
* To configure the source termination and pre-emphasis appropriately
* for different high speed TMDS clock frequencies a phy configuration
* table must be provided, tailored to the SoC and board combination.
*/
for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) {
if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) &&
(hdmiphy_config[i].max_tmds_freq >= tmdsck)) {
val |= (hdmiphy_config[i].config[0]
& ~HDMI_SRZ_CFG_INTERNAL_MASK);
hdmi_write(hdmi, val, HDMI_SRZ_CFG);
val = hdmiphy_config[i].config[1];
hdmi_write(hdmi, val, HDMI_SRZ_ICNTL);
val = hdmiphy_config[i].config[2];
hdmi_write(hdmi, val, HDMI_SRZ_CALCODE_EXT);
DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x\n",
hdmiphy_config[i].config[0],
hdmiphy_config[i].config[1],
hdmiphy_config[i].config[2]);
return true;
}
}
/*
* Default, power up the serializer with no pre-emphasis or
* output swing correction
*/
hdmi_write(hdmi, val, HDMI_SRZ_CFG);
hdmi_write(hdmi, 0x0, HDMI_SRZ_ICNTL);
hdmi_write(hdmi, 0x0, HDMI_SRZ_CALCODE_EXT);
return true;
err:
return false;
}
/**
* sti_hdmi_tx3g4c28phy_stop - Stop hdmi phy macro cell tx3g4c28
*
 * @hdmi: pointer to the hdmi internal structure
*/
static void sti_hdmi_tx3g4c28phy_stop(struct sti_hdmi *hdmi)
{
int val = 0;
DRM_DEBUG_DRIVER("\n");
hdmi->event_received = false;
val = HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION;
val |= HDMI_SRZ_CFG_EN_BIASRES_DETECTION;
hdmi_write(hdmi, val, HDMI_SRZ_CFG);
hdmi_write(hdmi, 0, HDMI_SRZ_PLL_CFG);
	/* wait for the PLL interrupt */
wait_event_interruptible_timeout(hdmi->wait_event,
hdmi->event_received == true,
msecs_to_jiffies
(HDMI_TIMEOUT_PLL_LOCK));
if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK)
DRM_ERROR("hdmi phy pll not well disabled\n");
}
struct hdmi_phy_ops tx3g4c28phy_ops = {
.start = sti_hdmi_tx3g4c28phy_start,
.stop = sti_hdmi_tx3g4c28phy_stop,
};
| linux-master | drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Benjamin Gaignard <[email protected]> for STMicroelectronics.
*/
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include "sti_drv.h"
#include "sti_plane.h"
#define DRIVER_NAME "sti"
#define DRIVER_DESC "STMicroelectronics SoC DRM"
#define DRIVER_DATE "20140601"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define STI_MAX_FB_HEIGHT 4096
#define STI_MAX_FB_WIDTH 4096
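/*
 * fps debugfs interface: each bit of the 64-bit "fps_show" value
 * enables (set) or reports (get) fps tracing for one plane, in
 * mode_config plane_list order.
 */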
static int sti_drm_fps_get(void *data, u64 *val)
{
struct drm_device *drm_dev = data;
struct drm_plane *p;
unsigned int i = 0;
*val = 0;
list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
struct sti_plane *plane = to_sti_plane(p);
*val |= plane->fps_info.output << i;
i++;
}
return 0;
}
static int sti_drm_fps_set(void *data, u64 val)
{
struct drm_device *drm_dev = data;
struct drm_plane *p;
unsigned int i = 0;
list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
struct sti_plane *plane = to_sti_plane(p);
memset(&plane->fps_info, 0, sizeof(plane->fps_info));
plane->fps_info.output = (val >> i) & 1;
i++;
}
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(sti_drm_fps_fops,
sti_drm_fps_get, sti_drm_fps_set, "%llu\n");
static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct drm_device *dev = node->minor->dev;
struct drm_plane *p;
list_for_each_entry(p, &dev->mode_config.plane_list, head) {
struct sti_plane *plane = to_sti_plane(p);
seq_printf(s, "%s%s\n",
plane->fps_info.fps_str,
plane->fps_info.fips_str);
}
return 0;
}
static struct drm_info_list sti_drm_dbg_list[] = {
{"fps_get", sti_drm_fps_dbg_show, 0},
};
static void sti_drm_dbg_init(struct drm_minor *minor)
{
drm_debugfs_create_files(sti_drm_dbg_list,
ARRAY_SIZE(sti_drm_dbg_list),
minor->debugfs_root, minor);
debugfs_create_file("fps_show", S_IRUGO | S_IWUSR, minor->debugfs_root,
minor->dev, &sti_drm_fps_fops);
DRM_INFO("%s: debugfs installed\n", DRIVER_NAME);
}
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static void sti_mode_config_init(struct drm_device *dev)
{
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
	/*
	 * Set max width and height as default values.
	 * These values are used to check the framebuffer size limitation
	 * at drm_mode_addfb().
	 */
dev->mode_config.max_width = STI_MAX_FB_WIDTH;
dev->mode_config.max_height = STI_MAX_FB_HEIGHT;
dev->mode_config.funcs = &sti_mode_config_funcs;
dev->mode_config.normalize_zpos = true;
}
DEFINE_DRM_GEM_DMA_FOPS(sti_driver_fops);
static const struct drm_driver sti_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &sti_driver_fops,
DRM_GEM_DMA_DRIVER_OPS,
.debugfs_init = sti_drm_dbg_init,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
static int sti_init(struct drm_device *ddev)
{
struct sti_private *private;
private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private)
return -ENOMEM;
ddev->dev_private = (void *)private;
dev_set_drvdata(ddev->dev, ddev);
private->drm_dev = ddev;
drm_mode_config_init(ddev);
sti_mode_config_init(ddev);
drm_kms_helper_poll_init(ddev);
return 0;
}
static void sti_cleanup(struct drm_device *ddev)
{
struct sti_private *private = ddev->dev_private;
drm_kms_helper_poll_fini(ddev);
drm_atomic_helper_shutdown(ddev);
drm_mode_config_cleanup(ddev);
component_unbind_all(ddev->dev, ddev);
kfree(private);
ddev->dev_private = NULL;
}
static int sti_bind(struct device *dev)
{
struct drm_device *ddev;
int ret;
ddev = drm_dev_alloc(&sti_driver, dev);
if (IS_ERR(ddev))
return PTR_ERR(ddev);
ret = sti_init(ddev);
if (ret)
goto err_drm_dev_put;
ret = component_bind_all(ddev->dev, ddev);
if (ret)
goto err_cleanup;
ret = drm_dev_register(ddev, 0);
if (ret)
goto err_cleanup;
drm_mode_config_reset(ddev);
drm_fbdev_dma_setup(ddev, 32);
return 0;
err_cleanup:
sti_cleanup(ddev);
err_drm_dev_put:
drm_dev_put(ddev);
return ret;
}
static void sti_unbind(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
drm_dev_unregister(ddev);
sti_cleanup(ddev);
drm_dev_put(ddev);
}
static const struct component_master_ops sti_ops = {
.bind = sti_bind,
.unbind = sti_unbind,
};
static int sti_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct device_node *child_np;
struct component_match *match = NULL;
dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
devm_of_platform_populate(dev);
child_np = of_get_next_available_child(node, NULL);
while (child_np) {
drm_of_component_match_add(dev, &match, component_compare_of,
child_np);
child_np = of_get_next_available_child(node, child_np);
}
return component_master_add_with_match(dev, &sti_ops, match);
}
static void sti_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &sti_ops);
}
static const struct of_device_id sti_dt_ids[] = {
{ .compatible = "st,sti-display-subsystem", },
{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, sti_dt_ids);
static struct platform_driver sti_platform_driver = {
.probe = sti_platform_probe,
.remove_new = sti_platform_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = sti_dt_ids,
},
};
static struct platform_driver * const drivers[] = {
&sti_tvout_driver,
&sti_hqvdp_driver,
&sti_hdmi_driver,
&sti_hda_driver,
&sti_dvo_driver,
&sti_vtg_driver,
&sti_compositor_driver,
&sti_platform_driver,
};
static int sti_drm_init(void)
{
if (drm_firmware_drivers_only())
return -ENODEV;
return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(sti_drm_init);
static void sti_drm_exit(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(sti_drm_exit);
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sti/sti_drv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Vincent Abriou <[email protected]> for STMicroelectronics.
*/
#include <drm/drm_print.h>
#include "sti_awg_utils.h"
#define AWG_DELAY (-5)
#define AWG_OPCODE_OFFSET 10
#define AWG_MAX_ARG 0x3ff
enum opcode {
SET,
RPTSET,
RPLSET,
SKIP,
STOP,
REPEAT,
REPLAY,
JUMP,
HOLD,
};
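/*
 * awg_generate_instr - Encode one AWG instruction into the firmware RAM
 *
 * An instruction is a 14-bit word: opcode in bits 13:10, argument (with
 * the optional mux_sel and data_en flags in bits 8 and 9) in bits 9:0.
 * SKIP/REPEAT/REPLAY arguments larger than AWG_MAX_ARG are split into
 * several instructions.
 */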
static int awg_generate_instr(enum opcode opcode,
long int arg,
long int mux_sel,
long int data_en,
struct awg_code_generation_params *fwparams)
{
u32 instruction = 0;
u32 mux = (mux_sel << 8) & 0x1ff;
u32 data_enable = (data_en << 9) & 0x2ff;
long int arg_tmp = arg;
	/* The skip, repeat and replay arg must not exceed 1023.
	 * If the user wants to exceed this value, the instruction must be
	 * duplicated and arg adjusted for each duplicated instruction.
	 *
	 * mux_sel is used in case of SAV/EAV synchronization.
	 */
while (arg_tmp > 0) {
arg = arg_tmp;
if (fwparams->instruction_offset >= AWG_MAX_INST) {
DRM_ERROR("too many number of instructions\n");
return -EINVAL;
}
switch (opcode) {
case SKIP:
			/* let 'arg' + 1 pixels elapse without changing the
			 * output bus */
arg--; /* pixel adjustment */
arg_tmp--;
if (arg < 0) {
/* SKIP instruction not needed */
return 0;
}
if (arg == 0) {
				/* SKIP 0 is not permitted, but we want to
				 * skip 1 pixel, so transform the SKIP into a
				 * SET instruction */
opcode = SET;
break;
}
mux = 0;
data_enable = 0;
arg &= AWG_MAX_ARG;
break;
case REPEAT:
case REPLAY:
if (arg == 0) {
/* REPEAT or REPLAY instruction not needed */
return 0;
}
mux = 0;
data_enable = 0;
arg &= AWG_MAX_ARG;
break;
case JUMP:
mux = 0;
data_enable = 0;
arg |= 0x40; /* for jump instruction 7th bit is 1 */
arg &= AWG_MAX_ARG;
break;
case STOP:
arg = 0;
break;
case SET:
case RPTSET:
case RPLSET:
case HOLD:
arg &= (0x0ff);
break;
default:
DRM_ERROR("instruction %d does not exist\n", opcode);
return -EINVAL;
}
arg_tmp = arg_tmp - arg;
arg = ((arg + mux) + data_enable);
instruction = ((opcode) << AWG_OPCODE_OFFSET) | arg;
fwparams->ram_code[fwparams->instruction_offset] =
instruction & (0x3fff);
fwparams->instruction_offset++;
}
return 0;
}
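/*
 * awg_generate_line_signal - Generate the AWG sequence for one line
 *
 * Keeps the bus at the blanking level, skips the trailing pixels,
 * asserts the data-enable signal for the duration of the active pixels,
 * then de-asserts it for the blanking pixels.
 */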
static int awg_generate_line_signal(
struct awg_code_generation_params *fwparams,
struct awg_timing *timing)
{
long int val;
int ret = 0;
if (timing->trailing_pixels > 0) {
		/* skip the trailing pixels */
val = timing->blanking_level;
ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
val = timing->trailing_pixels - 1 + AWG_DELAY;
ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams);
}
/* set DE signal high */
val = timing->blanking_level;
ret |= awg_generate_instr((timing->trailing_pixels > 0) ? SET : RPLSET,
val, 0, 1, fwparams);
if (timing->blanking_pixels > 0) {
		/* skip the number of active pixels */
val = timing->active_pixels - 1;
ret |= awg_generate_instr(SKIP, val, 0, 1, fwparams);
/* set DE signal low */
val = timing->blanking_level;
ret |= awg_generate_instr(SET, val, 0, 0, fwparams);
}
return ret;
}
int sti_awg_generate_code_data_enable_mode(
struct awg_code_generation_params *fwparams,
struct awg_timing *timing)
{
long int val, tmp_val;
int ret = 0;
if (timing->trailing_lines > 0) {
/* skip trailing lines */
val = timing->blanking_level;
ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
val = timing->trailing_lines - 1;
ret |= awg_generate_instr(REPLAY, val, 0, 0, fwparams);
}
tmp_val = timing->active_lines - 1;
while (tmp_val > 0) {
/* generate DE signal for each line */
ret |= awg_generate_line_signal(fwparams, timing);
		/* replay the sequence for as many active lines as defined */
ret |= awg_generate_instr(REPLAY,
min_t(int, AWG_MAX_ARG, tmp_val),
0, 0, fwparams);
tmp_val -= AWG_MAX_ARG;
}
if (timing->blanking_lines > 0) {
/* skip blanking lines */
val = timing->blanking_level;
ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
val = timing->blanking_lines - 1;
ret |= awg_generate_instr(REPLAY, val, 0, 0, fwparams);
}
return ret;
}
| linux-master | drivers/gpu/drm/sti/sti_awg_utils.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Fabien Dessenne <[email protected]> for STMicroelectronics.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
/* HDformatter registers */
#define HDA_ANA_CFG 0x0000
#define HDA_ANA_SCALE_CTRL_Y 0x0004
#define HDA_ANA_SCALE_CTRL_CB 0x0008
#define HDA_ANA_SCALE_CTRL_CR 0x000C
#define HDA_ANA_ANC_CTRL 0x0010
#define HDA_ANA_SRC_Y_CFG 0x0014
#define HDA_COEFF_Y_PH1_TAP123 0x0018
#define HDA_COEFF_Y_PH1_TAP456 0x001C
#define HDA_COEFF_Y_PH2_TAP123 0x0020
#define HDA_COEFF_Y_PH2_TAP456 0x0024
#define HDA_COEFF_Y_PH3_TAP123 0x0028
#define HDA_COEFF_Y_PH3_TAP456 0x002C
#define HDA_COEFF_Y_PH4_TAP123 0x0030
#define HDA_COEFF_Y_PH4_TAP456 0x0034
#define HDA_ANA_SRC_C_CFG 0x0040
#define HDA_COEFF_C_PH1_TAP123 0x0044
#define HDA_COEFF_C_PH1_TAP456 0x0048
#define HDA_COEFF_C_PH2_TAP123 0x004C
#define HDA_COEFF_C_PH2_TAP456 0x0050
#define HDA_COEFF_C_PH3_TAP123 0x0054
#define HDA_COEFF_C_PH3_TAP456 0x0058
#define HDA_COEFF_C_PH4_TAP123 0x005C
#define HDA_COEFF_C_PH4_TAP456 0x0060
#define HDA_SYNC_AWGI 0x0300
/* HDA_ANA_CFG */
#define CFG_AWG_ASYNC_EN BIT(0)
#define CFG_AWG_ASYNC_HSYNC_MTD BIT(1)
#define CFG_AWG_ASYNC_VSYNC_MTD BIT(2)
#define CFG_AWG_SYNC_DEL BIT(3)
#define CFG_AWG_FLTR_MODE_SHIFT 4
#define CFG_AWG_FLTR_MODE_MASK (0xF << CFG_AWG_FLTR_MODE_SHIFT)
#define CFG_AWG_FLTR_MODE_SD (0 << CFG_AWG_FLTR_MODE_SHIFT)
#define CFG_AWG_FLTR_MODE_ED (1 << CFG_AWG_FLTR_MODE_SHIFT)
#define CFG_AWG_FLTR_MODE_HD (2 << CFG_AWG_FLTR_MODE_SHIFT)
#define CFG_SYNC_ON_PBPR_MASK BIT(8)
#define CFG_PREFILTER_EN_MASK BIT(9)
#define CFG_PBPR_SYNC_OFF_SHIFT 16
#define CFG_PBPR_SYNC_OFF_MASK (0x7FF << CFG_PBPR_SYNC_OFF_SHIFT)
#define CFG_PBPR_SYNC_OFF_VAL 0x117 /* Voltage dependent. stiH416 */
/* Default scaling values */
#define SCALE_CTRL_Y_DFLT 0x00C50256
#define SCALE_CTRL_CB_DFLT 0x00DB0249
#define SCALE_CTRL_CR_DFLT 0x00DB0249
/* Video DACs control */
#define DAC_CFG_HD_HZUVW_OFF_MASK BIT(1)
/* Upsampler values for the alternative 2X Filter */
#define SAMPLER_COEF_NB 8
#define HDA_ANA_SRC_Y_CFG_ALT_2X 0x01130000
static u32 coef_y_alt_2x[] = {
0x00FE83FB, 0x1F900401, 0x00000000, 0x00000000,
0x00F408F9, 0x055F7C25, 0x00000000, 0x00000000
};
#define HDA_ANA_SRC_C_CFG_ALT_2X 0x01750004
static u32 coef_c_alt_2x[] = {
0x001305F7, 0x05274BD0, 0x00000000, 0x00000000,
0x0004907C, 0x09C80B9D, 0x00000000, 0x00000000
};
/* Upsampler values for the 4X Filter */
#define HDA_ANA_SRC_Y_CFG_4X 0x01ED0005
#define HDA_ANA_SRC_C_CFG_4X 0x01ED0004
static u32 coef_yc_4x[] = {
0x00FC827F, 0x008FE20B, 0x00F684FC, 0x050F7C24,
0x00F4857C, 0x0A1F402E, 0x00FA027F, 0x0E076E1D
};
/* AWG instructions for some video modes */
#define AWG_MAX_INST 64
/* 720p@50 */
static u32 AWGi_720p_50[] = {
0x00000971, 0x00000C26, 0x0000013B, 0x00000CDA,
0x00000104, 0x00000E7E, 0x00000E7F, 0x0000013B,
0x00000D8E, 0x00000104, 0x00001804, 0x00000971,
0x00000C26, 0x0000003B, 0x00000FB4, 0x00000FB5,
0x00000104, 0x00001AE8
};
#define NN_720p_50 ARRAY_SIZE(AWGi_720p_50)
/* 720p@60 */
static u32 AWGi_720p_60[] = {
0x00000971, 0x00000C26, 0x0000013B, 0x00000CDA,
0x00000104, 0x00000E7E, 0x00000E7F, 0x0000013B,
0x00000C44, 0x00000104, 0x00001804, 0x00000971,
0x00000C26, 0x0000003B, 0x00000F0F, 0x00000F10,
0x00000104, 0x00001AE8
};
#define NN_720p_60 ARRAY_SIZE(AWGi_720p_60)
/* 1080p@30 */
static u32 AWGi_1080p_30[] = {
0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
0x00000C2A, 0x00000104, 0x00001804, 0x00000971,
0x00000C2A, 0x0000003B, 0x00000EBE, 0x00000EBF,
0x00000EBF, 0x00000104, 0x00001A2F, 0x00001C4B,
0x00001C52
};
#define NN_1080p_30 ARRAY_SIZE(AWGi_1080p_30)
/* 1080p@25 */
static u32 AWGi_1080p_25[] = {
0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
0x00000DE2, 0x00000104, 0x00001804, 0x00000971,
0x00000C2A, 0x0000003B, 0x00000F51, 0x00000F51,
0x00000F52, 0x00000104, 0x00001A2F, 0x00001C4B,
0x00001C52
};
#define NN_1080p_25 ARRAY_SIZE(AWGi_1080p_25)
/* 1080p@24 */
static u32 AWGi_1080p_24[] = {
0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
0x00000E50, 0x00000104, 0x00001804, 0x00000971,
0x00000C2A, 0x0000003B, 0x00000F76, 0x00000F76,
0x00000F76, 0x00000104, 0x00001A2F, 0x00001C4B,
0x00001C52
};
#define NN_1080p_24 ARRAY_SIZE(AWGi_1080p_24)
/* 720x480p@60 */
static u32 AWGi_720x480p_60[] = {
0x00000904, 0x00000F18, 0x0000013B, 0x00001805,
0x00000904, 0x00000C3D, 0x0000003B, 0x00001A06
};
#define NN_720x480p_60 ARRAY_SIZE(AWGi_720x480p_60)
/* Video mode category */
enum sti_hda_vid_cat {
VID_SD,
VID_ED,
VID_HD_74M,
VID_HD_148M
};
struct sti_hda_video_config {
struct drm_display_mode mode;
u32 *awg_instr;
int nb_instr;
enum sti_hda_vid_cat vid_cat;
};
/* HD analog supported modes
* Interlaced modes may be added when supported by the whole display chain
*/
static const struct sti_hda_video_config hda_supported_modes[] = {
/* 1080p30 74.250Mhz */
{{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
AWGi_1080p_30, NN_1080p_30, VID_HD_74M},
/* 1080p30 74.176Mhz */
{{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74176, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
AWGi_1080p_30, NN_1080p_30, VID_HD_74M},
/* 1080p24 74.250Mhz */
{{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
AWGi_1080p_24, NN_1080p_24, VID_HD_74M},
/* 1080p24 74.176Mhz */
{{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74176, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
AWGi_1080p_24, NN_1080p_24, VID_HD_74M},
/* 1080p25 74.250Mhz */
{{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
AWGi_1080p_25, NN_1080p_25, VID_HD_74M},
/* 720p60 74.250Mhz */
{{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
AWGi_720p_60, NN_720p_60, VID_HD_74M},
/* 720p60 74.176Mhz */
{{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74176, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
AWGi_720p_60, NN_720p_60, VID_HD_74M},
/* 720p50 74.250Mhz */
{{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
AWGi_720p_50, NN_720p_50, VID_HD_74M},
/* 720x480p60 27.027Mhz */
{{DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27027, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
AWGi_720x480p_60, NN_720x480p_60, VID_ED},
/* 720x480p60 27.000Mhz */
{{DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
AWGi_720x480p_60, NN_720x480p_60, VID_ED}
};
/*
 * STI HD analog structure
 *
 * @dev: driver device
 * @drm_dev: pointer to drm device
 * @mode: current display mode selected
 * @regs: HD analog registers
 * @video_dacs_ctrl: video DACs control register
 * @clk_pix: HD analog pixel clock
 * @clk_hddac: HD DAC clock
 * @enabled: true if HD analog is enabled else false
 */
struct sti_hda {
struct device dev;
struct drm_device *drm_dev;
struct drm_display_mode mode;
void __iomem *regs;
void __iomem *video_dacs_ctrl;
struct clk *clk_pix;
struct clk *clk_hddac;
bool enabled;
};
struct sti_hda_connector {
struct drm_connector drm_connector;
struct drm_encoder *encoder;
struct sti_hda *hda;
};
#define to_sti_hda_connector(x) \
container_of(x, struct sti_hda_connector, drm_connector)
static u32 hda_read(struct sti_hda *hda, int offset)
{
return readl(hda->regs + offset);
}
static void hda_write(struct sti_hda *hda, u32 val, int offset)
{
writel(val, hda->regs + offset);
}
/**
* hda_get_mode_idx - Search for a video mode in the supported modes table
*
* @mode: mode being searched
* @idx: index of the found mode
*
* Return true if mode is found
*/
static bool hda_get_mode_idx(struct drm_display_mode mode, int *idx)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++)
if (drm_mode_equal(&hda_supported_modes[i].mode, &mode)) {
*idx = i;
return true;
}
return false;
}
/**
* hda_enable_hd_dacs - Enable the HD DACS
*
* @hda: pointer to HD analog structure
* @enable: true if HD DACS need to be enabled, else false
*/
static void hda_enable_hd_dacs(struct sti_hda *hda, bool enable)
{
if (hda->video_dacs_ctrl) {
u32 val;
val = readl(hda->video_dacs_ctrl);
if (enable)
val &= ~DAC_CFG_HD_HZUVW_OFF_MASK;
else
val |= DAC_CFG_HD_HZUVW_OFF_MASK;
writel(val, hda->video_dacs_ctrl);
}
}
#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
readl(hda->regs + reg))
static void hda_dbg_cfg(struct seq_file *s, int val)
{
seq_puts(s, "\tAWG ");
seq_puts(s, val & CFG_AWG_ASYNC_EN ? "enabled" : "disabled");
}
static void hda_dbg_awg_microcode(struct seq_file *s, void __iomem *reg)
{
unsigned int i;
seq_puts(s, "\n\n HDA AWG microcode:");
for (i = 0; i < AWG_MAX_INST; i++) {
if (i % 8 == 0)
seq_printf(s, "\n %04X:", i);
seq_printf(s, " %04X", readl(reg + i * 4));
}
}
static void hda_dbg_video_dacs_ctrl(struct seq_file *s, void __iomem *reg)
{
u32 val = readl(reg);
seq_printf(s, "\n\n %-25s 0x%08X", "VIDEO_DACS_CONTROL", val);
seq_puts(s, "\tHD DACs ");
seq_puts(s, val & DAC_CFG_HD_HZUVW_OFF_MASK ? "disabled" : "enabled");
}
static int hda_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_hda *hda = (struct sti_hda *)node->info_ent->data;
seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs);
DBGFS_DUMP(HDA_ANA_CFG);
hda_dbg_cfg(s, readl(hda->regs + HDA_ANA_CFG));
DBGFS_DUMP(HDA_ANA_SCALE_CTRL_Y);
DBGFS_DUMP(HDA_ANA_SCALE_CTRL_CB);
DBGFS_DUMP(HDA_ANA_SCALE_CTRL_CR);
DBGFS_DUMP(HDA_ANA_ANC_CTRL);
DBGFS_DUMP(HDA_ANA_SRC_Y_CFG);
DBGFS_DUMP(HDA_ANA_SRC_C_CFG);
hda_dbg_awg_microcode(s, hda->regs + HDA_SYNC_AWGI);
if (hda->video_dacs_ctrl)
hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl);
seq_putc(s, '\n');
return 0;
}
static struct drm_info_list hda_debugfs_files[] = {
{ "hda", hda_dbg_show, 0, NULL },
};
static void hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hda_debugfs_files); i++)
hda_debugfs_files[i].data = hda;
drm_debugfs_create_files(hda_debugfs_files,
ARRAY_SIZE(hda_debugfs_files),
minor->debugfs_root, minor);
}
/**
* sti_hda_configure_awg - Configure AWG, writing instructions
*
* @hda: pointer to HD analog structure
* @awg_instr: pointer to AWG instructions table
* @nb: nb of AWG instructions
*/
static void sti_hda_configure_awg(struct sti_hda *hda, u32 *awg_instr, int nb)
{
unsigned int i;
DRM_DEBUG_DRIVER("\n");
for (i = 0; i < nb; i++)
hda_write(hda, awg_instr[i], HDA_SYNC_AWGI + i * 4);
for (i = nb; i < AWG_MAX_INST; i++)
hda_write(hda, 0, HDA_SYNC_AWGI + i * 4);
}
static void sti_hda_disable(struct drm_bridge *bridge)
{
struct sti_hda *hda = bridge->driver_private;
u32 val;
if (!hda->enabled)
return;
DRM_DEBUG_DRIVER("\n");
/* Disable HD DAC and AWG */
val = hda_read(hda, HDA_ANA_CFG);
val &= ~CFG_AWG_ASYNC_EN;
hda_write(hda, val, HDA_ANA_CFG);
hda_write(hda, 0, HDA_ANA_ANC_CTRL);
hda_enable_hd_dacs(hda, false);
/* Disable/unprepare hda clock */
clk_disable_unprepare(hda->clk_hddac);
clk_disable_unprepare(hda->clk_pix);
hda->enabled = false;
}
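/*
 * sti_hda_pre_enable - Power up and configure the HD analog path
 *
 * Enables the pixel and HDDAC clocks, selects the upsampler filter and
 * coefficients matching the video category of the current mode (2x for
 * HD, 4x for ED), programs the scaler, the HDFormatter and the AWG
 * microcode, then enables the AWG.
 */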
static void sti_hda_pre_enable(struct drm_bridge *bridge)
{
struct sti_hda *hda = bridge->driver_private;
u32 val, i, mode_idx;
u32 src_filter_y, src_filter_c;
u32 *coef_y, *coef_c;
u32 filter_mode;
DRM_DEBUG_DRIVER("\n");
if (hda->enabled)
return;
/* Prepare/enable clocks */
if (clk_prepare_enable(hda->clk_pix))
DRM_ERROR("Failed to prepare/enable hda_pix clk\n");
if (clk_prepare_enable(hda->clk_hddac))
DRM_ERROR("Failed to prepare/enable hda_hddac clk\n");
if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
DRM_ERROR("Undefined mode\n");
return;
}
switch (hda_supported_modes[mode_idx].vid_cat) {
case VID_HD_148M:
DRM_ERROR("Beyond HD analog capabilities\n");
return;
case VID_HD_74M:
/* HD use alternate 2x filter */
filter_mode = CFG_AWG_FLTR_MODE_HD;
src_filter_y = HDA_ANA_SRC_Y_CFG_ALT_2X;
src_filter_c = HDA_ANA_SRC_C_CFG_ALT_2X;
coef_y = coef_y_alt_2x;
coef_c = coef_c_alt_2x;
break;
case VID_ED:
/* ED uses 4x filter */
filter_mode = CFG_AWG_FLTR_MODE_ED;
src_filter_y = HDA_ANA_SRC_Y_CFG_4X;
src_filter_c = HDA_ANA_SRC_C_CFG_4X;
coef_y = coef_yc_4x;
coef_c = coef_yc_4x;
break;
case VID_SD:
DRM_ERROR("Not supported\n");
return;
default:
DRM_ERROR("Undefined resolution\n");
return;
}
DRM_DEBUG_DRIVER("Using HDA mode #%d\n", mode_idx);
/* Enable HD Video DACs */
hda_enable_hd_dacs(hda, true);
/* Configure scaler */
hda_write(hda, SCALE_CTRL_Y_DFLT, HDA_ANA_SCALE_CTRL_Y);
hda_write(hda, SCALE_CTRL_CB_DFLT, HDA_ANA_SCALE_CTRL_CB);
hda_write(hda, SCALE_CTRL_CR_DFLT, HDA_ANA_SCALE_CTRL_CR);
/* Configure sampler */
	hda_write(hda, src_filter_y, HDA_ANA_SRC_Y_CFG);
hda_write(hda, src_filter_c, HDA_ANA_SRC_C_CFG);
for (i = 0; i < SAMPLER_COEF_NB; i++) {
hda_write(hda, coef_y[i], HDA_COEFF_Y_PH1_TAP123 + i * 4);
hda_write(hda, coef_c[i], HDA_COEFF_C_PH1_TAP123 + i * 4);
}
/* Configure main HDFormatter */
val = 0;
val |= (hda->mode.flags & DRM_MODE_FLAG_INTERLACE) ?
0 : CFG_AWG_ASYNC_VSYNC_MTD;
val |= (CFG_PBPR_SYNC_OFF_VAL << CFG_PBPR_SYNC_OFF_SHIFT);
val |= filter_mode;
hda_write(hda, val, HDA_ANA_CFG);
/* Configure AWG */
sti_hda_configure_awg(hda, hda_supported_modes[mode_idx].awg_instr,
hda_supported_modes[mode_idx].nb_instr);
/* Enable AWG */
val = hda_read(hda, HDA_ANA_CFG);
val |= CFG_AWG_ASYNC_EN;
hda_write(hda, val, HDA_ANA_CFG);
hda->enabled = true;
}
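/*
 * sti_hda_set_mode - Store the mode and set the HD analog clock rates
 *
 * The HDDAC clock runs at 2x the pixel clock for HD modes and 4x for ED
 * modes, while the HDFormatter pixel clock follows the compositor clock.
 */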
static void sti_hda_set_mode(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct sti_hda *hda = bridge->driver_private;
u32 mode_idx;
int hddac_rate;
int ret;
DRM_DEBUG_DRIVER("\n");
drm_mode_copy(&hda->mode, mode);
if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
DRM_ERROR("Undefined mode\n");
return;
}
switch (hda_supported_modes[mode_idx].vid_cat) {
case VID_HD_74M:
/* HD use alternate 2x filter */
hddac_rate = mode->clock * 1000 * 2;
break;
case VID_ED:
/* ED uses 4x filter */
hddac_rate = mode->clock * 1000 * 4;
break;
default:
DRM_ERROR("Undefined mode\n");
return;
}
	/* HD DAC = 148.5 MHz or 108 MHz */
ret = clk_set_rate(hda->clk_hddac, hddac_rate);
if (ret < 0)
DRM_ERROR("Cannot set rate (%dHz) for hda_hddac clk\n",
hddac_rate);
/* HDformatter clock = compositor clock */
ret = clk_set_rate(hda->clk_pix, mode->clock * 1000);
if (ret < 0)
DRM_ERROR("Cannot set rate (%dHz) for hda_pix clk\n",
mode->clock * 1000);
}
static void sti_hda_bridge_nope(struct drm_bridge *bridge)
{
/* do nothing */
}
static const struct drm_bridge_funcs sti_hda_bridge_funcs = {
.pre_enable = sti_hda_pre_enable,
.enable = sti_hda_bridge_nope,
.disable = sti_hda_disable,
.post_disable = sti_hda_bridge_nope,
.mode_set = sti_hda_set_mode,
};
static int sti_hda_connector_get_modes(struct drm_connector *connector)
{
unsigned int i;
int count = 0;
struct sti_hda_connector *hda_connector
= to_sti_hda_connector(connector);
struct sti_hda *hda = hda_connector->hda;
DRM_DEBUG_DRIVER("\n");
for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++) {
struct drm_display_mode *mode =
drm_mode_duplicate(hda->drm_dev,
&hda_supported_modes[i].mode);
if (!mode)
continue;
/* the first mode is the preferred mode */
if (i == 0)
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
count++;
}
return count;
}
#define CLK_TOLERANCE_HZ 50
static enum drm_mode_status
sti_hda_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int target = mode->clock * 1000;
int target_min = target - CLK_TOLERANCE_HZ;
int target_max = target + CLK_TOLERANCE_HZ;
int result;
int idx;
struct sti_hda_connector *hda_connector
= to_sti_hda_connector(connector);
struct sti_hda *hda = hda_connector->hda;
if (!hda_get_mode_idx(*mode, &idx)) {
return MODE_BAD;
} else {
result = clk_round_rate(hda->clk_pix, target);
DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n",
target, result);
if ((result < target_min) || (result > target_max)) {
DRM_DEBUG_DRIVER("hda pixclk=%d not supported\n",
target);
return MODE_BAD;
}
}
return MODE_OK;
}
static const
struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = {
.get_modes = sti_hda_connector_get_modes,
.mode_valid = sti_hda_connector_mode_valid,
};
static int sti_hda_late_register(struct drm_connector *connector)
{
struct sti_hda_connector *hda_connector
= to_sti_hda_connector(connector);
struct sti_hda *hda = hda_connector->hda;
hda_debugfs_init(hda, hda->drm_dev->primary);
return 0;
}
static const struct drm_connector_funcs sti_hda_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.late_register = sti_hda_late_register,
};
static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev)
{
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
return encoder;
}
return NULL;
}
static int sti_hda_bind(struct device *dev, struct device *master, void *data)
{
struct sti_hda *hda = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct drm_encoder *encoder;
struct sti_hda_connector *connector;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
hda->drm_dev = drm_dev;
encoder = sti_hda_find_encoder(drm_dev);
if (!encoder)
return -ENOMEM;
connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
if (!connector)
return -ENOMEM;
connector->hda = hda;
bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return -ENOMEM;
bridge->driver_private = hda;
bridge->funcs = &sti_hda_bridge_funcs;
drm_bridge_attach(encoder, bridge, NULL, 0);
connector->encoder = encoder;
drm_connector = (struct drm_connector *)connector;
drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_connector_init(drm_dev, drm_connector,
&sti_hda_connector_funcs, DRM_MODE_CONNECTOR_Component);
drm_connector_helper_add(drm_connector,
&sti_hda_connector_helper_funcs);
err = drm_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
}
/* force to disable hd dacs at startup */
hda_enable_hd_dacs(hda, false);
return 0;
err_sysfs:
return -EINVAL;
}
static void sti_hda_unbind(struct device *dev,
struct device *master, void *data)
{
}
static const struct component_ops sti_hda_ops = {
.bind = sti_hda_bind,
.unbind = sti_hda_unbind,
};
static int sti_hda_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_hda *hda;
struct resource *res;
DRM_INFO("%s\n", __func__);
hda = devm_kzalloc(dev, sizeof(*hda), GFP_KERNEL);
if (!hda)
return -ENOMEM;
hda->dev = pdev->dev;
/* Get resources */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hda-reg");
if (!res) {
DRM_ERROR("Invalid hda resource\n");
return -ENOMEM;
}
hda->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!hda->regs)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"video-dacs-ctrl");
if (res) {
hda->video_dacs_ctrl = devm_ioremap(dev, res->start,
resource_size(res));
if (!hda->video_dacs_ctrl)
return -ENOMEM;
} else {
		/* If there is no video-dacs-ctrl resource, continue the probe */
DRM_DEBUG_DRIVER("No video-dacs-ctrl resource\n");
hda->video_dacs_ctrl = NULL;
}
/* Get clock resources */
hda->clk_pix = devm_clk_get(dev, "pix");
if (IS_ERR(hda->clk_pix)) {
DRM_ERROR("Cannot get hda_pix clock\n");
return PTR_ERR(hda->clk_pix);
}
hda->clk_hddac = devm_clk_get(dev, "hddac");
if (IS_ERR(hda->clk_hddac)) {
DRM_ERROR("Cannot get hda_hddac clock\n");
return PTR_ERR(hda->clk_hddac);
}
platform_set_drvdata(pdev, hda);
return component_add(&pdev->dev, &sti_hda_ops);
}
static void sti_hda_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sti_hda_ops);
}
static const struct of_device_id hda_of_match[] = {
{ .compatible = "st,stih416-hda", },
{ .compatible = "st,stih407-hda", },
{ /* end node */ }
};
MODULE_DEVICE_TABLE(of, hda_of_match);
struct platform_driver sti_hda_driver = {
.driver = {
.name = "sti-hda",
.owner = THIS_MODULE,
.of_match_table = hda_of_match,
},
.probe = sti_hda_probe,
.remove_new = sti_hda_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/sti/sti_hda.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <[email protected]>
* Fabien Dessenne <[email protected]>
* for STMicroelectronics.
*/
#include <linux/types.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_drv.h"
#include "sti_plane.h"
const char *sti_plane_to_str(struct sti_plane *plane)
{
switch (plane->desc) {
case STI_GDP_0:
return "GDP0";
case STI_GDP_1:
return "GDP1";
case STI_GDP_2:
return "GDP2";
case STI_GDP_3:
return "GDP3";
case STI_HQVDP_0:
return "HQVDP0";
case STI_CURSOR:
return "CURSOR";
default:
return "<UNKNOWN PLANE>";
}
}
#define STI_FPS_INTERVAL_MS 3000
void sti_plane_update_fps(struct sti_plane *plane,
bool new_frame,
bool new_field)
{
struct drm_plane_state *state = plane->drm_plane.state;
ktime_t now;
struct sti_fps_info *fps;
int fpks, fipks, ms_since_last, num_frames, num_fields;
now = ktime_get();
/* Compute number of frame updates */
fps = &plane->fps_info;
if (new_field)
fps->curr_field_counter++;
	/* do not perform the fps calculation if new_frame is false */
if (!new_frame)
return;
fps->curr_frame_counter++;
ms_since_last = ktime_to_ms(ktime_sub(now, fps->last_timestamp));
num_frames = fps->curr_frame_counter - fps->last_frame_counter;
if (num_frames <= 0 || ms_since_last < STI_FPS_INTERVAL_MS)
return;
fps->last_timestamp = now;
fps->last_frame_counter = fps->curr_frame_counter;
if (state->fb) {
fpks = (num_frames * 1000000) / ms_since_last;
snprintf(plane->fps_info.fps_str, FPS_LENGTH,
"%-8s %4dx%-4d %.4s @ %3d.%-3.3d fps (%s)",
plane->drm_plane.name,
state->fb->width,
state->fb->height,
(char *)&state->fb->format->format,
fpks / 1000, fpks % 1000,
sti_plane_to_str(plane));
}
if (fps->curr_field_counter) {
/* Compute number of field updates */
num_fields = fps->curr_field_counter - fps->last_field_counter;
fps->last_field_counter = fps->curr_field_counter;
fipks = (num_fields * 1000000) / ms_since_last;
snprintf(plane->fps_info.fips_str,
FPS_LENGTH, " - %3d.%-3.3d field/sec",
fipks / 1000, fipks % 1000);
} else {
plane->fps_info.fips_str[0] = '\0';
}
if (fps->output)
DRM_INFO("%s%s\n",
plane->fps_info.fps_str,
plane->fps_info.fips_str);
}
static int sti_plane_get_default_zpos(enum drm_plane_type type)
{
switch (type) {
case DRM_PLANE_TYPE_PRIMARY:
return 0;
case DRM_PLANE_TYPE_OVERLAY:
return 1;
case DRM_PLANE_TYPE_CURSOR:
return 7;
}
return 0;
}
static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane,
enum drm_plane_type type)
{
int zpos = sti_plane_get_default_zpos(type);
switch (type) {
case DRM_PLANE_TYPE_PRIMARY:
case DRM_PLANE_TYPE_OVERLAY:
drm_plane_create_zpos_property(drm_plane, zpos, 0, 6);
break;
case DRM_PLANE_TYPE_CURSOR:
drm_plane_create_zpos_immutable_property(drm_plane, zpos);
break;
}
}
void sti_plane_init_property(struct sti_plane *plane,
enum drm_plane_type type)
{
sti_plane_attach_zorder_property(&plane->drm_plane, type);
DRM_DEBUG_DRIVER("drm plane:%d mapped to %s\n",
plane->drm_plane.base.id, sti_plane_to_str(plane));
}
| linux-master | drivers/gpu/drm/sti/sti_plane.c |
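/*
 * Illustrative sketch (not part of the kernel sources above): a small
 * userspace program that reproduces the frame-rate arithmetic used by
 * sti_plane_update_fps(). "fpks" is frames per kilosecond, i.e. the frame
 * rate scaled by 1000 so the fractional part can be printed with integer
 * math only. The sample counter values below are made up for the demo.
 */
#include <stdio.h>

int main(void)
{
	int num_frames = 180;		/* frames counted in the interval */
	int ms_since_last = 3000;	/* interval length in milliseconds */
	int fpks = (num_frames * 1000000) / ms_since_last;

	/* For the sample values this prints " 60.000 fps" */
	printf("%3d.%-3.3d fps\n", fpks / 1000, fpks % 1000);
	return 0;
}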
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
#include "armada_plane.h"
#include "armada_trace.h"
/*
* A note about interlacing. Let's consider HDMI 1920x1080i.
* The timing parameters we have from X are:
* Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
* 1920 2448 2492 2640 1080 1084 1094 1125
* Which get translated to:
* Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
* 1920 2448 2492 2640 540 542 547 562
*
* This is how it is defined by CEA-861-D - line and pixel numbers are
* referenced to the rising edge of VSYNC and HSYNC. Total clocks per
 * line: 2640. For the odd frame, the first active line is line 21; for
 * the even frame, the first active line is line 584.
*
* LN: 560 561 562 563 567 568 569
* DE: ~~~|____________________________//__________________________
* HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
* VSYNC: _________________________|~~~~~~//~~~~~~~~~~~~~~~|__________
* 22 blanking lines. VSYNC at 1320 (referenced to the HSYNC rising edge).
*
* LN: 1123 1124 1125 1 5 6 7
* DE: ~~~|____________________________//__________________________
* HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
* VSYNC: ____________________|~~~~~~~~~~~//~~~~~~~~~~|_______________
* 23 blanking lines
*
* The Armada LCD Controller line and pixel numbers are, like X timings,
* referenced to the top left of the active frame.
*
* So, translating these to our LCD controller:
* Odd frame, 563 total lines, VSYNC at line 543-548, pixel 1128.
* Even frame, 562 total lines, VSYNC at line 542-547, pixel 2448.
* Note: Vsync front porch remains constant!
*
* if (odd_frame) {
* vtotal = mode->crtc_vtotal + 1;
* vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay + 1;
* vhorizpos = mode->crtc_hsync_start - mode->crtc_htotal / 2
* } else {
* vtotal = mode->crtc_vtotal;
* vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay;
* vhorizpos = mode->crtc_hsync_start;
* }
* vfrontporch = mode->crtc_vtotal - mode->crtc_vsync_end;
*
* So, we need to reprogram these registers on each vsync event:
* LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
*
* Note: we do not use the frame done interrupts because these appear
* to happen too early, and lead to jitter on the display (presumably
* they occur at the end of the last active line, before the vsync back
* porch, which we're reprogramming.)
*/
void
armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
{
while (regs->offset != ~0) {
void __iomem *reg = dcrtc->base + regs->offset;
uint32_t val;
val = regs->mask;
if (val != 0)
val &= readl_relaxed(reg);
writel_relaxed(val | regs->val, reg);
++regs;
}
}
static void armada_drm_crtc_update(struct armada_crtc *dcrtc, bool enable)
{
uint32_t dumb_ctrl;
dumb_ctrl = dcrtc->cfg_dumb_ctrl;
if (enable)
dumb_ctrl |= CFG_DUMB_ENA;
/*
* When the dumb interface isn't in DUMB24_RGB888_0 mode, it might
* be using SPI or GPIO. If we set this to DUMB_BLANK, we will
* force LCD_D[23:0] to output blank color, overriding the GPIO or
* SPI usage. So leave it as-is unless in DUMB24_RGB888_0 mode.
*/
if (!enable && (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
dumb_ctrl &= ~DUMB_MASK;
dumb_ctrl |= DUMB_BLANK;
}
armada_updatel(dumb_ctrl,
~(CFG_INV_CSYNC | CFG_INV_HSYNC | CFG_INV_VSYNC),
dcrtc->base + LCD_SPU_DUMB_CTRL);
}
static void armada_drm_crtc_queue_state_event(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct drm_pending_vblank_event *event;
/* If we have an event, we need vblank events enabled */
event = xchg(&crtc->state->event, NULL);
if (event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
dcrtc->event = event;
}
}
static void armada_drm_update_gamma(struct drm_crtc *crtc)
{
struct drm_property_blob *blob = crtc->state->gamma_lut;
void __iomem *base = drm_to_armada_crtc(crtc)->base;
int i;
if (blob) {
struct drm_color_lut *lut = blob->data;
armada_updatel(CFG_CSB_256x8, CFG_CSB_256x8 | CFG_PDWN256x8,
base + LCD_SPU_SRAM_PARA1);
for (i = 0; i < 256; i++) {
writel_relaxed(drm_color_lut_extract(lut[i].red, 8),
base + LCD_SPU_SRAM_WRDAT);
writel_relaxed(i | SRAM_WRITE | SRAM_GAMMA_YR,
base + LCD_SPU_SRAM_CTRL);
readl_relaxed(base + LCD_SPU_HWC_OVSA_HPXL_VLN);
writel_relaxed(drm_color_lut_extract(lut[i].green, 8),
base + LCD_SPU_SRAM_WRDAT);
writel_relaxed(i | SRAM_WRITE | SRAM_GAMMA_UG,
base + LCD_SPU_SRAM_CTRL);
readl_relaxed(base + LCD_SPU_HWC_OVSA_HPXL_VLN);
writel_relaxed(drm_color_lut_extract(lut[i].blue, 8),
base + LCD_SPU_SRAM_WRDAT);
writel_relaxed(i | SRAM_WRITE | SRAM_GAMMA_VB,
base + LCD_SPU_SRAM_CTRL);
readl_relaxed(base + LCD_SPU_HWC_OVSA_HPXL_VLN);
}
armada_updatel(CFG_GAMMA_ENA, CFG_GAMMA_ENA,
base + LCD_SPU_DMA_CTRL0);
} else {
armada_updatel(0, CFG_GAMMA_ENA, base + LCD_SPU_DMA_CTRL0);
armada_updatel(CFG_PDWN256x8, CFG_CSB_256x8 | CFG_PDWN256x8,
base + LCD_SPU_SRAM_PARA1);
}
}
static enum drm_mode_status armada_drm_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
if (mode->vscan > 1)
return MODE_NO_VSCAN;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (mode->flags & DRM_MODE_FLAG_HSKEW)
return MODE_H_ILLEGAL;
/* We can't do interlaced modes if we don't have the SPU_ADV_REG */
if (!dcrtc->variant->has_spu_adv_reg &&
mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
if (mode->flags & (DRM_MODE_FLAG_BCAST | DRM_MODE_FLAG_PIXMUX |
DRM_MODE_FLAG_CLKDIV2))
return MODE_BAD;
return MODE_OK;
}
/* The mode_config.mutex will be held for this call */
static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode, struct drm_display_mode *adj)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
int ret;
/*
* Set CRTC modesetting parameters for the adjusted mode. This is
* applied after the connectors, bridges, and encoders have fixed up
* this mode, as described above drm_atomic_helper_check_modeset().
*/
drm_mode_set_crtcinfo(adj, CRTC_INTERLACE_HALVE_V);
/*
* Validate the adjusted mode in case an encoder/bridge has set
* something we don't support.
*/
if (armada_drm_crtc_mode_valid(crtc, adj) != MODE_OK)
return false;
/* Check whether the display mode is possible */
ret = dcrtc->variant->compute_clock(dcrtc, adj, NULL);
if (ret)
return false;
return true;
}
/* These are locked by dev->vbl_lock */
static void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
{
if (dcrtc->irq_ena & mask) {
dcrtc->irq_ena &= ~mask;
writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
}
}
static void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
{
if ((dcrtc->irq_ena & mask) != mask) {
dcrtc->irq_ena |= mask;
writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
if (readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR) & mask)
writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
}
}
static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
{
struct drm_pending_vblank_event *event;
void __iomem *base = dcrtc->base;
if (stat & DMA_FF_UNDERFLOW)
DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
if (stat & GRA_FF_UNDERFLOW)
DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
if (stat & VSYNC_IRQ)
drm_crtc_handle_vblank(&dcrtc->crtc);
spin_lock(&dcrtc->irq_lock);
if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
uint32_t val;
writel_relaxed(dcrtc->v[i].spu_v_porch, base + LCD_SPU_V_PORCH);
writel_relaxed(dcrtc->v[i].spu_v_h_total,
base + LCD_SPUT_V_H_TOTAL);
val = readl_relaxed(base + LCD_SPU_ADV_REG);
val &= ~(ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | ADV_VSYNCOFFEN);
val |= dcrtc->v[i].spu_adv_reg;
writel_relaxed(val, base + LCD_SPU_ADV_REG);
}
if (stat & dcrtc->irq_ena & DUMB_FRAMEDONE) {
if (dcrtc->update_pending) {
armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
dcrtc->update_pending = false;
}
if (dcrtc->cursor_update) {
writel_relaxed(dcrtc->cursor_hw_pos,
base + LCD_SPU_HWC_OVSA_HPXL_VLN);
writel_relaxed(dcrtc->cursor_hw_sz,
base + LCD_SPU_HWC_HPXL_VLN);
armada_updatel(CFG_HWC_ENA,
CFG_HWC_ENA | CFG_HWC_1BITMOD |
CFG_HWC_1BITENA,
base + LCD_SPU_DMA_CTRL0);
dcrtc->cursor_update = false;
}
armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
}
spin_unlock(&dcrtc->irq_lock);
if (stat & VSYNC_IRQ && !dcrtc->update_pending) {
event = xchg(&dcrtc->event, NULL);
if (event) {
spin_lock(&dcrtc->crtc.dev->event_lock);
drm_crtc_send_vblank_event(&dcrtc->crtc, event);
spin_unlock(&dcrtc->crtc.dev->event_lock);
drm_crtc_vblank_put(&dcrtc->crtc);
}
}
}
static irqreturn_t armada_drm_irq(int irq, void *arg)
{
struct armada_crtc *dcrtc = arg;
u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
/*
* Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR
* is set. Writing has some other effect to acknowledge the IRQ -
* without this, we only get a single IRQ.
*/
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
trace_armada_drm_irq(&dcrtc->crtc, stat);
/* Mask out those interrupts we haven't enabled */
v = stat & dcrtc->irq_ena;
if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
armada_drm_crtc_irq(dcrtc, stat);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/* The mode_config.mutex will be held for this call */
static void armada_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct drm_display_mode *adj = &crtc->state->adjusted_mode;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct armada_regs regs[17];
uint32_t lm, rm, tm, bm, val, sclk;
unsigned long flags;
unsigned i;
bool interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
i = 0;
rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
lm = adj->crtc_htotal - adj->crtc_hsync_end;
bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
tm = adj->crtc_vtotal - adj->crtc_vsync_end;
DRM_DEBUG_KMS("[CRTC:%d:%s] mode " DRM_MODE_FMT "\n",
crtc->base.id, crtc->name, DRM_MODE_ARG(adj));
DRM_DEBUG_KMS("lm %d rm %d tm %d bm %d\n", lm, rm, tm, bm);
/* Now compute the divider for real */
dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV);
spin_lock_irqsave(&dcrtc->irq_lock, flags);
dcrtc->interlaced = interlaced;
/* Even interlaced/progressive frame */
dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
adj->crtc_htotal;
dcrtc->v[1].spu_v_porch = tm << 16 | bm;
val = adj->crtc_hsync_start;
dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;
if (interlaced) {
/* Odd interlaced frame */
val -= adj->crtc_htotal / 2;
dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;
dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
(1 << 16);
dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
} else {
dcrtc->v[0] = dcrtc->v[1];
}
val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
LCD_SPUT_V_H_TOTAL);
if (dcrtc->variant->has_spu_adv_reg)
armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
/*
* The documentation doesn't indicate what the normal state of
	 * the sync signals is. Sebastian Hesselbart kindly probed
* these signals on his board to determine their state.
*
* The non-inverted state of the sync signals is active high.
* Setting these bits makes the appropriate signal active low.
*/
val = 0;
if (adj->flags & DRM_MODE_FLAG_NCSYNC)
val |= CFG_INV_CSYNC;
if (adj->flags & DRM_MODE_FLAG_NHSYNC)
val |= CFG_INV_HSYNC;
if (adj->flags & DRM_MODE_FLAG_NVSYNC)
val |= CFG_INV_VSYNC;
armada_reg_queue_mod(regs, i, val, CFG_INV_CSYNC | CFG_INV_HSYNC |
CFG_INV_VSYNC, LCD_SPU_DUMB_CTRL);
armada_reg_queue_end(regs, i);
armada_drm_crtc_update_regs(dcrtc, regs);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
static int armada_drm_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
if (crtc_state->gamma_lut && drm_color_lut_size(crtc_state->gamma_lut) != 256)
return -EINVAL;
if (crtc_state->color_mgmt_changed)
crtc_state->planes_changed = true;
return 0;
}
static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
if (crtc_state->color_mgmt_changed)
armada_drm_update_gamma(crtc);
dcrtc->regs_idx = 0;
dcrtc->regs = dcrtc->atomic_regs;
}
static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
armada_reg_queue_end(dcrtc->regs, dcrtc->regs_idx);
/*
* If we aren't doing a full modeset, then we need to queue
* the event here.
*/
if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
dcrtc->update_pending = true;
armada_drm_crtc_queue_state_event(crtc);
spin_lock_irq(&dcrtc->irq_lock);
armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
spin_unlock_irq(&dcrtc->irq_lock);
} else {
spin_lock_irq(&dcrtc->irq_lock);
armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
spin_unlock_irq(&dcrtc->irq_lock);
}
}
static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct drm_pending_vblank_event *event;
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
if (old_state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
drm_crtc_vblank_put(crtc);
drm_crtc_vblank_off(crtc);
armada_drm_crtc_update(dcrtc, false);
if (!crtc->state->active) {
/*
* This modeset will be leaving the CRTC disabled, so
* call the backend to disable upstream clocks etc.
*/
if (dcrtc->variant->disable)
dcrtc->variant->disable(dcrtc);
/*
* We will not receive any further vblank events.
* Send the flip_done event manually.
*/
event = crtc->state->event;
crtc->state->event = NULL;
if (event) {
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&crtc->dev->event_lock);
}
}
}
static void armada_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
if (!old_state->active) {
/*
* This modeset is enabling the CRTC after it having
* been disabled. Reverse the call to ->disable in
* the atomic_disable().
*/
if (dcrtc->variant->enable)
dcrtc->variant->enable(dcrtc, &crtc->state->adjusted_mode);
}
armada_drm_crtc_update(dcrtc, true);
drm_crtc_vblank_on(crtc);
if (crtc->state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
WARN_ON(drm_crtc_vblank_get(crtc));
armada_drm_crtc_queue_state_event(crtc);
}
static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
.mode_valid = armada_drm_crtc_mode_valid,
.mode_fixup = armada_drm_crtc_mode_fixup,
.mode_set_nofb = armada_drm_crtc_mode_set_nofb,
.atomic_check = armada_drm_crtc_atomic_check,
.atomic_begin = armada_drm_crtc_atomic_begin,
.atomic_flush = armada_drm_crtc_atomic_flush,
.atomic_disable = armada_drm_crtc_atomic_disable,
.atomic_enable = armada_drm_crtc_atomic_enable,
};
static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
unsigned stride, unsigned width, unsigned height)
{
uint32_t addr;
unsigned y;
addr = SRAM_HWC32_RAM1;
for (y = 0; y < height; y++) {
uint32_t *p = &pix[y * stride];
unsigned x;
for (x = 0; x < width; x++, p++) {
uint32_t val = *p;
/*
* In "ARGB888" (HWC32) mode, writing to the SRAM
* requires these bits to contain:
* 31:24 = alpha 23:16 = blue 15:8 = green 7:0 = red
* So, it's actually ABGR8888. This is independent
* of the SWAPRB bits in DMA control register 0.
*/
val = (val & 0xff00ff00) |
(val & 0x000000ff) << 16 |
(val & 0x00ff0000) >> 16;
writel_relaxed(val,
base + LCD_SPU_SRAM_WRDAT);
writel_relaxed(addr | SRAM_WRITE,
base + LCD_SPU_SRAM_CTRL);
readl_relaxed(base + LCD_SPU_HWC_OVSA_HPXL_VLN);
addr += 1;
if ((addr & 0x00ff) == 0)
addr += 0xf00;
if ((addr & 0x30ff) == 0)
addr = SRAM_HWC32_RAM2;
}
}
}
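/*
 * Worked example of the swizzle above: an ARGB8888 pixel 0xff112233
 * (A=0xff R=0x11 G=0x22 B=0x33) is written to the SRAM as 0xff332211,
 * i.e. alpha in 31:24, blue in 23:16, green in 15:8 and red in 7:0.
 */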
static void armada_drm_crtc_cursor_tran(void __iomem *base)
{
unsigned addr;
for (addr = 0; addr < 256; addr++) {
/* write the default value */
writel_relaxed(0x55555555, base + LCD_SPU_SRAM_WRDAT);
writel_relaxed(addr | SRAM_WRITE | SRAM_HWC32_TRAN,
base + LCD_SPU_SRAM_CTRL);
}
}
static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
{
uint32_t xoff, xscr, w = dcrtc->cursor_w, s;
uint32_t yoff, yscr, h = dcrtc->cursor_h;
uint32_t para1;
/*
* Calculate the visible width and height of the cursor,
* screen position, and the position in the cursor bitmap.
*/
if (dcrtc->cursor_x < 0) {
xoff = -dcrtc->cursor_x;
xscr = 0;
w -= min(xoff, w);
} else if (dcrtc->cursor_x + w > dcrtc->crtc.mode.hdisplay) {
xoff = 0;
xscr = dcrtc->cursor_x;
w = max_t(int, dcrtc->crtc.mode.hdisplay - dcrtc->cursor_x, 0);
} else {
xoff = 0;
xscr = dcrtc->cursor_x;
}
if (dcrtc->cursor_y < 0) {
yoff = -dcrtc->cursor_y;
yscr = 0;
h -= min(yoff, h);
} else if (dcrtc->cursor_y + h > dcrtc->crtc.mode.vdisplay) {
yoff = 0;
yscr = dcrtc->cursor_y;
h = max_t(int, dcrtc->crtc.mode.vdisplay - dcrtc->cursor_y, 0);
} else {
yoff = 0;
yscr = dcrtc->cursor_y;
}
/* On interlaced modes, the vertical cursor size must be halved */
s = dcrtc->cursor_w;
if (dcrtc->interlaced) {
s *= 2;
yscr /= 2;
h /= 2;
}
if (!dcrtc->cursor_obj || !h || !w) {
spin_lock_irq(&dcrtc->irq_lock);
dcrtc->cursor_update = false;
armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
spin_unlock_irq(&dcrtc->irq_lock);
return 0;
}
spin_lock_irq(&dcrtc->irq_lock);
para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1);
armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32,
dcrtc->base + LCD_SPU_SRAM_PARA1);
spin_unlock_irq(&dcrtc->irq_lock);
/*
* Initialize the transparency if the SRAM was powered down.
	 * We must also reload the cursor data.
*/
if (!(para1 & CFG_CSB_256x32)) {
armada_drm_crtc_cursor_tran(dcrtc->base);
reload = true;
}
if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
spin_lock_irq(&dcrtc->irq_lock);
dcrtc->cursor_update = false;
armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
spin_unlock_irq(&dcrtc->irq_lock);
reload = true;
}
if (reload) {
struct armada_gem_object *obj = dcrtc->cursor_obj;
uint32_t *pix;
/* Set the top-left corner of the cursor image */
pix = obj->addr;
pix += yoff * s + xoff;
armada_load_cursor_argb(dcrtc->base, pix, s, w, h);
}
/* Reload the cursor position, size and enable in the IRQ handler */
spin_lock_irq(&dcrtc->irq_lock);
dcrtc->cursor_hw_pos = yscr << 16 | xscr;
dcrtc->cursor_hw_sz = h << 16 | w;
dcrtc->cursor_update = true;
armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
spin_unlock_irq(&dcrtc->irq_lock);
return 0;
}
static void cursor_update(void *data)
{
armada_drm_crtc_cursor_update(data, true);
}
static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct armada_gem_object *obj = NULL;
int ret;
/* If no cursor support, replicate drm's return value */
if (!dcrtc->variant->has_spu_adv_reg)
return -ENXIO;
if (handle && w > 0 && h > 0) {
/* maximum size is 64x32 or 32x64 */
if (w > 64 || h > 64 || (w > 32 && h > 32))
return -ENOMEM;
obj = armada_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
/* Must be a kernel-mapped object */
if (!obj->addr) {
drm_gem_object_put(&obj->obj);
return -EINVAL;
}
if (obj->obj.size < w * h * 4) {
DRM_ERROR("buffer is too small\n");
drm_gem_object_put(&obj->obj);
return -ENOMEM;
}
}
if (dcrtc->cursor_obj) {
dcrtc->cursor_obj->update = NULL;
dcrtc->cursor_obj->update_data = NULL;
drm_gem_object_put(&dcrtc->cursor_obj->obj);
}
dcrtc->cursor_obj = obj;
dcrtc->cursor_w = w;
dcrtc->cursor_h = h;
ret = armada_drm_crtc_cursor_update(dcrtc, true);
if (obj) {
obj->update_data = dcrtc;
obj->update = cursor_update;
}
return ret;
}
static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
int ret;
/* If no cursor support, replicate drm's return value */
if (!dcrtc->variant->has_spu_adv_reg)
return -EFAULT;
dcrtc->cursor_x = x;
dcrtc->cursor_y = y;
ret = armada_drm_crtc_cursor_update(dcrtc, false);
return ret;
}
static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct armada_private *priv = drm_to_armada_dev(crtc->dev);
if (dcrtc->cursor_obj)
drm_gem_object_put(&dcrtc->cursor_obj->obj);
priv->dcrtc[dcrtc->num] = NULL;
drm_crtc_cleanup(&dcrtc->crtc);
if (dcrtc->variant->disable)
dcrtc->variant->disable(dcrtc);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA);
of_node_put(dcrtc->crtc.port);
kfree(dcrtc);
}
static int armada_drm_crtc_late_register(struct drm_crtc *crtc)
{
if (IS_ENABLED(CONFIG_DEBUG_FS))
armada_drm_crtc_debugfs_init(drm_to_armada_crtc(crtc));
return 0;
}
/* These are called under the vbl_lock. */
static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
unsigned long flags;
spin_lock_irqsave(&dcrtc->irq_lock, flags);
armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
return 0;
}
static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
unsigned long flags;
spin_lock_irqsave(&dcrtc->irq_lock, flags);
armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
static const struct drm_crtc_funcs armada_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.cursor_set = armada_drm_crtc_cursor_set,
.cursor_move = armada_drm_crtc_cursor_move,
.destroy = armada_drm_crtc_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.late_register = armada_drm_crtc_late_register,
.enable_vblank = armada_drm_crtc_enable_vblank,
.disable_vblank = armada_drm_crtc_disable_vblank,
};
int armada_crtc_select_clock(struct armada_crtc *dcrtc,
struct armada_clk_result *res,
const struct armada_clocking_params *params,
struct clk *clks[], size_t num_clks,
unsigned long desired_khz)
{
unsigned long desired_hz = desired_khz * 1000;
unsigned long desired_clk_hz; // requested clk input
unsigned long real_clk_hz; // actual clk input
unsigned long real_hz; // actual pixel clk
unsigned long permillage;
struct clk *clk;
u32 div;
int i;
DRM_DEBUG_KMS("[CRTC:%u:%s] desired clock=%luHz\n",
dcrtc->crtc.base.id, dcrtc->crtc.name, desired_hz);
for (i = 0; i < num_clks; i++) {
clk = clks[i];
if (!clk)
continue;
if (params->settable & BIT(i)) {
real_clk_hz = clk_round_rate(clk, desired_hz);
desired_clk_hz = desired_hz;
} else {
real_clk_hz = clk_get_rate(clk);
desired_clk_hz = real_clk_hz;
}
/* If the clock can do exactly the desired rate, we're done */
if (real_clk_hz == desired_hz) {
real_hz = real_clk_hz;
div = 1;
goto found;
}
/* Calculate the divider - if invalid, we can't do this rate */
div = DIV_ROUND_CLOSEST(real_clk_hz, desired_hz);
if (div == 0 || div > params->div_max)
continue;
/* Calculate the actual rate - HDMI requires -0.6%..+0.5% */
real_hz = DIV_ROUND_CLOSEST(real_clk_hz, div);
DRM_DEBUG_KMS("[CRTC:%u:%s] clk=%u %luHz div=%u real=%luHz\n",
dcrtc->crtc.base.id, dcrtc->crtc.name,
i, real_clk_hz, div, real_hz);
/* Avoid repeated division */
if (real_hz < desired_hz) {
permillage = real_hz / desired_khz;
if (permillage < params->permillage_min)
continue;
} else {
permillage = DIV_ROUND_UP(real_hz, desired_khz);
if (permillage > params->permillage_max)
continue;
}
goto found;
}
return -ERANGE;
found:
DRM_DEBUG_KMS("[CRTC:%u:%s] selected clk=%u %luHz div=%u real=%luHz\n",
dcrtc->crtc.base.id, dcrtc->crtc.name,
i, real_clk_hz, div, real_hz);
res->desired_clk_hz = desired_clk_hz;
res->clk = clk;
res->div = div;
return i;
}
static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
struct resource *res, int irq, const struct armada_variant *variant,
struct device_node *port)
{
struct armada_private *priv = drm_to_armada_dev(drm);
struct armada_crtc *dcrtc;
struct drm_plane *primary;
void __iomem *base;
int ret;
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
if (!dcrtc) {
DRM_ERROR("failed to allocate Armada crtc\n");
return -ENOMEM;
}
if (dev != drm->dev)
dev_set_drvdata(dev, dcrtc);
dcrtc->variant = variant;
dcrtc->base = base;
dcrtc->num = drm->mode_config.num_crtc;
dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
spin_lock_init(&dcrtc->irq_lock);
dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;
/* Initialize some registers which we don't otherwise set */
writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_BLANKCOLOR);
writel_relaxed(dcrtc->spu_iopad_ctrl,
dcrtc->base + LCD_SPU_IOPAD_CONTROL);
writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_SRAM_PARA0);
writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
dcrtc);
if (ret < 0)
goto err_crtc;
if (dcrtc->variant->init) {
ret = dcrtc->variant->init(dcrtc, dev);
if (ret)
goto err_crtc;
}
/* Ensure AXI pipeline is enabled */
armada_updatel(CFG_ARBFAST_ENA, 0, dcrtc->base + LCD_SPU_DMA_CTRL0);
priv->dcrtc[dcrtc->num] = dcrtc;
dcrtc->crtc.port = port;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
if (!primary) {
ret = -ENOMEM;
goto err_crtc;
}
ret = armada_drm_primary_plane_init(drm, primary);
if (ret) {
kfree(primary);
goto err_crtc;
}
ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, primary, NULL,
&armada_crtc_funcs, NULL);
if (ret)
goto err_crtc_init;
drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
ret = drm_mode_crtc_set_gamma_size(&dcrtc->crtc, 256);
if (ret)
return ret;
drm_crtc_enable_color_mgmt(&dcrtc->crtc, 0, false, 256);
return armada_overlay_plane_create(drm, 1 << dcrtc->num);
err_crtc_init:
primary->funcs->destroy(primary);
err_crtc:
kfree(dcrtc);
return ret;
}
static int
armada_lcd_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int irq = platform_get_irq(pdev, 0);
const struct armada_variant *variant;
struct device_node *port = NULL;
if (irq < 0)
return irq;
if (!dev->of_node) {
const struct platform_device_id *id;
id = platform_get_device_id(pdev);
if (!id)
return -ENXIO;
variant = (const struct armada_variant *)id->driver_data;
} else {
const struct of_device_id *match;
struct device_node *np, *parent = dev->of_node;
match = of_match_device(dev->driver->of_match_table, dev);
if (!match)
return -ENXIO;
np = of_get_child_by_name(parent, "ports");
if (np)
parent = np;
port = of_get_child_by_name(parent, "port");
of_node_put(np);
if (!port) {
dev_err(dev, "no port node found in %pOF\n", parent);
return -ENXIO;
}
variant = match->data;
}
return armada_drm_crtc_create(drm, dev, res, irq, variant, port);
}
static void
armada_lcd_unbind(struct device *dev, struct device *master, void *data)
{
struct armada_crtc *dcrtc = dev_get_drvdata(dev);
armada_drm_crtc_destroy(&dcrtc->crtc);
}
static const struct component_ops armada_lcd_ops = {
.bind = armada_lcd_bind,
.unbind = armada_lcd_unbind,
};
static int armada_lcd_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &armada_lcd_ops);
}
static int armada_lcd_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &armada_lcd_ops);
return 0;
}
static const struct of_device_id armada_lcd_of_match[] = {
{
.compatible = "marvell,dove-lcd",
.data = &armada510_ops,
},
{}
};
MODULE_DEVICE_TABLE(of, armada_lcd_of_match);
static const struct platform_device_id armada_lcd_platform_ids[] = {
{
.name = "armada-lcd",
.driver_data = (unsigned long)&armada510_ops,
}, {
.name = "armada-510-lcd",
.driver_data = (unsigned long)&armada510_ops,
},
{ },
};
MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids);
struct platform_driver armada_lcd_platform_driver = {
.probe = armada_lcd_probe,
.remove = armada_lcd_remove,
.driver = {
.name = "armada-lcd",
.owner = THIS_MODULE,
.of_match_table = armada_lcd_of_match,
},
.id_table = armada_lcd_platform_ids,
};
| linux-master | drivers/gpu/drm/armada/armada_crtc.c |
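/*
 * Illustrative sketch (not part of the kernel sources above): a userspace
 * program applying the odd/even field adjustments described in the
 * interlacing comment of armada_crtc.c to the halved CEA-861 1920x1080i
 * timings, printing the per-field vertical total, vertical back porch and
 * VSYNC horizontal position that the driver reprograms on each vsync.
 */
#include <stdio.h>

struct timings {
	int hsync_start, htotal;
	int vdisplay, vsync_start, vtotal;
};

int main(void)
{
	/* 1920x1080i with the vertical parameters already halved */
	struct timings m = {
		.hsync_start = 2448, .htotal = 2640,
		.vdisplay = 540, .vsync_start = 542, .vtotal = 562,
	};
	int odd;

	for (odd = 1; odd >= 0; odd--) {
		int vtotal = m.vtotal + odd;
		int vbackporch = m.vsync_start - m.vdisplay + odd;
		int vhorizpos = m.hsync_start - (odd ? m.htotal / 2 : 0);

		/* Expect: odd 563/3/1128, even 562/2/2448 */
		printf("%s field: vtotal=%d vbackporch=%d vsync pixel=%d\n",
		       odd ? "odd" : "even", vtotal, vbackporch, vhorizpos);
	}
	return 0;
}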
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
#include "armada_plane.h"
#include "armada_trace.h"
static const uint32_t armada_primary_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
DRM_FORMAT_VYUY,
DRM_FORMAT_YVYU,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
};
void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3],
u16 pitches[3], bool interlaced)
{
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
unsigned int num_planes = format->num_planes;
unsigned int x = state->src.x1 >> 16;
unsigned int y = state->src.y1 >> 16;
u32 addr = drm_fb_obj(fb)->dev_addr;
int i;
DRM_DEBUG_KMS("pitch %u x %d y %d bpp %d\n",
fb->pitches[0], x, y, format->cpp[0] * 8);
if (num_planes > 3)
num_planes = 3;
addrs[0][0] = addr + fb->offsets[0] + y * fb->pitches[0] +
x * format->cpp[0];
pitches[0] = fb->pitches[0];
y /= format->vsub;
x /= format->hsub;
for (i = 1; i < num_planes; i++) {
addrs[0][i] = addr + fb->offsets[i] + y * fb->pitches[i] +
x * format->cpp[i];
pitches[i] = fb->pitches[i];
}
for (; i < 3; i++) {
addrs[0][i] = 0;
pitches[i] = 0;
}
if (interlaced) {
for (i = 0; i < 3; i++) {
addrs[1][i] = addrs[0][i] + pitches[i];
pitches[i] *= 2;
}
} else {
for (i = 0; i < 3; i++)
addrs[1][i] = addrs[0][i];
}
}
int armada_drm_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct armada_plane_state *st = to_armada_plane_state(new_plane_state);
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
bool interlace;
int ret;
if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc)) {
new_plane_state->visible = false;
return 0;
}
if (state)
crtc_state = drm_atomic_get_existing_crtc_state(state,
crtc);
else
crtc_state = crtc->state;
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
0,
INT_MAX, true, false);
if (ret)
return ret;
interlace = crtc_state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE;
if (interlace) {
if ((new_plane_state->dst.y1 | new_plane_state->dst.y2) & 1)
return -EINVAL;
st->src_hw = drm_rect_height(&new_plane_state->src) >> 17;
st->dst_yx = new_plane_state->dst.y1 >> 1;
st->dst_hw = drm_rect_height(&new_plane_state->dst) >> 1;
} else {
st->src_hw = drm_rect_height(&new_plane_state->src) >> 16;
st->dst_yx = new_plane_state->dst.y1;
st->dst_hw = drm_rect_height(&new_plane_state->dst);
}
st->src_hw <<= 16;
st->src_hw |= drm_rect_width(&new_plane_state->src) >> 16;
st->dst_yx <<= 16;
st->dst_yx |= new_plane_state->dst.x1 & 0x0000ffff;
st->dst_hw <<= 16;
st->dst_hw |= drm_rect_width(&new_plane_state->dst) & 0x0000ffff;
armada_drm_plane_calc(new_plane_state, st->addrs, st->pitches,
interlace);
st->interlace = interlace;
return 0;
}
static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct armada_crtc *dcrtc;
struct armada_regs *regs;
u32 cfg, cfg_mask, val;
unsigned int idx;
DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
if (!new_state->fb || WARN_ON(!new_state->crtc))
return;
DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
plane->base.id, plane->name,
new_state->crtc->base.id, new_state->crtc->name,
new_state->fb->base.id,
old_state->visible, new_state->visible);
dcrtc = drm_to_armada_crtc(new_state->crtc);
regs = dcrtc->regs + dcrtc->regs_idx;
idx = 0;
if (!old_state->visible && new_state->visible) {
val = CFG_PDWN64x66;
if (drm_fb_to_armada_fb(new_state->fb)->fmt > CFG_420)
val |= CFG_PDWN256x24;
armada_reg_queue_mod(regs, idx, 0, val, LCD_SPU_SRAM_PARA1);
}
val = armada_src_hw(new_state);
if (armada_src_hw(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_HPXL_VLN);
val = armada_dst_yx(new_state);
if (armada_dst_yx(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_OVSA_HPXL_VLN);
val = armada_dst_hw(new_state);
if (armada_dst_hw(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_GZM_HPXL_VLN);
if (old_state->src.x1 != new_state->src.x1 ||
old_state->src.y1 != new_state->src.y1 ||
old_state->fb != new_state->fb ||
new_state->crtc->state->mode_changed) {
armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 0),
LCD_CFG_GRA_START_ADDR0);
armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 0),
LCD_CFG_GRA_START_ADDR1);
armada_reg_queue_mod(regs, idx, armada_pitch(new_state, 0),
0xffff,
LCD_CFG_GRA_PITCH);
}
if (old_state->fb != new_state->fb ||
new_state->crtc->state->mode_changed) {
cfg = CFG_GRA_FMT(drm_fb_to_armada_fb(new_state->fb)->fmt) |
CFG_GRA_MOD(drm_fb_to_armada_fb(new_state->fb)->mod);
if (drm_fb_to_armada_fb(new_state->fb)->fmt > CFG_420)
cfg |= CFG_PALETTE_ENA;
if (new_state->visible)
cfg |= CFG_GRA_ENA;
if (to_armada_plane_state(new_state)->interlace)
cfg |= CFG_GRA_FTOGGLE;
cfg_mask = CFG_GRAFORMAT |
CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
CFG_SWAPYU | CFG_YUV2RGB) |
CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
CFG_GRA_ENA;
} else if (old_state->visible != new_state->visible) {
cfg = new_state->visible ? CFG_GRA_ENA : 0;
cfg_mask = CFG_GRA_ENA;
} else {
cfg = cfg_mask = 0;
}
if (drm_rect_width(&old_state->src) != drm_rect_width(&new_state->src) ||
drm_rect_width(&old_state->dst) != drm_rect_width(&new_state->dst)) {
cfg_mask |= CFG_GRA_HSMOOTH;
if (drm_rect_width(&new_state->src) >> 16 !=
drm_rect_width(&new_state->dst))
cfg |= CFG_GRA_HSMOOTH;
}
if (cfg_mask)
armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
LCD_SPU_DMA_CTRL0);
dcrtc->regs_idx += idx;
}
static void armada_drm_primary_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct armada_crtc *dcrtc;
struct armada_regs *regs;
unsigned int idx = 0;
DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
if (!old_state->crtc)
return;
DRM_DEBUG_KMS("[PLANE:%d:%s] was on [CRTC:%d:%s] with [FB:%d]\n",
plane->base.id, plane->name,
old_state->crtc->base.id, old_state->crtc->name,
old_state->fb->base.id);
dcrtc = drm_to_armada_crtc(old_state->crtc);
regs = dcrtc->regs + dcrtc->regs_idx;
/* Disable plane and power down most RAMs and FIFOs */
armada_reg_queue_mod(regs, idx, 0, CFG_GRA_ENA, LCD_SPU_DMA_CTRL0);
armada_reg_queue_mod(regs, idx, CFG_PDWN256x32 | CFG_PDWN256x24 |
CFG_PDWN32x32 | CFG_PDWN64x66,
0, LCD_SPU_SRAM_PARA1);
dcrtc->regs_idx += idx;
}
static const struct drm_plane_helper_funcs armada_primary_plane_helper_funcs = {
.atomic_check = armada_drm_plane_atomic_check,
.atomic_update = armada_drm_primary_plane_atomic_update,
.atomic_disable = armada_drm_primary_plane_atomic_disable,
};
void armada_plane_reset(struct drm_plane *plane)
{
struct armada_plane_state *st;
if (plane->state)
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(plane->state);
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (st)
__drm_atomic_helper_plane_reset(plane, &st->base);
}
struct drm_plane_state *armada_plane_duplicate_state(struct drm_plane *plane)
{
struct armada_plane_state *st;
if (WARN_ON(!plane->state))
return NULL;
st = kmemdup(plane->state, sizeof(*st), GFP_KERNEL);
if (st)
__drm_atomic_helper_plane_duplicate_state(plane, &st->base);
return &st->base;
}
static const struct drm_plane_funcs armada_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_helper_destroy,
.reset = armada_plane_reset,
.atomic_duplicate_state = armada_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
int armada_drm_primary_plane_init(struct drm_device *drm,
struct drm_plane *primary)
{
int ret;
drm_plane_helper_add(primary, &armada_primary_plane_helper_funcs);
ret = drm_universal_plane_init(drm, primary, 0,
&armada_primary_plane_funcs,
armada_primary_formats,
ARRAY_SIZE(armada_primary_formats),
NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
return ret;
}
| linux-master | drivers/gpu/drm/armada/armada_plane.c |
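/*
 * Illustrative sketch (not part of the kernel sources above): the scanout
 * address computation of armada_drm_plane_calc() redone in userspace for a
 * hypothetical 64x64 YU12 (three-plane, 2x2 subsampled) framebuffer cropped
 * at (x, y) = (8, 4). In the driver x and y come from the 16.16 fixed-point
 * source rectangle; here they are plain integers, and the buffer layout is
 * made up for the example rather than taken from real hardware.
 */
#include <stdio.h>

int main(void)
{
	unsigned int base = 0x10000000;		/* hypothetical DMA address */
	unsigned int offsets[3] = { 0, 64 * 64, 64 * 64 + 32 * 32 };
	unsigned int pitches[3] = { 64, 32, 32 };
	unsigned int cpp[3] = { 1, 1, 1 }, hsub = 2, vsub = 2;
	unsigned int addrs[2][3];
	int x = 8, y = 4, interlaced = 1, i;

	/* Luma plane uses the unscaled crop position */
	addrs[0][0] = base + offsets[0] + y * pitches[0] + x * cpp[0];
	y /= vsub;
	x /= hsub;
	/* Chroma planes use the subsampled crop position */
	for (i = 1; i < 3; i++)
		addrs[0][i] = base + offsets[i] + y * pitches[i] + x * cpp[i];

	for (i = 0; i < 3; i++) {
		/* Second field starts one line further down; pitch doubles */
		addrs[1][i] = interlaced ? addrs[0][i] + pitches[i] : addrs[0][i];
		if (interlaced)
			pitches[i] *= 2;
		printf("plane %d: field0 0x%08x field1 0x%08x pitch %u\n",
		       i, addrs[0][i], addrs[1][i], pitches[i]);
	}
	return 0;
}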
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include "armada_crtc.h"
#include "armada_drm.h"
static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct armada_private *priv = drm_to_armada_dev(dev);
struct drm_printer p = drm_seq_file_printer(m);
mutex_lock(&priv->linear_lock);
drm_mm_print(&priv->linear, &p);
mutex_unlock(&priv->linear_lock);
return 0;
}
static int armada_debugfs_crtc_reg_show(struct seq_file *m, void *data)
{
struct armada_crtc *dcrtc = m->private;
int i;
for (i = 0x84; i <= 0x1c4; i += 4) {
u32 v = readl_relaxed(dcrtc->base + i);
seq_printf(m, "0x%04x: 0x%08x\n", i, v);
}
return 0;
}
static int armada_debugfs_crtc_reg_open(struct inode *inode, struct file *file)
{
return single_open(file, armada_debugfs_crtc_reg_show,
inode->i_private);
}
static ssize_t armada_debugfs_crtc_reg_write(struct file *file,
const char __user *ptr, size_t len, loff_t *off)
{
struct armada_crtc *dcrtc;
unsigned long reg, mask, val;
char buf[32];
int ret;
u32 v;
if (*off != 0)
return 0;
if (len > sizeof(buf) - 1)
len = sizeof(buf) - 1;
ret = strncpy_from_user(buf, ptr, len);
if (ret < 0)
return ret;
buf[len] = '\0';
	if (sscanf(buf, "%lx %lx %lx", &reg, &mask, &val) != 3)
return -EINVAL;
if (reg < 0x84 || reg > 0x1c4 || reg & 3)
return -ERANGE;
dcrtc = ((struct seq_file *)file->private_data)->private;
v = readl(dcrtc->base + reg);
v &= ~mask;
v |= val & mask;
writel(v, dcrtc->base + reg);
return len;
}
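/*
 * The write handler above expects a single line of three hex numbers,
 * "<reg> <mask> <val>", where <reg> is a word-aligned LCD register offset
 * in the 0x84..0x1c4 range; the bits selected by <mask> are replaced with
 * the corresponding bits of <val>. For example (the debugfs location is an
 * assumption, it depends on the DRM debugfs layout):
 *	echo "0x190 0x1 0x1" > /sys/kernel/debug/dri/<card>/crtc-<n>/armada-regs
 */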
static const struct file_operations armada_debugfs_crtc_reg_fops = {
.owner = THIS_MODULE,
.open = armada_debugfs_crtc_reg_open,
.read = seq_read,
.write = armada_debugfs_crtc_reg_write,
.llseek = seq_lseek,
.release = single_release,
};
void armada_drm_crtc_debugfs_init(struct armada_crtc *dcrtc)
{
debugfs_create_file("armada-regs", 0600, dcrtc->crtc.debugfs_entry,
dcrtc, &armada_debugfs_crtc_reg_fops);
}
static struct drm_info_list armada_debugfs_list[] = {
{ "gem_linear", armada_debugfs_gem_linear_show, 0 },
};
#define ARMADA_DEBUGFS_ENTRIES ARRAY_SIZE(armada_debugfs_list)
int armada_drm_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_create_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
return 0;
}
| linux-master | drivers/gpu/drm/armada/armada_debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Russell King
*
* Armada 510 (aka Dove) variant support
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <drm/drm_probe_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_hw.h"
struct armada510_variant_data {
struct clk *clks[4];
struct clk *sel_clk;
};
static int armada510_crtc_init(struct armada_crtc *dcrtc, struct device *dev)
{
struct armada510_variant_data *v;
struct clk *clk;
int idx;
v = devm_kzalloc(dev, sizeof(*v), GFP_KERNEL);
if (!v)
return -ENOMEM;
dcrtc->variant_data = v;
if (dev->of_node) {
struct property *prop;
const char *s;
of_property_for_each_string(dev->of_node, "clock-names", prop,
s) {
if (!strcmp(s, "ext_ref_clk0"))
idx = 0;
else if (!strcmp(s, "ext_ref_clk1"))
idx = 1;
else if (!strcmp(s, "plldivider"))
idx = 2;
else if (!strcmp(s, "axibus"))
idx = 3;
else
continue;
clk = devm_clk_get(dev, s);
if (IS_ERR(clk))
return PTR_ERR(clk) == -ENOENT ? -EPROBE_DEFER :
PTR_ERR(clk);
v->clks[idx] = clk;
}
} else {
clk = devm_clk_get(dev, "ext_ref_clk1");
if (IS_ERR(clk))
return PTR_ERR(clk) == -ENOENT ? -EPROBE_DEFER :
PTR_ERR(clk);
v->clks[1] = clk;
}
/*
	 * Lower the watermark to eliminate jitter at higher bandwidths.
* Disable SRAM read wait state to avoid system hang with external
* clock.
*/
armada_updatel(CFG_DMA_WM(0x20), CFG_SRAM_WAIT | CFG_DMA_WM_MASK,
dcrtc->base + LCD_CFG_RDREG4F);
/* Initialise SPU register */
writel_relaxed(ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
dcrtc->base + LCD_SPU_ADV_REG);
return 0;
}
static const u32 armada510_clk_sels[] = {
SCLK_510_EXTCLK0,
SCLK_510_EXTCLK1,
SCLK_510_PLL,
SCLK_510_AXI,
};
static const struct armada_clocking_params armada510_clocking = {
/* HDMI requires -0.6%..+0.5% */
.permillage_min = 994,
.permillage_max = 1005,
.settable = BIT(0) | BIT(1),
.div_max = SCLK_510_INT_DIV_MASK,
};
/*
* Armada510 specific SCLK register selection.
* This gets called with sclk = NULL to test whether the mode is
* supportable, and again with sclk != NULL to set the clocks up for
* that. The former can return an error, but the latter is expected
* not to.
*/
static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
const struct drm_display_mode *mode, uint32_t *sclk)
{
struct armada510_variant_data *v = dcrtc->variant_data;
unsigned long desired_khz = mode->crtc_clock;
struct armada_clk_result res;
int ret, idx;
idx = armada_crtc_select_clock(dcrtc, &res, &armada510_clocking,
v->clks, ARRAY_SIZE(v->clks),
desired_khz);
if (idx < 0)
return idx;
ret = clk_prepare_enable(res.clk);
if (ret)
return ret;
if (sclk) {
clk_set_rate(res.clk, res.desired_clk_hz);
*sclk = res.div | armada510_clk_sels[idx];
/* We are now using this clock */
v->sel_clk = res.clk;
swap(dcrtc->clk, res.clk);
}
clk_disable_unprepare(res.clk);
return 0;
}
static void armada510_crtc_disable(struct armada_crtc *dcrtc)
{
if (dcrtc->clk) {
clk_disable_unprepare(dcrtc->clk);
dcrtc->clk = NULL;
}
}
static void armada510_crtc_enable(struct armada_crtc *dcrtc,
const struct drm_display_mode *mode)
{
struct armada510_variant_data *v = dcrtc->variant_data;
if (!dcrtc->clk && v->sel_clk) {
if (!WARN_ON(clk_prepare_enable(v->sel_clk)))
dcrtc->clk = v->sel_clk;
}
}
const struct armada_variant armada510_ops = {
.has_spu_adv_reg = true,
.init = armada510_crtc_init,
.compute_clock = armada510_crtc_compute_clock,
.disable = armada510_crtc_disable,
.enable = armada510_crtc_enable,
};
| linux-master | drivers/gpu/drm/armada/armada_510.c |
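/*
 * Illustrative sketch (not part of the kernel sources above): the divider
 * and tolerance check performed by armada_crtc_select_clock() for fixed
 * (non-settable) input clocks, using the -0.6%..+0.5% permillage window
 * from armada510_clocking. The 1 GHz and 297 MHz input rates and the
 * 148.5 MHz (1080p60) pixel clock are example numbers, not values read
 * from real hardware, and the variant's div_max limit is omitted here.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define DIV_ROUND_UP(x, d)	(((x) + (d) - 1) / (d))

int main(void)
{
	unsigned long clks[2] = { 1000000000UL, 297000000UL };
	unsigned long desired_khz = 148500;	/* 1080p60 pixel clock */
	unsigned long desired_hz = desired_khz * 1000;
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long div = DIV_ROUND_CLOSEST(clks[i], desired_hz);
		unsigned long real_hz = DIV_ROUND_CLOSEST(clks[i], div);
		unsigned long permillage;

		if (real_hz < desired_hz)
			permillage = real_hz / desired_khz;
		else
			permillage = DIV_ROUND_UP(real_hz, desired_khz);

		/* 1 GHz misses the window; 297 MHz divides exactly by 2 */
		printf("clk=%luHz div=%lu real=%luHz permillage=%lu usable=%s\n",
		       clks[i], div, real_hz, permillage,
		       (permillage >= 994 && permillage <= 1005) ? "yes" : "no");
	}
	return 0;
}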
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
#include <linux/bitfield.h>
#include <drm/armada_drm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
#include "armada_ioctlP.h"
#include "armada_plane.h"
#include "armada_trace.h"
#define DEFAULT_BRIGHTNESS 0
#define DEFAULT_CONTRAST 0x4000
#define DEFAULT_SATURATION 0x4000
#define DEFAULT_ENCODING DRM_COLOR_YCBCR_BT601
struct armada_overlay_state {
struct armada_plane_state base;
u32 colorkey_yr;
u32 colorkey_ug;
u32 colorkey_vb;
u32 colorkey_mode;
u32 colorkey_enable;
s16 brightness;
u16 contrast;
u16 saturation;
};
#define drm_to_overlay_state(s) \
container_of(s, struct armada_overlay_state, base.base)
static inline u32 armada_spu_contrast(struct drm_plane_state *state)
{
return drm_to_overlay_state(state)->brightness << 16 |
drm_to_overlay_state(state)->contrast;
}
static inline u32 armada_spu_saturation(struct drm_plane_state *state)
{
/* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
return drm_to_overlay_state(state)->saturation << 16;
}
static inline u32 armada_csc(struct drm_plane_state *state)
{
/*
* The CFG_CSC_RGB_* settings control the output of the colour space
* converter, setting the range of output values it produces. Since
* we will be blending with the full-range graphics, we need to
* produce full-range RGB output from the conversion.
*/
return CFG_CSC_RGB_COMPUTER |
(state->color_encoding == DRM_COLOR_YCBCR_BT709 ?
CFG_CSC_YUV_CCIR709 : CFG_CSC_YUV_CCIR601);
}
/* === Plane support === */
static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct armada_crtc *dcrtc;
struct armada_regs *regs;
unsigned int idx;
u32 cfg, cfg_mask, val;
DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
if (!new_state->fb || WARN_ON(!new_state->crtc))
return;
DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
plane->base.id, plane->name,
new_state->crtc->base.id, new_state->crtc->name,
new_state->fb->base.id,
old_state->visible, new_state->visible);
dcrtc = drm_to_armada_crtc(new_state->crtc);
regs = dcrtc->regs + dcrtc->regs_idx;
idx = 0;
if (!old_state->visible && new_state->visible)
armada_reg_queue_mod(regs, idx,
0, CFG_PDWN16x66 | CFG_PDWN32x66,
LCD_SPU_SRAM_PARA1);
val = armada_src_hw(new_state);
if (armada_src_hw(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_HPXL_VLN);
val = armada_dst_yx(new_state);
if (armada_dst_yx(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_OVSA_HPXL_VLN);
val = armada_dst_hw(new_state);
if (armada_dst_hw(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_DZM_HPXL_VLN);
/* FIXME: overlay on an interlaced display */
if (old_state->src.x1 != new_state->src.x1 ||
old_state->src.y1 != new_state->src.y1 ||
old_state->fb != new_state->fb ||
new_state->crtc->state->mode_changed) {
const struct drm_format_info *format;
u16 src_x;
armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 0),
LCD_SPU_DMA_START_ADDR_Y0);
armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 1),
LCD_SPU_DMA_START_ADDR_U0);
armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 2),
LCD_SPU_DMA_START_ADDR_V0);
armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 0),
LCD_SPU_DMA_START_ADDR_Y1);
armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 1),
LCD_SPU_DMA_START_ADDR_U1);
armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 2),
LCD_SPU_DMA_START_ADDR_V1);
val = armada_pitch(new_state, 0) << 16 | armada_pitch(new_state,
0);
armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_YC);
val = armada_pitch(new_state, 1) << 16 | armada_pitch(new_state,
2);
armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_UV);
cfg = CFG_DMA_FMT(drm_fb_to_armada_fb(new_state->fb)->fmt) |
CFG_DMA_MOD(drm_fb_to_armada_fb(new_state->fb)->mod) |
CFG_CBSH_ENA;
if (new_state->visible)
cfg |= CFG_DMA_ENA;
/*
* Shifting a YUV packed format image by one pixel causes the
* U/V planes to swap. Compensate for it by also toggling
* the UV swap.
*/
format = new_state->fb->format;
src_x = new_state->src.x1 >> 16;
if (format->num_planes == 1 && src_x & (format->hsub - 1))
cfg ^= CFG_DMA_MOD(CFG_SWAPUV);
if (to_armada_plane_state(new_state)->interlace)
cfg |= CFG_DMA_FTOGGLE;
cfg_mask = CFG_CBSH_ENA | CFG_DMAFORMAT |
CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV |
CFG_SWAPYU | CFG_YUV2RGB) |
CFG_DMA_FTOGGLE | CFG_DMA_TSTMODE |
CFG_DMA_ENA;
} else if (old_state->visible != new_state->visible) {
cfg = new_state->visible ? CFG_DMA_ENA : 0;
cfg_mask = CFG_DMA_ENA;
} else {
cfg = cfg_mask = 0;
}
if (drm_rect_width(&old_state->src) != drm_rect_width(&new_state->src) ||
drm_rect_width(&old_state->dst) != drm_rect_width(&new_state->dst)) {
cfg_mask |= CFG_DMA_HSMOOTH;
if (drm_rect_width(&new_state->src) >> 16 !=
drm_rect_width(&new_state->dst))
cfg |= CFG_DMA_HSMOOTH;
}
if (cfg_mask)
armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
LCD_SPU_DMA_CTRL0);
val = armada_spu_contrast(new_state);
if ((!old_state->visible && new_state->visible) ||
armada_spu_contrast(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_CONTRAST);
val = armada_spu_saturation(new_state);
if ((!old_state->visible && new_state->visible) ||
armada_spu_saturation(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_SATURATION);
if (!old_state->visible && new_state->visible)
armada_reg_queue_set(regs, idx, 0x00002000, LCD_SPU_CBSH_HUE);
val = armada_csc(new_state);
if ((!old_state->visible && new_state->visible) ||
armada_csc(old_state) != val)
armada_reg_queue_mod(regs, idx, val, CFG_CSC_MASK,
LCD_SPU_IOPAD_CONTROL);
val = drm_to_overlay_state(new_state)->colorkey_yr;
if ((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_yr != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_Y);
val = drm_to_overlay_state(new_state)->colorkey_ug;
if ((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_ug != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_U);
val = drm_to_overlay_state(new_state)->colorkey_vb;
if ((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_vb != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_V);
val = drm_to_overlay_state(new_state)->colorkey_mode;
if ((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_mode != val)
armada_reg_queue_mod(regs, idx, val, CFG_CKMODE_MASK |
CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
LCD_SPU_DMA_CTRL1);
val = drm_to_overlay_state(new_state)->colorkey_enable;
if (((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_enable != val) &&
dcrtc->variant->has_spu_adv_reg)
armada_reg_queue_mod(regs, idx, val, ADV_GRACOLORKEY |
ADV_VIDCOLORKEY, LCD_SPU_ADV_REG);
dcrtc->regs_idx += idx;
}
static void armada_drm_overlay_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct armada_crtc *dcrtc;
struct armada_regs *regs;
unsigned int idx = 0;
DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
if (!old_state->crtc)
return;
DRM_DEBUG_KMS("[PLANE:%d:%s] was on [CRTC:%d:%s] with [FB:%d]\n",
plane->base.id, plane->name,
old_state->crtc->base.id, old_state->crtc->name,
old_state->fb->base.id);
dcrtc = drm_to_armada_crtc(old_state->crtc);
regs = dcrtc->regs + dcrtc->regs_idx;
/* Disable plane and power down the YUV FIFOs */
armada_reg_queue_mod(regs, idx, 0, CFG_DMA_ENA, LCD_SPU_DMA_CTRL0);
armada_reg_queue_mod(regs, idx, CFG_PDWN16x66 | CFG_PDWN32x66, 0,
LCD_SPU_SRAM_PARA1);
dcrtc->regs_idx += idx;
}
static const struct drm_plane_helper_funcs armada_overlay_plane_helper_funcs = {
.atomic_check = armada_drm_plane_atomic_check,
.atomic_update = armada_drm_overlay_plane_atomic_update,
.atomic_disable = armada_drm_overlay_plane_atomic_disable,
};
static int
armada_overlay_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_atomic_state *state;
struct drm_plane_state *plane_state;
int ret = 0;
trace_armada_ovl_plane_update(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
state = drm_atomic_state_alloc(plane->dev);
if (!state)
return -ENOMEM;
state->acquire_ctx = ctx;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto fail;
}
ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(plane_state, fb);
plane_state->crtc_x = crtc_x;
plane_state->crtc_y = crtc_y;
plane_state->crtc_h = crtc_h;
plane_state->crtc_w = crtc_w;
plane_state->src_x = src_x;
plane_state->src_y = src_y;
plane_state->src_h = src_h;
plane_state->src_w = src_w;
ret = drm_atomic_nonblocking_commit(state);
fail:
drm_atomic_state_put(state);
return ret;
}
static void armada_overlay_reset(struct drm_plane *plane)
{
struct armada_overlay_state *state;
if (plane->state)
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(plane->state);
plane->state = NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state) {
state->colorkey_yr = 0xfefefe00;
state->colorkey_ug = 0x01010100;
state->colorkey_vb = 0x01010100;
state->colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
CFG_ALPHAM_GRA | CFG_ALPHA(0);
state->colorkey_enable = ADV_GRACOLORKEY;
state->brightness = DEFAULT_BRIGHTNESS;
state->contrast = DEFAULT_CONTRAST;
state->saturation = DEFAULT_SATURATION;
__drm_atomic_helper_plane_reset(plane, &state->base.base);
state->base.base.color_encoding = DEFAULT_ENCODING;
state->base.base.color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
}
}
static struct drm_plane_state *
armada_overlay_duplicate_state(struct drm_plane *plane)
{
struct armada_overlay_state *state;
if (WARN_ON(!plane->state))
return NULL;
state = kmemdup(plane->state, sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_plane_duplicate_state(plane,
&state->base.base);
return &state->base.base;
}
static int armada_overlay_set_property(struct drm_plane *plane,
struct drm_plane_state *state, struct drm_property *property,
uint64_t val)
{
struct armada_private *priv = drm_to_armada_dev(plane->dev);
#define K2R(val) (((val) >> 0) & 0xff)
#define K2G(val) (((val) >> 8) & 0xff)
#define K2B(val) (((val) >> 16) & 0xff)
if (property == priv->colorkey_prop) {
#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
drm_to_overlay_state(state)->colorkey_yr = CCC(K2R(val));
drm_to_overlay_state(state)->colorkey_ug = CCC(K2G(val));
drm_to_overlay_state(state)->colorkey_vb = CCC(K2B(val));
#undef CCC
} else if (property == priv->colorkey_min_prop) {
drm_to_overlay_state(state)->colorkey_yr &= ~0x00ff0000;
drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 16;
drm_to_overlay_state(state)->colorkey_ug &= ~0x00ff0000;
drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 16;
drm_to_overlay_state(state)->colorkey_vb &= ~0x00ff0000;
drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 16;
} else if (property == priv->colorkey_max_prop) {
drm_to_overlay_state(state)->colorkey_yr &= ~0xff000000;
drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 24;
drm_to_overlay_state(state)->colorkey_ug &= ~0xff000000;
drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 24;
drm_to_overlay_state(state)->colorkey_vb &= ~0xff000000;
drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 24;
} else if (property == priv->colorkey_val_prop) {
drm_to_overlay_state(state)->colorkey_yr &= ~0x0000ff00;
drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 8;
drm_to_overlay_state(state)->colorkey_ug &= ~0x0000ff00;
drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 8;
drm_to_overlay_state(state)->colorkey_vb &= ~0x0000ff00;
drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 8;
} else if (property == priv->colorkey_alpha_prop) {
drm_to_overlay_state(state)->colorkey_yr &= ~0x000000ff;
drm_to_overlay_state(state)->colorkey_yr |= K2R(val);
drm_to_overlay_state(state)->colorkey_ug &= ~0x000000ff;
drm_to_overlay_state(state)->colorkey_ug |= K2G(val);
drm_to_overlay_state(state)->colorkey_vb &= ~0x000000ff;
drm_to_overlay_state(state)->colorkey_vb |= K2B(val);
} else if (property == priv->colorkey_mode_prop) {
if (val == CKMODE_DISABLE) {
drm_to_overlay_state(state)->colorkey_mode =
CFG_CKMODE(CKMODE_DISABLE) |
CFG_ALPHAM_CFG | CFG_ALPHA(255);
drm_to_overlay_state(state)->colorkey_enable = 0;
} else {
drm_to_overlay_state(state)->colorkey_mode =
CFG_CKMODE(val) |
CFG_ALPHAM_GRA | CFG_ALPHA(0);
drm_to_overlay_state(state)->colorkey_enable =
ADV_GRACOLORKEY;
}
} else if (property == priv->brightness_prop) {
drm_to_overlay_state(state)->brightness = val - 256;
} else if (property == priv->contrast_prop) {
drm_to_overlay_state(state)->contrast = val;
} else if (property == priv->saturation_prop) {
drm_to_overlay_state(state)->saturation = val;
} else {
return -EINVAL;
}
return 0;
}
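/*
 * Worked example (derived from the macros above, not additional driver
 * logic): writing the default "colorkey" property value 0x0101fe gives
 *   K2R(0x0101fe) = 0xfe, K2G(0x0101fe) = 0x01, K2B(0x0101fe) = 0x01
 * and, after replication through CCC():
 *   colorkey_yr = 0xfefefe00
 *   colorkey_ug = 0x01010100
 *   colorkey_vb = 0x01010100
 * which matches the defaults programmed in armada_overlay_reset().  Each
 * replicated byte then serves as the max (bits 31:24), min (bits 23:16)
 * and val (bits 15:8) fields that the colorkey_min/max/val properties
 * update individually, with bits 7:0 left for colorkey_alpha.
 */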
static int armada_overlay_get_property(struct drm_plane *plane,
const struct drm_plane_state *state, struct drm_property *property,
uint64_t *val)
{
struct armada_private *priv = drm_to_armada_dev(plane->dev);
#define C2K(c,s) (((c) >> (s)) & 0xff)
#define R2BGR(r,g,b,s) (C2K(r,s) << 0 | C2K(g,s) << 8 | C2K(b,s) << 16)
if (property == priv->colorkey_prop) {
/* Best-effort reconstruction of the single colorkey value */
*val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
drm_to_overlay_state(state)->colorkey_ug,
drm_to_overlay_state(state)->colorkey_vb, 16);
/* If min != max, or min != val, error out */
if (*val != R2BGR(drm_to_overlay_state(state)->colorkey_yr,
drm_to_overlay_state(state)->colorkey_ug,
drm_to_overlay_state(state)->colorkey_vb, 24) ||
*val != R2BGR(drm_to_overlay_state(state)->colorkey_yr,
drm_to_overlay_state(state)->colorkey_ug,
drm_to_overlay_state(state)->colorkey_vb, 8))
return -EINVAL;
} else if (property == priv->colorkey_min_prop) {
*val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
drm_to_overlay_state(state)->colorkey_ug,
drm_to_overlay_state(state)->colorkey_vb, 16);
} else if (property == priv->colorkey_max_prop) {
*val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
drm_to_overlay_state(state)->colorkey_ug,
drm_to_overlay_state(state)->colorkey_vb, 24);
} else if (property == priv->colorkey_val_prop) {
*val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
drm_to_overlay_state(state)->colorkey_ug,
drm_to_overlay_state(state)->colorkey_vb, 8);
} else if (property == priv->colorkey_alpha_prop) {
*val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
drm_to_overlay_state(state)->colorkey_ug,
drm_to_overlay_state(state)->colorkey_vb, 0);
} else if (property == priv->colorkey_mode_prop) {
*val = FIELD_GET(CFG_CKMODE_MASK,
drm_to_overlay_state(state)->colorkey_mode);
} else if (property == priv->brightness_prop) {
*val = drm_to_overlay_state(state)->brightness + 256;
} else if (property == priv->contrast_prop) {
*val = drm_to_overlay_state(state)->contrast;
} else if (property == priv->saturation_prop) {
*val = drm_to_overlay_state(state)->saturation;
} else {
return -EINVAL;
}
return 0;
}
static const struct drm_plane_funcs armada_ovl_plane_funcs = {
.update_plane = armada_overlay_plane_update,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_helper_destroy,
.reset = armada_overlay_reset,
.atomic_duplicate_state = armada_overlay_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_set_property = armada_overlay_set_property,
.atomic_get_property = armada_overlay_get_property,
};
static const uint32_t armada_ovl_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YUV420,
DRM_FORMAT_YVU420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YVU422,
DRM_FORMAT_VYUY,
DRM_FORMAT_YVYU,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
};
static const struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
{ CKMODE_DISABLE, "disabled" },
{ CKMODE_Y, "Y component" },
{ CKMODE_U, "U component" },
{ CKMODE_V, "V component" },
{ CKMODE_RGB, "RGB" },
{ CKMODE_R, "R component" },
{ CKMODE_G, "G component" },
{ CKMODE_B, "B component" },
};
static int armada_overlay_create_properties(struct drm_device *dev)
{
struct armada_private *priv = drm_to_armada_dev(dev);
if (priv->colorkey_prop)
return 0;
priv->colorkey_prop = drm_property_create_range(dev, 0,
"colorkey", 0, 0xffffff);
priv->colorkey_min_prop = drm_property_create_range(dev, 0,
"colorkey_min", 0, 0xffffff);
priv->colorkey_max_prop = drm_property_create_range(dev, 0,
"colorkey_max", 0, 0xffffff);
priv->colorkey_val_prop = drm_property_create_range(dev, 0,
"colorkey_val", 0, 0xffffff);
priv->colorkey_alpha_prop = drm_property_create_range(dev, 0,
"colorkey_alpha", 0, 0xffffff);
priv->colorkey_mode_prop = drm_property_create_enum(dev, 0,
"colorkey_mode",
armada_drm_colorkey_enum_list,
ARRAY_SIZE(armada_drm_colorkey_enum_list));
priv->brightness_prop = drm_property_create_range(dev, 0,
"brightness", 0, 256 + 255);
priv->contrast_prop = drm_property_create_range(dev, 0,
"contrast", 0, 0x7fff);
priv->saturation_prop = drm_property_create_range(dev, 0,
"saturation", 0, 0x7fff);
if (!priv->colorkey_prop)
return -ENOMEM;
return 0;
}
int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
{
struct armada_private *priv = drm_to_armada_dev(dev);
struct drm_mode_object *mobj;
struct drm_plane *overlay;
int ret;
ret = armada_overlay_create_properties(dev);
if (ret)
return ret;
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return -ENOMEM;
drm_plane_helper_add(overlay, &armada_overlay_plane_helper_funcs);
ret = drm_universal_plane_init(dev, overlay, crtcs,
&armada_ovl_plane_funcs,
armada_ovl_formats,
ARRAY_SIZE(armada_ovl_formats),
NULL,
DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
kfree(overlay);
return ret;
}
mobj = &overlay->base;
drm_object_attach_property(mobj, priv->colorkey_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_min_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_max_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_val_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_alpha_prop,
0x000000);
drm_object_attach_property(mobj, priv->colorkey_mode_prop,
CKMODE_RGB);
drm_object_attach_property(mobj, priv->brightness_prop,
256 + DEFAULT_BRIGHTNESS);
drm_object_attach_property(mobj, priv->contrast_prop,
DEFAULT_CONTRAST);
drm_object_attach_property(mobj, priv->saturation_prop,
DEFAULT_SATURATION);
ret = drm_plane_create_color_properties(overlay,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
DEFAULT_ENCODING,
DRM_COLOR_YCBCR_LIMITED_RANGE);
return ret;
}
| linux-master | drivers/gpu/drm/armada/armada_overlay.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Russell King
* Written from the i915 driver.
*/
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
static void armada_fbdev_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fbh = info->par;
drm_fb_helper_fini(fbh);
fbh->fb->funcs->destroy(fbh->fb);
drm_client_release(&fbh->client);
drm_fb_helper_unprepare(fbh);
kfree(fbh);
}
static const struct fb_ops armada_fb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_destroy = armada_fbdev_fb_destroy,
};
static int armada_fbdev_create(struct drm_fb_helper *fbh,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fbh->dev;
struct drm_mode_fb_cmd2 mode;
struct armada_framebuffer *dfb;
struct armada_gem_object *obj;
struct fb_info *info;
int size, ret;
void *ptr;
memset(&mode, 0, sizeof(mode));
mode.width = sizes->surface_width;
mode.height = sizes->surface_height;
mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp);
mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
size = mode.pitches[0] * mode.height;
obj = armada_gem_alloc_private_object(dev, size);
if (!obj) {
DRM_ERROR("failed to allocate fb memory\n");
return -ENOMEM;
}
ret = armada_gem_linear_back(dev, obj);
if (ret) {
drm_gem_object_put(&obj->obj);
return ret;
}
ptr = armada_gem_map_object(dev, obj);
if (!ptr) {
drm_gem_object_put(&obj->obj);
return -ENOMEM;
}
dfb = armada_framebuffer_create(dev, &mode, obj);
/*
* A reference is now held by the framebuffer object if
* successful, otherwise this drops the ref for the error path.
*/
drm_gem_object_put(&obj->obj);
if (IS_ERR(dfb))
return PTR_ERR(dfb);
info = drm_fb_helper_alloc_info(fbh);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_fballoc;
}
info->fbops = &armada_fb_ops;
info->fix.smem_start = obj->phys_addr;
info->fix.smem_len = obj->obj.size;
info->screen_size = obj->obj.size;
info->screen_base = ptr;
fbh->fb = &dfb->fb;
drm_fb_helper_fill_info(info, fbh, sizes);
DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
dfb->fb.width, dfb->fb.height, dfb->fb.format->cpp[0] * 8,
(unsigned long long)obj->phys_addr);
return 0;
err_fballoc:
dfb->fb.funcs->destroy(&dfb->fb);
return ret;
}
static int armada_fb_probe(struct drm_fb_helper *fbh,
struct drm_fb_helper_surface_size *sizes)
{
int ret = 0;
if (!fbh->fb) {
ret = armada_fbdev_create(fbh, sizes);
if (ret == 0)
ret = 1;
}
return ret;
}
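/*
 * Note on the return value (an observation, not new behaviour): fb_probe
 * returns 1 after a framebuffer has just been created and 0 when one
 * already exists; the positive value appears to be how the fb-helper core
 * is told that a new framebuffer was set up, while negative values simply
 * propagate errors from armada_fbdev_create().
 */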
static const struct drm_fb_helper_funcs armada_fb_helper_funcs = {
.fb_probe = armada_fb_probe,
};
/*
* Fbdev client and struct drm_client_funcs
*/
static void armada_fbdev_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fbh = drm_fb_helper_from_client(client);
if (fbh->info) {
drm_fb_helper_unregister_info(fbh);
} else {
drm_client_release(&fbh->client);
drm_fb_helper_unprepare(fbh);
kfree(fbh);
}
}
static int armada_fbdev_client_restore(struct drm_client_dev *client)
{
drm_fb_helper_lastclose(client->dev);
return 0;
}
static int armada_fbdev_client_hotplug(struct drm_client_dev *client)
{
struct drm_fb_helper *fbh = drm_fb_helper_from_client(client);
struct drm_device *dev = client->dev;
int ret;
if (dev->fb_helper)
return drm_fb_helper_hotplug_event(dev->fb_helper);
ret = drm_fb_helper_init(dev, fbh);
if (ret)
goto err_drm_err;
if (!drm_drv_uses_atomic_modeset(dev))
drm_helper_disable_unused_functions(dev);
ret = drm_fb_helper_initial_config(fbh);
if (ret)
goto err_drm_fb_helper_fini;
return 0;
err_drm_fb_helper_fini:
drm_fb_helper_fini(fbh);
err_drm_err:
drm_err(dev, "armada: Failed to setup fbdev emulation (ret=%d)\n", ret);
return ret;
}
static const struct drm_client_funcs armada_fbdev_client_funcs = {
.owner = THIS_MODULE,
.unregister = armada_fbdev_client_unregister,
.restore = armada_fbdev_client_restore,
.hotplug = armada_fbdev_client_hotplug,
};
void armada_fbdev_setup(struct drm_device *dev)
{
struct drm_fb_helper *fbh;
int ret;
drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
fbh = kzalloc(sizeof(*fbh), GFP_KERNEL);
if (!fbh)
return;
drm_fb_helper_prepare(dev, fbh, 32, &armada_fb_helper_funcs);
ret = drm_client_init(dev, &fbh->client, "fbdev", &armada_fbdev_client_funcs);
if (ret) {
drm_err(dev, "Failed to register client: %d\n", ret);
goto err_drm_client_init;
}
drm_client_register(&fbh->client);
return;
err_drm_client_init:
drm_fb_helper_unprepare(fbh);
kfree(fbh);
return;
}
| linux-master | drivers/gpu/drm/armada/armada_fbdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Russell King
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_fb.h"
#include "armada_hw.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
static const struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl, 0),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, 0),
};
DEFINE_DRM_GEM_FOPS(armada_drm_fops);
static const struct drm_driver armada_drm_driver = {
.gem_prime_import = armada_gem_prime_import,
.dumb_create = armada_gem_dumb_create,
.major = 1,
.minor = 0,
.name = "armada-drm",
.desc = "Armada SoC DRM",
.date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.ioctls = armada_ioctls,
.num_ioctls = ARRAY_SIZE(armada_ioctls),
.fops = &armada_drm_fops,
};
static const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
.fb_create = armada_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int armada_drm_bind(struct device *dev)
{
struct armada_private *priv;
struct resource *mem = NULL;
int ret, n;
for (n = 0; ; n++) {
struct resource *r = platform_get_resource(to_platform_device(dev),
IORESOURCE_MEM, n);
if (!r)
break;
/* Resources above 64K are graphics memory */
if (resource_size(r) > SZ_64K)
mem = r;
else
return -EINVAL;
}
if (!mem)
return -ENXIO;
if (!devm_request_mem_region(dev, mem->start, resource_size(mem),
"armada-drm"))
return -EBUSY;
priv = devm_drm_dev_alloc(dev, &armada_drm_driver,
struct armada_private, drm);
if (IS_ERR(priv)) {
dev_err(dev, "[" DRM_NAME ":%s] devm_drm_dev_alloc failed: %li\n",
__func__, PTR_ERR(priv));
return PTR_ERR(priv);
}
/* Remove early framebuffers */
ret = drm_aperture_remove_framebuffers(&armada_drm_driver);
if (ret) {
dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
__func__, ret);
return ret;
}
dev_set_drvdata(dev, &priv->drm);
/* Mode setting support */
drm_mode_config_init(&priv->drm);
priv->drm.mode_config.min_width = 320;
priv->drm.mode_config.min_height = 200;
/*
* With vscale enabled, the maximum width is 1920 due to the
* 1920 by 3 lines RAM
*/
priv->drm.mode_config.max_width = 1920;
priv->drm.mode_config.max_height = 2048;
priv->drm.mode_config.preferred_depth = 24;
priv->drm.mode_config.funcs = &armada_drm_mode_config_funcs;
drm_mm_init(&priv->linear, mem->start, resource_size(mem));
mutex_init(&priv->linear_lock);
ret = component_bind_all(dev, &priv->drm);
if (ret)
goto err_kms;
ret = drm_vblank_init(&priv->drm, priv->drm.mode_config.num_crtc);
if (ret)
goto err_comp;
drm_mode_config_reset(&priv->drm);
drm_kms_helper_poll_init(&priv->drm);
ret = drm_dev_register(&priv->drm, 0);
if (ret)
goto err_poll;
#ifdef CONFIG_DEBUG_FS
armada_drm_debugfs_init(priv->drm.primary);
#endif
armada_fbdev_setup(&priv->drm);
return 0;
err_poll:
drm_kms_helper_poll_fini(&priv->drm);
err_comp:
component_unbind_all(dev, &priv->drm);
err_kms:
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
return ret;
}
static void armada_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct armada_private *priv = drm_to_armada_dev(drm);
drm_kms_helper_poll_fini(&priv->drm);
drm_dev_unregister(&priv->drm);
drm_atomic_helper_shutdown(&priv->drm);
component_unbind_all(dev, &priv->drm);
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
}
static void armada_add_endpoints(struct device *dev,
struct component_match **match, struct device_node *dev_node)
{
struct device_node *ep, *remote;
for_each_endpoint_of_node(dev_node, ep) {
remote = of_graph_get_remote_port_parent(ep);
if (remote && of_device_is_available(remote))
drm_of_component_match_add(dev, match, component_compare_of,
remote);
of_node_put(remote);
}
}
static const struct component_master_ops armada_master_ops = {
.bind = armada_drm_bind,
.unbind = armada_drm_unbind,
};
static int armada_drm_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
struct device *dev = &pdev->dev;
int ret;
ret = drm_of_component_probe(dev, component_compare_dev_name, &armada_master_ops);
if (ret != -EINVAL)
return ret;
if (dev->platform_data) {
char **devices = dev->platform_data;
struct device *d;
int i;
for (i = 0; devices[i]; i++)
component_match_add(dev, &match, component_compare_dev_name,
devices[i]);
if (i == 0) {
dev_err(dev, "missing 'ports' property\n");
return -ENODEV;
}
for (i = 0; devices[i]; i++) {
d = bus_find_device_by_name(&platform_bus_type, NULL,
devices[i]);
if (d && d->of_node)
armada_add_endpoints(dev, &match, d->of_node);
put_device(d);
}
}
return component_master_add_with_match(&pdev->dev, &armada_master_ops,
match);
}
static int armada_drm_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &armada_master_ops);
return 0;
}
static const struct platform_device_id armada_drm_platform_ids[] = {
{
.name = "armada-drm",
}, {
.name = "armada-510-drm",
},
{ },
};
MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
static struct platform_driver armada_drm_platform_driver = {
.probe = armada_drm_probe,
.remove = armada_drm_remove,
.driver = {
.name = "armada-drm",
},
.id_table = armada_drm_platform_ids,
};
static int __init armada_drm_init(void)
{
int ret;
if (drm_firmware_drivers_only())
return -ENODEV;
ret = platform_driver_register(&armada_lcd_platform_driver);
if (ret)
return ret;
ret = platform_driver_register(&armada_drm_platform_driver);
if (ret)
platform_driver_unregister(&armada_lcd_platform_driver);
return ret;
}
module_init(armada_drm_init);
static void __exit armada_drm_exit(void)
{
platform_driver_unregister(&armada_drm_platform_driver);
platform_driver_unregister(&armada_lcd_platform_driver);
}
module_exit(armada_drm_exit);
MODULE_AUTHOR("Russell King <[email protected]>");
MODULE_DESCRIPTION("Armada DRM Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:armada-drm");
| linux-master | drivers/gpu/drm/armada/armada_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Russell King
*/
#include <drm/drm_modeset_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
static const struct drm_framebuffer_funcs armada_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
{
struct armada_framebuffer *dfb;
uint8_t format, config;
int ret;
switch (mode->pixel_format) {
#define FMT(drm, fmt, mod) \
case DRM_FORMAT_##drm: \
format = CFG_##fmt; \
config = mod; \
break
FMT(RGB565, 565, CFG_SWAPRB);
FMT(BGR565, 565, 0);
FMT(ARGB1555, 1555, CFG_SWAPRB);
FMT(ABGR1555, 1555, 0);
FMT(RGB888, 888PACK, CFG_SWAPRB);
FMT(BGR888, 888PACK, 0);
FMT(XRGB8888, X888, CFG_SWAPRB);
FMT(XBGR8888, X888, 0);
FMT(ARGB8888, 8888, CFG_SWAPRB);
FMT(ABGR8888, 8888, 0);
FMT(YUYV, 422PACK, CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV);
FMT(UYVY, 422PACK, CFG_YUV2RGB);
FMT(VYUY, 422PACK, CFG_YUV2RGB | CFG_SWAPUV);
FMT(YVYU, 422PACK, CFG_YUV2RGB | CFG_SWAPYU);
FMT(YUV422, 422, CFG_YUV2RGB);
FMT(YVU422, 422, CFG_YUV2RGB | CFG_SWAPUV);
FMT(YUV420, 420, CFG_YUV2RGB);
FMT(YVU420, 420, CFG_YUV2RGB | CFG_SWAPUV);
FMT(C8, PSEUDO8, 0);
#undef FMT
default:
return ERR_PTR(-EINVAL);
}
dfb = kzalloc(sizeof(*dfb), GFP_KERNEL);
if (!dfb) {
DRM_ERROR("failed to allocate Armada fb object\n");
return ERR_PTR(-ENOMEM);
}
dfb->fmt = format;
dfb->mod = config;
dfb->fb.obj[0] = &obj->obj;
drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode);
ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
if (ret) {
kfree(dfb);
return ERR_PTR(ret);
}
/*
* Take a reference on our object as we're successful - the
* caller already holds a reference, which keeps us safe for
* the above call, but the caller will drop their reference
* to it. Hence we need to take our own reference.
*/
drm_gem_object_get(&obj->obj);
return dfb;
}
struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode)
{
const struct drm_format_info *info = drm_get_format_info(dev, mode);
struct armada_gem_object *obj;
struct armada_framebuffer *dfb;
int ret;
DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n",
mode->width, mode->height, mode->pixel_format,
mode->flags, mode->pitches[0], mode->pitches[1],
mode->pitches[2]);
/* We can only handle a single plane at the moment */
if (info->num_planes > 1 &&
(mode->handles[0] != mode->handles[1] ||
mode->handles[0] != mode->handles[2])) {
ret = -EINVAL;
goto err;
}
obj = armada_gem_object_lookup(dfile, mode->handles[0]);
if (!obj) {
ret = -ENOENT;
goto err;
}
if (obj->obj.import_attach && !obj->sgt) {
ret = armada_gem_map_import(obj);
if (ret)
goto err_unref;
}
/* Framebuffer objects must have a valid device address for scanout */
if (!obj->mapped) {
ret = -EINVAL;
goto err_unref;
}
dfb = armada_framebuffer_create(dev, mode, obj);
if (IS_ERR(dfb)) {
ret = PTR_ERR(dfb);
goto err;
}
drm_gem_object_put(&obj->obj);
return &dfb->fb;
err_unref:
drm_gem_object_put(&obj->obj);
err:
DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/armada/armada_fb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Russell King
*/
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <drm/armada_drm.h>
#include <drm/drm_prime.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_ioctlP.h"
MODULE_IMPORT_NS(DMA_BUF);
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
struct drm_gem_object *gobj = vmf->vma->vm_private_data;
struct armada_gem_object *obj = drm_to_armada_gem(gobj);
unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
static const struct vm_operations_struct armada_gem_vm_ops = {
.fault = armada_gem_vm_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static size_t roundup_gem_size(size_t size)
{
return roundup(size, PAGE_SIZE);
}
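/*
 * Example (assuming the usual 4 KiB PAGE_SIZE): roundup_gem_size(5000)
 * returns 8192, i.e. every GEM object is sized in whole pages so the
 * mmap/fault paths below always deal with complete pages.
 */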
void armada_gem_free_object(struct drm_gem_object *obj)
{
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
struct armada_private *priv = drm_to_armada_dev(obj->dev);
DRM_DEBUG_DRIVER("release obj %p\n", dobj);
drm_gem_free_mmap_offset(&dobj->obj);
might_lock(&priv->linear_lock);
if (dobj->page) {
/* page backed memory */
unsigned int order = get_order(dobj->obj.size);
__free_pages(dobj->page, order);
} else if (dobj->linear) {
/* linear backed memory */
mutex_lock(&priv->linear_lock);
drm_mm_remove_node(dobj->linear);
mutex_unlock(&priv->linear_lock);
kfree(dobj->linear);
if (dobj->addr)
iounmap(dobj->addr);
}
if (dobj->obj.import_attach) {
/* We only ever display imported data */
if (dobj->sgt)
dma_buf_unmap_attachment_unlocked(dobj->obj.import_attach,
dobj->sgt, DMA_TO_DEVICE);
drm_prime_gem_destroy(&dobj->obj, NULL);
}
drm_gem_object_release(&dobj->obj);
kfree(dobj);
}
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
struct armada_private *priv = drm_to_armada_dev(dev);
size_t size = obj->obj.size;
if (obj->page || obj->linear)
return 0;
/*
* If it is a small allocation (typically cursor, which will
* be 32x64 or 64x32 ARGB pixels) try to get it from the system.
* Framebuffers will never be this small (our minimum size for
* framebuffers is larger than this anyway.) Such objects are
* only accessed by the CPU, so we don't need any special handling
* here.
*/
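/*
 * For example (derived from the comment above): a 64x32 ARGB cursor is
 * 64 * 32 * 4 = 8192 bytes, which is exactly the upper bound of the
 * page-allocation fast path below; anything larger falls through to the
 * linear (graphics memory) allocator.
 */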
if (size <= 8192) {
unsigned int order = get_order(size);
struct page *p = alloc_pages(GFP_KERNEL, order);
if (p) {
obj->addr = page_address(p);
obj->phys_addr = page_to_phys(p);
obj->page = p;
memset(obj->addr, 0, PAGE_ALIGN(size));
}
}
/*
* We could grab something from DMA if it's enabled, but that
* involves building in a problem:
*
* GEM DMA helper interface uses dma_alloc_coherent(), which provides
* us with a CPU virtual address and a device address.
*
* The CPU virtual address may be either an address in the kernel
* direct mapped region (for example, as it would be on x86) or
* it may be remapped into another part of kernel memory space
* (eg, as it would be on ARM.) This means virt_to_phys() on the
* returned virtual address is invalid depending on the architecture
* implementation.
*
* The device address may also not be a physical address; it may
* be that there is some kind of remapping between the device and
* system RAM, which also makes it unsafe to reuse the device
* address as a physical address.
*
* This makes DRM usage of dma_alloc_coherent() in a generic way
* at best very questionable and unsafe.
*/
/* Otherwise, grab it from our linear allocation */
if (!obj->page) {
struct drm_mm_node *node;
unsigned align = min_t(unsigned, size, SZ_2M);
void __iomem *ptr;
int ret;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOSPC;
mutex_lock(&priv->linear_lock);
ret = drm_mm_insert_node_generic(&priv->linear, node,
size, align, 0, 0);
mutex_unlock(&priv->linear_lock);
if (ret) {
kfree(node);
return ret;
}
obj->linear = node;
/* Ensure that the memory we're returning is cleared. */
ptr = ioremap_wc(obj->linear->start, size);
if (!ptr) {
mutex_lock(&priv->linear_lock);
drm_mm_remove_node(obj->linear);
mutex_unlock(&priv->linear_lock);
kfree(obj->linear);
obj->linear = NULL;
return -ENOMEM;
}
memset_io(ptr, 0, size);
iounmap(ptr);
obj->phys_addr = obj->linear->start;
obj->dev_addr = obj->linear->start;
obj->mapped = true;
}
DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
(unsigned long long)obj->phys_addr,
(unsigned long long)obj->dev_addr);
return 0;
}
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
/* only linear objects need to be ioremap'd */
if (!dobj->addr && dobj->linear)
dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
return dobj->addr;
}
static const struct drm_gem_object_funcs armada_gem_object_funcs = {
.free = armada_gem_free_object,
.export = armada_gem_prime_export,
.vm_ops = &armada_gem_vm_ops,
};
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
struct armada_gem_object *obj;
size = roundup_gem_size(size);
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return NULL;
obj->obj.funcs = &armada_gem_object_funcs;
drm_gem_private_object_init(dev, &obj->obj, size);
DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
return obj;
}
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
size_t size)
{
struct armada_gem_object *obj;
struct address_space *mapping;
size = roundup_gem_size(size);
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return NULL;
obj->obj.funcs = &armada_gem_object_funcs;
if (drm_gem_object_init(dev, &obj->obj, size)) {
kfree(obj);
return NULL;
}
mapping = obj->obj.filp->f_mapping;
mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
return obj;
}
/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct armada_gem_object *dobj;
u32 handle;
size_t size;
int ret;
args->pitch = armada_pitch(args->width, args->bpp);
args->size = size = args->pitch * args->height;
dobj = armada_gem_alloc_private_object(dev, size);
if (dobj == NULL)
return -ENOMEM;
ret = armada_gem_linear_back(dev, dobj);
if (ret)
goto err;
ret = drm_gem_handle_create(file, &dobj->obj, &handle);
if (ret)
goto err;
args->handle = handle;
/* drop reference from allocate - handle holds it now */
DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
err:
drm_gem_object_put(&dobj->obj);
return ret;
}
/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_armada_gem_create *args = data;
struct armada_gem_object *dobj;
size_t size;
u32 handle;
int ret;
if (args->size == 0)
return -ENOMEM;
size = args->size;
dobj = armada_gem_alloc_object(dev, size);
if (dobj == NULL)
return -ENOMEM;
ret = drm_gem_handle_create(file, &dobj->obj, &handle);
if (ret)
goto err;
args->handle = handle;
/* drop reference from allocate - handle holds it now */
DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
err:
drm_gem_object_put(&dobj->obj);
return ret;
}
/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_armada_gem_mmap *args = data;
struct armada_gem_object *dobj;
unsigned long addr;
dobj = armada_gem_object_lookup(file, args->handle);
if (dobj == NULL)
return -ENOENT;
if (!dobj->obj.filp) {
drm_gem_object_put(&dobj->obj);
return -EINVAL;
}
addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
MAP_SHARED, args->offset);
drm_gem_object_put(&dobj->obj);
if (IS_ERR_VALUE(addr))
return addr;
args->addr = addr;
return 0;
}
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_armada_gem_pwrite *args = data;
struct armada_gem_object *dobj;
char __user *ptr;
int ret = 0;
DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
args->handle, args->offset, args->size, args->ptr);
if (args->size == 0)
return 0;
ptr = (char __user *)(uintptr_t)args->ptr;
if (!access_ok(ptr, args->size))
return -EFAULT;
if (fault_in_readable(ptr, args->size))
return -EFAULT;
dobj = armada_gem_object_lookup(file, args->handle);
if (dobj == NULL)
return -ENOENT;
/* Must be a kernel-mapped object */
if (!dobj->addr)
return -EINVAL;
if (args->offset > dobj->obj.size ||
args->size > dobj->obj.size - args->offset) {
DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
ret = -EINVAL;
goto unref;
}
if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
ret = -EFAULT;
} else if (dobj->update) {
dobj->update(dobj->update_data);
ret = 0;
}
unref:
drm_gem_object_put(&dobj->obj);
return ret;
}
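/*
 * Rough userspace sketch of how these private ioctls fit together (field
 * and macro names are taken from the handlers above and assumed to match
 * the uapi <drm/armada_drm.h> header; treat this as illustration, not a
 * verified snippet):
 *
 *	struct drm_armada_gem_create create = { .size = len };
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &create);
 *
 *	struct drm_armada_gem_mmap map = {
 *		.handle = create.handle, .offset = 0, .size = len,
 *	};
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_MMAP, &map);
 *	- on return, map.addr holds the CPU mapping of the shmem object
 *
 * The pwrite ioctl instead copies a user buffer into an object that has a
 * kernel mapping (dobj->addr), which the !dobj->addr check above enforces;
 * shmem-backed objects created via GEM_CREATE are written through the
 * mmap path rather than pwrite.
 */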
/* Prime support */
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
struct scatterlist *sg;
struct sg_table *sgt;
int i;
sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
return NULL;
if (dobj->obj.filp) {
struct address_space *mapping;
int count;
count = dobj->obj.size / PAGE_SIZE;
if (sg_alloc_table(sgt, count, GFP_KERNEL))
goto free_sgt;
mapping = dobj->obj.filp->f_mapping;
for_each_sgtable_sg(sgt, sg, i) {
struct page *page;
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
goto release;
sg_set_page(sg, page, PAGE_SIZE, 0);
}
if (dma_map_sgtable(attach->dev, sgt, dir, 0))
goto release;
} else if (dobj->page) {
/* Single contiguous page */
if (sg_alloc_table(sgt, 1, GFP_KERNEL))
goto free_sgt;
sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
if (dma_map_sgtable(attach->dev, sgt, dir, 0))
goto free_table;
} else if (dobj->linear) {
/* Single contiguous physical region - no struct page */
if (sg_alloc_table(sgt, 1, GFP_KERNEL))
goto free_sgt;
sg_dma_address(sgt->sgl) = dobj->dev_addr;
sg_dma_len(sgt->sgl) = dobj->obj.size;
} else {
goto free_sgt;
}
return sgt;
release:
for_each_sgtable_sg(sgt, sg, i)
if (sg_page(sg))
put_page(sg_page(sg));
free_table:
sg_free_table(sgt);
free_sgt:
kfree(sgt);
return NULL;
}
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt, enum dma_data_direction dir)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
int i;
if (!dobj->linear)
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
if (dobj->obj.filp) {
struct scatterlist *sg;
for_each_sgtable_sg(sgt, sg, i)
put_page(sg_page(sg));
}
sg_free_table(sgt);
kfree(sgt);
}
static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
return -EINVAL;
}
static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
.map_dma_buf = armada_gem_prime_map_dma_buf,
.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.mmap = armada_gem_dmabuf_mmap,
};
struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.ops = &armada_gem_prime_dmabuf_ops;
exp_info.size = obj->size;
exp_info.flags = O_RDWR;
exp_info.priv = obj;
return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
struct dma_buf_attachment *attach;
struct armada_gem_object *dobj;
if (buf->ops == &armada_gem_prime_dmabuf_ops) {
struct drm_gem_object *obj = buf->priv;
if (obj->dev == dev) {
/*
* Importing our own dmabuf(s) increases the
* refcount on the gem object itself.
*/
drm_gem_object_get(obj);
return obj;
}
}
attach = dma_buf_attach(buf, dev->dev);
if (IS_ERR(attach))
return ERR_CAST(attach);
dobj = armada_gem_alloc_private_object(dev, buf->size);
if (!dobj) {
dma_buf_detach(buf, attach);
return ERR_PTR(-ENOMEM);
}
dobj->obj.import_attach = attach;
get_dma_buf(buf);
/*
* Don't call dma_buf_map_attachment() here - it maps the
* scatterlist immediately for DMA, and this is not always
* an appropriate thing to do.
*/
return &dobj->obj;
}
int armada_gem_map_import(struct armada_gem_object *dobj)
{
int ret;
dobj->sgt = dma_buf_map_attachment_unlocked(dobj->obj.import_attach,
DMA_TO_DEVICE);
if (IS_ERR(dobj->sgt)) {
ret = PTR_ERR(dobj->sgt);
dobj->sgt = NULL;
DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
return ret;
}
if (dobj->sgt->nents > 1) {
DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
return -EINVAL;
}
if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
return -EINVAL;
}
dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
dobj->mapped = true;
return 0;
}
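/*
 * Usage note (cross-reference within this driver, not new behaviour):
 * armada_gem_map_import() is deliberately deferred until the imported
 * buffer is actually needed for scanout; armada_fb_create() calls it only
 * when a framebuffer is created on top of an imported object that has no
 * sg table yet, matching the comment in armada_gem_prime_import() about
 * not mapping the attachment at import time.
 */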
| linux-master | drivers/gpu/drm/armada/armada_gem.c |
// SPDX-License-Identifier: GPL-2.0
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "armada_trace.h"
#endif
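/*
 * Background note (standard kernel tracepoint idiom, not armada-specific):
 * defining CREATE_TRACE_POINTS before including the trace header turns the
 * TRACE_EVENT() declarations in armada_trace.h into their single set of
 * definitions for this module; the __CHECKER__ guard presumably keeps
 * sparse away from the generated code.
 */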
| linux-master | drivers/gpu/drm/armada/armada_trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* GPU memory trace points
*
* Copyright (C) 2020 Google, Inc.
*/
#include <linux/module.h>
#define CREATE_TRACE_POINTS
#include <trace/events/gpu_mem.h>
EXPORT_TRACEPOINT_SYMBOL(gpu_mem_total);
| linux-master | drivers/gpu/trace/trace_gpu_mem.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2014 Mentor Graphics Inc.
* Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
*/
#include <linux/export.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <uapi/linux/v4l2-mediabus.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include "ipu-prv.h"
struct ipu_csi {
void __iomem *base;
int id;
u32 module;
struct clk *clk_ipu; /* IPU bus clock */
spinlock_t lock;
bool inuse;
struct ipu_soc *ipu;
};
/* CSI Register Offsets */
#define CSI_SENS_CONF 0x0000
#define CSI_SENS_FRM_SIZE 0x0004
#define CSI_ACT_FRM_SIZE 0x0008
#define CSI_OUT_FRM_CTRL 0x000c
#define CSI_TST_CTRL 0x0010
#define CSI_CCIR_CODE_1 0x0014
#define CSI_CCIR_CODE_2 0x0018
#define CSI_CCIR_CODE_3 0x001c
#define CSI_MIPI_DI 0x0020
#define CSI_SKIP 0x0024
#define CSI_CPD_CTRL 0x0028
#define CSI_CPD_RC(n) (0x002c + ((n)*4))
#define CSI_CPD_RS(n) (0x004c + ((n)*4))
#define CSI_CPD_GRC(n) (0x005c + ((n)*4))
#define CSI_CPD_GRS(n) (0x007c + ((n)*4))
#define CSI_CPD_GBC(n) (0x008c + ((n)*4))
#define CSI_CPD_GBS(n) (0x00Ac + ((n)*4))
#define CSI_CPD_BC(n) (0x00Bc + ((n)*4))
#define CSI_CPD_BS(n) (0x00Dc + ((n)*4))
#define CSI_CPD_OFFSET1 0x00ec
#define CSI_CPD_OFFSET2 0x00f0
/* CSI Register Fields */
#define CSI_SENS_CONF_DATA_FMT_SHIFT 8
#define CSI_SENS_CONF_DATA_FMT_MASK 0x00000700
#define CSI_SENS_CONF_DATA_FMT_RGB_YUV444 0L
#define CSI_SENS_CONF_DATA_FMT_YUV422_YUYV 1L
#define CSI_SENS_CONF_DATA_FMT_YUV422_UYVY 2L
#define CSI_SENS_CONF_DATA_FMT_BAYER 3L
#define CSI_SENS_CONF_DATA_FMT_RGB565 4L
#define CSI_SENS_CONF_DATA_FMT_RGB555 5L
#define CSI_SENS_CONF_DATA_FMT_RGB444 6L
#define CSI_SENS_CONF_DATA_FMT_JPEG 7L
#define CSI_SENS_CONF_VSYNC_POL_SHIFT 0
#define CSI_SENS_CONF_HSYNC_POL_SHIFT 1
#define CSI_SENS_CONF_DATA_POL_SHIFT 2
#define CSI_SENS_CONF_PIX_CLK_POL_SHIFT 3
#define CSI_SENS_CONF_SENS_PRTCL_MASK 0x00000070
#define CSI_SENS_CONF_SENS_PRTCL_SHIFT 4
#define CSI_SENS_CONF_PACK_TIGHT_SHIFT 7
#define CSI_SENS_CONF_DATA_WIDTH_SHIFT 11
#define CSI_SENS_CONF_EXT_VSYNC_SHIFT 15
#define CSI_SENS_CONF_DIVRATIO_SHIFT 16
#define CSI_SENS_CONF_DIVRATIO_MASK 0x00ff0000
#define CSI_SENS_CONF_DATA_DEST_SHIFT 24
#define CSI_SENS_CONF_DATA_DEST_MASK 0x07000000
#define CSI_SENS_CONF_JPEG8_EN_SHIFT 27
#define CSI_SENS_CONF_JPEG_EN_SHIFT 28
#define CSI_SENS_CONF_FORCE_EOF_SHIFT 29
#define CSI_SENS_CONF_DATA_EN_POL_SHIFT 31
#define CSI_DATA_DEST_IC 2
#define CSI_DATA_DEST_IDMAC 4
#define CSI_CCIR_ERR_DET_EN 0x01000000
#define CSI_HORI_DOWNSIZE_EN 0x80000000
#define CSI_VERT_DOWNSIZE_EN 0x40000000
#define CSI_TEST_GEN_MODE_EN 0x01000000
#define CSI_HSC_MASK 0x1fff0000
#define CSI_HSC_SHIFT 16
#define CSI_VSC_MASK 0x00000fff
#define CSI_VSC_SHIFT 0
#define CSI_TEST_GEN_R_MASK 0x000000ff
#define CSI_TEST_GEN_R_SHIFT 0
#define CSI_TEST_GEN_G_MASK 0x0000ff00
#define CSI_TEST_GEN_G_SHIFT 8
#define CSI_TEST_GEN_B_MASK 0x00ff0000
#define CSI_TEST_GEN_B_SHIFT 16
#define CSI_MAX_RATIO_SKIP_SMFC_MASK 0x00000007
#define CSI_MAX_RATIO_SKIP_SMFC_SHIFT 0
#define CSI_SKIP_SMFC_MASK 0x000000f8
#define CSI_SKIP_SMFC_SHIFT 3
#define CSI_ID_2_SKIP_MASK 0x00000300
#define CSI_ID_2_SKIP_SHIFT 8
#define CSI_COLOR_FIRST_ROW_MASK 0x00000002
#define CSI_COLOR_FIRST_COMP_MASK 0x00000001
/* MIPI CSI-2 data types */
#define MIPI_DT_YUV420 0x18 /* YYY.../UYVY.... */
#define MIPI_DT_YUV420_LEGACY 0x1a /* UYY.../VYY... */
#define MIPI_DT_YUV422 0x1e /* UYVY... */
#define MIPI_DT_RGB444 0x20
#define MIPI_DT_RGB555 0x21
#define MIPI_DT_RGB565 0x22
#define MIPI_DT_RGB666 0x23
#define MIPI_DT_RGB888 0x24
#define MIPI_DT_RAW6 0x28
#define MIPI_DT_RAW7 0x29
#define MIPI_DT_RAW8 0x2a
#define MIPI_DT_RAW10 0x2b
#define MIPI_DT_RAW12 0x2c
#define MIPI_DT_RAW14 0x2d
/*
* Bitfield of CSI bus signal polarities and modes.
*/
struct ipu_csi_bus_config {
unsigned data_width:4;
unsigned clk_mode:3;
unsigned ext_vsync:1;
unsigned vsync_pol:1;
unsigned hsync_pol:1;
unsigned pixclk_pol:1;
unsigned data_pol:1;
unsigned sens_clksrc:1;
unsigned pack_tight:1;
unsigned force_eof:1;
unsigned data_en_pol:1;
unsigned data_fmt;
unsigned mipi_dt;
};
/*
* Enumeration of CSI data bus widths.
*/
enum ipu_csi_data_width {
IPU_CSI_DATA_WIDTH_4 = 0,
IPU_CSI_DATA_WIDTH_8 = 1,
IPU_CSI_DATA_WIDTH_10 = 3,
IPU_CSI_DATA_WIDTH_12 = 5,
IPU_CSI_DATA_WIDTH_16 = 9,
};
/*
* Enumeration of CSI clock modes.
*/
enum ipu_csi_clk_mode {
IPU_CSI_CLK_MODE_GATED_CLK,
IPU_CSI_CLK_MODE_NONGATED_CLK,
IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE,
IPU_CSI_CLK_MODE_CCIR656_INTERLACED,
IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR,
IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR,
IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR,
IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR,
};
static inline u32 ipu_csi_read(struct ipu_csi *csi, unsigned offset)
{
return readl(csi->base + offset);
}
static inline void ipu_csi_write(struct ipu_csi *csi, u32 value,
unsigned offset)
{
writel(value, csi->base + offset);
}
/*
* Set mclk division ratio for generating test mode mclk. Only used
* for test generator.
*/
static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
u32 ipu_clk)
{
u32 temp;
int div_ratio;
div_ratio = (ipu_clk / pixel_clk) - 1;
if (div_ratio > 0xFF || div_ratio < 0) {
dev_err(csi->ipu->dev,
"value of pixel_clk extends normal range\n");
return -EINVAL;
}
temp = ipu_csi_read(csi, CSI_SENS_CONF);
temp &= ~CSI_SENS_CONF_DIVRATIO_MASK;
ipu_csi_write(csi, temp | (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT),
CSI_SENS_CONF);
return 0;
}
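/*
 * Worked example (hypothetical clock figures, for illustration only):
 * with a 264 MHz IPU clock and a 27 MHz test-generator pixel clock,
 * div_ratio = 264000000 / 27000000 - 1 = 9 - 1 = 8, which fits the 8-bit
 * DIVRATIO field.  A pixel clock above the IPU clock, or below roughly
 * ipu_clk / 257, would put div_ratio outside 0..0xFF and be rejected
 * with -EINVAL.
 */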
/*
* Find the CSI data format and data width for the given V4L2 media
* bus pixel format code.
*/
static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code,
enum v4l2_mbus_type mbus_type)
{
switch (mbus_code) {
case MEDIA_BUS_FMT_BGR565_2X8_BE:
case MEDIA_BUS_FMT_BGR565_2X8_LE:
case MEDIA_BUS_FMT_RGB565_2X8_BE:
case MEDIA_BUS_FMT_RGB565_2X8_LE:
if (mbus_type == V4L2_MBUS_CSI2_DPHY)
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
else
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RGB565;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
case MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE:
case MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB444;
cfg->mipi_dt = MIPI_DT_RGB444;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555;
cfg->mipi_dt = MIPI_DT_RGB555;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
case MEDIA_BUS_FMT_RGB888_1X24:
case MEDIA_BUS_FMT_BGR888_1X24:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB_YUV444;
cfg->mipi_dt = MIPI_DT_RGB888;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
case MEDIA_BUS_FMT_UYVY8_2X8:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
cfg->mipi_dt = MIPI_DT_YUV422;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
case MEDIA_BUS_FMT_YUYV8_2X8:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
cfg->mipi_dt = MIPI_DT_YUV422;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
case MEDIA_BUS_FMT_UYVY8_1X16:
if (mbus_type == V4L2_MBUS_BT656) {
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
} else {
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->data_width = IPU_CSI_DATA_WIDTH_16;
}
cfg->mipi_dt = MIPI_DT_YUV422;
break;
case MEDIA_BUS_FMT_YUYV8_1X16:
if (mbus_type == V4L2_MBUS_BT656) {
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
} else {
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->data_width = IPU_CSI_DATA_WIDTH_16;
}
cfg->mipi_dt = MIPI_DT_YUV422;
break;
case MEDIA_BUS_FMT_SBGGR8_1X8:
case MEDIA_BUS_FMT_SGBRG8_1X8:
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SRGGB8_1X8:
case MEDIA_BUS_FMT_Y8_1X8:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW8;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE:
case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE:
case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW10;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
case MEDIA_BUS_FMT_SBGGR10_1X10:
case MEDIA_BUS_FMT_SGBRG10_1X10:
case MEDIA_BUS_FMT_SGRBG10_1X10:
case MEDIA_BUS_FMT_SRGGB10_1X10:
case MEDIA_BUS_FMT_Y10_1X10:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW10;
cfg->data_width = IPU_CSI_DATA_WIDTH_10;
break;
case MEDIA_BUS_FMT_SBGGR12_1X12:
case MEDIA_BUS_FMT_SGBRG12_1X12:
case MEDIA_BUS_FMT_SGRBG12_1X12:
case MEDIA_BUS_FMT_SRGGB12_1X12:
case MEDIA_BUS_FMT_Y12_1X12:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW12;
cfg->data_width = IPU_CSI_DATA_WIDTH_12;
break;
case MEDIA_BUS_FMT_JPEG_1X8:
/* TODO */
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_JPEG;
cfg->mipi_dt = MIPI_DT_RAW8;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
default:
return -EINVAL;
}
return 0;
}
/* translate alternate field mode based on given standard */
static inline enum v4l2_field
ipu_csi_translate_field(enum v4l2_field field, v4l2_std_id std)
{
return (field != V4L2_FIELD_ALTERNATE) ? field :
((std & V4L2_STD_525_60) ?
V4L2_FIELD_SEQ_BT : V4L2_FIELD_SEQ_TB);
}
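/*
 * Example (follows directly from the expression above): a source
 * producing V4L2_FIELD_ALTERNATE is translated to V4L2_FIELD_SEQ_BT for
 * 525-line/60 Hz standards (NTSC) and to V4L2_FIELD_SEQ_TB otherwise
 * (e.g. PAL); any other field type is passed through unchanged.
 */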
/*
* Fill a CSI bus config struct from mbus_config and mbus_framefmt.
*/
static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
const struct v4l2_mbus_config *mbus_cfg,
const struct v4l2_mbus_framefmt *mbus_fmt)
{
int ret, is_bt1120;
memset(csicfg, 0, sizeof(*csicfg));
ret = mbus_code_to_bus_cfg(csicfg, mbus_fmt->code, mbus_cfg->type);
if (ret < 0)
return ret;
switch (mbus_cfg->type) {
case V4L2_MBUS_PARALLEL:
csicfg->ext_vsync = 1;
csicfg->vsync_pol = (mbus_cfg->bus.parallel.flags &
V4L2_MBUS_VSYNC_ACTIVE_LOW) ? 1 : 0;
csicfg->hsync_pol = (mbus_cfg->bus.parallel.flags &
V4L2_MBUS_HSYNC_ACTIVE_LOW) ? 1 : 0;
csicfg->pixclk_pol = (mbus_cfg->bus.parallel.flags &
V4L2_MBUS_PCLK_SAMPLE_FALLING) ? 1 : 0;
csicfg->clk_mode = IPU_CSI_CLK_MODE_GATED_CLK;
break;
case V4L2_MBUS_BT656:
csicfg->ext_vsync = 0;
/* UYVY10_1X20 etc. should be supported as well */
is_bt1120 = mbus_fmt->code == MEDIA_BUS_FMT_UYVY8_1X16 ||
mbus_fmt->code == MEDIA_BUS_FMT_YUYV8_1X16;
if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
mbus_fmt->field == V4L2_FIELD_ALTERNATE)
csicfg->clk_mode = is_bt1120 ?
IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR :
IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
else
csicfg->clk_mode = is_bt1120 ?
IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR :
IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
break;
case V4L2_MBUS_CSI2_DPHY:
/*
* MIPI CSI-2 requires non gated clock mode, all other
* parameters are not applicable for MIPI CSI-2 bus.
*/
csicfg->clk_mode = IPU_CSI_CLK_MODE_NONGATED_CLK;
break;
default:
/* will never get here, keep compiler quiet */
break;
}
return 0;
}
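/*
 * Illustrative mapping (restating the switch above, no new policy): a
 * parallel bus with V4L2_MBUS_VSYNC_ACTIVE_LOW and
 * V4L2_MBUS_PCLK_SAMPLE_FALLING ends up with vsync_pol = 1,
 * pixclk_pol = 1 and gated-clock mode, while a BT.656 source carrying
 * MEDIA_BUS_FMT_UYVY8_1X16 with an interlaced field type selects the
 * BT.1120 interlaced SDR clock mode.
 */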
static int
ipu_csi_set_bt_interlaced_codes(struct ipu_csi *csi,
const struct v4l2_mbus_framefmt *infmt,
const struct v4l2_mbus_framefmt *outfmt,
v4l2_std_id std)
{
enum v4l2_field infield, outfield;
bool swap_fields;
/* get translated field type of input and output */
infield = ipu_csi_translate_field(infmt->field, std);
outfield = ipu_csi_translate_field(outfmt->field, std);
/*
* Write the H-V-F codes the CSI will match against the
* incoming data for start/end of active and blanking
* field intervals. If input and output field types are
* sequential but not the same (one is SEQ_BT and the other
* is SEQ_TB), swap the F-bit so that the CSI will capture
* field 1 lines before field 0 lines.
*/
swap_fields = (V4L2_FIELD_IS_SEQUENTIAL(infield) &&
V4L2_FIELD_IS_SEQUENTIAL(outfield) &&
infield != outfield);
if (!swap_fields) {
/*
* Field0BlankEnd = 110, Field0BlankStart = 010
* Field0ActiveEnd = 100, Field0ActiveStart = 000
* Field1BlankEnd = 111, Field1BlankStart = 011
* Field1ActiveEnd = 101, Field1ActiveStart = 001
*/
ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN,
CSI_CCIR_CODE_1);
ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2);
} else {
dev_dbg(csi->ipu->dev, "capture field swap\n");
/* same as above but with F-bit inverted */
ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN,
CSI_CCIR_CODE_1);
ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2);
}
ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
return 0;
}
int ipu_csi_init_interface(struct ipu_csi *csi,
const struct v4l2_mbus_config *mbus_cfg,
const struct v4l2_mbus_framefmt *infmt,
const struct v4l2_mbus_framefmt *outfmt)
{
struct ipu_csi_bus_config cfg;
unsigned long flags;
u32 width, height, data = 0;
v4l2_std_id std;
int ret;
ret = fill_csi_bus_cfg(&cfg, mbus_cfg, infmt);
if (ret < 0)
return ret;
/* set default sensor frame width and height */
width = infmt->width;
height = infmt->height;
if (infmt->field == V4L2_FIELD_ALTERNATE)
height *= 2;
/* Set the CSI_SENS_CONF register remaining fields */
data |= cfg.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
cfg.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT |
cfg.data_pol << CSI_SENS_CONF_DATA_POL_SHIFT |
cfg.vsync_pol << CSI_SENS_CONF_VSYNC_POL_SHIFT |
cfg.hsync_pol << CSI_SENS_CONF_HSYNC_POL_SHIFT |
cfg.pixclk_pol << CSI_SENS_CONF_PIX_CLK_POL_SHIFT |
cfg.ext_vsync << CSI_SENS_CONF_EXT_VSYNC_SHIFT |
cfg.clk_mode << CSI_SENS_CONF_SENS_PRTCL_SHIFT |
cfg.pack_tight << CSI_SENS_CONF_PACK_TIGHT_SHIFT |
cfg.force_eof << CSI_SENS_CONF_FORCE_EOF_SHIFT |
cfg.data_en_pol << CSI_SENS_CONF_DATA_EN_POL_SHIFT;
spin_lock_irqsave(&csi->lock, flags);
ipu_csi_write(csi, data, CSI_SENS_CONF);
/* Set CCIR registers */
switch (cfg.clk_mode) {
case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
ipu_csi_write(csi, 0x40030, CSI_CCIR_CODE_1);
ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
break;
case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
if (width == 720 && height == 480) {
std = V4L2_STD_NTSC;
height = 525;
} else if (width == 720 && height == 576) {
std = V4L2_STD_PAL;
height = 625;
} else {
dev_err(csi->ipu->dev,
"Unsupported interlaced video mode\n");
ret = -EINVAL;
goto out_unlock;
}
ret = ipu_csi_set_bt_interlaced_codes(csi, infmt, outfmt, std);
if (ret)
goto out_unlock;
break;
case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
ipu_csi_write(csi, 0x40030 | CSI_CCIR_ERR_DET_EN,
CSI_CCIR_CODE_1);
ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
break;
case IPU_CSI_CLK_MODE_GATED_CLK:
case IPU_CSI_CLK_MODE_NONGATED_CLK:
ipu_csi_write(csi, 0, CSI_CCIR_CODE_1);
break;
}
/* Setup sensor frame size */
ipu_csi_write(csi, (width - 1) | ((height - 1) << 16),
CSI_SENS_FRM_SIZE);
dev_dbg(csi->ipu->dev, "CSI_SENS_CONF = 0x%08X\n",
ipu_csi_read(csi, CSI_SENS_CONF));
dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
out_unlock:
spin_unlock_irqrestore(&csi->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(ipu_csi_init_interface);
bool ipu_csi_is_interlaced(struct ipu_csi *csi)
{
unsigned long flags;
u32 sensor_protocol;
spin_lock_irqsave(&csi->lock, flags);
sensor_protocol =
(ipu_csi_read(csi, CSI_SENS_CONF) &
CSI_SENS_CONF_SENS_PRTCL_MASK) >>
CSI_SENS_CONF_SENS_PRTCL_SHIFT;
spin_unlock_irqrestore(&csi->lock, flags);
switch (sensor_protocol) {
case IPU_CSI_CLK_MODE_GATED_CLK:
case IPU_CSI_CLK_MODE_NONGATED_CLK:
case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
return false;
case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
return true;
default:
dev_err(csi->ipu->dev,
"CSI %d sensor protocol unsupported\n", csi->id);
return false;
}
}
EXPORT_SYMBOL_GPL(ipu_csi_is_interlaced);
void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w)
{
unsigned long flags;
u32 reg;
spin_lock_irqsave(&csi->lock, flags);
reg = ipu_csi_read(csi, CSI_ACT_FRM_SIZE);
w->width = (reg & 0xFFFF) + 1;
w->height = (reg >> 16 & 0xFFFF) + 1;
reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
w->left = (reg & CSI_HSC_MASK) >> CSI_HSC_SHIFT;
w->top = (reg & CSI_VSC_MASK) >> CSI_VSC_SHIFT;
spin_unlock_irqrestore(&csi->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_csi_get_window);
void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w)
{
unsigned long flags;
u32 reg;
spin_lock_irqsave(&csi->lock, flags);
ipu_csi_write(csi, (w->width - 1) | ((w->height - 1) << 16),
CSI_ACT_FRM_SIZE);
reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
reg &= ~(CSI_HSC_MASK | CSI_VSC_MASK);
reg |= ((w->top << CSI_VSC_SHIFT) | (w->left << CSI_HSC_SHIFT));
ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
spin_unlock_irqrestore(&csi->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_csi_set_window);
void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert)
{
unsigned long flags;
u32 reg;
spin_lock_irqsave(&csi->lock, flags);
reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
reg &= ~(CSI_HORI_DOWNSIZE_EN | CSI_VERT_DOWNSIZE_EN);
reg |= (horiz ? CSI_HORI_DOWNSIZE_EN : 0) |
(vert ? CSI_VERT_DOWNSIZE_EN : 0);
ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
spin_unlock_irqrestore(&csi->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_csi_set_downsize);
void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
u32 r_value, u32 g_value, u32 b_value,
u32 pix_clk)
{
unsigned long flags;
u32 ipu_clk = clk_get_rate(csi->clk_ipu);
u32 temp;
spin_lock_irqsave(&csi->lock, flags);
temp = ipu_csi_read(csi, CSI_TST_CTRL);
if (!active) {
temp &= ~CSI_TEST_GEN_MODE_EN;
ipu_csi_write(csi, temp, CSI_TST_CTRL);
} else {
/* Set sensb_mclk div_ratio */
ipu_csi_set_testgen_mclk(csi, pix_clk, ipu_clk);
temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK |
CSI_TEST_GEN_B_MASK);
temp |= CSI_TEST_GEN_MODE_EN;
temp |= (r_value << CSI_TEST_GEN_R_SHIFT) |
(g_value << CSI_TEST_GEN_G_SHIFT) |
(b_value << CSI_TEST_GEN_B_SHIFT);
ipu_csi_write(csi, temp, CSI_TST_CTRL);
}
spin_unlock_irqrestore(&csi->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_csi_set_test_generator);
int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
struct v4l2_mbus_framefmt *mbus_fmt)
{
struct ipu_csi_bus_config cfg;
unsigned long flags;
u32 temp;
int ret;
if (vc > 3)
return -EINVAL;
ret = mbus_code_to_bus_cfg(&cfg, mbus_fmt->code, V4L2_MBUS_CSI2_DPHY);
if (ret < 0)
return ret;
spin_lock_irqsave(&csi->lock, flags);
temp = ipu_csi_read(csi, CSI_MIPI_DI);
temp &= ~(0xff << (vc * 8));
temp |= (cfg.mipi_dt << (vc * 8));
ipu_csi_write(csi, temp, CSI_MIPI_DI);
spin_unlock_irqrestore(&csi->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_csi_set_mipi_datatype);
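/*
 * Illustrative sketch, not part of the original file: programming the
 * MIPI CSI-2 data type for virtual channel 0 from a media bus code. The
 * helper name is hypothetical; it assumes UYVY8_2X8 is one of the codes
 * handled by mbus_code_to_bus_cfg() in this driver.
 */
static inline int example_csi_set_vc0_uyvy(struct ipu_csi *csi)
{
	struct v4l2_mbus_framefmt fmt = {
		.code = MEDIA_BUS_FMT_UYVY8_2X8,
	};

	return ipu_csi_set_mipi_datatype(csi, 0, &fmt);
}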
int ipu_csi_set_skip_smfc(struct ipu_csi *csi, u32 skip,
u32 max_ratio, u32 id)
{
unsigned long flags;
u32 temp;
if (max_ratio > 5 || id > 3)
return -EINVAL;
spin_lock_irqsave(&csi->lock, flags);
temp = ipu_csi_read(csi, CSI_SKIP);
temp &= ~(CSI_MAX_RATIO_SKIP_SMFC_MASK | CSI_ID_2_SKIP_MASK |
CSI_SKIP_SMFC_MASK);
temp |= (max_ratio << CSI_MAX_RATIO_SKIP_SMFC_SHIFT) |
(id << CSI_ID_2_SKIP_SHIFT) |
(skip << CSI_SKIP_SMFC_SHIFT);
ipu_csi_write(csi, temp, CSI_SKIP);
spin_unlock_irqrestore(&csi->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_csi_set_skip_smfc);
int ipu_csi_set_dest(struct ipu_csi *csi, enum ipu_csi_dest csi_dest)
{
unsigned long flags;
u32 csi_sens_conf, dest;
if (csi_dest == IPU_CSI_DEST_IDMAC)
dest = CSI_DATA_DEST_IDMAC;
else
dest = CSI_DATA_DEST_IC; /* IC or VDIC */
spin_lock_irqsave(&csi->lock, flags);
csi_sens_conf = ipu_csi_read(csi, CSI_SENS_CONF);
csi_sens_conf &= ~CSI_SENS_CONF_DATA_DEST_MASK;
csi_sens_conf |= (dest << CSI_SENS_CONF_DATA_DEST_SHIFT);
ipu_csi_write(csi, csi_sens_conf, CSI_SENS_CONF);
spin_unlock_irqrestore(&csi->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_csi_set_dest);
int ipu_csi_enable(struct ipu_csi *csi)
{
ipu_module_enable(csi->ipu, csi->module);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_csi_enable);
int ipu_csi_disable(struct ipu_csi *csi)
{
ipu_module_disable(csi->ipu, csi->module);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_csi_disable);
struct ipu_csi *ipu_csi_get(struct ipu_soc *ipu, int id)
{
unsigned long flags;
struct ipu_csi *csi, *ret;
if (id > 1)
return ERR_PTR(-EINVAL);
csi = ipu->csi_priv[id];
ret = csi;
spin_lock_irqsave(&csi->lock, flags);
if (csi->inuse) {
ret = ERR_PTR(-EBUSY);
goto unlock;
}
csi->inuse = true;
unlock:
spin_unlock_irqrestore(&csi->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(ipu_csi_get);
void ipu_csi_put(struct ipu_csi *csi)
{
unsigned long flags;
spin_lock_irqsave(&csi->lock, flags);
csi->inuse = false;
spin_unlock_irqrestore(&csi->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_csi_put);
int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
unsigned long base, u32 module, struct clk *clk_ipu)
{
struct ipu_csi *csi;
if (id > 1)
return -ENODEV;
csi = devm_kzalloc(dev, sizeof(*csi), GFP_KERNEL);
if (!csi)
return -ENOMEM;
ipu->csi_priv[id] = csi;
spin_lock_init(&csi->lock);
csi->module = module;
csi->id = id;
csi->clk_ipu = clk_ipu;
csi->base = devm_ioremap(dev, base, PAGE_SIZE);
if (!csi->base)
return -ENOMEM;
dev_dbg(dev, "CSI%d base: 0x%08lx remapped to %p\n",
id, base, csi->base);
csi->ipu = ipu;
return 0;
}
void ipu_csi_exit(struct ipu_soc *ipu, int id)
{
}
void ipu_csi_dump(struct ipu_csi *csi)
{
dev_dbg(csi->ipu->dev, "CSI_SENS_CONF: %08x\n",
ipu_csi_read(csi, CSI_SENS_CONF));
dev_dbg(csi->ipu->dev, "CSI_SENS_FRM_SIZE: %08x\n",
ipu_csi_read(csi, CSI_SENS_FRM_SIZE));
dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE: %08x\n",
ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
dev_dbg(csi->ipu->dev, "CSI_OUT_FRM_CTRL: %08x\n",
ipu_csi_read(csi, CSI_OUT_FRM_CTRL));
dev_dbg(csi->ipu->dev, "CSI_TST_CTRL: %08x\n",
ipu_csi_read(csi, CSI_TST_CTRL));
dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_1: %08x\n",
ipu_csi_read(csi, CSI_CCIR_CODE_1));
dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_2: %08x\n",
ipu_csi_read(csi, CSI_CCIR_CODE_2));
dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_3: %08x\n",
ipu_csi_read(csi, CSI_CCIR_CODE_3));
dev_dbg(csi->ipu->dev, "CSI_MIPI_DI: %08x\n",
ipu_csi_read(csi, CSI_MIPI_DI));
dev_dbg(csi->ipu->dev, "CSI_SKIP: %08x\n",
ipu_csi_read(csi, CSI_SKIP));
}
EXPORT_SYMBOL_GPL(ipu_csi_dump);
| linux-master | drivers/gpu/ipu-v3/ipu-csi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2017 Lucas Stach, Pengutronix
*/
#include <drm/drm_fourcc.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"
#define IPU_PRG_CTL 0x00
#define IPU_PRG_CTL_BYPASS(i) (1 << (0 + i))
#define IPU_PRG_CTL_SOFT_ARID_MASK 0x3
#define IPU_PRG_CTL_SOFT_ARID_SHIFT(i) (8 + i * 2)
#define IPU_PRG_CTL_SOFT_ARID(i, v) ((v & 0x3) << (8 + 2 * i))
#define IPU_PRG_CTL_SO(i) (1 << (16 + i))
#define IPU_PRG_CTL_VFLIP(i) (1 << (19 + i))
#define IPU_PRG_CTL_BLOCK_MODE(i) (1 << (22 + i))
#define IPU_PRG_CTL_CNT_LOAD_EN(i) (1 << (25 + i))
#define IPU_PRG_CTL_SOFTRST (1 << 30)
#define IPU_PRG_CTL_SHADOW_EN (1 << 31)
#define IPU_PRG_STATUS 0x04
#define IPU_PRG_STATUS_BUFFER0_READY(i) (1 << (0 + i * 2))
#define IPU_PRG_STATUS_BUFFER1_READY(i) (1 << (1 + i * 2))
#define IPU_PRG_QOS 0x08
#define IPU_PRG_QOS_ARID_MASK 0xf
#define IPU_PRG_QOS_ARID_SHIFT(i) (0 + i * 4)
#define IPU_PRG_REG_UPDATE 0x0c
#define IPU_PRG_REG_UPDATE_REG_UPDATE (1 << 0)
#define IPU_PRG_STRIDE(i) (0x10 + i * 0x4)
#define IPU_PRG_STRIDE_STRIDE_MASK 0x3fff
#define IPU_PRG_CROP_LINE 0x1c
#define IPU_PRG_THD 0x20
#define IPU_PRG_BADDR(i) (0x24 + i * 0x4)
#define IPU_PRG_OFFSET(i) (0x30 + i * 0x4)
#define IPU_PRG_ILO(i) (0x3c + i * 0x4)
#define IPU_PRG_HEIGHT(i) (0x48 + i * 0x4)
#define IPU_PRG_HEIGHT_PRE_HEIGHT_MASK 0xfff
#define IPU_PRG_HEIGHT_PRE_HEIGHT_SHIFT 0
#define IPU_PRG_HEIGHT_IPU_HEIGHT_MASK 0xfff
#define IPU_PRG_HEIGHT_IPU_HEIGHT_SHIFT 16
struct ipu_prg_channel {
bool enabled;
int used_pre;
};
struct ipu_prg {
struct list_head list;
struct device *dev;
int id;
void __iomem *regs;
struct clk *clk_ipg, *clk_axi;
struct regmap *iomuxc_gpr;
struct ipu_pre *pres[3];
struct ipu_prg_channel chan[3];
};
static DEFINE_MUTEX(ipu_prg_list_mutex);
static LIST_HEAD(ipu_prg_list);
struct ipu_prg *
ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)
{
struct device_node *prg_node = of_parse_phandle(dev->of_node,
name, 0);
struct ipu_prg *prg;
mutex_lock(&ipu_prg_list_mutex);
list_for_each_entry(prg, &ipu_prg_list, list) {
if (prg_node == prg->dev->of_node) {
mutex_unlock(&ipu_prg_list_mutex);
device_link_add(dev, prg->dev,
DL_FLAG_AUTOREMOVE_CONSUMER);
prg->id = ipu_id;
of_node_put(prg_node);
return prg;
}
}
mutex_unlock(&ipu_prg_list_mutex);
of_node_put(prg_node);
return NULL;
}
int ipu_prg_max_active_channels(void)
{
return ipu_pre_get_available_count();
}
EXPORT_SYMBOL_GPL(ipu_prg_max_active_channels);
bool ipu_prg_present(struct ipu_soc *ipu)
{
if (ipu->prg_priv)
return true;
return false;
}
EXPORT_SYMBOL_GPL(ipu_prg_present);
bool ipu_prg_format_supported(struct ipu_soc *ipu, uint32_t format,
uint64_t modifier)
{
const struct drm_format_info *info = drm_format_info(format);
if (info->num_planes != 1)
return false;
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case DRM_FORMAT_MOD_VIVANTE_TILED:
case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
return true;
default:
return false;
}
}
EXPORT_SYMBOL_GPL(ipu_prg_format_supported);
int ipu_prg_enable(struct ipu_soc *ipu)
{
struct ipu_prg *prg = ipu->prg_priv;
if (!prg)
return 0;
return pm_runtime_get_sync(prg->dev);
}
EXPORT_SYMBOL_GPL(ipu_prg_enable);
void ipu_prg_disable(struct ipu_soc *ipu)
{
struct ipu_prg *prg = ipu->prg_priv;
if (!prg)
return;
pm_runtime_put(prg->dev);
}
EXPORT_SYMBOL_GPL(ipu_prg_disable);
/*
 * The channel configuration functions below are not thread safe, as they
* must be only called from the atomic commit path in the DRM driver, which
* is properly serialized.
*/
static int ipu_prg_ipu_to_prg_chan(int ipu_chan)
{
/*
* This isn't clearly documented in the RM, but IPU to PRG channel
* assignment is fixed, as only with this mapping the control signals
* match up.
*/
switch (ipu_chan) {
case IPUV3_CHANNEL_MEM_BG_SYNC:
return 0;
case IPUV3_CHANNEL_MEM_FG_SYNC:
return 1;
case IPUV3_CHANNEL_MEM_DC_SYNC:
return 2;
default:
return -EINVAL;
}
}
static int ipu_prg_get_pre(struct ipu_prg *prg, int prg_chan)
{
int i, ret;
/* channel 0 is special as it is hardwired to one of the PREs */
if (prg_chan == 0) {
ret = ipu_pre_get(prg->pres[0]);
if (ret)
goto fail;
prg->chan[prg_chan].used_pre = 0;
return 0;
}
for (i = 1; i < 3; i++) {
ret = ipu_pre_get(prg->pres[i]);
if (!ret) {
u32 val, mux;
int shift;
prg->chan[prg_chan].used_pre = i;
/* configure the PRE to PRG channel mux */
shift = (i == 1) ? 12 : 14;
mux = (prg->id << 1) | (prg_chan - 1);
regmap_update_bits(prg->iomuxc_gpr, IOMUXC_GPR5,
0x3 << shift, mux << shift);
/* check other mux, must not point to same channel */
shift = (i == 1) ? 14 : 12;
regmap_read(prg->iomuxc_gpr, IOMUXC_GPR5, &val);
if (((val >> shift) & 0x3) == mux) {
regmap_update_bits(prg->iomuxc_gpr, IOMUXC_GPR5,
0x3 << shift,
(mux ^ 0x1) << shift);
}
return 0;
}
}
fail:
dev_err(prg->dev, "could not get PRE for PRG chan %d", prg_chan);
return ret;
}
static void ipu_prg_put_pre(struct ipu_prg *prg, int prg_chan)
{
struct ipu_prg_channel *chan = &prg->chan[prg_chan];
ipu_pre_put(prg->pres[chan->used_pre]);
chan->used_pre = -1;
}
void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan)
{
int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
struct ipu_prg_channel *chan;
u32 val;
if (prg_chan < 0)
return;
chan = &prg->chan[prg_chan];
if (!chan->enabled)
return;
pm_runtime_get_sync(prg->dev);
val = readl(prg->regs + IPU_PRG_CTL);
val |= IPU_PRG_CTL_BYPASS(prg_chan);
writel(val, prg->regs + IPU_PRG_CTL);
val = IPU_PRG_REG_UPDATE_REG_UPDATE;
writel(val, prg->regs + IPU_PRG_REG_UPDATE);
pm_runtime_put(prg->dev);
ipu_prg_put_pre(prg, prg_chan);
chan->enabled = false;
}
EXPORT_SYMBOL_GPL(ipu_prg_channel_disable);
int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
unsigned int axi_id, unsigned int width,
unsigned int height, unsigned int stride,
u32 format, uint64_t modifier, unsigned long *eba)
{
int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
struct ipu_prg_channel *chan;
u32 val;
int ret;
if (prg_chan < 0)
return prg_chan;
chan = &prg->chan[prg_chan];
if (chan->enabled) {
ipu_pre_update(prg->pres[chan->used_pre], *eba);
return 0;
}
ret = ipu_prg_get_pre(prg, prg_chan);
if (ret)
return ret;
ipu_pre_configure(prg->pres[chan->used_pre],
width, height, stride, format, modifier, *eba);
pm_runtime_get_sync(prg->dev);
val = (stride - 1) & IPU_PRG_STRIDE_STRIDE_MASK;
writel(val, prg->regs + IPU_PRG_STRIDE(prg_chan));
val = ((height & IPU_PRG_HEIGHT_PRE_HEIGHT_MASK) <<
IPU_PRG_HEIGHT_PRE_HEIGHT_SHIFT) |
((height & IPU_PRG_HEIGHT_IPU_HEIGHT_MASK) <<
IPU_PRG_HEIGHT_IPU_HEIGHT_SHIFT);
writel(val, prg->regs + IPU_PRG_HEIGHT(prg_chan));
val = ipu_pre_get_baddr(prg->pres[chan->used_pre]);
*eba = val;
writel(val, prg->regs + IPU_PRG_BADDR(prg_chan));
val = readl(prg->regs + IPU_PRG_CTL);
/* config AXI ID */
val &= ~(IPU_PRG_CTL_SOFT_ARID_MASK <<
IPU_PRG_CTL_SOFT_ARID_SHIFT(prg_chan));
val |= IPU_PRG_CTL_SOFT_ARID(prg_chan, axi_id);
/* enable channel */
val &= ~IPU_PRG_CTL_BYPASS(prg_chan);
writel(val, prg->regs + IPU_PRG_CTL);
val = IPU_PRG_REG_UPDATE_REG_UPDATE;
writel(val, prg->regs + IPU_PRG_REG_UPDATE);
/* wait for both double buffers to be filled */
readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val,
(val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) &&
(val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)),
5, 1000);
pm_runtime_put(prg->dev);
chan->enabled = true;
return 0;
}
EXPORT_SYMBOL_GPL(ipu_prg_channel_configure);
bool ipu_prg_channel_configure_pending(struct ipuv3_channel *ipu_chan)
{
int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
struct ipu_prg_channel *chan;
if (prg_chan < 0)
return false;
chan = &prg->chan[prg_chan];
WARN_ON(!chan->enabled);
return ipu_pre_update_pending(prg->pres[chan->used_pre]);
}
EXPORT_SYMBOL_GPL(ipu_prg_channel_configure_pending);
static int ipu_prg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ipu_prg *prg;
u32 val;
int i, ret;
prg = devm_kzalloc(dev, sizeof(*prg), GFP_KERNEL);
if (!prg)
return -ENOMEM;
prg->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(prg->regs))
return PTR_ERR(prg->regs);
prg->clk_ipg = devm_clk_get(dev, "ipg");
if (IS_ERR(prg->clk_ipg))
return PTR_ERR(prg->clk_ipg);
prg->clk_axi = devm_clk_get(dev, "axi");
if (IS_ERR(prg->clk_axi))
return PTR_ERR(prg->clk_axi);
prg->iomuxc_gpr =
syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
if (IS_ERR(prg->iomuxc_gpr))
return PTR_ERR(prg->iomuxc_gpr);
for (i = 0; i < 3; i++) {
prg->pres[i] = ipu_pre_lookup_by_phandle(dev, "fsl,pres", i);
if (!prg->pres[i])
return -EPROBE_DEFER;
}
ret = clk_prepare_enable(prg->clk_ipg);
if (ret)
return ret;
ret = clk_prepare_enable(prg->clk_axi);
if (ret) {
clk_disable_unprepare(prg->clk_ipg);
return ret;
}
/* init to free running mode */
val = readl(prg->regs + IPU_PRG_CTL);
val |= IPU_PRG_CTL_SHADOW_EN;
writel(val, prg->regs + IPU_PRG_CTL);
/* disable address threshold */
writel(0xffffffff, prg->regs + IPU_PRG_THD);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
prg->dev = dev;
platform_set_drvdata(pdev, prg);
mutex_lock(&ipu_prg_list_mutex);
list_add(&prg->list, &ipu_prg_list);
mutex_unlock(&ipu_prg_list_mutex);
return 0;
}
static int ipu_prg_remove(struct platform_device *pdev)
{
struct ipu_prg *prg = platform_get_drvdata(pdev);
mutex_lock(&ipu_prg_list_mutex);
list_del(&prg->list);
mutex_unlock(&ipu_prg_list_mutex);
return 0;
}
#ifdef CONFIG_PM
static int prg_suspend(struct device *dev)
{
struct ipu_prg *prg = dev_get_drvdata(dev);
clk_disable_unprepare(prg->clk_axi);
clk_disable_unprepare(prg->clk_ipg);
return 0;
}
static int prg_resume(struct device *dev)
{
struct ipu_prg *prg = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(prg->clk_ipg);
if (ret)
return ret;
ret = clk_prepare_enable(prg->clk_axi);
if (ret) {
clk_disable_unprepare(prg->clk_ipg);
return ret;
}
return 0;
}
#endif
static const struct dev_pm_ops prg_pm_ops = {
SET_RUNTIME_PM_OPS(prg_suspend, prg_resume, NULL)
};
static const struct of_device_id ipu_prg_dt_ids[] = {
{ .compatible = "fsl,imx6qp-prg", },
{ /* sentinel */ },
};
struct platform_driver ipu_prg_drv = {
.probe = ipu_prg_probe,
.remove = ipu_prg_remove,
.driver = {
.name = "imx-ipu-prg",
.pm = &prg_pm_ops,
.of_match_table = ipu_prg_dt_ids,
},
};
| linux-master | drivers/gpu/ipu-v3/ipu-prg.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2010 Sascha Hauer <[email protected]>
* Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
*/
#include <linux/module.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <drm/drm_fourcc.h>
#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"
static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
{
return readl(ipu->cm_reg + offset);
}
static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
{
writel(value, ipu->cm_reg + offset);
}
int ipu_get_num(struct ipu_soc *ipu)
{
return ipu->id;
}
EXPORT_SYMBOL_GPL(ipu_get_num);
void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync)
{
u32 val;
val = ipu_cm_read(ipu, IPU_SRM_PRI2);
val &= ~DP_S_SRM_MODE_MASK;
val |= sync ? DP_S_SRM_MODE_NEXT_FRAME :
DP_S_SRM_MODE_NOW;
ipu_cm_write(ipu, val, IPU_SRM_PRI2);
}
EXPORT_SYMBOL_GPL(ipu_srm_dp_update);
enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
{
switch (drm_fourcc) {
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_RGBA5551:
case DRM_FORMAT_BGRA5551:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
case DRM_FORMAT_RGB888:
case DRM_FORMAT_BGR888:
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_RGB565_A8:
case DRM_FORMAT_BGR565_A8:
case DRM_FORMAT_RGB888_A8:
case DRM_FORMAT_BGR888_A8:
case DRM_FORMAT_RGBX8888_A8:
case DRM_FORMAT_BGRX8888_A8:
return IPUV3_COLORSPACE_RGB;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
return IPUV3_COLORSPACE_YUV;
default:
return IPUV3_COLORSPACE_UNKNOWN;
}
}
EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);
enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
{
switch (pixelformat) {
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_YUV422P:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
return IPUV3_COLORSPACE_YUV;
case V4L2_PIX_FMT_RGB565:
case V4L2_PIX_FMT_BGR24:
case V4L2_PIX_FMT_RGB24:
case V4L2_PIX_FMT_ABGR32:
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_BGRA32:
case V4L2_PIX_FMT_BGRX32:
case V4L2_PIX_FMT_RGBA32:
case V4L2_PIX_FMT_RGBX32:
case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_XRGB32:
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_BGR32:
return IPUV3_COLORSPACE_RGB;
default:
return IPUV3_COLORSPACE_UNKNOWN;
}
}
EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
bool hflip, bool vflip)
{
u32 r90, vf, hf;
switch (degrees) {
case 0:
vf = hf = r90 = 0;
break;
case 90:
vf = hf = 0;
r90 = 1;
break;
case 180:
vf = hf = 1;
r90 = 0;
break;
case 270:
vf = hf = r90 = 1;
break;
default:
return -EINVAL;
}
hf ^= (u32)hflip;
vf ^= (u32)vflip;
*mode = (enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);
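/*
 * Illustrative sketch, not part of the original file: the rotate mode is
 * packed as (rot90 << 2) | (hflip << 1) | vflip, so a 90 degree rotation
 * combined with a horizontal flip collapses into a single mode value.
 * The helper name is hypothetical and only shows the expected packing.
 */
static inline int example_rot_mode_90_hflip(enum ipu_rotate_mode *mode)
{
	/* degrees = 90 gives r90 = 1, hf = vf = 0; hflip then toggles hf */
	return ipu_degrees_to_rot_mode(mode, 90, true, false);
}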
int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
bool hflip, bool vflip)
{
u32 r90, vf, hf;
r90 = ((u32)mode >> 2) & 0x1;
hf = ((u32)mode >> 1) & 0x1;
vf = ((u32)mode >> 0) & 0x1;
hf ^= (u32)hflip;
vf ^= (u32)vflip;
switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
case IPU_ROTATE_NONE:
*degrees = 0;
break;
case IPU_ROTATE_90_RIGHT:
*degrees = 90;
break;
case IPU_ROTATE_180:
*degrees = 180;
break;
case IPU_ROTATE_90_LEFT:
*degrees = 270;
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);
struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
{
struct ipuv3_channel *channel;
dev_dbg(ipu->dev, "%s %d\n", __func__, num);
if (num > 63)
return ERR_PTR(-ENODEV);
mutex_lock(&ipu->channel_lock);
list_for_each_entry(channel, &ipu->channels, list) {
if (channel->num == num) {
channel = ERR_PTR(-EBUSY);
goto out;
}
}
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
if (!channel) {
channel = ERR_PTR(-ENOMEM);
goto out;
}
channel->num = num;
channel->ipu = ipu;
list_add(&channel->list, &ipu->channels);
out:
mutex_unlock(&ipu->channel_lock);
return channel;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get);
void ipu_idmac_put(struct ipuv3_channel *channel)
{
struct ipu_soc *ipu = channel->ipu;
dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);
mutex_lock(&ipu->channel_lock);
list_del(&channel->list);
kfree(channel);
mutex_unlock(&ipu->channel_lock);
}
EXPORT_SYMBOL_GPL(ipu_idmac_put);
#define idma_mask(ch) (1 << ((ch) & 0x1f))
/*
* This is an undocumented feature, a write one to a channel bit in
* IPU_CHA_CUR_BUF and IPU_CHA_TRIPLE_CUR_BUF will reset the channel's
* internal current buffer pointer so that transfers start from buffer
* 0 on the next channel enable (that's the theory anyway, the imx6 TRM
* only says these are read-only registers). This operation is required
* for channel linking to work correctly, for instance video capture
* pipelines that carry out image rotations will fail after the first
* streaming unless this function is called for each channel before
* re-enabling the channels.
*/
static void __ipu_idmac_reset_current_buffer(struct ipuv3_channel *channel)
{
struct ipu_soc *ipu = channel->ipu;
unsigned int chno = channel->num;
ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_CUR_BUF(chno));
}
void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
bool doublebuffer)
{
struct ipu_soc *ipu = channel->ipu;
unsigned long flags;
u32 reg;
spin_lock_irqsave(&ipu->lock, flags);
reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
if (doublebuffer)
reg |= idma_mask(channel->num);
else
reg &= ~idma_mask(channel->num);
ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));
__ipu_idmac_reset_current_buffer(channel);
spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);
static const struct {
int chnum;
u32 reg;
int shift;
} idmac_lock_en_info[] = {
{ .chnum = 5, .reg = IDMAC_CH_LOCK_EN_1, .shift = 0, },
{ .chnum = 11, .reg = IDMAC_CH_LOCK_EN_1, .shift = 2, },
{ .chnum = 12, .reg = IDMAC_CH_LOCK_EN_1, .shift = 4, },
{ .chnum = 14, .reg = IDMAC_CH_LOCK_EN_1, .shift = 6, },
{ .chnum = 15, .reg = IDMAC_CH_LOCK_EN_1, .shift = 8, },
{ .chnum = 20, .reg = IDMAC_CH_LOCK_EN_1, .shift = 10, },
{ .chnum = 21, .reg = IDMAC_CH_LOCK_EN_1, .shift = 12, },
{ .chnum = 22, .reg = IDMAC_CH_LOCK_EN_1, .shift = 14, },
{ .chnum = 23, .reg = IDMAC_CH_LOCK_EN_1, .shift = 16, },
{ .chnum = 27, .reg = IDMAC_CH_LOCK_EN_1, .shift = 18, },
{ .chnum = 28, .reg = IDMAC_CH_LOCK_EN_1, .shift = 20, },
{ .chnum = 45, .reg = IDMAC_CH_LOCK_EN_2, .shift = 0, },
{ .chnum = 46, .reg = IDMAC_CH_LOCK_EN_2, .shift = 2, },
{ .chnum = 47, .reg = IDMAC_CH_LOCK_EN_2, .shift = 4, },
{ .chnum = 48, .reg = IDMAC_CH_LOCK_EN_2, .shift = 6, },
{ .chnum = 49, .reg = IDMAC_CH_LOCK_EN_2, .shift = 8, },
{ .chnum = 50, .reg = IDMAC_CH_LOCK_EN_2, .shift = 10, },
};
int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
{
struct ipu_soc *ipu = channel->ipu;
unsigned long flags;
u32 bursts, regval;
int i;
switch (num_bursts) {
case 0:
case 1:
bursts = 0x00; /* locking disabled */
break;
case 2:
bursts = 0x01;
break;
case 4:
bursts = 0x02;
break;
case 8:
bursts = 0x03;
break;
default:
return -EINVAL;
}
/*
* IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
* i.MX53 channel arbitration locking doesn't seem to work properly.
* Allow enabling the lock feature on IPUv3H / i.MX6 only.
*/
if (bursts && ipu->ipu_type != IPUV3H)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
if (channel->num == idmac_lock_en_info[i].chnum)
break;
}
if (i >= ARRAY_SIZE(idmac_lock_en_info))
return -EINVAL;
spin_lock_irqsave(&ipu->lock, flags);
regval = ipu_idmac_read(ipu, idmac_lock_en_info[i].reg);
regval &= ~(0x03 << idmac_lock_en_info[i].shift);
regval |= (bursts << idmac_lock_en_info[i].shift);
ipu_idmac_write(ipu, regval, idmac_lock_en_info[i].reg);
spin_unlock_irqrestore(&ipu->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_lock_enable);
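/*
 * Illustrative sketch, not part of the original file: requesting burst
 * locking for a display channel on IPUv3H. Only 0, 1, 2, 4 or 8 bursts
 * are accepted; 8 maps to the 0x03 field value written into
 * IDMAC_CH_LOCK_EN_1/2. The helper name is hypothetical.
 */
static inline int example_lock_bg_channel(struct ipuv3_channel *bg_chan)
{
	return ipu_idmac_lock_enable(bg_chan, 8);
}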
int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
{
unsigned long lock_flags;
u32 val;
spin_lock_irqsave(&ipu->lock, lock_flags);
val = ipu_cm_read(ipu, IPU_DISP_GEN);
if (mask & IPU_CONF_DI0_EN)
val |= IPU_DI0_COUNTER_RELEASE;
if (mask & IPU_CONF_DI1_EN)
val |= IPU_DI1_COUNTER_RELEASE;
ipu_cm_write(ipu, val, IPU_DISP_GEN);
val = ipu_cm_read(ipu, IPU_CONF);
val |= mask;
ipu_cm_write(ipu, val, IPU_CONF);
spin_unlock_irqrestore(&ipu->lock, lock_flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_enable);
int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
{
unsigned long lock_flags;
u32 val;
spin_lock_irqsave(&ipu->lock, lock_flags);
val = ipu_cm_read(ipu, IPU_CONF);
val &= ~mask;
ipu_cm_write(ipu, val, IPU_CONF);
val = ipu_cm_read(ipu, IPU_DISP_GEN);
if (mask & IPU_CONF_DI0_EN)
val &= ~IPU_DI0_COUNTER_RELEASE;
if (mask & IPU_CONF_DI1_EN)
val &= ~IPU_DI1_COUNTER_RELEASE;
ipu_cm_write(ipu, val, IPU_DISP_GEN);
spin_unlock_irqrestore(&ipu->lock, lock_flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_disable);
int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
{
struct ipu_soc *ipu = channel->ipu;
unsigned int chno = channel->num;
return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num)
{
struct ipu_soc *ipu = channel->ipu;
unsigned long flags;
u32 reg = 0;
spin_lock_irqsave(&ipu->lock, flags);
switch (buf_num) {
case 0:
reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num));
break;
case 1:
reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num));
break;
case 2:
reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(channel->num));
break;
}
spin_unlock_irqrestore(&ipu->lock, flags);
return ((reg & idma_mask(channel->num)) != 0);
}
EXPORT_SYMBOL_GPL(ipu_idmac_buffer_is_ready);
void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
struct ipu_soc *ipu = channel->ipu;
unsigned int chno = channel->num;
unsigned long flags;
spin_lock_irqsave(&ipu->lock, flags);
/* Mark buffer as ready. */
if (buf_num == 0)
ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
else
ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
struct ipu_soc *ipu = channel->ipu;
unsigned int chno = channel->num;
unsigned long flags;
spin_lock_irqsave(&ipu->lock, flags);
ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
switch (buf_num) {
case 0:
ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
break;
case 1:
ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
break;
case 2:
ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF2_RDY(chno));
break;
default:
break;
}
ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_clear_buffer);
int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
{
struct ipu_soc *ipu = channel->ipu;
u32 val;
unsigned long flags;
spin_lock_irqsave(&ipu->lock, flags);
val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
val |= idma_mask(channel->num);
ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
spin_unlock_irqrestore(&ipu->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
{
return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
{
struct ipu_soc *ipu = channel->ipu;
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(ms);
while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
idma_mask(channel->num)) {
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
cpu_relax();
}
return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
{
struct ipu_soc *ipu = channel->ipu;
u32 val;
unsigned long flags;
spin_lock_irqsave(&ipu->lock, flags);
/* Disable DMA channel(s) */
val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
val &= ~idma_mask(channel->num);
ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
__ipu_idmac_reset_current_buffer(channel);
/* Set channel buffers NOT to be ready */
ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */
if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
idma_mask(channel->num)) {
ipu_cm_write(ipu, idma_mask(channel->num),
IPU_CHA_BUF0_RDY(channel->num));
}
if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
idma_mask(channel->num)) {
ipu_cm_write(ipu, idma_mask(channel->num),
IPU_CHA_BUF1_RDY(channel->num));
}
ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
/* Reset the double buffer */
val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
val &= ~idma_mask(channel->num);
ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));
spin_unlock_irqrestore(&ipu->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
/*
* The imx6 rev. D TRM says that enabling the WM feature will increase
* a channel's priority. Refer to Table 36-8 Calculated priority value.
* The sub-module that is the sink or source for the channel must enable
* watermark signal for this to take effect (SMFC_WM for instance).
*/
void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable)
{
struct ipu_soc *ipu = channel->ipu;
unsigned long flags;
u32 val;
spin_lock_irqsave(&ipu->lock, flags);
val = ipu_idmac_read(ipu, IDMAC_WM_EN(channel->num));
if (enable)
val |= 1 << (channel->num % 32);
else
val &= ~(1 << (channel->num % 32));
ipu_idmac_write(ipu, val, IDMAC_WM_EN(channel->num));
spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_watermark);
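/*
 * Illustrative sketch, not part of the original file: raising a capture
 * channel's arbitration priority via the watermark feature. As noted in
 * the comment above, the sink/source sub-module must also enable its own
 * watermark signal for this to take effect. The helper name is hypothetical.
 */
static inline void example_boost_capture_priority(struct ipuv3_channel *chan)
{
	ipu_idmac_enable_watermark(chan, true);
}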
static int ipu_memory_reset(struct ipu_soc *ipu)
{
unsigned long timeout;
ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
timeout = jiffies + msecs_to_jiffies(1000);
while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
if (time_after(jiffies, timeout))
return -ETIME;
cpu_relax();
}
return 0;
}
/*
* Set the source mux for the given CSI. Selects either parallel or
* MIPI CSI2 sources.
*/
void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
{
unsigned long flags;
u32 val, mask;
mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
IPU_CONF_CSI0_DATA_SOURCE;
spin_lock_irqsave(&ipu->lock, flags);
val = ipu_cm_read(ipu, IPU_CONF);
if (mipi_csi2)
val |= mask;
else
val &= ~mask;
ipu_cm_write(ipu, val, IPU_CONF);
spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);
/*
* Set the source mux for the IC. Selects either CSI[01] or the VDI.
*/
void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&ipu->lock, flags);
val = ipu_cm_read(ipu, IPU_CONF);
if (vdi)
val |= IPU_CONF_IC_INPUT;
else
val &= ~IPU_CONF_IC_INPUT;
if (csi_id == 1)
val |= IPU_CONF_CSI_SEL;
else
val &= ~IPU_CONF_CSI_SEL;
ipu_cm_write(ipu, val, IPU_CONF);
spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
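/*
 * Illustrative sketch, not part of the original file: routing the VDI
 * output into the IC for motion compensated deinterlacing. The csi_id
 * argument still drives the CSI_SEL bit; 0 is used here purely as a
 * placeholder. The helper name is hypothetical.
 */
static inline void example_route_vdi_to_ic(struct ipu_soc *ipu)
{
	ipu_set_ic_src_mux(ipu, 0, true);
}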
/* Frame Synchronization Unit Channel Linking */
struct fsu_link_reg_info {
int chno;
u32 reg;
u32 mask;
u32 val;
};
struct fsu_link_info {
struct fsu_link_reg_info src;
struct fsu_link_reg_info sink;
};
static const struct fsu_link_info fsu_link_info[] = {
{
.src = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
.sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
}, {
.src = { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
.sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
}, {
.src = { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
.sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
}, {
.src = { IPUV3_CHANNEL_CSI_DIRECT, 0 },
.sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
},
};
static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
{
int i;
for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
if (src == fsu_link_info[i].src.chno &&
sink == fsu_link_info[i].sink.chno)
return &fsu_link_info[i];
}
return NULL;
}
/*
* Links a source channel to a sink channel in the FSU.
*/
int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
{
const struct fsu_link_info *link;
u32 src_reg, sink_reg;
unsigned long flags;
link = find_fsu_link_info(src_ch, sink_ch);
if (!link)
return -EINVAL;
spin_lock_irqsave(&ipu->lock, flags);
if (link->src.mask) {
src_reg = ipu_cm_read(ipu, link->src.reg);
src_reg &= ~link->src.mask;
src_reg |= link->src.val;
ipu_cm_write(ipu, src_reg, link->src.reg);
}
if (link->sink.mask) {
sink_reg = ipu_cm_read(ipu, link->sink.reg);
sink_reg &= ~link->sink.mask;
sink_reg |= link->sink.val;
ipu_cm_write(ipu, sink_reg, link->sink.reg);
}
spin_unlock_irqrestore(&ipu->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_fsu_link);
/*
* Unlinks source and sink channels in the FSU.
*/
int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
{
const struct fsu_link_info *link;
u32 src_reg, sink_reg;
unsigned long flags;
link = find_fsu_link_info(src_ch, sink_ch);
if (!link)
return -EINVAL;
spin_lock_irqsave(&ipu->lock, flags);
if (link->src.mask) {
src_reg = ipu_cm_read(ipu, link->src.reg);
src_reg &= ~link->src.mask;
ipu_cm_write(ipu, src_reg, link->src.reg);
}
if (link->sink.mask) {
sink_reg = ipu_cm_read(ipu, link->sink.reg);
sink_reg &= ~link->sink.mask;
ipu_cm_write(ipu, sink_reg, link->sink.reg);
}
spin_unlock_irqrestore(&ipu->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_fsu_unlink);
/* Link IDMAC channels in the FSU */
int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
{
return ipu_fsu_link(src->ipu, src->num, sink->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_link);
/* Unlink IDMAC channels in the FSU */
int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
{
return ipu_fsu_unlink(src->ipu, src->num, sink->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_unlink);
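/*
 * Illustrative sketch, not part of the original file: linking the
 * viewfinder preprocessor output channel to the rotator input channel,
 * one of the pairs listed in fsu_link_info[] above. Both channel pointers
 * are assumed to come from ipu_idmac_get(); the helper name is hypothetical.
 */
static inline int example_link_vf_to_rotator(struct ipuv3_channel *prp_vf_out,
					     struct ipuv3_channel *rot_in)
{
	/* returns -EINVAL unless the pair matches a known FSU link */
	return ipu_idmac_link(prp_vf_out, rot_in);
}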
struct ipu_devtype {
const char *name;
unsigned long cm_ofs;
unsigned long cpmem_ofs;
unsigned long srm_ofs;
unsigned long tpm_ofs;
unsigned long csi0_ofs;
unsigned long csi1_ofs;
unsigned long ic_ofs;
unsigned long disp0_ofs;
unsigned long disp1_ofs;
unsigned long dc_tmpl_ofs;
unsigned long vdi_ofs;
enum ipuv3_type type;
};
static struct ipu_devtype ipu_type_imx51 = {
.name = "IPUv3EX",
.cm_ofs = 0x1e000000,
.cpmem_ofs = 0x1f000000,
.srm_ofs = 0x1f040000,
.tpm_ofs = 0x1f060000,
.csi0_ofs = 0x1e030000,
.csi1_ofs = 0x1e038000,
.ic_ofs = 0x1e020000,
.disp0_ofs = 0x1e040000,
.disp1_ofs = 0x1e048000,
.dc_tmpl_ofs = 0x1f080000,
.vdi_ofs = 0x1e068000,
.type = IPUV3EX,
};
static struct ipu_devtype ipu_type_imx53 = {
.name = "IPUv3M",
.cm_ofs = 0x06000000,
.cpmem_ofs = 0x07000000,
.srm_ofs = 0x07040000,
.tpm_ofs = 0x07060000,
.csi0_ofs = 0x06030000,
.csi1_ofs = 0x06038000,
.ic_ofs = 0x06020000,
.disp0_ofs = 0x06040000,
.disp1_ofs = 0x06048000,
.dc_tmpl_ofs = 0x07080000,
.vdi_ofs = 0x06068000,
.type = IPUV3M,
};
static struct ipu_devtype ipu_type_imx6q = {
.name = "IPUv3H",
.cm_ofs = 0x00200000,
.cpmem_ofs = 0x00300000,
.srm_ofs = 0x00340000,
.tpm_ofs = 0x00360000,
.csi0_ofs = 0x00230000,
.csi1_ofs = 0x00238000,
.ic_ofs = 0x00220000,
.disp0_ofs = 0x00240000,
.disp1_ofs = 0x00248000,
.dc_tmpl_ofs = 0x00380000,
.vdi_ofs = 0x00268000,
.type = IPUV3H,
};
static const struct of_device_id imx_ipu_dt_ids[] = {
{ .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
{ .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
{ .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
{ .compatible = "fsl,imx6qp-ipu", .data = &ipu_type_imx6q, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);
static int ipu_submodules_init(struct ipu_soc *ipu,
struct platform_device *pdev, unsigned long ipu_base,
struct clk *ipu_clk)
{
char *unit;
int ret;
struct device *dev = &pdev->dev;
const struct ipu_devtype *devtype = ipu->devtype;
ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
if (ret) {
unit = "cpmem";
goto err_cpmem;
}
ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
IPU_CONF_CSI0_EN, ipu_clk);
if (ret) {
unit = "csi0";
goto err_csi_0;
}
ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
IPU_CONF_CSI1_EN, ipu_clk);
if (ret) {
unit = "csi1";
goto err_csi_1;
}
ret = ipu_ic_init(ipu, dev,
ipu_base + devtype->ic_ofs,
ipu_base + devtype->tpm_ofs);
if (ret) {
unit = "ic";
goto err_ic;
}
ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
IPU_CONF_IC_INPUT);
if (ret) {
unit = "vdi";
goto err_vdi;
}
ret = ipu_image_convert_init(ipu, dev);
if (ret) {
unit = "image_convert";
goto err_image_convert;
}
ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
IPU_CONF_DI0_EN, ipu_clk);
if (ret) {
unit = "di0";
goto err_di_0;
}
ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
IPU_CONF_DI1_EN, ipu_clk);
if (ret) {
unit = "di1";
goto err_di_1;
}
ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
if (ret) {
unit = "dc_template";
goto err_dc;
}
ret = ipu_dmfc_init(ipu, dev, ipu_base +
devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
if (ret) {
unit = "dmfc";
goto err_dmfc;
}
ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
if (ret) {
unit = "dp";
goto err_dp;
}
ret = ipu_smfc_init(ipu, dev, ipu_base +
devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
if (ret) {
unit = "smfc";
goto err_smfc;
}
return 0;
err_smfc:
ipu_dp_exit(ipu);
err_dp:
ipu_dmfc_exit(ipu);
err_dmfc:
ipu_dc_exit(ipu);
err_dc:
ipu_di_exit(ipu, 1);
err_di_1:
ipu_di_exit(ipu, 0);
err_di_0:
ipu_image_convert_exit(ipu);
err_image_convert:
ipu_vdi_exit(ipu);
err_vdi:
ipu_ic_exit(ipu);
err_ic:
ipu_csi_exit(ipu, 1);
err_csi_1:
ipu_csi_exit(ipu, 0);
err_csi_0:
ipu_cpmem_exit(ipu);
err_cpmem:
dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
return ret;
}
static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
{
unsigned long status;
int i, bit;
for (i = 0; i < num_regs; i++) {
status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
for_each_set_bit(bit, &status, 32)
generic_handle_domain_irq(ipu->domain,
regs[i] * 32 + bit);
}
}
static void ipu_irq_handler(struct irq_desc *desc)
{
struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
chained_irq_enter(chip, desc);
ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
chained_irq_exit(chip, desc);
}
static void ipu_err_irq_handler(struct irq_desc *desc)
{
struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
static const int int_reg[] = { 4, 5, 8, 9};
chained_irq_enter(chip, desc);
ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
chained_irq_exit(chip, desc);
}
int ipu_map_irq(struct ipu_soc *ipu, int irq)
{
int virq;
virq = irq_linear_revmap(ipu->domain, irq);
if (!virq)
virq = irq_create_mapping(ipu->domain, irq);
return virq;
}
EXPORT_SYMBOL_GPL(ipu_map_irq);
int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
enum ipu_channel_irq irq_type)
{
return ipu_map_irq(ipu, irq_type + channel->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
static void ipu_submodules_exit(struct ipu_soc *ipu)
{
ipu_smfc_exit(ipu);
ipu_dp_exit(ipu);
ipu_dmfc_exit(ipu);
ipu_dc_exit(ipu);
ipu_di_exit(ipu, 1);
ipu_di_exit(ipu, 0);
ipu_image_convert_exit(ipu);
ipu_vdi_exit(ipu);
ipu_ic_exit(ipu);
ipu_csi_exit(ipu, 1);
ipu_csi_exit(ipu, 0);
ipu_cpmem_exit(ipu);
}
static int platform_remove_devices_fn(struct device *dev, void *unused)
{
struct platform_device *pdev = to_platform_device(dev);
platform_device_unregister(pdev);
return 0;
}
static void platform_device_unregister_children(struct platform_device *pdev)
{
device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
}
struct ipu_platform_reg {
struct ipu_client_platformdata pdata;
const char *name;
};
/* These must be in the order of the corresponding device tree port nodes */
static struct ipu_platform_reg client_reg[] = {
{
.pdata = {
.csi = 0,
.dma[0] = IPUV3_CHANNEL_CSI0,
.dma[1] = -EINVAL,
},
.name = "imx-ipuv3-csi",
}, {
.pdata = {
.csi = 1,
.dma[0] = IPUV3_CHANNEL_CSI1,
.dma[1] = -EINVAL,
},
.name = "imx-ipuv3-csi",
}, {
.pdata = {
.di = 0,
.dc = 5,
.dp = IPU_DP_FLOW_SYNC_BG,
.dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
.dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
},
.name = "imx-ipuv3-crtc",
}, {
.pdata = {
.di = 1,
.dc = 1,
.dp = -EINVAL,
.dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
.dma[1] = -EINVAL,
},
.name = "imx-ipuv3-crtc",
},
};
static DEFINE_MUTEX(ipu_client_id_mutex);
static int ipu_client_id;
static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
{
struct device *dev = ipu->dev;
unsigned i;
int id, ret;
mutex_lock(&ipu_client_id_mutex);
id = ipu_client_id;
ipu_client_id += ARRAY_SIZE(client_reg);
mutex_unlock(&ipu_client_id_mutex);
for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
struct ipu_platform_reg *reg = &client_reg[i];
struct platform_device *pdev;
struct device_node *of_node;
/* Associate subdevice with the corresponding port node */
of_node = of_graph_get_port_by_id(dev->of_node, i);
if (!of_node) {
dev_info(dev,
"no port@%d node in %pOF, not using %s%d\n",
i, dev->of_node,
(i / 2) ? "DI" : "CSI", i % 2);
continue;
}
pdev = platform_device_alloc(reg->name, id++);
if (!pdev) {
ret = -ENOMEM;
of_node_put(of_node);
goto err_register;
}
pdev->dev.parent = dev;
reg->pdata.of_node = of_node;
		ret = platform_device_add_data(pdev, &reg->pdata,
sizeof(reg->pdata));
if (!ret)
ret = platform_device_add(pdev);
if (ret) {
platform_device_put(pdev);
goto err_register;
}
}
return 0;
err_register:
platform_device_unregister_children(to_platform_device(dev));
return ret;
}
static int ipu_irq_init(struct ipu_soc *ipu)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
unsigned long unused[IPU_NUM_IRQS / 32] = {
0x400100d0, 0xffe000fd,
0x400100d0, 0xffe000fd,
0x400100d0, 0xffe000fd,
0x4077ffff, 0xffe7e1fd,
0x23fffffe, 0x8880fff0,
0xf98fe7d0, 0xfff81fff,
0x400100d0, 0xffe000fd,
0x00000000,
};
int ret, i;
ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
&irq_generic_chip_ops, ipu);
if (!ipu->domain) {
dev_err(ipu->dev, "failed to add irq domain\n");
return -ENODEV;
}
ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
handle_level_irq, 0, 0, 0);
if (ret < 0) {
dev_err(ipu->dev, "failed to alloc generic irq chips\n");
irq_domain_remove(ipu->domain);
return ret;
}
/* Mask and clear all interrupts */
for (i = 0; i < IPU_NUM_IRQS; i += 32) {
ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
ipu_cm_write(ipu, ~unused[i / 32], IPU_INT_STAT(i / 32));
}
for (i = 0; i < IPU_NUM_IRQS; i += 32) {
gc = irq_get_domain_generic_chip(ipu->domain, i);
gc->reg_base = ipu->cm_reg;
gc->unused = unused[i / 32];
ct = gc->chip_types;
ct->chip.irq_ack = irq_gc_ack_set_bit;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->regs.ack = IPU_INT_STAT(i / 32);
ct->regs.mask = IPU_INT_CTRL(i / 32);
}
irq_set_chained_handler_and_data(ipu->irq_sync, ipu_irq_handler, ipu);
irq_set_chained_handler_and_data(ipu->irq_err, ipu_err_irq_handler,
ipu);
return 0;
}
static void ipu_irq_exit(struct ipu_soc *ipu)
{
int i, irq;
irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
irq_set_chained_handler_and_data(ipu->irq_sync, NULL, NULL);
/* TODO: remove irq_domain_generic_chips */
for (i = 0; i < IPU_NUM_IRQS; i++) {
irq = irq_linear_revmap(ipu->domain, i);
if (irq)
irq_dispose_mapping(irq);
}
irq_domain_remove(ipu->domain);
}
void ipu_dump(struct ipu_soc *ipu)
{
int i;
dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n",
ipu_cm_read(ipu, IPU_CONF));
dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n",
ipu_idmac_read(ipu, IDMAC_CONF));
dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
for (i = 0; i < 15; i++)
dev_dbg(ipu->dev, "IPU_INT_CTRL(%d) = \t%08X\n", i,
ipu_cm_read(ipu, IPU_INT_CTRL(i)));
}
EXPORT_SYMBOL_GPL(ipu_dump);
static int ipu_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct ipu_soc *ipu;
struct resource *res;
unsigned long ipu_base;
int ret, irq_sync, irq_err;
const struct ipu_devtype *devtype;
devtype = of_device_get_match_data(&pdev->dev);
if (!devtype)
return -EINVAL;
irq_sync = platform_get_irq(pdev, 0);
irq_err = platform_get_irq(pdev, 1);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
irq_sync, irq_err);
if (!res || irq_sync < 0 || irq_err < 0)
return -ENODEV;
ipu_base = res->start;
ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
if (!ipu)
return -ENODEV;
ipu->id = of_alias_get_id(np, "ipu");
if (ipu->id < 0)
ipu->id = 0;
if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
IS_ENABLED(CONFIG_DRM)) {
ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev,
"fsl,prg", ipu->id);
if (!ipu->prg_priv)
return -EPROBE_DEFER;
}
ipu->devtype = devtype;
ipu->ipu_type = devtype->type;
spin_lock_init(&ipu->lock);
mutex_init(&ipu->channel_lock);
INIT_LIST_HEAD(&ipu->channels);
dev_dbg(&pdev->dev, "cm_reg: 0x%08lx\n",
ipu_base + devtype->cm_ofs);
dev_dbg(&pdev->dev, "idmac: 0x%08lx\n",
ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
dev_dbg(&pdev->dev, "cpmem: 0x%08lx\n",
ipu_base + devtype->cpmem_ofs);
dev_dbg(&pdev->dev, "csi0: 0x%08lx\n",
ipu_base + devtype->csi0_ofs);
dev_dbg(&pdev->dev, "csi1: 0x%08lx\n",
ipu_base + devtype->csi1_ofs);
dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
ipu_base + devtype->ic_ofs);
dev_dbg(&pdev->dev, "disp0: 0x%08lx\n",
ipu_base + devtype->disp0_ofs);
dev_dbg(&pdev->dev, "disp1: 0x%08lx\n",
ipu_base + devtype->disp1_ofs);
dev_dbg(&pdev->dev, "srm: 0x%08lx\n",
ipu_base + devtype->srm_ofs);
dev_dbg(&pdev->dev, "tpm: 0x%08lx\n",
ipu_base + devtype->tpm_ofs);
dev_dbg(&pdev->dev, "dc: 0x%08lx\n",
ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
dev_dbg(&pdev->dev, "dmfc: 0x%08lx\n",
ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
dev_dbg(&pdev->dev, "vdi: 0x%08lx\n",
ipu_base + devtype->vdi_ofs);
ipu->cm_reg = devm_ioremap(&pdev->dev,
ipu_base + devtype->cm_ofs, PAGE_SIZE);
ipu->idmac_reg = devm_ioremap(&pdev->dev,
ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
PAGE_SIZE);
if (!ipu->cm_reg || !ipu->idmac_reg)
return -ENOMEM;
ipu->clk = devm_clk_get(&pdev->dev, "bus");
if (IS_ERR(ipu->clk)) {
ret = PTR_ERR(ipu->clk);
dev_err(&pdev->dev, "clk_get failed with %d", ret);
return ret;
}
platform_set_drvdata(pdev, ipu);
ret = clk_prepare_enable(ipu->clk);
if (ret) {
dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
return ret;
}
ipu->dev = &pdev->dev;
ipu->irq_sync = irq_sync;
ipu->irq_err = irq_err;
ret = device_reset(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "failed to reset: %d\n", ret);
goto out_failed_reset;
}
ret = ipu_memory_reset(ipu);
if (ret)
goto out_failed_reset;
ret = ipu_irq_init(ipu);
if (ret)
goto out_failed_irq;
/* Set MCU_T to divide MCU access window into 2 */
ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
IPU_DISP_GEN);
ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
if (ret)
goto failed_submodules_init;
ret = ipu_add_client_devices(ipu, ipu_base);
if (ret) {
dev_err(&pdev->dev, "adding client devices failed with %d\n",
ret);
goto failed_add_clients;
}
dev_info(&pdev->dev, "%s probed\n", devtype->name);
return 0;
failed_add_clients:
ipu_submodules_exit(ipu);
failed_submodules_init:
ipu_irq_exit(ipu);
out_failed_irq:
out_failed_reset:
clk_disable_unprepare(ipu->clk);
return ret;
}
static int ipu_remove(struct platform_device *pdev)
{
struct ipu_soc *ipu = platform_get_drvdata(pdev);
platform_device_unregister_children(pdev);
ipu_submodules_exit(ipu);
ipu_irq_exit(ipu);
clk_disable_unprepare(ipu->clk);
return 0;
}
static struct platform_driver imx_ipu_driver = {
.driver = {
.name = "imx-ipuv3",
.of_match_table = imx_ipu_dt_ids,
},
.probe = ipu_probe,
.remove = ipu_remove,
};
static struct platform_driver * const drivers[] = {
#if IS_ENABLED(CONFIG_DRM)
&ipu_pre_drv,
&ipu_prg_drv,
#endif
&imx_ipu_driver,
};
static int __init imx_ipu_init(void)
{
return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(imx_ipu_init);
static void __exit imx_ipu_exit(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(imx_ipu_exit);
MODULE_ALIAS("platform:imx-ipuv3");
MODULE_DESCRIPTION("i.MX IPU v3 driver");
MODULE_AUTHOR("Sascha Hauer <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/ipu-v3/ipu-common.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2010 Sascha Hauer <[email protected]>
* Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
*/
#include <linux/export.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"
#define DMFC_RD_CHAN 0x0000
#define DMFC_WR_CHAN 0x0004
#define DMFC_WR_CHAN_DEF 0x0008
#define DMFC_DP_CHAN 0x000c
#define DMFC_DP_CHAN_DEF 0x0010
#define DMFC_GENERAL1 0x0014
#define DMFC_GENERAL2 0x0018
#define DMFC_IC_CTRL 0x001c
#define DMFC_WR_CHAN_ALT 0x0020
#define DMFC_WR_CHAN_DEF_ALT 0x0024
#define DMFC_DP_CHAN_ALT 0x0028
#define DMFC_DP_CHAN_DEF_ALT 0x002c
#define DMFC_GENERAL1_ALT 0x0030
#define DMFC_STAT 0x0034
#define DMFC_WR_CHAN_1_28 0
#define DMFC_WR_CHAN_2_41 8
#define DMFC_WR_CHAN_1C_42 16
#define DMFC_WR_CHAN_2C_43 24
#define DMFC_DP_CHAN_5B_23 0
#define DMFC_DP_CHAN_5F_27 8
#define DMFC_DP_CHAN_6B_24 16
#define DMFC_DP_CHAN_6F_29 24
struct dmfc_channel_data {
int ipu_channel;
unsigned long channel_reg;
unsigned long shift;
unsigned eot_shift;
unsigned max_fifo_lines;
};
static const struct dmfc_channel_data dmfcdata[] = {
{
.ipu_channel = IPUV3_CHANNEL_MEM_BG_SYNC,
.channel_reg = DMFC_DP_CHAN,
.shift = DMFC_DP_CHAN_5B_23,
.eot_shift = 20,
.max_fifo_lines = 3,
}, {
.ipu_channel = 24,
.channel_reg = DMFC_DP_CHAN,
.shift = DMFC_DP_CHAN_6B_24,
.eot_shift = 22,
.max_fifo_lines = 1,
}, {
.ipu_channel = IPUV3_CHANNEL_MEM_FG_SYNC,
.channel_reg = DMFC_DP_CHAN,
.shift = DMFC_DP_CHAN_5F_27,
.eot_shift = 21,
.max_fifo_lines = 2,
}, {
.ipu_channel = IPUV3_CHANNEL_MEM_DC_SYNC,
.channel_reg = DMFC_WR_CHAN,
.shift = DMFC_WR_CHAN_1_28,
.eot_shift = 16,
.max_fifo_lines = 2,
}, {
.ipu_channel = 29,
.channel_reg = DMFC_DP_CHAN,
.shift = DMFC_DP_CHAN_6F_29,
.eot_shift = 23,
.max_fifo_lines = 1,
},
};
#define DMFC_NUM_CHANNELS ARRAY_SIZE(dmfcdata)
struct ipu_dmfc_priv;
struct dmfc_channel {
unsigned slots;
struct ipu_soc *ipu;
struct ipu_dmfc_priv *priv;
const struct dmfc_channel_data *data;
};
struct ipu_dmfc_priv {
struct ipu_soc *ipu;
struct device *dev;
struct dmfc_channel channels[DMFC_NUM_CHANNELS];
struct mutex mutex;
void __iomem *base;
int use_count;
};
int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc)
{
struct ipu_dmfc_priv *priv = dmfc->priv;
mutex_lock(&priv->mutex);
if (!priv->use_count)
ipu_module_enable(priv->ipu, IPU_CONF_DMFC_EN);
priv->use_count++;
mutex_unlock(&priv->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel);
void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
{
struct ipu_dmfc_priv *priv = dmfc->priv;
mutex_lock(&priv->mutex);
priv->use_count--;
if (!priv->use_count)
ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN);
if (priv->use_count < 0)
priv->use_count = 0;
mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel);
void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
{
struct ipu_dmfc_priv *priv = dmfc->priv;
u32 dmfc_gen1;
mutex_lock(&priv->mutex);
dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
dmfc_gen1 |= 1 << dmfc->data->eot_shift;
else
dmfc_gen1 &= ~(1 << dmfc->data->eot_shift);
writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL_GPL(ipu_dmfc_config_wait4eot);
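/*
 * Illustrative sketch, not part of the original file: for a channel given
 * 2 slots (as done in ipu_dmfc_init() below) the FIFO depth works out to
 * 2 * 64 * 4 = 512 pixels, so at a width of 1920 it holds 512 / 1920 = 0
 * full lines and the wait-for-EOT bit stays cleared. The helper name is
 * hypothetical.
 */
static inline void example_config_fullhd_wait4eot(struct dmfc_channel *dmfc)
{
	ipu_dmfc_config_wait4eot(dmfc, 1920);
}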
struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
{
struct ipu_dmfc_priv *priv = ipu->dmfc_priv;
int i;
for (i = 0; i < DMFC_NUM_CHANNELS; i++)
if (dmfcdata[i].ipu_channel == ipu_channel)
return &priv->channels[i];
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(ipu_dmfc_get);
void ipu_dmfc_put(struct dmfc_channel *dmfc)
{
}
EXPORT_SYMBOL_GPL(ipu_dmfc_put);
int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
struct clk *ipu_clk)
{
struct ipu_dmfc_priv *priv;
int i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base = devm_ioremap(dev, base, PAGE_SIZE);
if (!priv->base)
return -ENOMEM;
priv->dev = dev;
priv->ipu = ipu;
mutex_init(&priv->mutex);
ipu->dmfc_priv = priv;
for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
priv->channels[i].priv = priv;
priv->channels[i].ipu = ipu;
priv->channels[i].data = &dmfcdata[i];
if (dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC ||
dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_FG_SYNC ||
dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_DC_SYNC)
priv->channels[i].slots = 2;
}
writel(0x00000050, priv->base + DMFC_WR_CHAN);
writel(0x00005654, priv->base + DMFC_DP_CHAN);
writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF);
writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF);
writel(0x00000003, priv->base + DMFC_GENERAL1);
return 0;
}
void ipu_dmfc_exit(struct ipu_soc *ipu)
{
}
| linux-master | drivers/gpu/ipu-v3/ipu-dmfc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2016 Mentor Graphics Inc.
* Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
*/
#include <linux/io.h>
#include "ipu-prv.h"
struct ipu_vdi {
void __iomem *base;
u32 module;
spinlock_t lock;
int use_count;
struct ipu_soc *ipu;
};
/* VDI Register Offsets */
#define VDI_FSIZE 0x0000
#define VDI_C 0x0004
/* VDI Register Fields */
#define VDI_C_CH_420 (0 << 1)
#define VDI_C_CH_422 (1 << 1)
#define VDI_C_MOT_SEL_MASK (0x3 << 2)
#define VDI_C_MOT_SEL_FULL (2 << 2)
#define VDI_C_MOT_SEL_LOW (1 << 2)
#define VDI_C_MOT_SEL_MED (0 << 2)
#define VDI_C_BURST_SIZE1_4 (3 << 4)
#define VDI_C_BURST_SIZE2_4 (3 << 8)
#define VDI_C_BURST_SIZE3_4 (3 << 12)
#define VDI_C_BURST_SIZE_MASK 0xF
#define VDI_C_BURST_SIZE1_OFFSET 4
#define VDI_C_BURST_SIZE2_OFFSET 8
#define VDI_C_BURST_SIZE3_OFFSET 12
#define VDI_C_VWM1_SET_1 (0 << 16)
#define VDI_C_VWM1_SET_2 (1 << 16)
#define VDI_C_VWM1_CLR_2 (1 << 19)
#define VDI_C_VWM3_SET_1 (0 << 22)
#define VDI_C_VWM3_SET_2 (1 << 22)
#define VDI_C_VWM3_CLR_2 (1 << 25)
#define VDI_C_TOP_FIELD_MAN_1 (1 << 30)
#define VDI_C_TOP_FIELD_AUTO_1 (1 << 31)
static inline u32 ipu_vdi_read(struct ipu_vdi *vdi, unsigned int offset)
{
return readl(vdi->base + offset);
}
static inline void ipu_vdi_write(struct ipu_vdi *vdi, u32 value,
unsigned int offset)
{
writel(value, vdi->base + offset);
}
void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field)
{
bool top_field_0 = false;
unsigned long flags;
u32 reg;
switch (field) {
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_SEQ_TB:
case V4L2_FIELD_TOP:
top_field_0 = true;
break;
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_SEQ_BT:
case V4L2_FIELD_BOTTOM:
top_field_0 = false;
break;
default:
top_field_0 = (std & V4L2_STD_525_60) ? true : false;
break;
}
spin_lock_irqsave(&vdi->lock, flags);
reg = ipu_vdi_read(vdi, VDI_C);
if (top_field_0)
reg &= ~(VDI_C_TOP_FIELD_MAN_1 | VDI_C_TOP_FIELD_AUTO_1);
else
reg |= VDI_C_TOP_FIELD_MAN_1 | VDI_C_TOP_FIELD_AUTO_1;
ipu_vdi_write(vdi, reg, VDI_C);
spin_unlock_irqrestore(&vdi->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_vdi_set_field_order);
void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel)
{
unsigned long flags;
u32 reg;
spin_lock_irqsave(&vdi->lock, flags);
reg = ipu_vdi_read(vdi, VDI_C);
reg &= ~VDI_C_MOT_SEL_MASK;
switch (motion_sel) {
case MED_MOTION:
reg |= VDI_C_MOT_SEL_MED;
break;
case HIGH_MOTION:
reg |= VDI_C_MOT_SEL_FULL;
break;
default:
reg |= VDI_C_MOT_SEL_LOW;
break;
}
ipu_vdi_write(vdi, reg, VDI_C);
spin_unlock_irqrestore(&vdi->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_vdi_set_motion);
void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres)
{
unsigned long flags;
u32 pixel_fmt, reg;
spin_lock_irqsave(&vdi->lock, flags);
reg = ((yres - 1) << 16) | (xres - 1);
ipu_vdi_write(vdi, reg, VDI_FSIZE);
/*
* Full motion, only vertical filter is used.
* Burst size is 4 accesses
*/
if (code == MEDIA_BUS_FMT_UYVY8_2X8 ||
code == MEDIA_BUS_FMT_UYVY8_1X16 ||
code == MEDIA_BUS_FMT_YUYV8_2X8 ||
code == MEDIA_BUS_FMT_YUYV8_1X16)
pixel_fmt = VDI_C_CH_422;
else
pixel_fmt = VDI_C_CH_420;
reg = ipu_vdi_read(vdi, VDI_C);
reg |= pixel_fmt;
reg |= VDI_C_BURST_SIZE2_4;
reg |= VDI_C_BURST_SIZE1_4 | VDI_C_VWM1_CLR_2;
reg |= VDI_C_BURST_SIZE3_4 | VDI_C_VWM3_CLR_2;
ipu_vdi_write(vdi, reg, VDI_C);
spin_unlock_irqrestore(&vdi->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_vdi_setup);
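/*
 * Typical call sequence (an illustrative sketch based on the functions in
 * this file, not copied from a specific caller): a deinterlacing pipeline
 * would do
 *
 *	vdi = ipu_vdi_get(ipu);
 *	ipu_vdi_setup(vdi, code, xres, yres);
 *	ipu_vdi_set_field_order(vdi, std, field);
 *	ipu_vdi_set_motion(vdi, motion_sel);
 *	ipu_vdi_enable(vdi);
 *	...
 *	ipu_vdi_disable(vdi);
 *	ipu_vdi_unsetup(vdi);
 *	ipu_vdi_put(vdi);
 */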
void ipu_vdi_unsetup(struct ipu_vdi *vdi)
{
unsigned long flags;
spin_lock_irqsave(&vdi->lock, flags);
ipu_vdi_write(vdi, 0, VDI_FSIZE);
ipu_vdi_write(vdi, 0, VDI_C);
spin_unlock_irqrestore(&vdi->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_vdi_unsetup);
int ipu_vdi_enable(struct ipu_vdi *vdi)
{
unsigned long flags;
spin_lock_irqsave(&vdi->lock, flags);
if (!vdi->use_count)
ipu_module_enable(vdi->ipu, vdi->module);
vdi->use_count++;
spin_unlock_irqrestore(&vdi->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_vdi_enable);
int ipu_vdi_disable(struct ipu_vdi *vdi)
{
unsigned long flags;
spin_lock_irqsave(&vdi->lock, flags);
if (vdi->use_count) {
if (!--vdi->use_count)
ipu_module_disable(vdi->ipu, vdi->module);
}
spin_unlock_irqrestore(&vdi->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_vdi_disable);
struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu)
{
return ipu->vdi_priv;
}
EXPORT_SYMBOL_GPL(ipu_vdi_get);
void ipu_vdi_put(struct ipu_vdi *vdi)
{
}
EXPORT_SYMBOL_GPL(ipu_vdi_put);
int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
unsigned long base, u32 module)
{
struct ipu_vdi *vdi;
vdi = devm_kzalloc(dev, sizeof(*vdi), GFP_KERNEL);
if (!vdi)
return -ENOMEM;
ipu->vdi_priv = vdi;
spin_lock_init(&vdi->lock);
vdi->module = module;
vdi->base = devm_ioremap(dev, base, PAGE_SIZE);
if (!vdi->base)
return -ENOMEM;
dev_dbg(dev, "VDI base: 0x%08lx remapped to %p\n", base, vdi->base);
vdi->ipu = ipu;
return 0;
}
void ipu_vdi_exit(struct ipu_soc *ipu)
{
}
| linux-master | drivers/gpu/ipu-v3/ipu-vdi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2010 Sascha Hauer <[email protected]>
* Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
*/
#include <linux/export.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"
#define DC_MAP_CONF_PTR(n) (0x108 + ((n) & ~0x1) * 2)
#define DC_MAP_CONF_VAL(n) (0x144 + ((n) & ~0x1) * 2)
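/*
 * Each 32-bit DC_MAP_CONF register holds two 16-bit entries, so the
 * register offset advances by 4 for every two indices (hence the
 * ((n) & ~0x1) * 2 term) and the index LSB selects the low or high
 * half-word, see ipu_dc_map_config() and ipu_dc_map_clear().
 */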
#define DC_EVT_NF 0
#define DC_EVT_NL 1
#define DC_EVT_EOF 2
#define DC_EVT_NFIELD 3
#define DC_EVT_EOL 4
#define DC_EVT_EOFIELD 5
#define DC_EVT_NEW_ADDR 6
#define DC_EVT_NEW_CHAN 7
#define DC_EVT_NEW_DATA 8
#define DC_EVT_NEW_ADDR_W_0 0
#define DC_EVT_NEW_ADDR_W_1 1
#define DC_EVT_NEW_CHAN_W_0 2
#define DC_EVT_NEW_CHAN_W_1 3
#define DC_EVT_NEW_DATA_W_0 4
#define DC_EVT_NEW_DATA_W_1 5
#define DC_EVT_NEW_ADDR_R_0 6
#define DC_EVT_NEW_ADDR_R_1 7
#define DC_EVT_NEW_CHAN_R_0 8
#define DC_EVT_NEW_CHAN_R_1 9
#define DC_EVT_NEW_DATA_R_0 10
#define DC_EVT_NEW_DATA_R_1 11
#define DC_WR_CH_CONF 0x0
#define DC_WR_CH_ADDR 0x4
#define DC_RL_CH(evt) (8 + ((evt) & ~0x1) * 2)
#define DC_GEN 0xd4
#define DC_DISP_CONF1(disp) (0xd8 + (disp) * 4)
#define DC_DISP_CONF2(disp) (0xe8 + (disp) * 4)
#define DC_STAT 0x1c8
#define WROD(lf) (0x18 | ((lf) << 1))
#define WRG 0x01
#define WCLK 0xc9
#define SYNC_WAVE 0
#define NULL_WAVE (-1)
#define DC_GEN_SYNC_1_6_SYNC (2 << 1)
#define DC_GEN_SYNC_PRIORITY_1 (1 << 7)
#define DC_WR_CH_CONF_WORD_SIZE_8 (0 << 0)
#define DC_WR_CH_CONF_WORD_SIZE_16 (1 << 0)
#define DC_WR_CH_CONF_WORD_SIZE_24 (2 << 0)
#define DC_WR_CH_CONF_WORD_SIZE_32 (3 << 0)
#define DC_WR_CH_CONF_DISP_ID_PARALLEL(i) (((i) & 0x1) << 3)
#define DC_WR_CH_CONF_DISP_ID_SERIAL (2 << 3)
#define DC_WR_CH_CONF_DISP_ID_ASYNC (3 << 4)
#define DC_WR_CH_CONF_FIELD_MODE (1 << 9)
#define DC_WR_CH_CONF_PROG_TYPE_NORMAL (4 << 5)
#define DC_WR_CH_CONF_PROG_TYPE_MASK (7 << 5)
#define DC_WR_CH_CONF_PROG_DI_ID (1 << 2)
#define DC_WR_CH_CONF_PROG_DISP_ID(i) (((i) & 0x1) << 3)
#define IPU_DC_NUM_CHANNELS 10
struct ipu_dc_priv;
enum ipu_dc_map {
IPU_DC_MAP_RGB24,
IPU_DC_MAP_RGB565,
IPU_DC_MAP_GBR24, /* TVEv2 */
IPU_DC_MAP_BGR666,
IPU_DC_MAP_LVDS666,
IPU_DC_MAP_BGR24,
};
struct ipu_dc {
/* The display interface number assigned to this dc channel */
unsigned int di;
void __iomem *base;
struct ipu_dc_priv *priv;
int chno;
bool in_use;
};
struct ipu_dc_priv {
void __iomem *dc_reg;
void __iomem *dc_tmpl_reg;
struct ipu_soc *ipu;
struct device *dev;
struct ipu_dc channels[IPU_DC_NUM_CHANNELS];
struct mutex mutex;
struct completion comp;
int use_count;
};
static void dc_link_event(struct ipu_dc *dc, int event, int addr, int priority)
{
u32 reg;
reg = readl(dc->base + DC_RL_CH(event));
reg &= ~(0xffff << (16 * (event & 0x1)));
reg |= ((addr << 8) | priority) << (16 * (event & 0x1));
writel(reg, dc->base + DC_RL_CH(event));
}
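/*
 * Each DC template entry is 64 bits wide and is written as two 32-bit
 * words at dc_tmpl_reg + word * 8. The low word packs the sync source,
 * glue logic, wave generator number + 1, map number + 1 and the low
 * operand bits; the high word holds the remaining operand bits, the
 * opcode and the stop bit. WCLK and WRG use slightly different layouts,
 * see the opcode checks below.
 */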
static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand,
int map, int wave, int glue, int sync, int stop)
{
struct ipu_dc_priv *priv = dc->priv;
u32 reg1, reg2;
if (opcode == WCLK) {
reg1 = (operand << 20) & 0xfff00000;
reg2 = operand >> 12 | opcode << 1 | stop << 9;
} else if (opcode == WRG) {
reg1 = sync | glue << 4 | ++wave << 11 | ((operand << 15) & 0xffff8000);
reg2 = operand >> 17 | opcode << 7 | stop << 9;
} else {
reg1 = sync | glue << 4 | ++wave << 11 | ++map << 15 | ((operand << 20) & 0xfff00000);
reg2 = operand >> 12 | opcode << 4 | stop << 9;
}
writel(reg1, priv->dc_tmpl_reg + word * 8);
writel(reg2, priv->dc_tmpl_reg + word * 8 + 4);
}
static int ipu_bus_format_to_map(u32 fmt)
{
switch (fmt) {
default:
WARN_ON(1);
fallthrough;
case MEDIA_BUS_FMT_RGB888_1X24:
return IPU_DC_MAP_RGB24;
case MEDIA_BUS_FMT_RGB565_1X16:
return IPU_DC_MAP_RGB565;
case MEDIA_BUS_FMT_GBR888_1X24:
return IPU_DC_MAP_GBR24;
case MEDIA_BUS_FMT_RGB666_1X18:
return IPU_DC_MAP_BGR666;
case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
return IPU_DC_MAP_LVDS666;
case MEDIA_BUS_FMT_BGR888_1X24:
return IPU_DC_MAP_BGR24;
}
}
int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
u32 bus_format, u32 width)
{
struct ipu_dc_priv *priv = dc->priv;
int addr, sync;
u32 reg = 0;
int map;
dc->di = ipu_di_get_num(di);
if (!IS_ALIGNED(width, 8)) {
dev_warn(priv->dev,
"%s: hactive does not align to 8 byte\n", __func__);
}
map = ipu_bus_format_to_map(bus_format);
/*
* In interlaced mode we need more counters to create the asymmetric
* per-field VSYNC signals. The pixel active signal synchronising DC
* to DI moves to signal generator #6 (see ipu-di.c). In progressive
* mode counter #5 is used.
*/
sync = interlaced ? 6 : 5;
/* Reserve 5 microcode template words for each DI */
if (dc->di)
addr = 5;
else
addr = 0;
if (interlaced) {
dc_link_event(dc, DC_EVT_NL, addr, 3);
dc_link_event(dc, DC_EVT_EOL, addr, 2);
dc_link_event(dc, DC_EVT_NEW_DATA, addr, 1);
/* Init template microcode */
dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1);
} else {
dc_link_event(dc, DC_EVT_NL, addr + 2, 3);
dc_link_event(dc, DC_EVT_EOL, addr + 3, 2);
dc_link_event(dc, DC_EVT_NEW_DATA, addr + 1, 1);
/* Init template microcode */
dc_write_tmpl(dc, addr + 2, WROD(0), 0, map, SYNC_WAVE, 8, sync, 1);
dc_write_tmpl(dc, addr + 3, WROD(0), 0, map, SYNC_WAVE, 4, sync, 0);
dc_write_tmpl(dc, addr + 4, WRG, 0, map, NULL_WAVE, 0, 0, 1);
dc_write_tmpl(dc, addr + 1, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1);
}
dc_link_event(dc, DC_EVT_NF, 0, 0);
dc_link_event(dc, DC_EVT_NFIELD, 0, 0);
dc_link_event(dc, DC_EVT_EOF, 0, 0);
dc_link_event(dc, DC_EVT_EOFIELD, 0, 0);
dc_link_event(dc, DC_EVT_NEW_CHAN, 0, 0);
dc_link_event(dc, DC_EVT_NEW_ADDR, 0, 0);
reg = readl(dc->base + DC_WR_CH_CONF);
if (interlaced)
reg |= DC_WR_CH_CONF_FIELD_MODE;
else
reg &= ~DC_WR_CH_CONF_FIELD_MODE;
writel(reg, dc->base + DC_WR_CH_CONF);
writel(0x0, dc->base + DC_WR_CH_ADDR);
writel(width, priv->dc_reg + DC_DISP_CONF2(dc->di));
return 0;
}
EXPORT_SYMBOL_GPL(ipu_dc_init_sync);
void ipu_dc_enable(struct ipu_soc *ipu)
{
struct ipu_dc_priv *priv = ipu->dc_priv;
mutex_lock(&priv->mutex);
if (!priv->use_count)
ipu_module_enable(priv->ipu, IPU_CONF_DC_EN);
priv->use_count++;
mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL_GPL(ipu_dc_enable);
void ipu_dc_enable_channel(struct ipu_dc *dc)
{
u32 reg;
reg = readl(dc->base + DC_WR_CH_CONF);
reg |= DC_WR_CH_CONF_PROG_TYPE_NORMAL;
writel(reg, dc->base + DC_WR_CH_CONF);
}
EXPORT_SYMBOL_GPL(ipu_dc_enable_channel);
void ipu_dc_disable_channel(struct ipu_dc *dc)
{
u32 val;
val = readl(dc->base + DC_WR_CH_CONF);
val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
writel(val, dc->base + DC_WR_CH_CONF);
}
EXPORT_SYMBOL_GPL(ipu_dc_disable_channel);
void ipu_dc_disable(struct ipu_soc *ipu)
{
struct ipu_dc_priv *priv = ipu->dc_priv;
mutex_lock(&priv->mutex);
priv->use_count--;
if (!priv->use_count)
ipu_module_disable(priv->ipu, IPU_CONF_DC_EN);
if (priv->use_count < 0)
priv->use_count = 0;
mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL_GPL(ipu_dc_disable);
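/*
 * Each pixel format map consists of three byte entries (one per color
 * component). @offset is the bit position of the component's MSB on the
 * display data bus and @mask selects which bits of the 8-bit component
 * are used: e.g. the RGB24 map set up in ipu_dc_init() places blue at bus
 * bits 0-7 (offset 7, mask 0xff), green at 8-15 and red at 16-23, while
 * the 666 maps use mask 0xfc to drop the two LSBs.
 */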
static void ipu_dc_map_config(struct ipu_dc_priv *priv, enum ipu_dc_map map,
int byte_num, int offset, int mask)
{
int ptr = map * 3 + byte_num;
u32 reg;
reg = readl(priv->dc_reg + DC_MAP_CONF_VAL(ptr));
reg &= ~(0xffff << (16 * (ptr & 0x1)));
reg |= ((offset << 8) | mask) << (16 * (ptr & 0x1));
writel(reg, priv->dc_reg + DC_MAP_CONF_VAL(ptr));
reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map));
reg &= ~(0x1f << ((16 * (map & 0x1)) + (5 * byte_num)));
reg |= ptr << ((16 * (map & 0x1)) + (5 * byte_num));
writel(reg, priv->dc_reg + DC_MAP_CONF_PTR(map));
}
static void ipu_dc_map_clear(struct ipu_dc_priv *priv, int map)
{
u32 reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map));
writel(reg & ~(0xffff << (16 * (map & 0x1))),
priv->dc_reg + DC_MAP_CONF_PTR(map));
}
struct ipu_dc *ipu_dc_get(struct ipu_soc *ipu, int channel)
{
struct ipu_dc_priv *priv = ipu->dc_priv;
struct ipu_dc *dc;
if (channel >= IPU_DC_NUM_CHANNELS)
return ERR_PTR(-ENODEV);
dc = &priv->channels[channel];
mutex_lock(&priv->mutex);
if (dc->in_use) {
mutex_unlock(&priv->mutex);
return ERR_PTR(-EBUSY);
}
dc->in_use = true;
mutex_unlock(&priv->mutex);
return dc;
}
EXPORT_SYMBOL_GPL(ipu_dc_get);
void ipu_dc_put(struct ipu_dc *dc)
{
struct ipu_dc_priv *priv = dc->priv;
mutex_lock(&priv->mutex);
dc->in_use = false;
mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL_GPL(ipu_dc_put);
int ipu_dc_init(struct ipu_soc *ipu, struct device *dev,
unsigned long base, unsigned long template_base)
{
struct ipu_dc_priv *priv;
static const int channel_offsets[] = {
0, 0x1c, 0x38, 0x54, 0x58, 0x5c, 0x78, 0, 0x94, 0xb4
};
int i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
mutex_init(&priv->mutex);
priv->dev = dev;
priv->ipu = ipu;
priv->dc_reg = devm_ioremap(dev, base, PAGE_SIZE);
priv->dc_tmpl_reg = devm_ioremap(dev, template_base, PAGE_SIZE);
if (!priv->dc_reg || !priv->dc_tmpl_reg)
return -ENOMEM;
for (i = 0; i < IPU_DC_NUM_CHANNELS; i++) {
priv->channels[i].chno = i;
priv->channels[i].priv = priv;
priv->channels[i].base = priv->dc_reg + channel_offsets[i];
}
writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(1) |
DC_WR_CH_CONF_PROG_DI_ID,
priv->channels[1].base + DC_WR_CH_CONF);
writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(0),
priv->channels[5].base + DC_WR_CH_CONF);
writel(DC_GEN_SYNC_1_6_SYNC | DC_GEN_SYNC_PRIORITY_1,
priv->dc_reg + DC_GEN);
ipu->dc_priv = priv;
dev_dbg(dev, "DC base: 0x%08lx template base: 0x%08lx\n",
base, template_base);
/* rgb24 */
ipu_dc_map_clear(priv, IPU_DC_MAP_RGB24);
ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 0, 7, 0xff); /* blue */
ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 1, 15, 0xff); /* green */
ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 2, 23, 0xff); /* red */
/* rgb565 */
ipu_dc_map_clear(priv, IPU_DC_MAP_RGB565);
ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 0, 4, 0xf8); /* blue */
ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 1, 10, 0xfc); /* green */
ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 2, 15, 0xf8); /* red */
/* gbr24 */
ipu_dc_map_clear(priv, IPU_DC_MAP_GBR24);
ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 2, 15, 0xff); /* green */
ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 1, 7, 0xff); /* blue */
ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 0, 23, 0xff); /* red */
/* bgr666 */
ipu_dc_map_clear(priv, IPU_DC_MAP_BGR666);
ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 0, 5, 0xfc); /* blue */
ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 1, 11, 0xfc); /* green */
ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 2, 17, 0xfc); /* red */
/* lvds666 */
ipu_dc_map_clear(priv, IPU_DC_MAP_LVDS666);
ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 0, 5, 0xfc); /* blue */
ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 1, 13, 0xfc); /* green */
ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 2, 21, 0xfc); /* red */
/* bgr24 */
ipu_dc_map_clear(priv, IPU_DC_MAP_BGR24);
ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 2, 7, 0xff); /* red */
ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 1, 15, 0xff); /* green */
ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 0, 23, 0xff); /* blue */
return 0;
}
void ipu_dc_exit(struct ipu_soc *ipu)
{
}
| linux-master | drivers/gpu/ipu-v3/ipu-dc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2010 Sascha Hauer <[email protected]>
* Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
*/
#include <linux/export.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"
struct ipu_di {
void __iomem *base;
int id;
u32 module;
struct clk *clk_di; /* display input clock */
struct clk *clk_ipu; /* IPU bus clock */
struct clk *clk_di_pixel; /* resulting pixel clock */
bool inuse;
struct ipu_soc *ipu;
};
static DEFINE_MUTEX(di_mutex);
struct di_sync_config {
int run_count;
int run_src;
int offset_count;
int offset_src;
int repeat_count;
int cnt_clr_src;
int cnt_polarity_gen_en;
int cnt_polarity_clr_src;
int cnt_polarity_trigger_src;
int cnt_up;
int cnt_down;
};
enum di_pins {
DI_PIN11 = 0,
DI_PIN12 = 1,
DI_PIN13 = 2,
DI_PIN14 = 3,
DI_PIN15 = 4,
DI_PIN16 = 5,
DI_PIN17 = 6,
DI_PIN_CS = 7,
DI_PIN_SER_CLK = 0,
DI_PIN_SER_RS = 1,
};
enum di_sync_wave {
DI_SYNC_NONE = 0,
DI_SYNC_CLK = 1,
DI_SYNC_INT_HSYNC = 2,
DI_SYNC_HSYNC = 3,
DI_SYNC_VSYNC = 4,
DI_SYNC_DE = 6,
DI_SYNC_CNT1 = 2, /* counter >= 2 only */
DI_SYNC_CNT4 = 5, /* counter >= 5 only */
DI_SYNC_CNT5 = 6, /* counter >= 6 only */
};
#define SYNC_WAVE 0
#define DI_GENERAL 0x0000
#define DI_BS_CLKGEN0 0x0004
#define DI_BS_CLKGEN1 0x0008
#define DI_SW_GEN0(gen) (0x000c + 4 * ((gen) - 1))
#define DI_SW_GEN1(gen) (0x0030 + 4 * ((gen) - 1))
#define DI_STP_REP(gen) (0x0148 + 4 * (((gen) - 1)/2))
#define DI_SYNC_AS_GEN 0x0054
#define DI_DW_GEN(gen) (0x0058 + 4 * (gen))
#define DI_DW_SET(gen, set) (0x0088 + 4 * ((gen) + 0xc * (set)))
#define DI_SER_CONF 0x015c
#define DI_SSC 0x0160
#define DI_POL 0x0164
#define DI_AW0 0x0168
#define DI_AW1 0x016c
#define DI_SCR_CONF 0x0170
#define DI_STAT 0x0174
#define DI_SW_GEN0_RUN_COUNT(x) ((x) << 19)
#define DI_SW_GEN0_RUN_SRC(x) ((x) << 16)
#define DI_SW_GEN0_OFFSET_COUNT(x) ((x) << 3)
#define DI_SW_GEN0_OFFSET_SRC(x) ((x) << 0)
#define DI_SW_GEN1_CNT_POL_GEN_EN(x) ((x) << 29)
#define DI_SW_GEN1_CNT_CLR_SRC(x) ((x) << 25)
#define DI_SW_GEN1_CNT_POL_TRIGGER_SRC(x) ((x) << 12)
#define DI_SW_GEN1_CNT_POL_CLR_SRC(x) ((x) << 9)
#define DI_SW_GEN1_CNT_DOWN(x) ((x) << 16)
#define DI_SW_GEN1_CNT_UP(x) (x)
#define DI_SW_GEN1_AUTO_RELOAD (0x10000000)
#define DI_DW_GEN_ACCESS_SIZE_OFFSET 24
#define DI_DW_GEN_COMPONENT_SIZE_OFFSET 16
#define DI_GEN_POLARITY_1 (1 << 0)
#define DI_GEN_POLARITY_2 (1 << 1)
#define DI_GEN_POLARITY_3 (1 << 2)
#define DI_GEN_POLARITY_4 (1 << 3)
#define DI_GEN_POLARITY_5 (1 << 4)
#define DI_GEN_POLARITY_6 (1 << 5)
#define DI_GEN_POLARITY_7 (1 << 6)
#define DI_GEN_POLARITY_8 (1 << 7)
#define DI_GEN_POLARITY_DISP_CLK (1 << 17)
#define DI_GEN_DI_CLK_EXT (1 << 20)
#define DI_GEN_DI_VSYNC_EXT (1 << 21)
#define DI_POL_DRDY_DATA_POLARITY (1 << 7)
#define DI_POL_DRDY_POLARITY_15 (1 << 4)
#define DI_VSYNC_SEL_OFFSET 13
static inline u32 ipu_di_read(struct ipu_di *di, unsigned offset)
{
return readl(di->base + offset);
}
static inline void ipu_di_write(struct ipu_di *di, u32 value, unsigned offset)
{
writel(value, di->base + offset);
}
static void ipu_di_data_wave_config(struct ipu_di *di,
int wave_gen,
int access_size, int component_size)
{
u32 reg;
reg = (access_size << DI_DW_GEN_ACCESS_SIZE_OFFSET) |
(component_size << DI_DW_GEN_COMPONENT_SIZE_OFFSET);
ipu_di_write(di, reg, DI_DW_GEN(wave_gen));
}
static void ipu_di_data_pin_config(struct ipu_di *di, int wave_gen, int di_pin,
int set, int up, int down)
{
u32 reg;
reg = ipu_di_read(di, DI_DW_GEN(wave_gen));
reg &= ~(0x3 << (di_pin * 2));
reg |= set << (di_pin * 2);
ipu_di_write(di, reg, DI_DW_GEN(wave_gen));
ipu_di_write(di, (down << 16) | up, DI_DW_SET(wave_gen, set));
}
static void ipu_di_sync_config(struct ipu_di *di, struct di_sync_config *config,
int start, int count)
{
u32 reg;
int i;
for (i = 0; i < count; i++) {
struct di_sync_config *c = &config[i];
int wave_gen = start + i + 1;
if ((c->run_count >= 0x1000) || (c->offset_count >= 0x1000) ||
(c->repeat_count >= 0x1000) ||
(c->cnt_up >= 0x400) ||
(c->cnt_down >= 0x400)) {
dev_err(di->ipu->dev, "DI%d counters out of range.\n",
di->id);
return;
}
reg = DI_SW_GEN0_RUN_COUNT(c->run_count) |
DI_SW_GEN0_RUN_SRC(c->run_src) |
DI_SW_GEN0_OFFSET_COUNT(c->offset_count) |
DI_SW_GEN0_OFFSET_SRC(c->offset_src);
ipu_di_write(di, reg, DI_SW_GEN0(wave_gen));
reg = DI_SW_GEN1_CNT_POL_GEN_EN(c->cnt_polarity_gen_en) |
DI_SW_GEN1_CNT_CLR_SRC(c->cnt_clr_src) |
DI_SW_GEN1_CNT_POL_TRIGGER_SRC(
c->cnt_polarity_trigger_src) |
DI_SW_GEN1_CNT_POL_CLR_SRC(c->cnt_polarity_clr_src) |
DI_SW_GEN1_CNT_DOWN(c->cnt_down) |
DI_SW_GEN1_CNT_UP(c->cnt_up);
/* Enable auto reload */
if (c->repeat_count == 0)
reg |= DI_SW_GEN1_AUTO_RELOAD;
ipu_di_write(di, reg, DI_SW_GEN1(wave_gen));
reg = ipu_di_read(di, DI_STP_REP(wave_gen));
reg &= ~(0xffff << (16 * ((wave_gen - 1) & 0x1)));
reg |= c->repeat_count << (16 * ((wave_gen - 1) & 0x1));
ipu_di_write(di, reg, DI_STP_REP(wave_gen));
}
}
static void ipu_di_sync_config_interlaced(struct ipu_di *di,
struct ipu_di_signal_cfg *sig)
{
u32 h_total = sig->mode.hactive + sig->mode.hsync_len +
sig->mode.hback_porch + sig->mode.hfront_porch;
u32 v_total = sig->mode.vactive + sig->mode.vsync_len +
sig->mode.vback_porch + sig->mode.vfront_porch;
struct di_sync_config cfg[] = {
{
/* 1: internal VSYNC for each frame */
.run_count = v_total * 2 - 1,
.run_src = 3, /* == counter 7 */
}, {
/* PIN2: HSYNC waveform */
.run_count = h_total - 1,
.run_src = DI_SYNC_CLK,
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = DI_SYNC_CLK,
.cnt_down = sig->mode.hsync_len * 2,
}, {
/* PIN3: VSYNC waveform */
.run_count = v_total - 1,
.run_src = 4, /* == counter 7 */
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = 4, /* == counter 7 */
.cnt_down = sig->mode.vsync_len * 2,
.cnt_clr_src = DI_SYNC_CNT1,
}, {
/* 4: Field */
.run_count = v_total / 2,
.run_src = DI_SYNC_HSYNC,
.offset_count = h_total / 2,
.offset_src = DI_SYNC_CLK,
.repeat_count = 2,
.cnt_clr_src = DI_SYNC_CNT1,
}, {
/* 5: Active lines */
.run_src = DI_SYNC_HSYNC,
.offset_count = (sig->mode.vsync_len +
sig->mode.vback_porch) / 2,
.offset_src = DI_SYNC_HSYNC,
.repeat_count = sig->mode.vactive / 2,
.cnt_clr_src = DI_SYNC_CNT4,
}, {
/* 6: Active pixel, referenced by DC */
.run_src = DI_SYNC_CLK,
.offset_count = sig->mode.hsync_len +
sig->mode.hback_porch,
.offset_src = DI_SYNC_CLK,
.repeat_count = sig->mode.hactive,
.cnt_clr_src = DI_SYNC_CNT5,
}, {
/* 7: Half line HSYNC */
.run_count = h_total / 2 - 1,
.run_src = DI_SYNC_CLK,
}
};
ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg));
ipu_di_write(di, v_total / 2 - 1, DI_SCR_CONF);
}
static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
struct ipu_di_signal_cfg *sig, int div)
{
u32 h_total = sig->mode.hactive + sig->mode.hsync_len +
sig->mode.hback_porch + sig->mode.hfront_porch;
u32 v_total = sig->mode.vactive + sig->mode.vsync_len +
sig->mode.vback_porch + sig->mode.vfront_porch;
struct di_sync_config cfg[] = {
{
/* 1: INT_HSYNC */
.run_count = h_total - 1,
.run_src = DI_SYNC_CLK,
} , {
/* PIN2: HSYNC */
.run_count = h_total - 1,
.run_src = DI_SYNC_CLK,
.offset_count = div * sig->v_to_h_sync,
.offset_src = DI_SYNC_CLK,
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = DI_SYNC_CLK,
.cnt_down = sig->mode.hsync_len * 2,
} , {
/* PIN3: VSYNC */
.run_count = v_total - 1,
.run_src = DI_SYNC_INT_HSYNC,
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
.cnt_down = sig->mode.vsync_len * 2,
} , {
/* 4: Line Active */
.run_src = DI_SYNC_HSYNC,
.offset_count = sig->mode.vsync_len +
sig->mode.vback_porch,
.offset_src = DI_SYNC_HSYNC,
.repeat_count = sig->mode.vactive,
.cnt_clr_src = DI_SYNC_VSYNC,
} , {
/* 5: Pixel Active, referenced by DC */
.run_src = DI_SYNC_CLK,
.offset_count = sig->mode.hsync_len +
sig->mode.hback_porch,
.offset_src = DI_SYNC_CLK,
.repeat_count = sig->mode.hactive,
.cnt_clr_src = 5, /* Line Active */
} , {
/* unused */
} , {
/* unused */
},
};
/* can't use #7 and #8 for line active and pixel active counters */
struct di_sync_config cfg_vga[] = {
{
/* 1: INT_HSYNC */
.run_count = h_total - 1,
.run_src = DI_SYNC_CLK,
} , {
/* 2: VSYNC */
.run_count = v_total - 1,
.run_src = DI_SYNC_INT_HSYNC,
} , {
/* 3: Line Active */
.run_src = DI_SYNC_INT_HSYNC,
.offset_count = sig->mode.vsync_len +
sig->mode.vback_porch,
.offset_src = DI_SYNC_INT_HSYNC,
.repeat_count = sig->mode.vactive,
.cnt_clr_src = 3 /* VSYNC */,
} , {
/* PIN4: HSYNC for VGA via TVEv2 on TQ MBa53 */
.run_count = h_total - 1,
.run_src = DI_SYNC_CLK,
.offset_count = div * sig->v_to_h_sync + 18, /* magic value from Freescale TVE driver */
.offset_src = DI_SYNC_CLK,
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = DI_SYNC_CLK,
.cnt_down = sig->mode.hsync_len * 2,
} , {
/* 5: Pixel Active signal to DC */
.run_src = DI_SYNC_CLK,
.offset_count = sig->mode.hsync_len +
sig->mode.hback_porch,
.offset_src = DI_SYNC_CLK,
.repeat_count = sig->mode.hactive,
.cnt_clr_src = 4, /* Line Active */
} , {
/* PIN6: VSYNC for VGA via TVEv2 on TQ MBa53 */
.run_count = v_total - 1,
.run_src = DI_SYNC_INT_HSYNC,
.offset_count = 1, /* magic value from Freescale TVE driver */
.offset_src = DI_SYNC_INT_HSYNC,
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
.cnt_down = sig->mode.vsync_len * 2,
} , {
/* PIN4: HSYNC for VGA via TVEv2 on i.MX53-QSB */
.run_count = h_total - 1,
.run_src = DI_SYNC_CLK,
.offset_count = div * sig->v_to_h_sync + 18, /* magic value from Freescale TVE driver */
.offset_src = DI_SYNC_CLK,
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = DI_SYNC_CLK,
.cnt_down = sig->mode.hsync_len * 2,
} , {
/* PIN6: VSYNC for VGA via TVEv2 on i.MX53-QSB */
.run_count = v_total - 1,
.run_src = DI_SYNC_INT_HSYNC,
.offset_count = 1, /* magic value from Freescale TVE driver */
.offset_src = DI_SYNC_INT_HSYNC,
.cnt_polarity_gen_en = 1,
.cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
.cnt_down = sig->mode.vsync_len * 2,
} , {
/* unused */
},
};
ipu_di_write(di, v_total - 1, DI_SCR_CONF);
if (sig->hsync_pin == 2 && sig->vsync_pin == 3)
ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg));
else
ipu_di_sync_config(di, cfg_vga, 0, ARRAY_SIZE(cfg_vga));
}
static void ipu_di_config_clock(struct ipu_di *di,
const struct ipu_di_signal_cfg *sig)
{
struct clk *clk;
unsigned clkgen0;
uint32_t val;
if (sig->clkflags & IPU_DI_CLKMODE_EXT) {
/*
* CLKMODE_EXT means we must use the DI clock: this is
* needed for things like LVDS which needs to feed the
* DI and LDB with the same pixel clock.
*/
clk = di->clk_di;
if (sig->clkflags & IPU_DI_CLKMODE_SYNC) {
/*
* CLKMODE_SYNC means that we want the DI to be
* clocked at the same rate as the parent clock.
* This is needed (eg) for LDB which needs to be
* fed with the same pixel clock. We assume that
* the LDB clock has already been set correctly.
*/
clkgen0 = 1 << 4;
} else {
/*
* We can use the divider. We should really have
* a flag here indicating whether the bridge can
* cope with a fractional divider or not. For the
* time being, let's go for simplicity and
* reliability.
*/
unsigned long in_rate;
unsigned div;
clk_set_rate(clk, sig->mode.pixelclock);
in_rate = clk_get_rate(clk);
div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
div = clamp(div, 1U, 255U);
clkgen0 = div << 4;
}
} else {
/*
* For other interfaces, we can arbitrarily select between
* the DI specific clock and the internal IPU clock. See
* DI_GENERAL bit 20. We select the IPU clock if it can
* give us a clock rate within 1% of the requested frequency,
* otherwise we use the DI clock.
*/
unsigned long rate, clkrate;
unsigned div, error;
clkrate = clk_get_rate(di->clk_ipu);
div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
div = clamp(div, 1U, 255U);
rate = clkrate / div;
error = rate / (sig->mode.pixelclock / 1000);
dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %c%d.%d%%\n",
rate, div, error < 1000 ? '-' : '+',
abs(error - 1000) / 10, abs(error - 1000) % 10);
/* Allow a 1% error */
if (error < 1010 && error >= 990) {
clk = di->clk_ipu;
clkgen0 = div << 4;
} else {
unsigned long in_rate;
unsigned div;
clk = di->clk_di;
clk_set_rate(clk, sig->mode.pixelclock);
in_rate = clk_get_rate(clk);
div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
div = clamp(div, 1U, 255U);
clkgen0 = div << 4;
}
}
di->clk_di_pixel = clk;
/* Set the divider */
ipu_di_write(di, clkgen0, DI_BS_CLKGEN0);
/*
* Set the high/low periods. Bits 24:16 give us the falling edge,
* and bits 8:0 give the rising edge. LSB is fraction, and is
* based on the divider above. We want a 50% duty cycle, so set
* the falling edge to be half the divider.
*/
ipu_di_write(di, (clkgen0 >> 4) << 16, DI_BS_CLKGEN1);
/* Finally select the input clock */
val = ipu_di_read(di, DI_GENERAL) & ~DI_GEN_DI_CLK_EXT;
if (clk == di->clk_di)
val |= DI_GEN_DI_CLK_EXT;
ipu_di_write(di, val, DI_GENERAL);
dev_dbg(di->ipu->dev, "Want %luHz IPU %luHz DI %luHz using %s, %luHz\n",
sig->mode.pixelclock,
clk_get_rate(di->clk_ipu),
clk_get_rate(di->clk_di),
clk == di->clk_di ? "DI" : "IPU",
clk_get_rate(di->clk_di_pixel) / (clkgen0 >> 4));
}
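/*
 * Worked example with hypothetical rates (not taken from a real board):
 * for a requested 66 MHz pixel clock and a 264 MHz IPU clock,
 * DIV_ROUND_CLOSEST(264 MHz, 66 MHz) = 4 and the resulting rate matches
 * exactly (error == 1000, i.e. 0.0%), so the IPU clock is chosen and
 * clkgen0 = 4 << 4 = 0x40 is written to DI_BS_CLKGEN0, a fixed-point
 * divider with four fractional bits (see ipu_di_init_sync_panel()).
 */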
/*
* This function is called to adjust a video mode to IPU restrictions.
* It is meant to be called from drm crtc mode_fixup() methods.
*/
int ipu_di_adjust_videomode(struct ipu_di *di, struct videomode *mode)
{
u32 diff;
if (!IS_ALIGNED(mode->hactive, 8) &&
mode->hfront_porch < ALIGN(mode->hactive, 8) - mode->hactive) {
dev_err(di->ipu->dev, "hactive %d is not aligned to 8 and front porch is too small to compensate\n",
mode->hactive);
return -EINVAL;
}
if (mode->vfront_porch >= 2)
return 0;
diff = 2 - mode->vfront_porch;
if (mode->vback_porch >= diff) {
mode->vfront_porch = 2;
mode->vback_porch -= diff;
} else if (mode->vsync_len > diff) {
mode->vfront_porch = 2;
mode->vsync_len = mode->vsync_len - diff;
} else {
dev_warn(di->ipu->dev, "failed to adjust videomode\n");
return -EINVAL;
}
dev_dbg(di->ipu->dev, "videomode adapted for IPU restrictions\n");
return 0;
}
EXPORT_SYMBOL_GPL(ipu_di_adjust_videomode);
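/*
 * Illustrative example (hypothetical mode, not from a real panel): a mode
 * with vfront_porch = 1 and vback_porch = 3 is adjusted to
 * vfront_porch = 2 and vback_porch = 2, keeping the vertical total
 * unchanged; if neither the back porch nor the sync length can donate a
 * line, -EINVAL is returned.
 */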
static u32 ipu_di_gen_polarity(int pin)
{
switch (pin) {
case 1:
return DI_GEN_POLARITY_1;
case 2:
return DI_GEN_POLARITY_2;
case 3:
return DI_GEN_POLARITY_3;
case 4:
return DI_GEN_POLARITY_4;
case 5:
return DI_GEN_POLARITY_5;
case 6:
return DI_GEN_POLARITY_6;
case 7:
return DI_GEN_POLARITY_7;
case 8:
return DI_GEN_POLARITY_8;
}
return 0;
}
int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
{
u32 reg;
u32 di_gen, vsync_cnt;
u32 div;
dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
di->id, sig->mode.hactive, sig->mode.vactive);
dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
clk_get_rate(di->clk_ipu),
clk_get_rate(di->clk_di),
sig->mode.pixelclock);
mutex_lock(&di_mutex);
ipu_di_config_clock(di, sig);
div = ipu_di_read(di, DI_BS_CLKGEN0) & 0xfff;
div = div / 16; /* Now divider is integer portion */
/* Setup pixel clock timing */
/* Down time is half of period */
ipu_di_write(di, (div << 16), DI_BS_CLKGEN1);
ipu_di_data_wave_config(di, SYNC_WAVE, div - 1, div - 1);
ipu_di_data_pin_config(di, SYNC_WAVE, DI_PIN15, 3, 0, div * 2);
di_gen = ipu_di_read(di, DI_GENERAL) & DI_GEN_DI_CLK_EXT;
di_gen |= DI_GEN_DI_VSYNC_EXT;
if (sig->mode.flags & DISPLAY_FLAGS_INTERLACED) {
ipu_di_sync_config_interlaced(di, sig);
/* set y_sel = 1 */
di_gen |= 0x10000000;
vsync_cnt = 3;
} else {
ipu_di_sync_config_noninterlaced(di, sig, div);
vsync_cnt = 3;
if (di->id == 1)
/*
* TODO: change only for TVEv2, parallel display
* uses pin 2 / 3
*/
if (!(sig->hsync_pin == 2 && sig->vsync_pin == 3))
vsync_cnt = 6;
}
if (sig->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH)
di_gen |= ipu_di_gen_polarity(sig->hsync_pin);
if (sig->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH)
di_gen |= ipu_di_gen_polarity(sig->vsync_pin);
if (sig->clk_pol)
di_gen |= DI_GEN_POLARITY_DISP_CLK;
ipu_di_write(di, di_gen, DI_GENERAL);
ipu_di_write(di, (--vsync_cnt << DI_VSYNC_SEL_OFFSET) | 0x00000002,
DI_SYNC_AS_GEN);
reg = ipu_di_read(di, DI_POL);
reg &= ~(DI_POL_DRDY_DATA_POLARITY | DI_POL_DRDY_POLARITY_15);
if (sig->enable_pol)
reg |= DI_POL_DRDY_POLARITY_15;
if (sig->data_pol)
reg |= DI_POL_DRDY_DATA_POLARITY;
ipu_di_write(di, reg, DI_POL);
mutex_unlock(&di_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_di_init_sync_panel);
int ipu_di_enable(struct ipu_di *di)
{
int ret;
WARN_ON(IS_ERR(di->clk_di_pixel));
ret = clk_prepare_enable(di->clk_di_pixel);
if (ret)
return ret;
ipu_module_enable(di->ipu, di->module);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_di_enable);
int ipu_di_disable(struct ipu_di *di)
{
WARN_ON(IS_ERR(di->clk_di_pixel));
ipu_module_disable(di->ipu, di->module);
clk_disable_unprepare(di->clk_di_pixel);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_di_disable);
int ipu_di_get_num(struct ipu_di *di)
{
return di->id;
}
EXPORT_SYMBOL_GPL(ipu_di_get_num);
static DEFINE_MUTEX(ipu_di_lock);
struct ipu_di *ipu_di_get(struct ipu_soc *ipu, int disp)
{
struct ipu_di *di;
if (disp > 1)
return ERR_PTR(-EINVAL);
di = ipu->di_priv[disp];
mutex_lock(&ipu_di_lock);
if (di->inuse) {
di = ERR_PTR(-EBUSY);
goto out;
}
di->inuse = true;
out:
mutex_unlock(&ipu_di_lock);
return di;
}
EXPORT_SYMBOL_GPL(ipu_di_get);
void ipu_di_put(struct ipu_di *di)
{
mutex_lock(&ipu_di_lock);
di->inuse = false;
mutex_unlock(&ipu_di_lock);
}
EXPORT_SYMBOL_GPL(ipu_di_put);
int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
unsigned long base,
u32 module, struct clk *clk_ipu)
{
struct ipu_di *di;
if (id > 1)
return -ENODEV;
di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
if (!di)
return -ENOMEM;
ipu->di_priv[id] = di;
di->clk_di = devm_clk_get(dev, id ? "di1" : "di0");
if (IS_ERR(di->clk_di))
return PTR_ERR(di->clk_di);
di->module = module;
di->id = id;
di->clk_ipu = clk_ipu;
di->base = devm_ioremap(dev, base, PAGE_SIZE);
if (!di->base)
return -ENOMEM;
ipu_di_write(di, 0x10, DI_BS_CLKGEN0);
dev_dbg(dev, "DI%d base: 0x%08lx remapped to %p\n",
id, base, di->base);
di->inuse = false;
di->ipu = ipu;
return 0;
}
void ipu_di_exit(struct ipu_soc *ipu, int id)
{
}
| linux-master | drivers/gpu/ipu-v3/ipu-di.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2010 Sascha Hauer <[email protected]>
* Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/err.h>
#include <drm/drm_color_mgmt.h>
#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"
#define DP_SYNC 0
#define DP_ASYNC0 0x60
#define DP_ASYNC1 0xBC
#define DP_COM_CONF 0x0
#define DP_GRAPH_WIND_CTRL 0x0004
#define DP_FG_POS 0x0008
#define DP_CSC_A_0 0x0044
#define DP_CSC_A_1 0x0048
#define DP_CSC_A_2 0x004C
#define DP_CSC_A_3 0x0050
#define DP_CSC_0 0x0054
#define DP_CSC_1 0x0058
#define DP_COM_CONF_FG_EN (1 << 0)
#define DP_COM_CONF_GWSEL (1 << 1)
#define DP_COM_CONF_GWAM (1 << 2)
#define DP_COM_CONF_GWCKE (1 << 3)
#define DP_COM_CONF_CSC_DEF_MASK (3 << 8)
#define DP_COM_CONF_CSC_DEF_OFFSET 8
#define DP_COM_CONF_CSC_DEF_FG (3 << 8)
#define DP_COM_CONF_CSC_DEF_BG (2 << 8)
#define DP_COM_CONF_CSC_DEF_BOTH (1 << 8)
#define IPUV3_NUM_FLOWS 3
struct ipu_dp_priv;
struct ipu_dp {
u32 flow;
bool in_use;
bool foreground;
enum ipu_color_space in_cs;
};
struct ipu_flow {
struct ipu_dp foreground;
struct ipu_dp background;
enum ipu_color_space out_cs;
void __iomem *base;
struct ipu_dp_priv *priv;
};
struct ipu_dp_priv {
struct ipu_soc *ipu;
struct device *dev;
void __iomem *base;
struct ipu_flow flow[IPUV3_NUM_FLOWS];
struct mutex mutex;
int use_count;
};
static u32 ipu_dp_flow_base[] = {DP_SYNC, DP_ASYNC0, DP_ASYNC1};
static inline struct ipu_flow *to_flow(struct ipu_dp *dp)
{
if (dp->foreground)
return container_of(dp, struct ipu_flow, foreground);
else
return container_of(dp, struct ipu_flow, background);
}
int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable,
u8 alpha, bool bg_chan)
{
struct ipu_flow *flow = to_flow(dp);
struct ipu_dp_priv *priv = flow->priv;
u32 reg;
mutex_lock(&priv->mutex);
reg = readl(flow->base + DP_COM_CONF);
if (bg_chan)
reg &= ~DP_COM_CONF_GWSEL;
else
reg |= DP_COM_CONF_GWSEL;
writel(reg, flow->base + DP_COM_CONF);
if (enable) {
reg = readl(flow->base + DP_GRAPH_WIND_CTRL) & 0x00FFFFFFL;
writel(reg | ((u32) alpha << 24),
flow->base + DP_GRAPH_WIND_CTRL);
reg = readl(flow->base + DP_COM_CONF);
writel(reg | DP_COM_CONF_GWAM, flow->base + DP_COM_CONF);
} else {
reg = readl(flow->base + DP_COM_CONF);
writel(reg & ~DP_COM_CONF_GWAM, flow->base + DP_COM_CONF);
}
ipu_srm_dp_update(priv->ipu, true);
mutex_unlock(&priv->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_dp_set_global_alpha);
int ipu_dp_set_window_pos(struct ipu_dp *dp, u16 x_pos, u16 y_pos)
{
struct ipu_flow *flow = to_flow(dp);
struct ipu_dp_priv *priv = flow->priv;
writel((x_pos << 16) | y_pos, flow->base + DP_FG_POS);
ipu_srm_dp_update(priv->ipu, true);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_dp_set_window_pos);
static void ipu_dp_csc_init(struct ipu_flow *flow,
enum drm_color_encoding ycbcr_enc,
enum drm_color_range range,
enum ipu_color_space in,
enum ipu_color_space out,
u32 place)
{
u32 reg;
reg = readl(flow->base + DP_COM_CONF);
reg &= ~DP_COM_CONF_CSC_DEF_MASK;
if (in == out) {
writel(reg, flow->base + DP_COM_CONF);
return;
}
if (in == IPUV3_COLORSPACE_RGB && out == IPUV3_COLORSPACE_YUV) {
writel(0x099 | (0x12d << 16), flow->base + DP_CSC_A_0);
writel(0x03a | (0x3a9 << 16), flow->base + DP_CSC_A_1);
writel(0x356 | (0x100 << 16), flow->base + DP_CSC_A_2);
writel(0x100 | (0x329 << 16), flow->base + DP_CSC_A_3);
writel(0x3d6 | (0x0000 << 16) | (2 << 30),
flow->base + DP_CSC_0);
writel(0x200 | (2 << 14) | (0x200 << 16) | (2 << 30),
flow->base + DP_CSC_1);
} else if (ycbcr_enc == DRM_COLOR_YCBCR_BT709) {
/* Rec.709 limited range */
writel(0x095 | (0x000 << 16), flow->base + DP_CSC_A_0);
writel(0x0e5 | (0x095 << 16), flow->base + DP_CSC_A_1);
writel(0x3e5 | (0x3bc << 16), flow->base + DP_CSC_A_2);
writel(0x095 | (0x10e << 16), flow->base + DP_CSC_A_3);
writel(0x000 | (0x3e10 << 16) | (1 << 30),
flow->base + DP_CSC_0);
writel(0x09a | (1 << 14) | (0x3dbe << 16) | (1 << 30),
flow->base + DP_CSC_1);
} else {
/* BT.601 limited range */
writel(0x095 | (0x000 << 16), flow->base + DP_CSC_A_0);
writel(0x0cc | (0x095 << 16), flow->base + DP_CSC_A_1);
writel(0x3ce | (0x398 << 16), flow->base + DP_CSC_A_2);
writel(0x095 | (0x0ff << 16), flow->base + DP_CSC_A_3);
writel(0x000 | (0x3e42 << 16) | (1 << 30),
flow->base + DP_CSC_0);
writel(0x10a | (1 << 14) | (0x3dd6 << 16) | (1 << 30),
flow->base + DP_CSC_1);
}
reg |= place;
writel(reg, flow->base + DP_COM_CONF);
}
int ipu_dp_setup_channel(struct ipu_dp *dp,
enum drm_color_encoding ycbcr_enc,
enum drm_color_range range,
enum ipu_color_space in,
enum ipu_color_space out)
{
struct ipu_flow *flow = to_flow(dp);
struct ipu_dp_priv *priv = flow->priv;
mutex_lock(&priv->mutex);
dp->in_cs = in;
if (!dp->foreground)
flow->out_cs = out;
if (flow->foreground.in_cs == flow->background.in_cs) {
/*
* foreground and background are of same colorspace, put
* colorspace converter after combining unit.
*/
ipu_dp_csc_init(flow, ycbcr_enc, range,
flow->foreground.in_cs, flow->out_cs,
DP_COM_CONF_CSC_DEF_BOTH);
} else {
if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
flow->foreground.in_cs == flow->out_cs)
/*
* foreground identical to output, apply color
* conversion on background
*/
ipu_dp_csc_init(flow, ycbcr_enc, range,
flow->background.in_cs,
flow->out_cs, DP_COM_CONF_CSC_DEF_BG);
else
ipu_dp_csc_init(flow, ycbcr_enc, range,
flow->foreground.in_cs,
flow->out_cs, DP_COM_CONF_CSC_DEF_FG);
}
ipu_srm_dp_update(priv->ipu, true);
mutex_unlock(&priv->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_dp_setup_channel);
int ipu_dp_enable(struct ipu_soc *ipu)
{
struct ipu_dp_priv *priv = ipu->dp_priv;
mutex_lock(&priv->mutex);
if (!priv->use_count)
ipu_module_enable(priv->ipu, IPU_CONF_DP_EN);
priv->use_count++;
mutex_unlock(&priv->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_dp_enable);
int ipu_dp_enable_channel(struct ipu_dp *dp)
{
struct ipu_flow *flow = to_flow(dp);
struct ipu_dp_priv *priv = flow->priv;
u32 reg;
if (!dp->foreground)
return 0;
mutex_lock(&priv->mutex);
reg = readl(flow->base + DP_COM_CONF);
reg |= DP_COM_CONF_FG_EN;
writel(reg, flow->base + DP_COM_CONF);
ipu_srm_dp_update(priv->ipu, true);
mutex_unlock(&priv->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_dp_enable_channel);
void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
{
struct ipu_flow *flow = to_flow(dp);
struct ipu_dp_priv *priv = flow->priv;
u32 reg, csc;
dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
if (!dp->foreground)
return;
mutex_lock(&priv->mutex);
reg = readl(flow->base + DP_COM_CONF);
csc = reg & DP_COM_CONF_CSC_DEF_MASK;
reg &= ~DP_COM_CONF_CSC_DEF_MASK;
if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
reg |= DP_COM_CONF_CSC_DEF_BG;
reg &= ~DP_COM_CONF_FG_EN;
writel(reg, flow->base + DP_COM_CONF);
writel(0, flow->base + DP_FG_POS);
ipu_srm_dp_update(priv->ipu, sync);
mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL_GPL(ipu_dp_disable_channel);
void ipu_dp_disable(struct ipu_soc *ipu)
{
struct ipu_dp_priv *priv = ipu->dp_priv;
mutex_lock(&priv->mutex);
priv->use_count--;
if (!priv->use_count)
ipu_module_disable(priv->ipu, IPU_CONF_DP_EN);
if (priv->use_count < 0)
priv->use_count = 0;
mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL_GPL(ipu_dp_disable);
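/*
 * DP flow numbering as used by ipu_dp_get(): the flow argument encodes
 * 2 * flow_index + plane, so even values select the background plane and
 * odd values the foreground plane of the same flow (0/1 = sync,
 * 2/3 = async0, 4/5 = async1, matching ipu_dp_flow_base[]).
 */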
struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow)
{
struct ipu_dp_priv *priv = ipu->dp_priv;
struct ipu_dp *dp;
if ((flow >> 1) >= IPUV3_NUM_FLOWS)
return ERR_PTR(-EINVAL);
if (flow & 1)
dp = &priv->flow[flow >> 1].foreground;
else
dp = &priv->flow[flow >> 1].background;
if (dp->in_use)
return ERR_PTR(-EBUSY);
dp->in_use = true;
return dp;
}
EXPORT_SYMBOL_GPL(ipu_dp_get);
void ipu_dp_put(struct ipu_dp *dp)
{
dp->in_use = false;
}
EXPORT_SYMBOL_GPL(ipu_dp_put);
int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
{
struct ipu_dp_priv *priv;
int i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->ipu = ipu;
ipu->dp_priv = priv;
priv->base = devm_ioremap(dev, base, PAGE_SIZE);
if (!priv->base)
return -ENOMEM;
mutex_init(&priv->mutex);
for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
priv->flow[i].foreground.foreground = true;
priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
priv->flow[i].priv = priv;
}
return 0;
}
void ipu_dp_exit(struct ipu_soc *ipu)
{
}
| linux-master | drivers/gpu/ipu-v3/ipu-dp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
*/
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"
struct ipu_smfc {
struct ipu_smfc_priv *priv;
int chno;
bool inuse;
};
struct ipu_smfc_priv {
void __iomem *base;
spinlock_t lock;
struct ipu_soc *ipu;
struct ipu_smfc channel[4];
int use_count;
};
/* SMFC Registers */
#define SMFC_MAP 0x0000
#define SMFC_WMC 0x0004
#define SMFC_BS 0x0008
int ipu_smfc_set_burstsize(struct ipu_smfc *smfc, int burstsize)
{
struct ipu_smfc_priv *priv = smfc->priv;
unsigned long flags;
u32 val, shift;
spin_lock_irqsave(&priv->lock, flags);
shift = smfc->chno * 4;
val = readl(priv->base + SMFC_BS);
val &= ~(0xf << shift);
val |= burstsize << shift;
writel(val, priv->base + SMFC_BS);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_smfc_set_burstsize);
int ipu_smfc_map_channel(struct ipu_smfc *smfc, int csi_id, int mipi_id)
{
struct ipu_smfc_priv *priv = smfc->priv;
unsigned long flags;
u32 val, shift;
spin_lock_irqsave(&priv->lock, flags);
shift = smfc->chno * 3;
val = readl(priv->base + SMFC_MAP);
val &= ~(0x7 << shift);
val |= ((csi_id << 2) | mipi_id) << shift;
writel(val, priv->base + SMFC_MAP);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_smfc_map_channel);
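/*
 * SMFC_WMC packs one 6-bit watermark field per channel: the set level in
 * the low three bits and the clear level in the upper three bits of the
 * field. Channels 0 and 1 sit at bits 0 and 6, channels 2 and 3 at bits
 * 16 and 22, which is what the extra "+ 4" in the shift below accounts
 * for.
 */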
int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level)
{
struct ipu_smfc_priv *priv = smfc->priv;
unsigned long flags;
u32 val, shift;
spin_lock_irqsave(&priv->lock, flags);
shift = smfc->chno * 6 + (smfc->chno > 1 ? 4 : 0);
val = readl(priv->base + SMFC_WMC);
val &= ~(0x3f << shift);
val |= ((clr_level << 3) | set_level) << shift;
writel(val, priv->base + SMFC_WMC);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_smfc_set_watermark);
int ipu_smfc_enable(struct ipu_smfc *smfc)
{
struct ipu_smfc_priv *priv = smfc->priv;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
if (!priv->use_count)
ipu_module_enable(priv->ipu, IPU_CONF_SMFC_EN);
priv->use_count++;
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_smfc_enable);
int ipu_smfc_disable(struct ipu_smfc *smfc)
{
struct ipu_smfc_priv *priv = smfc->priv;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
priv->use_count--;
if (!priv->use_count)
ipu_module_disable(priv->ipu, IPU_CONF_SMFC_EN);
if (priv->use_count < 0)
priv->use_count = 0;
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_smfc_disable);
struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno)
{
struct ipu_smfc_priv *priv = ipu->smfc_priv;
struct ipu_smfc *smfc, *ret;
unsigned long flags;
if (chno >= 4)
return ERR_PTR(-EINVAL);
smfc = &priv->channel[chno];
ret = smfc;
spin_lock_irqsave(&priv->lock, flags);
if (smfc->inuse) {
ret = ERR_PTR(-EBUSY);
goto unlock;
}
smfc->inuse = true;
unlock:
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(ipu_smfc_get);
void ipu_smfc_put(struct ipu_smfc *smfc)
{
struct ipu_smfc_priv *priv = smfc->priv;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
smfc->inuse = false;
spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_smfc_put);
int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev,
unsigned long base)
{
struct ipu_smfc_priv *priv;
int i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ipu->smfc_priv = priv;
spin_lock_init(&priv->lock);
priv->ipu = ipu;
priv->base = devm_ioremap(dev, base, PAGE_SIZE);
if (!priv->base)
return -ENOMEM;
for (i = 0; i < 4; i++) {
priv->channel[i].priv = priv;
priv->channel[i].chno = i;
}
pr_debug("%s: ioremap 0x%08lx -> %p\n", __func__, base, priv->base);
return 0;
}
void ipu_smfc_exit(struct ipu_soc *ipu)
{
}
| linux-master | drivers/gpu/ipu-v3/ipu-smfc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 Mentor Graphics Inc.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include "ipu-prv.h"
#define QUANT_MAP(q) \
((q) == V4L2_QUANTIZATION_FULL_RANGE || \
(q) == V4L2_QUANTIZATION_DEFAULT ? 0 : 1)
/* identity matrix */
static const struct ipu_ic_csc_params identity = {
.coeff = {
{ 128, 0, 0, },
{ 0, 128, 0, },
{ 0, 0, 128, },
},
.offset = { 0, 0, 0, },
.scale = 2,
};
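/*
 * Fixed-point layout implied by the parameter tables in this file: a
 * matrix coefficient represents coeff / 2^(9 - scale) and an offset
 * represents offset / 2^(3 - scale). The identity matrix above encodes
 * 1.0 as 128 with scale = 2 (128 / 2^7), and e.g. rgbf2rgbl below encodes
 * 0.8588 as 220 with scale = 1 (220 / 2^8) and +16 as 64 (64 / 2^2).
 */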
/*
* RGB full-range to RGB limited-range
*
* R_lim = 0.8588 * R_full + 16
* G_lim = 0.8588 * G_full + 16
* B_lim = 0.8588 * B_full + 16
*/
static const struct ipu_ic_csc_params rgbf2rgbl = {
.coeff = {
{ 220, 0, 0, },
{ 0, 220, 0, },
{ 0, 0, 220, },
},
.offset = { 64, 64, 64, },
.scale = 1,
};
/*
* RGB limited-range to RGB full-range
*
* R_full = 1.1644 * (R_lim - 16)
* G_full = 1.1644 * (G_lim - 16)
* B_full = 1.1644 * (B_lim - 16)
*/
static const struct ipu_ic_csc_params rgbl2rgbf = {
.coeff = {
{ 149, 0, 0, },
{ 0, 149, 0, },
{ 0, 0, 149, },
},
.offset = { -37, -37, -37, },
.scale = 2,
};
/*
* YUV full-range to YUV limited-range
*
* Y_lim = 0.8588 * Y_full + 16
* Cb_lim = 0.8784 * (Cb_full - 128) + 128
* Cr_lim = 0.8784 * (Cr_full - 128) + 128
*/
static const struct ipu_ic_csc_params yuvf2yuvl = {
.coeff = {
{ 220, 0, 0, },
{ 0, 225, 0, },
{ 0, 0, 225, },
},
.offset = { 64, 62, 62, },
.scale = 1,
.sat = true,
};
/*
* YUV limited-range to YUV full-range
*
* Y_full = 1.1644 * (Y_lim - 16)
* Cb_full = 1.1384 * (Cb_lim - 128) + 128
* Cr_full = 1.1384 * (Cr_lim - 128) + 128
*/
static const struct ipu_ic_csc_params yuvl2yuvf = {
.coeff = {
{ 149, 0, 0, },
{ 0, 146, 0, },
{ 0, 0, 146, },
},
.offset = { -37, -35, -35, },
.scale = 2,
};
static const struct ipu_ic_csc_params *rgb2rgb[] = {
&identity,
&rgbf2rgbl,
&rgbl2rgbf,
&identity,
};
static const struct ipu_ic_csc_params *yuv2yuv[] = {
&identity,
&yuvf2yuvl,
&yuvl2yuvf,
&identity,
};
/*
* BT.601 RGB full-range to YUV full-range
*
* Y = .2990 * R + .5870 * G + .1140 * B
* U = -.1687 * R - .3313 * G + .5000 * B + 128
* V = .5000 * R - .4187 * G - .0813 * B + 128
*/
static const struct ipu_ic_csc_params rgbf2yuvf_601 = {
.coeff = {
{ 77, 150, 29, },
{ -43, -85, 128, },
{ 128, -107, -21, },
},
.offset = { 0, 512, 512, },
.scale = 1,
};
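/*
 * Worked example for the table above (scale = 1, so coefficients are in
 * units of 1/256 and offsets in units of 1/4): 0.2990 * 256 ~= 77,
 * 0.5870 * 256 ~= 150, 0.1140 * 256 ~= 29, and the +128 chroma offset
 * becomes 128 * 4 = 512.
 */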
/* BT.601 RGB full-range to YUV limited-range */
static const struct ipu_ic_csc_params rgbf2yuvl_601 = {
.coeff = {
{ 66, 129, 25, },
{ -38, -74, 112, },
{ 112, -94, -18, },
},
.offset = { 64, 512, 512, },
.scale = 1,
.sat = true,
};
/* BT.601 RGB limited-range to YUV full-range */
static const struct ipu_ic_csc_params rgbl2yuvf_601 = {
.coeff = {
{ 89, 175, 34, },
{ -50, -99, 149, },
{ 149, -125, -24, },
},
.offset = { -75, 512, 512, },
.scale = 1,
};
/* BT.601 RGB limited-range to YUV limited-range */
static const struct ipu_ic_csc_params rgbl2yuvl_601 = {
.coeff = {
{ 77, 150, 29, },
{ -44, -87, 131, },
{ 131, -110, -21, },
},
.offset = { 0, 512, 512, },
.scale = 1,
.sat = true,
};
/*
* BT.601 YUV full-range to RGB full-range
*
* R = 1. * Y + 0 * (Cb - 128) + 1.4020 * (Cr - 128)
* G = 1. * Y - .3441 * (Cb - 128) - .7141 * (Cr - 128)
* B = 1. * Y + 1.7720 * (Cb - 128) + 0 * (Cr - 128)
*
* equivalently (factoring out the offsets):
*
* R = 1. * Y + 0 * Cb + 1.4020 * Cr - 179.456
* G = 1. * Y - .3441 * Cb - .7141 * Cr + 135.450
* B = 1. * Y + 1.7720 * Cb + 0 * Cr - 226.816
*/
static const struct ipu_ic_csc_params yuvf2rgbf_601 = {
.coeff = {
{ 128, 0, 179, },
{ 128, -44, -91, },
{ 128, 227, 0, },
},
.offset = { -359, 271, -454, },
.scale = 2,
};
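/*
 * Worked example for the table above (scale = 2, so coefficients are in
 * units of 1/128 and offsets in units of 1/2): 1.4020 * 128 ~= 179,
 * 1.7720 * 128 ~= 227, and the factored-out constants -179.456, 135.450
 * and -226.816 become -359, 271 and -454.
 */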
/* BT.601 YUV full-range to RGB limited-range */
static const struct ipu_ic_csc_params yuvf2rgbl_601 = {
.coeff = {
{ 110, 0, 154, },
{ 110, -38, -78, },
{ 110, 195, 0, },
},
.offset = { -276, 265, -358, },
.scale = 2,
};
/* BT.601 YUV limited-range to RGB full-range */
static const struct ipu_ic_csc_params yuvl2rgbf_601 = {
.coeff = {
{ 75, 0, 102, },
{ 75, -25, -52, },
{ 75, 129, 0, },
},
.offset = { -223, 136, -277, },
.scale = 3,
};
/* BT.601 YUV limited-range to RGB limited-range */
static const struct ipu_ic_csc_params yuvl2rgbl_601 = {
.coeff = {
{ 128, 0, 175, },
{ 128, -43, -89, },
{ 128, 222, 0, },
},
.offset = { -351, 265, -443, },
.scale = 2,
};
static const struct ipu_ic_csc_params *rgb2yuv_601[] = {
&rgbf2yuvf_601,
&rgbf2yuvl_601,
&rgbl2yuvf_601,
&rgbl2yuvl_601,
};
static const struct ipu_ic_csc_params *yuv2rgb_601[] = {
&yuvf2rgbf_601,
&yuvf2rgbl_601,
&yuvl2rgbf_601,
&yuvl2rgbl_601,
};
/*
* REC.709 encoding from RGB full range to YUV full range:
*
* Y = .2126 * R + .7152 * G + .0722 * B
* U = -.1146 * R - .3854 * G + .5000 * B + 128
* V = .5000 * R - .4542 * G - .0458 * B + 128
*/
static const struct ipu_ic_csc_params rgbf2yuvf_709 = {
.coeff = {
{ 54, 183, 19 },
{ -29, -99, 128 },
{ 128, -116, -12 },
},
.offset = { 0, 512, 512 },
.scale = 1,
};
/* Rec.709 RGB full-range to YUV limited-range */
static const struct ipu_ic_csc_params rgbf2yuvl_709 = {
.coeff = {
{ 47, 157, 16, },
{ -26, -87, 112, },
{ 112, -102, -10, },
},
.offset = { 64, 512, 512, },
.scale = 1,
.sat = true,
};
/* Rec.709 RGB limited-range to YUV full-range */
static const struct ipu_ic_csc_params rgbl2yuvf_709 = {
.coeff = {
{ 63, 213, 22, },
{ -34, -115, 149, },
{ 149, -135, -14, },
},
.offset = { -75, 512, 512, },
.scale = 1,
};
/* Rec.709 RGB limited-range to YUV limited-range */
static const struct ipu_ic_csc_params rgbl2yuvl_709 = {
.coeff = {
{ 54, 183, 18, },
{ -30, -101, 131, },
{ 131, -119, -12, },
},
.offset = { 0, 512, 512, },
.scale = 1,
.sat = true,
};
/*
* Inverse REC.709 encoding from YUV full range to RGB full range:
*
* R = 1. * Y + 0 * (Cb - 128) + 1.5748 * (Cr - 128)
* G = 1. * Y - .1873 * (Cb - 128) - .4681 * (Cr - 128)
* B = 1. * Y + 1.8556 * (Cb - 128) + 0 * (Cr - 128)
*
* equivalently (factoring out the offsets):
*
* R = 1. * Y + 0 * Cb + 1.5748 * Cr - 201.574
* G = 1. * Y - .1873 * Cb - .4681 * Cr + 83.891
* B = 1. * Y + 1.8556 * Cb + 0 * Cr - 237.517
*/
static const struct ipu_ic_csc_params yuvf2rgbf_709 = {
.coeff = {
{ 128, 0, 202 },
{ 128, -24, -60 },
{ 128, 238, 0 },
},
.offset = { -403, 168, -475 },
.scale = 2,
};
/* Rec.709 YUV full-range to RGB limited-range */
static const struct ipu_ic_csc_params yuvf2rgbl_709 = {
.coeff = {
{ 110, 0, 173, },
{ 110, -21, -51, },
{ 110, 204, 0, },
},
.offset = { -314, 176, -376, },
.scale = 2,
};
/* Rec.709 YUV limited-range to RGB full-range */
static const struct ipu_ic_csc_params yuvl2rgbf_709 = {
.coeff = {
{ 75, 0, 115, },
{ 75, -14, -34, },
{ 75, 135, 0, },
},
.offset = { -248, 77, -289, },
.scale = 3,
};
/* Rec.709 YUV limited-range to RGB limited-range */
static const struct ipu_ic_csc_params yuvl2rgbl_709 = {
.coeff = {
{ 128, 0, 197, },
{ 128, -23, -59, },
{ 128, 232, 0, },
},
.offset = { -394, 164, -464, },
.scale = 2,
};
static const struct ipu_ic_csc_params *rgb2yuv_709[] = {
&rgbf2yuvf_709,
&rgbf2yuvl_709,
&rgbl2yuvf_709,
&rgbl2yuvl_709,
};
static const struct ipu_ic_csc_params *yuv2rgb_709[] = {
&yuvf2rgbf_709,
&yuvf2rgbl_709,
&yuvl2rgbf_709,
&yuvl2rgbl_709,
};
static int calc_csc_coeffs(struct ipu_ic_csc *csc)
{
const struct ipu_ic_csc_params **params_tbl;
int tbl_idx;
tbl_idx = (QUANT_MAP(csc->in_cs.quant) << 1) |
QUANT_MAP(csc->out_cs.quant);
if (csc->in_cs.cs == csc->out_cs.cs) {
csc->params = (csc->in_cs.cs == IPUV3_COLORSPACE_YUV) ?
*yuv2yuv[tbl_idx] : *rgb2rgb[tbl_idx];
return 0;
}
/* YUV <-> RGB encoding is required */
switch (csc->out_cs.enc) {
case V4L2_YCBCR_ENC_601:
params_tbl = (csc->in_cs.cs == IPUV3_COLORSPACE_YUV) ?
yuv2rgb_601 : rgb2yuv_601;
break;
case V4L2_YCBCR_ENC_709:
params_tbl = (csc->in_cs.cs == IPUV3_COLORSPACE_YUV) ?
yuv2rgb_709 : rgb2yuv_709;
break;
default:
return -ENOTSUPP;
}
csc->params = *params_tbl[tbl_idx];
return 0;
}
int __ipu_ic_calc_csc(struct ipu_ic_csc *csc)
{
return calc_csc_coeffs(csc);
}
EXPORT_SYMBOL_GPL(__ipu_ic_calc_csc);
int ipu_ic_calc_csc(struct ipu_ic_csc *csc,
enum v4l2_ycbcr_encoding in_enc,
enum v4l2_quantization in_quant,
enum ipu_color_space in_cs,
enum v4l2_ycbcr_encoding out_enc,
enum v4l2_quantization out_quant,
enum ipu_color_space out_cs)
{
ipu_ic_fill_colorspace(&csc->in_cs, in_enc, in_quant, in_cs);
ipu_ic_fill_colorspace(&csc->out_cs, out_enc, out_quant, out_cs);
return __ipu_ic_calc_csc(csc);
}
EXPORT_SYMBOL_GPL(ipu_ic_calc_csc);
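/*
 * Illustrative usage (a sketch, not copied from a specific caller):
 *
 *	struct ipu_ic_csc csc;
 *	int ret = ipu_ic_calc_csc(&csc,
 *				  V4L2_YCBCR_ENC_601,
 *				  V4L2_QUANTIZATION_FULL_RANGE,
 *				  IPUV3_COLORSPACE_YUV,
 *				  V4L2_YCBCR_ENC_601,
 *				  V4L2_QUANTIZATION_FULL_RANGE,
 *				  IPUV3_COLORSPACE_RGB);
 *
 * picks yuvf2rgbf_601 (table index 0), i.e. full-range BT.601 YUV to
 * full-range RGB.
 */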
| linux-master | drivers/gpu/ipu-v3/ipu-ic-csc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012 Mentor Graphics Inc.
* Copyright 2005-2012 Freescale Semiconductor, Inc. All Rights Reserved.
*/
#include <linux/types.h>
#include <linux/bitrev.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <drm/drm_fourcc.h>
#include "ipu-prv.h"
struct ipu_cpmem_word {
u32 data[5];
u32 res[3];
};
struct ipu_ch_param {
struct ipu_cpmem_word word[2];
};
struct ipu_cpmem {
struct ipu_ch_param __iomem *base;
u32 module;
spinlock_t lock;
int use_count;
struct ipu_soc *ipu;
};
#define IPU_CPMEM_WORD(word, ofs, size) ((((word) * 160 + (ofs)) << 8) | (size))
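/*
 * IPU_CPMEM_WORD() encodes a field descriptor: the low 8 bits hold the
 * field width in bits, the upper bits hold word * 160 + bit offset within
 * that 160-bit CPMEM word. For example IPU_FIELD_UBO below is a 22-bit
 * field starting at bit 46 of word 0; fields that straddle a 32-bit data
 * word boundary are handled by ipu_ch_param_write_field() and
 * ipu_ch_param_read_field().
 */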
#define IPU_FIELD_UBO IPU_CPMEM_WORD(0, 46, 22)
#define IPU_FIELD_VBO IPU_CPMEM_WORD(0, 68, 22)
#define IPU_FIELD_IOX IPU_CPMEM_WORD(0, 90, 4)
#define IPU_FIELD_RDRW IPU_CPMEM_WORD(0, 94, 1)
#define IPU_FIELD_SO IPU_CPMEM_WORD(0, 113, 1)
#define IPU_FIELD_SLY IPU_CPMEM_WORD(1, 102, 14)
#define IPU_FIELD_SLUV IPU_CPMEM_WORD(1, 128, 14)
#define IPU_FIELD_XV IPU_CPMEM_WORD(0, 0, 10)
#define IPU_FIELD_YV IPU_CPMEM_WORD(0, 10, 9)
#define IPU_FIELD_XB IPU_CPMEM_WORD(0, 19, 13)
#define IPU_FIELD_YB IPU_CPMEM_WORD(0, 32, 12)
#define IPU_FIELD_NSB_B IPU_CPMEM_WORD(0, 44, 1)
#define IPU_FIELD_CF IPU_CPMEM_WORD(0, 45, 1)
#define IPU_FIELD_SX IPU_CPMEM_WORD(0, 46, 12)
#define IPU_FIELD_SY IPU_CPMEM_WORD(0, 58, 11)
#define IPU_FIELD_NS IPU_CPMEM_WORD(0, 69, 10)
#define IPU_FIELD_SDX IPU_CPMEM_WORD(0, 79, 7)
#define IPU_FIELD_SM IPU_CPMEM_WORD(0, 86, 10)
#define IPU_FIELD_SCC IPU_CPMEM_WORD(0, 96, 1)
#define IPU_FIELD_SCE IPU_CPMEM_WORD(0, 97, 1)
#define IPU_FIELD_SDY IPU_CPMEM_WORD(0, 98, 7)
#define IPU_FIELD_SDRX IPU_CPMEM_WORD(0, 105, 1)
#define IPU_FIELD_SDRY IPU_CPMEM_WORD(0, 106, 1)
#define IPU_FIELD_BPP IPU_CPMEM_WORD(0, 107, 3)
#define IPU_FIELD_DEC_SEL IPU_CPMEM_WORD(0, 110, 2)
#define IPU_FIELD_DIM IPU_CPMEM_WORD(0, 112, 1)
#define IPU_FIELD_BNDM IPU_CPMEM_WORD(0, 114, 3)
#define IPU_FIELD_BM IPU_CPMEM_WORD(0, 117, 2)
#define IPU_FIELD_ROT IPU_CPMEM_WORD(0, 119, 1)
#define IPU_FIELD_ROT_HF_VF IPU_CPMEM_WORD(0, 119, 3)
#define IPU_FIELD_HF IPU_CPMEM_WORD(0, 120, 1)
#define IPU_FIELD_VF IPU_CPMEM_WORD(0, 121, 1)
#define IPU_FIELD_THE IPU_CPMEM_WORD(0, 122, 1)
#define IPU_FIELD_CAP IPU_CPMEM_WORD(0, 123, 1)
#define IPU_FIELD_CAE IPU_CPMEM_WORD(0, 124, 1)
#define IPU_FIELD_FW IPU_CPMEM_WORD(0, 125, 13)
#define IPU_FIELD_FH IPU_CPMEM_WORD(0, 138, 12)
#define IPU_FIELD_EBA0 IPU_CPMEM_WORD(1, 0, 29)
#define IPU_FIELD_EBA1 IPU_CPMEM_WORD(1, 29, 29)
#define IPU_FIELD_ILO IPU_CPMEM_WORD(1, 58, 20)
#define IPU_FIELD_NPB IPU_CPMEM_WORD(1, 78, 7)
#define IPU_FIELD_PFS IPU_CPMEM_WORD(1, 85, 4)
#define IPU_FIELD_ALU IPU_CPMEM_WORD(1, 89, 1)
#define IPU_FIELD_ALBM IPU_CPMEM_WORD(1, 90, 3)
#define IPU_FIELD_ID IPU_CPMEM_WORD(1, 93, 2)
#define IPU_FIELD_TH IPU_CPMEM_WORD(1, 95, 7)
#define IPU_FIELD_SL IPU_CPMEM_WORD(1, 102, 14)
#define IPU_FIELD_WID0 IPU_CPMEM_WORD(1, 116, 3)
#define IPU_FIELD_WID1 IPU_CPMEM_WORD(1, 119, 3)
#define IPU_FIELD_WID2 IPU_CPMEM_WORD(1, 122, 3)
#define IPU_FIELD_WID3 IPU_CPMEM_WORD(1, 125, 3)
#define IPU_FIELD_OFS0 IPU_CPMEM_WORD(1, 128, 5)
#define IPU_FIELD_OFS1 IPU_CPMEM_WORD(1, 133, 5)
#define IPU_FIELD_OFS2 IPU_CPMEM_WORD(1, 138, 5)
#define IPU_FIELD_OFS3 IPU_CPMEM_WORD(1, 143, 5)
#define IPU_FIELD_SXYS IPU_CPMEM_WORD(1, 148, 1)
#define IPU_FIELD_CRE IPU_CPMEM_WORD(1, 149, 1)
#define IPU_FIELD_DEC_SEL2 IPU_CPMEM_WORD(1, 150, 1)
static inline struct ipu_ch_param __iomem *
ipu_get_cpmem(struct ipuv3_channel *ch)
{
struct ipu_cpmem *cpmem = ch->ipu->cpmem_priv;
return cpmem->base + ch->num;
}
static void ipu_ch_param_write_field(struct ipuv3_channel *ch, u32 wbs, u32 v)
{
struct ipu_ch_param __iomem *base = ipu_get_cpmem(ch);
u32 bit = (wbs >> 8) % 160;
u32 size = wbs & 0xff;
u32 word = (wbs >> 8) / 160;
u32 i = bit / 32;
u32 ofs = bit % 32;
u32 mask = (1 << size) - 1;
u32 val;
pr_debug("%s %d %d %d\n", __func__, word, bit , size);
val = readl(&base->word[word].data[i]);
val &= ~(mask << ofs);
val |= v << ofs;
writel(val, &base->word[word].data[i]);
if ((bit + size - 1) / 32 > i) {
val = readl(&base->word[word].data[i + 1]);
val &= ~(mask >> (ofs ? (32 - ofs) : 0));
val |= v >> (ofs ? (32 - ofs) : 0);
writel(val, &base->word[word].data[i + 1]);
}
}
static u32 ipu_ch_param_read_field(struct ipuv3_channel *ch, u32 wbs)
{
struct ipu_ch_param __iomem *base = ipu_get_cpmem(ch);
u32 bit = (wbs >> 8) % 160;
u32 size = wbs & 0xff;
u32 word = (wbs >> 8) / 160;
u32 i = bit / 32;
u32 ofs = bit % 32;
u32 mask = (1 << size) - 1;
u32 val = 0;
pr_debug("%s %d %d %d\n", __func__, word, bit , size);
val = (readl(&base->word[word].data[i]) >> ofs) & mask;
if ((bit + size - 1) / 32 > i) {
u32 tmp;
tmp = readl(&base->word[word].data[i + 1]);
tmp &= mask >> (ofs ? (32 - ofs) : 0);
val |= tmp << (ofs ? (32 - ofs) : 0);
}
return val;
}
/*
* The V4L2 spec defines packed RGB formats in memory byte order, which from
* the point of view of the IPU corresponds to little-endian words with the first
* component in the least significant bits.
* The DRM pixel formats and IPU internal representation are ordered the other
* way around, with the first named component ordered at the most significant
* bits. Further, V4L2 formats are not well defined:
* https://linuxtv.org/downloads/v4l-dvb-apis/packed-rgb.html
* We choose the interpretation which matches GStreamer behavior.
*/
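/*
* Worked example of the convention above: V4L2_PIX_FMT_RGB24 stores the bytes
* R, G, B in memory; read as a little-endian word that is [23:0] B:G:R, which
* DRM names DRM_FORMAT_BGR888, hence the mapping chosen in the switch below.
*/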
static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
{
switch (pixelformat) {
case V4L2_PIX_FMT_RGB565:
/*
* Here we choose the 'corrected' interpretation of RGBP, a
* little-endian 16-bit word with the red component at the most
* significant bits:
* g[2:0]b[4:0] r[4:0]g[5:3] <=> [15:0] R:G:B
*/
return DRM_FORMAT_RGB565;
case V4L2_PIX_FMT_BGR24:
/* B G R <=> [23:0] R:G:B */
return DRM_FORMAT_RGB888;
case V4L2_PIX_FMT_RGB24:
/* R G B <=> [23:0] B:G:R */
return DRM_FORMAT_BGR888;
case V4L2_PIX_FMT_BGR32:
/* B G R A <=> [31:0] A:R:G:B */
return DRM_FORMAT_XRGB8888;
case V4L2_PIX_FMT_RGB32:
/* R G B A <=> [31:0] A:B:G:R */
return DRM_FORMAT_XBGR8888;
case V4L2_PIX_FMT_ABGR32:
/* B G R A <=> [31:0] A:R:G:B */
return DRM_FORMAT_ARGB8888;
case V4L2_PIX_FMT_XBGR32:
/* B G R X <=> [31:0] X:R:G:B */
return DRM_FORMAT_XRGB8888;
case V4L2_PIX_FMT_BGRA32:
/* A B G R <=> [31:0] R:G:B:A */
return DRM_FORMAT_RGBA8888;
case V4L2_PIX_FMT_BGRX32:
/* X B G R <=> [31:0] R:G:B:X */
return DRM_FORMAT_RGBX8888;
case V4L2_PIX_FMT_RGBA32:
/* R G B A <=> [31:0] A:B:G:R */
return DRM_FORMAT_ABGR8888;
case V4L2_PIX_FMT_RGBX32:
/* R G B X <=> [31:0] X:B:G:R */
return DRM_FORMAT_XBGR8888;
case V4L2_PIX_FMT_ARGB32:
/* A R G B <=> [31:0] B:G:R:A */
return DRM_FORMAT_BGRA8888;
case V4L2_PIX_FMT_XRGB32:
/* X R G B <=> [31:0] B:G:R:X */
return DRM_FORMAT_BGRX8888;
case V4L2_PIX_FMT_UYVY:
return DRM_FORMAT_UYVY;
case V4L2_PIX_FMT_YUYV:
return DRM_FORMAT_YUYV;
case V4L2_PIX_FMT_YUV420:
return DRM_FORMAT_YUV420;
case V4L2_PIX_FMT_YUV422P:
return DRM_FORMAT_YUV422;
case V4L2_PIX_FMT_YVU420:
return DRM_FORMAT_YVU420;
case V4L2_PIX_FMT_NV12:
return DRM_FORMAT_NV12;
case V4L2_PIX_FMT_NV16:
return DRM_FORMAT_NV16;
}
return -EINVAL;
}
void ipu_cpmem_zero(struct ipuv3_channel *ch)
{
struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch);
void __iomem *base = p;
int i;
for (i = 0; i < sizeof(*p) / sizeof(u32); i++)
writel(0, base + i * sizeof(u32));
}
EXPORT_SYMBOL_GPL(ipu_cpmem_zero);
void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres)
{
ipu_ch_param_write_field(ch, IPU_FIELD_FW, xres - 1);
ipu_ch_param_write_field(ch, IPU_FIELD_FH, yres - 1);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_resolution);
void ipu_cpmem_skip_odd_chroma_rows(struct ipuv3_channel *ch)
{
ipu_ch_param_write_field(ch, IPU_FIELD_RDRW, 1);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_skip_odd_chroma_rows);
void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride)
{
ipu_ch_param_write_field(ch, IPU_FIELD_SLY, stride - 1);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_stride);
void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch)
{
struct ipu_soc *ipu = ch->ipu;
u32 val;
if (ipu->ipu_type == IPUV3EX)
ipu_ch_param_write_field(ch, IPU_FIELD_ID, 1);
val = ipu_idmac_read(ipu, IDMAC_CHA_PRI(ch->num));
val |= 1 << (ch->num % 32);
ipu_idmac_write(ipu, val, IDMAC_CHA_PRI(ch->num));
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority);
void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf)
{
WARN_ON_ONCE(buf & 0x7);
if (bufnum)
ipu_ch_param_write_field(ch, IPU_FIELD_EBA1, buf >> 3);
else
ipu_ch_param_write_field(ch, IPU_FIELD_EBA0, buf >> 3);
}
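/*
* Note on the encoding above: EBA0/EBA1 are 29-bit fields holding the buffer
* address divided by 8, so buffers must be 8-byte aligned (hence the WARN_ON)
* and 29 + 3 bits still cover the full 32-bit address range.
*/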
EXPORT_SYMBOL_GPL(ipu_cpmem_set_buffer);
void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off)
{
WARN_ON_ONCE((u_off & 0x7) || (v_off & 0x7));
ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_off / 8);
ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_off / 8);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_uv_offset);
void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride,
u32 pixelformat)
{
u32 ilo, sly, sluv;
if (stride < 0) {
stride = -stride;
ilo = 0x100000 - (stride / 8);
} else {
ilo = stride / 8;
}
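/*
* ILO is a 20-bit field (see IPU_FIELD_ILO), so a negative interlace offset
* is stored as its 20-bit two's complement: 0x100000 - stride / 8 encodes
* -(stride / 8).
*/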
sly = (stride * 2) - 1;
switch (pixelformat) {
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
sluv = stride / 2 - 1;
break;
case V4L2_PIX_FMT_NV12:
sluv = stride - 1;
break;
case V4L2_PIX_FMT_YUV422P:
sluv = stride - 1;
break;
case V4L2_PIX_FMT_NV16:
sluv = stride * 2 - 1;
break;
default:
sluv = 0;
break;
}
ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
ipu_ch_param_write_field(ch, IPU_FIELD_ILO, ilo);
ipu_ch_param_write_field(ch, IPU_FIELD_SLY, sly);
if (sluv)
ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, sluv);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan);
void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id)
{
id &= 0x3;
ipu_ch_param_write_field(ch, IPU_FIELD_ID, id);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_axi_id);
int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch)
{
return ipu_ch_param_read_field(ch, IPU_FIELD_NPB) + 1;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_get_burstsize);
void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize)
{
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_burstsize);
void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch)
{
ipu_ch_param_write_field(ch, IPU_FIELD_BM, 1);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_block_mode);
void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
enum ipu_rotate_mode rot)
{
u32 temp_rot = bitrev8(rot) >> 5;
ipu_ch_param_write_field(ch, IPU_FIELD_ROT_HF_VF, temp_rot);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_rotation);
int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
const struct ipu_rgb *rgb)
{
int bpp = 0, npb = 0, ro, go, bo, to;
ro = rgb->bits_per_pixel - rgb->red.length - rgb->red.offset;
go = rgb->bits_per_pixel - rgb->green.length - rgb->green.offset;
bo = rgb->bits_per_pixel - rgb->blue.length - rgb->blue.offset;
to = rgb->bits_per_pixel - rgb->transp.length - rgb->transp.offset;
ipu_ch_param_write_field(ch, IPU_FIELD_WID0, rgb->red.length - 1);
ipu_ch_param_write_field(ch, IPU_FIELD_OFS0, ro);
ipu_ch_param_write_field(ch, IPU_FIELD_WID1, rgb->green.length - 1);
ipu_ch_param_write_field(ch, IPU_FIELD_OFS1, go);
ipu_ch_param_write_field(ch, IPU_FIELD_WID2, rgb->blue.length - 1);
ipu_ch_param_write_field(ch, IPU_FIELD_OFS2, bo);
if (rgb->transp.length) {
ipu_ch_param_write_field(ch, IPU_FIELD_WID3,
rgb->transp.length - 1);
ipu_ch_param_write_field(ch, IPU_FIELD_OFS3, to);
} else {
ipu_ch_param_write_field(ch, IPU_FIELD_WID3, 7);
ipu_ch_param_write_field(ch, IPU_FIELD_OFS3,
rgb->bits_per_pixel);
}
switch (rgb->bits_per_pixel) {
case 32:
bpp = 0;
npb = 15;
break;
case 24:
bpp = 1;
npb = 19;
break;
case 16:
bpp = 3;
npb = 31;
break;
case 8:
bpp = 5;
npb = 63;
break;
default:
return -EINVAL;
}
ipu_ch_param_write_field(ch, IPU_FIELD_BPP, bpp);
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, npb);
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 7); /* rgb mode */
return 0;
}
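/*
* Worked example for the offset math above, using def_rgb_16 (RGB565, red at
* offset 11, length 5): ro = 16 - 5 - 11 = 0, so WID0 = 4 and OFS0 = 0. The
* per-component offsets written here are thus counted down from the most
* significant bit of the pixel rather than up from bit 0.
*/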
EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_rgb);
int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width)
{
int bpp = 0, npb = 0;
switch (width) {
case 32:
bpp = 0;
npb = 15;
break;
case 24:
bpp = 1;
npb = 19;
break;
case 16:
bpp = 3;
npb = 31;
break;
case 8:
bpp = 5;
npb = 63;
break;
default:
return -EINVAL;
}
ipu_ch_param_write_field(ch, IPU_FIELD_BPP, bpp);
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, npb);
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 6); /* raw mode */
return 0;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough);
void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
{
switch (pixel_format) {
case V4L2_PIX_FMT_UYVY:
ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);/* pix fmt */
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
break;
case V4L2_PIX_FMT_YUYV:
ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);/* pix fmt */
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
break;
}
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
unsigned int uv_stride,
unsigned int u_offset, unsigned int v_offset)
{
WARN_ON_ONCE((u_offset & 0x7) || (v_offset & 0x7));
ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
static const struct ipu_rgb def_xrgb_32 = {
.red = { .offset = 16, .length = 8, },
.green = { .offset = 8, .length = 8, },
.blue = { .offset = 0, .length = 8, },
.transp = { .offset = 24, .length = 8, },
.bits_per_pixel = 32,
};
static const struct ipu_rgb def_xbgr_32 = {
.red = { .offset = 0, .length = 8, },
.green = { .offset = 8, .length = 8, },
.blue = { .offset = 16, .length = 8, },
.transp = { .offset = 24, .length = 8, },
.bits_per_pixel = 32,
};
static const struct ipu_rgb def_rgbx_32 = {
.red = { .offset = 24, .length = 8, },
.green = { .offset = 16, .length = 8, },
.blue = { .offset = 8, .length = 8, },
.transp = { .offset = 0, .length = 8, },
.bits_per_pixel = 32,
};
static const struct ipu_rgb def_bgrx_32 = {
.red = { .offset = 8, .length = 8, },
.green = { .offset = 16, .length = 8, },
.blue = { .offset = 24, .length = 8, },
.transp = { .offset = 0, .length = 8, },
.bits_per_pixel = 32,
};
static const struct ipu_rgb def_rgb_24 = {
.red = { .offset = 16, .length = 8, },
.green = { .offset = 8, .length = 8, },
.blue = { .offset = 0, .length = 8, },
.transp = { .offset = 0, .length = 0, },
.bits_per_pixel = 24,
};
static const struct ipu_rgb def_bgr_24 = {
.red = { .offset = 0, .length = 8, },
.green = { .offset = 8, .length = 8, },
.blue = { .offset = 16, .length = 8, },
.transp = { .offset = 0, .length = 0, },
.bits_per_pixel = 24,
};
static const struct ipu_rgb def_rgb_16 = {
.red = { .offset = 11, .length = 5, },
.green = { .offset = 5, .length = 6, },
.blue = { .offset = 0, .length = 5, },
.transp = { .offset = 0, .length = 0, },
.bits_per_pixel = 16,
};
static const struct ipu_rgb def_bgr_16 = {
.red = { .offset = 0, .length = 5, },
.green = { .offset = 5, .length = 6, },
.blue = { .offset = 11, .length = 5, },
.transp = { .offset = 0, .length = 0, },
.bits_per_pixel = 16,
};
static const struct ipu_rgb def_argb_16 = {
.red = { .offset = 10, .length = 5, },
.green = { .offset = 5, .length = 5, },
.blue = { .offset = 0, .length = 5, },
.transp = { .offset = 15, .length = 1, },
.bits_per_pixel = 16,
};
static const struct ipu_rgb def_argb_16_4444 = {
.red = { .offset = 8, .length = 4, },
.green = { .offset = 4, .length = 4, },
.blue = { .offset = 0, .length = 4, },
.transp = { .offset = 12, .length = 4, },
.bits_per_pixel = 16,
};
static const struct ipu_rgb def_abgr_16 = {
.red = { .offset = 0, .length = 5, },
.green = { .offset = 5, .length = 5, },
.blue = { .offset = 10, .length = 5, },
.transp = { .offset = 15, .length = 1, },
.bits_per_pixel = 16,
};
static const struct ipu_rgb def_rgba_16 = {
.red = { .offset = 11, .length = 5, },
.green = { .offset = 6, .length = 5, },
.blue = { .offset = 1, .length = 5, },
.transp = { .offset = 0, .length = 1, },
.bits_per_pixel = 16,
};
static const struct ipu_rgb def_bgra_16 = {
.red = { .offset = 1, .length = 5, },
.green = { .offset = 6, .length = 5, },
.blue = { .offset = 11, .length = 5, },
.transp = { .offset = 0, .length = 1, },
.bits_per_pixel = 16,
};
#define Y_OFFSET(pix, x, y) ((x) + pix->bytesperline * (y))
#define U_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
(pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
#define V_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
(pix->bytesperline * pix->height / 4) + \
(pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
#define U2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
(pix->bytesperline * (y) / 2) + (x) / 2)
#define V2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
(pix->bytesperline * pix->height / 2) + \
(pix->bytesperline * (y) / 2) + (x) / 2)
#define UV_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
(pix->bytesperline * ((y) / 2)) + (x))
#define UV2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
(pix->bytesperline * y) + (x))
#define NUM_ALPHA_CHANNELS 7
/* See Table 37-12. Alpha channels mapping. */
static int ipu_channel_albm(int ch_num)
{
switch (ch_num) {
case IPUV3_CHANNEL_G_MEM_IC_PRP_VF: return 0;
case IPUV3_CHANNEL_G_MEM_IC_PP: return 1;
case IPUV3_CHANNEL_MEM_FG_SYNC: return 2;
case IPUV3_CHANNEL_MEM_FG_ASYNC: return 3;
case IPUV3_CHANNEL_MEM_BG_SYNC: return 4;
case IPUV3_CHANNEL_MEM_BG_ASYNC: return 5;
case IPUV3_CHANNEL_MEM_VDI_PLANE1_COMB: return 6;
default:
return -EINVAL;
}
}
static void ipu_cpmem_set_separate_alpha(struct ipuv3_channel *ch)
{
struct ipu_soc *ipu = ch->ipu;
int albm;
u32 val;
albm = ipu_channel_albm(ch->num);
if (albm < 0)
return;
ipu_ch_param_write_field(ch, IPU_FIELD_ALU, 1);
ipu_ch_param_write_field(ch, IPU_FIELD_ALBM, albm);
ipu_ch_param_write_field(ch, IPU_FIELD_CRE, 1);
val = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
val |= BIT(ch->num);
ipu_idmac_write(ipu, val, IDMAC_SEP_ALPHA);
}
int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
{
switch (drm_fourcc) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
/* pix format */
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 2);
/* burst size */
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
break;
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
/* pix format */
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 1);
/* burst size */
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
break;
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
/* pix format */
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0);
/* burst size */
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
break;
case DRM_FORMAT_NV12:
/* pix format */
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 4);
/* burst size */
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
break;
case DRM_FORMAT_NV16:
/* pix format */
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 3);
/* burst size */
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
break;
case DRM_FORMAT_UYVY:
/* bits/pixel */
ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3);
/* pix format */
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);
/* burst size */
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
break;
case DRM_FORMAT_YUYV:
/* bits/pixel */
ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3);
/* pix format */
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);
/* burst size */
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
break;
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XBGR8888:
ipu_cpmem_set_format_rgb(ch, &def_xbgr_32);
break;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
ipu_cpmem_set_format_rgb(ch, &def_xrgb_32);
break;
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_RGBX8888_A8:
ipu_cpmem_set_format_rgb(ch, &def_rgbx_32);
break;
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_BGRX8888_A8:
ipu_cpmem_set_format_rgb(ch, &def_bgrx_32);
break;
case DRM_FORMAT_BGR888:
case DRM_FORMAT_BGR888_A8:
ipu_cpmem_set_format_rgb(ch, &def_bgr_24);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_RGB888_A8:
ipu_cpmem_set_format_rgb(ch, &def_rgb_24);
break;
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB565_A8:
ipu_cpmem_set_format_rgb(ch, &def_rgb_16);
break;
case DRM_FORMAT_BGR565:
case DRM_FORMAT_BGR565_A8:
ipu_cpmem_set_format_rgb(ch, &def_bgr_16);
break;
case DRM_FORMAT_ARGB1555:
ipu_cpmem_set_format_rgb(ch, &def_argb_16);
break;
case DRM_FORMAT_ABGR1555:
ipu_cpmem_set_format_rgb(ch, &def_abgr_16);
break;
case DRM_FORMAT_RGBA5551:
ipu_cpmem_set_format_rgb(ch, &def_rgba_16);
break;
case DRM_FORMAT_BGRA5551:
ipu_cpmem_set_format_rgb(ch, &def_bgra_16);
break;
case DRM_FORMAT_ARGB4444:
ipu_cpmem_set_format_rgb(ch, &def_argb_16_4444);
break;
default:
return -EINVAL;
}
switch (drm_fourcc) {
case DRM_FORMAT_RGB565_A8:
case DRM_FORMAT_BGR565_A8:
case DRM_FORMAT_RGB888_A8:
case DRM_FORMAT_BGR888_A8:
case DRM_FORMAT_RGBX8888_A8:
case DRM_FORMAT_BGRX8888_A8:
ipu_ch_param_write_field(ch, IPU_FIELD_WID3, 7);
ipu_cpmem_set_separate_alpha(ch);
break;
default:
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt);
int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
{
struct v4l2_pix_format *pix = &image->pix;
int offset, u_offset, v_offset;
int ret = 0;
pr_debug("%s: resolution: %dx%d stride: %d\n",
__func__, pix->width, pix->height,
pix->bytesperline);
ipu_cpmem_set_resolution(ch, image->rect.width, image->rect.height);
ipu_cpmem_set_stride(ch, pix->bytesperline);
ipu_cpmem_set_fmt(ch, v4l2_pix_fmt_to_drm_fourcc(pix->pixelformat));
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUV420:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
u_offset = image->u_offset ?
image->u_offset : U_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
v_offset = image->v_offset ?
image->v_offset : V_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_YVU420:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
u_offset = image->u_offset ?
image->u_offset : V_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
v_offset = image->v_offset ?
image->v_offset : U_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_YUV422P:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
u_offset = image->u_offset ?
image->u_offset : U2_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
v_offset = image->v_offset ?
image->v_offset : V2_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_NV12:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
u_offset = image->u_offset ?
image->u_offset : UV_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
v_offset = image->v_offset ? image->v_offset : 0;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_NV16:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
u_offset = image->u_offset ?
image->u_offset : UV2_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
v_offset = image->v_offset ? image->v_offset : 0;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_RGB565:
offset = image->rect.left * 2 +
image->rect.top * pix->bytesperline;
break;
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_ABGR32:
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_BGRA32:
case V4L2_PIX_FMT_BGRX32:
case V4L2_PIX_FMT_RGBA32:
case V4L2_PIX_FMT_RGBX32:
case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_XRGB32:
offset = image->rect.left * 4 +
image->rect.top * pix->bytesperline;
break;
case V4L2_PIX_FMT_RGB24:
case V4L2_PIX_FMT_BGR24:
offset = image->rect.left * 3 +
image->rect.top * pix->bytesperline;
break;
case V4L2_PIX_FMT_SBGGR8:
case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8:
case V4L2_PIX_FMT_SRGGB8:
case V4L2_PIX_FMT_GREY:
offset = image->rect.left + image->rect.top * pix->bytesperline;
break;
case V4L2_PIX_FMT_SBGGR16:
case V4L2_PIX_FMT_SGBRG16:
case V4L2_PIX_FMT_SGRBG16:
case V4L2_PIX_FMT_SRGGB16:
case V4L2_PIX_FMT_Y16:
offset = image->rect.left * 2 +
image->rect.top * pix->bytesperline;
break;
default:
/* This should not happen */
WARN_ON(1);
offset = 0;
ret = -EINVAL;
}
ipu_cpmem_set_buffer(ch, 0, image->phys0 + offset);
ipu_cpmem_set_buffer(ch, 1, image->phys1 + offset);
return ret;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_image);
void ipu_cpmem_dump(struct ipuv3_channel *ch)
{
struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch);
struct ipu_soc *ipu = ch->ipu;
int chno = ch->num;
dev_dbg(ipu->dev, "ch %d word 0 - %08X %08X %08X %08X %08X\n", chno,
readl(&p->word[0].data[0]),
readl(&p->word[0].data[1]),
readl(&p->word[0].data[2]),
readl(&p->word[0].data[3]),
readl(&p->word[0].data[4]));
dev_dbg(ipu->dev, "ch %d word 1 - %08X %08X %08X %08X %08X\n", chno,
readl(&p->word[1].data[0]),
readl(&p->word[1].data[1]),
readl(&p->word[1].data[2]),
readl(&p->word[1].data[3]),
readl(&p->word[1].data[4]));
dev_dbg(ipu->dev, "PFS 0x%x, ",
ipu_ch_param_read_field(ch, IPU_FIELD_PFS));
dev_dbg(ipu->dev, "BPP 0x%x, ",
ipu_ch_param_read_field(ch, IPU_FIELD_BPP));
dev_dbg(ipu->dev, "NPB 0x%x\n",
ipu_ch_param_read_field(ch, IPU_FIELD_NPB));
dev_dbg(ipu->dev, "FW %d, ",
ipu_ch_param_read_field(ch, IPU_FIELD_FW));
dev_dbg(ipu->dev, "FH %d, ",
ipu_ch_param_read_field(ch, IPU_FIELD_FH));
dev_dbg(ipu->dev, "EBA0 0x%x\n",
ipu_ch_param_read_field(ch, IPU_FIELD_EBA0) << 3);
dev_dbg(ipu->dev, "EBA1 0x%x\n",
ipu_ch_param_read_field(ch, IPU_FIELD_EBA1) << 3);
dev_dbg(ipu->dev, "Stride %d\n",
ipu_ch_param_read_field(ch, IPU_FIELD_SL));
dev_dbg(ipu->dev, "scan_order %d\n",
ipu_ch_param_read_field(ch, IPU_FIELD_SO));
dev_dbg(ipu->dev, "uv_stride %d\n",
ipu_ch_param_read_field(ch, IPU_FIELD_SLUV));
dev_dbg(ipu->dev, "u_offset 0x%x\n",
ipu_ch_param_read_field(ch, IPU_FIELD_UBO) << 3);
dev_dbg(ipu->dev, "v_offset 0x%x\n",
ipu_ch_param_read_field(ch, IPU_FIELD_VBO) << 3);
dev_dbg(ipu->dev, "Width0 %d+1, ",
ipu_ch_param_read_field(ch, IPU_FIELD_WID0));
dev_dbg(ipu->dev, "Width1 %d+1, ",
ipu_ch_param_read_field(ch, IPU_FIELD_WID1));
dev_dbg(ipu->dev, "Width2 %d+1, ",
ipu_ch_param_read_field(ch, IPU_FIELD_WID2));
dev_dbg(ipu->dev, "Width3 %d+1, ",
ipu_ch_param_read_field(ch, IPU_FIELD_WID3));
dev_dbg(ipu->dev, "Offset0 %d, ",
ipu_ch_param_read_field(ch, IPU_FIELD_OFS0));
dev_dbg(ipu->dev, "Offset1 %d, ",
ipu_ch_param_read_field(ch, IPU_FIELD_OFS1));
dev_dbg(ipu->dev, "Offset2 %d, ",
ipu_ch_param_read_field(ch, IPU_FIELD_OFS2));
dev_dbg(ipu->dev, "Offset3 %d\n",
ipu_ch_param_read_field(ch, IPU_FIELD_OFS3));
}
EXPORT_SYMBOL_GPL(ipu_cpmem_dump);
int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
{
struct ipu_cpmem *cpmem;
cpmem = devm_kzalloc(dev, sizeof(*cpmem), GFP_KERNEL);
if (!cpmem)
return -ENOMEM;
ipu->cpmem_priv = cpmem;
spin_lock_init(&cpmem->lock);
cpmem->base = devm_ioremap(dev, base, SZ_128K);
if (!cpmem->base)
return -ENOMEM;
dev_dbg(dev, "CPMEM base: 0x%08lx remapped to %p\n",
base, cpmem->base);
cpmem->ipu = ipu;
return 0;
}
void ipu_cpmem_exit(struct ipu_soc *ipu)
{
}
| linux-master | drivers/gpu/ipu-v3/ipu-cpmem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017 Lucas Stach, Pengutronix
*/
#include <drm/drm_fourcc.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"
#define IPU_PRE_MAX_WIDTH 2048
#define IPU_PRE_NUM_SCANLINES 8
#define IPU_PRE_CTRL 0x000
#define IPU_PRE_CTRL_SET 0x004
#define IPU_PRE_CTRL_ENABLE (1 << 0)
#define IPU_PRE_CTRL_BLOCK_EN (1 << 1)
#define IPU_PRE_CTRL_BLOCK_16 (1 << 2)
#define IPU_PRE_CTRL_SDW_UPDATE (1 << 4)
#define IPU_PRE_CTRL_VFLIP (1 << 5)
#define IPU_PRE_CTRL_SO (1 << 6)
#define IPU_PRE_CTRL_INTERLACED_FIELD (1 << 7)
#define IPU_PRE_CTRL_HANDSHAKE_EN (1 << 8)
#define IPU_PRE_CTRL_HANDSHAKE_LINE_NUM(v) ((v & 0x3) << 9)
#define IPU_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN (1 << 11)
#define IPU_PRE_CTRL_EN_REPEAT (1 << 28)
#define IPU_PRE_CTRL_TPR_REST_SEL (1 << 29)
#define IPU_PRE_CTRL_CLKGATE (1 << 30)
#define IPU_PRE_CTRL_SFTRST (1 << 31)
#define IPU_PRE_CUR_BUF 0x030
#define IPU_PRE_NEXT_BUF 0x040
#define IPU_PRE_TPR_CTRL 0x070
#define IPU_PRE_TPR_CTRL_TILE_FORMAT(v) ((v & 0xff) << 0)
#define IPU_PRE_TPR_CTRL_TILE_FORMAT_MASK 0xff
#define IPU_PRE_TPR_CTRL_TILE_FORMAT_16_BIT (1 << 0)
#define IPU_PRE_TPR_CTRL_TILE_FORMAT_SPLIT_BUF (1 << 4)
#define IPU_PRE_TPR_CTRL_TILE_FORMAT_SINGLE_BUF (1 << 5)
#define IPU_PRE_TPR_CTRL_TILE_FORMAT_SUPER_TILED (1 << 6)
#define IPU_PRE_PREFETCH_ENG_CTRL 0x080
#define IPU_PRE_PREF_ENG_CTRL_PREFETCH_EN (1 << 0)
#define IPU_PRE_PREF_ENG_CTRL_RD_NUM_BYTES(v) ((v & 0x7) << 1)
#define IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(v) ((v & 0x3) << 4)
#define IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(v) ((v & 0x7) << 8)
#define IPU_PRE_PREF_ENG_CTRL_SHIFT_BYPASS (1 << 11)
#define IPU_PRE_PREF_ENG_CTRL_FIELD_INVERSE (1 << 12)
#define IPU_PRE_PREF_ENG_CTRL_PARTIAL_UV_SWAP (1 << 14)
#define IPU_PRE_PREF_ENG_CTRL_TPR_COOR_OFFSET_EN (1 << 15)
#define IPU_PRE_PREFETCH_ENG_INPUT_SIZE 0x0a0
#define IPU_PRE_PREFETCH_ENG_INPUT_SIZE_WIDTH(v) ((v & 0xffff) << 0)
#define IPU_PRE_PREFETCH_ENG_INPUT_SIZE_HEIGHT(v) ((v & 0xffff) << 16)
#define IPU_PRE_PREFETCH_ENG_PITCH 0x0d0
#define IPU_PRE_PREFETCH_ENG_PITCH_Y(v) ((v & 0xffff) << 0)
#define IPU_PRE_PREFETCH_ENG_PITCH_UV(v) ((v & 0xffff) << 16)
#define IPU_PRE_STORE_ENG_CTRL 0x110
#define IPU_PRE_STORE_ENG_CTRL_STORE_EN (1 << 0)
#define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1)
#define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4)
#define IPU_PRE_STORE_ENG_STATUS 0x120
#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK 0xffff
#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0
#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK 0x3fff
#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16
#define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL (1 << 30)
#define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD (1 << 31)
#define IPU_PRE_STORE_ENG_SIZE 0x130
#define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0)
#define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16)
#define IPU_PRE_STORE_ENG_PITCH 0x140
#define IPU_PRE_STORE_ENG_PITCH_OUT_PITCH(v) ((v & 0xffff) << 0)
#define IPU_PRE_STORE_ENG_ADDR 0x150
struct ipu_pre {
struct list_head list;
struct device *dev;
void __iomem *regs;
struct clk *clk_axi;
struct gen_pool *iram;
dma_addr_t buffer_paddr;
void *buffer_virt;
bool in_use;
unsigned int safe_window_end;
unsigned int last_bufaddr;
};
static DEFINE_MUTEX(ipu_pre_list_mutex);
static LIST_HEAD(ipu_pre_list);
static int available_pres;
int ipu_pre_get_available_count(void)
{
return available_pres;
}
struct ipu_pre *
ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)
{
struct device_node *pre_node = of_parse_phandle(dev->of_node,
name, index);
struct ipu_pre *pre;
mutex_lock(&ipu_pre_list_mutex);
list_for_each_entry(pre, &ipu_pre_list, list) {
if (pre_node == pre->dev->of_node) {
mutex_unlock(&ipu_pre_list_mutex);
device_link_add(dev, pre->dev,
DL_FLAG_AUTOREMOVE_CONSUMER);
of_node_put(pre_node);
return pre;
}
}
mutex_unlock(&ipu_pre_list_mutex);
of_node_put(pre_node);
return NULL;
}
int ipu_pre_get(struct ipu_pre *pre)
{
u32 val;
if (pre->in_use)
return -EBUSY;
/* first get the engine out of reset and remove clock gating */
writel(0, pre->regs + IPU_PRE_CTRL);
/* init defaults that should be applied to all streams */
val = IPU_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN |
IPU_PRE_CTRL_HANDSHAKE_EN |
IPU_PRE_CTRL_TPR_REST_SEL |
IPU_PRE_CTRL_SDW_UPDATE;
writel(val, pre->regs + IPU_PRE_CTRL);
pre->in_use = true;
return 0;
}
void ipu_pre_put(struct ipu_pre *pre)
{
writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL);
pre->in_use = false;
}
void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
unsigned int height, unsigned int stride, u32 format,
uint64_t modifier, unsigned int bufaddr)
{
const struct drm_format_info *info = drm_format_info(format);
u32 active_bpp = info->cpp[0] >> 1;
u32 val;
/* calculate safe window for ctrl register updates */
if (modifier == DRM_FORMAT_MOD_LINEAR)
pre->safe_window_end = height - 2;
else
pre->safe_window_end = DIV_ROUND_UP(height, 4) - 1;
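/*
* E.g. for a 1080-line plane this gives a safe window end of 1078 in the
* linear case and DIV_ROUND_UP(1080, 4) - 1 = 269 in the tiled case,
* presumably because the store engine advances in block rows of four lines
* for tiled buffers.
*/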
writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
pre->last_bufaddr = bufaddr;
val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) |
IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) |
IPU_PRE_PREF_ENG_CTRL_RD_NUM_BYTES(4) |
IPU_PRE_PREF_ENG_CTRL_SHIFT_BYPASS |
IPU_PRE_PREF_ENG_CTRL_PREFETCH_EN;
writel(val, pre->regs + IPU_PRE_PREFETCH_ENG_CTRL);
val = IPU_PRE_PREFETCH_ENG_INPUT_SIZE_WIDTH(width) |
IPU_PRE_PREFETCH_ENG_INPUT_SIZE_HEIGHT(height);
writel(val, pre->regs + IPU_PRE_PREFETCH_ENG_INPUT_SIZE);
val = IPU_PRE_PREFETCH_ENG_PITCH_Y(stride);
writel(val, pre->regs + IPU_PRE_PREFETCH_ENG_PITCH);
val = IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(active_bpp) |
IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(4) |
IPU_PRE_STORE_ENG_CTRL_STORE_EN;
writel(val, pre->regs + IPU_PRE_STORE_ENG_CTRL);
val = IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(width) |
IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(height);
writel(val, pre->regs + IPU_PRE_STORE_ENG_SIZE);
val = IPU_PRE_STORE_ENG_PITCH_OUT_PITCH(stride);
writel(val, pre->regs + IPU_PRE_STORE_ENG_PITCH);
writel(pre->buffer_paddr, pre->regs + IPU_PRE_STORE_ENG_ADDR);
val = readl(pre->regs + IPU_PRE_TPR_CTRL);
val &= ~IPU_PRE_TPR_CTRL_TILE_FORMAT_MASK;
if (modifier != DRM_FORMAT_MOD_LINEAR) {
/* only support single buffer formats for now */
val |= IPU_PRE_TPR_CTRL_TILE_FORMAT_SINGLE_BUF;
if (modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED)
val |= IPU_PRE_TPR_CTRL_TILE_FORMAT_SUPER_TILED;
if (info->cpp[0] == 2)
val |= IPU_PRE_TPR_CTRL_TILE_FORMAT_16_BIT;
}
writel(val, pre->regs + IPU_PRE_TPR_CTRL);
val = readl(pre->regs + IPU_PRE_CTRL);
val |= IPU_PRE_CTRL_EN_REPEAT | IPU_PRE_CTRL_ENABLE |
IPU_PRE_CTRL_SDW_UPDATE;
if (modifier == DRM_FORMAT_MOD_LINEAR)
val &= ~IPU_PRE_CTRL_BLOCK_EN;
else
val |= IPU_PRE_CTRL_BLOCK_EN;
writel(val, pre->regs + IPU_PRE_CTRL);
}
void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
{
unsigned long timeout = jiffies + msecs_to_jiffies(5);
unsigned short current_yblock;
u32 val;
if (bufaddr == pre->last_bufaddr)
return;
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
pre->last_bufaddr = bufaddr;
do {
if (time_after(jiffies, timeout)) {
dev_warn(pre->dev, "timeout waiting for PRE safe window\n");
return;
}
val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS);
current_yblock =
(val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK;
} while (current_yblock == 0 || current_yblock >= pre->safe_window_end);
writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
}
bool ipu_pre_update_pending(struct ipu_pre *pre)
{
return !!(readl_relaxed(pre->regs + IPU_PRE_CTRL) &
IPU_PRE_CTRL_SDW_UPDATE);
}
u32 ipu_pre_get_baddr(struct ipu_pre *pre)
{
return (u32)pre->buffer_paddr;
}
static int ipu_pre_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ipu_pre *pre;
pre = devm_kzalloc(dev, sizeof(*pre), GFP_KERNEL);
if (!pre)
return -ENOMEM;
pre->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pre->regs))
return PTR_ERR(pre->regs);
pre->clk_axi = devm_clk_get(dev, "axi");
if (IS_ERR(pre->clk_axi))
return PTR_ERR(pre->clk_axi);
pre->iram = of_gen_pool_get(dev->of_node, "fsl,iram", 0);
if (!pre->iram)
return -EPROBE_DEFER;
/*
* Allocate IRAM buffer with maximum size. This could be made dynamic,
* but as there is no other user of this IRAM region and we can fit all
* max sized buffers into it, there is no need yet.
*/
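/* With the constants above this is 2048 * 8 * 4 bytes = 64 KiB of IRAM. */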
pre->buffer_virt = gen_pool_dma_alloc(pre->iram, IPU_PRE_MAX_WIDTH *
IPU_PRE_NUM_SCANLINES * 4,
&pre->buffer_paddr);
if (!pre->buffer_virt)
return -ENOMEM;
clk_prepare_enable(pre->clk_axi);
pre->dev = dev;
platform_set_drvdata(pdev, pre);
mutex_lock(&ipu_pre_list_mutex);
list_add(&pre->list, &ipu_pre_list);
available_pres++;
mutex_unlock(&ipu_pre_list_mutex);
return 0;
}
static int ipu_pre_remove(struct platform_device *pdev)
{
struct ipu_pre *pre = platform_get_drvdata(pdev);
mutex_lock(&ipu_pre_list_mutex);
list_del(&pre->list);
available_pres--;
mutex_unlock(&ipu_pre_list_mutex);
clk_disable_unprepare(pre->clk_axi);
if (pre->buffer_virt)
gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
return 0;
}
static const struct of_device_id ipu_pre_dt_ids[] = {
{ .compatible = "fsl,imx6qp-pre", },
{ /* sentinel */ },
};
struct platform_driver ipu_pre_drv = {
.probe = ipu_pre_probe,
.remove = ipu_pre_remove,
.driver = {
.name = "imx-ipu-pre",
.of_match_table = ipu_pre_dt_ids,
},
};
| linux-master | drivers/gpu/ipu-v3/ipu-pre.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2014 Mentor Graphics Inc.
* Copyright 2005-2012 Freescale Semiconductor, Inc. All Rights Reserved.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include "ipu-prv.h"
/* IC Register Offsets */
#define IC_CONF 0x0000
#define IC_PRP_ENC_RSC 0x0004
#define IC_PRP_VF_RSC 0x0008
#define IC_PP_RSC 0x000C
#define IC_CMBP_1 0x0010
#define IC_CMBP_2 0x0014
#define IC_IDMAC_1 0x0018
#define IC_IDMAC_2 0x001C
#define IC_IDMAC_3 0x0020
#define IC_IDMAC_4 0x0024
/* IC Register Fields */
#define IC_CONF_PRPENC_EN (1 << 0)
#define IC_CONF_PRPENC_CSC1 (1 << 1)
#define IC_CONF_PRPENC_ROT_EN (1 << 2)
#define IC_CONF_PRPVF_EN (1 << 8)
#define IC_CONF_PRPVF_CSC1 (1 << 9)
#define IC_CONF_PRPVF_CSC2 (1 << 10)
#define IC_CONF_PRPVF_CMB (1 << 11)
#define IC_CONF_PRPVF_ROT_EN (1 << 12)
#define IC_CONF_PP_EN (1 << 16)
#define IC_CONF_PP_CSC1 (1 << 17)
#define IC_CONF_PP_CSC2 (1 << 18)
#define IC_CONF_PP_CMB (1 << 19)
#define IC_CONF_PP_ROT_EN (1 << 20)
#define IC_CONF_IC_GLB_LOC_A (1 << 28)
#define IC_CONF_KEY_COLOR_EN (1 << 29)
#define IC_CONF_RWS_EN (1 << 30)
#define IC_CONF_CSI_MEM_WR_EN (1 << 31)
#define IC_IDMAC_1_CB0_BURST_16 (1 << 0)
#define IC_IDMAC_1_CB1_BURST_16 (1 << 1)
#define IC_IDMAC_1_CB2_BURST_16 (1 << 2)
#define IC_IDMAC_1_CB3_BURST_16 (1 << 3)
#define IC_IDMAC_1_CB4_BURST_16 (1 << 4)
#define IC_IDMAC_1_CB5_BURST_16 (1 << 5)
#define IC_IDMAC_1_CB6_BURST_16 (1 << 6)
#define IC_IDMAC_1_CB7_BURST_16 (1 << 7)
#define IC_IDMAC_1_PRPENC_ROT_MASK (0x7 << 11)
#define IC_IDMAC_1_PRPENC_ROT_OFFSET 11
#define IC_IDMAC_1_PRPVF_ROT_MASK (0x7 << 14)
#define IC_IDMAC_1_PRPVF_ROT_OFFSET 14
#define IC_IDMAC_1_PP_ROT_MASK (0x7 << 17)
#define IC_IDMAC_1_PP_ROT_OFFSET 17
#define IC_IDMAC_1_PP_FLIP_RS (1 << 22)
#define IC_IDMAC_1_PRPVF_FLIP_RS (1 << 21)
#define IC_IDMAC_1_PRPENC_FLIP_RS (1 << 20)
#define IC_IDMAC_2_PRPENC_HEIGHT_MASK (0x3ff << 0)
#define IC_IDMAC_2_PRPENC_HEIGHT_OFFSET 0
#define IC_IDMAC_2_PRPVF_HEIGHT_MASK (0x3ff << 10)
#define IC_IDMAC_2_PRPVF_HEIGHT_OFFSET 10
#define IC_IDMAC_2_PP_HEIGHT_MASK (0x3ff << 20)
#define IC_IDMAC_2_PP_HEIGHT_OFFSET 20
#define IC_IDMAC_3_PRPENC_WIDTH_MASK (0x3ff << 0)
#define IC_IDMAC_3_PRPENC_WIDTH_OFFSET 0
#define IC_IDMAC_3_PRPVF_WIDTH_MASK (0x3ff << 10)
#define IC_IDMAC_3_PRPVF_WIDTH_OFFSET 10
#define IC_IDMAC_3_PP_WIDTH_MASK (0x3ff << 20)
#define IC_IDMAC_3_PP_WIDTH_OFFSET 20
struct ic_task_regoffs {
u32 rsc;
u32 tpmem_csc[2];
};
struct ic_task_bitfields {
u32 ic_conf_en;
u32 ic_conf_rot_en;
u32 ic_conf_cmb_en;
u32 ic_conf_csc1_en;
u32 ic_conf_csc2_en;
u32 ic_cmb_galpha_bit;
};
static const struct ic_task_regoffs ic_task_reg[IC_NUM_TASKS] = {
[IC_TASK_ENCODER] = {
.rsc = IC_PRP_ENC_RSC,
.tpmem_csc = {0x2008, 0},
},
[IC_TASK_VIEWFINDER] = {
.rsc = IC_PRP_VF_RSC,
.tpmem_csc = {0x4028, 0x4040},
},
[IC_TASK_POST_PROCESSOR] = {
.rsc = IC_PP_RSC,
.tpmem_csc = {0x6060, 0x6078},
},
};
static const struct ic_task_bitfields ic_task_bit[IC_NUM_TASKS] = {
[IC_TASK_ENCODER] = {
.ic_conf_en = IC_CONF_PRPENC_EN,
.ic_conf_rot_en = IC_CONF_PRPENC_ROT_EN,
.ic_conf_cmb_en = 0, /* NA */
.ic_conf_csc1_en = IC_CONF_PRPENC_CSC1,
.ic_conf_csc2_en = 0, /* NA */
.ic_cmb_galpha_bit = 0, /* NA */
},
[IC_TASK_VIEWFINDER] = {
.ic_conf_en = IC_CONF_PRPVF_EN,
.ic_conf_rot_en = IC_CONF_PRPVF_ROT_EN,
.ic_conf_cmb_en = IC_CONF_PRPVF_CMB,
.ic_conf_csc1_en = IC_CONF_PRPVF_CSC1,
.ic_conf_csc2_en = IC_CONF_PRPVF_CSC2,
.ic_cmb_galpha_bit = 0,
},
[IC_TASK_POST_PROCESSOR] = {
.ic_conf_en = IC_CONF_PP_EN,
.ic_conf_rot_en = IC_CONF_PP_ROT_EN,
.ic_conf_cmb_en = IC_CONF_PP_CMB,
.ic_conf_csc1_en = IC_CONF_PP_CSC1,
.ic_conf_csc2_en = IC_CONF_PP_CSC2,
.ic_cmb_galpha_bit = 8,
},
};
struct ipu_ic_priv;
struct ipu_ic {
enum ipu_ic_task task;
const struct ic_task_regoffs *reg;
const struct ic_task_bitfields *bit;
struct ipu_ic_colorspace in_cs;
struct ipu_ic_colorspace g_in_cs;
struct ipu_ic_colorspace out_cs;
bool graphics;
bool rotation;
bool in_use;
struct ipu_ic_priv *priv;
};
struct ipu_ic_priv {
void __iomem *base;
void __iomem *tpmem_base;
spinlock_t lock;
struct ipu_soc *ipu;
int use_count;
int irt_use_count;
struct ipu_ic task[IC_NUM_TASKS];
};
static inline u32 ipu_ic_read(struct ipu_ic *ic, unsigned offset)
{
return readl(ic->priv->base + offset);
}
static inline void ipu_ic_write(struct ipu_ic *ic, u32 value, unsigned offset)
{
writel(value, ic->priv->base + offset);
}
static int init_csc(struct ipu_ic *ic,
const struct ipu_ic_csc *csc,
int csc_index)
{
struct ipu_ic_priv *priv = ic->priv;
u32 __iomem *base;
const u16 (*c)[3];
const u16 *a;
u32 param;
base = (u32 __iomem *)
(priv->tpmem_base + ic->reg->tpmem_csc[csc_index]);
/* Cast to unsigned */
c = (const u16 (*)[3])csc->params.coeff;
a = (const u16 *)csc->params.offset;
param = ((a[0] & 0x1f) << 27) | ((c[0][0] & 0x1ff) << 18) |
((c[1][1] & 0x1ff) << 9) | (c[2][2] & 0x1ff);
writel(param, base++);
param = ((a[0] & 0x1fe0) >> 5) | (csc->params.scale << 8) |
(csc->params.sat << 10);
writel(param, base++);
param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) |
((c[1][0] & 0x1ff) << 9) | (c[2][0] & 0x1ff);
writel(param, base++);
param = ((a[1] & 0x1fe0) >> 5);
writel(param, base++);
param = ((a[2] & 0x1f) << 27) | ((c[0][2] & 0x1ff) << 18) |
((c[1][2] & 0x1ff) << 9) | (c[2][1] & 0x1ff);
writel(param, base++);
param = ((a[2] & 0x1fe0) >> 5);
writel(param, base++);
return 0;
}
static int calc_resize_coeffs(struct ipu_ic *ic,
u32 in_size, u32 out_size,
u32 *resize_coeff,
u32 *downsize_coeff)
{
struct ipu_ic_priv *priv = ic->priv;
struct ipu_soc *ipu = priv->ipu;
u32 temp_size, temp_downsize;
/*
* Input size cannot be more than 4096, and output size cannot
* be more than 1024
*/
if (in_size > 4096) {
dev_err(ipu->dev, "Unsupported resize (in_size > 4096)\n");
return -EINVAL;
}
if (out_size > 1024) {
dev_err(ipu->dev, "Unsupported resize (out_size > 1024)\n");
return -EINVAL;
}
/* Cannot downsize more than 4:1 */
if ((out_size << 2) < in_size) {
dev_err(ipu->dev, "Unsupported downsize\n");
return -EINVAL;
}
/* Compute downsizing coefficient */
temp_downsize = 0;
temp_size = in_size;
while (((temp_size > 1024) || (temp_size >= out_size * 2)) &&
(temp_downsize < 2)) {
temp_size >>= 1;
temp_downsize++;
}
*downsize_coeff = temp_downsize;
/*
* compute resizing coefficient using the following equation:
* resize_coeff = M * (SI - 1) / (SO - 1)
* where M = 2^13, SI = input size, SO = output size
*/
*resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
if (*resize_coeff >= 16384L) {
dev_err(ipu->dev, "Warning! Overflow on resize coeff.\n");
*resize_coeff = 0x3FFF;
}
return 0;
}
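/*
* Worked example for the coefficients above: scaling 1920 down to 480 first
* downsizes twice (1920 -> 960 -> 480, downsize_coeff = 2, a /4 decimation),
* after which the bilinear resizer runs 1:1 with
* resize_coeff = 8192 * (480 - 1) / (480 - 1) = 8192.
*/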
void ipu_ic_task_enable(struct ipu_ic *ic)
{
struct ipu_ic_priv *priv = ic->priv;
unsigned long flags;
u32 ic_conf;
spin_lock_irqsave(&priv->lock, flags);
ic_conf = ipu_ic_read(ic, IC_CONF);
ic_conf |= ic->bit->ic_conf_en;
if (ic->rotation)
ic_conf |= ic->bit->ic_conf_rot_en;
if (ic->in_cs.cs != ic->out_cs.cs)
ic_conf |= ic->bit->ic_conf_csc1_en;
if (ic->graphics) {
ic_conf |= ic->bit->ic_conf_cmb_en;
ic_conf |= ic->bit->ic_conf_csc1_en;
if (ic->g_in_cs.cs != ic->out_cs.cs)
ic_conf |= ic->bit->ic_conf_csc2_en;
}
ipu_ic_write(ic, ic_conf, IC_CONF);
spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_ic_task_enable);
void ipu_ic_task_disable(struct ipu_ic *ic)
{
struct ipu_ic_priv *priv = ic->priv;
unsigned long flags;
u32 ic_conf;
spin_lock_irqsave(&priv->lock, flags);
ic_conf = ipu_ic_read(ic, IC_CONF);
ic_conf &= ~(ic->bit->ic_conf_en |
ic->bit->ic_conf_csc1_en |
ic->bit->ic_conf_rot_en);
if (ic->bit->ic_conf_csc2_en)
ic_conf &= ~ic->bit->ic_conf_csc2_en;
if (ic->bit->ic_conf_cmb_en)
ic_conf &= ~ic->bit->ic_conf_cmb_en;
ipu_ic_write(ic, ic_conf, IC_CONF);
spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_ic_task_disable);
int ipu_ic_task_graphics_init(struct ipu_ic *ic,
const struct ipu_ic_colorspace *g_in_cs,
bool galpha_en, u32 galpha,
bool colorkey_en, u32 colorkey)
{
struct ipu_ic_priv *priv = ic->priv;
struct ipu_ic_csc csc2;
unsigned long flags;
u32 reg, ic_conf;
int ret = 0;
if (ic->task == IC_TASK_ENCODER)
return -EINVAL;
spin_lock_irqsave(&priv->lock, flags);
ic_conf = ipu_ic_read(ic, IC_CONF);
if (!(ic_conf & ic->bit->ic_conf_csc1_en)) {
struct ipu_ic_csc csc1;
ret = ipu_ic_calc_csc(&csc1,
V4L2_YCBCR_ENC_601,
V4L2_QUANTIZATION_FULL_RANGE,
IPUV3_COLORSPACE_RGB,
V4L2_YCBCR_ENC_601,
V4L2_QUANTIZATION_FULL_RANGE,
IPUV3_COLORSPACE_RGB);
if (ret)
goto unlock;
/* need transparent CSC1 conversion */
ret = init_csc(ic, &csc1, 0);
if (ret)
goto unlock;
}
ic->g_in_cs = *g_in_cs;
csc2.in_cs = ic->g_in_cs;
csc2.out_cs = ic->out_cs;
ret = __ipu_ic_calc_csc(&csc2);
if (ret)
goto unlock;
ret = init_csc(ic, &csc2, 1);
if (ret)
goto unlock;
if (galpha_en) {
ic_conf |= IC_CONF_IC_GLB_LOC_A;
reg = ipu_ic_read(ic, IC_CMBP_1);
reg &= ~(0xff << ic->bit->ic_cmb_galpha_bit);
reg |= (galpha << ic->bit->ic_cmb_galpha_bit);
ipu_ic_write(ic, reg, IC_CMBP_1);
} else
ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
if (colorkey_en) {
ic_conf |= IC_CONF_KEY_COLOR_EN;
ipu_ic_write(ic, colorkey, IC_CMBP_2);
} else
ic_conf &= ~IC_CONF_KEY_COLOR_EN;
ipu_ic_write(ic, ic_conf, IC_CONF);
ic->graphics = true;
unlock:
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init);
int ipu_ic_task_init_rsc(struct ipu_ic *ic,
const struct ipu_ic_csc *csc,
int in_width, int in_height,
int out_width, int out_height,
u32 rsc)
{
struct ipu_ic_priv *priv = ic->priv;
u32 downsize_coeff, resize_coeff;
unsigned long flags;
int ret = 0;
if (!rsc) {
/* Setup vertical resizing */
ret = calc_resize_coeffs(ic, in_height, out_height,
&resize_coeff, &downsize_coeff);
if (ret)
return ret;
rsc = (downsize_coeff << 30) | (resize_coeff << 16);
/* Setup horizontal resizing */
ret = calc_resize_coeffs(ic, in_width, out_width,
&resize_coeff, &downsize_coeff);
if (ret)
return ret;
rsc |= (downsize_coeff << 14) | resize_coeff;
}
spin_lock_irqsave(&priv->lock, flags);
ipu_ic_write(ic, rsc, ic->reg->rsc);
/* Setup color space conversion */
ic->in_cs = csc->in_cs;
ic->out_cs = csc->out_cs;
ret = init_csc(ic, csc, 0);
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
int ipu_ic_task_init(struct ipu_ic *ic,
const struct ipu_ic_csc *csc,
int in_width, int in_height,
int out_width, int out_height)
{
return ipu_ic_task_init_rsc(ic, csc,
in_width, in_height,
out_width, out_height, 0);
}
EXPORT_SYMBOL_GPL(ipu_ic_task_init);
int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
u32 width, u32 height, int burst_size,
enum ipu_rotate_mode rot)
{
struct ipu_ic_priv *priv = ic->priv;
struct ipu_soc *ipu = priv->ipu;
u32 ic_idmac_1, ic_idmac_2, ic_idmac_3;
u32 temp_rot = bitrev8(rot) >> 5;
bool need_hor_flip = false;
unsigned long flags;
int ret = 0;
if ((burst_size != 8) && (burst_size != 16)) {
dev_err(ipu->dev, "Illegal burst length for IC\n");
return -EINVAL;
}
width--;
height--;
if (temp_rot & 0x2) /* Need horizontal flip */
need_hor_flip = true;
spin_lock_irqsave(&priv->lock, flags);
ic_idmac_1 = ipu_ic_read(ic, IC_IDMAC_1);
ic_idmac_2 = ipu_ic_read(ic, IC_IDMAC_2);
ic_idmac_3 = ipu_ic_read(ic, IC_IDMAC_3);
switch (channel->num) {
case IPUV3_CHANNEL_IC_PP_MEM:
if (burst_size == 16)
ic_idmac_1 |= IC_IDMAC_1_CB2_BURST_16;
else
ic_idmac_1 &= ~IC_IDMAC_1_CB2_BURST_16;
if (need_hor_flip)
ic_idmac_1 |= IC_IDMAC_1_PP_FLIP_RS;
else
ic_idmac_1 &= ~IC_IDMAC_1_PP_FLIP_RS;
ic_idmac_2 &= ~IC_IDMAC_2_PP_HEIGHT_MASK;
ic_idmac_2 |= height << IC_IDMAC_2_PP_HEIGHT_OFFSET;
ic_idmac_3 &= ~IC_IDMAC_3_PP_WIDTH_MASK;
ic_idmac_3 |= width << IC_IDMAC_3_PP_WIDTH_OFFSET;
break;
case IPUV3_CHANNEL_MEM_IC_PP:
if (burst_size == 16)
ic_idmac_1 |= IC_IDMAC_1_CB5_BURST_16;
else
ic_idmac_1 &= ~IC_IDMAC_1_CB5_BURST_16;
break;
case IPUV3_CHANNEL_MEM_ROT_PP:
ic_idmac_1 &= ~IC_IDMAC_1_PP_ROT_MASK;
ic_idmac_1 |= temp_rot << IC_IDMAC_1_PP_ROT_OFFSET;
break;
case IPUV3_CHANNEL_MEM_IC_PRP_VF:
if (burst_size == 16)
ic_idmac_1 |= IC_IDMAC_1_CB6_BURST_16;
else
ic_idmac_1 &= ~IC_IDMAC_1_CB6_BURST_16;
break;
case IPUV3_CHANNEL_IC_PRP_ENC_MEM:
if (burst_size == 16)
ic_idmac_1 |= IC_IDMAC_1_CB0_BURST_16;
else
ic_idmac_1 &= ~IC_IDMAC_1_CB0_BURST_16;
if (need_hor_flip)
ic_idmac_1 |= IC_IDMAC_1_PRPENC_FLIP_RS;
else
ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_FLIP_RS;
ic_idmac_2 &= ~IC_IDMAC_2_PRPENC_HEIGHT_MASK;
ic_idmac_2 |= height << IC_IDMAC_2_PRPENC_HEIGHT_OFFSET;
ic_idmac_3 &= ~IC_IDMAC_3_PRPENC_WIDTH_MASK;
ic_idmac_3 |= width << IC_IDMAC_3_PRPENC_WIDTH_OFFSET;
break;
case IPUV3_CHANNEL_MEM_ROT_ENC:
ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_ROT_MASK;
ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPENC_ROT_OFFSET;
break;
case IPUV3_CHANNEL_IC_PRP_VF_MEM:
if (burst_size == 16)
ic_idmac_1 |= IC_IDMAC_1_CB1_BURST_16;
else
ic_idmac_1 &= ~IC_IDMAC_1_CB1_BURST_16;
if (need_hor_flip)
ic_idmac_1 |= IC_IDMAC_1_PRPVF_FLIP_RS;
else
ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_FLIP_RS;
ic_idmac_2 &= ~IC_IDMAC_2_PRPVF_HEIGHT_MASK;
ic_idmac_2 |= height << IC_IDMAC_2_PRPVF_HEIGHT_OFFSET;
ic_idmac_3 &= ~IC_IDMAC_3_PRPVF_WIDTH_MASK;
ic_idmac_3 |= width << IC_IDMAC_3_PRPVF_WIDTH_OFFSET;
break;
case IPUV3_CHANNEL_MEM_ROT_VF:
ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_ROT_MASK;
ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPVF_ROT_OFFSET;
break;
case IPUV3_CHANNEL_G_MEM_IC_PRP_VF:
if (burst_size == 16)
ic_idmac_1 |= IC_IDMAC_1_CB3_BURST_16;
else
ic_idmac_1 &= ~IC_IDMAC_1_CB3_BURST_16;
break;
case IPUV3_CHANNEL_G_MEM_IC_PP:
if (burst_size == 16)
ic_idmac_1 |= IC_IDMAC_1_CB4_BURST_16;
else
ic_idmac_1 &= ~IC_IDMAC_1_CB4_BURST_16;
break;
case IPUV3_CHANNEL_VDI_MEM_IC_VF:
if (burst_size == 16)
ic_idmac_1 |= IC_IDMAC_1_CB7_BURST_16;
else
ic_idmac_1 &= ~IC_IDMAC_1_CB7_BURST_16;
break;
default:
goto unlock;
}
ipu_ic_write(ic, ic_idmac_1, IC_IDMAC_1);
ipu_ic_write(ic, ic_idmac_2, IC_IDMAC_2);
ipu_ic_write(ic, ic_idmac_3, IC_IDMAC_3);
if (ipu_rot_mode_is_irt(rot))
ic->rotation = true;
unlock:
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(ipu_ic_task_idma_init);
static void ipu_irt_enable(struct ipu_ic *ic)
{
struct ipu_ic_priv *priv = ic->priv;
if (!priv->irt_use_count)
ipu_module_enable(priv->ipu, IPU_CONF_ROT_EN);
priv->irt_use_count++;
}
static void ipu_irt_disable(struct ipu_ic *ic)
{
struct ipu_ic_priv *priv = ic->priv;
if (priv->irt_use_count) {
if (!--priv->irt_use_count)
ipu_module_disable(priv->ipu, IPU_CONF_ROT_EN);
}
}
int ipu_ic_enable(struct ipu_ic *ic)
{
struct ipu_ic_priv *priv = ic->priv;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
if (!priv->use_count)
ipu_module_enable(priv->ipu, IPU_CONF_IC_EN);
priv->use_count++;
if (ic->rotation)
ipu_irt_enable(ic);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_ic_enable);
int ipu_ic_disable(struct ipu_ic *ic)
{
struct ipu_ic_priv *priv = ic->priv;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
priv->use_count--;
if (!priv->use_count)
ipu_module_disable(priv->ipu, IPU_CONF_IC_EN);
if (priv->use_count < 0)
priv->use_count = 0;
if (ic->rotation)
ipu_irt_disable(ic);
ic->rotation = ic->graphics = false;
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_ic_disable);
struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task)
{
struct ipu_ic_priv *priv = ipu->ic_priv;
unsigned long flags;
struct ipu_ic *ic, *ret;
if (task >= IC_NUM_TASKS)
return ERR_PTR(-EINVAL);
ic = &priv->task[task];
spin_lock_irqsave(&priv->lock, flags);
if (ic->in_use) {
ret = ERR_PTR(-EBUSY);
goto unlock;
}
ic->in_use = true;
ret = ic;
unlock:
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(ipu_ic_get);
void ipu_ic_put(struct ipu_ic *ic)
{
struct ipu_ic_priv *priv = ic->priv;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
ic->in_use = false;
spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_ic_put);
int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
unsigned long base, unsigned long tpmem_base)
{
struct ipu_ic_priv *priv;
int i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ipu->ic_priv = priv;
spin_lock_init(&priv->lock);
priv->base = devm_ioremap(dev, base, PAGE_SIZE);
if (!priv->base)
return -ENOMEM;
priv->tpmem_base = devm_ioremap(dev, tpmem_base, SZ_64K);
if (!priv->tpmem_base)
return -ENOMEM;
dev_dbg(dev, "IC base: 0x%08lx remapped to %p\n", base, priv->base);
priv->ipu = ipu;
for (i = 0; i < IC_NUM_TASKS; i++) {
priv->task[i].task = i;
priv->task[i].priv = priv;
priv->task[i].reg = &ic_task_reg[i];
priv->task[i].bit = &ic_task_bit[i];
}
return 0;
}
void ipu_ic_exit(struct ipu_soc *ipu)
{
}
void ipu_ic_dump(struct ipu_ic *ic)
{
struct ipu_ic_priv *priv = ic->priv;
struct ipu_soc *ipu = priv->ipu;
dev_dbg(ipu->dev, "IC_CONF = \t0x%08X\n",
ipu_ic_read(ic, IC_CONF));
dev_dbg(ipu->dev, "IC_PRP_ENC_RSC = \t0x%08X\n",
ipu_ic_read(ic, IC_PRP_ENC_RSC));
dev_dbg(ipu->dev, "IC_PRP_VF_RSC = \t0x%08X\n",
ipu_ic_read(ic, IC_PRP_VF_RSC));
dev_dbg(ipu->dev, "IC_PP_RSC = \t0x%08X\n",
ipu_ic_read(ic, IC_PP_RSC));
dev_dbg(ipu->dev, "IC_CMBP_1 = \t0x%08X\n",
ipu_ic_read(ic, IC_CMBP_1));
dev_dbg(ipu->dev, "IC_CMBP_2 = \t0x%08X\n",
ipu_ic_read(ic, IC_CMBP_2));
dev_dbg(ipu->dev, "IC_IDMAC_1 = \t0x%08X\n",
ipu_ic_read(ic, IC_IDMAC_1));
dev_dbg(ipu->dev, "IC_IDMAC_2 = \t0x%08X\n",
ipu_ic_read(ic, IC_IDMAC_2));
dev_dbg(ipu->dev, "IC_IDMAC_3 = \t0x%08X\n",
ipu_ic_read(ic, IC_IDMAC_3));
dev_dbg(ipu->dev, "IC_IDMAC_4 = \t0x%08X\n",
ipu_ic_read(ic, IC_IDMAC_4));
}
EXPORT_SYMBOL_GPL(ipu_ic_dump);
| linux-master | drivers/gpu/ipu-v3/ipu-ic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2016 Mentor Graphics Inc.
*
* Queued image conversion support, with tiling and rotation.
*/
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/math.h>
#include <video/imx-ipu-image-convert.h>
#include "ipu-prv.h"
/*
* The IC Resizer has a restriction that the output frame from the
* resizer must be 1024 or less in both width (pixels) and height
* (lines).
*
* The image converter attempts to split up a conversion when
* the desired output (converted) frame resolution exceeds the
* IC resizer limit of 1024 in either dimension.
*
* If either dimension of the output frame exceeds the limit, the
* dimension is split into 1, 2, or 4 equal stripes, for a maximum
* of 4*4 or 16 tiles. A conversion is then carried out for each
* tile (but taking care to pass the full frame stride length to
* the DMA channel's parameter memory!). IDMA double-buffering is used
* to convert each tile back-to-back when possible (see note below
* when double_buffering boolean is set).
*
* Note that the input frame must be split up into the same number
* of tiles as the output frame:
*
* +---------+-----+
* +-----+---+ | A | B |
* | A | B | | | |
* +-----+---+ --> +---------+-----+
* | C | D | | C | D |
* +-----+---+ | | |
* +---------+-----+
*
* Clockwise 90° rotations are handled by first rescaling into a
* reusable temporary tile buffer and then rotating with the 8x8
* block rotator, writing to the correct destination:
*
* +-----+-----+
* | | |
* +-----+---+ +---------+ | C | A |
* | A | B | | A,B, | | | | |
* +-----+---+ --> | C,D | | --> | | |
* | C | D | +---------+ +-----+-----+
* +-----+---+ | D | B |
* | | |
* +-----+-----+
*
* If the 8x8 block rotator is used, horizontal or vertical flipping
* is done during the rotation step, otherwise flipping is done
* during the scaling step.
* With rotation or flipping, tile order changes between input and
* output image. Tiles are numbered row major from top left to bottom
* right for both input and output image.
*/
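/*
* Rough illustration of the split (ignoring the additional constraint from
* the downsizing section): a 1920x1080 output exceeds 1024 in both
* dimensions and is split into 2 columns x 2 rows = 4 tiles, while the
* 4096x4096 maximum needs the full 4 x 4 = 16 tiles.
*/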
#define MAX_STRIPES_W 4
#define MAX_STRIPES_H 4
#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
#define MIN_W 16
#define MIN_H 8
#define MAX_W 4096
#define MAX_H 4096
enum ipu_image_convert_type {
IMAGE_CONVERT_IN = 0,
IMAGE_CONVERT_OUT,
};
struct ipu_image_convert_dma_buf {
void *virt;
dma_addr_t phys;
unsigned long len;
};
struct ipu_image_convert_dma_chan {
int in;
int out;
int rot_in;
int rot_out;
int vdi_in_p;
int vdi_in;
int vdi_in_n;
};
/* dimensions of one tile */
struct ipu_image_tile {
u32 width;
u32 height;
u32 left;
u32 top;
/* size and strides are in bytes */
u32 size;
u32 stride;
u32 rot_stride;
/* start Y or packed offset of this tile */
u32 offset;
/* offset from start to tile in U plane, for planar formats */
u32 u_off;
/* offset from start to tile in V plane, for planar formats */
u32 v_off;
};
struct ipu_image_convert_image {
struct ipu_image base;
enum ipu_image_convert_type type;
const struct ipu_image_pixfmt *fmt;
unsigned int stride;
/* # of rows (horizontal stripes) if dest height is > 1024 */
unsigned int num_rows;
/* # of columns (vertical stripes) if dest width is > 1024 */
unsigned int num_cols;
struct ipu_image_tile tile[MAX_TILES];
};
struct ipu_image_pixfmt {
u32 fourcc; /* V4L2 fourcc */
int bpp; /* total bpp */
int uv_width_dec; /* decimation in width for U/V planes */
int uv_height_dec; /* decimation in height for U/V planes */
bool planar; /* planar format */
bool uv_swapped; /* U and V planes are swapped */
bool uv_packed; /* partial planar (U and V in same plane) */
};
struct ipu_image_convert_ctx;
struct ipu_image_convert_chan;
struct ipu_image_convert_priv;
enum eof_irq_mask {
EOF_IRQ_IN = BIT(0),
EOF_IRQ_ROT_IN = BIT(1),
EOF_IRQ_OUT = BIT(2),
EOF_IRQ_ROT_OUT = BIT(3),
};
#define EOF_IRQ_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT)
#define EOF_IRQ_ROT_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT | \
EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT)
struct ipu_image_convert_ctx {
struct ipu_image_convert_chan *chan;
ipu_image_convert_cb_t complete;
void *complete_context;
/* Source/destination image data and rotation mode */
struct ipu_image_convert_image in;
struct ipu_image_convert_image out;
struct ipu_ic_csc csc;
enum ipu_rotate_mode rot_mode;
u32 downsize_coeff_h;
u32 downsize_coeff_v;
u32 image_resize_coeff_h;
u32 image_resize_coeff_v;
u32 resize_coeffs_h[MAX_STRIPES_W];
u32 resize_coeffs_v[MAX_STRIPES_H];
/* intermediate buffer for rotation */
struct ipu_image_convert_dma_buf rot_intermediate[2];
/* current buffer number for double buffering */
int cur_buf_num;
bool aborting;
struct completion aborted;
/* can we use double-buffering for this conversion operation? */
bool double_buffering;
/* num_rows * num_cols */
unsigned int num_tiles;
/* next tile to process */
unsigned int next_tile;
/* where to place converted tile in dest image */
unsigned int out_tile_map[MAX_TILES];
/* mask of completed EOF irqs at every tile conversion */
enum eof_irq_mask eof_mask;
struct list_head list;
};
struct ipu_image_convert_chan {
struct ipu_image_convert_priv *priv;
enum ipu_ic_task ic_task;
const struct ipu_image_convert_dma_chan *dma_ch;
struct ipu_ic *ic;
struct ipuv3_channel *in_chan;
struct ipuv3_channel *out_chan;
struct ipuv3_channel *rotation_in_chan;
struct ipuv3_channel *rotation_out_chan;
/* the IPU end-of-frame irqs */
int in_eof_irq;
int rot_in_eof_irq;
int out_eof_irq;
int rot_out_eof_irq;
spinlock_t irqlock;
/* list of convert contexts */
struct list_head ctx_list;
/* queue of conversion runs */
struct list_head pending_q;
/* queue of completed runs */
struct list_head done_q;
/* the current conversion run */
struct ipu_image_convert_run *current_run;
};
struct ipu_image_convert_priv {
struct ipu_image_convert_chan chan[IC_NUM_TASKS];
struct ipu_soc *ipu;
};
static const struct ipu_image_convert_dma_chan
image_convert_dma_chan[IC_NUM_TASKS] = {
[IC_TASK_VIEWFINDER] = {
.in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
.out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
.rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
.rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
.vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
.vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
.vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
},
[IC_TASK_POST_PROCESSOR] = {
.in = IPUV3_CHANNEL_MEM_IC_PP,
.out = IPUV3_CHANNEL_IC_PP_MEM,
.rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
.rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
},
};
static const struct ipu_image_pixfmt image_convert_formats[] = {
{
.fourcc = V4L2_PIX_FMT_RGB565,
.bpp = 16,
}, {
.fourcc = V4L2_PIX_FMT_RGB24,
.bpp = 24,
}, {
.fourcc = V4L2_PIX_FMT_BGR24,
.bpp = 24,
}, {
.fourcc = V4L2_PIX_FMT_RGB32,
.bpp = 32,
}, {
.fourcc = V4L2_PIX_FMT_BGR32,
.bpp = 32,
}, {
.fourcc = V4L2_PIX_FMT_XRGB32,
.bpp = 32,
}, {
.fourcc = V4L2_PIX_FMT_XBGR32,
.bpp = 32,
}, {
.fourcc = V4L2_PIX_FMT_BGRX32,
.bpp = 32,
}, {
.fourcc = V4L2_PIX_FMT_RGBX32,
.bpp = 32,
}, {
.fourcc = V4L2_PIX_FMT_YUYV,
.bpp = 16,
.uv_width_dec = 2,
.uv_height_dec = 1,
}, {
.fourcc = V4L2_PIX_FMT_UYVY,
.bpp = 16,
.uv_width_dec = 2,
.uv_height_dec = 1,
}, {
.fourcc = V4L2_PIX_FMT_YUV420,
.bpp = 12,
.planar = true,
.uv_width_dec = 2,
.uv_height_dec = 2,
}, {
.fourcc = V4L2_PIX_FMT_YVU420,
.bpp = 12,
.planar = true,
.uv_width_dec = 2,
.uv_height_dec = 2,
.uv_swapped = true,
}, {
.fourcc = V4L2_PIX_FMT_NV12,
.bpp = 12,
.planar = true,
.uv_width_dec = 2,
.uv_height_dec = 2,
.uv_packed = true,
}, {
.fourcc = V4L2_PIX_FMT_YUV422P,
.bpp = 16,
.planar = true,
.uv_width_dec = 2,
.uv_height_dec = 1,
}, {
.fourcc = V4L2_PIX_FMT_NV16,
.bpp = 16,
.planar = true,
.uv_width_dec = 2,
.uv_height_dec = 1,
.uv_packed = true,
},
};
static const struct ipu_image_pixfmt *get_format(u32 fourcc)
{
const struct ipu_image_pixfmt *ret = NULL;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
if (image_convert_formats[i].fourcc == fourcc) {
ret = &image_convert_formats[i];
break;
}
}
return ret;
}
static void dump_format(struct ipu_image_convert_ctx *ctx,
struct ipu_image_convert_image *ic_image)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
dev_dbg(priv->ipu->dev,
"task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
chan->ic_task, ctx,
ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
ic_image->base.pix.width, ic_image->base.pix.height,
ic_image->num_cols, ic_image->num_rows,
ic_image->fmt->fourcc & 0xff,
(ic_image->fmt->fourcc >> 8) & 0xff,
(ic_image->fmt->fourcc >> 16) & 0xff,
(ic_image->fmt->fourcc >> 24) & 0xff);
}
int ipu_image_convert_enum_format(int index, u32 *fourcc)
{
const struct ipu_image_pixfmt *fmt;
if (index >= (int)ARRAY_SIZE(image_convert_formats))
return -EINVAL;
/* Format found */
fmt = &image_convert_formats[index];
*fourcc = fmt->fourcc;
return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
static void free_dma_buf(struct ipu_image_convert_priv *priv,
struct ipu_image_convert_dma_buf *buf)
{
if (buf->virt)
dma_free_coherent(priv->ipu->dev,
buf->len, buf->virt, buf->phys);
buf->virt = NULL;
buf->phys = 0;
}
static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
struct ipu_image_convert_dma_buf *buf,
int size)
{
buf->len = PAGE_ALIGN(size);
buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
GFP_DMA | GFP_KERNEL);
if (!buf->virt) {
dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
return -ENOMEM;
}
return 0;
}
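/*
 * Number of <= 1024 pixel stripes needed to cover a dimension,
 * e.g. 1024 -> 1, 1025 -> 2, 1920 -> 2.
 */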
static inline int num_stripes(int dim)
{
return (dim - 1) / 1024 + 1;
}
/*
* Calculate downsizing coefficients, which are the same for all tiles,
* and initial bilinear resizing coefficients, which are used to find the
* best seam positions.
* Also determine the number of tiles necessary to guarantee that no tile
 * is larger than 1024 pixels in either dimension, either at the output or
 * between the IC downsizing and main processing sections.
*/
static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
struct ipu_image *in,
struct ipu_image *out)
{
u32 downsized_width = in->rect.width;
u32 downsized_height = in->rect.height;
u32 downsize_coeff_v = 0;
u32 downsize_coeff_h = 0;
u32 resized_width = out->rect.width;
u32 resized_height = out->rect.height;
u32 resize_coeff_h;
u32 resize_coeff_v;
u32 cols;
u32 rows;
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
resized_width = out->rect.height;
resized_height = out->rect.width;
}
/* Do not let invalid input lead to an endless loop below */
if (WARN_ON(resized_width == 0 || resized_height == 0))
return -EINVAL;
while (downsized_width >= resized_width * 2) {
downsized_width >>= 1;
downsize_coeff_h++;
}
while (downsized_height >= resized_height * 2) {
downsized_height >>= 1;
downsize_coeff_v++;
}
/*
* Calculate the bilinear resizing coefficients that could be used if
* we were converting with a single tile. The bottom right output pixel
* should sample as close as possible to the bottom right input pixel
* out of the decimator, but not overshoot it:
*/
resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);
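	/*
	 * For example, a 1920 pixel wide input with a 1280 pixel wide output
	 * needs no downsizing step (ratio below 2:1) and yields a bilinear
	 * coefficient of 8192 * 1919 / 1279 = 12291, i.e. a resizing ratio
	 * of roughly 8192.0 / 12291 ~= 0.67.
	 */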
/*
* Both the output of the IC downsizing section before being passed to
* the IC main processing section and the final output of the IC main
* processing section must be <= 1024 pixels in both dimensions.
*/
cols = num_stripes(max_t(u32, downsized_width, resized_width));
rows = num_stripes(max_t(u32, downsized_height, resized_height));
dev_dbg(ctx->chan->priv->ipu->dev,
"%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
__func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
resize_coeff_v, cols, rows);
if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
return -EINVAL;
ctx->downsize_coeff_h = downsize_coeff_h;
ctx->downsize_coeff_v = downsize_coeff_v;
ctx->image_resize_coeff_h = resize_coeff_h;
ctx->image_resize_coeff_v = resize_coeff_v;
ctx->in.num_cols = cols;
ctx->in.num_rows = rows;
return 0;
}
#define round_closest(x, y) round_down((x) + (y)/2, (y))
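/* e.g. round_closest(19, 8) == 16 and round_closest(20, 8) == 24 */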
/*
* Find the best aligned seam position for the given column / row index.
* Rotation and image offsets are out of scope.
*
* @index: column / row index, used to calculate valid interval
* @in_edge: input right / bottom edge
* @out_edge: output right / bottom edge
* @in_align: input alignment, either horizontal 8-byte line start address
* alignment, or pixel alignment due to image format
* @out_align: output alignment, either horizontal 8-byte line start address
* alignment, or pixel alignment due to image format or rotator
* block size
* @in_burst: horizontal input burst size in case of horizontal flip
* @out_burst: horizontal output burst size or rotator block size
* @downsize_coeff: downsizing section coefficient
* @resize_coeff: main processing section resizing coefficient
* @_in_seam: aligned input seam position return value
* @_out_seam: aligned output seam position return value
*/
static void find_best_seam(struct ipu_image_convert_ctx *ctx,
unsigned int index,
unsigned int in_edge,
unsigned int out_edge,
unsigned int in_align,
unsigned int out_align,
unsigned int in_burst,
unsigned int out_burst,
unsigned int downsize_coeff,
unsigned int resize_coeff,
u32 *_in_seam,
u32 *_out_seam)
{
struct device *dev = ctx->chan->priv->ipu->dev;
unsigned int out_pos;
/* Input / output seam position candidates */
unsigned int out_seam = 0;
unsigned int in_seam = 0;
unsigned int min_diff = UINT_MAX;
unsigned int out_start;
unsigned int out_end;
unsigned int in_start;
unsigned int in_end;
/* Start within 1024 pixels of the right / bottom edge */
out_start = max_t(int, index * out_align, out_edge - 1024);
/* End before having to add more columns to the left / rows above */
out_end = min_t(unsigned int, out_edge, index * 1024 + 1);
/*
* Limit input seam position to make sure that the downsized input tile
* to the right or bottom does not exceed 1024 pixels.
*/
in_start = max_t(int, index * in_align,
in_edge - (1024 << downsize_coeff));
in_end = min_t(unsigned int, in_edge,
index * (1024 << downsize_coeff) + 1);
/*
* Output tiles must start at a multiple of 8 bytes horizontally and
* possibly at an even line horizontally depending on the pixel format.
* Only consider output aligned positions for the seam.
*/
out_start = round_up(out_start, out_align);
for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
unsigned int in_pos;
unsigned int in_pos_aligned;
unsigned int in_pos_rounded;
unsigned int diff;
/*
		 * Tiles in the right column / bottom row may not be allowed to
* overshoot horizontally / vertically. out_burst may be the
* actual DMA burst size, or the rotator block size.
*/
if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
continue;
/*
* Input sample position, corresponding to out_pos, 19.13 fixed
* point.
*/
in_pos = (out_pos * resize_coeff) << downsize_coeff;
/*
* The closest input sample position that we could actually
* start the input tile at, 19.13 fixed point.
*/
in_pos_aligned = round_closest(in_pos, 8192U * in_align);
/* Convert 19.13 fixed point to integer */
in_pos_rounded = in_pos_aligned / 8192U;
if (in_pos_rounded < in_start)
continue;
if (in_pos_rounded >= in_end)
break;
if ((in_burst > 1) &&
(in_edge - in_pos_rounded) % in_burst)
continue;
diff = abs_diff(in_pos, in_pos_aligned);
if (diff < min_diff) {
in_seam = in_pos_rounded;
out_seam = out_pos;
min_diff = diff;
}
}
*_out_seam = out_seam;
*_in_seam = in_seam;
dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) in [%u, %u] diff %u.%03u\n",
__func__, out_seam, out_align, out_start, out_end,
in_seam, in_align, in_start, in_end, min_diff / 8192,
DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
}
/*
* Tile left edges are required to be aligned to multiples of 8 bytes
* by the IDMAC.
*/
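/*
 * The alignment is returned in pixels: for packed formats, 8 bytes is
 * 2 pixels at 32 bpp, 4 pixels at 16 bpp and 8 pixels otherwise (e.g.
 * 24 bpp RGB, where 8 pixels span 24 bytes); for planar formats the
 * luma alignment is chosen so that the derived chroma line offsets
 * also remain 8-byte aligned.
 */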
static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
{
if (fmt->planar)
return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
else
return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
}
/*
* Tile top edge alignment is only limited by chroma subsampling.
*/
static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
{
return fmt->uv_height_dec > 1 ? 2 : 1;
}
static inline u32 tile_width_align(enum ipu_image_convert_type type,
const struct ipu_image_pixfmt *fmt,
enum ipu_rotate_mode rot_mode)
{
if (type == IMAGE_CONVERT_IN) {
/*
* The IC burst reads 8 pixels at a time. Reading beyond the
* end of the line is usually acceptable. Those pixels are
* ignored, unless the IC has to write the scaled line in
* reverse.
*/
return (!ipu_rot_mode_is_irt(rot_mode) &&
(rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
}
/*
* Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
* formats to guarantee 8-byte aligned line start addresses in the
* chroma planes when IRT is used. Align to 8x8 pixel IRT block size
* for all other formats.
*/
return (ipu_rot_mode_is_irt(rot_mode) &&
fmt->planar && !fmt->uv_packed) ?
8 * fmt->uv_width_dec : 8;
}
static inline u32 tile_height_align(enum ipu_image_convert_type type,
const struct ipu_image_pixfmt *fmt,
enum ipu_rotate_mode rot_mode)
{
if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
return 2;
/*
* Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
* formats to guarantee 8-byte aligned line start addresses in the
* chroma planes when IRT is used. Align to 8x8 pixel IRT block size
* for all other formats.
*/
return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
}
/*
 * Fill in left position and width for all tiles in an input column, and
* for all corresponding output tiles. If the 90° rotator is used, the output
* tiles are in a row, and output tile top position and height are set.
*/
static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
unsigned int col,
struct ipu_image_convert_image *in,
unsigned int in_left, unsigned int in_width,
struct ipu_image_convert_image *out,
unsigned int out_left, unsigned int out_width)
{
unsigned int row, tile_idx;
struct ipu_image_tile *in_tile, *out_tile;
for (row = 0; row < in->num_rows; row++) {
tile_idx = in->num_cols * row + col;
in_tile = &in->tile[tile_idx];
out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
in_tile->left = in_left;
in_tile->width = in_width;
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
out_tile->top = out_left;
out_tile->height = out_width;
} else {
out_tile->left = out_left;
out_tile->width = out_width;
}
}
}
/*
 * Fill in top position and height for all tiles in an input row, and
* for all corresponding output tiles. If the 90° rotator is used, the output
* tiles are in a column, and output tile left position and width are set.
*/
static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
struct ipu_image_convert_image *in,
unsigned int in_top, unsigned int in_height,
struct ipu_image_convert_image *out,
unsigned int out_top, unsigned int out_height)
{
unsigned int col, tile_idx;
struct ipu_image_tile *in_tile, *out_tile;
for (col = 0; col < in->num_cols; col++) {
tile_idx = in->num_cols * row + col;
in_tile = &in->tile[tile_idx];
out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
in_tile->top = in_top;
in_tile->height = in_height;
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
out_tile->left = out_top;
out_tile->width = out_height;
} else {
out_tile->top = out_top;
out_tile->height = out_height;
}
}
}
/*
* Find the best horizontal and vertical seam positions to split into tiles.
* Minimize the fractional part of the input sampling position for the
* top / left pixels of each tile.
*/
static void find_seams(struct ipu_image_convert_ctx *ctx,
struct ipu_image_convert_image *in,
struct ipu_image_convert_image *out)
{
struct device *dev = ctx->chan->priv->ipu->dev;
unsigned int resized_width = out->base.rect.width;
unsigned int resized_height = out->base.rect.height;
unsigned int col;
unsigned int row;
unsigned int in_left_align = tile_left_align(in->fmt);
unsigned int in_top_align = tile_top_align(in->fmt);
unsigned int out_left_align = tile_left_align(out->fmt);
unsigned int out_top_align = tile_top_align(out->fmt);
unsigned int out_width_align = tile_width_align(out->type, out->fmt,
ctx->rot_mode);
unsigned int out_height_align = tile_height_align(out->type, out->fmt,
ctx->rot_mode);
unsigned int in_right = in->base.rect.width;
unsigned int in_bottom = in->base.rect.height;
unsigned int out_right = out->base.rect.width;
unsigned int out_bottom = out->base.rect.height;
unsigned int flipped_out_left;
unsigned int flipped_out_top;
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
/* Switch width/height and align top left to IRT block size */
resized_width = out->base.rect.height;
resized_height = out->base.rect.width;
out_left_align = out_height_align;
out_top_align = out_width_align;
out_width_align = out_left_align;
out_height_align = out_top_align;
out_right = out->base.rect.height;
out_bottom = out->base.rect.width;
}
for (col = in->num_cols - 1; col > 0; col--) {
bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
!(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
bool allow_out_overshoot = (col < in->num_cols - 1) &&
!(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
unsigned int in_left;
unsigned int out_left;
/*
* Align input width to burst length if the scaling step flips
* horizontally.
*/
find_best_seam(ctx, col,
in_right, out_right,
in_left_align, out_left_align,
allow_in_overshoot ? 1 : 8 /* burst length */,
allow_out_overshoot ? 1 : out_width_align,
ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
&in_left, &out_left);
if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
flipped_out_left = resized_width - out_right;
else
flipped_out_left = out_left;
fill_tile_column(ctx, col, in, in_left, in_right - in_left,
out, flipped_out_left, out_right - out_left);
dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
in_left, in_right - in_left,
flipped_out_left, out_right - out_left);
in_right = in_left;
out_right = out_left;
}
flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
resized_width - out_right : 0;
fill_tile_column(ctx, 0, in, 0, in_right,
out, flipped_out_left, out_right);
dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
in_right, flipped_out_left, out_right);
for (row = in->num_rows - 1; row > 0; row--) {
bool allow_overshoot = row < in->num_rows - 1;
unsigned int in_top;
unsigned int out_top;
find_best_seam(ctx, row,
in_bottom, out_bottom,
in_top_align, out_top_align,
1, allow_overshoot ? 1 : out_height_align,
ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
&in_top, &out_top);
if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
ipu_rot_mode_is_irt(ctx->rot_mode))
flipped_out_top = resized_height - out_bottom;
else
flipped_out_top = out_top;
fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
out, flipped_out_top, out_bottom - out_top);
dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
in_top, in_bottom - in_top,
flipped_out_top, out_bottom - out_top);
in_bottom = in_top;
out_bottom = out_top;
}
if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
ipu_rot_mode_is_irt(ctx->rot_mode))
flipped_out_top = resized_height - out_bottom;
else
flipped_out_top = 0;
fill_tile_row(ctx, 0, in, 0, in_bottom,
out, flipped_out_top, out_bottom);
dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
in_bottom, flipped_out_top, out_bottom);
}
static int calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
struct ipu_image_convert_image *image)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
unsigned int max_width = 1024;
unsigned int max_height = 1024;
unsigned int i;
if (image->type == IMAGE_CONVERT_IN) {
/* Up to 4096x4096 input tile size */
max_width <<= ctx->downsize_coeff_h;
max_height <<= ctx->downsize_coeff_v;
}
for (i = 0; i < ctx->num_tiles; i++) {
struct ipu_image_tile *tile;
const unsigned int row = i / image->num_cols;
const unsigned int col = i % image->num_cols;
if (image->type == IMAGE_CONVERT_OUT)
tile = &image->tile[ctx->out_tile_map[i]];
else
tile = &image->tile[i];
tile->size = ((tile->height * image->fmt->bpp) >> 3) *
tile->width;
if (image->fmt->planar) {
tile->stride = tile->width;
tile->rot_stride = tile->height;
} else {
tile->stride =
(image->fmt->bpp * tile->width) >> 3;
tile->rot_stride =
(image->fmt->bpp * tile->height) >> 3;
}
dev_dbg(priv->ipu->dev,
"task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
chan->ic_task, ctx,
image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
row, col,
tile->width, tile->height, tile->left, tile->top);
if (!tile->width || tile->width > max_width ||
!tile->height || tile->height > max_height) {
dev_err(priv->ipu->dev, "invalid %s tile size: %ux%u\n",
image->type == IMAGE_CONVERT_IN ? "input" :
"output", tile->width, tile->height);
return -EINVAL;
}
}
return 0;
}
/*
* Use the rotation transformation to find the tile coordinates
* (row, col) of a tile in the destination frame that corresponds
* to the given tile coordinates of a source frame. The destination
* coordinate is then converted to a tile index.
*/
static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
int src_row, int src_col)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
struct ipu_image_convert_image *s_image = &ctx->in;
struct ipu_image_convert_image *d_image = &ctx->out;
int dst_row, dst_col;
/* with no rotation it's a 1:1 mapping */
if (ctx->rot_mode == IPU_ROTATE_NONE)
return src_row * s_image->num_cols + src_col;
/*
* before doing the transform, first we have to translate
* source row,col for an origin in the center of s_image
*/
src_row = src_row * 2 - (s_image->num_rows - 1);
src_col = src_col * 2 - (s_image->num_cols - 1);
/* do the rotation transform */
if (ctx->rot_mode & IPU_ROT_BIT_90) {
dst_col = -src_row;
dst_row = src_col;
} else {
dst_col = src_col;
dst_row = src_row;
}
/* apply flip */
if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
dst_col = -dst_col;
if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
dst_row = -dst_row;
dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
/*
* finally translate dest row,col using an origin in upper
* left of d_image
*/
dst_row += d_image->num_rows - 1;
dst_col += d_image->num_cols - 1;
dst_row /= 2;
dst_col /= 2;
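	/*
	 * For example, with a 2x2 tile grid and only the 90° rotation bit
	 * set, source tile [0,0] maps to destination index 1 and source
	 * tile [1,0] maps to destination index 0.
	 */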
return dst_row * d_image->num_cols + dst_col;
}
/*
 * Fill the out_tile_map[] with transformed destination tile indices.
*/
static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
{
struct ipu_image_convert_image *s_image = &ctx->in;
unsigned int row, col, tile = 0;
for (row = 0; row < s_image->num_rows; row++) {
for (col = 0; col < s_image->num_cols; col++) {
ctx->out_tile_map[tile] =
transform_tile_index(ctx, row, col);
tile++;
}
}
}
static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
struct ipu_image_convert_image *image)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
const struct ipu_image_pixfmt *fmt = image->fmt;
unsigned int row, col, tile = 0;
u32 H, top, y_stride, uv_stride;
u32 uv_row_off, uv_col_off, uv_off, u_off, v_off;
u32 y_row_off, y_col_off, y_off;
u32 y_size, uv_size;
/* setup some convenience vars */
H = image->base.pix.height;
y_stride = image->stride;
uv_stride = y_stride / fmt->uv_width_dec;
if (fmt->uv_packed)
uv_stride *= 2;
y_size = H * y_stride;
uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
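	/*
	 * For 4:2:0 planar formats this gives uv_size == y_size / 4; for
	 * NV12 the doubled uv_stride accounts for the interleaved U and V
	 * samples in its single chroma plane.
	 */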
for (row = 0; row < image->num_rows; row++) {
top = image->tile[tile].top;
y_row_off = top * y_stride;
uv_row_off = (top * uv_stride) / fmt->uv_height_dec;
for (col = 0; col < image->num_cols; col++) {
y_col_off = image->tile[tile].left;
uv_col_off = y_col_off / fmt->uv_width_dec;
if (fmt->uv_packed)
uv_col_off *= 2;
y_off = y_row_off + y_col_off;
uv_off = uv_row_off + uv_col_off;
u_off = y_size - y_off + uv_off;
v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
if (fmt->uv_swapped)
swap(u_off, v_off);
image->tile[tile].offset = y_off;
image->tile[tile].u_off = u_off;
image->tile[tile++].v_off = v_off;
if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
dev_err(priv->ipu->dev,
"task %u: ctx %p: %s@[%d,%d]: "
"y_off %08x, u_off %08x, v_off %08x\n",
chan->ic_task, ctx,
image->type == IMAGE_CONVERT_IN ?
"Input" : "Output", row, col,
y_off, u_off, v_off);
return -EINVAL;
}
}
}
return 0;
}
static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
struct ipu_image_convert_image *image)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
const struct ipu_image_pixfmt *fmt = image->fmt;
unsigned int row, col, tile = 0;
u32 bpp, stride, offset;
u32 row_off, col_off;
/* setup some convenience vars */
stride = image->stride;
bpp = fmt->bpp;
for (row = 0; row < image->num_rows; row++) {
row_off = image->tile[tile].top * stride;
for (col = 0; col < image->num_cols; col++) {
col_off = (image->tile[tile].left * bpp) >> 3;
offset = row_off + col_off;
image->tile[tile].offset = offset;
image->tile[tile].u_off = 0;
image->tile[tile++].v_off = 0;
if (offset & 0x7) {
dev_err(priv->ipu->dev,
"task %u: ctx %p: %s@[%d,%d]: "
"phys %08x\n",
chan->ic_task, ctx,
image->type == IMAGE_CONVERT_IN ?
"Input" : "Output", row, col,
row_off + col_off);
return -EINVAL;
}
}
}
return 0;
}
static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
struct ipu_image_convert_image *image)
{
if (image->fmt->planar)
return calc_tile_offsets_planar(ctx, image);
return calc_tile_offsets_packed(ctx, image);
}
/*
* Calculate the resizing ratio for the IC main processing section given input
* size, fixed downsizing coefficient, and output size.
* Either round to closest for the next tile's first pixel to minimize seams
* and distortion (for all but right column / bottom row), or round down to
* avoid sampling beyond the edges of the input image for this tile's last
* pixel.
* Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
*/
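/*
 * For example, a 400 pixel input span resized to 320 output pixels with no
 * downsizing gives 8192 * 399 / 319 = 10246 when rounding down, or
 * DIV_ROUND_CLOSEST(8192 * 400, 320) = 10240 when overshoot is allowed.
 */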
static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
u32 output_size, bool allow_overshoot)
{
u32 downsized = input_size >> downsize_coeff;
if (allow_overshoot)
return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
else
return 8192 * (downsized - 1) / (output_size - 1);
}
/*
* Slightly modify resize coefficients per tile to hide the bilinear
* interpolator reset at tile borders, shifting the right / bottom edge
* by up to a half input pixel. This removes noticeable seams between
* tiles at higher upscaling factors.
*/
static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
struct ipu_image_tile *in_tile, *out_tile;
unsigned int col, row, tile_idx;
unsigned int last_output;
for (col = 0; col < ctx->in.num_cols; col++) {
bool closest = (col < ctx->in.num_cols - 1) &&
!(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
u32 resized_width;
u32 resize_coeff_h;
u32 in_width;
tile_idx = col;
in_tile = &ctx->in.tile[tile_idx];
out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
if (ipu_rot_mode_is_irt(ctx->rot_mode))
resized_width = out_tile->height;
else
resized_width = out_tile->width;
resize_coeff_h = calc_resize_coeff(in_tile->width,
ctx->downsize_coeff_h,
resized_width, closest);
dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
__func__, col, resize_coeff_h);
/*
* With the horizontal scaling factor known, round up resized
* width (output width or height) to burst size.
*/
resized_width = round_up(resized_width, 8);
/*
* Calculate input width from the last accessed input pixel
* given resized width and scaling coefficients. Round up to
* burst size.
*/
last_output = resized_width - 1;
if (closest && ((last_output * resize_coeff_h) % 8192))
last_output++;
in_width = round_up(
(DIV_ROUND_UP(last_output * resize_coeff_h, 8192) + 1)
<< ctx->downsize_coeff_h, 8);
for (row = 0; row < ctx->in.num_rows; row++) {
tile_idx = row * ctx->in.num_cols + col;
in_tile = &ctx->in.tile[tile_idx];
out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
if (ipu_rot_mode_is_irt(ctx->rot_mode))
out_tile->height = resized_width;
else
out_tile->width = resized_width;
in_tile->width = in_width;
}
ctx->resize_coeffs_h[col] = resize_coeff_h;
}
for (row = 0; row < ctx->in.num_rows; row++) {
bool closest = (row < ctx->in.num_rows - 1) &&
!(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
u32 resized_height;
u32 resize_coeff_v;
u32 in_height;
tile_idx = row * ctx->in.num_cols;
in_tile = &ctx->in.tile[tile_idx];
out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
if (ipu_rot_mode_is_irt(ctx->rot_mode))
resized_height = out_tile->width;
else
resized_height = out_tile->height;
resize_coeff_v = calc_resize_coeff(in_tile->height,
ctx->downsize_coeff_v,
resized_height, closest);
dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
__func__, row, resize_coeff_v);
/*
* With the vertical scaling factor known, round up resized
* height (output width or height) to IDMAC limitations.
*/
resized_height = round_up(resized_height, 2);
/*
		 * Calculate input height from the last accessed input pixel
* given resized height and scaling coefficients. Align to
* IDMAC restrictions.
*/
last_output = resized_height - 1;
if (closest && ((last_output * resize_coeff_v) % 8192))
last_output++;
in_height = round_up(
(DIV_ROUND_UP(last_output * resize_coeff_v, 8192) + 1)
<< ctx->downsize_coeff_v, 2);
for (col = 0; col < ctx->in.num_cols; col++) {
tile_idx = row * ctx->in.num_cols + col;
in_tile = &ctx->in.tile[tile_idx];
out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
if (ipu_rot_mode_is_irt(ctx->rot_mode))
out_tile->width = resized_height;
else
out_tile->height = resized_height;
in_tile->height = in_height;
}
ctx->resize_coeffs_v[row] = resize_coeff_v;
}
}
/*
 * Return the number of runs in the given queue (pending_q or done_q)
 * for this context. Hold irqlock when calling.
*/
static int get_run_count(struct ipu_image_convert_ctx *ctx,
struct list_head *q)
{
struct ipu_image_convert_run *run;
int count = 0;
lockdep_assert_held(&ctx->chan->irqlock);
list_for_each_entry(run, q, list) {
if (run->ctx == ctx)
count++;
}
return count;
}
static void convert_stop(struct ipu_image_convert_run *run)
{
struct ipu_image_convert_ctx *ctx = run->ctx;
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
__func__, chan->ic_task, ctx, run);
/* disable IC tasks and the channels */
ipu_ic_task_disable(chan->ic);
ipu_idmac_disable_channel(chan->in_chan);
ipu_idmac_disable_channel(chan->out_chan);
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
ipu_idmac_disable_channel(chan->rotation_in_chan);
ipu_idmac_disable_channel(chan->rotation_out_chan);
ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
}
ipu_ic_disable(chan->ic);
}
static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
struct ipuv3_channel *channel,
struct ipu_image_convert_image *image,
enum ipu_rotate_mode rot_mode,
bool rot_swap_width_height,
unsigned int tile)
{
struct ipu_image_convert_chan *chan = ctx->chan;
unsigned int burst_size;
u32 width, height, stride;
dma_addr_t addr0, addr1 = 0;
struct ipu_image tile_image;
unsigned int tile_idx[2];
if (image->type == IMAGE_CONVERT_OUT) {
tile_idx[0] = ctx->out_tile_map[tile];
tile_idx[1] = ctx->out_tile_map[1];
} else {
tile_idx[0] = tile;
tile_idx[1] = 1;
}
if (rot_swap_width_height) {
width = image->tile[tile_idx[0]].height;
height = image->tile[tile_idx[0]].width;
stride = image->tile[tile_idx[0]].rot_stride;
addr0 = ctx->rot_intermediate[0].phys;
if (ctx->double_buffering)
addr1 = ctx->rot_intermediate[1].phys;
} else {
width = image->tile[tile_idx[0]].width;
height = image->tile[tile_idx[0]].height;
stride = image->stride;
addr0 = image->base.phys0 +
image->tile[tile_idx[0]].offset;
if (ctx->double_buffering)
addr1 = image->base.phys0 +
image->tile[tile_idx[1]].offset;
}
ipu_cpmem_zero(channel);
memset(&tile_image, 0, sizeof(tile_image));
tile_image.pix.width = tile_image.rect.width = width;
tile_image.pix.height = tile_image.rect.height = height;
tile_image.pix.bytesperline = stride;
tile_image.pix.pixelformat = image->fmt->fourcc;
tile_image.phys0 = addr0;
tile_image.phys1 = addr1;
if (image->fmt->planar && !rot_swap_width_height) {
tile_image.u_offset = image->tile[tile_idx[0]].u_off;
tile_image.v_offset = image->tile[tile_idx[0]].v_off;
}
ipu_cpmem_set_image(channel, &tile_image);
if (rot_mode)
ipu_cpmem_set_rotation(channel, rot_mode);
/*
* Skip writing U and V components to odd rows in the output
* channels for planar 4:2:0.
*/
if ((channel == chan->out_chan ||
channel == chan->rotation_out_chan) &&
image->fmt->planar && image->fmt->uv_height_dec == 2)
ipu_cpmem_skip_odd_chroma_rows(channel);
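	/*
	 * The rotator works on 8x8 pixel blocks, so the rotation channels
	 * always use a burst of 8; the other channels use a burst of 16
	 * when the tile width is a multiple of 16, otherwise 8.
	 */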
if (channel == chan->rotation_in_chan ||
channel == chan->rotation_out_chan) {
burst_size = 8;
ipu_cpmem_set_block_mode(channel);
} else
burst_size = (width % 16) ? 8 : 16;
ipu_cpmem_set_burstsize(channel, burst_size);
ipu_ic_task_idma_init(chan->ic, channel, width, height,
burst_size, rot_mode);
/*
* Setting a non-zero AXI ID collides with the PRG AXI snooping, so
* only do this when there is no PRG present.
*/
if (!channel->ipu->prg_priv)
ipu_cpmem_set_axi_id(channel, 1);
ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
}
static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
{
struct ipu_image_convert_ctx *ctx = run->ctx;
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
struct ipu_image_convert_image *s_image = &ctx->in;
struct ipu_image_convert_image *d_image = &ctx->out;
unsigned int dst_tile = ctx->out_tile_map[tile];
unsigned int dest_width, dest_height;
unsigned int col, row;
u32 rsc;
int ret;
dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
__func__, chan->ic_task, ctx, run, tile, dst_tile);
/* clear EOF irq mask */
ctx->eof_mask = 0;
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
/* swap width/height for resizer */
dest_width = d_image->tile[dst_tile].height;
dest_height = d_image->tile[dst_tile].width;
} else {
dest_width = d_image->tile[dst_tile].width;
dest_height = d_image->tile[dst_tile].height;
}
row = tile / s_image->num_cols;
col = tile % s_image->num_cols;
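	/*
	 * Pack the per-tile scaling factors into the IC resizing coefficient
	 * word: the 2-bit vertical downsize and 14-bit vertical resize fields
	 * in the upper half, their horizontal counterparts in the lower half.
	 */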
rsc = (ctx->downsize_coeff_v << 30) |
(ctx->resize_coeffs_v[row] << 16) |
(ctx->downsize_coeff_h << 14) |
(ctx->resize_coeffs_h[col]);
dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
__func__, s_image->tile[tile].width,
s_image->tile[tile].height, dest_width, dest_height, rsc);
/* setup the IC resizer and CSC */
ret = ipu_ic_task_init_rsc(chan->ic, &ctx->csc,
s_image->tile[tile].width,
s_image->tile[tile].height,
dest_width,
dest_height,
rsc);
if (ret) {
dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
return ret;
}
/* init the source MEM-->IC PP IDMAC channel */
init_idmac_channel(ctx, chan->in_chan, s_image,
IPU_ROTATE_NONE, false, tile);
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
/* init the IC PP-->MEM IDMAC channel */
init_idmac_channel(ctx, chan->out_chan, d_image,
IPU_ROTATE_NONE, true, tile);
/* init the MEM-->IC PP ROT IDMAC channel */
init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
ctx->rot_mode, true, tile);
/* init the destination IC PP ROT-->MEM IDMAC channel */
init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
IPU_ROTATE_NONE, false, tile);
/* now link IC PP-->MEM to MEM-->IC PP ROT */
ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
} else {
/* init the destination IC PP-->MEM IDMAC channel */
init_idmac_channel(ctx, chan->out_chan, d_image,
ctx->rot_mode, false, tile);
}
/* enable the IC */
ipu_ic_enable(chan->ic);
/* set buffers ready */
ipu_idmac_select_buffer(chan->in_chan, 0);
ipu_idmac_select_buffer(chan->out_chan, 0);
if (ipu_rot_mode_is_irt(ctx->rot_mode))
ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
if (ctx->double_buffering) {
ipu_idmac_select_buffer(chan->in_chan, 1);
ipu_idmac_select_buffer(chan->out_chan, 1);
if (ipu_rot_mode_is_irt(ctx->rot_mode))
ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
}
/* enable the channels! */
ipu_idmac_enable_channel(chan->in_chan);
ipu_idmac_enable_channel(chan->out_chan);
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
ipu_idmac_enable_channel(chan->rotation_in_chan);
ipu_idmac_enable_channel(chan->rotation_out_chan);
}
ipu_ic_task_enable(chan->ic);
ipu_cpmem_dump(chan->in_chan);
ipu_cpmem_dump(chan->out_chan);
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
ipu_cpmem_dump(chan->rotation_in_chan);
ipu_cpmem_dump(chan->rotation_out_chan);
}
ipu_dump(priv->ipu);
return 0;
}
/* hold irqlock when calling */
static int do_run(struct ipu_image_convert_run *run)
{
struct ipu_image_convert_ctx *ctx = run->ctx;
struct ipu_image_convert_chan *chan = ctx->chan;
lockdep_assert_held(&chan->irqlock);
ctx->in.base.phys0 = run->in_phys;
ctx->out.base.phys0 = run->out_phys;
ctx->cur_buf_num = 0;
ctx->next_tile = 1;
/* remove run from pending_q and set as current */
list_del(&run->list);
chan->current_run = run;
return convert_start(run, 0);
}
/* hold irqlock when calling */
static void run_next(struct ipu_image_convert_chan *chan)
{
struct ipu_image_convert_priv *priv = chan->priv;
struct ipu_image_convert_run *run, *tmp;
int ret;
lockdep_assert_held(&chan->irqlock);
list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
/* skip contexts that are aborting */
if (run->ctx->aborting) {
dev_dbg(priv->ipu->dev,
"%s: task %u: skipping aborting ctx %p run %p\n",
__func__, chan->ic_task, run->ctx, run);
continue;
}
ret = do_run(run);
if (!ret)
break;
/*
* something went wrong with start, add the run
* to done q and continue to the next run in the
* pending q.
*/
run->status = ret;
list_add_tail(&run->list, &chan->done_q);
chan->current_run = NULL;
}
}
static void empty_done_q(struct ipu_image_convert_chan *chan)
{
struct ipu_image_convert_priv *priv = chan->priv;
struct ipu_image_convert_run *run;
unsigned long flags;
spin_lock_irqsave(&chan->irqlock, flags);
while (!list_empty(&chan->done_q)) {
run = list_entry(chan->done_q.next,
struct ipu_image_convert_run,
list);
list_del(&run->list);
dev_dbg(priv->ipu->dev,
"%s: task %u: completing ctx %p run %p with %d\n",
__func__, chan->ic_task, run->ctx, run, run->status);
/* call the completion callback and free the run */
spin_unlock_irqrestore(&chan->irqlock, flags);
run->ctx->complete(run, run->ctx->complete_context);
spin_lock_irqsave(&chan->irqlock, flags);
}
spin_unlock_irqrestore(&chan->irqlock, flags);
}
/*
* the bottom half thread clears out the done_q, calling the
* completion handler for each.
*/
static irqreturn_t do_bh(int irq, void *dev_id)
{
struct ipu_image_convert_chan *chan = dev_id;
struct ipu_image_convert_priv *priv = chan->priv;
struct ipu_image_convert_ctx *ctx;
unsigned long flags;
dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
chan->ic_task);
empty_done_q(chan);
spin_lock_irqsave(&chan->irqlock, flags);
/*
	 * The done_q has been cleared out; signal any aborting contexts
	 * that their abort can now complete.
*/
list_for_each_entry(ctx, &chan->ctx_list, list) {
if (ctx->aborting) {
dev_dbg(priv->ipu->dev,
"%s: task %u: signaling abort for ctx %p\n",
__func__, chan->ic_task, ctx);
complete_all(&ctx->aborted);
}
}
spin_unlock_irqrestore(&chan->irqlock, flags);
dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
chan->ic_task);
return IRQ_HANDLED;
}
static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
{
unsigned int cur_tile = ctx->next_tile - 1;
unsigned int next_tile = ctx->next_tile;
if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
return true;
return false;
}
/* hold irqlock when calling */
static irqreturn_t do_tile_complete(struct ipu_image_convert_run *run)
{
struct ipu_image_convert_ctx *ctx = run->ctx;
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_tile *src_tile, *dst_tile;
struct ipu_image_convert_image *s_image = &ctx->in;
struct ipu_image_convert_image *d_image = &ctx->out;
struct ipuv3_channel *outch;
unsigned int dst_idx;
lockdep_assert_held(&chan->irqlock);
outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
chan->rotation_out_chan : chan->out_chan;
/*
* It is difficult to stop the channel DMA before the channels
* enter the paused state. Without double-buffering the channels
* are always in a paused state when the EOF irq occurs, so it
* is safe to stop the channels now. For double-buffering we
* just ignore the abort until the operation completes, when it
* is safe to shut down.
*/
if (ctx->aborting && !ctx->double_buffering) {
convert_stop(run);
run->status = -EIO;
goto done;
}
if (ctx->next_tile == ctx->num_tiles) {
/*
* the conversion is complete
*/
convert_stop(run);
run->status = 0;
goto done;
}
/*
* not done, place the next tile buffers.
*/
if (!ctx->double_buffering) {
if (ic_settings_changed(ctx)) {
convert_stop(run);
convert_start(run, ctx->next_tile);
} else {
src_tile = &s_image->tile[ctx->next_tile];
dst_idx = ctx->out_tile_map[ctx->next_tile];
dst_tile = &d_image->tile[dst_idx];
ipu_cpmem_set_buffer(chan->in_chan, 0,
s_image->base.phys0 +
src_tile->offset);
ipu_cpmem_set_buffer(outch, 0,
d_image->base.phys0 +
dst_tile->offset);
if (s_image->fmt->planar)
ipu_cpmem_set_uv_offset(chan->in_chan,
src_tile->u_off,
src_tile->v_off);
if (d_image->fmt->planar)
ipu_cpmem_set_uv_offset(outch,
dst_tile->u_off,
dst_tile->v_off);
ipu_idmac_select_buffer(chan->in_chan, 0);
ipu_idmac_select_buffer(outch, 0);
}
} else if (ctx->next_tile < ctx->num_tiles - 1) {
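		/*
		 * With double-buffering, queue the tile after next into the
		 * buffer that just completed, while the hardware is already
		 * working on the next tile from the other buffer.
		 */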
src_tile = &s_image->tile[ctx->next_tile + 1];
dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
dst_tile = &d_image->tile[dst_idx];
ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
s_image->base.phys0 + src_tile->offset);
ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
d_image->base.phys0 + dst_tile->offset);
ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
ipu_idmac_select_buffer(outch, ctx->cur_buf_num);
ctx->cur_buf_num ^= 1;
}
ctx->eof_mask = 0; /* clear EOF irq mask for next tile */
ctx->next_tile++;
return IRQ_HANDLED;
done:
list_add_tail(&run->list, &chan->done_q);
chan->current_run = NULL;
run_next(chan);
return IRQ_WAKE_THREAD;
}
static irqreturn_t eof_irq(int irq, void *data)
{
struct ipu_image_convert_chan *chan = data;
struct ipu_image_convert_priv *priv = chan->priv;
struct ipu_image_convert_ctx *ctx;
struct ipu_image_convert_run *run;
irqreturn_t ret = IRQ_HANDLED;
bool tile_complete = false;
unsigned long flags;
spin_lock_irqsave(&chan->irqlock, flags);
/* get current run and its context */
run = chan->current_run;
if (!run) {
ret = IRQ_NONE;
goto out;
}
ctx = run->ctx;
if (irq == chan->in_eof_irq) {
ctx->eof_mask |= EOF_IRQ_IN;
} else if (irq == chan->out_eof_irq) {
ctx->eof_mask |= EOF_IRQ_OUT;
} else if (irq == chan->rot_in_eof_irq ||
irq == chan->rot_out_eof_irq) {
if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
/* this was NOT a rotation op, shouldn't happen */
dev_err(priv->ipu->dev,
"Unexpected rotation interrupt\n");
goto out;
}
ctx->eof_mask |= (irq == chan->rot_in_eof_irq) ?
EOF_IRQ_ROT_IN : EOF_IRQ_ROT_OUT;
} else {
dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq);
ret = IRQ_NONE;
goto out;
}
if (ipu_rot_mode_is_irt(ctx->rot_mode))
tile_complete = (ctx->eof_mask == EOF_IRQ_ROT_COMPLETE);
else
tile_complete = (ctx->eof_mask == EOF_IRQ_COMPLETE);
if (tile_complete)
ret = do_tile_complete(run);
out:
spin_unlock_irqrestore(&chan->irqlock, flags);
return ret;
}
/*
* try to force the completion of runs for this ctx. Called when
* abort wait times out in ipu_image_convert_abort().
*/
static void force_abort(struct ipu_image_convert_ctx *ctx)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_run *run;
unsigned long flags;
spin_lock_irqsave(&chan->irqlock, flags);
run = chan->current_run;
if (run && run->ctx == ctx) {
convert_stop(run);
run->status = -EIO;
list_add_tail(&run->list, &chan->done_q);
chan->current_run = NULL;
run_next(chan);
}
spin_unlock_irqrestore(&chan->irqlock, flags);
empty_done_q(chan);
}
static void release_ipu_resources(struct ipu_image_convert_chan *chan)
{
if (chan->in_eof_irq >= 0)
free_irq(chan->in_eof_irq, chan);
if (chan->rot_in_eof_irq >= 0)
free_irq(chan->rot_in_eof_irq, chan);
if (chan->out_eof_irq >= 0)
free_irq(chan->out_eof_irq, chan);
if (chan->rot_out_eof_irq >= 0)
free_irq(chan->rot_out_eof_irq, chan);
if (!IS_ERR_OR_NULL(chan->in_chan))
ipu_idmac_put(chan->in_chan);
if (!IS_ERR_OR_NULL(chan->out_chan))
ipu_idmac_put(chan->out_chan);
if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
ipu_idmac_put(chan->rotation_in_chan);
if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
ipu_idmac_put(chan->rotation_out_chan);
if (!IS_ERR_OR_NULL(chan->ic))
ipu_ic_put(chan->ic);
chan->in_chan = chan->out_chan = chan->rotation_in_chan =
chan->rotation_out_chan = NULL;
chan->in_eof_irq = -1;
chan->rot_in_eof_irq = -1;
chan->out_eof_irq = -1;
chan->rot_out_eof_irq = -1;
}
static int get_eof_irq(struct ipu_image_convert_chan *chan,
struct ipuv3_channel *channel)
{
struct ipu_image_convert_priv *priv = chan->priv;
int ret, irq;
irq = ipu_idmac_channel_irq(priv->ipu, channel, IPU_IRQ_EOF);
ret = request_threaded_irq(irq, eof_irq, do_bh, 0, "ipu-ic", chan);
if (ret < 0) {
dev_err(priv->ipu->dev, "could not acquire irq %d\n", irq);
return ret;
}
return irq;
}
static int get_ipu_resources(struct ipu_image_convert_chan *chan)
{
const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
struct ipu_image_convert_priv *priv = chan->priv;
int ret;
/* get IC */
chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
if (IS_ERR(chan->ic)) {
dev_err(priv->ipu->dev, "could not acquire IC\n");
ret = PTR_ERR(chan->ic);
goto err;
}
/* get IDMAC channels */
chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
ret = -EBUSY;
goto err;
}
chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
dev_err(priv->ipu->dev,
"could not acquire idmac rotation channels\n");
ret = -EBUSY;
goto err;
}
/* acquire the EOF interrupts */
ret = get_eof_irq(chan, chan->in_chan);
if (ret < 0) {
chan->in_eof_irq = -1;
goto err;
}
chan->in_eof_irq = ret;
ret = get_eof_irq(chan, chan->rotation_in_chan);
if (ret < 0) {
chan->rot_in_eof_irq = -1;
goto err;
}
chan->rot_in_eof_irq = ret;
ret = get_eof_irq(chan, chan->out_chan);
if (ret < 0) {
chan->out_eof_irq = -1;
goto err;
}
chan->out_eof_irq = ret;
ret = get_eof_irq(chan, chan->rotation_out_chan);
if (ret < 0) {
chan->rot_out_eof_irq = -1;
goto err;
}
chan->rot_out_eof_irq = ret;
return 0;
err:
release_ipu_resources(chan);
return ret;
}
static int fill_image(struct ipu_image_convert_ctx *ctx,
struct ipu_image_convert_image *ic_image,
struct ipu_image *image,
enum ipu_image_convert_type type)
{
struct ipu_image_convert_priv *priv = ctx->chan->priv;
ic_image->base = *image;
ic_image->type = type;
ic_image->fmt = get_format(image->pix.pixelformat);
if (!ic_image->fmt) {
dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
type == IMAGE_CONVERT_OUT ? "Output" : "Input");
return -EINVAL;
}
if (ic_image->fmt->planar)
ic_image->stride = ic_image->base.pix.width;
else
ic_image->stride = ic_image->base.pix.bytesperline;
return 0;
}
/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
static unsigned int clamp_align(unsigned int x, unsigned int min,
unsigned int max, unsigned int align)
{
/* Bits that must be zero to be aligned */
unsigned int mask = ~((1 << align) - 1);
/* Clamp to aligned min and max */
x = clamp(x, (min + ~mask) & mask, max & mask);
/* Round to nearest aligned value */
if (align)
x = (x + (1 << (align - 1))) & mask;
return x;
}
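/*
 * For example, clamp_align(1283, 32, 4096, 3) clamps to [32, 4096] and
 * rounds to the nearest multiple of 8 (1 << 3), giving 1280.
 */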
/* Adjusts input/output images to IPU restrictions */
void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
enum ipu_rotate_mode rot_mode)
{
const struct ipu_image_pixfmt *infmt, *outfmt;
u32 w_align_out, h_align_out;
u32 w_align_in, h_align_in;
infmt = get_format(in->pix.pixelformat);
outfmt = get_format(out->pix.pixelformat);
/* set some default pixel formats if needed */
if (!infmt) {
in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
infmt = get_format(V4L2_PIX_FMT_RGB24);
}
if (!outfmt) {
out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
outfmt = get_format(V4L2_PIX_FMT_RGB24);
}
/* image converter does not handle fields */
in->pix.field = out->pix.field = V4L2_FIELD_NONE;
/* resizer cannot downsize more than 4:1 */
if (ipu_rot_mode_is_irt(rot_mode)) {
out->pix.height = max_t(__u32, out->pix.height,
in->pix.width / 4);
out->pix.width = max_t(__u32, out->pix.width,
in->pix.height / 4);
} else {
out->pix.width = max_t(__u32, out->pix.width,
in->pix.width / 4);
out->pix.height = max_t(__u32, out->pix.height,
in->pix.height / 4);
}
/* align input width/height */
w_align_in = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt,
rot_mode));
h_align_in = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt,
rot_mode));
in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W,
w_align_in);
in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H,
h_align_in);
/* align output width/height */
w_align_out = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt,
rot_mode));
h_align_out = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt,
rot_mode));
out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W,
w_align_out);
out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H,
h_align_out);
/* set input/output strides and image sizes */
in->pix.bytesperline = infmt->planar ?
clamp_align(in->pix.width, 2 << w_align_in, MAX_W,
w_align_in) :
clamp_align((in->pix.width * infmt->bpp) >> 3,
((2 << w_align_in) * infmt->bpp) >> 3,
(MAX_W * infmt->bpp) >> 3,
w_align_in);
in->pix.sizeimage = infmt->planar ?
(in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 :
in->pix.height * in->pix.bytesperline;
out->pix.bytesperline = outfmt->planar ? out->pix.width :
(out->pix.width * outfmt->bpp) >> 3;
out->pix.sizeimage = outfmt->planar ?
(out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 :
out->pix.height * out->pix.bytesperline;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
/*
 * This is used by ipu_image_convert_prepare() to verify that the set input
 * and output images are valid before starting the conversion. Clients can
* also call it before calling ipu_image_convert_prepare().
*/
int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
enum ipu_rotate_mode rot_mode)
{
struct ipu_image testin, testout;
testin = *in;
testout = *out;
ipu_image_convert_adjust(&testin, &testout, rot_mode);
if (testin.pix.width != in->pix.width ||
testin.pix.height != in->pix.height ||
testout.pix.width != out->pix.width ||
testout.pix.height != out->pix.height)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
/*
* Call ipu_image_convert_prepare() to prepare for the conversion of
 * the given images and rotation mode. Returns a new conversion context.
*/
struct ipu_image_convert_ctx *
ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
struct ipu_image *in, struct ipu_image *out,
enum ipu_rotate_mode rot_mode,
ipu_image_convert_cb_t complete,
void *complete_context)
{
struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
struct ipu_image_convert_image *s_image, *d_image;
struct ipu_image_convert_chan *chan;
struct ipu_image_convert_ctx *ctx;
unsigned long flags;
unsigned int i;
bool get_res;
int ret;
if (!in || !out || !complete ||
(ic_task != IC_TASK_VIEWFINDER &&
ic_task != IC_TASK_POST_PROCESSOR))
return ERR_PTR(-EINVAL);
/* verify the in/out images before continuing */
ret = ipu_image_convert_verify(in, out, rot_mode);
if (ret) {
dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
__func__);
return ERR_PTR(ret);
}
chan = &priv->chan[ic_task];
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
chan->ic_task, ctx);
ctx->chan = chan;
init_completion(&ctx->aborted);
ctx->rot_mode = rot_mode;
/* Sets ctx->in.num_rows/cols as well */
ret = calc_image_resize_coefficients(ctx, in, out);
if (ret)
goto out_free;
s_image = &ctx->in;
d_image = &ctx->out;
/* set tiling and rotation */
if (ipu_rot_mode_is_irt(rot_mode)) {
d_image->num_rows = s_image->num_cols;
d_image->num_cols = s_image->num_rows;
} else {
d_image->num_rows = s_image->num_rows;
d_image->num_cols = s_image->num_cols;
}
ctx->num_tiles = d_image->num_cols * d_image->num_rows;
ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
if (ret)
goto out_free;
ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
if (ret)
goto out_free;
calc_out_tile_map(ctx);
find_seams(ctx, s_image, d_image);
ret = calc_tile_dimensions(ctx, s_image);
if (ret)
goto out_free;
ret = calc_tile_offsets(ctx, s_image);
if (ret)
goto out_free;
calc_tile_dimensions(ctx, d_image);
ret = calc_tile_offsets(ctx, d_image);
if (ret)
goto out_free;
calc_tile_resize_coefficients(ctx);
ret = ipu_ic_calc_csc(&ctx->csc,
s_image->base.pix.ycbcr_enc,
s_image->base.pix.quantization,
ipu_pixelformat_to_colorspace(s_image->fmt->fourcc),
d_image->base.pix.ycbcr_enc,
d_image->base.pix.quantization,
ipu_pixelformat_to_colorspace(d_image->fmt->fourcc));
if (ret)
goto out_free;
dump_format(ctx, s_image);
dump_format(ctx, d_image);
ctx->complete = complete;
ctx->complete_context = complete_context;
/*
* Can we use double-buffering for this operation? If there is
* only one tile (the whole image can be converted in a single
* operation) there's no point in using double-buffering. Also,
* the IPU's IDMAC channels allow only a single U and V plane
* offset shared between both buffers, but these offsets change
* for every tile, and therefore would have to be updated for
* each buffer which is not possible. So double-buffering is
* impossible when either the source or destination images are
* a planar format (YUV420, YUV422P, etc.). Further, differently
* sized tiles or different resizing coefficients per tile
* prevent double-buffering as well.
*/
ctx->double_buffering = (ctx->num_tiles > 1 &&
!s_image->fmt->planar &&
!d_image->fmt->planar);
for (i = 1; i < ctx->num_tiles; i++) {
if (ctx->in.tile[i].width != ctx->in.tile[0].width ||
ctx->in.tile[i].height != ctx->in.tile[0].height ||
ctx->out.tile[i].width != ctx->out.tile[0].width ||
ctx->out.tile[i].height != ctx->out.tile[0].height) {
ctx->double_buffering = false;
break;
}
}
for (i = 1; i < ctx->in.num_cols; i++) {
if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) {
ctx->double_buffering = false;
break;
}
}
for (i = 1; i < ctx->in.num_rows; i++) {
if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) {
ctx->double_buffering = false;
break;
}
}
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
unsigned long intermediate_size = d_image->tile[0].size;
for (i = 1; i < ctx->num_tiles; i++) {
if (d_image->tile[i].size > intermediate_size)
intermediate_size = d_image->tile[i].size;
}
ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
intermediate_size);
if (ret)
goto out_free;
if (ctx->double_buffering) {
ret = alloc_dma_buf(priv,
&ctx->rot_intermediate[1],
intermediate_size);
if (ret)
goto out_free_dmabuf0;
}
}
spin_lock_irqsave(&chan->irqlock, flags);
get_res = list_empty(&chan->ctx_list);
list_add_tail(&ctx->list, &chan->ctx_list);
spin_unlock_irqrestore(&chan->irqlock, flags);
if (get_res) {
ret = get_ipu_resources(chan);
if (ret)
goto out_free_dmabuf1;
}
return ctx;
out_free_dmabuf1:
free_dma_buf(priv, &ctx->rot_intermediate[1]);
spin_lock_irqsave(&chan->irqlock, flags);
list_del(&ctx->list);
spin_unlock_irqrestore(&chan->irqlock, flags);
out_free_dmabuf0:
free_dma_buf(priv, &ctx->rot_intermediate[0]);
out_free:
kfree(ctx);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
/*
 * Carry out a single image conversion run. Only the physaddrs of the input
* and output image buffers are needed. The conversion context must have
* been created previously with ipu_image_convert_prepare().
*/
int ipu_image_convert_queue(struct ipu_image_convert_run *run)
{
struct ipu_image_convert_chan *chan;
struct ipu_image_convert_priv *priv;
struct ipu_image_convert_ctx *ctx;
unsigned long flags;
int ret = 0;
if (!run || !run->ctx || !run->in_phys || !run->out_phys)
return -EINVAL;
ctx = run->ctx;
chan = ctx->chan;
priv = chan->priv;
dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
chan->ic_task, ctx, run);
INIT_LIST_HEAD(&run->list);
spin_lock_irqsave(&chan->irqlock, flags);
if (ctx->aborting) {
ret = -EIO;
goto unlock;
}
list_add_tail(&run->list, &chan->pending_q);
if (!chan->current_run) {
ret = do_run(run);
if (ret)
chan->current_run = NULL;
}
unlock:
spin_unlock_irqrestore(&chan->irqlock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
/* Abort any active or pending conversions for this context */
static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
struct ipu_image_convert_run *run, *active_run, *tmp;
unsigned long flags;
int run_count, ret;
spin_lock_irqsave(&chan->irqlock, flags);
/* move all remaining pending runs in this context to done_q */
list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
if (run->ctx != ctx)
continue;
run->status = -EIO;
list_move_tail(&run->list, &chan->done_q);
}
run_count = get_run_count(ctx, &chan->done_q);
active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
chan->current_run : NULL;
if (active_run)
reinit_completion(&ctx->aborted);
ctx->aborting = true;
spin_unlock_irqrestore(&chan->irqlock, flags);
if (!run_count && !active_run) {
dev_dbg(priv->ipu->dev,
"%s: task %u: no abort needed for ctx %p\n",
__func__, chan->ic_task, ctx);
return;
}
if (!active_run) {
empty_done_q(chan);
return;
}
dev_dbg(priv->ipu->dev,
"%s: task %u: wait for completion: %d runs\n",
__func__, chan->ic_task, run_count);
ret = wait_for_completion_timeout(&ctx->aborted,
msecs_to_jiffies(10000));
if (ret == 0) {
dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
force_abort(ctx);
}
}
void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
__ipu_image_convert_abort(ctx);
ctx->aborting = false;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
/* Unprepare image conversion context */
void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
unsigned long flags;
bool put_res;
/* make sure no runs are hanging around */
__ipu_image_convert_abort(ctx);
dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
chan->ic_task, ctx);
spin_lock_irqsave(&chan->irqlock, flags);
list_del(&ctx->list);
put_res = list_empty(&chan->ctx_list);
spin_unlock_irqrestore(&chan->irqlock, flags);
if (put_res)
release_ipu_resources(chan);
free_dma_buf(priv, &ctx->rot_intermediate[1]);
free_dma_buf(priv, &ctx->rot_intermediate[0]);
kfree(ctx);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
/*
* "Canned" asynchronous single image conversion. Allocates and returns
* a new conversion run. On successful return the caller must free the
* run and call ipu_image_convert_unprepare() after conversion completes.
*/
struct ipu_image_convert_run *
ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
struct ipu_image *in, struct ipu_image *out,
enum ipu_rotate_mode rot_mode,
ipu_image_convert_cb_t complete,
void *complete_context)
{
struct ipu_image_convert_ctx *ctx;
struct ipu_image_convert_run *run;
int ret;
ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
complete, complete_context);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
run = kzalloc(sizeof(*run), GFP_KERNEL);
if (!run) {
ipu_image_convert_unprepare(ctx);
return ERR_PTR(-ENOMEM);
}
run->ctx = ctx;
run->in_phys = in->phys0;
run->out_phys = out->phys0;
ret = ipu_image_convert_queue(run);
if (ret) {
ipu_image_convert_unprepare(ctx);
kfree(run);
return ERR_PTR(ret);
}
return run;
}
EXPORT_SYMBOL_GPL(ipu_image_convert);
/* "Canned" synchronous single image conversion */
static void image_convert_sync_complete(struct ipu_image_convert_run *run,
void *data)
{
struct completion *comp = data;
complete(comp);
}
int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
struct ipu_image *in, struct ipu_image *out,
enum ipu_rotate_mode rot_mode)
{
struct ipu_image_convert_run *run;
struct completion comp;
int ret;
init_completion(&comp);
run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
image_convert_sync_complete, &comp);
if (IS_ERR(run))
return PTR_ERR(run);
ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
ret = (ret == 0) ? -ETIMEDOUT : 0;
ipu_image_convert_unprepare(run->ctx);
kfree(run);
return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
{
struct ipu_image_convert_priv *priv;
int i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ipu->image_convert_priv = priv;
priv->ipu = ipu;
for (i = 0; i < IC_NUM_TASKS; i++) {
struct ipu_image_convert_chan *chan = &priv->chan[i];
chan->ic_task = i;
chan->priv = priv;
chan->dma_ch = &image_convert_dma_chan[i];
chan->in_eof_irq = -1;
chan->rot_in_eof_irq = -1;
chan->out_eof_irq = -1;
chan->rot_out_eof_irq = -1;
spin_lock_init(&chan->irqlock);
INIT_LIST_HEAD(&chan->ctx_list);
INIT_LIST_HEAD(&chan->pending_q);
INIT_LIST_HEAD(&chan->done_q);
}
return 0;
}
void ipu_image_convert_exit(struct ipu_soc *ipu)
{
}
| linux-master | drivers/gpu/ipu-v3/ipu-image-convert.c |
/*
* vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs
*
* Copyright (c) 2010 Red Hat Inc.
* Author : Dave Airlie <[email protected]>
*
* Copyright (c) 2015 Lukas Wunner <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS
* IN THE SOFTWARE.
*
*/
#define pr_fmt(fmt) "vga_switcheroo: " fmt
#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/fb.h>
#include <linux/fs.h>
#include <linux/fbcon.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
/**
* DOC: Overview
*
* vga_switcheroo is the Linux subsystem for laptop hybrid graphics.
* These come in two flavors:
*
* * muxed: Dual GPUs with a multiplexer chip to switch outputs between GPUs.
* * muxless: Dual GPUs but only one of them is connected to outputs.
* The other one is merely used to offload rendering, its results
* are copied over PCIe into the framebuffer. On Linux this is
* supported with DRI PRIME.
*
* Hybrid graphics started to appear in the late Naughties and were initially
* all muxed. Newer laptops moved to a muxless architecture for cost reasons.
* A notable exception is the MacBook Pro which continues to use a mux.
* Muxes come with varying capabilities: Some switch only the panel, others
* can also switch external displays. Some switch all display pins at once
* while others can switch just the DDC lines. (To allow EDID probing
* for the inactive GPU.) Also, muxes are often used to cut power to the
* discrete GPU while it is not used.
*
 * DRM drivers register GPUs with vga_switcheroo; these are henceforth called
 * clients. The mux is called the handler. Muxless machines also register a
 * handler to control the power state of the discrete GPU; its ->switchto
 * callback is a no-op for obvious reasons. The discrete GPU is often equipped
 * with an HDA controller for the HDMI/DP audio signal; this will also
 * register as a client so that vga_switcheroo can take care of the correct
 * suspend/resume order when changing the discrete GPU's power state. In total
* there can thus be up to three clients: Two vga clients (GPUs) and one audio
* client (on the discrete GPU). The code is mostly prepared to support
* machines with more than two GPUs should they become available.
*
* The GPU to which the outputs are currently switched is called the
* active client in vga_switcheroo parlance. The GPU not in use is the
* inactive client. When the inactive client's DRM driver is loaded,
* it will be unable to probe the panel's EDID and hence depends on
* VBIOS to provide its display modes. If the VBIOS modes are bogus or
* if there is no VBIOS at all (which is common on the MacBook Pro),
* a client may alternatively request that the DDC lines are temporarily
* switched to it, provided that the handler supports this. Switching
* only the DDC lines and not the entire output avoids unnecessary
* flickering.
*/
/**
* struct vga_switcheroo_client - registered client
* @pdev: client pci device
* @fb_info: framebuffer to which console is remapped on switching
* @pwr_state: current power state if manual power control is used.
* For driver power control, call vga_switcheroo_pwr_state().
* @ops: client callbacks
* @id: client identifier. Determining the id requires the handler,
* so gpus are initially assigned VGA_SWITCHEROO_UNKNOWN_ID
* and later given their true id in vga_switcheroo_enable()
* @active: whether the outputs are currently switched to this client
* @driver_power_control: whether power state is controlled by the driver's
* runtime pm. If true, writing ON and OFF to the vga_switcheroo debugfs
* interface is a no-op so as not to interfere with runtime pm
* @list: client list
 * @vga_dev: pci device of the GPU to which this audio client is bound
*
* Registered client. A client can be either a GPU or an audio device on a GPU.
* For audio clients, the @fb_info and @active members are bogus. For GPU
* clients, the @vga_dev is bogus.
*/
struct vga_switcheroo_client {
struct pci_dev *pdev;
struct fb_info *fb_info;
enum vga_switcheroo_state pwr_state;
const struct vga_switcheroo_client_ops *ops;
enum vga_switcheroo_client_id id;
bool active;
bool driver_power_control;
struct list_head list;
struct pci_dev *vga_dev;
};
/*
* protects access to struct vgasr_priv
*/
static DEFINE_MUTEX(vgasr_mutex);
/**
* struct vgasr_priv - vga_switcheroo private data
* @active: whether vga_switcheroo is enabled.
* Prerequisite is the registration of two GPUs and a handler
* @delayed_switch_active: whether a delayed switch is pending
* @delayed_client_id: client to which a delayed switch is pending
* @debugfs_root: directory for vga_switcheroo debugfs interface
* @registered_clients: number of registered GPUs
* (counting only vga clients, not audio clients)
* @clients: list of registered clients
* @handler: registered handler
* @handler_flags: flags of registered handler
* @mux_hw_lock: protects mux state
* (in particular while DDC lines are temporarily switched)
* @old_ddc_owner: client to which DDC lines will be switched back on unlock
*
* vga_switcheroo private data. Currently only one vga_switcheroo instance
* per system is supported.
*/
struct vgasr_priv {
bool active;
bool delayed_switch_active;
enum vga_switcheroo_client_id delayed_client_id;
struct dentry *debugfs_root;
int registered_clients;
struct list_head clients;
const struct vga_switcheroo_handler *handler;
enum vga_switcheroo_handler_flags_t handler_flags;
struct mutex mux_hw_lock;
int old_ddc_owner;
};
#define ID_BIT_AUDIO 0x100
#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
#define client_is_vga(c) (!client_is_audio(c))
#define client_id(c) ((c)->id & ~ID_BIT_AUDIO)
static void vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
/* only one switcheroo per system */
static struct vgasr_priv vgasr_priv = {
.clients = LIST_HEAD_INIT(vgasr_priv.clients),
.mux_hw_lock = __MUTEX_INITIALIZER(vgasr_priv.mux_hw_lock),
};
static bool vga_switcheroo_ready(void)
{
/* we're ready if we get two clients + handler */
return !vgasr_priv.active &&
vgasr_priv.registered_clients == 2 && vgasr_priv.handler;
}
static void vga_switcheroo_enable(void)
{
int ret;
struct vga_switcheroo_client *client;
/* call the handler to init */
if (vgasr_priv.handler->init)
vgasr_priv.handler->init();
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (!client_is_vga(client) ||
client_id(client) != VGA_SWITCHEROO_UNKNOWN_ID)
continue;
ret = vgasr_priv.handler->get_client_id(client->pdev);
if (ret < 0)
return;
client->id = ret;
}
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (!client_is_audio(client) ||
client_id(client) != VGA_SWITCHEROO_UNKNOWN_ID)
continue;
ret = vgasr_priv.handler->get_client_id(client->vga_dev);
if (ret < 0)
return;
client->id = ret | ID_BIT_AUDIO;
if (client->ops->gpu_bound)
client->ops->gpu_bound(client->pdev, ret);
}
vga_switcheroo_debugfs_init(&vgasr_priv);
vgasr_priv.active = true;
}
/**
* vga_switcheroo_register_handler() - register handler
* @handler: handler callbacks
* @handler_flags: handler flags
*
* Register handler. Enable vga_switcheroo if two vga clients have already
* registered.
*
* Return: 0 on success, -EINVAL if a handler was already registered.
*/
int vga_switcheroo_register_handler(
const struct vga_switcheroo_handler *handler,
enum vga_switcheroo_handler_flags_t handler_flags)
{
mutex_lock(&vgasr_mutex);
if (vgasr_priv.handler) {
mutex_unlock(&vgasr_mutex);
return -EINVAL;
}
vgasr_priv.handler = handler;
vgasr_priv.handler_flags = handler_flags;
if (vga_switcheroo_ready()) {
pr_info("enabled\n");
vga_switcheroo_enable();
}
mutex_unlock(&vgasr_mutex);
return 0;
}
EXPORT_SYMBOL(vga_switcheroo_register_handler);
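/*
 * Illustrative sketch (not part of the original file): a mux driver
 * registers itself roughly like this. The "my_*" callbacks are
 * hypothetical; the handler flags depend on the mux's capabilities.
 *
 *	static const struct vga_switcheroo_handler my_handler = {
 *		.switchto	= my_switchto,
 *		.switch_ddc	= my_switch_ddc,
 *		.power_state	= my_power_state,
 *		.get_client_id	= my_get_client_id,
 *	};
 *
 *	ret = vga_switcheroo_register_handler(&my_handler,
 *					      VGA_SWITCHEROO_CAN_SWITCH_DDC);
 */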
/**
* vga_switcheroo_unregister_handler() - unregister handler
*
* Unregister handler. Disable vga_switcheroo.
*/
void vga_switcheroo_unregister_handler(void)
{
mutex_lock(&vgasr_mutex);
mutex_lock(&vgasr_priv.mux_hw_lock);
vgasr_priv.handler_flags = 0;
vgasr_priv.handler = NULL;
if (vgasr_priv.active) {
pr_info("disabled\n");
vga_switcheroo_debugfs_fini(&vgasr_priv);
vgasr_priv.active = false;
}
mutex_unlock(&vgasr_priv.mux_hw_lock);
mutex_unlock(&vgasr_mutex);
}
EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
/**
* vga_switcheroo_handler_flags() - obtain handler flags
*
* Helper for clients to obtain the handler flags bitmask.
*
* Return: Handler flags. A value of 0 means that no handler is registered
* or that the handler has no special capabilities.
*/
enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void)
{
return vgasr_priv.handler_flags;
}
EXPORT_SYMBOL(vga_switcheroo_handler_flags);
static int register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
enum vga_switcheroo_client_id id,
struct pci_dev *vga_dev,
bool active,
bool driver_power_control)
{
struct vga_switcheroo_client *client;
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return -ENOMEM;
client->pwr_state = VGA_SWITCHEROO_ON;
client->pdev = pdev;
client->ops = ops;
client->id = id;
client->active = active;
client->driver_power_control = driver_power_control;
client->vga_dev = vga_dev;
mutex_lock(&vgasr_mutex);
list_add_tail(&client->list, &vgasr_priv.clients);
if (client_is_vga(client))
vgasr_priv.registered_clients++;
if (vga_switcheroo_ready()) {
pr_info("enabled\n");
vga_switcheroo_enable();
}
mutex_unlock(&vgasr_mutex);
return 0;
}
/**
* vga_switcheroo_register_client - register vga client
* @pdev: client pci device
* @ops: client callbacks
* @driver_power_control: whether power state is controlled by the driver's
* runtime pm
*
* Register vga client (GPU). Enable vga_switcheroo if another GPU and a
* handler have already registered. The power state of the client is assumed
* to be ON. Beforehand, vga_switcheroo_client_probe_defer() shall be called
* to ensure that all prerequisites are met.
*
* Return: 0 on success, -ENOMEM on memory allocation error.
*/
int vga_switcheroo_register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
bool driver_power_control)
{
return register_client(pdev, ops, VGA_SWITCHEROO_UNKNOWN_ID, NULL,
pdev == vga_default_device(),
driver_power_control);
}
EXPORT_SYMBOL(vga_switcheroo_register_client);
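/*
 * Illustrative sketch (not part of the original file): a GPU driver
 * provides its client callbacks and registers during probe, here with
 * driver power control enabled. The "my_*" names are hypothetical.
 *
 *	static const struct vga_switcheroo_client_ops my_gpu_ops = {
 *		.set_gpu_state	= my_set_gpu_state,
 *		.reprobe	= my_reprobe,
 *		.can_switch	= my_can_switch,
 *	};
 *
 *	ret = vga_switcheroo_register_client(pdev, &my_gpu_ops, true);
 */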
/**
* vga_switcheroo_register_audio_client - register audio client
* @pdev: client pci device
* @ops: client callbacks
* @vga_dev: pci device which is bound to current audio client
*
* Register audio client (audio device on a GPU). The client is assumed
* to use runtime PM. Beforehand, vga_switcheroo_client_probe_defer()
* shall be called to ensure that all prerequisites are met.
*
* Return: 0 on success, -ENOMEM on memory allocation error, -EINVAL on getting
* client id error.
*/
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
struct pci_dev *vga_dev)
{
enum vga_switcheroo_client_id id = VGA_SWITCHEROO_UNKNOWN_ID;
/*
	 * If vga_switcheroo is already enabled, the two GPU clients and the
	 * handler are registered, so get the audio client id directly from
	 * the bound GPU client. Otherwise leave it as
	 * VGA_SWITCHEROO_UNKNOWN_ID; it will be set to the correct id later,
	 * when vga_switcheroo_enable() is called.
*/
mutex_lock(&vgasr_mutex);
if (vgasr_priv.active) {
id = vgasr_priv.handler->get_client_id(vga_dev);
if (id < 0) {
mutex_unlock(&vgasr_mutex);
return -EINVAL;
}
		/* notify the audio client, since its GPU has already been bound */
if (ops->gpu_bound)
ops->gpu_bound(pdev, id);
}
mutex_unlock(&vgasr_mutex);
return register_client(pdev, ops, id | ID_BIT_AUDIO, vga_dev,
false, true);
}
EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
static struct vga_switcheroo_client *
find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
{
struct vga_switcheroo_client *client;
list_for_each_entry(client, head, list)
if (client->pdev == pdev)
return client;
return NULL;
}
static struct vga_switcheroo_client *
find_client_from_id(struct list_head *head,
enum vga_switcheroo_client_id client_id)
{
struct vga_switcheroo_client *client;
list_for_each_entry(client, head, list)
if (client->id == client_id)
return client;
return NULL;
}
static struct vga_switcheroo_client *
find_active_client(struct list_head *head)
{
struct vga_switcheroo_client *client;
list_for_each_entry(client, head, list)
if (client->active)
return client;
return NULL;
}
/**
* vga_switcheroo_client_probe_defer() - whether to defer probing a given client
* @pdev: client pci device
*
* Determine whether any prerequisites are not fulfilled to probe a given
* client. Drivers shall invoke this early on in their ->probe callback
* and return %-EPROBE_DEFER if it evaluates to %true. Thou shalt not
* register the client ere thou hast called this.
*
* Return: %true if probing should be deferred, otherwise %false.
*/
bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
{
if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
/*
* apple-gmux is needed on pre-retina MacBook Pro
* to probe the panel if pdev is the inactive GPU.
*/
if (apple_gmux_present() && pdev != vga_default_device() &&
!vgasr_priv.handler_flags)
return true;
}
return false;
}
EXPORT_SYMBOL(vga_switcheroo_client_probe_defer);
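/*
 * Illustrative sketch (not part of the original file): typical use at the
 * top of a GPU driver's probe routine, before the client is registered.
 * The function name is hypothetical.
 *
 *	static int my_gpu_probe(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		if (vga_switcheroo_client_probe_defer(pdev))
 *			return -EPROBE_DEFER;
 *		...
 *	}
 */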
static enum vga_switcheroo_state
vga_switcheroo_pwr_state(struct vga_switcheroo_client *client)
{
if (client->driver_power_control)
if (pm_runtime_enabled(&client->pdev->dev) &&
pm_runtime_active(&client->pdev->dev))
return VGA_SWITCHEROO_ON;
else
return VGA_SWITCHEROO_OFF;
else
return client->pwr_state;
}
/**
* vga_switcheroo_get_client_state() - obtain power state of a given client
* @pdev: client pci device
*
* Obtain power state of a given client as seen from vga_switcheroo.
* The function is only called from hda_intel.c.
*
* Return: Power state.
*/
enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *pdev)
{
struct vga_switcheroo_client *client;
enum vga_switcheroo_state ret;
mutex_lock(&vgasr_mutex);
client = find_client_from_pci(&vgasr_priv.clients, pdev);
if (!client)
ret = VGA_SWITCHEROO_NOT_FOUND;
else
ret = vga_switcheroo_pwr_state(client);
mutex_unlock(&vgasr_mutex);
return ret;
}
EXPORT_SYMBOL(vga_switcheroo_get_client_state);
/**
* vga_switcheroo_unregister_client() - unregister client
* @pdev: client pci device
*
* Unregister client. Disable vga_switcheroo if this is a vga client (GPU).
*/
void vga_switcheroo_unregister_client(struct pci_dev *pdev)
{
struct vga_switcheroo_client *client;
mutex_lock(&vgasr_mutex);
client = find_client_from_pci(&vgasr_priv.clients, pdev);
if (client) {
if (client_is_vga(client))
vgasr_priv.registered_clients--;
list_del(&client->list);
kfree(client);
}
if (vgasr_priv.active && vgasr_priv.registered_clients < 2) {
pr_info("disabled\n");
vga_switcheroo_debugfs_fini(&vgasr_priv);
vgasr_priv.active = false;
}
mutex_unlock(&vgasr_mutex);
}
EXPORT_SYMBOL(vga_switcheroo_unregister_client);
/**
* vga_switcheroo_client_fb_set() - set framebuffer of a given client
* @pdev: client pci device
* @info: framebuffer
*
* Set framebuffer of a given client. The console will be remapped to this
* on switching.
*/
void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
struct fb_info *info)
{
struct vga_switcheroo_client *client;
mutex_lock(&vgasr_mutex);
client = find_client_from_pci(&vgasr_priv.clients, pdev);
if (client)
client->fb_info = info;
mutex_unlock(&vgasr_mutex);
}
EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
/**
* vga_switcheroo_lock_ddc() - temporarily switch DDC lines to a given client
* @pdev: client pci device
*
* Temporarily switch DDC lines to the client identified by @pdev
* (but leave the outputs otherwise switched to where they are).
* This allows the inactive client to probe EDID. The DDC lines must
* afterwards be switched back by calling vga_switcheroo_unlock_ddc(),
* even if this function returns an error.
*
* Return: Previous DDC owner on success or a negative int on error.
* Specifically, %-ENODEV if no handler has registered or if the handler
* does not support switching the DDC lines. Also, a negative value
* returned by the handler is propagated back to the caller.
* The return value has merely an informational purpose for any caller
* which might be interested in it. It is acceptable to ignore the return
* value and simply rely on the result of the subsequent EDID probe,
* which will be %NULL if DDC switching failed.
*/
int vga_switcheroo_lock_ddc(struct pci_dev *pdev)
{
enum vga_switcheroo_client_id id;
mutex_lock(&vgasr_priv.mux_hw_lock);
if (!vgasr_priv.handler || !vgasr_priv.handler->switch_ddc) {
vgasr_priv.old_ddc_owner = -ENODEV;
return -ENODEV;
}
id = vgasr_priv.handler->get_client_id(pdev);
vgasr_priv.old_ddc_owner = vgasr_priv.handler->switch_ddc(id);
return vgasr_priv.old_ddc_owner;
}
EXPORT_SYMBOL(vga_switcheroo_lock_ddc);
/**
* vga_switcheroo_unlock_ddc() - switch DDC lines back to previous owner
* @pdev: client pci device
*
* Switch DDC lines back to the previous owner after calling
* vga_switcheroo_lock_ddc(). This must be called even if
* vga_switcheroo_lock_ddc() returned an error.
*
* Return: Previous DDC owner on success (i.e. the client identifier of @pdev)
* or a negative int on error.
* Specifically, %-ENODEV if no handler has registered or if the handler
* does not support switching the DDC lines. Also, a negative value
* returned by the handler is propagated back to the caller.
* Finally, invoking this function without calling vga_switcheroo_lock_ddc()
* first is not allowed and will result in %-EINVAL.
*/
int vga_switcheroo_unlock_ddc(struct pci_dev *pdev)
{
enum vga_switcheroo_client_id id;
int ret = vgasr_priv.old_ddc_owner;
if (WARN_ON_ONCE(!mutex_is_locked(&vgasr_priv.mux_hw_lock)))
return -EINVAL;
if (vgasr_priv.old_ddc_owner >= 0) {
id = vgasr_priv.handler->get_client_id(pdev);
if (vgasr_priv.old_ddc_owner != id)
ret = vgasr_priv.handler->switch_ddc(
vgasr_priv.old_ddc_owner);
}
mutex_unlock(&vgasr_priv.mux_hw_lock);
return ret;
}
EXPORT_SYMBOL(vga_switcheroo_unlock_ddc);
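/*
 * Illustrative sketch (not part of the original file): the inactive GPU's
 * driver can probe EDID by bracketing the probe with the DDC lock, and it
 * must unlock even if locking returned an error.
 *
 *	vga_switcheroo_lock_ddc(pdev);
 *	edid = drm_get_edid(connector, adapter);
 *	vga_switcheroo_unlock_ddc(pdev);
 */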
/**
* DOC: Manual switching and manual power control
*
* In this mode of use, the file /sys/kernel/debug/vgaswitcheroo/switch
* can be read to retrieve the current vga_switcheroo state and commands
* can be written to it to change the state. The file appears as soon as
* two GPU drivers and one handler have registered with vga_switcheroo.
* The following commands are understood:
*
* * OFF: Power off the device not in use.
* * ON: Power on the device not in use.
* * IGD: Switch to the integrated graphics device.
* Power on the integrated GPU if necessary, power off the discrete GPU.
* Prerequisite is that no user space processes (e.g. Xorg, alsactl)
* have opened device files of the GPUs or the audio client. If the
* switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
* and /dev/snd/controlC1 to identify processes blocking the switch.
* * DIS: Switch to the discrete graphics device.
* * DIGD: Delayed switch to the integrated graphics device.
* This will perform the switch once the last user space process has
* closed the device files of the GPUs and the audio client.
* * DDIS: Delayed switch to the discrete graphics device.
* * MIGD: Mux-only switch to the integrated graphics device.
* Does not remap console or change the power state of either gpu.
* If the integrated GPU is currently off, the screen will turn black.
* If it is on, the screen will show whatever happens to be in VRAM.
* Either way, the user has to blindly enter the command to switch back.
* * MDIS: Mux-only switch to the discrete graphics device.
*
* For GPUs whose power state is controlled by the driver's runtime pm,
* the ON and OFF commands are a no-op (see next section).
*
* For muxless machines, the IGD/DIS, DIGD/DDIS and MIGD/MDIS commands
* should not be used.
*/
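/*
 * Illustrative example (not part of the original file): switching to the
 * discrete GPU from a shell, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *	# cat /sys/kernel/debug/vgaswitcheroo/switch
 *	# echo DIS > /sys/kernel/debug/vgaswitcheroo/switch
 *
 * If user space still holds the device files open, write DDIS instead;
 * the switch then happens once the last user closes them.
 */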
static int vga_switcheroo_show(struct seq_file *m, void *v)
{
struct vga_switcheroo_client *client;
int i = 0;
mutex_lock(&vgasr_mutex);
list_for_each_entry(client, &vgasr_priv.clients, list) {
seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i,
client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" :
"IGD",
client_is_vga(client) ? "" : "-Audio",
client->active ? '+' : ' ',
client->driver_power_control ? "Dyn" : "",
vga_switcheroo_pwr_state(client) ? "Pwr" : "Off",
pci_name(client->pdev));
i++;
}
mutex_unlock(&vgasr_mutex);
return 0;
}
static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, vga_switcheroo_show, NULL);
}
static int vga_switchon(struct vga_switcheroo_client *client)
{
if (client->driver_power_control)
return 0;
if (vgasr_priv.handler->power_state)
vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
/* call the driver callback to turn on device */
client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
client->pwr_state = VGA_SWITCHEROO_ON;
return 0;
}
static int vga_switchoff(struct vga_switcheroo_client *client)
{
if (client->driver_power_control)
return 0;
/* call the driver callback to turn off device */
client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
if (vgasr_priv.handler->power_state)
vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
client->pwr_state = VGA_SWITCHEROO_OFF;
return 0;
}
static void set_audio_state(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state)
{
struct vga_switcheroo_client *client;
client = find_client_from_id(&vgasr_priv.clients, id | ID_BIT_AUDIO);
if (client)
client->ops->set_gpu_state(client->pdev, state);
}
/* stage one happens before delay */
static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
{
struct vga_switcheroo_client *active;
active = find_active_client(&vgasr_priv.clients);
if (!active)
return 0;
if (vga_switcheroo_pwr_state(new_client) == VGA_SWITCHEROO_OFF)
vga_switchon(new_client);
vga_set_default_device(new_client->pdev);
return 0;
}
/* post delay */
static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
{
int ret;
struct vga_switcheroo_client *active;
active = find_active_client(&vgasr_priv.clients);
if (!active)
return 0;
active->active = false;
/* let HDA controller autosuspend if GPU uses driver power control */
if (!active->driver_power_control)
set_audio_state(active->id, VGA_SWITCHEROO_OFF);
if (new_client->fb_info)
fbcon_remap_all(new_client->fb_info);
mutex_lock(&vgasr_priv.mux_hw_lock);
ret = vgasr_priv.handler->switchto(new_client->id);
mutex_unlock(&vgasr_priv.mux_hw_lock);
if (ret)
return ret;
if (new_client->ops->reprobe)
new_client->ops->reprobe(new_client->pdev);
if (vga_switcheroo_pwr_state(active) == VGA_SWITCHEROO_ON)
vga_switchoff(active);
/* let HDA controller autoresume if GPU uses driver power control */
if (!new_client->driver_power_control)
set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
new_client->active = true;
return 0;
}
static bool check_can_switch(void)
{
struct vga_switcheroo_client *client;
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (!client->ops->can_switch(client->pdev)) {
pr_err("client %x refused switch\n", client->id);
return false;
}
}
return true;
}
static ssize_t
vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char usercmd[64];
int ret;
bool delay = false, can_switch;
bool just_mux = false;
enum vga_switcheroo_client_id client_id = VGA_SWITCHEROO_UNKNOWN_ID;
struct vga_switcheroo_client *client = NULL;
if (cnt > 63)
cnt = 63;
if (copy_from_user(usercmd, ubuf, cnt))
return -EFAULT;
mutex_lock(&vgasr_mutex);
if (!vgasr_priv.active) {
cnt = -EINVAL;
goto out;
}
/* pwr off the device not in use */
if (strncmp(usercmd, "OFF", 3) == 0) {
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (client->active || client_is_audio(client))
continue;
if (client->driver_power_control)
continue;
set_audio_state(client->id, VGA_SWITCHEROO_OFF);
if (client->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(client);
}
goto out;
}
/* pwr on the device not in use */
if (strncmp(usercmd, "ON", 2) == 0) {
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (client->active || client_is_audio(client))
continue;
if (client->driver_power_control)
continue;
if (client->pwr_state == VGA_SWITCHEROO_OFF)
vga_switchon(client);
set_audio_state(client->id, VGA_SWITCHEROO_ON);
}
goto out;
}
/* request a delayed switch - test can we switch now */
if (strncmp(usercmd, "DIGD", 4) == 0) {
client_id = VGA_SWITCHEROO_IGD;
delay = true;
}
if (strncmp(usercmd, "DDIS", 4) == 0) {
client_id = VGA_SWITCHEROO_DIS;
delay = true;
}
if (strncmp(usercmd, "IGD", 3) == 0)
client_id = VGA_SWITCHEROO_IGD;
if (strncmp(usercmd, "DIS", 3) == 0)
client_id = VGA_SWITCHEROO_DIS;
if (strncmp(usercmd, "MIGD", 4) == 0) {
just_mux = true;
client_id = VGA_SWITCHEROO_IGD;
}
if (strncmp(usercmd, "MDIS", 4) == 0) {
just_mux = true;
client_id = VGA_SWITCHEROO_DIS;
}
if (client_id == VGA_SWITCHEROO_UNKNOWN_ID)
goto out;
client = find_client_from_id(&vgasr_priv.clients, client_id);
if (!client)
goto out;
vgasr_priv.delayed_switch_active = false;
if (just_mux) {
mutex_lock(&vgasr_priv.mux_hw_lock);
ret = vgasr_priv.handler->switchto(client_id);
mutex_unlock(&vgasr_priv.mux_hw_lock);
goto out;
}
if (client->active)
goto out;
/* okay we want a switch - test if devices are willing to switch */
can_switch = check_can_switch();
if (can_switch == false && delay == false)
goto out;
if (can_switch) {
ret = vga_switchto_stage1(client);
if (ret)
pr_err("switching failed stage 1 %d\n", ret);
ret = vga_switchto_stage2(client);
if (ret)
pr_err("switching failed stage 2 %d\n", ret);
} else {
pr_info("setting delayed switch to client %d\n", client->id);
vgasr_priv.delayed_switch_active = true;
vgasr_priv.delayed_client_id = client_id;
ret = vga_switchto_stage1(client);
if (ret)
pr_err("delayed switching stage 1 failed %d\n", ret);
}
out:
mutex_unlock(&vgasr_mutex);
return cnt;
}
static const struct file_operations vga_switcheroo_debugfs_fops = {
.owner = THIS_MODULE,
.open = vga_switcheroo_debugfs_open,
.write = vga_switcheroo_debugfs_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
{
debugfs_remove_recursive(priv->debugfs_root);
priv->debugfs_root = NULL;
}
static void vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
{
/* already initialised */
if (priv->debugfs_root)
return;
priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
debugfs_create_file("switch", 0644, priv->debugfs_root, NULL,
&vga_switcheroo_debugfs_fops);
}
/**
* vga_switcheroo_process_delayed_switch() - helper for delayed switching
*
* Process a delayed switch if one is pending. DRM drivers should call this
* from their ->lastclose callback.
*
* Return: 0 on success. -EINVAL if no delayed switch is pending, if the client
* has unregistered in the meantime or if there are other clients blocking the
* switch. If the actual switch fails, an error is reported and 0 is returned.
*/
int vga_switcheroo_process_delayed_switch(void)
{
struct vga_switcheroo_client *client;
int ret;
int err = -EINVAL;
mutex_lock(&vgasr_mutex);
if (!vgasr_priv.delayed_switch_active)
goto err;
pr_info("processing delayed switch to %d\n",
vgasr_priv.delayed_client_id);
client = find_client_from_id(&vgasr_priv.clients,
vgasr_priv.delayed_client_id);
if (!client || !check_can_switch())
goto err;
ret = vga_switchto_stage2(client);
if (ret)
pr_err("delayed switching failed stage 2 %d\n", ret);
vgasr_priv.delayed_switch_active = false;
err = 0;
err:
mutex_unlock(&vgasr_mutex);
return err;
}
EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
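/*
 * Illustrative sketch (not part of the original file): a DRM driver wires
 * this into its lastclose handler so that a pending DIGD/DDIS request
 * completes once user space has closed the device. The function name is
 * hypothetical.
 *
 *	static void my_driver_lastclose(struct drm_device *dev)
 *	{
 *		drm_fb_helper_lastclose(dev);
 *		vga_switcheroo_process_delayed_switch();
 *	}
 */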
/**
* DOC: Driver power control
*
* In this mode of use, the discrete GPU automatically powers up and down at
* the discretion of the driver's runtime pm. On muxed machines, the user may
* still influence the muxer state by way of the debugfs interface, however
* the ON and OFF commands become a no-op for the discrete GPU.
*
* This mode is the default on Nvidia HybridPower/Optimus and ATI PowerXpress.
* Specifying nouveau.runpm=0, radeon.runpm=0 or amdgpu.runpm=0 on the kernel
* command line disables it.
*
* After the GPU has been suspended, the handler needs to be called to cut
* power to the GPU. Likewise it needs to reinstate power before the GPU
* can resume. This is achieved by vga_switcheroo_init_domain_pm_ops(),
* which augments the GPU's suspend/resume functions by the requisite
* calls to the handler.
*
* When the audio device resumes, the GPU needs to be woken. This is achieved
* by a PCI quirk which calls device_link_add() to declare a dependency on the
* GPU. That way, the GPU is kept awake whenever and as long as the audio
* device is in use.
*
* On muxed machines, if the mux is initially switched to the discrete GPU,
* the user ends up with a black screen when the GPU powers down after boot.
* As a workaround, the mux is forced to the integrated GPU on runtime suspend,
* cf. https://bugs.freedesktop.org/show_bug.cgi?id=75917
*/
static void vga_switcheroo_power_switch(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
struct vga_switcheroo_client *client;
if (!vgasr_priv.handler->power_state)
return;
client = find_client_from_pci(&vgasr_priv.clients, pdev);
if (!client)
return;
if (!client->driver_power_control)
return;
vgasr_priv.handler->power_state(client->id, state);
}
/* switcheroo power domain */
static int vga_switcheroo_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int ret;
ret = dev->bus->pm->runtime_suspend(dev);
if (ret)
return ret;
mutex_lock(&vgasr_mutex);
if (vgasr_priv.handler->switchto) {
mutex_lock(&vgasr_priv.mux_hw_lock);
vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
mutex_unlock(&vgasr_priv.mux_hw_lock);
}
pci_bus_set_current_state(pdev->bus, PCI_D3cold);
vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
mutex_unlock(&vgasr_mutex);
return 0;
}
static int vga_switcheroo_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
mutex_lock(&vgasr_mutex);
vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON);
mutex_unlock(&vgasr_mutex);
pci_resume_bus(pdev->bus);
return dev->bus->pm->runtime_resume(dev);
}
/**
* vga_switcheroo_init_domain_pm_ops() - helper for driver power control
* @dev: vga client device
* @domain: power domain
*
* Helper for GPUs whose power state is controlled by the driver's runtime pm.
* After the GPU has been suspended, the handler needs to be called to cut
* power to the GPU. Likewise it needs to reinstate power before the GPU
* can resume. To this end, this helper augments the suspend/resume functions
* by the requisite calls to the handler. It needs only be called on platforms
* where the power switch is separate to the device being powered down.
*/
int vga_switcheroo_init_domain_pm_ops(struct device *dev,
struct dev_pm_domain *domain)
{
/* copy over all the bus versions */
if (dev->bus && dev->bus->pm) {
domain->ops = *dev->bus->pm;
domain->ops.runtime_suspend = vga_switcheroo_runtime_suspend;
domain->ops.runtime_resume = vga_switcheroo_runtime_resume;
dev_pm_domain_set(dev, domain);
return 0;
}
dev_pm_domain_set(dev, NULL);
return -EINVAL;
}
EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
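/*
 * Illustrative sketch (not part of the original file): a GPU driver using
 * driver power control sets up the power domain during probe and tears it
 * down on removal. The static vga_pm_domain variable and my_gpu_ops are
 * hypothetical.
 *
 *	static struct dev_pm_domain vga_pm_domain;
 *	...
 *	vga_switcheroo_register_client(pdev, &my_gpu_ops, true);
 *	vga_switcheroo_init_domain_pm_ops(&pdev->dev, &vga_pm_domain);
 *	...
 *	vga_switcheroo_fini_domain_pm_ops(&pdev->dev);
 *	vga_switcheroo_unregister_client(pdev);
 */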
void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
{
dev_pm_domain_set(dev, NULL);
}
EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
| linux-master | drivers/gpu/vga/vga_switcheroo.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, NVIDIA Corporation.
*/
#include <linux/device.h>
#include <linux/of.h>
struct bus_type host1x_context_device_bus_type = {
.name = "host1x-context",
};
EXPORT_SYMBOL_GPL(host1x_context_device_bus_type);
static int __init host1x_context_device_bus_init(void)
{
int err;
err = bus_register(&host1x_context_device_bus_type);
if (err < 0) {
pr_err("bus type registration failed: %d\n", err);
return err;
}
return 0;
}
postcore_initcall(host1x_context_device_bus_init);
| linux-master | drivers/gpu/host1x/context_bus.c |
/*
* Copyright (C) 2013 NVIDIA Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <linux/clk.h>
#include <linux/host1x.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dev.h"
#define MIPI_CAL_CTRL 0x00
#define MIPI_CAL_CTRL_NOISE_FILTER(x) (((x) & 0xf) << 26)
#define MIPI_CAL_CTRL_PRESCALE(x) (((x) & 0x3) << 24)
#define MIPI_CAL_CTRL_CLKEN_OVR (1 << 4)
#define MIPI_CAL_CTRL_START (1 << 0)
#define MIPI_CAL_AUTOCAL_CTRL 0x01
#define MIPI_CAL_STATUS 0x02
#define MIPI_CAL_STATUS_DONE (1 << 16)
#define MIPI_CAL_STATUS_ACTIVE (1 << 0)
#define MIPI_CAL_CONFIG_CSIA 0x05
#define MIPI_CAL_CONFIG_CSIB 0x06
#define MIPI_CAL_CONFIG_CSIC 0x07
#define MIPI_CAL_CONFIG_CSID 0x08
#define MIPI_CAL_CONFIG_CSIE 0x09
#define MIPI_CAL_CONFIG_CSIF 0x0a
#define MIPI_CAL_CONFIG_DSIA 0x0e
#define MIPI_CAL_CONFIG_DSIB 0x0f
#define MIPI_CAL_CONFIG_DSIC 0x10
#define MIPI_CAL_CONFIG_DSID 0x11
#define MIPI_CAL_CONFIG_DSIA_CLK 0x19
#define MIPI_CAL_CONFIG_DSIB_CLK 0x1a
#define MIPI_CAL_CONFIG_CSIAB_CLK 0x1b
#define MIPI_CAL_CONFIG_DSIC_CLK 0x1c
#define MIPI_CAL_CONFIG_CSICD_CLK 0x1c
#define MIPI_CAL_CONFIG_DSID_CLK 0x1d
#define MIPI_CAL_CONFIG_CSIE_CLK 0x1d
/* for data and clock lanes */
#define MIPI_CAL_CONFIG_SELECT (1 << 21)
/* for data lanes */
#define MIPI_CAL_CONFIG_HSPDOS(x) (((x) & 0x1f) << 16)
#define MIPI_CAL_CONFIG_HSPUOS(x) (((x) & 0x1f) << 8)
#define MIPI_CAL_CONFIG_TERMOS(x) (((x) & 0x1f) << 0)
/* for clock lanes */
#define MIPI_CAL_CONFIG_HSCLKPDOSD(x) (((x) & 0x1f) << 8)
#define MIPI_CAL_CONFIG_HSCLKPUOSD(x) (((x) & 0x1f) << 0)
#define MIPI_CAL_BIAS_PAD_CFG0 0x16
#define MIPI_CAL_BIAS_PAD_PDVCLAMP (1 << 1)
#define MIPI_CAL_BIAS_PAD_E_VCLAMP_REF (1 << 0)
#define MIPI_CAL_BIAS_PAD_CFG1 0x17
#define MIPI_CAL_BIAS_PAD_DRV_DN_REF(x) (((x) & 0x7) << 16)
#define MIPI_CAL_BIAS_PAD_DRV_UP_REF(x) (((x) & 0x7) << 8)
#define MIPI_CAL_BIAS_PAD_CFG2 0x18
#define MIPI_CAL_BIAS_PAD_VCLAMP(x) (((x) & 0x7) << 16)
#define MIPI_CAL_BIAS_PAD_VAUXP(x) (((x) & 0x7) << 4)
#define MIPI_CAL_BIAS_PAD_PDVREG (1 << 1)
struct tegra_mipi_pad {
unsigned long data;
unsigned long clk;
};
struct tegra_mipi_soc {
bool has_clk_lane;
const struct tegra_mipi_pad *pads;
unsigned int num_pads;
bool clock_enable_override;
bool needs_vclamp_ref;
/* bias pad configuration settings */
u8 pad_drive_down_ref;
u8 pad_drive_up_ref;
u8 pad_vclamp_level;
u8 pad_vauxp_level;
/* calibration settings for data lanes */
u8 hspdos;
u8 hspuos;
u8 termos;
/* calibration settings for clock lanes */
u8 hsclkpdos;
u8 hsclkpuos;
};
struct tegra_mipi {
const struct tegra_mipi_soc *soc;
struct device *dev;
void __iomem *regs;
struct mutex lock;
struct clk *clk;
unsigned long usage_count;
};
struct tegra_mipi_device {
struct platform_device *pdev;
struct tegra_mipi *mipi;
struct device *device;
unsigned long pads;
};
static inline u32 tegra_mipi_readl(struct tegra_mipi *mipi,
unsigned long offset)
{
return readl(mipi->regs + (offset << 2));
}
static inline void tegra_mipi_writel(struct tegra_mipi *mipi, u32 value,
unsigned long offset)
{
writel(value, mipi->regs + (offset << 2));
}
static int tegra_mipi_power_up(struct tegra_mipi *mipi)
{
u32 value;
int err;
err = clk_enable(mipi->clk);
if (err < 0)
return err;
value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
if (mipi->soc->needs_vclamp_ref)
value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
clk_disable(mipi->clk);
return 0;
}
static int tegra_mipi_power_down(struct tegra_mipi *mipi)
{
u32 value;
int err;
err = clk_enable(mipi->clk);
if (err < 0)
return err;
/*
* The MIPI_CAL_BIAS_PAD_PDVREG controls a voltage regulator that
* supplies the DSI pads. This must be kept enabled until none of the
* DSI lanes are used anymore.
*/
value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
value |= MIPI_CAL_BIAS_PAD_PDVREG;
tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
/*
* MIPI_CAL_BIAS_PAD_PDVCLAMP and MIPI_CAL_BIAS_PAD_E_VCLAMP_REF
* control a regulator that supplies current to the pre-driver logic.
* Powering down this regulator causes DSI to fail, so it must remain
* powered on until none of the DSI lanes are used anymore.
*/
value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
if (mipi->soc->needs_vclamp_ref)
value &= ~MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
value |= MIPI_CAL_BIAS_PAD_PDVCLAMP;
tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
return 0;
}
struct tegra_mipi_device *tegra_mipi_request(struct device *device,
struct device_node *np)
{
struct tegra_mipi_device *dev;
struct of_phandle_args args;
int err;
err = of_parse_phandle_with_args(np, "nvidia,mipi-calibrate",
"#nvidia,mipi-calibrate-cells", 0,
&args);
if (err < 0)
return ERR_PTR(err);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
err = -ENOMEM;
goto out;
}
dev->pdev = of_find_device_by_node(args.np);
if (!dev->pdev) {
err = -ENODEV;
goto free;
}
dev->mipi = platform_get_drvdata(dev->pdev);
if (!dev->mipi) {
err = -EPROBE_DEFER;
goto put;
}
of_node_put(args.np);
dev->pads = args.args[0];
dev->device = device;
return dev;
put:
platform_device_put(dev->pdev);
free:
kfree(dev);
out:
of_node_put(args.np);
return ERR_PTR(err);
}
EXPORT_SYMBOL(tegra_mipi_request);
void tegra_mipi_free(struct tegra_mipi_device *device)
{
platform_device_put(device->pdev);
kfree(device);
}
EXPORT_SYMBOL(tegra_mipi_free);
int tegra_mipi_enable(struct tegra_mipi_device *dev)
{
int err = 0;
mutex_lock(&dev->mipi->lock);
if (dev->mipi->usage_count++ == 0)
err = tegra_mipi_power_up(dev->mipi);
mutex_unlock(&dev->mipi->lock);
return err;
}
EXPORT_SYMBOL(tegra_mipi_enable);
int tegra_mipi_disable(struct tegra_mipi_device *dev)
{
int err = 0;
mutex_lock(&dev->mipi->lock);
if (--dev->mipi->usage_count == 0)
err = tegra_mipi_power_down(dev->mipi);
mutex_unlock(&dev->mipi->lock);
return err;
}
EXPORT_SYMBOL(tegra_mipi_disable);
int tegra_mipi_finish_calibration(struct tegra_mipi_device *device)
{
struct tegra_mipi *mipi = device->mipi;
void __iomem *status_reg = mipi->regs + (MIPI_CAL_STATUS << 2);
u32 value;
int err;
err = readl_relaxed_poll_timeout(status_reg, value,
!(value & MIPI_CAL_STATUS_ACTIVE) &&
(value & MIPI_CAL_STATUS_DONE), 50,
250000);
mutex_unlock(&device->mipi->lock);
clk_disable(device->mipi->clk);
return err;
}
EXPORT_SYMBOL(tegra_mipi_finish_calibration);
int tegra_mipi_start_calibration(struct tegra_mipi_device *device)
{
const struct tegra_mipi_soc *soc = device->mipi->soc;
unsigned int i;
u32 value;
int err;
err = clk_enable(device->mipi->clk);
if (err < 0)
return err;
mutex_lock(&device->mipi->lock);
value = MIPI_CAL_BIAS_PAD_DRV_DN_REF(soc->pad_drive_down_ref) |
MIPI_CAL_BIAS_PAD_DRV_UP_REF(soc->pad_drive_up_ref);
tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG1);
value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
value &= ~MIPI_CAL_BIAS_PAD_VCLAMP(0x7);
value &= ~MIPI_CAL_BIAS_PAD_VAUXP(0x7);
value |= MIPI_CAL_BIAS_PAD_VCLAMP(soc->pad_vclamp_level);
value |= MIPI_CAL_BIAS_PAD_VAUXP(soc->pad_vauxp_level);
tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
for (i = 0; i < soc->num_pads; i++) {
u32 clk = 0, data = 0;
if (device->pads & BIT(i)) {
data = MIPI_CAL_CONFIG_SELECT |
MIPI_CAL_CONFIG_HSPDOS(soc->hspdos) |
MIPI_CAL_CONFIG_HSPUOS(soc->hspuos) |
MIPI_CAL_CONFIG_TERMOS(soc->termos);
clk = MIPI_CAL_CONFIG_SELECT |
MIPI_CAL_CONFIG_HSCLKPDOSD(soc->hsclkpdos) |
MIPI_CAL_CONFIG_HSCLKPUOSD(soc->hsclkpuos);
}
tegra_mipi_writel(device->mipi, data, soc->pads[i].data);
if (soc->has_clk_lane && soc->pads[i].clk != 0)
tegra_mipi_writel(device->mipi, clk, soc->pads[i].clk);
}
value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
value &= ~MIPI_CAL_CTRL_NOISE_FILTER(0xf);
value &= ~MIPI_CAL_CTRL_PRESCALE(0x3);
value |= MIPI_CAL_CTRL_NOISE_FILTER(0xa);
value |= MIPI_CAL_CTRL_PRESCALE(0x2);
if (!soc->clock_enable_override)
value &= ~MIPI_CAL_CTRL_CLKEN_OVR;
else
value |= MIPI_CAL_CTRL_CLKEN_OVR;
tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
/* clear any pending status bits */
value = tegra_mipi_readl(device->mipi, MIPI_CAL_STATUS);
tegra_mipi_writel(device->mipi, value, MIPI_CAL_STATUS);
value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
value |= MIPI_CAL_CTRL_START;
tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
/*
	 * Wait at least 72 us to let the calibration logic finish its
	 * calibration sequence before waiting for the pads' idle state to
	 * apply the results.
*/
usleep_range(75, 80);
return 0;
}
EXPORT_SYMBOL(tegra_mipi_start_calibration);
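/*
 * Illustrative sketch (not part of the original file): the consumer side
 * of a calibration cycle, assuming "dsi->mipi" holds a previously
 * requested struct tegra_mipi_device. Error handling is omitted.
 *
 *	err = tegra_mipi_enable(dsi->mipi);
 *	...
 *	err = tegra_mipi_start_calibration(dsi->mipi);
 *	... start driving the lanes so the pads can settle ...
 *	err = tegra_mipi_finish_calibration(dsi->mipi);
 *	...
 *	tegra_mipi_disable(dsi->mipi);
 */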
static const struct tegra_mipi_pad tegra114_mipi_pads[] = {
{ .data = MIPI_CAL_CONFIG_CSIA },
{ .data = MIPI_CAL_CONFIG_CSIB },
{ .data = MIPI_CAL_CONFIG_CSIC },
{ .data = MIPI_CAL_CONFIG_CSID },
{ .data = MIPI_CAL_CONFIG_CSIE },
{ .data = MIPI_CAL_CONFIG_DSIA },
{ .data = MIPI_CAL_CONFIG_DSIB },
{ .data = MIPI_CAL_CONFIG_DSIC },
{ .data = MIPI_CAL_CONFIG_DSID },
};
static const struct tegra_mipi_soc tegra114_mipi_soc = {
.has_clk_lane = false,
.pads = tegra114_mipi_pads,
.num_pads = ARRAY_SIZE(tegra114_mipi_pads),
.clock_enable_override = true,
.needs_vclamp_ref = true,
.pad_drive_down_ref = 0x2,
.pad_drive_up_ref = 0x0,
.pad_vclamp_level = 0x0,
.pad_vauxp_level = 0x0,
.hspdos = 0x0,
.hspuos = 0x4,
.termos = 0x5,
.hsclkpdos = 0x0,
.hsclkpuos = 0x4,
};
static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
{ .data = MIPI_CAL_CONFIG_CSIA, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
{ .data = MIPI_CAL_CONFIG_CSIB, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
{ .data = MIPI_CAL_CONFIG_CSIC, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
{ .data = MIPI_CAL_CONFIG_CSID, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
{ .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK },
{ .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK },
{ .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK },
};
static const struct tegra_mipi_soc tegra124_mipi_soc = {
.has_clk_lane = true,
.pads = tegra124_mipi_pads,
.num_pads = ARRAY_SIZE(tegra124_mipi_pads),
.clock_enable_override = true,
.needs_vclamp_ref = true,
.pad_drive_down_ref = 0x2,
.pad_drive_up_ref = 0x0,
.pad_vclamp_level = 0x0,
.pad_vauxp_level = 0x0,
.hspdos = 0x0,
.hspuos = 0x0,
.termos = 0x0,
.hsclkpdos = 0x1,
.hsclkpuos = 0x2,
};
static const struct tegra_mipi_soc tegra132_mipi_soc = {
.has_clk_lane = true,
.pads = tegra124_mipi_pads,
.num_pads = ARRAY_SIZE(tegra124_mipi_pads),
.clock_enable_override = false,
.needs_vclamp_ref = false,
.pad_drive_down_ref = 0x0,
.pad_drive_up_ref = 0x3,
.pad_vclamp_level = 0x0,
.pad_vauxp_level = 0x0,
.hspdos = 0x0,
.hspuos = 0x0,
.termos = 0x0,
.hsclkpdos = 0x3,
.hsclkpuos = 0x2,
};
static const struct tegra_mipi_pad tegra210_mipi_pads[] = {
{ .data = MIPI_CAL_CONFIG_CSIA, .clk = 0 },
{ .data = MIPI_CAL_CONFIG_CSIB, .clk = 0 },
{ .data = MIPI_CAL_CONFIG_CSIC, .clk = 0 },
{ .data = MIPI_CAL_CONFIG_CSID, .clk = 0 },
{ .data = MIPI_CAL_CONFIG_CSIE, .clk = 0 },
{ .data = MIPI_CAL_CONFIG_CSIF, .clk = 0 },
{ .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK },
{ .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK },
{ .data = MIPI_CAL_CONFIG_DSIC, .clk = MIPI_CAL_CONFIG_DSIC_CLK },
{ .data = MIPI_CAL_CONFIG_DSID, .clk = MIPI_CAL_CONFIG_DSID_CLK },
};
static const struct tegra_mipi_soc tegra210_mipi_soc = {
.has_clk_lane = true,
.pads = tegra210_mipi_pads,
.num_pads = ARRAY_SIZE(tegra210_mipi_pads),
.clock_enable_override = true,
.needs_vclamp_ref = false,
.pad_drive_down_ref = 0x0,
.pad_drive_up_ref = 0x3,
.pad_vclamp_level = 0x1,
.pad_vauxp_level = 0x1,
.hspdos = 0x0,
.hspuos = 0x2,
.termos = 0x0,
.hsclkpdos = 0x0,
.hsclkpuos = 0x2,
};
static const struct of_device_id tegra_mipi_of_match[] = {
{ .compatible = "nvidia,tegra114-mipi", .data = &tegra114_mipi_soc },
{ .compatible = "nvidia,tegra124-mipi", .data = &tegra124_mipi_soc },
{ .compatible = "nvidia,tegra132-mipi", .data = &tegra132_mipi_soc },
{ .compatible = "nvidia,tegra210-mipi", .data = &tegra210_mipi_soc },
{ },
};
static int tegra_mipi_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct tegra_mipi *mipi;
int err;
match = of_match_node(tegra_mipi_of_match, pdev->dev.of_node);
if (!match)
return -ENODEV;
mipi = devm_kzalloc(&pdev->dev, sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
mipi->soc = match->data;
mipi->dev = &pdev->dev;
mipi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(mipi->regs))
return PTR_ERR(mipi->regs);
mutex_init(&mipi->lock);
mipi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(mipi->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(mipi->clk);
}
err = clk_prepare(mipi->clk);
if (err < 0)
return err;
platform_set_drvdata(pdev, mipi);
return 0;
}
static int tegra_mipi_remove(struct platform_device *pdev)
{
struct tegra_mipi *mipi = platform_get_drvdata(pdev);
clk_unprepare(mipi->clk);
return 0;
}
struct platform_driver tegra_mipi_driver = {
.driver = {
.name = "tegra-mipi",
.of_match_table = tegra_mipi_of_match,
},
.probe = tegra_mipi_probe,
.remove = tegra_mipi_remove,
};
| linux-master | drivers/gpu/host1x/mipi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra host1x Syncpoints
*
* Copyright (c) 2010-2015, NVIDIA Corporation.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <trace/events/host1x.h>
#include "syncpt.h"
#include "dev.h"
#include "intr.h"
#include "debug.h"
#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15
static struct host1x_syncpt_base *
host1x_syncpt_base_request(struct host1x *host)
{
struct host1x_syncpt_base *bases = host->bases;
unsigned int i;
for (i = 0; i < host->info->nb_bases; i++)
if (!bases[i].requested)
break;
if (i >= host->info->nb_bases)
return NULL;
bases[i].requested = true;
return &bases[i];
}
static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
{
if (base)
base->requested = false;
}
/**
* host1x_syncpt_alloc() - allocate a syncpoint
* @host: host1x device data
* @flags: bitfield of HOST1X_SYNCPT_* flags
* @name: name for the syncpoint for use in debug prints
*
* Allocates a hardware syncpoint for the caller's use. The caller then has
* the sole authority to mutate the syncpoint's value until it is freed again.
*
* If no free syncpoints are available, or a NULL name was specified, returns
* NULL.
*/
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
unsigned long flags,
const char *name)
{
struct host1x_syncpt *sp = host->syncpt;
char *full_name;
unsigned int i;
if (!name)
return NULL;
mutex_lock(&host->syncpt_mutex);
for (i = 0; i < host->info->nb_pts && kref_read(&sp->ref); i++, sp++)
;
if (i >= host->info->nb_pts)
goto unlock;
if (flags & HOST1X_SYNCPT_HAS_BASE) {
sp->base = host1x_syncpt_base_request(host);
if (!sp->base)
goto unlock;
}
full_name = kasprintf(GFP_KERNEL, "%u-%s", sp->id, name);
if (!full_name)
goto free_base;
sp->name = full_name;
if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
sp->client_managed = true;
else
sp->client_managed = false;
kref_init(&sp->ref);
mutex_unlock(&host->syncpt_mutex);
return sp;
free_base:
host1x_syncpt_base_free(sp->base);
sp->base = NULL;
unlock:
mutex_unlock(&host->syncpt_mutex);
return NULL;
}
EXPORT_SYMBOL(host1x_syncpt_alloc);
/**
* host1x_syncpt_id() - retrieve syncpoint ID
* @sp: host1x syncpoint
*
* Given a pointer to a struct host1x_syncpt, retrieves its ID. This ID is
* often used as a value to program into registers that control how hardware
* blocks interact with syncpoints.
*/
u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
return sp->id;
}
EXPORT_SYMBOL(host1x_syncpt_id);
/**
* host1x_syncpt_incr_max() - update the value sent to hardware
* @sp: host1x syncpoint
* @incrs: number of increments
*/
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
return (u32)atomic_add_return(incrs, &sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_incr_max);
/*
* Write cached syncpoint and waitbase values to hardware.
*/
void host1x_syncpt_restore(struct host1x *host)
{
struct host1x_syncpt *sp_base = host->syncpt;
unsigned int i;
for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
/*
* Unassign syncpt from channels for purposes of Tegra186
* syncpoint protection. This prevents any channel from
* accessing it until it is reassigned.
*/
host1x_hw_syncpt_assign_to_channel(host, sp_base + i, NULL);
host1x_hw_syncpt_restore(host, sp_base + i);
}
for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
host1x_hw_syncpt_enable_protection(host);
wmb();
}
/*
* Update the cached syncpoint and waitbase values by reading them
* from the registers.
*/
void host1x_syncpt_save(struct host1x *host)
{
struct host1x_syncpt *sp_base = host->syncpt;
unsigned int i;
for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
if (host1x_syncpt_client_managed(sp_base + i))
host1x_hw_syncpt_load(host, sp_base + i);
else
WARN_ON(!host1x_syncpt_idle(sp_base + i));
}
for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
host1x_hw_syncpt_load_wait_base(host, sp_base + i);
}
/*
* Updates the cached syncpoint value by reading a new value from the hardware
* register
*/
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
u32 val;
val = host1x_hw_syncpt_load(sp->host, sp);
trace_host1x_syncpt_load_min(sp->id, val);
return val;
}
/*
* Get the current syncpoint base
*/
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
host1x_hw_syncpt_load_wait_base(sp->host, sp);
return sp->base_val;
}
/**
* host1x_syncpt_incr() - increment syncpoint value from CPU, updating cache
* @sp: host1x syncpoint
*/
int host1x_syncpt_incr(struct host1x_syncpt *sp)
{
return host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
EXPORT_SYMBOL(host1x_syncpt_incr);
/**
* host1x_syncpt_wait() - wait for a syncpoint to reach a given value
* @sp: host1x syncpoint
* @thresh: threshold
* @timeout: maximum time to wait for the syncpoint to reach the given value
* @value: return location for the syncpoint value
*/
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
u32 *value)
{
struct dma_fence *fence;
long wait_err;
host1x_hw_syncpt_load(sp->host, sp);
if (value)
*value = host1x_syncpt_load(sp);
if (host1x_syncpt_is_expired(sp, thresh))
return 0;
if (timeout < 0)
timeout = LONG_MAX;
else if (timeout == 0)
return -EAGAIN;
fence = host1x_fence_create(sp, thresh, false);
if (IS_ERR(fence))
return PTR_ERR(fence);
wait_err = dma_fence_wait_timeout(fence, true, timeout);
if (wait_err == 0)
host1x_fence_cancel(fence);
dma_fence_put(fence);
if (value)
*value = host1x_syncpt_load(sp);
/*
* Don't rely on dma_fence_wait_timeout return value,
* since it returns zero both on timeout and if the
* wait completed with 0 jiffies left.
*/
host1x_hw_syncpt_load(sp->host, sp);
if (wait_err == 0 && !host1x_syncpt_is_expired(sp, thresh))
return -EAGAIN;
else if (wait_err < 0)
return wait_err;
else
return 0;
}
EXPORT_SYMBOL(host1x_syncpt_wait);
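/*
 * Illustrative sketch (not part of the original file): a client that
 * submits work incrementing its syncpoint once can compute the threshold
 * up front and then wait on it. Error handling is omitted.
 *
 *	threshold = host1x_syncpt_incr_max(sp, 1);
 *	... submit work that increments sp once ...
 *	err = host1x_syncpt_wait(sp, threshold,
 *				 msecs_to_jiffies(1000), &value);
 */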
/*
* Returns true if syncpoint is expired, false if we may need to wait
*/
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
u32 current_val;
smp_rmb();
current_val = (u32)atomic_read(&sp->min_val);
return ((current_val - thresh) & 0x80000000U) == 0U;
}
int host1x_syncpt_init(struct host1x *host)
{
struct host1x_syncpt_base *bases;
struct host1x_syncpt *syncpt;
unsigned int i;
syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
GFP_KERNEL);
if (!syncpt)
return -ENOMEM;
bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
GFP_KERNEL);
if (!bases)
return -ENOMEM;
for (i = 0; i < host->info->nb_pts; i++) {
syncpt[i].id = i;
syncpt[i].host = host;
}
for (i = 0; i < host->info->nb_bases; i++)
bases[i].id = i;
mutex_init(&host->syncpt_mutex);
host->syncpt = syncpt;
host->bases = bases;
/* Allocate sync point to use for clearing waits for expired fences */
host->nop_sp = host1x_syncpt_alloc(host, 0, "reserved-nop");
if (!host->nop_sp)
return -ENOMEM;
if (host->info->reserve_vblank_syncpts) {
kref_init(&host->syncpt[26].ref);
kref_init(&host->syncpt[27].ref);
}
return 0;
}
/**
* host1x_syncpt_request() - request a syncpoint
* @client: client requesting the syncpoint
* @flags: flags
*
* host1x client drivers can use this function to allocate a syncpoint for
* subsequent use. A syncpoint returned by this function will be reserved for
* use by the client exclusively. When no longer using a syncpoint, a host1x
* client driver needs to release it using host1x_syncpt_put().
*/
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
unsigned long flags)
{
struct host1x *host = dev_get_drvdata(client->host->parent);
return host1x_syncpt_alloc(host, flags, dev_name(client->dev));
}
EXPORT_SYMBOL(host1x_syncpt_request);
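/*
 * Illustrative usage sketch (added, not part of the original source): a
 * client driver typically requests a syncpoint at probe time and releases it
 * on teardown. Passing 0 for the flags is an assumption.
 *
 *        struct host1x_syncpt *sp;
 *
 *        sp = host1x_syncpt_request(client, 0);
 *        if (!sp)
 *                return -ENOMEM;
 *        ...
 *        host1x_syncpt_put(sp);
 */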
static void syncpt_release(struct kref *ref)
{
struct host1x_syncpt *sp = container_of(ref, struct host1x_syncpt, ref);
atomic_set(&sp->max_val, host1x_syncpt_read(sp));
sp->locked = false;
mutex_lock(&sp->host->syncpt_mutex);
host1x_syncpt_base_free(sp->base);
kfree(sp->name);
sp->base = NULL;
sp->name = NULL;
sp->client_managed = false;
mutex_unlock(&sp->host->syncpt_mutex);
}
/**
* host1x_syncpt_put() - free a requested syncpoint
* @sp: host1x syncpoint
*
* Release a syncpoint previously allocated using host1x_syncpt_request(). A
* host1x client driver should call this when the syncpoint is no longer in
* use.
*/
void host1x_syncpt_put(struct host1x_syncpt *sp)
{
if (!sp)
return;
kref_put(&sp->ref, syncpt_release);
}
EXPORT_SYMBOL(host1x_syncpt_put);
void host1x_syncpt_deinit(struct host1x *host)
{
struct host1x_syncpt *sp = host->syncpt;
unsigned int i;
for (i = 0; i < host->info->nb_pts; i++, sp++)
kfree(sp->name);
}
/**
* host1x_syncpt_read_max() - read maximum syncpoint value
* @sp: host1x syncpoint
*
* The maximum syncpoint value indicates how many operations are queued,
* either in the channel or in a software thread.
*/
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
smp_rmb();
return (u32)atomic_read(&sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_max);
/**
* host1x_syncpt_read_min() - read minimum syncpoint value
* @sp: host1x syncpoint
*
* The minimum syncpoint value is a shadow of the current sync point value in
* hardware.
*/
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
smp_rmb();
return (u32)atomic_read(&sp->min_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_min);
/**
* host1x_syncpt_read() - read the current syncpoint value
* @sp: host1x syncpoint
*/
u32 host1x_syncpt_read(struct host1x_syncpt *sp)
{
return host1x_syncpt_load(sp);
}
EXPORT_SYMBOL(host1x_syncpt_read);
unsigned int host1x_syncpt_nb_pts(struct host1x *host)
{
return host->info->nb_pts;
}
unsigned int host1x_syncpt_nb_bases(struct host1x *host)
{
return host->info->nb_bases;
}
unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
{
return host->info->nb_mlocks;
}
/**
* host1x_syncpt_get_by_id() - obtain a syncpoint by ID
* @host: host1x controller
* @id: syncpoint ID
*/
struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host,
unsigned int id)
{
if (id >= host->info->nb_pts)
return NULL;
if (kref_get_unless_zero(&host->syncpt[id].ref))
return &host->syncpt[id];
else
return NULL;
}
EXPORT_SYMBOL(host1x_syncpt_get_by_id);
/**
* host1x_syncpt_get_by_id_noref() - obtain a syncpoint by ID but don't
* increase the refcount.
* @host: host1x controller
* @id: syncpoint ID
*/
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host,
unsigned int id)
{
if (id >= host->info->nb_pts)
return NULL;
return &host->syncpt[id];
}
EXPORT_SYMBOL(host1x_syncpt_get_by_id_noref);
/**
* host1x_syncpt_get() - increment syncpoint refcount
* @sp: syncpoint
*/
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp)
{
kref_get(&sp->ref);
return sp;
}
EXPORT_SYMBOL(host1x_syncpt_get);
/**
* host1x_syncpt_get_base() - obtain the wait base associated with a syncpoint
* @sp: host1x syncpoint
*/
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
{
return sp ? sp->base : NULL;
}
EXPORT_SYMBOL(host1x_syncpt_get_base);
/**
* host1x_syncpt_base_id() - retrieve the ID of a syncpoint wait base
* @base: host1x syncpoint wait base
*/
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
{
return base->id;
}
EXPORT_SYMBOL(host1x_syncpt_base_id);
static void do_nothing(struct kref *ref)
{
}
/**
* host1x_syncpt_release_vblank_reservation() - Make VBLANK syncpoint
* available for allocation
*
* @client: host1x bus client
* @syncpt_id: syncpoint ID to make available
*
* Makes VBLANK<i> syncpoint available for allocation if it was
* reserved at initialization time. This should be called by the display
* driver after it has ensured that any VBLANK increment programming configured
* by the boot chain has been disabled.
*/
void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
u32 syncpt_id)
{
struct host1x *host = dev_get_drvdata(client->host->parent);
if (!host->info->reserve_vblank_syncpts)
return;
kref_put(&host->syncpt[syncpt_id].ref, do_nothing);
}
EXPORT_SYMBOL(host1x_syncpt_release_vblank_reservation);
| linux-master | drivers/gpu/host1x/syncpt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra host1x Channel
*
* Copyright (c) 2010-2013, NVIDIA Corporation.
*/
#include <linux/slab.h>
#include <linux/module.h>
#include "channel.h"
#include "dev.h"
#include "job.h"
/* Constructor for the host1x device list */
int host1x_channel_list_init(struct host1x_channel_list *chlist,
unsigned int num_channels)
{
chlist->channels = kcalloc(num_channels, sizeof(struct host1x_channel),
GFP_KERNEL);
if (!chlist->channels)
return -ENOMEM;
chlist->allocated_channels = bitmap_zalloc(num_channels, GFP_KERNEL);
if (!chlist->allocated_channels) {
kfree(chlist->channels);
return -ENOMEM;
}
return 0;
}
void host1x_channel_list_free(struct host1x_channel_list *chlist)
{
bitmap_free(chlist->allocated_channels);
kfree(chlist->channels);
}
int host1x_job_submit(struct host1x_job *job)
{
struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
return host1x_hw_channel_submit(host, job);
}
EXPORT_SYMBOL(host1x_job_submit);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
{
kref_get(&channel->refcount);
return channel;
}
EXPORT_SYMBOL(host1x_channel_get);
/**
* host1x_channel_get_index() - Attempt to get channel reference by index
* @host: Host1x device object
* @index: Index of channel
*
* If channel number @index is currently allocated, increase its refcount
* and return a pointer to it. Otherwise, return NULL.
*/
struct host1x_channel *host1x_channel_get_index(struct host1x *host,
unsigned int index)
{
struct host1x_channel *ch = &host->channel_list.channels[index];
if (!kref_get_unless_zero(&ch->refcount))
return NULL;
return ch;
}
void host1x_channel_stop(struct host1x_channel *channel)
{
struct host1x *host = dev_get_drvdata(channel->dev->parent);
host1x_hw_cdma_stop(host, &channel->cdma);
}
EXPORT_SYMBOL(host1x_channel_stop);
static void release_channel(struct kref *kref)
{
struct host1x_channel *channel =
container_of(kref, struct host1x_channel, refcount);
struct host1x *host = dev_get_drvdata(channel->dev->parent);
struct host1x_channel_list *chlist = &host->channel_list;
host1x_hw_cdma_stop(host, &channel->cdma);
host1x_cdma_deinit(&channel->cdma);
clear_bit(channel->id, chlist->allocated_channels);
}
void host1x_channel_put(struct host1x_channel *channel)
{
kref_put(&channel->refcount, release_channel);
}
EXPORT_SYMBOL(host1x_channel_put);
static struct host1x_channel *acquire_unused_channel(struct host1x *host)
{
struct host1x_channel_list *chlist = &host->channel_list;
unsigned int max_channels = host->info->nb_channels;
unsigned int index;
index = find_first_zero_bit(chlist->allocated_channels, max_channels);
if (index >= max_channels) {
dev_err(host->dev, "failed to find free channel\n");
return NULL;
}
chlist->channels[index].id = index;
set_bit(index, chlist->allocated_channels);
return &chlist->channels[index];
}
/**
* host1x_channel_request() - Allocate a channel
* @client: Host1x client this channel will be used to send commands to
*
* Allocates a new host1x channel for @client. May return NULL if CDMA
* initialization fails.
*/
struct host1x_channel *host1x_channel_request(struct host1x_client *client)
{
struct host1x *host = dev_get_drvdata(client->dev->parent);
struct host1x_channel_list *chlist = &host->channel_list;
struct host1x_channel *channel;
int err;
channel = acquire_unused_channel(host);
if (!channel)
return NULL;
kref_init(&channel->refcount);
mutex_init(&channel->submitlock);
channel->client = client;
channel->dev = client->dev;
err = host1x_hw_channel_init(host, channel, channel->id);
if (err < 0)
goto fail;
err = host1x_cdma_init(&channel->cdma);
if (err < 0)
goto fail;
return channel;
fail:
clear_bit(channel->id, chlist->allocated_channels);
dev_err(client->dev, "failed to initialize channel\n");
return NULL;
}
EXPORT_SYMBOL(host1x_channel_request);
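/*
 * Illustrative usage sketch (added, not part of the original source): a
 * client requests a channel, submits work to it and drops its reference when
 * done. Error handling is abbreviated.
 *
 *        struct host1x_channel *channel;
 *
 *        channel = host1x_channel_request(client);
 *        if (!channel)
 *                return -EBUSY;
 *        ...
 *        host1x_channel_put(channel);
 */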
| linux-master | drivers/gpu/host1x/channel.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010 Google, Inc.
* Author: Erik Gilling <[email protected]>
*
* Copyright (C) 2011-2013 NVIDIA Corporation
*/
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include "dev.h"
#include "debug.h"
#include "channel.h"
static DEFINE_MUTEX(debug_lock);
unsigned int host1x_debug_trace_cmdbuf;
static pid_t host1x_debug_force_timeout_pid;
static u32 host1x_debug_force_timeout_val;
static u32 host1x_debug_force_timeout_channel;
void host1x_debug_output(struct output *o, const char *fmt, ...)
{
va_list args;
int len;
va_start(args, fmt);
len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
va_end(args);
o->fn(o->ctx, o->buf, len, false);
}
void host1x_debug_cont(struct output *o, const char *fmt, ...)
{
va_list args;
int len;
va_start(args, fmt);
len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
va_end(args);
o->fn(o->ctx, o->buf, len, true);
}
static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
{
struct host1x *m = dev_get_drvdata(ch->dev->parent);
struct output *o = data;
int err;
err = pm_runtime_resume_and_get(m->dev);
if (err < 0)
return err;
mutex_lock(&ch->cdma.lock);
mutex_lock(&debug_lock);
if (show_fifo)
host1x_hw_show_channel_fifo(m, ch, o);
host1x_hw_show_channel_cdma(m, ch, o);
mutex_unlock(&debug_lock);
mutex_unlock(&ch->cdma.lock);
pm_runtime_put(m->dev);
return 0;
}
static void show_syncpts(struct host1x *m, struct output *o, bool show_all)
{
unsigned long irqflags;
struct list_head *pos;
unsigned int i;
int err;
host1x_debug_output(o, "---- syncpts ----\n");
err = pm_runtime_resume_and_get(m->dev);
if (err < 0)
return;
for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
u32 max = host1x_syncpt_read_max(m->syncpt + i);
u32 min = host1x_syncpt_load(m->syncpt + i);
unsigned int waiters = 0;
spin_lock_irqsave(&m->syncpt[i].fences.lock, irqflags);
list_for_each(pos, &m->syncpt[i].fences.list)
waiters++;
spin_unlock_irqrestore(&m->syncpt[i].fences.lock, irqflags);
if (!kref_read(&m->syncpt[i].ref))
continue;
if (!show_all && !min && !max && !waiters)
continue;
host1x_debug_output(o,
"id %u (%s) min %d max %d (%d waiters)\n",
i, m->syncpt[i].name, min, max, waiters);
}
for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
u32 base_val;
base_val = host1x_syncpt_load_wait_base(m->syncpt + i);
if (base_val)
host1x_debug_output(o, "waitbase id %u val %d\n", i,
base_val);
}
pm_runtime_put(m->dev);
host1x_debug_output(o, "\n");
}
static void show_all(struct host1x *m, struct output *o, bool show_fifo)
{
unsigned int i;
host1x_hw_show_mlocks(m, o);
show_syncpts(m, o, true);
host1x_debug_output(o, "---- channels ----\n");
for (i = 0; i < m->info->nb_channels; ++i) {
struct host1x_channel *ch = host1x_channel_get_index(m, i);
if (ch) {
show_channel(ch, o, show_fifo);
host1x_channel_put(ch);
}
}
}
static int host1x_debug_all_show(struct seq_file *s, void *unused)
{
struct output o = {
.fn = write_to_seqfile,
.ctx = s
};
show_all(s->private, &o, true);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(host1x_debug_all);
static int host1x_debug_show(struct seq_file *s, void *unused)
{
struct output o = {
.fn = write_to_seqfile,
.ctx = s
};
show_all(s->private, &o, false);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(host1x_debug);
static void host1x_debugfs_init(struct host1x *host1x)
{
struct dentry *de = debugfs_create_dir("tegra-host1x", NULL);
/* Store the created entry */
host1x->debugfs = de;
debugfs_create_file("status", S_IRUGO, de, host1x, &host1x_debug_fops);
debugfs_create_file("status_all", S_IRUGO, de, host1x,
&host1x_debug_all_fops);
debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR, de,
&host1x_debug_trace_cmdbuf);
host1x_hw_debug_init(host1x, de);
debugfs_create_u32("force_timeout_pid", S_IRUGO|S_IWUSR, de,
&host1x_debug_force_timeout_pid);
debugfs_create_u32("force_timeout_val", S_IRUGO|S_IWUSR, de,
&host1x_debug_force_timeout_val);
debugfs_create_u32("force_timeout_channel", S_IRUGO|S_IWUSR, de,
&host1x_debug_force_timeout_channel);
}
static void host1x_debugfs_exit(struct host1x *host1x)
{
debugfs_remove_recursive(host1x->debugfs);
}
void host1x_debug_init(struct host1x *host1x)
{
if (IS_ENABLED(CONFIG_DEBUG_FS))
host1x_debugfs_init(host1x);
}
void host1x_debug_deinit(struct host1x *host1x)
{
if (IS_ENABLED(CONFIG_DEBUG_FS))
host1x_debugfs_exit(host1x);
}
void host1x_debug_dump(struct host1x *host1x)
{
struct output o = {
.fn = write_to_printk
};
show_all(host1x, &o, true);
}
void host1x_debug_dump_syncpts(struct host1x *host1x)
{
struct output o = {
.fn = write_to_printk
};
show_syncpts(host1x, &o, false);
}
| linux-master | drivers/gpu/host1x/debug.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Avionic Design GmbH
* Copyright (C) 2012-2013, NVIDIA Corporation
*/
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include "bus.h"
#include "dev.h"
static DEFINE_MUTEX(clients_lock);
static LIST_HEAD(clients);
static DEFINE_MUTEX(drivers_lock);
static LIST_HEAD(drivers);
static DEFINE_MUTEX(devices_lock);
static LIST_HEAD(devices);
struct host1x_subdev {
struct host1x_client *client;
struct device_node *np;
struct list_head list;
};
/**
* host1x_subdev_add() - add a new subdevice with an associated device node
* @device: host1x device to add the subdevice to
* @driver: host1x driver containing the subdevices
* @np: device node
*/
static int host1x_subdev_add(struct host1x_device *device,
struct host1x_driver *driver,
struct device_node *np)
{
struct host1x_subdev *subdev;
struct device_node *child;
int err;
subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
if (!subdev)
return -ENOMEM;
INIT_LIST_HEAD(&subdev->list);
subdev->np = of_node_get(np);
mutex_lock(&device->subdevs_lock);
list_add_tail(&subdev->list, &device->subdevs);
mutex_unlock(&device->subdevs_lock);
/* recursively add children */
for_each_child_of_node(np, child) {
if (of_match_node(driver->subdevs, child) &&
of_device_is_available(child)) {
err = host1x_subdev_add(device, driver, child);
if (err < 0) {
/* XXX cleanup? */
of_node_put(child);
return err;
}
}
}
return 0;
}
/**
* host1x_subdev_del() - remove subdevice
* @subdev: subdevice to remove
*/
static void host1x_subdev_del(struct host1x_subdev *subdev)
{
list_del(&subdev->list);
of_node_put(subdev->np);
kfree(subdev);
}
/**
* host1x_device_parse_dt() - scan device tree and add matching subdevices
* @device: host1x logical device
* @driver: host1x driver
*/
static int host1x_device_parse_dt(struct host1x_device *device,
struct host1x_driver *driver)
{
struct device_node *np;
int err;
for_each_child_of_node(device->dev.parent->of_node, np) {
if (of_match_node(driver->subdevs, np) &&
of_device_is_available(np)) {
err = host1x_subdev_add(device, driver, np);
if (err < 0) {
of_node_put(np);
return err;
}
}
}
return 0;
}
static void host1x_subdev_register(struct host1x_device *device,
struct host1x_subdev *subdev,
struct host1x_client *client)
{
int err;
/*
* Move the subdevice to the list of active (registered) subdevices
* and associate it with a client. At the same time, associate the
* client with its parent device.
*/
mutex_lock(&device->subdevs_lock);
mutex_lock(&device->clients_lock);
list_move_tail(&client->list, &device->clients);
list_move_tail(&subdev->list, &device->active);
client->host = &device->dev;
subdev->client = client;
mutex_unlock(&device->clients_lock);
mutex_unlock(&device->subdevs_lock);
if (list_empty(&device->subdevs)) {
err = device_add(&device->dev);
if (err < 0)
dev_err(&device->dev, "failed to add: %d\n", err);
else
device->registered = true;
}
}
static void __host1x_subdev_unregister(struct host1x_device *device,
struct host1x_subdev *subdev)
{
struct host1x_client *client = subdev->client;
/*
* If all subdevices have been activated, we're about to remove the
* first active subdevice, so unload the driver first.
*/
if (list_empty(&device->subdevs)) {
if (device->registered) {
device->registered = false;
device_del(&device->dev);
}
}
/*
* Move the subdevice back to the list of idle subdevices and remove
* it from list of clients.
*/
mutex_lock(&device->clients_lock);
subdev->client = NULL;
client->host = NULL;
list_move_tail(&subdev->list, &device->subdevs);
/*
* XXX: Perhaps don't do this here, but rather explicitly remove it
* when the device is about to be deleted.
*
* This is somewhat complicated by the fact that this function is
* used to remove the subdevice when a client is unregistered but
* also when the composite device is about to be removed.
*/
list_del_init(&client->list);
mutex_unlock(&device->clients_lock);
}
static void host1x_subdev_unregister(struct host1x_device *device,
struct host1x_subdev *subdev)
{
mutex_lock(&device->subdevs_lock);
__host1x_subdev_unregister(device, subdev);
mutex_unlock(&device->subdevs_lock);
}
/**
* host1x_device_init() - initialize a host1x logical device
* @device: host1x logical device
*
* The driver for the host1x logical device can call this during execution of
* its &host1x_driver.probe implementation to initialize each of its clients.
* The client drivers access the subsystem specific driver data using the
* &host1x_client.parent field and driver data associated with it (usually by
* calling dev_get_drvdata()).
*/
int host1x_device_init(struct host1x_device *device)
{
struct host1x_client *client;
int err;
mutex_lock(&device->clients_lock);
list_for_each_entry(client, &device->clients, list) {
if (client->ops && client->ops->early_init) {
err = client->ops->early_init(client);
if (err < 0) {
dev_err(&device->dev, "failed to early initialize %s: %d\n",
dev_name(client->dev), err);
goto teardown_late;
}
}
}
list_for_each_entry(client, &device->clients, list) {
if (client->ops && client->ops->init) {
err = client->ops->init(client);
if (err < 0) {
dev_err(&device->dev,
"failed to initialize %s: %d\n",
dev_name(client->dev), err);
goto teardown;
}
}
}
mutex_unlock(&device->clients_lock);
return 0;
teardown:
list_for_each_entry_continue_reverse(client, &device->clients, list)
if (client->ops->exit)
client->ops->exit(client);
/* reset client to end of list for late teardown */
client = list_entry(&device->clients, struct host1x_client, list);
teardown_late:
list_for_each_entry_continue_reverse(client, &device->clients, list)
if (client->ops->late_exit)
client->ops->late_exit(client);
mutex_unlock(&device->clients_lock);
return err;
}
EXPORT_SYMBOL(host1x_device_init);
/**
* host1x_device_exit() - uninitialize host1x logical device
* @device: host1x logical device
*
* When the driver for a host1x logical device is unloaded, it can call this
* function to tear down each of its clients. Typically this is done after a
* subsystem-specific data structure is removed and the functionality can no
* longer be used.
*/
int host1x_device_exit(struct host1x_device *device)
{
struct host1x_client *client;
int err;
mutex_lock(&device->clients_lock);
list_for_each_entry_reverse(client, &device->clients, list) {
if (client->ops && client->ops->exit) {
err = client->ops->exit(client);
if (err < 0) {
dev_err(&device->dev,
"failed to cleanup %s: %d\n",
dev_name(client->dev), err);
mutex_unlock(&device->clients_lock);
return err;
}
}
}
list_for_each_entry_reverse(client, &device->clients, list) {
if (client->ops && client->ops->late_exit) {
err = client->ops->late_exit(client);
if (err < 0) {
dev_err(&device->dev, "failed to late cleanup %s: %d\n",
dev_name(client->dev), err);
mutex_unlock(&device->clients_lock);
return err;
}
}
}
mutex_unlock(&device->clients_lock);
return 0;
}
EXPORT_SYMBOL(host1x_device_exit);
static int host1x_add_client(struct host1x *host1x,
struct host1x_client *client)
{
struct host1x_device *device;
struct host1x_subdev *subdev;
mutex_lock(&host1x->devices_lock);
list_for_each_entry(device, &host1x->devices, list) {
list_for_each_entry(subdev, &device->subdevs, list) {
if (subdev->np == client->dev->of_node) {
host1x_subdev_register(device, subdev, client);
mutex_unlock(&host1x->devices_lock);
return 0;
}
}
}
mutex_unlock(&host1x->devices_lock);
return -ENODEV;
}
static int host1x_del_client(struct host1x *host1x,
struct host1x_client *client)
{
struct host1x_device *device, *dt;
struct host1x_subdev *subdev;
mutex_lock(&host1x->devices_lock);
list_for_each_entry_safe(device, dt, &host1x->devices, list) {
list_for_each_entry(subdev, &device->active, list) {
if (subdev->client == client) {
host1x_subdev_unregister(device, subdev);
mutex_unlock(&host1x->devices_lock);
return 0;
}
}
}
mutex_unlock(&host1x->devices_lock);
return -ENODEV;
}
static int host1x_device_match(struct device *dev, struct device_driver *drv)
{
return strcmp(dev_name(dev), drv->name) == 0;
}
/*
* Note that this is really only needed for backwards compatibility
* with libdrm, which parses this information from sysfs and will
* fail if it can't find the OF_FULLNAME, specifically.
*/
static int host1x_device_uevent(const struct device *dev,
struct kobj_uevent_env *env)
{
of_device_uevent(dev->parent, env);
return 0;
}
static int host1x_dma_configure(struct device *dev)
{
return of_dma_configure(dev, dev->of_node, true);
}
static const struct dev_pm_ops host1x_device_pm_ops = {
.suspend = pm_generic_suspend,
.resume = pm_generic_resume,
.freeze = pm_generic_freeze,
.thaw = pm_generic_thaw,
.poweroff = pm_generic_poweroff,
.restore = pm_generic_restore,
};
struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
.uevent = host1x_device_uevent,
.dma_configure = host1x_dma_configure,
.pm = &host1x_device_pm_ops,
};
static void __host1x_device_del(struct host1x_device *device)
{
struct host1x_subdev *subdev, *sd;
struct host1x_client *client, *cl;
mutex_lock(&device->subdevs_lock);
/* unregister subdevices */
list_for_each_entry_safe(subdev, sd, &device->active, list) {
/*
* host1x_subdev_unregister() will remove the client from
* any lists, so we'll need to manually add it back to the
* list of idle clients.
*
* XXX: Alternatively, perhaps don't remove the client from
* any lists in host1x_subdev_unregister() and instead do
* that explicitly from host1x_unregister_client()?
*/
client = subdev->client;
__host1x_subdev_unregister(device, subdev);
/* add the client to the list of idle clients */
mutex_lock(&clients_lock);
list_add_tail(&client->list, &clients);
mutex_unlock(&clients_lock);
}
/* remove subdevices */
list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
host1x_subdev_del(subdev);
mutex_unlock(&device->subdevs_lock);
/* move clients to idle list */
mutex_lock(&clients_lock);
mutex_lock(&device->clients_lock);
list_for_each_entry_safe(client, cl, &device->clients, list)
list_move_tail(&client->list, &clients);
mutex_unlock(&device->clients_lock);
mutex_unlock(&clients_lock);
/* finally remove the device */
list_del_init(&device->list);
}
static void host1x_device_release(struct device *dev)
{
struct host1x_device *device = to_host1x_device(dev);
__host1x_device_del(device);
kfree(device);
}
static int host1x_device_add(struct host1x *host1x,
struct host1x_driver *driver)
{
struct host1x_client *client, *tmp;
struct host1x_subdev *subdev;
struct host1x_device *device;
int err;
device = kzalloc(sizeof(*device), GFP_KERNEL);
if (!device)
return -ENOMEM;
device_initialize(&device->dev);
mutex_init(&device->subdevs_lock);
INIT_LIST_HEAD(&device->subdevs);
INIT_LIST_HEAD(&device->active);
mutex_init(&device->clients_lock);
INIT_LIST_HEAD(&device->clients);
INIT_LIST_HEAD(&device->list);
device->driver = driver;
device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
device->dev.dma_mask = &device->dev.coherent_dma_mask;
dev_set_name(&device->dev, "%s", driver->driver.name);
device->dev.release = host1x_device_release;
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
of_dma_configure(&device->dev, host1x->dev->of_node, true);
device->dev.dma_parms = &device->dma_parms;
dma_set_max_seg_size(&device->dev, UINT_MAX);
err = host1x_device_parse_dt(device, driver);
if (err < 0) {
kfree(device);
return err;
}
list_add_tail(&device->list, &host1x->devices);
mutex_lock(&clients_lock);
list_for_each_entry_safe(client, tmp, &clients, list) {
list_for_each_entry(subdev, &device->subdevs, list) {
if (subdev->np == client->dev->of_node) {
host1x_subdev_register(device, subdev, client);
break;
}
}
}
mutex_unlock(&clients_lock);
return 0;
}
/*
* Removes a device by first unregistering any subdevices and then removing
* itself from the list of devices.
*
* This function must be called with the host1x->devices_lock held.
*/
static void host1x_device_del(struct host1x *host1x,
struct host1x_device *device)
{
if (device->registered) {
device->registered = false;
device_del(&device->dev);
}
put_device(&device->dev);
}
static void host1x_attach_driver(struct host1x *host1x,
struct host1x_driver *driver)
{
struct host1x_device *device;
int err;
mutex_lock(&host1x->devices_lock);
list_for_each_entry(device, &host1x->devices, list) {
if (device->driver == driver) {
mutex_unlock(&host1x->devices_lock);
return;
}
}
err = host1x_device_add(host1x, driver);
if (err < 0)
dev_err(host1x->dev, "failed to allocate device: %d\n", err);
mutex_unlock(&host1x->devices_lock);
}
static void host1x_detach_driver(struct host1x *host1x,
struct host1x_driver *driver)
{
struct host1x_device *device, *tmp;
mutex_lock(&host1x->devices_lock);
list_for_each_entry_safe(device, tmp, &host1x->devices, list)
if (device->driver == driver)
host1x_device_del(host1x, device);
mutex_unlock(&host1x->devices_lock);
}
static int host1x_devices_show(struct seq_file *s, void *data)
{
struct host1x *host1x = s->private;
struct host1x_device *device;
mutex_lock(&host1x->devices_lock);
list_for_each_entry(device, &host1x->devices, list) {
struct host1x_subdev *subdev;
seq_printf(s, "%s\n", dev_name(&device->dev));
mutex_lock(&device->subdevs_lock);
list_for_each_entry(subdev, &device->active, list)
seq_printf(s, " %pOFf: %s\n", subdev->np,
dev_name(subdev->client->dev));
list_for_each_entry(subdev, &device->subdevs, list)
seq_printf(s, " %pOFf:\n", subdev->np);
mutex_unlock(&device->subdevs_lock);
}
mutex_unlock(&host1x->devices_lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(host1x_devices);
/**
* host1x_register() - register a host1x controller
* @host1x: host1x controller
*
* The host1x controller driver uses this to register a host1x controller with
* the infrastructure. Note that all Tegra SoC generations have only ever come
* with a single host1x instance, so this function is somewhat academic.
*/
int host1x_register(struct host1x *host1x)
{
struct host1x_driver *driver;
mutex_lock(&devices_lock);
list_add_tail(&host1x->list, &devices);
mutex_unlock(&devices_lock);
mutex_lock(&drivers_lock);
list_for_each_entry(driver, &drivers, list)
host1x_attach_driver(host1x, driver);
mutex_unlock(&drivers_lock);
debugfs_create_file("devices", S_IRUGO, host1x->debugfs, host1x,
&host1x_devices_fops);
return 0;
}
/**
* host1x_unregister() - unregister a host1x controller
* @host1x: host1x controller
*
* The host1x controller driver uses this to remove a host1x controller from
* the infrastructure.
*/
int host1x_unregister(struct host1x *host1x)
{
struct host1x_driver *driver;
mutex_lock(&drivers_lock);
list_for_each_entry(driver, &drivers, list)
host1x_detach_driver(host1x, driver);
mutex_unlock(&drivers_lock);
mutex_lock(&devices_lock);
list_del_init(&host1x->list);
mutex_unlock(&devices_lock);
return 0;
}
static int host1x_device_probe(struct device *dev)
{
struct host1x_driver *driver = to_host1x_driver(dev->driver);
struct host1x_device *device = to_host1x_device(dev);
if (driver->probe)
return driver->probe(device);
return 0;
}
static int host1x_device_remove(struct device *dev)
{
struct host1x_driver *driver = to_host1x_driver(dev->driver);
struct host1x_device *device = to_host1x_device(dev);
if (driver->remove)
return driver->remove(device);
return 0;
}
static void host1x_device_shutdown(struct device *dev)
{
struct host1x_driver *driver = to_host1x_driver(dev->driver);
struct host1x_device *device = to_host1x_device(dev);
if (driver->shutdown)
driver->shutdown(device);
}
/**
* host1x_driver_register_full() - register a host1x driver
* @driver: host1x driver
* @owner: owner module
*
* Drivers for host1x logical devices call this function to register a driver
* with the infrastructure. Note that since these drive logical devices, the
* registration of the driver actually triggers the logical device creation.
* A logical device will be created for each host1x instance.
*/
int host1x_driver_register_full(struct host1x_driver *driver,
struct module *owner)
{
struct host1x *host1x;
INIT_LIST_HEAD(&driver->list);
mutex_lock(&drivers_lock);
list_add_tail(&driver->list, &drivers);
mutex_unlock(&drivers_lock);
mutex_lock(&devices_lock);
list_for_each_entry(host1x, &devices, list)
host1x_attach_driver(host1x, driver);
mutex_unlock(&devices_lock);
driver->driver.bus = &host1x_bus_type;
driver->driver.owner = owner;
driver->driver.probe = host1x_device_probe;
driver->driver.remove = host1x_device_remove;
driver->driver.shutdown = host1x_device_shutdown;
return driver_register(&driver->driver);
}
EXPORT_SYMBOL(host1x_driver_register_full);
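/*
 * Illustrative usage sketch (added, not part of the original source): a
 * subsystem driver registers itself on the host1x bus roughly like this. The
 * names foo_driver, foo_probe, foo_remove and foo_subdevs are hypothetical.
 *
 *        static struct host1x_driver foo_driver = {
 *                .driver = {
 *                        .name = "foo",
 *                },
 *                .probe = foo_probe,
 *                .remove = foo_remove,
 *                .subdevs = foo_subdevs,
 *        };
 *        int err;
 *
 *        err = host1x_driver_register_full(&foo_driver, THIS_MODULE);
 */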
/**
* host1x_driver_unregister() - unregister a host1x driver
* @driver: host1x driver
*
* Unbinds the driver from each of the host1x logical devices that it is
* bound to, effectively removing the subsystem devices that they represent.
*/
void host1x_driver_unregister(struct host1x_driver *driver)
{
struct host1x *host1x;
driver_unregister(&driver->driver);
mutex_lock(&devices_lock);
list_for_each_entry(host1x, &devices, list)
host1x_detach_driver(host1x, driver);
mutex_unlock(&devices_lock);
mutex_lock(&drivers_lock);
list_del_init(&driver->list);
mutex_unlock(&drivers_lock);
}
EXPORT_SYMBOL(host1x_driver_unregister);
/**
* __host1x_client_init() - initialize a host1x client
* @client: host1x client
* @key: lock class key for the client-specific mutex
*/
void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
{
host1x_bo_cache_init(&client->cache);
INIT_LIST_HEAD(&client->list);
__mutex_init(&client->lock, "host1x client lock", key);
client->usecount = 0;
}
EXPORT_SYMBOL(__host1x_client_init);
/**
* host1x_client_exit() - uninitialize a host1x client
* @client: host1x client
*/
void host1x_client_exit(struct host1x_client *client)
{
mutex_destroy(&client->lock);
}
EXPORT_SYMBOL(host1x_client_exit);
/**
* __host1x_client_register() - register a host1x client
* @client: host1x client
*
* Registers a host1x client with each host1x controller instance. Note that
* each client will only match their parent host1x controller and will only be
* associated with that instance. Once all clients have been registered with
* their parent host1x controller, the infrastructure will set up the logical
* device and call host1x_device_init(), which will in turn call each client's
* &host1x_client_ops.init implementation.
*/
int __host1x_client_register(struct host1x_client *client)
{
struct host1x *host1x;
int err;
mutex_lock(&devices_lock);
list_for_each_entry(host1x, &devices, list) {
err = host1x_add_client(host1x, client);
if (!err) {
mutex_unlock(&devices_lock);
return 0;
}
}
mutex_unlock(&devices_lock);
mutex_lock(&clients_lock);
list_add_tail(&client->list, &clients);
mutex_unlock(&clients_lock);
return 0;
}
EXPORT_SYMBOL(__host1x_client_register);
/**
* host1x_client_unregister() - unregister a host1x client
* @client: host1x client
*
* Removes a host1x client from its host1x controller instance. If a logical
* device has already been initialized, it will be torn down.
*/
void host1x_client_unregister(struct host1x_client *client)
{
struct host1x_client *c;
struct host1x *host1x;
int err;
mutex_lock(&devices_lock);
list_for_each_entry(host1x, &devices, list) {
err = host1x_del_client(host1x, client);
if (!err) {
mutex_unlock(&devices_lock);
return;
}
}
mutex_unlock(&devices_lock);
mutex_lock(&clients_lock);
list_for_each_entry(c, &clients, list) {
if (c == client) {
list_del_init(&c->list);
break;
}
}
mutex_unlock(&clients_lock);
host1x_bo_cache_destroy(&client->cache);
}
EXPORT_SYMBOL(host1x_client_unregister);
int host1x_client_suspend(struct host1x_client *client)
{
int err = 0;
mutex_lock(&client->lock);
if (client->usecount == 1) {
if (client->ops && client->ops->suspend) {
err = client->ops->suspend(client);
if (err < 0)
goto unlock;
}
}
client->usecount--;
dev_dbg(client->dev, "use count: %u\n", client->usecount);
if (client->parent) {
err = host1x_client_suspend(client->parent);
if (err < 0)
goto resume;
}
goto unlock;
resume:
if (client->usecount == 0)
if (client->ops && client->ops->resume)
client->ops->resume(client);
client->usecount++;
unlock:
mutex_unlock(&client->lock);
return err;
}
EXPORT_SYMBOL(host1x_client_suspend);
int host1x_client_resume(struct host1x_client *client)
{
int err = 0;
mutex_lock(&client->lock);
if (client->parent) {
err = host1x_client_resume(client->parent);
if (err < 0)
goto unlock;
}
if (client->usecount == 0) {
if (client->ops && client->ops->resume) {
err = client->ops->resume(client);
if (err < 0)
goto suspend;
}
}
client->usecount++;
dev_dbg(client->dev, "use count: %u\n", client->usecount);
goto unlock;
suspend:
if (client->parent)
host1x_client_suspend(client->parent);
unlock:
mutex_unlock(&client->lock);
return err;
}
EXPORT_SYMBOL(host1x_client_resume);
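/*
 * Illustrative usage sketch (added, not part of the original source): clients
 * bracket hardware access with balanced resume/suspend calls so that parent
 * clients are powered up and released as needed.
 *
 *        err = host1x_client_resume(client);
 *        if (err < 0)
 *                return err;
 *        ... access hardware ...
 *        host1x_client_suspend(client);
 */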
struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
enum dma_data_direction dir,
struct host1x_bo_cache *cache)
{
struct host1x_bo_mapping *mapping;
if (cache) {
mutex_lock(&cache->lock);
list_for_each_entry(mapping, &cache->mappings, entry) {
if (mapping->bo == bo && mapping->direction == dir) {
kref_get(&mapping->ref);
goto unlock;
}
}
}
mapping = bo->ops->pin(dev, bo, dir);
if (IS_ERR(mapping))
goto unlock;
spin_lock(&mapping->bo->lock);
list_add_tail(&mapping->list, &bo->mappings);
spin_unlock(&mapping->bo->lock);
if (cache) {
INIT_LIST_HEAD(&mapping->entry);
mapping->cache = cache;
list_add_tail(&mapping->entry, &cache->mappings);
/* bump reference count to track the copy in the cache */
kref_get(&mapping->ref);
}
unlock:
if (cache)
mutex_unlock(&cache->lock);
return mapping;
}
EXPORT_SYMBOL(host1x_bo_pin);
static void __host1x_bo_unpin(struct kref *ref)
{
struct host1x_bo_mapping *mapping = to_host1x_bo_mapping(ref);
/*
* When the last reference of the mapping goes away, make sure to remove the mapping from
* the cache.
*/
if (mapping->cache)
list_del(&mapping->entry);
spin_lock(&mapping->bo->lock);
list_del(&mapping->list);
spin_unlock(&mapping->bo->lock);
mapping->bo->ops->unpin(mapping);
}
void host1x_bo_unpin(struct host1x_bo_mapping *mapping)
{
struct host1x_bo_cache *cache = mapping->cache;
if (cache)
mutex_lock(&cache->lock);
kref_put(&mapping->ref, __host1x_bo_unpin);
if (cache)
mutex_unlock(&cache->lock);
}
EXPORT_SYMBOL(host1x_bo_unpin);
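/*
 * Illustrative usage sketch (added, not part of the original source): pin a
 * buffer object for device access and release the mapping afterwards.
 * Passing a NULL cache (no caching) is an assumption.
 *
 *        struct host1x_bo_mapping *map;
 *
 *        map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE, NULL);
 *        if (IS_ERR(map))
 *                return PTR_ERR(map);
 *        ...
 *        host1x_bo_unpin(map);
 */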
| linux-master | drivers/gpu/host1x/bus.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra host1x Command DMA
*
* Copyright (c) 2010-2013, NVIDIA Corporation.
*/
#include <asm/cacheflush.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <trace/events/host1x.h>
#include "cdma.h"
#include "channel.h"
#include "dev.h"
#include "debug.h"
#include "job.h"
/*
* push_buffer
*
* The push buffer is a circular array of words to be fetched by command DMA.
* Note that it works slightly differently to the sync queue; fence == pos
* means that the push buffer is full, not empty.
*/
/*
* Typically the commands written into the push buffer are a pair of words. We
* use slots to represent each of these pairs and to simplify things. Note the
* strange number of slots allocated here. 512 slots will fit exactly within a
* single memory page. We also need one additional word at the end of the push
* buffer for the RESTART opcode that will instruct the CDMA to jump back to
* the beginning of the push buffer. With 512 slots, this means that we'll use
* 2 memory pages and waste 4092 bytes of the second page that will never be
* used.
*/
#define HOST1X_PUSHBUFFER_SLOTS 511
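/*
 * Worked out (added for clarity): 511 slots * 8 bytes = 4088 bytes, plus the
 * trailing 4-byte RESTART word = 4092 bytes, which fits in one 4 KiB page.
 * With 512 slots the total would be 4100 bytes and spill into a second page.
 */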
/*
* Clean up push buffer resources
*/
static void host1x_pushbuffer_destroy(struct push_buffer *pb)
{
struct host1x_cdma *cdma = pb_to_cdma(pb);
struct host1x *host1x = cdma_to_host1x(cdma);
if (!pb->mapped)
return;
if (host1x->domain) {
iommu_unmap(host1x->domain, pb->dma, pb->alloc_size);
free_iova(&host1x->iova, iova_pfn(&host1x->iova, pb->dma));
}
dma_free_wc(host1x->dev, pb->alloc_size, pb->mapped, pb->phys);
pb->mapped = NULL;
pb->phys = 0;
}
/*
* Init push buffer resources
*/
static int host1x_pushbuffer_init(struct push_buffer *pb)
{
struct host1x_cdma *cdma = pb_to_cdma(pb);
struct host1x *host1x = cdma_to_host1x(cdma);
struct iova *alloc;
u32 size;
int err;
pb->mapped = NULL;
pb->phys = 0;
pb->size = HOST1X_PUSHBUFFER_SLOTS * 8;
size = pb->size + 4;
/* initialize buffer pointers */
pb->fence = pb->size - 8;
pb->pos = 0;
if (host1x->domain) {
unsigned long shift;
size = iova_align(&host1x->iova, size);
pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
GFP_KERNEL);
if (!pb->mapped)
return -ENOMEM;
shift = iova_shift(&host1x->iova);
alloc = alloc_iova(&host1x->iova, size >> shift,
host1x->iova_end >> shift, true);
if (!alloc) {
err = -ENOMEM;
goto iommu_free_mem;
}
pb->dma = iova_dma_addr(&host1x->iova, alloc);
err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
IOMMU_READ, GFP_KERNEL);
if (err)
goto iommu_free_iova;
} else {
pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
GFP_KERNEL);
if (!pb->mapped)
return -ENOMEM;
pb->dma = pb->phys;
}
pb->alloc_size = size;
host1x_hw_pushbuffer_init(host1x, pb);
return 0;
iommu_free_iova:
__free_iova(&host1x->iova, alloc);
iommu_free_mem:
dma_free_wc(host1x->dev, size, pb->mapped, pb->phys);
return err;
}
/*
* Push two words to the push buffer
* Caller must ensure push buffer is not full
*/
static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
{
u32 *p = (u32 *)((void *)pb->mapped + pb->pos);
WARN_ON(pb->pos == pb->fence);
*(p++) = op1;
*(p++) = op2;
pb->pos += 8;
if (pb->pos >= pb->size)
pb->pos -= pb->size;
}
/*
* Pop a number of two word slots from the push buffer
* Caller must ensure push buffer is not empty
*/
static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
{
/* Advance the next write position */
pb->fence += slots * 8;
if (pb->fence >= pb->size)
pb->fence -= pb->size;
}
/*
* Return the number of two word slots free in the push buffer
*/
static u32 host1x_pushbuffer_space(struct push_buffer *pb)
{
unsigned int fence = pb->fence;
if (pb->fence < pb->pos)
fence += pb->size;
return (fence - pb->pos) / 8;
}
/*
* Sleep (if necessary) until the requested event happens
* - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
* - Returns 1
* - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
* - Return the amount of space (> 0)
* Must be called with the cdma lock held.
*/
unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
enum cdma_event event)
{
for (;;) {
struct push_buffer *pb = &cdma->push_buffer;
unsigned int space;
switch (event) {
case CDMA_EVENT_SYNC_QUEUE_EMPTY:
space = list_empty(&cdma->sync_queue) ? 1 : 0;
break;
case CDMA_EVENT_PUSH_BUFFER_SPACE:
space = host1x_pushbuffer_space(pb);
break;
default:
WARN_ON(1);
return -EINVAL;
}
if (space)
return space;
trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
event);
/* If somebody has managed to already start waiting, yield */
if (cdma->event != CDMA_EVENT_NONE) {
mutex_unlock(&cdma->lock);
schedule();
mutex_lock(&cdma->lock);
continue;
}
cdma->event = event;
mutex_unlock(&cdma->lock);
wait_for_completion(&cdma->complete);
mutex_lock(&cdma->lock);
}
return 0;
}
/*
* Sleep (if necessary) until the push buffer has enough free space.
*
* Must be called with the cdma lock held.
*/
static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
struct host1x_cdma *cdma,
unsigned int needed)
{
while (true) {
struct push_buffer *pb = &cdma->push_buffer;
unsigned int space;
space = host1x_pushbuffer_space(pb);
if (space >= needed)
break;
trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
CDMA_EVENT_PUSH_BUFFER_SPACE);
host1x_hw_cdma_flush(host1x, cdma);
/* If somebody has managed to already start waiting, yield */
if (cdma->event != CDMA_EVENT_NONE) {
mutex_unlock(&cdma->lock);
schedule();
mutex_lock(&cdma->lock);
continue;
}
cdma->event = CDMA_EVENT_PUSH_BUFFER_SPACE;
mutex_unlock(&cdma->lock);
wait_for_completion(&cdma->complete);
mutex_lock(&cdma->lock);
}
return 0;
}
/*
* Start timer that tracks the time spent by the job.
* Must be called with the cdma lock held.
*/
static void cdma_start_timer_locked(struct host1x_cdma *cdma,
struct host1x_job *job)
{
if (cdma->timeout.client) {
/* timer already started */
return;
}
cdma->timeout.client = job->client;
cdma->timeout.syncpt = job->syncpt;
cdma->timeout.syncpt_val = job->syncpt_end;
cdma->timeout.start_ktime = ktime_get();
schedule_delayed_work(&cdma->timeout.wq,
msecs_to_jiffies(job->timeout));
}
/*
* Stop timer when a buffer submission completes.
* Must be called with the cdma lock held.
*/
static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
{
cancel_delayed_work(&cdma->timeout.wq);
cdma->timeout.client = NULL;
}
/*
* For all sync queue entries that have already finished according to the
* current sync point registers:
* - unpin & unref their mems
* - pop their push buffer slots
* - remove them from the sync queue
* This is normally called from the host code's worker thread, but can be
* called manually if necessary.
* Must be called with the cdma lock held.
*/
static void update_cdma_locked(struct host1x_cdma *cdma)
{
bool signal = false;
struct host1x_job *job, *n;
/*
* Walk the sync queue, reading the sync point registers as necessary,
* to consume as many sync queue entries as possible without blocking
*/
list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
struct host1x_syncpt *sp = job->syncpt;
/* Check whether this syncpt has completed, and bail if not */
if (!host1x_syncpt_is_expired(sp, job->syncpt_end) &&
!job->cancelled) {
/* Start timer on next pending syncpt */
if (job->timeout)
cdma_start_timer_locked(cdma, job);
break;
}
/* Cancel timeout, when a buffer completes */
if (cdma->timeout.client)
stop_cdma_timer_locked(cdma);
/* Unpin the memory */
host1x_job_unpin(job);
/* Pop push buffer slots */
if (job->num_slots) {
struct push_buffer *pb = &cdma->push_buffer;
host1x_pushbuffer_pop(pb, job->num_slots);
if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
signal = true;
}
list_del(&job->list);
host1x_job_put(job);
}
if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY &&
list_empty(&cdma->sync_queue))
signal = true;
if (signal) {
cdma->event = CDMA_EVENT_NONE;
complete(&cdma->complete);
}
}
void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
struct device *dev)
{
struct host1x *host1x = cdma_to_host1x(cdma);
u32 restart_addr, syncpt_incrs, syncpt_val;
struct host1x_job *job, *next_job = NULL;
syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
dev_dbg(dev, "%s: starting cleanup (thresh %d)\n",
__func__, syncpt_val);
/*
* Move the sync_queue read pointer to the first entry that hasn't
* completed based on the current HW syncpt value. It's likely there
* won't be any (i.e. we're still at the head), but covers the case
* where a syncpt incr happens just prior/during the teardown.
*/
dev_dbg(dev, "%s: skip completed buffers still in sync_queue\n",
__func__);
list_for_each_entry(job, &cdma->sync_queue, list) {
if (syncpt_val < job->syncpt_end) {
if (!list_is_last(&job->list, &cdma->sync_queue))
next_job = list_next_entry(job, list);
goto syncpt_incr;
}
host1x_job_dump(dev, job);
}
/* all jobs have been completed */
job = NULL;
syncpt_incr:
/*
* Increment with CPU the remaining syncpts of a partially executed job.
*
* CDMA will continue execution starting with the next job or will get
* into idle state.
*/
if (next_job)
restart_addr = next_job->first_get;
else
restart_addr = cdma->last_pos;
if (!job)
goto resume;
/* do CPU increments for the remaining syncpts */
if (job->syncpt_recovery) {
dev_dbg(dev, "%s: perform CPU incr on pending buffers\n",
__func__);
/* won't need a timeout when replayed */
job->timeout = 0;
syncpt_incrs = job->syncpt_end - syncpt_val;
dev_dbg(dev, "%s: CPU incr (%d)\n", __func__, syncpt_incrs);
host1x_job_dump(dev, job);
/* safe to use CPU to incr syncpts */
host1x_hw_cdma_timeout_cpu_incr(host1x, cdma, job->first_get,
syncpt_incrs, job->syncpt_end,
job->num_slots);
dev_dbg(dev, "%s: finished sync_queue modification\n",
__func__);
} else {
struct host1x_job *failed_job = job;
host1x_job_dump(dev, job);
host1x_syncpt_set_locked(job->syncpt);
failed_job->cancelled = true;
list_for_each_entry_continue(job, &cdma->sync_queue, list) {
unsigned int i;
if (job->syncpt != failed_job->syncpt)
continue;
for (i = 0; i < job->num_slots; i++) {
unsigned int slot = (job->first_get/8 + i) %
HOST1X_PUSHBUFFER_SLOTS;
u32 *mapped = cdma->push_buffer.mapped;
/*
* Overwrite opcodes with 0 word writes
* to offset 0xbad. This does nothing but
* has an easily detected signature in debug
* traces.
*
* On systems with MLOCK enforcement enabled,
* the above 0 word writes would fall foul of
* the enforcement. As such, in the first slot
* put a RESTART_W opcode to the beginning
* of the next job. We don't use this for older
* chips since those only support the RESTART
* opcode with inconvenient alignment requirements.
*/
if (i == 0 && host1x->info->has_wide_gather) {
unsigned int next_job = (job->first_get/8 + job->num_slots)
% HOST1X_PUSHBUFFER_SLOTS;
mapped[2*slot+0] = (0xd << 28) | (next_job * 2);
mapped[2*slot+1] = 0x0;
} else {
mapped[2*slot+0] = 0x1bad0000;
mapped[2*slot+1] = 0x1bad0000;
}
}
job->cancelled = true;
}
wmb();
update_cdma_locked(cdma);
}
resume:
/* roll back DMAGET and start up channel again */
host1x_hw_cdma_resume(host1x, cdma, restart_addr);
}
static void cdma_update_work(struct work_struct *work)
{
struct host1x_cdma *cdma = container_of(work, struct host1x_cdma, update_work);
mutex_lock(&cdma->lock);
update_cdma_locked(cdma);
mutex_unlock(&cdma->lock);
}
/*
* Create a cdma
*/
int host1x_cdma_init(struct host1x_cdma *cdma)
{
int err;
mutex_init(&cdma->lock);
init_completion(&cdma->complete);
INIT_WORK(&cdma->update_work, cdma_update_work);
INIT_LIST_HEAD(&cdma->sync_queue);
cdma->event = CDMA_EVENT_NONE;
cdma->running = false;
cdma->torndown = false;
err = host1x_pushbuffer_init(&cdma->push_buffer);
if (err)
return err;
return 0;
}
/*
* Destroy a cdma
*/
int host1x_cdma_deinit(struct host1x_cdma *cdma)
{
struct push_buffer *pb = &cdma->push_buffer;
struct host1x *host1x = cdma_to_host1x(cdma);
if (cdma->running) {
pr_warn("%s: CDMA still running\n", __func__);
return -EBUSY;
}
host1x_pushbuffer_destroy(pb);
host1x_hw_cdma_timeout_destroy(host1x, cdma);
return 0;
}
/*
* Begin a cdma submit
*/
int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
{
struct host1x *host1x = cdma_to_host1x(cdma);
mutex_lock(&cdma->lock);
/*
* Check if syncpoint was locked due to previous job timeout.
* This needs to be done within the cdma lock to avoid a race
* with the timeout handler.
*/
if (job->syncpt->locked) {
mutex_unlock(&cdma->lock);
return -EPERM;
}
if (job->timeout) {
/* init state on first submit with timeout value */
if (!cdma->timeout.initialized) {
int err;
err = host1x_hw_cdma_timeout_init(host1x, cdma);
if (err) {
mutex_unlock(&cdma->lock);
return err;
}
}
}
if (!cdma->running)
host1x_hw_cdma_start(host1x, cdma);
cdma->slots_free = 0;
cdma->slots_used = 0;
cdma->first_get = cdma->push_buffer.pos;
trace_host1x_cdma_begin(dev_name(job->channel->dev));
return 0;
}
/*
* Push two words into a push buffer slot
* Blocks as necessary if the push buffer is full.
*/
void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
{
struct host1x *host1x = cdma_to_host1x(cdma);
struct push_buffer *pb = &cdma->push_buffer;
u32 slots_free = cdma->slots_free;
if (host1x_debug_trace_cmdbuf)
trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
op1, op2);
if (slots_free == 0) {
host1x_hw_cdma_flush(host1x, cdma);
slots_free = host1x_cdma_wait_locked(cdma,
CDMA_EVENT_PUSH_BUFFER_SPACE);
}
cdma->slots_free = slots_free - 1;
cdma->slots_used++;
host1x_pushbuffer_push(pb, op1, op2);
}
/*
* Push four words into two consecutive push buffer slots. Note that extra
* care needs to be taken not to split the two slots across the end of the
* push buffer. Otherwise the RESTART opcode at the end of the push buffer
* that ensures processing will restart at the beginning will break up the
* four words.
*
* Blocks as necessary if the push buffer is full.
*/
void host1x_cdma_push_wide(struct host1x_cdma *cdma, u32 op1, u32 op2,
u32 op3, u32 op4)
{
struct host1x_channel *channel = cdma_to_channel(cdma);
struct host1x *host1x = cdma_to_host1x(cdma);
struct push_buffer *pb = &cdma->push_buffer;
unsigned int space = cdma->slots_free;
unsigned int needed = 2, extra = 0;
if (host1x_debug_trace_cmdbuf)
trace_host1x_cdma_push_wide(dev_name(channel->dev), op1, op2,
op3, op4);
/* compute number of extra slots needed for padding */
if (pb->pos + 16 > pb->size) {
extra = (pb->size - pb->pos) / 8;
needed += extra;
}
host1x_cdma_wait_pushbuffer_space(host1x, cdma, needed);
space = host1x_pushbuffer_space(pb);
cdma->slots_free = space - needed;
cdma->slots_used += needed;
if (extra > 0) {
/*
* If there isn't enough space at the tail of the pushbuffer,
* insert a RESTART(0) here to go back to the beginning.
* The code above adjusted the indexes appropriately.
*/
host1x_pushbuffer_push(pb, (0x5 << 28), 0xdead0000);
}
host1x_pushbuffer_push(pb, op1, op2);
host1x_pushbuffer_push(pb, op3, op4);
}
/*
* End a cdma submit
* Kick off DMA, add the job to the sync queue, and record the number of
* slots to be freed from the pushbuffer. The handles for a submit must all
* be pinned at the same time, but they can be unpinned in smaller chunks.
*/
void host1x_cdma_end(struct host1x_cdma *cdma,
struct host1x_job *job)
{
struct host1x *host1x = cdma_to_host1x(cdma);
bool idle = list_empty(&cdma->sync_queue);
host1x_hw_cdma_flush(host1x, cdma);
job->first_get = cdma->first_get;
job->num_slots = cdma->slots_used;
host1x_job_get(job);
list_add_tail(&job->list, &cdma->sync_queue);
/* start timer on idle -> active transitions */
if (job->timeout && idle)
cdma_start_timer_locked(cdma, job);
trace_host1x_cdma_end(dev_name(job->channel->dev));
mutex_unlock(&cdma->lock);
}
/*
* Update cdma state according to current sync point values
*/
void host1x_cdma_update(struct host1x_cdma *cdma)
{
schedule_work(&cdma->update_work);
}
| linux-master | drivers/gpu/host1x/cdma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Syncpoint dma_fence implementation
*
* Copyright (c) 2020, NVIDIA Corporation.
*/
#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sync_file.h>
#include "fence.h"
#include "intr.h"
#include "syncpt.h"
static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
{
return "host1x";
}
static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
{
return "syncpoint";
}
static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
{
return container_of(f, struct host1x_syncpt_fence, base);
}
static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
{
struct host1x_syncpt_fence *sf = to_host1x_fence(f);
if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
return false;
/* Reference for interrupt path. */
dma_fence_get(f);
/*
* The dma_fence framework requires the fence driver to keep a
* reference to any fences for which 'enable_signaling' has been
* called (and that have not been signalled).
*
* We cannot currently always guarantee that all fences get signalled
* or cancelled. For such situations, set up a timeout, so
* that long-lasting fences will get reaped eventually.
*/
if (sf->timeout) {
/* Reference for timeout path. */
dma_fence_get(f);
schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
}
host1x_intr_add_fence_locked(sf->sp->host, sf);
/*
* The fence may get signalled at any time after the above call,
* so we need to initialize all state used by signalling
* before it.
*/
return true;
}
static const struct dma_fence_ops host1x_syncpt_fence_ops = {
.get_driver_name = host1x_syncpt_fence_get_driver_name,
.get_timeline_name = host1x_syncpt_fence_get_timeline_name,
.enable_signaling = host1x_syncpt_fence_enable_signaling,
};
void host1x_fence_signal(struct host1x_syncpt_fence *f)
{
if (atomic_xchg(&f->signaling, 1)) {
/*
* Already on timeout path, but we removed the fence before
* timeout path could, so drop interrupt path reference.
*/
dma_fence_put(&f->base);
return;
}
if (f->timeout && cancel_delayed_work(&f->timeout_work)) {
/*
* We know that the timeout path will not be entered.
* Safe to drop the timeout path's reference now.
*/
dma_fence_put(&f->base);
}
dma_fence_signal_locked(&f->base);
dma_fence_put(&f->base);
}
static void do_fence_timeout(struct work_struct *work)
{
struct delayed_work *dwork = (struct delayed_work *)work;
struct host1x_syncpt_fence *f =
container_of(dwork, struct host1x_syncpt_fence, timeout_work);
if (atomic_xchg(&f->signaling, 1)) {
/* Already on interrupt path, drop timeout path reference if any. */
if (f->timeout)
dma_fence_put(&f->base);
return;
}
if (host1x_intr_remove_fence(f->sp->host, f)) {
/*
* Managed to remove fence from queue, so it's safe to drop
* the interrupt path's reference.
*/
dma_fence_put(&f->base);
}
dma_fence_set_error(&f->base, -ETIMEDOUT);
dma_fence_signal(&f->base);
if (f->timeout)
dma_fence_put(&f->base);
}
struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
bool timeout)
{
struct host1x_syncpt_fence *fence;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return ERR_PTR(-ENOMEM);
fence->sp = sp;
fence->threshold = threshold;
fence->timeout = timeout;
dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &sp->fences.lock,
dma_fence_context_alloc(1), 0);
INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);
return &fence->base;
}
EXPORT_SYMBOL(host1x_fence_create);
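/*
 * Illustrative usage sketch (added, not part of the original source): create
 * a fence for a syncpoint threshold and wait on it through the generic
 * dma_fence API, similar to host1x_syncpt_wait() in syncpt.c. The threshold
 * and timeout values are assumptions.
 *
 *        struct dma_fence *fence;
 *        long remaining;
 *
 *        fence = host1x_fence_create(sp, threshold, true);
 *        if (IS_ERR(fence))
 *                return PTR_ERR(fence);
 *        remaining = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *        dma_fence_put(fence);
 */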
void host1x_fence_cancel(struct dma_fence *f)
{
struct host1x_syncpt_fence *sf = to_host1x_fence(f);
schedule_delayed_work(&sf->timeout_work, 0);
flush_delayed_work(&sf->timeout_work);
}
EXPORT_SYMBOL(host1x_fence_cancel);
| linux-master | drivers/gpu/host1x/fence.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, NVIDIA Corporation.
*/
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include "context.h"
#include "dev.h"
static void host1x_memory_context_release(struct device *dev)
{
/* context device is freed in host1x_memory_context_list_free() */
}
int host1x_memory_context_list_init(struct host1x *host1x)
{
struct host1x_memory_context_list *cdl = &host1x->context_list;
struct device_node *node = host1x->dev->of_node;
struct host1x_memory_context *ctx;
unsigned int i;
int err;
cdl->devs = NULL;
cdl->len = 0;
mutex_init(&cdl->lock);
err = of_property_count_u32_elems(node, "iommu-map");
if (err < 0)
return 0;
cdl->devs = kcalloc(err, sizeof(*cdl->devs), GFP_KERNEL);
if (!cdl->devs)
return -ENOMEM;
cdl->len = err / 4;
for (i = 0; i < cdl->len; i++) {
ctx = &cdl->devs[i];
ctx->host = host1x;
device_initialize(&ctx->dev);
/*
* Due to an issue with T194 NVENC, only 38 bits can be used.
* Anyway, 256GiB of IOVA ought to be enough for anyone.
*/
ctx->dma_mask = DMA_BIT_MASK(38);
ctx->dev.dma_mask = &ctx->dma_mask;
ctx->dev.coherent_dma_mask = ctx->dma_mask;
dev_set_name(&ctx->dev, "host1x-ctx.%d", i);
ctx->dev.bus = &host1x_context_device_bus_type;
ctx->dev.parent = host1x->dev;
ctx->dev.release = host1x_memory_context_release;
dma_set_max_seg_size(&ctx->dev, UINT_MAX);
err = device_add(&ctx->dev);
if (err) {
dev_err(host1x->dev, "could not add context device %d: %d\n", i, err);
put_device(&ctx->dev);
goto unreg_devices;
}
err = of_dma_configure_id(&ctx->dev, node, true, &i);
if (err) {
dev_err(host1x->dev, "IOMMU configuration failed for context device %d: %d\n",
i, err);
device_unregister(&ctx->dev);
goto unreg_devices;
}
if (!tegra_dev_iommu_get_stream_id(&ctx->dev, &ctx->stream_id) ||
!device_iommu_mapped(&ctx->dev)) {
dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
device_unregister(&ctx->dev);
/*
* This means that if IOMMU is disabled but context devices
* are defined in the device tree, Host1x will fail to probe.
* That's probably OK in this day and age.
*/
err = -EINVAL;
goto unreg_devices;
}
}
return 0;
unreg_devices:
while (i--)
device_unregister(&cdl->devs[i].dev);
kfree(cdl->devs);
cdl->devs = NULL;
cdl->len = 0;
return err;
}
void host1x_memory_context_list_free(struct host1x_memory_context_list *cdl)
{
unsigned int i;
for (i = 0; i < cdl->len; i++)
device_unregister(&cdl->devs[i].dev);
kfree(cdl->devs);
cdl->len = 0;
}
struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
struct device *dev,
struct pid *pid)
{
struct host1x_memory_context_list *cdl = &host1x->context_list;
struct host1x_memory_context *free = NULL;
int i;
if (!cdl->len)
return ERR_PTR(-EOPNOTSUPP);
mutex_lock(&cdl->lock);
for (i = 0; i < cdl->len; i++) {
struct host1x_memory_context *cd = &cdl->devs[i];
if (cd->dev.iommu->iommu_dev != dev->iommu->iommu_dev)
continue;
if (cd->owner == pid) {
refcount_inc(&cd->ref);
mutex_unlock(&cdl->lock);
return cd;
} else if (!cd->owner && !free) {
free = cd;
}
}
if (!free) {
mutex_unlock(&cdl->lock);
return ERR_PTR(-EBUSY);
}
refcount_set(&free->ref, 1);
free->owner = get_pid(pid);
mutex_unlock(&cdl->lock);
return free;
}
EXPORT_SYMBOL_GPL(host1x_memory_context_alloc);
void host1x_memory_context_get(struct host1x_memory_context *cd)
{
refcount_inc(&cd->ref);
}
EXPORT_SYMBOL_GPL(host1x_memory_context_get);
void host1x_memory_context_put(struct host1x_memory_context *cd)
{
struct host1x_memory_context_list *cdl = &cd->host->context_list;
if (refcount_dec_and_mutex_lock(&cd->ref, &cdl->lock)) {
put_pid(cd->owner);
cd->owner = NULL;
mutex_unlock(&cdl->lock);
}
}
EXPORT_SYMBOL_GPL(host1x_memory_context_put);
| linux-master | drivers/gpu/host1x/context.c |
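Putting the context-list API above together, a client driver would typically borrow one memory context per userspace client, keyed on that client's pid, and program the context's stream ID into its engine. The sketch below is illustrative only: example_use_memory_context() is a hypothetical name, host1x and client_dev are assumed valid, and the calling task's thread-group pid is used as the owner key.

#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/pid.h>
#include <linux/printk.h>
#include <linux/sched.h>

static int example_use_memory_context(struct host1x *host1x,
				      struct device *client_dev)
{
	struct host1x_memory_context *ctx;

	ctx = host1x_memory_context_alloc(host1x, client_dev,
					  task_tgid(current));
	if (IS_ERR(ctx)) {
		/* -EOPNOTSUPP: no context devices; -EBUSY: all contexts in use */
		return PTR_ERR(ctx);
	}

	/*
	 * ctx->stream_id would normally be programmed into the engine so that
	 * its DMA traffic is translated through this context's IOMMU domain.
	 */
	pr_info("using host1x memory context with stream ID %#x\n",
		ctx->stream_id);

	host1x_memory_context_put(ctx);
	return 0;
}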
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra host1x driver
*
* Copyright (c) 2010-2013, NVIDIA Corporation.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <soc/tegra/common.h>
#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif
#include "bus.h"
#include "channel.h"
#include "context.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"
#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"
#include "hw/host1x08.h"
void host1x_common_writel(struct host1x *host1x, u32 v, u32 r)
{
writel(v, host1x->common_regs + r);
}
void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
writel(v, host1x->hv_regs + r);
}
u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
return readl(host1x->hv_regs + r);
}
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
writel(v, sync_regs + r);
}
u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
return readl(sync_regs + r);
}
void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
writel(v, ch->regs + r);
}
u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
return readl(ch->regs + r);
}
static const struct host1x_info host1x01_info = {
.nb_channels = 8,
.nb_pts = 32,
.nb_mlocks = 16,
.nb_bases = 8,
.init = host1x01_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
.has_wide_gather = false,
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
.reserve_vblank_syncpts = true,
};
static const struct host1x_info host1x02_info = {
.nb_channels = 9,
.nb_pts = 32,
.nb_mlocks = 16,
.nb_bases = 12,
.init = host1x02_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
.has_wide_gather = false,
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
.reserve_vblank_syncpts = true,
};
static const struct host1x_info host1x04_info = {
.nb_channels = 12,
.nb_pts = 192,
.nb_mlocks = 16,
.nb_bases = 64,
.init = host1x04_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
.has_wide_gather = false,
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
.reserve_vblank_syncpts = false,
};
static const struct host1x_info host1x05_info = {
.nb_channels = 14,
.nb_pts = 192,
.nb_mlocks = 16,
.nb_bases = 64,
.init = host1x05_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
.has_wide_gather = false,
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
.reserve_vblank_syncpts = false,
};
static const struct host1x_sid_entry tegra186_sid_table[] = {
{
/* VIC */
.base = 0x1af0,
.offset = 0x30,
.limit = 0x34
},
{
/* NVDEC */
.base = 0x1b00,
.offset = 0x30,
.limit = 0x34
},
};
static const struct host1x_info host1x06_info = {
.nb_channels = 63,
.nb_pts = 576,
.nb_mlocks = 24,
.nb_bases = 16,
.init = host1x06_init,
.sync_offset = 0x0,
.dma_mask = DMA_BIT_MASK(40),
.has_wide_gather = true,
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
.sid_table = tegra186_sid_table,
.reserve_vblank_syncpts = false,
};
static const struct host1x_sid_entry tegra194_sid_table[] = {
{
/* VIC */
.base = 0x1af0,
.offset = 0x30,
.limit = 0x34
},
{
/* NVDEC */
.base = 0x1b00,
.offset = 0x30,
.limit = 0x34
},
{
/* NVDEC1 */
.base = 0x1bc0,
.offset = 0x30,
.limit = 0x34
},
};
static const struct host1x_info host1x07_info = {
.nb_channels = 63,
.nb_pts = 704,
.nb_mlocks = 32,
.nb_bases = 0,
.init = host1x07_init,
.sync_offset = 0x0,
.dma_mask = DMA_BIT_MASK(40),
.has_wide_gather = true,
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
.sid_table = tegra194_sid_table,
.reserve_vblank_syncpts = false,
};
/*
* Tegra234 has two stream ID protection tables, one for setting stream IDs
* through the channel path via SETSTREAMID, and one for setting them via
* MMIO. We program each engine's data stream ID in the channel path table
* and firmware stream ID in the MMIO path table.
*/
static const struct host1x_sid_entry tegra234_sid_table[] = {
{
/* VIC channel */
.base = 0x17b8,
.offset = 0x30,
.limit = 0x30
},
{
/* VIC MMIO */
.base = 0x1688,
.offset = 0x34,
.limit = 0x34
},
{
/* NVDEC channel */
.base = 0x17c8,
.offset = 0x30,
.limit = 0x30,
},
{
/* NVDEC MMIO */
.base = 0x1698,
.offset = 0x34,
.limit = 0x34,
},
};
static const struct host1x_info host1x08_info = {
.nb_channels = 63,
.nb_pts = 1024,
.nb_mlocks = 24,
.nb_bases = 0,
.init = host1x08_init,
.sync_offset = 0x0,
.dma_mask = DMA_BIT_MASK(40),
.has_wide_gather = true,
.has_hypervisor = true,
.has_common = true,
.num_sid_entries = ARRAY_SIZE(tegra234_sid_table),
.sid_table = tegra234_sid_table,
.streamid_vm_table = { 0x1004, 128 },
.classid_vm_table = { 0x1404, 25 },
.mmio_vm_table = { 0x1504, 25 },
.reserve_vblank_syncpts = false,
};
static const struct of_device_id host1x_of_match[] = {
{ .compatible = "nvidia,tegra234-host1x", .data = &host1x08_info, },
{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);
static void host1x_setup_virtualization_tables(struct host1x *host)
{
const struct host1x_info *info = host->info;
unsigned int i;
if (!info->has_hypervisor)
return;
for (i = 0; i < info->num_sid_entries; i++) {
const struct host1x_sid_entry *entry = &info->sid_table[i];
host1x_hypervisor_writel(host, entry->offset, entry->base);
host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
}
for (i = 0; i < info->streamid_vm_table.count; i++) {
/* Allow access to all stream IDs to all VMs. */
host1x_hypervisor_writel(host, 0xff, info->streamid_vm_table.base + 4 * i);
}
for (i = 0; i < info->classid_vm_table.count; i++) {
/* Allow access to all classes to all VMs. */
host1x_hypervisor_writel(host, 0xff, info->classid_vm_table.base + 4 * i);
}
for (i = 0; i < info->mmio_vm_table.count; i++) {
/* Use VM1 (that's us) as originator VMID for engine MMIO accesses. */
host1x_hypervisor_writel(host, 0x1, info->mmio_vm_table.base + 4 * i);
}
}
static bool host1x_wants_iommu(struct host1x *host1x)
{
/* Our IOMMU usage policy doesn't currently play well with GART */
if (of_machine_is_compatible("nvidia,tegra20"))
return false;
/*
* If we support addressing a maximum of 32 bits of physical memory
* and if the host1x firewall is enabled, there's no need to enable
* IOMMU support. This can happen for example on Tegra20, Tegra30
* and Tegra114.
*
* Tegra124 and later can address up to 34 bits of physical memory and
* many platforms come equipped with more than 2 GiB of system memory,
* which requires crossing the 4 GiB boundary. But there's a catch: on
* SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
* only address up to 32 bits of memory in GATHER opcodes, which means
* that command buffers need to either be in the first 2 GiB of system
* memory (which could quickly lead to memory exhaustion), or command
* buffers need to be treated differently from other buffers (which is
* not possible with the current ABI).
*
* A third option is to use the IOMMU in these cases to make sure all
* buffers will be mapped into a 32-bit IOVA space that host1x can
* address. This allows all of the system memory to be used and works
* within the limitations of the host1x on these SoCs.
*
* In summary, default to enabling the IOMMU on Tegra124 and later. For any
* of the earlier SoCs, only use the IOMMU for additional safety when
* the host1x firewall is disabled.
*/
if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
return false;
}
return true;
}
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
int err;
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
if (host->dev->archdata.mapping) {
struct dma_iommu_mapping *mapping =
to_dma_iommu_mapping(host->dev);
arm_iommu_detach_device(host->dev);
arm_iommu_release_mapping(mapping);
domain = iommu_get_domain_for_dev(host->dev);
}
#endif
/*
* We may not always want to enable IOMMU support (for example if the
* host1x firewall is already enabled and we don't support addressing
* more than 32 bits of physical memory), so check for that first.
*
* Similarly, if host1x is already attached to an IOMMU (via the DMA
* API), don't try to attach again.
*/
if (!host1x_wants_iommu(host) || domain)
return domain;
host->group = iommu_group_get(host->dev);
if (host->group) {
struct iommu_domain_geometry *geometry;
dma_addr_t start, end;
unsigned long order;
err = iova_cache_get();
if (err < 0)
goto put_group;
host->domain = iommu_domain_alloc(&platform_bus_type);
if (!host->domain) {
err = -ENOMEM;
goto put_cache;
}
err = iommu_attach_group(host->domain, host->group);
if (err) {
if (err == -ENODEV)
err = 0;
goto free_domain;
}
geometry = &host->domain->geometry;
start = geometry->aperture_start & host->info->dma_mask;
end = geometry->aperture_end & host->info->dma_mask;
order = __ffs(host->domain->pgsize_bitmap);
init_iova_domain(&host->iova, 1UL << order, start >> order);
host->iova_end = end;
domain = host->domain;
}
return domain;
free_domain:
iommu_domain_free(host->domain);
host->domain = NULL;
put_cache:
iova_cache_put();
put_group:
iommu_group_put(host->group);
host->group = NULL;
return ERR_PTR(err);
}
static int host1x_iommu_init(struct host1x *host)
{
u64 mask = host->info->dma_mask;
struct iommu_domain *domain;
int err;
domain = host1x_iommu_attach(host);
if (IS_ERR(domain)) {
err = PTR_ERR(domain);
dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
return err;
}
/*
* If we're not behind an IOMMU make sure we don't get push buffers
* that are allocated outside of the range addressable by the GATHER
* opcode.
*
* Newer generations of Tegra (Tegra186 and later) support a wide
* variant of the GATHER opcode that allows addressing more bits.
*/
if (!domain && !host->info->has_wide_gather)
mask = DMA_BIT_MASK(32);
err = dma_coerce_mask_and_coherent(host->dev, mask);
if (err < 0) {
dev_err(host->dev, "failed to set DMA mask: %d\n", err);
return err;
}
return 0;
}
static void host1x_iommu_exit(struct host1x *host)
{
if (host->domain) {
put_iova_domain(&host->iova);
iommu_detach_group(host->domain, host->group);
iommu_domain_free(host->domain);
host->domain = NULL;
iova_cache_put();
iommu_group_put(host->group);
host->group = NULL;
}
}
static int host1x_get_resets(struct host1x *host)
{
int err;
host->resets[0].id = "mc";
host->resets[1].id = "host1x";
host->nresets = ARRAY_SIZE(host->resets);
err = devm_reset_control_bulk_get_optional_exclusive_released(
host->dev, host->nresets, host->resets);
if (err) {
dev_err(host->dev, "failed to get reset: %d\n", err);
return err;
}
return 0;
}
static int host1x_probe(struct platform_device *pdev)
{
struct host1x *host;
int err;
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->info = of_device_get_match_data(&pdev->dev);
if (host->info->has_hypervisor) {
host->regs = devm_platform_ioremap_resource_byname(pdev, "vm");
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
host->hv_regs = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
if (IS_ERR(host->hv_regs))
return PTR_ERR(host->hv_regs);
if (host->info->has_common) {
host->common_regs = devm_platform_ioremap_resource_byname(pdev, "common");
if (IS_ERR(host->common_regs))
return PTR_ERR(host->common_regs);
}
} else {
host->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
}
host->syncpt_irq = platform_get_irq(pdev, 0);
if (host->syncpt_irq < 0)
return host->syncpt_irq;
mutex_init(&host->devices_lock);
INIT_LIST_HEAD(&host->devices);
INIT_LIST_HEAD(&host->list);
host->dev = &pdev->dev;
/* set common host1x device data */
platform_set_drvdata(pdev, host);
host->dev->dma_parms = &host->dma_parms;
dma_set_max_seg_size(host->dev, UINT_MAX);
if (host->info->init) {
err = host->info->init(host);
if (err)
return err;
}
host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
err = PTR_ERR(host->clk);
if (err != -EPROBE_DEFER)
dev_err(&pdev->dev, "failed to get clock: %d\n", err);
return err;
}
err = host1x_get_resets(host);
if (err)
return err;
host1x_bo_cache_init(&host->cache);
err = host1x_iommu_init(host);
if (err < 0) {
dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
goto destroy_cache;
}
err = host1x_channel_list_init(&host->channel_list,
host->info->nb_channels);
if (err) {
dev_err(&pdev->dev, "failed to initialize channel list\n");
goto iommu_exit;
}
err = host1x_memory_context_list_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize context list\n");
goto free_channels;
}
err = host1x_syncpt_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize syncpts\n");
goto free_contexts;
}
err = host1x_intr_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize interrupts\n");
goto deinit_syncpt;
}
pm_runtime_enable(&pdev->dev);
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
goto pm_disable;
/* the driver is not yet ready for dynamic runtime PM */
err = pm_runtime_resume_and_get(&pdev->dev);
if (err)
goto pm_disable;
host1x_debug_init(host);
err = host1x_register(host);
if (err < 0)
goto deinit_debugfs;
err = devm_of_platform_populate(&pdev->dev);
if (err < 0)
goto unregister;
return 0;
unregister:
host1x_unregister(host);
deinit_debugfs:
host1x_debug_deinit(host);
pm_runtime_put_sync_suspend(&pdev->dev);
pm_disable:
pm_runtime_disable(&pdev->dev);
host1x_intr_deinit(host);
deinit_syncpt:
host1x_syncpt_deinit(host);
free_contexts:
host1x_memory_context_list_free(&host->context_list);
free_channels:
host1x_channel_list_free(&host->channel_list);
iommu_exit:
host1x_iommu_exit(host);
destroy_cache:
host1x_bo_cache_destroy(&host->cache);
return err;
}
static int host1x_remove(struct platform_device *pdev)
{
struct host1x *host = platform_get_drvdata(pdev);
host1x_unregister(host);
host1x_debug_deinit(host);
pm_runtime_force_suspend(&pdev->dev);
host1x_intr_deinit(host);
host1x_syncpt_deinit(host);
host1x_memory_context_list_free(&host->context_list);
host1x_channel_list_free(&host->channel_list);
host1x_iommu_exit(host);
host1x_bo_cache_destroy(&host->cache);
return 0;
}
static int __maybe_unused host1x_runtime_suspend(struct device *dev)
{
struct host1x *host = dev_get_drvdata(dev);
int err;
host1x_intr_stop(host);
host1x_syncpt_save(host);
err = reset_control_bulk_assert(host->nresets, host->resets);
if (err) {
dev_err(dev, "failed to assert reset: %d\n", err);
goto resume_host1x;
}
usleep_range(1000, 2000);
clk_disable_unprepare(host->clk);
reset_control_bulk_release(host->nresets, host->resets);
return 0;
resume_host1x:
host1x_setup_virtualization_tables(host);
host1x_syncpt_restore(host);
host1x_intr_start(host);
return err;
}
static int __maybe_unused host1x_runtime_resume(struct device *dev)
{
struct host1x *host = dev_get_drvdata(dev);
int err;
err = reset_control_bulk_acquire(host->nresets, host->resets);
if (err) {
dev_err(dev, "failed to acquire reset: %d\n", err);
return err;
}
err = clk_prepare_enable(host->clk);
if (err) {
dev_err(dev, "failed to enable clock: %d\n", err);
goto release_reset;
}
err = reset_control_bulk_deassert(host->nresets, host->resets);
if (err < 0) {
dev_err(dev, "failed to deassert reset: %d\n", err);
goto disable_clk;
}
host1x_setup_virtualization_tables(host);
host1x_syncpt_restore(host);
host1x_intr_start(host);
return 0;
disable_clk:
clk_disable_unprepare(host->clk);
release_reset:
reset_control_bulk_release(host->nresets, host->resets);
return err;
}
static const struct dev_pm_ops host1x_pm_ops = {
SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume,
NULL)
/* TODO: add system suspend-resume once the driver is ready for it */
};
static struct platform_driver tegra_host1x_driver = {
.driver = {
.name = "tegra-host1x",
.of_match_table = host1x_of_match,
.pm = &host1x_pm_ops,
},
.probe = host1x_probe,
.remove = host1x_remove,
};
static struct platform_driver * const drivers[] = {
&tegra_host1x_driver,
&tegra_mipi_driver,
};
static int __init tegra_host1x_init(void)
{
int err;
err = bus_register(&host1x_bus_type);
if (err < 0)
return err;
err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
if (err < 0)
bus_unregister(&host1x_bus_type);
return err;
}
module_init(tegra_host1x_init);
static void __exit tegra_host1x_exit(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);
/**
* host1x_get_dma_mask() - query the supported DMA mask for host1x
* @host1x: host1x instance
*
* Note that this returns the supported DMA mask for host1x, which can be
* different from the applicable DMA mask under certain circumstances.
*/
u64 host1x_get_dma_mask(struct host1x *host1x)
{
return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);
MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_AUTHOR("Terje Bergstrom <[email protected]>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/host1x/dev.c |
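host1x_get_dma_mask() at the end of the file reports what the host1x instance itself can address, which a client may still need to combine with its own limit before configuring DMA. The snippet below is a hedged sketch of that usage: the function name is hypothetical, client_dev and host are assumed valid, and DMA_BIT_MASK(39) merely stands in for the client engine's own addressing limit.

#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/minmax.h>

/* Illustrative only: clamp the client's DMA mask to what host1x supports. */
static int example_setup_client_dma(struct device *client_dev,
				    struct host1x *host)
{
	u64 mask = min_t(u64, host1x_get_dma_mask(host), DMA_BIT_MASK(39));

	return dma_set_mask_and_coherent(client_dev, mask);
}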
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra host1x Interrupt Management
*
* Copyright (c) 2010-2021, NVIDIA Corporation.
*/
#include <linux/clk.h>
#include "dev.h"
#include "fence.h"
#include "intr.h"
static void host1x_intr_add_fence_to_list(struct host1x_fence_list *list,
struct host1x_syncpt_fence *fence)
{
struct host1x_syncpt_fence *fence_in_list;
list_for_each_entry_reverse(fence_in_list, &list->list, list) {
if ((s32)(fence_in_list->threshold - fence->threshold) <= 0) {
/* Fence in list is before us, we can insert here */
list_add(&fence->list, &fence_in_list->list);
return;
}
}
/* Add as first in list */
list_add(&fence->list, &list->list);
}
static void host1x_intr_update_hw_state(struct host1x *host, struct host1x_syncpt *sp)
{
struct host1x_syncpt_fence *fence;
if (!list_empty(&sp->fences.list)) {
fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list);
host1x_hw_intr_set_syncpt_threshold(host, sp->id, fence->threshold);
host1x_hw_intr_enable_syncpt_intr(host, sp->id);
} else {
host1x_hw_intr_disable_syncpt_intr(host, sp->id);
}
}
void host1x_intr_add_fence_locked(struct host1x *host, struct host1x_syncpt_fence *fence)
{
struct host1x_fence_list *fence_list = &fence->sp->fences;
INIT_LIST_HEAD(&fence->list);
host1x_intr_add_fence_to_list(fence_list, fence);
host1x_intr_update_hw_state(host, fence->sp);
}
bool host1x_intr_remove_fence(struct host1x *host, struct host1x_syncpt_fence *fence)
{
struct host1x_fence_list *fence_list = &fence->sp->fences;
unsigned long irqflags;
spin_lock_irqsave(&fence_list->lock, irqflags);
if (list_empty(&fence->list)) {
spin_unlock_irqrestore(&fence_list->lock, irqflags);
return false;
}
list_del_init(&fence->list);
host1x_intr_update_hw_state(host, fence->sp);
spin_unlock_irqrestore(&fence_list->lock, irqflags);
return true;
}
void host1x_intr_handle_interrupt(struct host1x *host, unsigned int id)
{
struct host1x_syncpt *sp = &host->syncpt[id];
struct host1x_syncpt_fence *fence, *tmp;
unsigned int value;
value = host1x_syncpt_load(sp);
spin_lock(&sp->fences.lock);
list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) {
if (((value - fence->threshold) & 0x80000000U) != 0U) {
/* Fence is not yet expired, we are done */
break;
}
list_del_init(&fence->list);
host1x_fence_signal(fence);
}
/* Re-enable interrupt if necessary */
host1x_intr_update_hw_state(host, sp);
spin_unlock(&sp->fences.lock);
}
int host1x_intr_init(struct host1x *host)
{
unsigned int id;
mutex_init(&host->intr_mutex);
for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
struct host1x_syncpt *syncpt = &host->syncpt[id];
spin_lock_init(&syncpt->fences.lock);
INIT_LIST_HEAD(&syncpt->fences.list);
}
return 0;
}
void host1x_intr_deinit(struct host1x *host)
{
}
void host1x_intr_start(struct host1x *host)
{
u32 hz = clk_get_rate(host->clk);
int err;
mutex_lock(&host->intr_mutex);
err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000));
if (err) {
mutex_unlock(&host->intr_mutex);
return;
}
mutex_unlock(&host->intr_mutex);
}
void host1x_intr_stop(struct host1x *host)
{
host1x_hw_intr_disable_all_syncpt_intrs(host);
}
| linux-master | drivers/gpu/host1x/intr.c |
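Both the fence-list ordering check in host1x_intr_add_fence_to_list() and the expiry check in host1x_intr_handle_interrupt() rely on wraparound-safe 32-bit comparisons rather than plain ordering operators. The standalone sketch below (hypothetical helper, not part of the driver) restates that idiom: a threshold counts as reached when the signed difference from the current value is non-negative, which stays correct when the syncpoint counter wraps past 0xffffffff.

#include <linux/types.h>

/*
 * Returns true if @value has reached @threshold, treating the 32-bit
 * counter as a circular space; mirrors the sign-bit tests used above.
 */
static inline bool example_syncpt_reached(u32 value, u32 threshold)
{
	return (s32)(value - threshold) >= 0;
}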
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra host1x Job
*
* Copyright (c) 2010-2015, NVIDIA Corporation.
*/
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>
#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"
#define HOST1X_WAIT_SYNCPT_OFFSET 0x8
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
u32 num_cmdbufs, u32 num_relocs,
bool skip_firewall)
{
struct host1x_job *job = NULL;
unsigned int num_unpins = num_relocs;
bool enable_firewall;
u64 total;
void *mem;
enable_firewall = IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !skip_firewall;
if (!enable_firewall)
num_unpins += num_cmdbufs;
/* Check that we're not going to overflow */
total = sizeof(struct host1x_job) +
(u64)num_relocs * sizeof(struct host1x_reloc) +
(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
(u64)num_cmdbufs * sizeof(struct host1x_job_cmd) +
(u64)num_unpins * sizeof(dma_addr_t) +
(u64)num_unpins * sizeof(u32 *);
if (total > ULONG_MAX)
return NULL;
mem = job = kzalloc(total, GFP_KERNEL);
if (!job)
return NULL;
job->enable_firewall = enable_firewall;
kref_init(&job->ref);
job->channel = ch;
/* Redistribute memory to the structs */
mem += sizeof(struct host1x_job);
job->relocs = num_relocs ? mem : NULL;
mem += num_relocs * sizeof(struct host1x_reloc);
job->unpins = num_unpins ? mem : NULL;
mem += num_unpins * sizeof(struct host1x_job_unpin_data);
job->cmds = num_cmdbufs ? mem : NULL;
mem += num_cmdbufs * sizeof(struct host1x_job_cmd);
job->addr_phys = num_unpins ? mem : NULL;
job->reloc_addr_phys = job->addr_phys;
job->gather_addr_phys = &job->addr_phys[num_relocs];
return job;
}
EXPORT_SYMBOL(host1x_job_alloc);
struct host1x_job *host1x_job_get(struct host1x_job *job)
{
kref_get(&job->ref);
return job;
}
EXPORT_SYMBOL(host1x_job_get);
static void job_free(struct kref *ref)
{
struct host1x_job *job = container_of(ref, struct host1x_job, ref);
if (job->release)
job->release(job);
if (job->fence) {
/*
* remove_callback is atomic w.r.t. fence signaling, so
* after the call returns, we know that the callback is not
* in execution, and the fence can be safely freed.
*/
dma_fence_remove_callback(job->fence, &job->fence_cb);
dma_fence_put(job->fence);
}
if (job->syncpt)
host1x_syncpt_put(job->syncpt);
kfree(job);
}
void host1x_job_put(struct host1x_job *job)
{
kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
unsigned int words, unsigned int offset)
{
struct host1x_job_gather *gather = &job->cmds[job->num_cmds].gather;
gather->words = words;
gather->bo = bo;
gather->offset = offset;
job->num_cmds++;
}
EXPORT_SYMBOL(host1x_job_add_gather);
void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
bool relative, u32 next_class)
{
struct host1x_job_cmd *cmd = &job->cmds[job->num_cmds];
cmd->is_wait = true;
cmd->wait.id = id;
cmd->wait.threshold = thresh;
cmd->wait.next_class = next_class;
cmd->wait.relative = relative;
job->num_cmds++;
}
EXPORT_SYMBOL(host1x_job_add_wait);
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
unsigned long mask = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
struct host1x_client *client = job->client;
struct device *dev = client->dev;
struct host1x_job_gather *g;
unsigned int i;
int err;
job->num_unpins = 0;
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocs[i];
enum dma_data_direction direction;
struct host1x_bo_mapping *map;
struct host1x_bo *bo;
reloc->target.bo = host1x_bo_get(reloc->target.bo);
if (!reloc->target.bo) {
err = -EINVAL;
goto unpin;
}
bo = reloc->target.bo;
switch (reloc->flags & mask) {
case HOST1X_RELOC_READ:
direction = DMA_TO_DEVICE;
break;
case HOST1X_RELOC_WRITE:
direction = DMA_FROM_DEVICE;
break;
case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
direction = DMA_BIDIRECTIONAL;
break;
default:
err = -EINVAL;
goto unpin;
}
map = host1x_bo_pin(dev, bo, direction, NULL);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto unpin;
}
/*
* host1x clients are generally not able to do scatter-gather themselves, so fail
* if the buffer is discontiguous and we fail to map its SG table to a single
* contiguous chunk of I/O virtual memory.
*/
if (map->chunks > 1) {
err = -EINVAL;
goto unpin;
}
job->addr_phys[job->num_unpins] = map->phys;
job->unpins[job->num_unpins].map = map;
job->num_unpins++;
}
/*
* We will copy the gather BO contents later, so there is no need to
* hold and pin them.
*/
if (job->enable_firewall)
return 0;
for (i = 0; i < job->num_cmds; i++) {
struct host1x_bo_mapping *map;
size_t gather_size = 0;
struct scatterlist *sg;
unsigned long shift;
struct iova *alloc;
unsigned int j;
if (job->cmds[i].is_wait)
continue;
g = &job->cmds[i].gather;
g->bo = host1x_bo_get(g->bo);
if (!g->bo) {
err = -EINVAL;
goto unpin;
}
map = host1x_bo_pin(host->dev, g->bo, DMA_TO_DEVICE, NULL);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto unpin;
}
if (host->domain) {
for_each_sgtable_sg(map->sgt, sg, j)
gather_size += sg->length;
gather_size = iova_align(&host->iova, gather_size);
shift = iova_shift(&host->iova);
alloc = alloc_iova(&host->iova, gather_size >> shift,
host->iova_end >> shift, true);
if (!alloc) {
err = -ENOMEM;
goto put;
}
err = iommu_map_sgtable(host->domain, iova_dma_addr(&host->iova, alloc),
map->sgt, IOMMU_READ);
if (err == 0) {
__free_iova(&host->iova, alloc);
err = -EINVAL;
goto put;
}
map->phys = iova_dma_addr(&host->iova, alloc);
map->size = gather_size;
}
job->addr_phys[job->num_unpins] = map->phys;
job->unpins[job->num_unpins].map = map;
job->num_unpins++;
job->gather_addr_phys[i] = map->phys;
}
return 0;
put:
host1x_bo_put(g->bo);
unpin:
host1x_job_unpin(job);
return err;
}
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
void *cmdbuf_addr = NULL;
struct host1x_bo *cmdbuf = g->bo;
unsigned int i;
/* pin & patch the relocs for one gather */
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocs[i];
u32 reloc_addr = (job->reloc_addr_phys[i] +
reloc->target.offset) >> reloc->shift;
u32 *target;
/* skip all other gathers */
if (cmdbuf != reloc->cmdbuf.bo)
continue;
if (job->enable_firewall) {
target = (u32 *)job->gather_copy_mapped +
reloc->cmdbuf.offset / sizeof(u32) +
g->offset / sizeof(u32);
goto patch_reloc;
}
if (!cmdbuf_addr) {
cmdbuf_addr = host1x_bo_mmap(cmdbuf);
if (unlikely(!cmdbuf_addr)) {
pr_err("Could not map cmdbuf for relocation\n");
return -ENOMEM;
}
}
target = cmdbuf_addr + reloc->cmdbuf.offset;
patch_reloc:
*target = reloc_addr;
}
if (cmdbuf_addr)
host1x_bo_munmap(cmdbuf, cmdbuf_addr);
return 0;
}
static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
unsigned int offset)
{
offset *= sizeof(u32);
if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
return false;
/* relocation shift value validation isn't implemented yet */
if (reloc->shift)
return false;
return true;
}
struct host1x_firewall {
struct host1x_job *job;
struct device *dev;
unsigned int num_relocs;
struct host1x_reloc *reloc;
struct host1x_bo *cmdbuf;
unsigned int offset;
u32 words;
u32 class;
u32 reg;
u32 mask;
u32 count;
};
static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
if (!fw->job->is_addr_reg)
return 0;
if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
if (!fw->num_relocs)
return -EINVAL;
if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
return -EINVAL;
fw->num_relocs--;
fw->reloc++;
}
return 0;
}
static int check_class(struct host1x_firewall *fw, u32 class)
{
if (!fw->job->is_valid_class) {
if (fw->class != class)
return -EINVAL;
} else {
if (!fw->job->is_valid_class(fw->class))
return -EINVAL;
}
return 0;
}
static int check_mask(struct host1x_firewall *fw)
{
u32 mask = fw->mask;
u32 reg = fw->reg;
int ret;
while (mask) {
if (fw->words == 0)
return -EINVAL;
if (mask & 1) {
ret = check_register(fw, reg);
if (ret < 0)
return ret;
fw->words--;
fw->offset++;
}
mask >>= 1;
reg++;
}
return 0;
}
static int check_incr(struct host1x_firewall *fw)
{
u32 count = fw->count;
u32 reg = fw->reg;
int ret;
while (count) {
if (fw->words == 0)
return -EINVAL;
ret = check_register(fw, reg);
if (ret < 0)
return ret;
reg++;
fw->words--;
fw->offset++;
count--;
}
return 0;
}
static int check_nonincr(struct host1x_firewall *fw)
{
u32 count = fw->count;
int ret;
while (count) {
if (fw->words == 0)
return -EINVAL;
ret = check_register(fw, fw->reg);
if (ret < 0)
return ret;
fw->words--;
fw->offset++;
count--;
}
return 0;
}
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
(g->offset / sizeof(u32));
u32 job_class = fw->class;
int err = 0;
fw->words = g->words;
fw->cmdbuf = g->bo;
fw->offset = 0;
while (fw->words && !err) {
u32 word = cmdbuf_base[fw->offset];
u32 opcode = (word & 0xf0000000) >> 28;
fw->mask = 0;
fw->reg = 0;
fw->count = 0;
fw->words--;
fw->offset++;
switch (opcode) {
case 0:
fw->class = word >> 6 & 0x3ff;
fw->mask = word & 0x3f;
fw->reg = word >> 16 & 0xfff;
err = check_class(fw, job_class);
if (!err)
err = check_mask(fw);
if (err)
goto out;
break;
case 1:
fw->reg = word >> 16 & 0xfff;
fw->count = word & 0xffff;
err = check_incr(fw);
if (err)
goto out;
break;
case 2:
fw->reg = word >> 16 & 0xfff;
fw->count = word & 0xffff;
err = check_nonincr(fw);
if (err)
goto out;
break;
case 3:
fw->mask = word & 0xffff;
fw->reg = word >> 16 & 0xfff;
err = check_mask(fw);
if (err)
goto out;
break;
case 4:
case 14:
break;
default:
err = -EINVAL;
break;
}
}
out:
return err;
}
static inline int copy_gathers(struct device *host, struct host1x_job *job,
struct device *dev)
{
struct host1x_firewall fw;
size_t size = 0;
size_t offset = 0;
unsigned int i;
fw.job = job;
fw.dev = dev;
fw.reloc = job->relocs;
fw.num_relocs = job->num_relocs;
fw.class = job->class;
for (i = 0; i < job->num_cmds; i++) {
struct host1x_job_gather *g;
if (job->cmds[i].is_wait)
continue;
g = &job->cmds[i].gather;
size += g->words * sizeof(u32);
}
/*
* Try a non-blocking allocation from the higher priority pools first,
* as waiting for the allocation here is a major performance hit.
*/
job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
GFP_NOWAIT);
/* the higher priority allocation failed, so fall back to a blocking one */
if (!job->gather_copy_mapped)
job->gather_copy_mapped = dma_alloc_wc(host, size,
&job->gather_copy,
GFP_KERNEL);
if (!job->gather_copy_mapped)
return -ENOMEM;
job->gather_copy_size = size;
for (i = 0; i < job->num_cmds; i++) {
struct host1x_job_gather *g;
void *gather;
if (job->cmds[i].is_wait)
continue;
g = &job->cmds[i].gather;
/* Copy the gather */
gather = host1x_bo_mmap(g->bo);
memcpy(job->gather_copy_mapped + offset, gather + g->offset,
g->words * sizeof(u32));
host1x_bo_munmap(g->bo, gather);
/* Store the location in the buffer */
g->base = job->gather_copy;
g->offset = offset;
/* Validate the job */
if (validate(&fw, g))
return -EINVAL;
offset += g->words * sizeof(u32);
}
/* No relocs should remain at this point */
if (fw.num_relocs)
return -EINVAL;
return 0;
}
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
int err;
unsigned int i, j;
struct host1x *host = dev_get_drvdata(dev->parent);
/* pin memory */
err = pin_job(host, job);
if (err)
goto out;
if (job->enable_firewall) {
err = copy_gathers(host->dev, job, dev);
if (err)
goto out;
}
/* patch gathers */
for (i = 0; i < job->num_cmds; i++) {
struct host1x_job_gather *g;
if (job->cmds[i].is_wait)
continue;
g = &job->cmds[i].gather;
/* process each gather mem only once */
if (g->handled)
continue;
/* copy_gathers() sets the gather base if the firewall is enabled */
if (!job->enable_firewall)
g->base = job->gather_addr_phys[i];
for (j = i + 1; j < job->num_cmds; j++) {
if (!job->cmds[j].is_wait &&
job->cmds[j].gather.bo == g->bo) {
job->cmds[j].gather.handled = true;
job->cmds[j].gather.base = g->base;
}
}
err = do_relocs(job, g);
if (err)
break;
}
out:
if (err)
host1x_job_unpin(job);
wmb();
return err;
}
EXPORT_SYMBOL(host1x_job_pin);
void host1x_job_unpin(struct host1x_job *job)
{
struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
unsigned int i;
for (i = 0; i < job->num_unpins; i++) {
struct host1x_bo_mapping *map = job->unpins[i].map;
struct host1x_bo *bo = map->bo;
if (!job->enable_firewall && map->size && host->domain) {
iommu_unmap(host->domain, job->addr_phys[i], map->size);
free_iova(&host->iova, iova_pfn(&host->iova, job->addr_phys[i]));
}
host1x_bo_unpin(map);
host1x_bo_put(bo);
}
job->num_unpins = 0;
if (job->gather_copy_size)
dma_free_wc(host->dev, job->gather_copy_size,
job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
/*
* Debug routine used to dump job entries
*/
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
dev_dbg(dev, " SYNCPT_ID %d\n", job->syncpt->id);
dev_dbg(dev, " SYNCPT_VAL %d\n", job->syncpt_end);
dev_dbg(dev, " FIRST_GET 0x%x\n", job->first_get);
dev_dbg(dev, " TIMEOUT %d\n", job->timeout);
dev_dbg(dev, " NUM_SLOTS %d\n", job->num_slots);
dev_dbg(dev, " NUM_HANDLES %d\n", job->num_unpins);
}
| linux-master | drivers/gpu/host1x/job.c |
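To make the allocation and pinning contract above concrete, here is a hedged sketch of the call sequence a client might go through for a single-gather job: allocate, attach the client, record the gather, pin against the client's device, and always drop the reference. The function name is hypothetical, ch, client and bo are assumed to come from the caller, and submission plus syncpoint setup are intentionally left out.

#include <linux/errno.h>
#include <linux/host1x.h>

static int example_build_job(struct host1x_channel *ch,
			     struct host1x_client *client,
			     struct host1x_bo *bo, unsigned int num_words)
{
	struct host1x_job *job;
	int err;

	/* one command buffer, no relocations, firewall decided by Kconfig */
	job = host1x_job_alloc(ch, 1, 0, false);
	if (!job)
		return -ENOMEM;

	job->client = client;
	host1x_job_add_gather(job, bo, num_words, 0);

	err = host1x_job_pin(job, client->dev);
	if (err)
		goto put;

	/* ... the job would normally be submitted and waited on here ... */

	host1x_job_unpin(job);
put:
	host1x_job_put(job);
	return err;
}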
// SPDX-License-Identifier: GPL-2.0-only
/*
* Host1x init for T20 and T30 Architecture Chips
*
* Copyright (c) 2011-2013, NVIDIA Corporation.
*/
/* include hw specification */
#include "host1x01.h"
#include "host1x01_hardware.h"
/* include code */
#define HOST1X_HW 1
#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
#include "intr_hw.c"
#include "syncpt_hw.c"
#include "../dev.h"
int host1x01_init(struct host1x *host)
{
host->channel_op = &host1x_channel_ops;
host->cdma_op = &host1x_cdma_ops;
host->cdma_pb_op = &host1x_pushbuffer_ops;
host->syncpt_op = &host1x_syncpt_ops;
host->intr_op = &host1x_intr_ops;
host->debug_op = &host1x_debug_ops;
return 0;
}
| linux-master | drivers/gpu/host1x/hw/host1x01.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra host1x Command DMA
*
* Copyright (c) 2010-2013, NVIDIA Corporation.
*/
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include "../cdma.h"
#include "../channel.h"
#include "../dev.h"
#include "../debug.h"
/*
* Put the restart at the end of pushbuffer memory
*/
static void push_buffer_init(struct push_buffer *pb)
{
*(u32 *)(pb->mapped + pb->size) = host1x_opcode_restart(0);
}
/*
* Increment the timed-out buffer's syncpoint via the CPU.
*/
static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
u32 syncpt_incrs, u32 syncval, u32 nr_slots)
{
unsigned int i;
for (i = 0; i < syncpt_incrs; i++)
host1x_syncpt_incr(cdma->timeout.syncpt);
/* after CPU incr, ensure shadow is up to date */
host1x_syncpt_load(cdma->timeout.syncpt);
}
/*
* Start channel DMA
*/
static void cdma_start(struct host1x_cdma *cdma)
{
struct host1x_channel *ch = cdma_to_channel(cdma);
u64 start, end;
if (cdma->running)
return;
cdma->last_pos = cdma->push_buffer.pos;
start = cdma->push_buffer.dma;
end = cdma->push_buffer.size + 4;
host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
HOST1X_CHANNEL_DMACTRL);
/* set base, put and end pointer */
host1x_ch_writel(ch, lower_32_bits(start), HOST1X_CHANNEL_DMASTART);
#if HOST1X_HW >= 6
host1x_ch_writel(ch, upper_32_bits(start), HOST1X_CHANNEL_DMASTART_HI);
#endif
host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
#if HOST1X_HW >= 6
host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMAPUT_HI);
#endif
host1x_ch_writel(ch, lower_32_bits(end), HOST1X_CHANNEL_DMAEND);
#if HOST1X_HW >= 6
host1x_ch_writel(ch, upper_32_bits(end), HOST1X_CHANNEL_DMAEND_HI);
#endif
/* reset GET */
host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
HOST1X_CHANNEL_DMACTRL_DMAGETRST |
HOST1X_CHANNEL_DMACTRL_DMAINITGET,
HOST1X_CHANNEL_DMACTRL);
/* start the command DMA */
host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
cdma->running = true;
}
/*
* Similar to cdma_start(), but rather than starting from an idle
* state (where DMA GET is set to DMA PUT), on a timeout we restore
* DMA GET from an explicit value (so DMA may again be pending).
*/
static void cdma_timeout_restart(struct host1x_cdma *cdma, u32 getptr)
{
struct host1x *host1x = cdma_to_host1x(cdma);
struct host1x_channel *ch = cdma_to_channel(cdma);
u64 start, end;
if (cdma->running)
return;
cdma->last_pos = cdma->push_buffer.pos;
host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
HOST1X_CHANNEL_DMACTRL);
start = cdma->push_buffer.dma;
end = cdma->push_buffer.size + 4;
/* set base, end pointer (all of memory) */
host1x_ch_writel(ch, lower_32_bits(start), HOST1X_CHANNEL_DMASTART);
#if HOST1X_HW >= 6
host1x_ch_writel(ch, upper_32_bits(start), HOST1X_CHANNEL_DMASTART_HI);
#endif
host1x_ch_writel(ch, lower_32_bits(end), HOST1X_CHANNEL_DMAEND);
#if HOST1X_HW >= 6
host1x_ch_writel(ch, upper_32_bits(end), HOST1X_CHANNEL_DMAEND_HI);
#endif
/* set GET, by loading the value in PUT (then reset GET) */
host1x_ch_writel(ch, getptr, HOST1X_CHANNEL_DMAPUT);
host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
HOST1X_CHANNEL_DMACTRL_DMAGETRST |
HOST1X_CHANNEL_DMACTRL_DMAINITGET,
HOST1X_CHANNEL_DMACTRL);
dev_dbg(host1x->dev,
"%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", __func__,
host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
cdma->last_pos);
/* deassert GET reset and set PUT */
host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
HOST1X_CHANNEL_DMACTRL);
host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
/* start the command DMA */
host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
cdma->running = true;
}
/*
* Kick channel DMA into action by writing its PUT offset (if it has changed)
*/
static void cdma_flush(struct host1x_cdma *cdma)
{
struct host1x_channel *ch = cdma_to_channel(cdma);
if (cdma->push_buffer.pos != cdma->last_pos) {
host1x_ch_writel(ch, cdma->push_buffer.pos,
HOST1X_CHANNEL_DMAPUT);
cdma->last_pos = cdma->push_buffer.pos;
}
}
static void cdma_stop(struct host1x_cdma *cdma)
{
struct host1x_channel *ch = cdma_to_channel(cdma);
mutex_lock(&cdma->lock);
if (cdma->running) {
host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
HOST1X_CHANNEL_DMACTRL);
cdma->running = false;
}
mutex_unlock(&cdma->lock);
}
static void cdma_hw_cmdproc_stop(struct host1x *host, struct host1x_channel *ch,
bool stop)
{
#if HOST1X_HW >= 6
host1x_ch_writel(ch, stop ? 0x1 : 0x0, HOST1X_CHANNEL_CMDPROC_STOP);
#else
u32 cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP);
if (stop)
cmdproc_stop |= BIT(ch->id);
else
cmdproc_stop &= ~BIT(ch->id);
host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
#endif
}
static void cdma_hw_teardown(struct host1x *host, struct host1x_channel *ch)
{
#if HOST1X_HW >= 6
host1x_ch_writel(ch, 0x1, HOST1X_CHANNEL_TEARDOWN);
#else
host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN);
#endif
}
/*
* Stops both channel's command processor and CDMA immediately.
* Also, tears down the channel and resets corresponding module.
*/
static void cdma_freeze(struct host1x_cdma *cdma)
{
struct host1x *host = cdma_to_host1x(cdma);
struct host1x_channel *ch = cdma_to_channel(cdma);
if (cdma->torndown && !cdma->running) {
dev_warn(host->dev, "Already torn down\n");
return;
}
dev_dbg(host->dev, "freezing channel (id %d)\n", ch->id);
cdma_hw_cmdproc_stop(host, ch, true);
dev_dbg(host->dev, "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
__func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
cdma->last_pos);
host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
HOST1X_CHANNEL_DMACTRL);
cdma_hw_teardown(host, ch);
cdma->running = false;
cdma->torndown = true;
}
static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
{
struct host1x *host1x = cdma_to_host1x(cdma);
struct host1x_channel *ch = cdma_to_channel(cdma);
dev_dbg(host1x->dev,
"resuming channel (id %u, DMAGET restart = 0x%x)\n",
ch->id, getptr);
cdma_hw_cmdproc_stop(host1x, ch, false);
cdma->torndown = false;
cdma_timeout_restart(cdma, getptr);
}
static void timeout_release_mlock(struct host1x_cdma *cdma)
{
#if HOST1X_HW >= 8
/* Tegra186 and Tegra194 require a more complicated MLOCK release
* sequence. Furthermore, those chips by default don't enforce MLOCKs,
* so it turns out that if we don't /actually/ need MLOCKs, we can just
* ignore them.
*
* As such, for now just implement this on Tegra234 where things are
* stricter but also easy to implement.
*/
struct host1x_channel *ch = cdma_to_channel(cdma);
struct host1x *host1x = cdma_to_host1x(cdma);
u32 offset;
switch (ch->client->class) {
case HOST1X_CLASS_VIC:
offset = HOST1X_COMMON_VIC_MLOCK;
break;
case HOST1X_CLASS_NVDEC:
offset = HOST1X_COMMON_NVDEC_MLOCK;
break;
default:
WARN(1, "%s was not updated for class %u", __func__, ch->client->class);
return;
}
host1x_common_writel(host1x, 0x0, offset);
#endif
}
/*
* If this timeout fires, it indicates that the current sync_queue entry has
* exceeded its TTL: the userctx should be timed out and any submits that
* were already issued should be cleaned up (future submits return an error).
*/
static void cdma_timeout_handler(struct work_struct *work)
{
u32 syncpt_val;
struct host1x_cdma *cdma;
struct host1x *host1x;
struct host1x_channel *ch;
cdma = container_of(to_delayed_work(work), struct host1x_cdma,
timeout.wq);
host1x = cdma_to_host1x(cdma);
ch = cdma_to_channel(cdma);
host1x_debug_dump(cdma_to_host1x(cdma));
mutex_lock(&cdma->lock);
if (!cdma->timeout.client) {
dev_dbg(host1x->dev,
"cdma_timeout: expired, but has no clientid\n");
mutex_unlock(&cdma->lock);
return;
}
/* stop processing to get a clean snapshot */
cdma_hw_cmdproc_stop(host1x, ch, true);
syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
/* has buffer actually completed? */
if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) {
dev_dbg(host1x->dev,
"cdma_timeout: expired, but buffer had completed\n");
/* restore */
cdma_hw_cmdproc_stop(host1x, ch, false);
mutex_unlock(&cdma->lock);
return;
}
dev_warn(host1x->dev, "%s: timeout: %u (%s), HW thresh %d, done %d\n",
__func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
syncpt_val, cdma->timeout.syncpt_val);
/* stop HW, resetting channel/module */
host1x_hw_cdma_freeze(host1x, cdma);
/* release any held MLOCK */
timeout_release_mlock(cdma);
host1x_cdma_update_sync_queue(cdma, ch->dev);
mutex_unlock(&cdma->lock);
}
/*
* Init timeout resources
*/
static int cdma_timeout_init(struct host1x_cdma *cdma)
{
INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
cdma->timeout.initialized = true;
return 0;
}
/*
* Clean up timeout resources
*/
static void cdma_timeout_destroy(struct host1x_cdma *cdma)
{
if (cdma->timeout.initialized)
cancel_delayed_work(&cdma->timeout.wq);
cdma->timeout.initialized = false;
}
static const struct host1x_cdma_ops host1x_cdma_ops = {
.start = cdma_start,
.stop = cdma_stop,
.flush = cdma_flush,
.timeout_init = cdma_timeout_init,
.timeout_destroy = cdma_timeout_destroy,
.freeze = cdma_freeze,
.resume = cdma_resume,
.timeout_cpu_incr = cdma_timeout_cpu_incr,
};
static const struct host1x_pushbuffer_ops host1x_pushbuffer_ops = {
.init = push_buffer_init,
};
| linux-master | drivers/gpu/host1x/hw/cdma_hw.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Host1x init for Tegra210 SoCs
*
* Copyright (c) 2015 NVIDIA Corporation.
*/
/* include hw specification */
#include "host1x05.h"
#include "host1x05_hardware.h"
/* include code */
#define HOST1X_HW 5
#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
#include "intr_hw.c"
#include "syncpt_hw.c"
#include "../dev.h"
int host1x05_init(struct host1x *host)
{
host->channel_op = &host1x_channel_ops;
host->cdma_op = &host1x_cdma_ops;
host->cdma_pb_op = &host1x_pushbuffer_ops;
host->syncpt_op = &host1x_syncpt_ops;
host->intr_op = &host1x_intr_ops;
host->debug_op = &host1x_debug_ops;
return 0;
}
| linux-master | drivers/gpu/host1x/hw/host1x05.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra host1x Syncpoints
*
* Copyright (c) 2010-2013, NVIDIA Corporation.
*/
#include <linux/io.h>
#include "../dev.h"
#include "../syncpt.h"
/*
* Write the current syncpoint value back to hw.
*/
static void syncpt_restore(struct host1x_syncpt *sp)
{
u32 min = host1x_syncpt_read_min(sp);
struct host1x *host = sp->host;
host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
}
/*
* Write the current waitbase value back to hw.
*/
static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
{
#if HOST1X_HW < 7
struct host1x *host = sp->host;
host1x_sync_writel(host, sp->base_val,
HOST1X_SYNC_SYNCPT_BASE(sp->id));
#endif
}
/*
* Read waitbase value from hw.
*/
static void syncpt_read_wait_base(struct host1x_syncpt *sp)
{
#if HOST1X_HW < 7
struct host1x *host = sp->host;
sp->base_val =
host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
#endif
}
/*
* Updates the last value read from hardware.
*/
static u32 syncpt_load(struct host1x_syncpt *sp)
{
struct host1x *host = sp->host;
u32 old, live;
/* Loop in case there's a race writing to min_val */
do {
old = host1x_syncpt_read_min(sp);
live = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(sp->id));
} while ((u32)atomic_cmpxchg(&sp->min_val, old, live) != old);
if (!host1x_syncpt_check_max(sp, live))
dev_err(host->dev, "%s failed: id=%u, min=%d, max=%d\n",
__func__, sp->id, host1x_syncpt_read_min(sp),
host1x_syncpt_read_max(sp));
return live;
}
/*
* Write a cpu syncpoint increment to the hardware, without touching
* the cache.
*/
static int syncpt_cpu_incr(struct host1x_syncpt *sp)
{
struct host1x *host = sp->host;
u32 reg_offset = sp->id / 32;
if (!host1x_syncpt_client_managed(sp) &&
host1x_syncpt_idle(sp))
return -EINVAL;
host1x_sync_writel(host, BIT(sp->id % 32),
HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
wmb();
return 0;
}
/**
* syncpt_assign_to_channel() - Assign syncpoint to channel
* @sp: syncpoint
* @ch: channel
*
* On chips with the syncpoint protection feature (Tegra186+), assign @sp to
* @ch, preventing other channels from incrementing the syncpoints. If @ch is
* NULL, unassigns the syncpoint.
*
* On older chips, do nothing.
*/
static void syncpt_assign_to_channel(struct host1x_syncpt *sp,
struct host1x_channel *ch)
{
#if HOST1X_HW >= 6
struct host1x *host = sp->host;
host1x_sync_writel(host,
HOST1X_SYNC_SYNCPT_CH_APP_CH(ch ? ch->id : 0xff),
HOST1X_SYNC_SYNCPT_CH_APP(sp->id));
#endif
}
/**
* syncpt_enable_protection() - Enable syncpoint protection
* @host: host1x instance
*
* On chips with the syncpoint protection feature (Tegra186+), enable this
* feature. On older chips, do nothing.
*/
static void syncpt_enable_protection(struct host1x *host)
{
#if HOST1X_HW >= 6
if (!host->hv_regs)
return;
host1x_hypervisor_writel(host, HOST1X_HV_SYNCPT_PROT_EN_CH_EN,
HOST1X_HV_SYNCPT_PROT_EN);
#endif
}
static const struct host1x_syncpt_ops host1x_syncpt_ops = {
.restore = syncpt_restore,
.restore_wait_base = syncpt_restore_wait_base,
.load_wait_base = syncpt_read_wait_base,
.load = syncpt_load,
.cpu_incr = syncpt_cpu_incr,
.assign_to_channel = syncpt_assign_to_channel,
.enable_protection = syncpt_enable_protection,
};
| linux-master | drivers/gpu/host1x/hw/syncpt_hw.c |
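syncpt_load() above keeps the cached minimum value coherent with the hardware using a compare-and-exchange loop instead of a lock. The following standalone sketch (hypothetical names, same idiom) shows the pattern in isolation: re-read the shadow, sample the live value, and only install it if no other CPU updated the shadow in the meantime.

#include <linux/atomic.h>
#include <linux/types.h>

/*
 * Illustrative only: publish the result of @read_live into @shadow unless
 * another CPU already stored a newer snapshot, mirroring syncpt_load().
 */
static u32 example_update_shadow(atomic_t *shadow, u32 (*read_live)(void *),
				 void *ctx)
{
	u32 old, live;

	do {
		old = (u32)atomic_read(shadow);
		live = read_live(ctx);
	} while ((u32)atomic_cmpxchg(shadow, old, live) != old);

	return live;
}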
// SPDX-License-Identifier: GPL-2.0-only
/*
* Host1x init for Tegra114 SoCs
*
* Copyright (c) 2013 NVIDIA Corporation.
*/
/* include hw specification */
#include "host1x02.h"
#include "host1x02_hardware.h"
/* include code */
#define HOST1X_HW 2
#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
#include "intr_hw.c"
#include "syncpt_hw.c"
#include "../dev.h"
int host1x02_init(struct host1x *host)
{
host->channel_op = &host1x_channel_ops;
host->cdma_op = &host1x_cdma_ops;
host->cdma_pb_op = &host1x_pushbuffer_ops;
host->syncpt_op = &host1x_syncpt_ops;
host->intr_op = &host1x_intr_ops;
host->debug_op = &host1x_debug_ops;
return 0;
}
| linux-master | drivers/gpu/host1x/hw/host1x02.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra host1x Interrupt Management
*
* Copyright (C) 2010 Google, Inc.
* Copyright (c) 2010-2013, NVIDIA Corporation.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include "../intr.h"
#include "../dev.h"
static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
struct host1x *host = dev_id;
unsigned long reg;
unsigned int i, id;
for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
reg = host1x_sync_readl(host,
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
host1x_sync_writel(host, reg,
HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
host1x_sync_writel(host, reg,
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
for_each_set_bit(id, ®, 32)
host1x_intr_handle_interrupt(host, i * 32 + id);
}
return IRQ_HANDLED;
}
static void host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
{
unsigned int i;
for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
host1x_sync_writel(host, 0xffffffffu,
HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
host1x_sync_writel(host, 0xffffffffu,
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
}
}
static void intr_hw_init(struct host1x *host, u32 cpm)
{
#if HOST1X_HW < 6
/* Disable the ip_busy_timeout. This prevents write drops. */
host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);
/*
* Increase the auto-ack timeout to the maximum value. 2D will hang
* on Tegra2 otherwise.
*/
host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
/* update host clocks per usec */
host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
#endif
#if HOST1X_HW >= 8
u32 id;
/*
* Program threshold interrupt destination among 8 lines per VM,
* per syncpoint. For now, just direct all to the first interrupt
* line.
*/
for (id = 0; id < host->info->nb_pts; id++)
host1x_sync_writel(host, 0, HOST1X_SYNC_SYNCPT_INTR_DEST(id));
#endif
}
static int
host1x_intr_init_host_sync(struct host1x *host, u32 cpm)
{
int err;
host1x_hw_intr_disable_all_syncpt_intrs(host);
err = devm_request_irq(host->dev, host->syncpt_irq,
syncpt_thresh_isr, IRQF_SHARED,
"host1x_syncpt", host);
if (err < 0)
return err;
intr_hw_init(host, cpm);
return 0;
}
static void host1x_intr_set_syncpt_threshold(struct host1x *host,
unsigned int id,
u32 thresh)
{
host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
}
static void host1x_intr_enable_syncpt_intr(struct host1x *host,
unsigned int id)
{
host1x_sync_writel(host, BIT(id % 32),
HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id / 32));
}
static void host1x_intr_disable_syncpt_intr(struct host1x *host,
unsigned int id)
{
host1x_sync_writel(host, BIT(id % 32),
HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id / 32));
host1x_sync_writel(host, BIT(id % 32),
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id / 32));
}
static const struct host1x_intr_ops host1x_intr_ops = {
.init_host_sync = host1x_intr_init_host_sync,
.set_syncpt_threshold = host1x_intr_set_syncpt_threshold,
.enable_syncpt_intr = host1x_intr_enable_syncpt_intr,
.disable_syncpt_intr = host1x_intr_disable_syncpt_intr,
.disable_all_syncpt_intrs = host1x_intr_disable_all_syncpt_intrs,
};
| linux-master | drivers/gpu/host1x/hw/intr_hw.c |
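The interrupt helpers above address one status register per 32 syncpoints and then fan out to per-syncpoint handling with for_each_set_bit(). The short sketch below (hypothetical names, not part of the driver) isolates that dispatch step: given one 32-bit status word and its bank index, call a handler for every pending syncpoint ID.

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative only: dispatch a handler for each pending bit in one bank. */
static void example_dispatch_status(unsigned long status, unsigned int bank,
				    void (*handle)(unsigned int id))
{
	unsigned int bit;

	for_each_set_bit(bit, &status, 32)
		handle(bank * 32 + bit);
}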