// SPDX-License-Identifier: GPL-2.0-only
/*
* helper functions for SG DMA video4linux capture buffers
*
 * The functions expect the hardware to be able to scatter-gather
* (i.e. the buffers are not linear in physical memory, but fragmented
* into PAGE_SIZE chunks). They also assume the driver does not need
* to touch the video data.
*
* (c) 2007 Mauro Carvalho Chehab, <[email protected]>
*
 * Largely based on video-buf, originally written by:
* (c) 2001,02 Gerd Knorr <[email protected]>
* (c) 2006 Mauro Carvalho Chehab, <[email protected]>
* (c) 2006 Ted Walther and John Sokol
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pgtable.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <media/videobuf-dma-sg.h>
#define MAGIC_DMABUF 0x19721112
#define MAGIC_SG_MEM 0x17890714
#define MAGIC_CHECK(is, should) \
if (unlikely((is) != (should))) { \
printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
is, should); \
BUG(); \
}
static int debug;
module_param(debug, int, 0644);
MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <[email protected]>");
MODULE_LICENSE("GPL");
#define dprintk(level, fmt, arg...) \
if (debug >= level) \
printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
/* --------------------------------------------------------------------- */
/*
* Return a scatterlist for some page-aligned vmalloc()'ed memory
 * block (NULL on errors). Memory for the scatterlist is allocated
 * with vzalloc(); the caller must free it with vfree().
*/
static struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt,
int nr_pages)
{
struct scatterlist *sglist;
struct page *pg;
int i;
sglist = vzalloc(array_size(nr_pages, sizeof(*sglist)));
if (NULL == sglist)
return NULL;
sg_init_table(sglist, nr_pages);
for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
pg = vmalloc_to_page(virt);
if (NULL == pg)
goto err;
BUG_ON(PageHighMem(pg));
sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
}
return sglist;
err:
vfree(sglist);
return NULL;
}
/*
 * Return a scatterlist for an array of user pages (NULL on errors).
 * Memory for the scatterlist is allocated with vmalloc(); the caller
 * must free it with vfree().
*/
static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
int nr_pages, int offset, size_t size)
{
struct scatterlist *sglist;
int i;
if (NULL == pages[0])
return NULL;
sglist = vmalloc(array_size(nr_pages, sizeof(*sglist)));
if (NULL == sglist)
return NULL;
sg_init_table(sglist, nr_pages);
if (PageHighMem(pages[0]))
/* DMA to highmem pages might not work */
goto highmem;
sg_set_page(&sglist[0], pages[0],
min_t(size_t, PAGE_SIZE - offset, size), offset);
size -= min_t(size_t, PAGE_SIZE - offset, size);
for (i = 1; i < nr_pages; i++) {
if (NULL == pages[i])
goto nopage;
if (PageHighMem(pages[i]))
goto highmem;
sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0);
size -= min_t(size_t, PAGE_SIZE, size);
}
return sglist;
nopage:
dprintk(2, "sgl: oops - no page\n");
vfree(sglist);
return NULL;
highmem:
dprintk(2, "sgl: oops - highmem page\n");
vfree(sglist);
return NULL;
}
/* --------------------------------------------------------------------- */
struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf)
{
struct videobuf_dma_sg_memory *mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
return &mem->dma;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma);
static void videobuf_dma_init(struct videobuf_dmabuf *dma)
{
memset(dma, 0, sizeof(*dma));
dma->magic = MAGIC_DMABUF;
}
static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
int direction, unsigned long data, unsigned long size)
{
unsigned int gup_flags = FOLL_LONGTERM;
unsigned long first, last;
int err;
dma->direction = direction;
switch (dma->direction) {
case DMA_FROM_DEVICE:
gup_flags |= FOLL_WRITE;
break;
case DMA_TO_DEVICE:
break;
default:
BUG();
}
first = (data & PAGE_MASK) >> PAGE_SHIFT;
last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
dma->offset = data & ~PAGE_MASK;
dma->size = size;
dma->nr_pages = last-first+1;
dma->pages = kmalloc_array(dma->nr_pages, sizeof(struct page *),
GFP_KERNEL);
if (NULL == dma->pages)
return -ENOMEM;
dprintk(1, "init user [0x%lx+0x%lx => %lu pages]\n",
data, size, dma->nr_pages);
err = pin_user_pages(data & PAGE_MASK, dma->nr_pages, gup_flags,
dma->pages);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
dprintk(1, "pin_user_pages: err=%d [%lu]\n", err,
dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
return 0;
}
static int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
unsigned long data, unsigned long size)
{
int ret;
mmap_read_lock(current->mm);
ret = videobuf_dma_init_user_locked(dma, direction, data, size);
mmap_read_unlock(current->mm);
return ret;
}
static int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
unsigned long nr_pages)
{
int i;
dprintk(1, "init kernel [%lu pages]\n", nr_pages);
dma->direction = direction;
dma->vaddr_pages = kcalloc(nr_pages, sizeof(*dma->vaddr_pages),
GFP_KERNEL);
if (!dma->vaddr_pages)
return -ENOMEM;
dma->dma_addr = kcalloc(nr_pages, sizeof(*dma->dma_addr), GFP_KERNEL);
if (!dma->dma_addr) {
kfree(dma->vaddr_pages);
return -ENOMEM;
}
for (i = 0; i < nr_pages; i++) {
void *addr;
addr = dma_alloc_coherent(dma->dev, PAGE_SIZE,
&(dma->dma_addr[i]), GFP_KERNEL);
if (addr == NULL)
goto out_free_pages;
dma->vaddr_pages[i] = virt_to_page(addr);
}
dma->vaddr = vmap(dma->vaddr_pages, nr_pages, VM_MAP | VM_IOREMAP,
PAGE_KERNEL);
if (NULL == dma->vaddr) {
dprintk(1, "vmalloc_32(%lu pages) failed\n", nr_pages);
goto out_free_pages;
}
dprintk(1, "vmalloc is at addr %p, size=%lu\n",
dma->vaddr, nr_pages << PAGE_SHIFT);
memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT);
dma->nr_pages = nr_pages;
return 0;
out_free_pages:
while (i > 0) {
void *addr;
i--;
addr = page_address(dma->vaddr_pages[i]);
dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]);
}
kfree(dma->dma_addr);
dma->dma_addr = NULL;
kfree(dma->vaddr_pages);
dma->vaddr_pages = NULL;
return -ENOMEM;
}
static int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
dma_addr_t addr, unsigned long nr_pages)
{
dprintk(1, "init overlay [%lu pages @ bus 0x%lx]\n",
nr_pages, (unsigned long)addr);
dma->direction = direction;
if (0 == addr)
return -EINVAL;
dma->bus_addr = addr;
dma->nr_pages = nr_pages;
return 0;
}
static int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
{
MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
BUG_ON(0 == dma->nr_pages);
if (dma->pages) {
dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
dma->offset, dma->size);
}
if (dma->vaddr) {
dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr,
dma->nr_pages);
}
if (dma->bus_addr) {
dma->sglist = vmalloc(sizeof(*dma->sglist));
if (NULL != dma->sglist) {
dma->sglen = 1;
sg_dma_address(&dma->sglist[0]) = dma->bus_addr
& PAGE_MASK;
dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
}
}
if (NULL == dma->sglist) {
dprintk(1, "scatterlist is NULL\n");
return -ENOMEM;
}
if (!dma->bus_addr) {
dma->sglen = dma_map_sg(dev, dma->sglist,
dma->nr_pages, dma->direction);
if (0 == dma->sglen) {
printk(KERN_WARNING
"%s: videobuf_map_sg failed\n", __func__);
vfree(dma->sglist);
dma->sglist = NULL;
dma->sglen = 0;
return -ENOMEM;
}
}
return 0;
}
int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
{
MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
if (!dma->sglen)
return 0;
dma_unmap_sg(dev, dma->sglist, dma->nr_pages, dma->direction);
vfree(dma->sglist);
dma->sglist = NULL;
dma->sglen = 0;
return 0;
}
EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
int videobuf_dma_free(struct videobuf_dmabuf *dma)
{
int i;
MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
BUG_ON(dma->sglen);
if (dma->pages) {
unpin_user_pages_dirty_lock(dma->pages, dma->nr_pages,
dma->direction == DMA_FROM_DEVICE);
kfree(dma->pages);
dma->pages = NULL;
}
if (dma->dma_addr) {
for (i = 0; i < dma->nr_pages; i++) {
void *addr;
addr = page_address(dma->vaddr_pages[i]);
dma_free_coherent(dma->dev, PAGE_SIZE, addr,
dma->dma_addr[i]);
}
kfree(dma->dma_addr);
dma->dma_addr = NULL;
kfree(dma->vaddr_pages);
dma->vaddr_pages = NULL;
vunmap(dma->vaddr);
dma->vaddr = NULL;
}
if (dma->bus_addr)
dma->bus_addr = 0;
dma->direction = DMA_NONE;
return 0;
}
EXPORT_SYMBOL_GPL(videobuf_dma_free);
/* --------------------------------------------------------------------- */
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
map->count++;
}
static void videobuf_vm_close(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
struct videobuf_queue *q = map->q;
struct videobuf_dma_sg_memory *mem;
int i;
dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
map->count--;
if (0 == map->count) {
dprintk(1, "munmap %p q=%p\n", map, q);
videobuf_queue_lock(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
mem = q->bufs[i]->priv;
if (!mem)
continue;
MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
if (q->bufs[i]->map != map)
continue;
q->bufs[i]->map = NULL;
q->bufs[i]->baddr = 0;
q->ops->buf_release(q, q->bufs[i]);
}
videobuf_queue_unlock(q);
kfree(map);
}
}
/*
 * Get an anonymous page for the mapping. Make sure we can DMA to that
 * memory location with 32-bit PCI devices (i.e. don't use highmem for
 * now ...). Bounce buffers don't work very well at the data rates
 * video capture requires.
*/
static vm_fault_t videobuf_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page;
dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
vmf->address, vma->vm_start, vma->vm_end);
page = alloc_page(GFP_USER | __GFP_DMA32);
if (!page)
return VM_FAULT_OOM;
clear_user_highpage(page, vmf->address);
vmf->page = page;
return 0;
}
static const struct vm_operations_struct videobuf_vm_ops = {
.open = videobuf_vm_open,
.close = videobuf_vm_close,
.fault = videobuf_vm_fault,
};
/* ---------------------------------------------------------------------
* SG handlers for the generic methods
*/
/* The allocated area consists of 3 parts:
struct videobuf_buffer
struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
struct videobuf_dma_sg_memory
*/
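/*
 * Illustrative layout sketch of that single allocation (hypothetical
 * diagram; offsets follow __videobuf_alloc_vb() below):
 *
 *	vb -------> +--------------------------------+
 *	            | struct videobuf_buffer         |
 *	            | <driver>_buffer remainder      |  "size" bytes total
 *	vb->priv -> +--------------------------------+  = (char *)vb + size
 *	            | struct videobuf_dma_sg_memory  |
 *	            +--------------------------------+
 */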
static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
{
struct videobuf_dma_sg_memory *mem;
struct videobuf_buffer *vb;
vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
if (!vb)
return vb;
mem = vb->priv = ((char *)vb) + size;
mem->magic = MAGIC_SG_MEM;
videobuf_dma_init(&mem->dma);
dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
__func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
mem, (long)sizeof(*mem));
return vb;
}
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_sg_memory *mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
return mem->dma.vaddr;
}
static int __videobuf_iolock(struct videobuf_queue *q,
struct videobuf_buffer *vb,
struct v4l2_framebuffer *fbuf)
{
struct videobuf_dma_sg_memory *mem = vb->priv;
unsigned long pages;
dma_addr_t bus;
int err;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
if (!mem->dma.dev)
mem->dma.dev = q->dev;
else
WARN_ON(mem->dma.dev != q->dev);
switch (vb->memory) {
case V4L2_MEMORY_MMAP:
case V4L2_MEMORY_USERPTR:
if (0 == vb->baddr) {
/* no userspace addr -- kernel bounce buffer */
pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
err = videobuf_dma_init_kernel(&mem->dma,
DMA_FROM_DEVICE,
pages);
if (0 != err)
return err;
} else if (vb->memory == V4L2_MEMORY_USERPTR) {
/* dma directly to userspace */
err = videobuf_dma_init_user(&mem->dma,
DMA_FROM_DEVICE,
vb->baddr, vb->bsize);
if (0 != err)
return err;
} else {
/* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP
buffers can only be called from videobuf_qbuf;
we take current->mm->mmap_lock there, so to
prevent a locking inversion we don't take it here */
err = videobuf_dma_init_user_locked(&mem->dma,
DMA_FROM_DEVICE,
vb->baddr, vb->bsize);
if (0 != err)
return err;
}
break;
case V4L2_MEMORY_OVERLAY:
if (NULL == fbuf)
return -EINVAL;
/* FIXME: need sanity checks for vb->boff */
/*
* Using a double cast to avoid compiler warnings when
* building for PAE. Compiler doesn't like direct casting
* of a 32 bit ptr to 64 bit integer.
*/
bus = (dma_addr_t)(unsigned long)fbuf->base + vb->boff;
pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
err = videobuf_dma_init_overlay(&mem->dma, DMA_FROM_DEVICE,
bus, pages);
if (0 != err)
return err;
break;
default:
BUG();
}
err = videobuf_dma_map(q->dev, &mem->dma);
if (0 != err)
return err;
return 0;
}
static int __videobuf_sync(struct videobuf_queue *q,
struct videobuf_buffer *buf)
{
struct videobuf_dma_sg_memory *mem = buf->priv;
BUG_ON(!mem || !mem->dma.sglen);
MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
MAGIC_CHECK(mem->dma.magic, MAGIC_DMABUF);
dma_sync_sg_for_cpu(q->dev, mem->dma.sglist,
mem->dma.nr_pages, mem->dma.direction);
return 0;
}
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
struct videobuf_buffer *buf,
struct vm_area_struct *vma)
{
struct videobuf_dma_sg_memory *mem = buf->priv;
struct videobuf_mapping *map;
unsigned int first, last, size = 0, i;
int retval;
retval = -EINVAL;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
/* look for first buffer to map */
for (first = 0; first < VIDEO_MAX_FRAME; first++) {
if (buf == q->bufs[first]) {
size = PAGE_ALIGN(q->bufs[first]->bsize);
break;
}
}
/* paranoia, should never happen since buf is always valid. */
if (!size) {
dprintk(1, "mmap app bug: offset invalid [offset=0x%lx]\n",
(vma->vm_pgoff << PAGE_SHIFT));
goto done;
}
last = first;
/* create mapping + update buffer list */
retval = -ENOMEM;
map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (NULL == map)
goto done;
size = 0;
for (i = first; i <= last; i++) {
if (NULL == q->bufs[i])
continue;
q->bufs[i]->map = map;
q->bufs[i]->baddr = vma->vm_start + size;
size += PAGE_ALIGN(q->bufs[i]->bsize);
}
map->count = 1;
map->q = q;
vma->vm_ops = &videobuf_vm_ops;
/* using shared anonymous pages */
vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
vma->vm_private_data = map;
dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
retval = 0;
done:
return retval;
}
static struct videobuf_qtype_ops sg_ops = {
.magic = MAGIC_QTYPE_OPS,
.alloc_vb = __videobuf_alloc_vb,
.iolock = __videobuf_iolock,
.sync = __videobuf_sync,
.mmap_mapper = __videobuf_mmap_mapper,
.vaddr = __videobuf_to_vaddr,
};
void *videobuf_sg_alloc(size_t size)
{
struct videobuf_queue q;
/* Required to make the generic handler call __videobuf_alloc_vb */
q.int_ops = &sg_ops;
q.msize = size;
return videobuf_alloc_vb(&q);
}
EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
void videobuf_queue_sg_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
void *priv,
struct mutex *ext_lock)
{
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
priv, &sg_ops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
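/*
 * Usage sketch, with hypothetical mydrv_/mydev names: a PCI capture
 * driver would typically initialize its queue like this:
 *
 *	videobuf_queue_sg_init(&mydev->vidq, &mydrv_video_qops,
 *			       &mydev->pci->dev, &mydev->slock,
 *			       V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *			       V4L2_FIELD_INTERLACED,
 *			       sizeof(struct mydrv_buffer), mydev, NULL);
 */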
/* end of linux-master: drivers/media/v4l2-core/videobuf-dma-sg.c */
// SPDX-License-Identifier: GPL-2.0
/*
* V4L2 VP9 helpers.
*
* Copyright (C) 2021 Collabora, Ltd.
*
* Author: Andrzej Pietrasiewicz <[email protected]>
*/
#include <linux/module.h>
#include <media/v4l2-vp9.h>
const u8 v4l2_vp9_kf_y_mode_prob[10][10][9] = {
{
/* above = dc */
{ 137, 30, 42, 148, 151, 207, 70, 52, 91 }, /*left = dc */
{ 92, 45, 102, 136, 116, 180, 74, 90, 100 }, /*left = v */
{ 73, 32, 19, 187, 222, 215, 46, 34, 100 }, /*left = h */
{ 91, 30, 32, 116, 121, 186, 93, 86, 94 }, /*left = d45 */
{ 72, 35, 36, 149, 68, 206, 68, 63, 105 }, /*left = d135*/
{ 73, 31, 28, 138, 57, 124, 55, 122, 151 }, /*left = d117*/
{ 67, 23, 21, 140, 126, 197, 40, 37, 171 }, /*left = d153*/
{ 86, 27, 28, 128, 154, 212, 45, 43, 53 }, /*left = d207*/
{ 74, 32, 27, 107, 86, 160, 63, 134, 102 }, /*left = d63 */
{ 59, 67, 44, 140, 161, 202, 78, 67, 119 }, /*left = tm */
}, { /* above = v */
{ 63, 36, 126, 146, 123, 158, 60, 90, 96 }, /*left = dc */
{ 43, 46, 168, 134, 107, 128, 69, 142, 92 }, /*left = v */
{ 44, 29, 68, 159, 201, 177, 50, 57, 77 }, /*left = h */
{ 58, 38, 76, 114, 97, 172, 78, 133, 92 }, /*left = d45 */
{ 46, 41, 76, 140, 63, 184, 69, 112, 57 }, /*left = d135*/
{ 38, 32, 85, 140, 46, 112, 54, 151, 133 }, /*left = d117*/
{ 39, 27, 61, 131, 110, 175, 44, 75, 136 }, /*left = d153*/
{ 52, 30, 74, 113, 130, 175, 51, 64, 58 }, /*left = d207*/
{ 47, 35, 80, 100, 74, 143, 64, 163, 74 }, /*left = d63 */
{ 36, 61, 116, 114, 128, 162, 80, 125, 82 }, /*left = tm */
}, { /* above = h */
{ 82, 26, 26, 171, 208, 204, 44, 32, 105 }, /*left = dc */
{ 55, 44, 68, 166, 179, 192, 57, 57, 108 }, /*left = v */
{ 42, 26, 11, 199, 241, 228, 23, 15, 85 }, /*left = h */
{ 68, 42, 19, 131, 160, 199, 55, 52, 83 }, /*left = d45 */
{ 58, 50, 25, 139, 115, 232, 39, 52, 118 }, /*left = d135*/
{ 50, 35, 33, 153, 104, 162, 64, 59, 131 }, /*left = d117*/
{ 44, 24, 16, 150, 177, 202, 33, 19, 156 }, /*left = d153*/
{ 55, 27, 12, 153, 203, 218, 26, 27, 49 }, /*left = d207*/
{ 53, 49, 21, 110, 116, 168, 59, 80, 76 }, /*left = d63 */
{ 38, 72, 19, 168, 203, 212, 50, 50, 107 }, /*left = tm */
}, { /* above = d45 */
{ 103, 26, 36, 129, 132, 201, 83, 80, 93 }, /*left = dc */
{ 59, 38, 83, 112, 103, 162, 98, 136, 90 }, /*left = v */
{ 62, 30, 23, 158, 200, 207, 59, 57, 50 }, /*left = h */
{ 67, 30, 29, 84, 86, 191, 102, 91, 59 }, /*left = d45 */
{ 60, 32, 33, 112, 71, 220, 64, 89, 104 }, /*left = d135*/
{ 53, 26, 34, 130, 56, 149, 84, 120, 103 }, /*left = d117*/
{ 53, 21, 23, 133, 109, 210, 56, 77, 172 }, /*left = d153*/
{ 77, 19, 29, 112, 142, 228, 55, 66, 36 }, /*left = d207*/
{ 61, 29, 29, 93, 97, 165, 83, 175, 162 }, /*left = d63 */
{ 47, 47, 43, 114, 137, 181, 100, 99, 95 }, /*left = tm */
}, { /* above = d135 */
{ 69, 23, 29, 128, 83, 199, 46, 44, 101 }, /*left = dc */
{ 53, 40, 55, 139, 69, 183, 61, 80, 110 }, /*left = v */
{ 40, 29, 19, 161, 180, 207, 43, 24, 91 }, /*left = h */
{ 60, 34, 19, 105, 61, 198, 53, 64, 89 }, /*left = d45 */
{ 52, 31, 22, 158, 40, 209, 58, 62, 89 }, /*left = d135*/
{ 44, 31, 29, 147, 46, 158, 56, 102, 198 }, /*left = d117*/
{ 35, 19, 12, 135, 87, 209, 41, 45, 167 }, /*left = d153*/
{ 55, 25, 21, 118, 95, 215, 38, 39, 66 }, /*left = d207*/
{ 51, 38, 25, 113, 58, 164, 70, 93, 97 }, /*left = d63 */
{ 47, 54, 34, 146, 108, 203, 72, 103, 151 }, /*left = tm */
}, { /* above = d117 */
{ 64, 19, 37, 156, 66, 138, 49, 95, 133 }, /*left = dc */
{ 46, 27, 80, 150, 55, 124, 55, 121, 135 }, /*left = v */
{ 36, 23, 27, 165, 149, 166, 54, 64, 118 }, /*left = h */
{ 53, 21, 36, 131, 63, 163, 60, 109, 81 }, /*left = d45 */
{ 40, 26, 35, 154, 40, 185, 51, 97, 123 }, /*left = d135*/
{ 35, 19, 34, 179, 19, 97, 48, 129, 124 }, /*left = d117*/
{ 36, 20, 26, 136, 62, 164, 33, 77, 154 }, /*left = d153*/
{ 45, 18, 32, 130, 90, 157, 40, 79, 91 }, /*left = d207*/
{ 45, 26, 28, 129, 45, 129, 49, 147, 123 }, /*left = d63 */
{ 38, 44, 51, 136, 74, 162, 57, 97, 121 }, /*left = tm */
}, { /* above = d153 */
{ 75, 17, 22, 136, 138, 185, 32, 34, 166 }, /*left = dc */
{ 56, 39, 58, 133, 117, 173, 48, 53, 187 }, /*left = v */
{ 35, 21, 12, 161, 212, 207, 20, 23, 145 }, /*left = h */
{ 56, 29, 19, 117, 109, 181, 55, 68, 112 }, /*left = d45 */
{ 47, 29, 17, 153, 64, 220, 59, 51, 114 }, /*left = d135*/
{ 46, 16, 24, 136, 76, 147, 41, 64, 172 }, /*left = d117*/
{ 34, 17, 11, 108, 152, 187, 13, 15, 209 }, /*left = d153*/
{ 51, 24, 14, 115, 133, 209, 32, 26, 104 }, /*left = d207*/
{ 55, 30, 18, 122, 79, 179, 44, 88, 116 }, /*left = d63 */
{ 37, 49, 25, 129, 168, 164, 41, 54, 148 }, /*left = tm */
}, { /* above = d207 */
{ 82, 22, 32, 127, 143, 213, 39, 41, 70 }, /*left = dc */
{ 62, 44, 61, 123, 105, 189, 48, 57, 64 }, /*left = v */
{ 47, 25, 17, 175, 222, 220, 24, 30, 86 }, /*left = h */
{ 68, 36, 17, 106, 102, 206, 59, 74, 74 }, /*left = d45 */
{ 57, 39, 23, 151, 68, 216, 55, 63, 58 }, /*left = d135*/
{ 49, 30, 35, 141, 70, 168, 82, 40, 115 }, /*left = d117*/
{ 51, 25, 15, 136, 129, 202, 38, 35, 139 }, /*left = d153*/
{ 68, 26, 16, 111, 141, 215, 29, 28, 28 }, /*left = d207*/
{ 59, 39, 19, 114, 75, 180, 77, 104, 42 }, /*left = d63 */
{ 40, 61, 26, 126, 152, 206, 61, 59, 93 }, /*left = tm */
}, { /* above = d63 */
{ 78, 23, 39, 111, 117, 170, 74, 124, 94 }, /*left = dc */
{ 48, 34, 86, 101, 92, 146, 78, 179, 134 }, /*left = v */
{ 47, 22, 24, 138, 187, 178, 68, 69, 59 }, /*left = h */
{ 56, 25, 33, 105, 112, 187, 95, 177, 129 }, /*left = d45 */
{ 48, 31, 27, 114, 63, 183, 82, 116, 56 }, /*left = d135*/
{ 43, 28, 37, 121, 63, 123, 61, 192, 169 }, /*left = d117*/
{ 42, 17, 24, 109, 97, 177, 56, 76, 122 }, /*left = d153*/
{ 58, 18, 28, 105, 139, 182, 70, 92, 63 }, /*left = d207*/
{ 46, 23, 32, 74, 86, 150, 67, 183, 88 }, /*left = d63 */
{ 36, 38, 48, 92, 122, 165, 88, 137, 91 }, /*left = tm */
}, { /* above = tm */
{ 65, 70, 60, 155, 159, 199, 61, 60, 81 }, /*left = dc */
{ 44, 78, 115, 132, 119, 173, 71, 112, 93 }, /*left = v */
{ 39, 38, 21, 184, 227, 206, 42, 32, 64 }, /*left = h */
{ 58, 47, 36, 124, 137, 193, 80, 82, 78 }, /*left = d45 */
{ 49, 50, 35, 144, 95, 205, 63, 78, 59 }, /*left = d135*/
{ 41, 53, 52, 148, 71, 142, 65, 128, 51 }, /*left = d117*/
{ 40, 36, 28, 143, 143, 202, 40, 55, 137 }, /*left = d153*/
{ 52, 34, 29, 129, 183, 227, 42, 35, 43 }, /*left = d207*/
{ 42, 44, 44, 104, 105, 164, 64, 130, 80 }, /*left = d63 */
{ 43, 81, 53, 140, 169, 204, 68, 84, 72 }, /*left = tm */
}
};
EXPORT_SYMBOL_GPL(v4l2_vp9_kf_y_mode_prob);
const u8 v4l2_vp9_kf_partition_probs[16][3] = {
/* 8x8 -> 4x4 */
{ 158, 97, 94 }, /* a/l both not split */
{ 93, 24, 99 }, /* a split, l not split */
{ 85, 119, 44 }, /* l split, a not split */
{ 62, 59, 67 }, /* a/l both split */
/* 16x16 -> 8x8 */
{ 149, 53, 53 }, /* a/l both not split */
{ 94, 20, 48 }, /* a split, l not split */
{ 83, 53, 24 }, /* l split, a not split */
{ 52, 18, 18 }, /* a/l both split */
/* 32x32 -> 16x16 */
{ 150, 40, 39 }, /* a/l both not split */
{ 78, 12, 26 }, /* a split, l not split */
{ 67, 33, 11 }, /* l split, a not split */
{ 24, 7, 5 }, /* a/l both split */
/* 64x64 -> 32x32 */
{ 174, 35, 49 }, /* a/l both not split */
{ 68, 11, 27 }, /* a split, l not split */
{ 57, 15, 9 }, /* l split, a not split */
{ 12, 3, 3 }, /* a/l both split */
};
EXPORT_SYMBOL_GPL(v4l2_vp9_kf_partition_probs);
const u8 v4l2_vp9_kf_uv_mode_prob[10][9] = {
{ 144, 11, 54, 157, 195, 130, 46, 58, 108 }, /* y = dc */
{ 118, 15, 123, 148, 131, 101, 44, 93, 131 }, /* y = v */
{ 113, 12, 23, 188, 226, 142, 26, 32, 125 }, /* y = h */
{ 120, 11, 50, 123, 163, 135, 64, 77, 103 }, /* y = d45 */
{ 113, 9, 36, 155, 111, 157, 32, 44, 161 }, /* y = d135 */
{ 116, 9, 55, 176, 76, 96, 37, 61, 149 }, /* y = d117 */
{ 115, 9, 28, 141, 161, 167, 21, 25, 193 }, /* y = d153 */
{ 120, 12, 32, 145, 195, 142, 32, 38, 86 }, /* y = d207 */
{ 116, 12, 64, 120, 140, 125, 49, 115, 121 }, /* y = d63 */
{ 102, 19, 66, 162, 182, 122, 35, 59, 128 } /* y = tm */
};
EXPORT_SYMBOL_GPL(v4l2_vp9_kf_uv_mode_prob);
const struct v4l2_vp9_frame_context v4l2_vp9_default_probs = {
.tx8 = {
{ 100 },
{ 66 },
},
.tx16 = {
{ 20, 152 },
{ 15, 101 },
},
.tx32 = {
{ 3, 136, 37 },
{ 5, 52, 13 },
},
.coef = {
{ /* tx = 4x4 */
{ /* block Type 0 */
{ /* Intra */
{ /* Coeff Band 0 */
{ 195, 29, 183 },
{ 84, 49, 136 },
{ 8, 42, 71 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 31, 107, 169 },
{ 35, 99, 159 },
{ 17, 82, 140 },
{ 8, 66, 114 },
{ 2, 44, 76 },
{ 1, 19, 32 },
},
{ /* Coeff Band 2 */
{ 40, 132, 201 },
{ 29, 114, 187 },
{ 13, 91, 157 },
{ 7, 75, 127 },
{ 3, 58, 95 },
{ 1, 28, 47 },
},
{ /* Coeff Band 3 */
{ 69, 142, 221 },
{ 42, 122, 201 },
{ 15, 91, 159 },
{ 6, 67, 121 },
{ 1, 42, 77 },
{ 1, 17, 31 },
},
{ /* Coeff Band 4 */
{ 102, 148, 228 },
{ 67, 117, 204 },
{ 17, 82, 154 },
{ 6, 59, 114 },
{ 2, 39, 75 },
{ 1, 15, 29 },
},
{ /* Coeff Band 5 */
{ 156, 57, 233 },
{ 119, 57, 212 },
{ 58, 48, 163 },
{ 29, 40, 124 },
{ 12, 30, 81 },
{ 3, 12, 31 }
},
},
{ /* Inter */
{ /* Coeff Band 0 */
{ 191, 107, 226 },
{ 124, 117, 204 },
{ 25, 99, 155 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 29, 148, 210 },
{ 37, 126, 194 },
{ 8, 93, 157 },
{ 2, 68, 118 },
{ 1, 39, 69 },
{ 1, 17, 33 },
},
{ /* Coeff Band 2 */
{ 41, 151, 213 },
{ 27, 123, 193 },
{ 3, 82, 144 },
{ 1, 58, 105 },
{ 1, 32, 60 },
{ 1, 13, 26 },
},
{ /* Coeff Band 3 */
{ 59, 159, 220 },
{ 23, 126, 198 },
{ 4, 88, 151 },
{ 1, 66, 114 },
{ 1, 38, 71 },
{ 1, 18, 34 },
},
{ /* Coeff Band 4 */
{ 114, 136, 232 },
{ 51, 114, 207 },
{ 11, 83, 155 },
{ 3, 56, 105 },
{ 1, 33, 65 },
{ 1, 17, 34 },
},
{ /* Coeff Band 5 */
{ 149, 65, 234 },
{ 121, 57, 215 },
{ 61, 49, 166 },
{ 28, 36, 114 },
{ 12, 25, 76 },
{ 3, 16, 42 },
},
},
},
{ /* block Type 1 */
{ /* Intra */
{ /* Coeff Band 0 */
{ 214, 49, 220 },
{ 132, 63, 188 },
{ 42, 65, 137 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 85, 137, 221 },
{ 104, 131, 216 },
{ 49, 111, 192 },
{ 21, 87, 155 },
{ 2, 49, 87 },
{ 1, 16, 28 },
},
{ /* Coeff Band 2 */
{ 89, 163, 230 },
{ 90, 137, 220 },
{ 29, 100, 183 },
{ 10, 70, 135 },
{ 2, 42, 81 },
{ 1, 17, 33 },
},
{ /* Coeff Band 3 */
{ 108, 167, 237 },
{ 55, 133, 222 },
{ 15, 97, 179 },
{ 4, 72, 135 },
{ 1, 45, 85 },
{ 1, 19, 38 },
},
{ /* Coeff Band 4 */
{ 124, 146, 240 },
{ 66, 124, 224 },
{ 17, 88, 175 },
{ 4, 58, 122 },
{ 1, 36, 75 },
{ 1, 18, 37 },
},
{ /* Coeff Band 5 */
{ 141, 79, 241 },
{ 126, 70, 227 },
{ 66, 58, 182 },
{ 30, 44, 136 },
{ 12, 34, 96 },
{ 2, 20, 47 },
},
},
{ /* Inter */
{ /* Coeff Band 0 */
{ 229, 99, 249 },
{ 143, 111, 235 },
{ 46, 109, 192 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 82, 158, 236 },
{ 94, 146, 224 },
{ 25, 117, 191 },
{ 9, 87, 149 },
{ 3, 56, 99 },
{ 1, 33, 57 },
},
{ /* Coeff Band 2 */
{ 83, 167, 237 },
{ 68, 145, 222 },
{ 10, 103, 177 },
{ 2, 72, 131 },
{ 1, 41, 79 },
{ 1, 20, 39 },
},
{ /* Coeff Band 3 */
{ 99, 167, 239 },
{ 47, 141, 224 },
{ 10, 104, 178 },
{ 2, 73, 133 },
{ 1, 44, 85 },
{ 1, 22, 47 },
},
{ /* Coeff Band 4 */
{ 127, 145, 243 },
{ 71, 129, 228 },
{ 17, 93, 177 },
{ 3, 61, 124 },
{ 1, 41, 84 },
{ 1, 21, 52 },
},
{ /* Coeff Band 5 */
{ 157, 78, 244 },
{ 140, 72, 231 },
{ 69, 58, 184 },
{ 31, 44, 137 },
{ 14, 38, 105 },
{ 8, 23, 61 },
},
},
},
},
{ /* tx = 8x8 */
{ /* block Type 0 */
{ /* Intra */
{ /* Coeff Band 0 */
{ 125, 34, 187 },
{ 52, 41, 133 },
{ 6, 31, 56 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 37, 109, 153 },
{ 51, 102, 147 },
{ 23, 87, 128 },
{ 8, 67, 101 },
{ 1, 41, 63 },
{ 1, 19, 29 },
},
{ /* Coeff Band 2 */
{ 31, 154, 185 },
{ 17, 127, 175 },
{ 6, 96, 145 },
{ 2, 73, 114 },
{ 1, 51, 82 },
{ 1, 28, 45 },
},
{ /* Coeff Band 3 */
{ 23, 163, 200 },
{ 10, 131, 185 },
{ 2, 93, 148 },
{ 1, 67, 111 },
{ 1, 41, 69 },
{ 1, 14, 24 },
},
{ /* Coeff Band 4 */
{ 29, 176, 217 },
{ 12, 145, 201 },
{ 3, 101, 156 },
{ 1, 69, 111 },
{ 1, 39, 63 },
{ 1, 14, 23 },
},
{ /* Coeff Band 5 */
{ 57, 192, 233 },
{ 25, 154, 215 },
{ 6, 109, 167 },
{ 3, 78, 118 },
{ 1, 48, 69 },
{ 1, 21, 29 },
},
},
{ /* Inter */
{ /* Coeff Band 0 */
{ 202, 105, 245 },
{ 108, 106, 216 },
{ 18, 90, 144 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 33, 172, 219 },
{ 64, 149, 206 },
{ 14, 117, 177 },
{ 5, 90, 141 },
{ 2, 61, 95 },
{ 1, 37, 57 },
},
{ /* Coeff Band 2 */
{ 33, 179, 220 },
{ 11, 140, 198 },
{ 1, 89, 148 },
{ 1, 60, 104 },
{ 1, 33, 57 },
{ 1, 12, 21 },
},
{ /* Coeff Band 3 */
{ 30, 181, 221 },
{ 8, 141, 198 },
{ 1, 87, 145 },
{ 1, 58, 100 },
{ 1, 31, 55 },
{ 1, 12, 20 },
},
{ /* Coeff Band 4 */
{ 32, 186, 224 },
{ 7, 142, 198 },
{ 1, 86, 143 },
{ 1, 58, 100 },
{ 1, 31, 55 },
{ 1, 12, 22 },
},
{ /* Coeff Band 5 */
{ 57, 192, 227 },
{ 20, 143, 204 },
{ 3, 96, 154 },
{ 1, 68, 112 },
{ 1, 42, 69 },
{ 1, 19, 32 },
},
},
},
{ /* block Type 1 */
{ /* Intra */
{ /* Coeff Band 0 */
{ 212, 35, 215 },
{ 113, 47, 169 },
{ 29, 48, 105 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 74, 129, 203 },
{ 106, 120, 203 },
{ 49, 107, 178 },
{ 19, 84, 144 },
{ 4, 50, 84 },
{ 1, 15, 25 },
},
{ /* Coeff Band 2 */
{ 71, 172, 217 },
{ 44, 141, 209 },
{ 15, 102, 173 },
{ 6, 76, 133 },
{ 2, 51, 89 },
{ 1, 24, 42 },
},
{ /* Coeff Band 3 */
{ 64, 185, 231 },
{ 31, 148, 216 },
{ 8, 103, 175 },
{ 3, 74, 131 },
{ 1, 46, 81 },
{ 1, 18, 30 },
},
{ /* Coeff Band 4 */
{ 65, 196, 235 },
{ 25, 157, 221 },
{ 5, 105, 174 },
{ 1, 67, 120 },
{ 1, 38, 69 },
{ 1, 15, 30 },
},
{ /* Coeff Band 5 */
{ 65, 204, 238 },
{ 30, 156, 224 },
{ 7, 107, 177 },
{ 2, 70, 124 },
{ 1, 42, 73 },
{ 1, 18, 34 },
},
},
{ /* Inter */
{ /* Coeff Band 0 */
{ 225, 86, 251 },
{ 144, 104, 235 },
{ 42, 99, 181 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 85, 175, 239 },
{ 112, 165, 229 },
{ 29, 136, 200 },
{ 12, 103, 162 },
{ 6, 77, 123 },
{ 2, 53, 84 },
},
{ /* Coeff Band 2 */
{ 75, 183, 239 },
{ 30, 155, 221 },
{ 3, 106, 171 },
{ 1, 74, 128 },
{ 1, 44, 76 },
{ 1, 17, 28 },
},
{ /* Coeff Band 3 */
{ 73, 185, 240 },
{ 27, 159, 222 },
{ 2, 107, 172 },
{ 1, 75, 127 },
{ 1, 42, 73 },
{ 1, 17, 29 },
},
{ /* Coeff Band 4 */
{ 62, 190, 238 },
{ 21, 159, 222 },
{ 2, 107, 172 },
{ 1, 72, 122 },
{ 1, 40, 71 },
{ 1, 18, 32 },
},
{ /* Coeff Band 5 */
{ 61, 199, 240 },
{ 27, 161, 226 },
{ 4, 113, 180 },
{ 1, 76, 129 },
{ 1, 46, 80 },
{ 1, 23, 41 },
},
},
},
},
{ /* tx = 16x16 */
{ /* block Type 0 */
{ /* Intra */
{ /* Coeff Band 0 */
{ 7, 27, 153 },
{ 5, 30, 95 },
{ 1, 16, 30 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 50, 75, 127 },
{ 57, 75, 124 },
{ 27, 67, 108 },
{ 10, 54, 86 },
{ 1, 33, 52 },
{ 1, 12, 18 },
},
{ /* Coeff Band 2 */
{ 43, 125, 151 },
{ 26, 108, 148 },
{ 7, 83, 122 },
{ 2, 59, 89 },
{ 1, 38, 60 },
{ 1, 17, 27 },
},
{ /* Coeff Band 3 */
{ 23, 144, 163 },
{ 13, 112, 154 },
{ 2, 75, 117 },
{ 1, 50, 81 },
{ 1, 31, 51 },
{ 1, 14, 23 },
},
{ /* Coeff Band 4 */
{ 18, 162, 185 },
{ 6, 123, 171 },
{ 1, 78, 125 },
{ 1, 51, 86 },
{ 1, 31, 54 },
{ 1, 14, 23 },
},
{ /* Coeff Band 5 */
{ 15, 199, 227 },
{ 3, 150, 204 },
{ 1, 91, 146 },
{ 1, 55, 95 },
{ 1, 30, 53 },
{ 1, 11, 20 },
}
},
{ /* Inter */
{ /* Coeff Band 0 */
{ 19, 55, 240 },
{ 19, 59, 196 },
{ 3, 52, 105 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 41, 166, 207 },
{ 104, 153, 199 },
{ 31, 123, 181 },
{ 14, 101, 152 },
{ 5, 72, 106 },
{ 1, 36, 52 },
},
{ /* Coeff Band 2 */
{ 35, 176, 211 },
{ 12, 131, 190 },
{ 2, 88, 144 },
{ 1, 60, 101 },
{ 1, 36, 60 },
{ 1, 16, 28 },
},
{ /* Coeff Band 3 */
{ 28, 183, 213 },
{ 8, 134, 191 },
{ 1, 86, 142 },
{ 1, 56, 96 },
{ 1, 30, 53 },
{ 1, 12, 20 },
},
{ /* Coeff Band 4 */
{ 20, 190, 215 },
{ 4, 135, 192 },
{ 1, 84, 139 },
{ 1, 53, 91 },
{ 1, 28, 49 },
{ 1, 11, 20 },
},
{ /* Coeff Band 5 */
{ 13, 196, 216 },
{ 2, 137, 192 },
{ 1, 86, 143 },
{ 1, 57, 99 },
{ 1, 32, 56 },
{ 1, 13, 24 },
},
},
},
{ /* block Type 1 */
{ /* Intra */
{ /* Coeff Band 0 */
{ 211, 29, 217 },
{ 96, 47, 156 },
{ 22, 43, 87 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 78, 120, 193 },
{ 111, 116, 186 },
{ 46, 102, 164 },
{ 15, 80, 128 },
{ 2, 49, 76 },
{ 1, 18, 28 },
},
{ /* Coeff Band 2 */
{ 71, 161, 203 },
{ 42, 132, 192 },
{ 10, 98, 150 },
{ 3, 69, 109 },
{ 1, 44, 70 },
{ 1, 18, 29 },
},
{ /* Coeff Band 3 */
{ 57, 186, 211 },
{ 30, 140, 196 },
{ 4, 93, 146 },
{ 1, 62, 102 },
{ 1, 38, 65 },
{ 1, 16, 27 },
},
{ /* Coeff Band 4 */
{ 47, 199, 217 },
{ 14, 145, 196 },
{ 1, 88, 142 },
{ 1, 57, 98 },
{ 1, 36, 62 },
{ 1, 15, 26 },
},
{ /* Coeff Band 5 */
{ 26, 219, 229 },
{ 5, 155, 207 },
{ 1, 94, 151 },
{ 1, 60, 104 },
{ 1, 36, 62 },
{ 1, 16, 28 },
}
},
{ /* Inter */
{ /* Coeff Band 0 */
{ 233, 29, 248 },
{ 146, 47, 220 },
{ 43, 52, 140 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 100, 163, 232 },
{ 179, 161, 222 },
{ 63, 142, 204 },
{ 37, 113, 174 },
{ 26, 89, 137 },
{ 18, 68, 97 },
},
{ /* Coeff Band 2 */
{ 85, 181, 230 },
{ 32, 146, 209 },
{ 7, 100, 164 },
{ 3, 71, 121 },
{ 1, 45, 77 },
{ 1, 18, 30 },
},
{ /* Coeff Band 3 */
{ 65, 187, 230 },
{ 20, 148, 207 },
{ 2, 97, 159 },
{ 1, 68, 116 },
{ 1, 40, 70 },
{ 1, 14, 29 },
},
{ /* Coeff Band 4 */
{ 40, 194, 227 },
{ 8, 147, 204 },
{ 1, 94, 155 },
{ 1, 65, 112 },
{ 1, 39, 66 },
{ 1, 14, 26 },
},
{ /* Coeff Band 5 */
{ 16, 208, 228 },
{ 3, 151, 207 },
{ 1, 98, 160 },
{ 1, 67, 117 },
{ 1, 41, 74 },
{ 1, 17, 31 },
},
},
},
},
{ /* tx = 32x32 */
{ /* block Type 0 */
{ /* Intra */
{ /* Coeff Band 0 */
{ 17, 38, 140 },
{ 7, 34, 80 },
{ 1, 17, 29 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 37, 75, 128 },
{ 41, 76, 128 },
{ 26, 66, 116 },
{ 12, 52, 94 },
{ 2, 32, 55 },
{ 1, 10, 16 },
},
{ /* Coeff Band 2 */
{ 50, 127, 154 },
{ 37, 109, 152 },
{ 16, 82, 121 },
{ 5, 59, 85 },
{ 1, 35, 54 },
{ 1, 13, 20 },
},
{ /* Coeff Band 3 */
{ 40, 142, 167 },
{ 17, 110, 157 },
{ 2, 71, 112 },
{ 1, 44, 72 },
{ 1, 27, 45 },
{ 1, 11, 17 },
},
{ /* Coeff Band 4 */
{ 30, 175, 188 },
{ 9, 124, 169 },
{ 1, 74, 116 },
{ 1, 48, 78 },
{ 1, 30, 49 },
{ 1, 11, 18 },
},
{ /* Coeff Band 5 */
{ 10, 222, 223 },
{ 2, 150, 194 },
{ 1, 83, 128 },
{ 1, 48, 79 },
{ 1, 27, 45 },
{ 1, 11, 17 },
},
},
{ /* Inter */
{ /* Coeff Band 0 */
{ 36, 41, 235 },
{ 29, 36, 193 },
{ 10, 27, 111 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 85, 165, 222 },
{ 177, 162, 215 },
{ 110, 135, 195 },
{ 57, 113, 168 },
{ 23, 83, 120 },
{ 10, 49, 61 },
},
{ /* Coeff Band 2 */
{ 85, 190, 223 },
{ 36, 139, 200 },
{ 5, 90, 146 },
{ 1, 60, 103 },
{ 1, 38, 65 },
{ 1, 18, 30 },
},
{ /* Coeff Band 3 */
{ 72, 202, 223 },
{ 23, 141, 199 },
{ 2, 86, 140 },
{ 1, 56, 97 },
{ 1, 36, 61 },
{ 1, 16, 27 },
},
{ /* Coeff Band 4 */
{ 55, 218, 225 },
{ 13, 145, 200 },
{ 1, 86, 141 },
{ 1, 57, 99 },
{ 1, 35, 61 },
{ 1, 13, 22 },
},
{ /* Coeff Band 5 */
{ 15, 235, 212 },
{ 1, 132, 184 },
{ 1, 84, 139 },
{ 1, 57, 97 },
{ 1, 34, 56 },
{ 1, 14, 23 },
},
},
},
{ /* block Type 1 */
{ /* Intra */
{ /* Coeff Band 0 */
{ 181, 21, 201 },
{ 61, 37, 123 },
{ 10, 38, 71 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 47, 106, 172 },
{ 95, 104, 173 },
{ 42, 93, 159 },
{ 18, 77, 131 },
{ 4, 50, 81 },
{ 1, 17, 23 },
},
{ /* Coeff Band 2 */
{ 62, 147, 199 },
{ 44, 130, 189 },
{ 28, 102, 154 },
{ 18, 75, 115 },
{ 2, 44, 65 },
{ 1, 12, 19 },
},
{ /* Coeff Band 3 */
{ 55, 153, 210 },
{ 24, 130, 194 },
{ 3, 93, 146 },
{ 1, 61, 97 },
{ 1, 31, 50 },
{ 1, 10, 16 },
},
{ /* Coeff Band 4 */
{ 49, 186, 223 },
{ 17, 148, 204 },
{ 1, 96, 142 },
{ 1, 53, 83 },
{ 1, 26, 44 },
{ 1, 11, 17 },
},
{ /* Coeff Band 5 */
{ 13, 217, 212 },
{ 2, 136, 180 },
{ 1, 78, 124 },
{ 1, 50, 83 },
{ 1, 29, 49 },
{ 1, 14, 23 },
},
},
{ /* Inter */
{ /* Coeff Band 0 */
{ 197, 13, 247 },
{ 82, 17, 222 },
{ 25, 17, 162 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
},
{ /* Coeff Band 1 */
{ 126, 186, 247 },
{ 234, 191, 243 },
{ 176, 177, 234 },
{ 104, 158, 220 },
{ 66, 128, 186 },
{ 55, 90, 137 },
},
{ /* Coeff Band 2 */
{ 111, 197, 242 },
{ 46, 158, 219 },
{ 9, 104, 171 },
{ 2, 65, 125 },
{ 1, 44, 80 },
{ 1, 17, 91 },
},
{ /* Coeff Band 3 */
{ 104, 208, 245 },
{ 39, 168, 224 },
{ 3, 109, 162 },
{ 1, 79, 124 },
{ 1, 50, 102 },
{ 1, 43, 102 },
},
{ /* Coeff Band 4 */
{ 84, 220, 246 },
{ 31, 177, 231 },
{ 2, 115, 180 },
{ 1, 79, 134 },
{ 1, 55, 77 },
{ 1, 60, 79 },
},
{ /* Coeff Band 5 */
{ 43, 243, 240 },
{ 8, 180, 217 },
{ 1, 115, 166 },
{ 1, 84, 121 },
{ 1, 51, 67 },
{ 1, 16, 6 },
},
},
},
},
},
.skip = { 192, 128, 64 },
.inter_mode = {
{ 2, 173, 34 },
{ 7, 145, 85 },
{ 7, 166, 63 },
{ 7, 94, 66 },
{ 8, 64, 46 },
{ 17, 81, 31 },
{ 25, 29, 30 },
},
.interp_filter = {
{ 235, 162 },
{ 36, 255 },
{ 34, 3 },
{ 149, 144 },
},
.is_inter = { 9, 102, 187, 225 },
.comp_mode = { 239, 183, 119, 96, 41 },
.single_ref = {
{ 33, 16 },
{ 77, 74 },
{ 142, 142 },
{ 172, 170 },
{ 238, 247 },
},
.comp_ref = { 50, 126, 123, 221, 226 },
.y_mode = {
{ 65, 32, 18, 144, 162, 194, 41, 51, 98 },
{ 132, 68, 18, 165, 217, 196, 45, 40, 78 },
{ 173, 80, 19, 176, 240, 193, 64, 35, 46 },
{ 221, 135, 38, 194, 248, 121, 96, 85, 29 },
},
.uv_mode = {
{ 120, 7, 76, 176, 208, 126, 28, 54, 103 } /* y = dc */,
{ 48, 12, 154, 155, 139, 90, 34, 117, 119 } /* y = v */,
{ 67, 6, 25, 204, 243, 158, 13, 21, 96 } /* y = h */,
{ 97, 5, 44, 131, 176, 139, 48, 68, 97 } /* y = d45 */,
{ 83, 5, 42, 156, 111, 152, 26, 49, 152 } /* y = d135 */,
{ 80, 5, 58, 178, 74, 83, 33, 62, 145 } /* y = d117 */,
{ 86, 5, 32, 154, 192, 168, 14, 22, 163 } /* y = d153 */,
{ 85, 5, 32, 156, 216, 148, 19, 29, 73 } /* y = d207 */,
{ 77, 7, 64, 116, 132, 122, 37, 126, 120 } /* y = d63 */,
{ 101, 21, 107, 181, 192, 103, 19, 67, 125 } /* y = tm */
},
.partition = {
/* 8x8 -> 4x4 */
{ 199, 122, 141 } /* a/l both not split */,
{ 147, 63, 159 } /* a split, l not split */,
{ 148, 133, 118 } /* l split, a not split */,
{ 121, 104, 114 } /* a/l both split */,
/* 16x16 -> 8x8 */
{ 174, 73, 87 } /* a/l both not split */,
{ 92, 41, 83 } /* a split, l not split */,
{ 82, 99, 50 } /* l split, a not split */,
{ 53, 39, 39 } /* a/l both split */,
/* 32x32 -> 16x16 */
{ 177, 58, 59 } /* a/l both not split */,
{ 68, 26, 63 } /* a split, l not split */,
{ 52, 79, 25 } /* l split, a not split */,
{ 17, 14, 12 } /* a/l both split */,
/* 64x64 -> 32x32 */
{ 222, 34, 30 } /* a/l both not split */,
{ 72, 16, 44 } /* a split, l not split */,
{ 58, 32, 12 } /* l split, a not split */,
{ 10, 7, 6 } /* a/l both split */,
},
.mv = {
.joint = { 32, 64, 96 },
.sign = { 128, 128 },
.classes = {
{ 224, 144, 192, 168, 192, 176, 192, 198, 198, 245 },
{ 216, 128, 176, 160, 176, 176, 192, 198, 198, 208 },
},
.class0_bit = { 216, 208 },
.bits = {
{ 136, 140, 148, 160, 176, 192, 224, 234, 234, 240},
{ 136, 140, 148, 160, 176, 192, 224, 234, 234, 240},
},
.class0_fr = {
{
{ 128, 128, 64 },
{ 96, 112, 64 },
},
{
{ 128, 128, 64 },
{ 96, 112, 64 },
},
},
.fr = {
{ 64, 96, 64 },
{ 64, 96, 64 },
},
.class0_hp = { 160, 160 },
.hp = { 128, 128 },
},
};
EXPORT_SYMBOL_GPL(v4l2_vp9_default_probs);
static u32 fastdiv(u32 dividend, u16 divisor)
{
#define DIV_INV(d) ((u32)(((1ULL << 32) + ((d) - 1)) / (d)))
#define DIVS_INV(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) \
DIV_INV(d0), DIV_INV(d1), DIV_INV(d2), DIV_INV(d3), \
DIV_INV(d4), DIV_INV(d5), DIV_INV(d6), DIV_INV(d7), \
DIV_INV(d8), DIV_INV(d9)
static const u32 inv[] = {
DIV_INV(2), DIV_INV(3), DIV_INV(4), DIV_INV(5),
DIV_INV(6), DIV_INV(7), DIV_INV(8), DIV_INV(9),
DIVS_INV(10, 11, 12, 13, 14, 15, 16, 17, 18, 19),
DIVS_INV(20, 21, 22, 23, 24, 25, 26, 27, 28, 29),
DIVS_INV(30, 31, 32, 33, 34, 35, 36, 37, 38, 39),
DIVS_INV(40, 41, 42, 43, 44, 45, 46, 47, 48, 49),
DIVS_INV(50, 51, 52, 53, 54, 55, 56, 57, 58, 59),
DIVS_INV(60, 61, 62, 63, 64, 65, 66, 67, 68, 69),
DIVS_INV(70, 71, 72, 73, 74, 75, 76, 77, 78, 79),
DIVS_INV(80, 81, 82, 83, 84, 85, 86, 87, 88, 89),
DIVS_INV(90, 91, 92, 93, 94, 95, 96, 97, 98, 99),
DIVS_INV(100, 101, 102, 103, 104, 105, 106, 107, 108, 109),
DIVS_INV(110, 111, 112, 113, 114, 115, 116, 117, 118, 119),
DIVS_INV(120, 121, 122, 123, 124, 125, 126, 127, 128, 129),
DIVS_INV(130, 131, 132, 133, 134, 135, 136, 137, 138, 139),
DIVS_INV(140, 141, 142, 143, 144, 145, 146, 147, 148, 149),
DIVS_INV(150, 151, 152, 153, 154, 155, 156, 157, 158, 159),
DIVS_INV(160, 161, 162, 163, 164, 165, 166, 167, 168, 169),
DIVS_INV(170, 171, 172, 173, 174, 175, 176, 177, 178, 179),
DIVS_INV(180, 181, 182, 183, 184, 185, 186, 187, 188, 189),
DIVS_INV(190, 191, 192, 193, 194, 195, 196, 197, 198, 199),
DIVS_INV(200, 201, 202, 203, 204, 205, 206, 207, 208, 209),
DIVS_INV(210, 211, 212, 213, 214, 215, 216, 217, 218, 219),
DIVS_INV(220, 221, 222, 223, 224, 225, 226, 227, 228, 229),
DIVS_INV(230, 231, 232, 233, 234, 235, 236, 237, 238, 239),
DIVS_INV(240, 241, 242, 243, 244, 245, 246, 247, 248, 249),
DIV_INV(250), DIV_INV(251), DIV_INV(252), DIV_INV(253),
DIV_INV(254), DIV_INV(255), DIV_INV(256),
};
if (divisor == 0)
return 0;
else if (divisor == 1)
return dividend;
if (WARN_ON(divisor - 2 >= ARRAY_SIZE(inv)))
return dividend;
return ((u64)dividend * inv[divisor - 2]) >> 32;
}
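/*
 * Worked example for fastdiv() (hand-computed illustration): for
 * divisor 7, DIV_INV(7) = ceil(2^32 / 7) = 0x24924925, so
 * fastdiv(300, 7) = ((u64)300 * 0x24924925) >> 32 = 42 == 300 / 7.
 */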
/* 6.3.6 inv_recenter_nonneg(v, m) */
static int inv_recenter_nonneg(int v, int m)
{
if (v > 2 * m)
return v;
if (v & 1)
return m - ((v + 1) >> 1);
return m + (v >> 1);
}
/*
* part of 6.3.5 inv_remap_prob(deltaProb, prob)
* delta = inv_map_table[deltaProb] done by userspace
*/
static int update_prob(int delta, int prob)
{
if (!delta)
return prob;
return prob <= 128 ?
1 + inv_recenter_nonneg(delta, prob - 1) :
255 - inv_recenter_nonneg(delta, 255 - prob);
}
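/*
 * Worked examples (hand-computed illustrations): update_prob(10, 100)
 * recenters around prob - 1 = 99; delta 10 is even, so
 * inv_recenter_nonneg(10, 99) = 99 + 5 = 104 and the result is 105.
 * For prob > 128 the recentering mirrors downward:
 * update_prob(10, 200) = 255 - inv_recenter_nonneg(10, 55) = 255 - 60 = 195.
 */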
/* Counterpart to 6.3.2 tx_mode_probs() */
static void update_tx_probs(struct v4l2_vp9_frame_context *probs,
const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
int i;
for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) {
u8 *p8x8 = probs->tx8[i];
u8 *p16x16 = probs->tx16[i];
u8 *p32x32 = probs->tx32[i];
const u8 *d8x8 = deltas->tx8[i];
const u8 *d16x16 = deltas->tx16[i];
const u8 *d32x32 = deltas->tx32[i];
p8x8[0] = update_prob(d8x8[0], p8x8[0]);
p16x16[0] = update_prob(d16x16[0], p16x16[0]);
p16x16[1] = update_prob(d16x16[1], p16x16[1]);
p32x32[0] = update_prob(d32x32[0], p32x32[0]);
p32x32[1] = update_prob(d32x32[1], p32x32[1]);
p32x32[2] = update_prob(d32x32[2], p32x32[2]);
}
}
#define BAND_6(band) ((band) == 0 ? 3 : 6)
static void update_coeff(const u8 deltas[6][6][3], u8 probs[6][6][3])
{
int l, m, n;
for (l = 0; l < 6; l++)
for (m = 0; m < BAND_6(l); m++) {
u8 *p = probs[l][m];
const u8 *d = deltas[l][m];
for (n = 0; n < 3; n++)
p[n] = update_prob(d[n], p[n]);
}
}
/* Counterpart to 6.3.7 read_coef_probs() */
static void update_coef_probs(struct v4l2_vp9_frame_context *probs,
const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
const struct v4l2_ctrl_vp9_frame *dec_params)
{
int i, j, k;
for (i = 0; i < ARRAY_SIZE(probs->coef); i++) {
for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++)
for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++)
update_coeff(deltas->coef[i][j][k], probs->coef[i][j][k]);
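/*
 * The compressed header carries coef updates only for tx sizes up to
 * the frame's tx_mode, so stop once that entry has been processed.
 */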
if (deltas->tx_mode == i)
break;
}
}
/* Counterpart to 6.3.8 read_skip_prob() */
static void update_skip_probs(struct v4l2_vp9_frame_context *probs,
const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
int i;
for (i = 0; i < ARRAY_SIZE(probs->skip); i++)
probs->skip[i] = update_prob(deltas->skip[i], probs->skip[i]);
}
/* Counterpart to 6.3.9 read_inter_mode_probs() */
static void update_inter_mode_probs(struct v4l2_vp9_frame_context *probs,
const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
int i;
for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++) {
u8 *p = probs->inter_mode[i];
const u8 *d = deltas->inter_mode[i];
p[0] = update_prob(d[0], p[0]);
p[1] = update_prob(d[1], p[1]);
p[2] = update_prob(d[2], p[2]);
}
}
/* Counterpart to 6.3.10 read_interp_filter_probs() */
static void update_interp_filter_probs(struct v4l2_vp9_frame_context *probs,
const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
int i;
for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++) {
u8 *p = probs->interp_filter[i];
const u8 *d = deltas->interp_filter[i];
p[0] = update_prob(d[0], p[0]);
p[1] = update_prob(d[1], p[1]);
}
}
/* Counterpart to 6.3.11 read_is_inter_probs() */
static void update_is_inter_probs(struct v4l2_vp9_frame_context *probs,
const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
int i;
for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++)
probs->is_inter[i] = update_prob(deltas->is_inter[i], probs->is_inter[i]);
}
/* 6.3.12 frame_reference_mode() done entirely in userspace */
/* Counterpart to 6.3.13 frame_reference_mode_probs() */
static void
update_frame_reference_mode_probs(unsigned int reference_mode,
struct v4l2_vp9_frame_context *probs,
const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
int i;
if (reference_mode == V4L2_VP9_REFERENCE_MODE_SELECT)
for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++)
probs->comp_mode[i] = update_prob(deltas->comp_mode[i],
probs->comp_mode[i]);
if (reference_mode != V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE)
for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++) {
u8 *p = probs->single_ref[i];
const u8 *d = deltas->single_ref[i];
p[0] = update_prob(d[0], p[0]);
p[1] = update_prob(d[1], p[1]);
}
if (reference_mode != V4L2_VP9_REFERENCE_MODE_SINGLE_REFERENCE)
for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++)
probs->comp_ref[i] = update_prob(deltas->comp_ref[i], probs->comp_ref[i]);
}
/* Counterpart to 6.3.14 read_y_mode_probs() */
static void update_y_mode_probs(struct v4l2_vp9_frame_context *probs,
const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
int i, j;
for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++)
for (j = 0; j < ARRAY_SIZE(probs->y_mode[0]); ++j)
probs->y_mode[i][j] =
update_prob(deltas->y_mode[i][j], probs->y_mode[i][j]);
}
/* Counterpart to 6.3.15 read_partition_probs() */
static void update_partition_probs(struct v4l2_vp9_frame_context *probs,
const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
{
int i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++) {
u8 *p = probs->partition[i * 4 + j];
const u8 *d = deltas->partition[i * 4 + j];
p[0] = update_prob(d[0], p[0]);
p[1] = update_prob(d[1], p[1]);
p[2] = update_prob(d[2], p[2]);
}
}
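/*
 * Note: unlike update_prob(), a nonzero mv delta is not recentered; per
 * 6.3.16 of the spec, userspace derives the replacement value from the
 * 7-bit literal as (v << 1) | 1 (always odd, never zero), so a nonzero
 * delta simply becomes the new probability.
 */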
static inline int update_mv_prob(int delta, int prob)
{
if (!delta)
return prob;
return delta;
}
/* Counterpart to 6.3.16 mv_probs() */
static void update_mv_probs(struct v4l2_vp9_frame_context *probs,
const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
const struct v4l2_ctrl_vp9_frame *dec_params)
{
u8 *p = probs->mv.joint;
const u8 *d = deltas->mv.joint;
unsigned int i, j;
p[0] = update_mv_prob(d[0], p[0]);
p[1] = update_mv_prob(d[1], p[1]);
p[2] = update_mv_prob(d[2], p[2]);
for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) {
p = probs->mv.sign;
d = deltas->mv.sign;
p[i] = update_mv_prob(d[i], p[i]);
p = probs->mv.classes[i];
d = deltas->mv.classes[i];
for (j = 0; j < ARRAY_SIZE(probs->mv.classes[0]); j++)
p[j] = update_mv_prob(d[j], p[j]);
p = probs->mv.class0_bit;
d = deltas->mv.class0_bit;
p[i] = update_mv_prob(d[i], p[i]);
p = probs->mv.bits[i];
d = deltas->mv.bits[i];
for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++)
p[j] = update_mv_prob(d[j], p[j]);
for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++) {
p = probs->mv.class0_fr[i][j];
d = deltas->mv.class0_fr[i][j];
p[0] = update_mv_prob(d[0], p[0]);
p[1] = update_mv_prob(d[1], p[1]);
p[2] = update_mv_prob(d[2], p[2]);
}
p = probs->mv.fr[i];
d = deltas->mv.fr[i];
for (j = 0; j < ARRAY_SIZE(probs->mv.fr[i]); j++)
p[j] = update_mv_prob(d[j], p[j]);
if (dec_params->flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV) {
p = probs->mv.class0_hp;
d = deltas->mv.class0_hp;
p[i] = update_mv_prob(d[i], p[i]);
p = probs->mv.hp;
d = deltas->mv.hp;
p[i] = update_mv_prob(d[i], p[i]);
}
}
}
/* Counterpart to 6.3 compressed_header(), but parsing has been done in userspace. */
void v4l2_vp9_fw_update_probs(struct v4l2_vp9_frame_context *probs,
const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
const struct v4l2_ctrl_vp9_frame *dec_params)
{
if (deltas->tx_mode == V4L2_VP9_TX_MODE_SELECT)
update_tx_probs(probs, deltas);
update_coef_probs(probs, deltas, dec_params);
update_skip_probs(probs, deltas);
if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY)
return;
update_inter_mode_probs(probs, deltas);
if (dec_params->interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE)
update_interp_filter_probs(probs, deltas);
update_is_inter_probs(probs, deltas);
update_frame_reference_mode_probs(dec_params->reference_mode, probs, deltas);
update_y_mode_probs(probs, deltas);
update_partition_probs(probs, deltas);
update_mv_probs(probs, deltas, dec_params);
}
EXPORT_SYMBOL_GPL(v4l2_vp9_fw_update_probs);
u8 v4l2_vp9_reset_frame_ctx(const struct v4l2_ctrl_vp9_frame *dec_params,
struct v4l2_vp9_frame_context *frame_context)
{
int i;
u8 fctx_idx = dec_params->frame_context_idx;
if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY ||
dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) {
/*
* setup_past_independence()
* We do nothing here. Instead of storing default probs in some intermediate
* location and then copying from that location to appropriate contexts
* in save_probs() below, we skip that step and save default probs directly
* to appropriate contexts.
*/
if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT ||
dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_ALL)
for (i = 0; i < 4; ++i)
/* save_probs(i) */
memcpy(&frame_context[i], &v4l2_vp9_default_probs,
sizeof(v4l2_vp9_default_probs));
else if (dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_SPEC)
/* save_probs(fctx_idx) */
memcpy(&frame_context[fctx_idx], &v4l2_vp9_default_probs,
sizeof(v4l2_vp9_default_probs));
fctx_idx = 0;
}
return fctx_idx;
}
EXPORT_SYMBOL_GPL(v4l2_vp9_reset_frame_ctx);
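/*
 * Usage sketch, where ctx->frame_context[] is a hypothetical per-stream
 * array of 4 contexts:
 *
 *	u8 fctx_idx = v4l2_vp9_reset_frame_ctx(dec_params, ctx->frame_context);
 *	struct v4l2_vp9_frame_context *probs = &ctx->frame_context[fctx_idx];
 *
 * typically followed by v4l2_vp9_fw_update_probs(probs, deltas, dec_params)
 * to fold in the deltas parsed from the compressed header.
 */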
/* 8.4.1 Merge prob process */
static u8 merge_prob(u8 pre_prob, u32 ct0, u32 ct1, u16 count_sat, u32 max_update_factor)
{
u32 den, prob, count, factor;
den = ct0 + ct1;
if (!den) {
/*
* prob = 128, count = 0, update_factor = 0
* Round2's argument: pre_prob * 256
* (pre_prob * 256 + 128) >> 8 == pre_prob
*/
return pre_prob;
}
prob = clamp(((ct0 << 8) + (den >> 1)) / den, (u32)1, (u32)255);
count = min_t(u32, den, count_sat);
factor = fastdiv(max_update_factor * count, count_sat);
/*
* Round2(pre_prob * (256 - factor) + prob * factor, 8)
* Round2(pre_prob * 256 + (prob - pre_prob) * factor, 8)
* (pre_prob * 256 >> 8) + (((prob - pre_prob) * factor + 128) >> 8)
*/
return pre_prob + (((prob - pre_prob) * factor + 128) >> 8);
}
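/*
 * Worked example (hand-computed illustration), using the noncoef
 * parameters count_sat = 20 and max_update_factor = 128:
 * merge_prob(128, 30, 10, 20, 128) gives den = 40,
 * prob = clamp((30 * 256 + 20) / 40, 1, 255) = 192, count = min(40, 20) = 20,
 * factor = fastdiv(128 * 20, 20) = 128, and the result is
 * 128 + (((192 - 128) * 128 + 128) >> 8) = 128 + 32 = 160.
 */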
static inline u8 noncoef_merge_prob(u8 pre_prob, u32 ct0, u32 ct1)
{
return merge_prob(pre_prob, ct0, ct1, 20, 128);
}
/* 8.4.2 Merge probs process */
/*
* merge_probs() is a recursive function in the spec. We avoid recursion in the kernel.
* That said, the "tree" parameter of merge_probs() controls how deep the recursion goes.
* It turns out that in all cases the recursive calls boil down to a short-ish series
* of merge_prob() invocations (note no "s").
*
* Variant A
* ---------
* merge_probs(small_token_tree, 2):
* merge_prob(p[1], c[0], c[1] + c[2])
* merge_prob(p[2], c[1], c[2])
*
* Variant B
* ---------
* merge_probs(binary_tree, 0) or
* merge_probs(tx_size_8_tree, 0):
* merge_prob(p[0], c[0], c[1])
*
* Variant C
* ---------
* merge_probs(inter_mode_tree, 0):
* merge_prob(p[0], c[2], c[1] + c[0] + c[3])
* merge_prob(p[1], c[0], c[1] + c[3])
* merge_prob(p[2], c[1], c[3])
*
* Variant D
* ---------
* merge_probs(intra_mode_tree, 0):
* merge_prob(p[0], c[0], c[1] + ... + c[9])
* merge_prob(p[1], c[9], c[1] + ... + c[8])
* merge_prob(p[2], c[1], c[2] + ... + c[8])
* merge_prob(p[3], c[2] + c[4] + c[5], c[3] + c[8] + c[6] + c[7])
* merge_prob(p[4], c[2], c[4] + c[5])
* merge_prob(p[5], c[4], c[5])
* merge_prob(p[6], c[3], c[8] + c[6] + c[7])
* merge_prob(p[7], c[8], c[6] + c[7])
* merge_prob(p[8], c[6], c[7])
*
* Variant E
* ---------
* merge_probs(partition_tree, 0) or
* merge_probs(tx_size_32_tree, 0) or
* merge_probs(mv_joint_tree, 0) or
* merge_probs(mv_fr_tree, 0):
* merge_prob(p[0], c[0], c[1] + c[2] + c[3])
* merge_prob(p[1], c[1], c[2] + c[3])
* merge_prob(p[2], c[2], c[3])
*
* Variant F
* ---------
* merge_probs(interp_filter_tree, 0) or
* merge_probs(tx_size_16_tree, 0):
* merge_prob(p[0], c[0], c[1] + c[2])
* merge_prob(p[1], c[1], c[2])
*
* Variant G
* ---------
* merge_probs(mv_class_tree, 0):
* merge_prob(p[0], c[0], c[1] + ... + c[10])
* merge_prob(p[1], c[1], c[2] + ... + c[10])
* merge_prob(p[2], c[2] + c[3], c[4] + ... + c[10])
* merge_prob(p[3], c[2], c[3])
* merge_prob(p[4], c[4] + c[5], c[6] + ... + c[10])
* merge_prob(p[5], c[4], c[5])
* merge_prob(p[6], c[6], c[7] + ... + c[10])
* merge_prob(p[7], c[7] + c[8], c[9] + c[10])
* merge_prob(p[8], c[7], c[8])
 * merge_prob(p[9], c[9], c[10])
*/
static inline void merge_probs_variant_a(u8 *p, const u32 *c, u16 count_sat, u32 update_factor)
{
p[1] = merge_prob(p[1], c[0], c[1] + c[2], count_sat, update_factor);
p[2] = merge_prob(p[2], c[1], c[2], count_sat, update_factor);
}
static inline void merge_probs_variant_b(u8 *p, const u32 *c, u16 count_sat, u32 update_factor)
{
p[0] = merge_prob(p[0], c[0], c[1], count_sat, update_factor);
}
static inline void merge_probs_variant_c(u8 *p, const u32 *c)
{
p[0] = noncoef_merge_prob(p[0], c[2], c[1] + c[0] + c[3]);
p[1] = noncoef_merge_prob(p[1], c[0], c[1] + c[3]);
p[2] = noncoef_merge_prob(p[2], c[1], c[3]);
}
static void merge_probs_variant_d(u8 *p, const u32 *c)
{
u32 sum = 0, s2;
sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
p[0] = noncoef_merge_prob(p[0], c[0], sum);
sum -= c[9];
p[1] = noncoef_merge_prob(p[1], c[9], sum);
sum -= c[1];
p[2] = noncoef_merge_prob(p[2], c[1], sum);
s2 = c[2] + c[4] + c[5];
sum -= s2;
p[3] = noncoef_merge_prob(p[3], s2, sum);
s2 -= c[2];
p[4] = noncoef_merge_prob(p[4], c[2], s2);
p[5] = noncoef_merge_prob(p[5], c[4], c[5]);
sum -= c[3];
p[6] = noncoef_merge_prob(p[6], c[3], sum);
sum -= c[8];
p[7] = noncoef_merge_prob(p[7], c[8], sum);
p[8] = noncoef_merge_prob(p[8], c[6], c[7]);
}
static inline void merge_probs_variant_e(u8 *p, const u32 *c)
{
p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2] + c[3]);
p[1] = noncoef_merge_prob(p[1], c[1], c[2] + c[3]);
p[2] = noncoef_merge_prob(p[2], c[2], c[3]);
}
static inline void merge_probs_variant_f(u8 *p, const u32 *c)
{
p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2]);
p[1] = noncoef_merge_prob(p[1], c[1], c[2]);
}
static void merge_probs_variant_g(u8 *p, const u32 *c)
{
u32 sum;
sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10];
p[0] = noncoef_merge_prob(p[0], c[0], sum);
sum -= c[1];
p[1] = noncoef_merge_prob(p[1], c[1], sum);
sum -= c[2] + c[3];
p[2] = noncoef_merge_prob(p[2], c[2] + c[3], sum);
p[3] = noncoef_merge_prob(p[3], c[2], c[3]);
sum -= c[4] + c[5];
p[4] = noncoef_merge_prob(p[4], c[4] + c[5], sum);
p[5] = noncoef_merge_prob(p[5], c[4], c[5]);
sum -= c[6];
p[6] = noncoef_merge_prob(p[6], c[6], sum);
p[7] = noncoef_merge_prob(p[7], c[7] + c[8], c[9] + c[10]);
p[8] = noncoef_merge_prob(p[8], c[7], c[8]);
p[9] = noncoef_merge_prob(p[9], c[9], c[10]);
}
/* 8.4.3 Coefficient probability adaptation process */
static inline void adapt_probs_variant_a_coef(u8 *p, const u32 *c, u32 update_factor)
{
merge_probs_variant_a(p, c, 24, update_factor);
}
static inline void adapt_probs_variant_b_coef(u8 *p, const u32 *c, u32 update_factor)
{
merge_probs_variant_b(p, c, 24, update_factor);
}
static void _adapt_coeff(unsigned int i, unsigned int j, unsigned int k,
struct v4l2_vp9_frame_context *probs,
const struct v4l2_vp9_frame_symbol_counts *counts,
u32 uf)
{
s32 l, m;
for (l = 0; l < ARRAY_SIZE(probs->coef[0][0][0]); l++) {
for (m = 0; m < BAND_6(l); m++) {
u8 *p = probs->coef[i][j][k][l][m];
const u32 counts_more_coefs[2] = {
*counts->eob[i][j][k][l][m][1],
*counts->eob[i][j][k][l][m][0] - *counts->eob[i][j][k][l][m][1],
};
adapt_probs_variant_a_coef(p, *counts->coeff[i][j][k][l][m], uf);
adapt_probs_variant_b_coef(p, counts_more_coefs, uf);
}
}
}
static void _adapt_coef_probs(struct v4l2_vp9_frame_context *probs,
const struct v4l2_vp9_frame_symbol_counts *counts,
unsigned int uf)
{
unsigned int i, j, k;
for (i = 0; i < ARRAY_SIZE(probs->coef); i++)
for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++)
for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++)
_adapt_coeff(i, j, k, probs, counts, uf);
}
void v4l2_vp9_adapt_coef_probs(struct v4l2_vp9_frame_context *probs,
struct v4l2_vp9_frame_symbol_counts *counts,
bool use_128,
bool frame_is_intra)
{
if (frame_is_intra) {
_adapt_coef_probs(probs, counts, 112);
} else {
if (use_128)
_adapt_coef_probs(probs, counts, 128);
else
_adapt_coef_probs(probs, counts, 112);
}
}
EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_coef_probs);
/* 8.4.4 Non coefficient probability adaptation process, adapt_probs() */
static inline void adapt_probs_variant_b(u8 *p, const u32 *c)
{
merge_probs_variant_b(p, c, 20, 128);
}
static inline void adapt_probs_variant_c(u8 *p, const u32 *c)
{
merge_probs_variant_c(p, c);
}
static inline void adapt_probs_variant_d(u8 *p, const u32 *c)
{
merge_probs_variant_d(p, c);
}
static inline void adapt_probs_variant_e(u8 *p, const u32 *c)
{
merge_probs_variant_e(p, c);
}
static inline void adapt_probs_variant_f(u8 *p, const u32 *c)
{
merge_probs_variant_f(p, c);
}
static inline void adapt_probs_variant_g(u8 *p, const u32 *c)
{
merge_probs_variant_g(p, c);
}
/* 8.4.4 Non coefficient probability adaptation process, adapt_prob() */
static inline u8 adapt_prob(u8 prob, const u32 counts[2])
{
return noncoef_merge_prob(prob, counts[0], counts[1]);
}
/* 8.4.4 Non coefficient probability adaptation process */
void v4l2_vp9_adapt_noncoef_probs(struct v4l2_vp9_frame_context *probs,
struct v4l2_vp9_frame_symbol_counts *counts,
u8 reference_mode, u8 interpolation_filter, u8 tx_mode,
u32 flags)
{
unsigned int i, j;
for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++)
probs->is_inter[i] = adapt_prob(probs->is_inter[i], (*counts->intra_inter)[i]);
for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++)
probs->comp_mode[i] = adapt_prob(probs->comp_mode[i], (*counts->comp)[i]);
for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++)
probs->comp_ref[i] = adapt_prob(probs->comp_ref[i], (*counts->comp_ref)[i]);
if (reference_mode != V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE)
for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++)
for (j = 0; j < ARRAY_SIZE(probs->single_ref[0]); j++)
probs->single_ref[i][j] = adapt_prob(probs->single_ref[i][j],
(*counts->single_ref)[i][j]);
for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++)
adapt_probs_variant_c(probs->inter_mode[i], (*counts->mv_mode)[i]);
for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++)
adapt_probs_variant_d(probs->y_mode[i], (*counts->y_mode)[i]);
for (i = 0; i < ARRAY_SIZE(probs->uv_mode); i++)
adapt_probs_variant_d(probs->uv_mode[i], (*counts->uv_mode)[i]);
for (i = 0; i < ARRAY_SIZE(probs->partition); i++)
adapt_probs_variant_e(probs->partition[i], (*counts->partition)[i]);
for (i = 0; i < ARRAY_SIZE(probs->skip); i++)
probs->skip[i] = adapt_prob(probs->skip[i], (*counts->skip)[i]);
if (interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE)
for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++)
adapt_probs_variant_f(probs->interp_filter[i], (*counts->filter)[i]);
if (tx_mode == V4L2_VP9_TX_MODE_SELECT)
for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) {
adapt_probs_variant_b(probs->tx8[i], (*counts->tx8p)[i]);
adapt_probs_variant_f(probs->tx16[i], (*counts->tx16p)[i]);
adapt_probs_variant_e(probs->tx32[i], (*counts->tx32p)[i]);
}
adapt_probs_variant_e(probs->mv.joint, *counts->mv_joint);
for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) {
probs->mv.sign[i] = adapt_prob(probs->mv.sign[i], (*counts->sign)[i]);
adapt_probs_variant_g(probs->mv.classes[i], (*counts->classes)[i]);
probs->mv.class0_bit[i] = adapt_prob(probs->mv.class0_bit[i], (*counts->class0)[i]);
for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++)
probs->mv.bits[i][j] = adapt_prob(probs->mv.bits[i][j],
(*counts->bits)[i][j]);
for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++)
adapt_probs_variant_e(probs->mv.class0_fr[i][j],
(*counts->class0_fp)[i][j]);
adapt_probs_variant_e(probs->mv.fr[i], (*counts->fp)[i]);
if (!(flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV))
continue;
probs->mv.class0_hp[i] = adapt_prob(probs->mv.class0_hp[i],
(*counts->class0_hp)[i]);
probs->mv.hp[i] = adapt_prob(probs->mv.hp[i], (*counts->hp)[i]);
}
}
EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_noncoef_probs);
bool
v4l2_vp9_seg_feat_enabled(const u8 *feature_enabled,
unsigned int feature,
unsigned int segid)
{
u8 mask = V4L2_VP9_SEGMENT_FEATURE_ENABLED(feature);
return !!(feature_enabled[segid] & mask);
}
EXPORT_SYMBOL_GPL(v4l2_vp9_seg_feat_enabled);
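/*
* Example (illustrative, not part of the original file): checking whether
* the SKIP feature is enabled for a given segment, assuming "seg" points
* to the struct v4l2_vp9_segmentation from the decode parameters.
*/
static inline bool example_seg_skip_enabled(const struct v4l2_vp9_segmentation *seg,
unsigned int segid)
{
return v4l2_vp9_seg_feat_enabled(seg->feature_enabled,
V4L2_VP9_SEG_LVL_SKIP, segid);
}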
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("V4L2 VP9 Helpers");
MODULE_AUTHOR("Andrzej Pietrasiewicz <[email protected]>");
| linux-master | drivers/media/v4l2-core/v4l2-vp9.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
V4L2 device support.
Copyright (C) 2008 Hans Verkuil <[email protected]>
*/
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
{
if (v4l2_dev == NULL)
return -EINVAL;
INIT_LIST_HEAD(&v4l2_dev->subdevs);
spin_lock_init(&v4l2_dev->lock);
v4l2_prio_init(&v4l2_dev->prio);
kref_init(&v4l2_dev->ref);
get_device(dev);
v4l2_dev->dev = dev;
if (dev == NULL) {
/* If dev == NULL, then name must be filled in by the caller */
if (WARN_ON(!v4l2_dev->name[0]))
return -EINVAL;
return 0;
}
/* Set name to driver name + device name if it is empty. */
if (!v4l2_dev->name[0])
snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s %s",
dev->driver->name, dev_name(dev));
if (!dev_get_drvdata(dev))
dev_set_drvdata(dev, v4l2_dev);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_device_register);
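/*
* Example (illustrative sketch, not part of the original file): a typical
* driver embeds the v4l2_device in its own state structure and registers
* it from probe(). "struct foo_dev" and "example_foo_probe" are
* hypothetical.
*/
struct foo_dev {
struct v4l2_device v4l2_dev;
/* ... driver private state ... */
};

static inline int example_foo_probe(struct device *dev)
{
struct foo_dev *foo = devm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);

if (!foo)
return -ENOMEM;
/* the name defaults to "<driver name> <device name>" as set above */
return v4l2_device_register(dev, &foo->v4l2_dev);
}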
static void v4l2_device_release(struct kref *ref)
{
struct v4l2_device *v4l2_dev =
container_of(ref, struct v4l2_device, ref);
if (v4l2_dev->release)
v4l2_dev->release(v4l2_dev);
}
int v4l2_device_put(struct v4l2_device *v4l2_dev)
{
return kref_put(&v4l2_dev->ref, v4l2_device_release);
}
EXPORT_SYMBOL_GPL(v4l2_device_put);
int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
atomic_t *instance)
{
int num = atomic_inc_return(instance) - 1;
int len = strlen(basename);
if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
"%s-%d", basename, num);
else
snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
"%s%d", basename, num);
return num;
}
EXPORT_SYMBOL_GPL(v4l2_device_set_name);
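/*
* Worked example (illustrative, not part of the original file): for the
* first instance (num == 0) a basename of "ivtv" yields the name "ivtv0",
* while "saa7134" ends in a digit and therefore yields "saa7134-0", so the
* instance number cannot be misread as part of the base name.
*/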
void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
{
if (v4l2_dev->dev == NULL)
return;
if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)
dev_set_drvdata(v4l2_dev->dev, NULL);
put_device(v4l2_dev->dev);
v4l2_dev->dev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_device_disconnect);
void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
{
struct v4l2_subdev *sd, *next;
/* Just return if v4l2_dev is NULL or if it was already
* unregistered. */
if (v4l2_dev == NULL || !v4l2_dev->name[0])
return;
v4l2_device_disconnect(v4l2_dev);
/* Unregister subdevs */
list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) {
v4l2_device_unregister_subdev(sd);
if (sd->flags & V4L2_SUBDEV_FL_IS_I2C)
v4l2_i2c_subdev_unregister(sd);
else if (sd->flags & V4L2_SUBDEV_FL_IS_SPI)
v4l2_spi_subdev_unregister(sd);
}
/* Mark as unregistered, thus preventing duplicate unregistrations */
v4l2_dev->name[0] = '\0';
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister);
int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
struct v4l2_subdev *sd)
{
int err;
/* Check for valid input */
if (!v4l2_dev || !sd || sd->v4l2_dev || !sd->name[0])
return -EINVAL;
/*
* The reason to acquire the module here is to avoid unloading
* a sub-device module while the sub-device itself is still
* registered to a media device. To make it possible to unload
* modules for media devices that also register sub-devices, do
* not try_module_get() such sub-device owners.
*/
sd->owner_v4l2_dev = v4l2_dev->dev && v4l2_dev->dev->driver &&
sd->owner == v4l2_dev->dev->driver->owner;
if (!sd->owner_v4l2_dev && !try_module_get(sd->owner))
return -ENODEV;
sd->v4l2_dev = v4l2_dev;
/* This just returns 0 if either of the two args is NULL */
err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler,
NULL, true);
if (err)
goto error_module;
#if defined(CONFIG_MEDIA_CONTROLLER)
/* Register the entity. */
if (v4l2_dev->mdev) {
err = media_device_register_entity(v4l2_dev->mdev, &sd->entity);
if (err < 0)
goto error_module;
}
#endif
if (sd->internal_ops && sd->internal_ops->registered) {
err = sd->internal_ops->registered(sd);
if (err)
goto error_unregister;
}
spin_lock(&v4l2_dev->lock);
list_add_tail(&sd->list, &v4l2_dev->subdevs);
spin_unlock(&v4l2_dev->lock);
return 0;
error_unregister:
#if defined(CONFIG_MEDIA_CONTROLLER)
media_device_unregister_entity(&sd->entity);
#endif
error_module:
if (!sd->owner_v4l2_dev)
module_put(sd->owner);
sd->v4l2_dev = NULL;
return err;
}
EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
static void v4l2_subdev_release(struct v4l2_subdev *sd)
{
struct module *owner = !sd->owner_v4l2_dev ? sd->owner : NULL;
if (sd->internal_ops && sd->internal_ops->release)
sd->internal_ops->release(sd);
sd->devnode = NULL;
module_put(owner);
}
static void v4l2_device_release_subdev_node(struct video_device *vdev)
{
v4l2_subdev_release(video_get_drvdata(vdev));
kfree(vdev);
}
int __v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev,
bool read_only)
{
struct video_device *vdev;
struct v4l2_subdev *sd;
int err;
/* Register a device node for every subdev marked with the
* V4L2_SUBDEV_FL_HAS_DEVNODE flag.
*/
list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
continue;
if (sd->devnode)
continue;
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
err = -ENOMEM;
goto clean_up;
}
video_set_drvdata(vdev, sd);
strscpy(vdev->name, sd->name, sizeof(vdev->name));
vdev->dev_parent = sd->dev;
vdev->v4l2_dev = v4l2_dev;
vdev->fops = &v4l2_subdev_fops;
vdev->release = v4l2_device_release_subdev_node;
vdev->ctrl_handler = sd->ctrl_handler;
if (read_only)
set_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
sd->devnode = vdev;
err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
sd->owner);
if (err < 0) {
sd->devnode = NULL;
kfree(vdev);
goto clean_up;
}
#if defined(CONFIG_MEDIA_CONTROLLER)
sd->entity.info.dev.major = VIDEO_MAJOR;
sd->entity.info.dev.minor = vdev->minor;
/* Interface is created by __video_register_device() */
if (vdev->v4l2_dev->mdev) {
struct media_link *link;
link = media_create_intf_link(&sd->entity,
&vdev->intf_devnode->intf,
MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE);
if (!link) {
err = -ENOMEM;
goto clean_up;
}
}
#endif
}
return 0;
clean_up:
list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
if (!sd->devnode)
break;
video_unregister_device(sd->devnode);
}
return err;
}
EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev_nodes);
void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
{
struct v4l2_device *v4l2_dev;
/* return if it isn't registered */
if (sd == NULL || sd->v4l2_dev == NULL)
return;
v4l2_dev = sd->v4l2_dev;
spin_lock(&v4l2_dev->lock);
list_del(&sd->list);
spin_unlock(&v4l2_dev->lock);
if (sd->internal_ops && sd->internal_ops->unregistered)
sd->internal_ops->unregistered(sd);
sd->v4l2_dev = NULL;
#if defined(CONFIG_MEDIA_CONTROLLER)
if (v4l2_dev->mdev) {
/*
* No need to explicitly remove links, as both pads and
* links are removed by the function below, in the right order
*/
media_device_unregister_entity(&sd->entity);
}
#endif
if (sd->devnode)
video_unregister_device(sd->devnode);
else
v4l2_subdev_release(sd);
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev);
| linux-master | drivers/media/v4l2-core/v4l2-device.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* v4l2-dv-timings - dv-timings helper functions
*
* Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/rational.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-dv-timings.h>
#include <linux/math64.h>
#include <linux/hdmi.h>
#include <media/cec.h>
MODULE_AUTHOR("Hans Verkuil");
MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
MODULE_LICENSE("GPL");
const struct v4l2_dv_timings v4l2_dv_timings_presets[] = {
V4L2_DV_BT_CEA_640X480P59_94,
V4L2_DV_BT_CEA_720X480I59_94,
V4L2_DV_BT_CEA_720X480P59_94,
V4L2_DV_BT_CEA_720X576I50,
V4L2_DV_BT_CEA_720X576P50,
V4L2_DV_BT_CEA_1280X720P24,
V4L2_DV_BT_CEA_1280X720P25,
V4L2_DV_BT_CEA_1280X720P30,
V4L2_DV_BT_CEA_1280X720P50,
V4L2_DV_BT_CEA_1280X720P60,
V4L2_DV_BT_CEA_1920X1080P24,
V4L2_DV_BT_CEA_1920X1080P25,
V4L2_DV_BT_CEA_1920X1080P30,
V4L2_DV_BT_CEA_1920X1080I50,
V4L2_DV_BT_CEA_1920X1080P50,
V4L2_DV_BT_CEA_1920X1080I60,
V4L2_DV_BT_CEA_1920X1080P60,
V4L2_DV_BT_DMT_640X350P85,
V4L2_DV_BT_DMT_640X400P85,
V4L2_DV_BT_DMT_720X400P85,
V4L2_DV_BT_DMT_640X480P72,
V4L2_DV_BT_DMT_640X480P75,
V4L2_DV_BT_DMT_640X480P85,
V4L2_DV_BT_DMT_800X600P56,
V4L2_DV_BT_DMT_800X600P60,
V4L2_DV_BT_DMT_800X600P72,
V4L2_DV_BT_DMT_800X600P75,
V4L2_DV_BT_DMT_800X600P85,
V4L2_DV_BT_DMT_800X600P120_RB,
V4L2_DV_BT_DMT_848X480P60,
V4L2_DV_BT_DMT_1024X768I43,
V4L2_DV_BT_DMT_1024X768P60,
V4L2_DV_BT_DMT_1024X768P70,
V4L2_DV_BT_DMT_1024X768P75,
V4L2_DV_BT_DMT_1024X768P85,
V4L2_DV_BT_DMT_1024X768P120_RB,
V4L2_DV_BT_DMT_1152X864P75,
V4L2_DV_BT_DMT_1280X768P60_RB,
V4L2_DV_BT_DMT_1280X768P60,
V4L2_DV_BT_DMT_1280X768P75,
V4L2_DV_BT_DMT_1280X768P85,
V4L2_DV_BT_DMT_1280X768P120_RB,
V4L2_DV_BT_DMT_1280X800P60_RB,
V4L2_DV_BT_DMT_1280X800P60,
V4L2_DV_BT_DMT_1280X800P75,
V4L2_DV_BT_DMT_1280X800P85,
V4L2_DV_BT_DMT_1280X800P120_RB,
V4L2_DV_BT_DMT_1280X960P60,
V4L2_DV_BT_DMT_1280X960P85,
V4L2_DV_BT_DMT_1280X960P120_RB,
V4L2_DV_BT_DMT_1280X1024P60,
V4L2_DV_BT_DMT_1280X1024P75,
V4L2_DV_BT_DMT_1280X1024P85,
V4L2_DV_BT_DMT_1280X1024P120_RB,
V4L2_DV_BT_DMT_1360X768P60,
V4L2_DV_BT_DMT_1360X768P120_RB,
V4L2_DV_BT_DMT_1366X768P60,
V4L2_DV_BT_DMT_1366X768P60_RB,
V4L2_DV_BT_DMT_1400X1050P60_RB,
V4L2_DV_BT_DMT_1400X1050P60,
V4L2_DV_BT_DMT_1400X1050P75,
V4L2_DV_BT_DMT_1400X1050P85,
V4L2_DV_BT_DMT_1400X1050P120_RB,
V4L2_DV_BT_DMT_1440X900P60_RB,
V4L2_DV_BT_DMT_1440X900P60,
V4L2_DV_BT_DMT_1440X900P75,
V4L2_DV_BT_DMT_1440X900P85,
V4L2_DV_BT_DMT_1440X900P120_RB,
V4L2_DV_BT_DMT_1600X900P60_RB,
V4L2_DV_BT_DMT_1600X1200P60,
V4L2_DV_BT_DMT_1600X1200P65,
V4L2_DV_BT_DMT_1600X1200P70,
V4L2_DV_BT_DMT_1600X1200P75,
V4L2_DV_BT_DMT_1600X1200P85,
V4L2_DV_BT_DMT_1600X1200P120_RB,
V4L2_DV_BT_DMT_1680X1050P60_RB,
V4L2_DV_BT_DMT_1680X1050P60,
V4L2_DV_BT_DMT_1680X1050P75,
V4L2_DV_BT_DMT_1680X1050P85,
V4L2_DV_BT_DMT_1680X1050P120_RB,
V4L2_DV_BT_DMT_1792X1344P60,
V4L2_DV_BT_DMT_1792X1344P75,
V4L2_DV_BT_DMT_1792X1344P120_RB,
V4L2_DV_BT_DMT_1856X1392P60,
V4L2_DV_BT_DMT_1856X1392P75,
V4L2_DV_BT_DMT_1856X1392P120_RB,
V4L2_DV_BT_DMT_1920X1200P60_RB,
V4L2_DV_BT_DMT_1920X1200P60,
V4L2_DV_BT_DMT_1920X1200P75,
V4L2_DV_BT_DMT_1920X1200P85,
V4L2_DV_BT_DMT_1920X1200P120_RB,
V4L2_DV_BT_DMT_1920X1440P60,
V4L2_DV_BT_DMT_1920X1440P75,
V4L2_DV_BT_DMT_1920X1440P120_RB,
V4L2_DV_BT_DMT_2048X1152P60_RB,
V4L2_DV_BT_DMT_2560X1600P60_RB,
V4L2_DV_BT_DMT_2560X1600P60,
V4L2_DV_BT_DMT_2560X1600P75,
V4L2_DV_BT_DMT_2560X1600P85,
V4L2_DV_BT_DMT_2560X1600P120_RB,
V4L2_DV_BT_CEA_3840X2160P24,
V4L2_DV_BT_CEA_3840X2160P25,
V4L2_DV_BT_CEA_3840X2160P30,
V4L2_DV_BT_CEA_3840X2160P50,
V4L2_DV_BT_CEA_3840X2160P60,
V4L2_DV_BT_CEA_4096X2160P24,
V4L2_DV_BT_CEA_4096X2160P25,
V4L2_DV_BT_CEA_4096X2160P30,
V4L2_DV_BT_CEA_4096X2160P50,
V4L2_DV_BT_DMT_4096X2160P59_94_RB,
V4L2_DV_BT_CEA_4096X2160P60,
{ }
};
EXPORT_SYMBOL_GPL(v4l2_dv_timings_presets);
bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
const struct v4l2_dv_timings_cap *dvcap,
v4l2_check_dv_timings_fnc fnc,
void *fnc_handle)
{
const struct v4l2_bt_timings *bt = &t->bt;
const struct v4l2_bt_timings_cap *cap = &dvcap->bt;
u32 caps = cap->capabilities;
const u32 max_vert = 10240;
u32 max_hor = 3 * bt->width;
if (t->type != V4L2_DV_BT_656_1120)
return false;
if (t->type != dvcap->type ||
bt->height < cap->min_height ||
bt->height > cap->max_height ||
bt->width < cap->min_width ||
bt->width > cap->max_width ||
bt->pixelclock < cap->min_pixelclock ||
bt->pixelclock > cap->max_pixelclock ||
(!(caps & V4L2_DV_BT_CAP_CUSTOM) &&
cap->standards && bt->standards &&
!(bt->standards & cap->standards)) ||
(bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
(!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
return false;
/* sanity checks for the blanking timings */
if (!bt->interlaced &&
(bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
return false;
/*
* Some video receivers cannot properly separate the frontporch,
* backporch and sync values, and instead they only have the total
* blanking. That can be assigned to any of these three fields.
* So just check that none of these are way out of range.
*/
if (bt->hfrontporch > max_hor ||
bt->hsync > max_hor || bt->hbackporch > max_hor)
return false;
if (bt->vfrontporch > max_vert ||
bt->vsync > max_vert || bt->vbackporch > max_vert)
return false;
if (bt->interlaced && (bt->il_vfrontporch > max_vert ||
bt->il_vsync > max_vert || bt->il_vbackporch > max_vert))
return false;
return fnc == NULL || fnc(t, fnc_handle);
}
EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings);
int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
const struct v4l2_dv_timings_cap *cap,
v4l2_check_dv_timings_fnc fnc,
void *fnc_handle)
{
u32 i, idx;
memset(t->reserved, 0, sizeof(t->reserved));
for (i = idx = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
fnc, fnc_handle) &&
idx++ == t->index) {
t->timings = v4l2_dv_timings_presets[i];
return 0;
}
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_enum_dv_timings_cap);
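/*
* Example (illustrative sketch, not part of the original file): a driver's
* VIDIOC_ENUM_DV_TIMINGS handler typically just forwards to the helper
* above together with its own capability description. All values below are
* hypothetical; the V4L2_INIT_BT_TIMINGS() macro can also be used to fill
* in the bt part.
*/
static const struct v4l2_dv_timings_cap example_timings_cap = {
.type = V4L2_DV_BT_656_1120,
.bt = {
.min_width = 640,
.max_width = 1920,
.min_height = 350,
.max_height = 1200,
.min_pixelclock = 25000000,
.max_pixelclock = 170000000,
.standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT,
.capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
V4L2_DV_BT_CAP_INTERLACED,
},
};

static inline int example_enum_dv_timings(struct v4l2_enum_dv_timings *timings)
{
return v4l2_enum_dv_timings_cap(timings, &example_timings_cap, NULL, NULL);
}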
bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
const struct v4l2_dv_timings_cap *cap,
unsigned pclock_delta,
v4l2_check_dv_timings_fnc fnc,
void *fnc_handle)
{
int i;
if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
return false;
for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
fnc, fnc_handle) &&
v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
pclock_delta, false)) {
u32 flags = t->bt.flags & V4L2_DV_FL_REDUCED_FPS;
*t = v4l2_dv_timings_presets[i];
if (can_reduce_fps(&t->bt))
t->bt.flags |= flags;
return true;
}
}
return false;
}
EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap);
bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic)
{
unsigned int i;
for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
const struct v4l2_bt_timings *bt =
&v4l2_dv_timings_presets[i].bt;
if ((bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) &&
bt->cea861_vic == vic) {
*t = v4l2_dv_timings_presets[i];
return true;
}
}
return false;
}
EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cea861_vic);
/**
* v4l2_match_dv_timings - check if two timings match
* @t1: compare this v4l2_dv_timings struct...
* @t2: with this struct.
* @pclock_delta: the allowed pixelclock deviation.
* @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not
* match.
*
* Compare t1 with t2 with a given margin of error for the pixelclock.
*/
bool v4l2_match_dv_timings(const struct v4l2_dv_timings *t1,
const struct v4l2_dv_timings *t2,
unsigned pclock_delta, bool match_reduced_fps)
{
if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120)
return false;
if (t1->bt.width == t2->bt.width &&
t1->bt.height == t2->bt.height &&
t1->bt.interlaced == t2->bt.interlaced &&
t1->bt.polarities == t2->bt.polarities &&
t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta &&
t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta &&
t1->bt.hfrontporch == t2->bt.hfrontporch &&
t1->bt.hsync == t2->bt.hsync &&
t1->bt.hbackporch == t2->bt.hbackporch &&
t1->bt.vfrontporch == t2->bt.vfrontporch &&
t1->bt.vsync == t2->bt.vsync &&
t1->bt.vbackporch == t2->bt.vbackporch &&
(!match_reduced_fps ||
(t1->bt.flags & V4L2_DV_FL_REDUCED_FPS) ==
(t2->bt.flags & V4L2_DV_FL_REDUCED_FPS)) &&
(!t1->bt.interlaced ||
(t1->bt.il_vfrontporch == t2->bt.il_vfrontporch &&
t1->bt.il_vsync == t2->bt.il_vsync &&
t1->bt.il_vbackporch == t2->bt.il_vbackporch)))
return true;
return false;
}
EXPORT_SYMBOL_GPL(v4l2_match_dv_timings);
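/*
* Example (illustrative, not part of the original file): deciding whether a
* newly detected mode differs from the currently configured one, allowing
* the pixel clock to deviate by up to 1 MHz and ignoring REDUCED_FPS:
*/
static inline bool example_mode_unchanged(const struct v4l2_dv_timings *detected,
const struct v4l2_dv_timings *cur)
{
return v4l2_match_dv_timings(detected, cur, 1000000, false);
}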
void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
const struct v4l2_dv_timings *t, bool detailed)
{
const struct v4l2_bt_timings *bt = &t->bt;
u32 htot, vtot;
u32 fps;
if (t->type != V4L2_DV_BT_656_1120)
return;
htot = V4L2_DV_BT_FRAME_WIDTH(bt);
vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
if (bt->interlaced)
vtot /= 2;
fps = (htot * vtot) > 0 ? div_u64((100 * (u64)bt->pixelclock),
(htot * vtot)) : 0;
if (prefix == NULL)
prefix = "";
pr_info("%s: %s%ux%u%s%u.%02u (%ux%u)\n", dev_prefix, prefix,
bt->width, bt->height, bt->interlaced ? "i" : "p",
fps / 100, fps % 100, htot, vtot);
if (!detailed)
return;
pr_info("%s: horizontal: fp = %u, %ssync = %u, bp = %u\n",
dev_prefix, bt->hfrontporch,
(bt->polarities & V4L2_DV_HSYNC_POS_POL) ? "+" : "-",
bt->hsync, bt->hbackporch);
pr_info("%s: vertical: fp = %u, %ssync = %u, bp = %u\n",
dev_prefix, bt->vfrontporch,
(bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
bt->vsync, bt->vbackporch);
if (bt->interlaced)
pr_info("%s: vertical bottom field: fp = %u, %ssync = %u, bp = %u\n",
dev_prefix, bt->il_vfrontporch,
(bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
bt->il_vsync, bt->il_vbackporch);
pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s%s%s%s\n",
dev_prefix, bt->flags,
(bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
" REDUCED_BLANKING" : "",
((bt->flags & V4L2_DV_FL_REDUCED_BLANKING) &&
bt->vsync == 8) ? " (V2)" : "",
(bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS) ?
" CAN_REDUCE_FPS" : "",
(bt->flags & V4L2_DV_FL_REDUCED_FPS) ?
" REDUCED_FPS" : "",
(bt->flags & V4L2_DV_FL_HALF_LINE) ?
" HALF_LINE" : "",
(bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ?
" CE_VIDEO" : "",
(bt->flags & V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE) ?
" FIRST_FIELD_EXTRA_LINE" : "",
(bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT) ?
" HAS_PICTURE_ASPECT" : "",
(bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) ?
" HAS_CEA861_VIC" : "",
(bt->flags & V4L2_DV_FL_HAS_HDMI_VIC) ?
" HAS_HDMI_VIC" : "");
pr_info("%s: standards (0x%x):%s%s%s%s%s\n", dev_prefix, bt->standards,
(bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "",
(bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "",
(bt->standards & V4L2_DV_BT_STD_CVT) ? " CVT" : "",
(bt->standards & V4L2_DV_BT_STD_GTF) ? " GTF" : "",
(bt->standards & V4L2_DV_BT_STD_SDI) ? " SDI" : "");
if (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT)
pr_info("%s: picture aspect (hor:vert): %u:%u\n", dev_prefix,
bt->picture_aspect.numerator,
bt->picture_aspect.denominator);
if (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC)
pr_info("%s: CEA-861 VIC: %u\n", dev_prefix, bt->cea861_vic);
if (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC)
pr_info("%s: HDMI VIC: %u\n", dev_prefix, bt->hdmi_vic);
}
EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t)
{
struct v4l2_fract ratio = { 1, 1 };
unsigned long n, d;
if (t->type != V4L2_DV_BT_656_1120)
return ratio;
if (!(t->bt.flags & V4L2_DV_FL_HAS_PICTURE_ASPECT))
return ratio;
ratio.numerator = t->bt.width * t->bt.picture_aspect.denominator;
ratio.denominator = t->bt.height * t->bt.picture_aspect.numerator;
rational_best_approximation(ratio.numerator, ratio.denominator,
ratio.numerator, ratio.denominator, &n, &d);
ratio.numerator = n;
ratio.denominator = d;
return ratio;
}
EXPORT_SYMBOL_GPL(v4l2_dv_timings_aspect_ratio);
/**
* v4l2_calc_timeperframe - helper function to calculate timeperframe based
* on v4l2_dv_timings fields.
* @t: timings for the video mode.
*
* Calculates the expected timeperframe using the pixel clock value and
* horizontal/vertical measures. This means that the v4l2_dv_timings
* structure must be correctly and fully filled in.
*/
struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t)
{
const struct v4l2_bt_timings *bt = &t->bt;
struct v4l2_fract fps_fract = { 1, 1 };
unsigned long n, d;
u32 htot, vtot, fps;
u64 pclk;
if (t->type != V4L2_DV_BT_656_1120)
return fps_fract;
htot = V4L2_DV_BT_FRAME_WIDTH(bt);
vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
pclk = bt->pixelclock;
if ((bt->flags & V4L2_DV_FL_CAN_DETECT_REDUCED_FPS) &&
(bt->flags & V4L2_DV_FL_REDUCED_FPS))
pclk = div_u64(pclk * 1000ULL, 1001);
fps = (htot * vtot) > 0 ? div_u64((100 * pclk), (htot * vtot)) : 0;
if (!fps)
return fps_fract;
rational_best_approximation(fps, 100, fps, 100, &n, &d);
fps_fract.numerator = d;
fps_fract.denominator = n;
return fps_fract;
}
EXPORT_SYMBOL_GPL(v4l2_calc_timeperframe);
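/*
* Worked example (illustrative, not part of the original file): for CEA
* 1920x1080p60 the pixel clock is 148500000 Hz and the total frame size is
* 2200x1125, so fps = 100 * 148500000 / (2200 * 1125) = 6000, i.e. 60.00.
* rational_best_approximation(6000, 100, ...) reduces that to 60/1, and the
* returned timeperframe is therefore 1/60.
*/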
/*
* CVT defines
* Based on Coordinated Video Timings Standard
* version 1.1 September 10, 2003
*/
#define CVT_PXL_CLK_GRAN 250000 /* pixel clock granularity */
#define CVT_PXL_CLK_GRAN_RB_V2 1000 /* granularity for reduced blanking v2 */
/* Normal blanking */
#define CVT_MIN_V_BPORCH 7 /* lines */
#define CVT_MIN_V_PORCH_RND 3 /* lines */
#define CVT_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
#define CVT_HSYNC_PERCENT 8 /* nominal hsync as percentage of line */
/* Normal blanking for CVT uses GTF to calculate horizontal blanking */
#define CVT_CELL_GRAN 8 /* character cell granularity */
#define CVT_M 600 /* blanking formula gradient */
#define CVT_C 40 /* blanking formula offset */
#define CVT_K 128 /* blanking formula scaling factor */
#define CVT_J 20 /* blanking formula scaling factor */
#define CVT_C_PRIME (((CVT_C - CVT_J) * CVT_K / 256) + CVT_J)
#define CVT_M_PRIME (CVT_K * CVT_M / 256)
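/*
* Worked out (illustrative note, not part of the original file):
* CVT_C_PRIME = ((40 - 20) * 128 / 256) + 20 = 30 and
* CVT_M_PRIME = 128 * 600 / 256 = 300, i.e. the C' and M' constants of the
* CVT ideal duty cycle formula, duty cycle (%) = C' - M' * 1000 / hfreq.
*/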
/* Reduced Blanking */
#define CVT_RB_MIN_V_BPORCH 7 /* lines */
#define CVT_RB_V_FPORCH 3 /* lines */
#define CVT_RB_MIN_V_BLANK 460 /* us */
#define CVT_RB_H_SYNC 32 /* pixels */
#define CVT_RB_H_BLANK 160 /* pixels */
/* Reduced blanking version 2 */
#define CVT_RB_V2_H_BLANK 80 /* pixels */
#define CVT_RB_MIN_V_FPORCH 3 /* lines */
#define CVT_RB_V2_MIN_V_FPORCH 1 /* lines */
#define CVT_RB_V_BPORCH 6 /* lines */
/**
* v4l2_detect_cvt - detect if the given timings follow the CVT standard
* @frame_height: the total height of the frame (including blanking) in lines.
* @hfreq: the horizontal frequency in Hz.
* @vsync: the height of the vertical sync in lines.
* @active_width: the active width of the image (does not include blanking).
* This information is needed only for version 2 of reduced blanking;
* in all other cases this parameter has no effect on the timings.
* @polarities: the horizontal and vertical polarities (same as struct
* v4l2_bt_timings polarities).
* @interlaced: if true, this indicates an interlaced format.
* @fmt: the resulting timings.
*
* This function will attempt to detect if the given values correspond to a
* valid CVT format. If so, then it will return true, and fmt will be filled
* in with the found CVT timings.
*/
bool v4l2_detect_cvt(unsigned frame_height,
unsigned hfreq,
unsigned vsync,
unsigned active_width,
u32 polarities,
bool interlaced,
struct v4l2_dv_timings *fmt)
{
int v_fp, v_bp, h_fp, h_bp, hsync;
int frame_width, image_height, image_width;
bool reduced_blanking;
bool rb_v2 = false;
unsigned pix_clk;
if (vsync < 4 || vsync > 8)
return false;
if (polarities == V4L2_DV_VSYNC_POS_POL)
reduced_blanking = false;
else if (polarities == V4L2_DV_HSYNC_POS_POL)
reduced_blanking = true;
else
return false;
if (reduced_blanking && vsync == 8)
rb_v2 = true;
if (rb_v2 && active_width == 0)
return false;
if (!rb_v2 && vsync > 7)
return false;
if (hfreq == 0)
return false;
/* Vertical */
if (reduced_blanking) {
if (rb_v2) {
v_bp = CVT_RB_V_BPORCH;
v_fp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1;
v_fp -= vsync + v_bp;
if (v_fp < CVT_RB_V2_MIN_V_FPORCH)
v_fp = CVT_RB_V2_MIN_V_FPORCH;
} else {
v_fp = CVT_RB_V_FPORCH;
v_bp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1;
v_bp -= vsync + v_fp;
if (v_bp < CVT_RB_MIN_V_BPORCH)
v_bp = CVT_RB_MIN_V_BPORCH;
}
} else {
v_fp = CVT_MIN_V_PORCH_RND;
v_bp = (CVT_MIN_VSYNC_BP * hfreq) / 1000000 + 1 - vsync;
if (v_bp < CVT_MIN_V_BPORCH)
v_bp = CVT_MIN_V_BPORCH;
}
if (interlaced)
image_height = (frame_height - 2 * v_fp - 2 * vsync - 2 * v_bp) & ~0x1;
else
image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
if (image_height < 0)
return false;
/* Aspect ratio based on vsync */
switch (vsync) {
case 4:
image_width = (image_height * 4) / 3;
break;
case 5:
image_width = (image_height * 16) / 9;
break;
case 6:
image_width = (image_height * 16) / 10;
break;
case 7:
/* special case */
if (image_height == 1024)
image_width = (image_height * 5) / 4;
else if (image_height == 768)
image_width = (image_height * 15) / 9;
else
return false;
break;
case 8:
image_width = active_width;
break;
default:
return false;
}
if (!rb_v2)
image_width = image_width & ~7;
/* Horizontal */
if (reduced_blanking) {
int h_blank;
int clk_gran;
h_blank = rb_v2 ? CVT_RB_V2_H_BLANK : CVT_RB_H_BLANK;
clk_gran = rb_v2 ? CVT_PXL_CLK_GRAN_RB_V2 : CVT_PXL_CLK_GRAN;
pix_clk = (image_width + h_blank) * hfreq;
pix_clk = (pix_clk / clk_gran) * clk_gran;
h_bp = h_blank / 2;
hsync = CVT_RB_H_SYNC;
h_fp = h_blank - h_bp - hsync;
frame_width = image_width + h_blank;
} else {
unsigned ideal_duty_cycle_per_myriad =
100 * CVT_C_PRIME - (CVT_M_PRIME * 100000) / hfreq;
int h_blank;
if (ideal_duty_cycle_per_myriad < 2000)
ideal_duty_cycle_per_myriad = 2000;
h_blank = image_width * ideal_duty_cycle_per_myriad /
(10000 - ideal_duty_cycle_per_myriad);
h_blank = (h_blank / (2 * CVT_CELL_GRAN)) * 2 * CVT_CELL_GRAN;
pix_clk = (image_width + h_blank) * hfreq;
pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
h_bp = h_blank / 2;
frame_width = image_width + h_blank;
hsync = frame_width * CVT_HSYNC_PERCENT / 100;
hsync = (hsync / CVT_CELL_GRAN) * CVT_CELL_GRAN;
h_fp = h_blank - hsync - h_bp;
}
fmt->type = V4L2_DV_BT_656_1120;
fmt->bt.polarities = polarities;
fmt->bt.width = image_width;
fmt->bt.height = image_height;
fmt->bt.hfrontporch = h_fp;
fmt->bt.vfrontporch = v_fp;
fmt->bt.hsync = hsync;
fmt->bt.vsync = vsync;
fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
if (!interlaced) {
fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
fmt->bt.interlaced = V4L2_DV_PROGRESSIVE;
} else {
fmt->bt.vbackporch = (frame_height - image_height - 2 * v_fp -
2 * vsync) / 2;
fmt->bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
2 * vsync - fmt->bt.vbackporch;
fmt->bt.il_vfrontporch = v_fp;
fmt->bt.il_vsync = vsync;
fmt->bt.flags |= V4L2_DV_FL_HALF_LINE;
fmt->bt.interlaced = V4L2_DV_INTERLACED;
}
fmt->bt.pixelclock = pix_clk;
fmt->bt.standards = V4L2_DV_BT_STD_CVT;
if (reduced_blanking)
fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
return true;
}
EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
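/*
* Example (illustrative sketch, not part of the original file): a receiver
* that measured a 1125-line frame at hfreq = 67500 Hz with a 5-line vsync
* and a positive vsync polarity (the measured values here are hypothetical)
* could attempt CVT detection before falling back to GTF:
*/
static inline bool example_try_cvt(struct v4l2_dv_timings *t)
{
return v4l2_detect_cvt(1125, 67500, 5, 0, V4L2_DV_VSYNC_POS_POL, false, t);
}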
/*
* GTF defines
* Based on Generalized Timing Formula Standard
* Version 1.1 September 2, 1999
*/
#define GTF_PXL_CLK_GRAN 250000 /* pixel clock granularity */
#define GTF_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
#define GTF_V_FP 1 /* vertical front porch (lines) */
#define GTF_CELL_GRAN 8 /* character cell granularity */
/* Default */
#define GTF_D_M 600 /* blanking formula gradient */
#define GTF_D_C 40 /* blanking formula offset */
#define GTF_D_K 128 /* blanking formula scaling factor */
#define GTF_D_J 20 /* blanking formula scaling factor */
#define GTF_D_C_PRIME ((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J)
#define GTF_D_M_PRIME ((GTF_D_K * GTF_D_M) / 256)
/* Secondary */
#define GTF_S_M 3600 /* blanking formula gradient */
#define GTF_S_C 40 /* blanking formula offset */
#define GTF_S_K 128 /* blanking formula scaling factor */
#define GTF_S_J 35 /* blanking formula scaling factor */
#define GTF_S_C_PRIME ((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J)
#define GTF_S_M_PRIME ((GTF_S_K * GTF_S_M) / 256)
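/*
* Worked out with integer arithmetic (illustrative note, not part of the
* original file): GTF_D_C_PRIME = ((40 - 20) * 128) / 256 + 20 = 30 and
* GTF_D_M_PRIME = (128 * 600) / 256 = 300; for the secondary curve
* GTF_S_C_PRIME = ((40 - 35) * 128) / 256 + 35 = 37 (640 / 256 truncates
* to 2) and GTF_S_M_PRIME = (128 * 3600) / 256 = 1800.
*/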
/**
* v4l2_detect_gtf - detect if the given timings follow the GTF standard
* @frame_height: the total height of the frame (including blanking) in lines.
* @hfreq: the horizontal frequency in Hz.
* @vsync: the height of the vertical sync in lines.
* @polarities: the horizontal and vertical polarities (same as struct
* v4l2_bt_timings polarities).
* @interlaced: if true, this indicates an interlaced format.
* @aspect: the preferred aspect ratio. GTF has no method of determining the
* aspect ratio in order to derive the image width from the image
* height, so it has to be passed explicitly. Usually the native
* screen aspect ratio is used for this. If it is not filled in
* correctly, then 16:9 will be assumed.
* @fmt: the resulting timings.
*
* This function will attempt to detect if the given values correspond to a
* valid GTF format. If so, then it will return true, and fmt will be filled
* in with the found GTF timings.
*/
bool v4l2_detect_gtf(unsigned frame_height,
unsigned hfreq,
unsigned vsync,
u32 polarities,
bool interlaced,
struct v4l2_fract aspect,
struct v4l2_dv_timings *fmt)
{
int pix_clk;
int v_fp, v_bp, h_fp, hsync;
int frame_width, image_height, image_width;
bool default_gtf;
int h_blank;
if (vsync != 3)
return false;
if (polarities == V4L2_DV_VSYNC_POS_POL)
default_gtf = true;
else if (polarities == V4L2_DV_HSYNC_POS_POL)
default_gtf = false;
else
return false;
if (hfreq == 0)
return false;
/* Vertical */
v_fp = GTF_V_FP;
v_bp = (GTF_MIN_VSYNC_BP * hfreq + 500000) / 1000000 - vsync;
if (interlaced)
image_height = (frame_height - 2 * v_fp - 2 * vsync - 2 * v_bp) & ~0x1;
else
image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
if (image_height < 0)
return false;
if (aspect.numerator == 0 || aspect.denominator == 0) {
aspect.numerator = 16;
aspect.denominator = 9;
}
image_width = ((image_height * aspect.numerator) / aspect.denominator);
image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);
/* Horizontal */
if (default_gtf) {
u64 num;
u32 den;
num = ((image_width * GTF_D_C_PRIME * (u64)hfreq) -
((u64)image_width * GTF_D_M_PRIME * 1000));
den = (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) *
(2 * GTF_CELL_GRAN);
h_blank = div_u64((num + (den >> 1)), den);
h_blank *= (2 * GTF_CELL_GRAN);
} else {
u64 num;
u32 den;
num = ((image_width * GTF_S_C_PRIME * (u64)hfreq) -
((u64)image_width * GTF_S_M_PRIME * 1000));
den = (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) *
(2 * GTF_CELL_GRAN);
h_blank = div_u64((num + (den >> 1)), den);
h_blank *= (2 * GTF_CELL_GRAN);
}
frame_width = image_width + h_blank;
pix_clk = (image_width + h_blank) * hfreq;
pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;
hsync = (frame_width * 8 + 50) / 100;
hsync = DIV_ROUND_CLOSEST(hsync, GTF_CELL_GRAN) * GTF_CELL_GRAN;
h_fp = h_blank / 2 - hsync;
fmt->type = V4L2_DV_BT_656_1120;
fmt->bt.polarities = polarities;
fmt->bt.width = image_width;
fmt->bt.height = image_height;
fmt->bt.hfrontporch = h_fp;
fmt->bt.vfrontporch = v_fp;
fmt->bt.hsync = hsync;
fmt->bt.vsync = vsync;
fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
if (!interlaced) {
fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
fmt->bt.interlaced = V4L2_DV_PROGRESSIVE;
} else {
fmt->bt.vbackporch = (frame_height - image_height - 2 * v_fp -
2 * vsync) / 2;
fmt->bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
2 * vsync - fmt->bt.vbackporch;
fmt->bt.il_vfrontporch = v_fp;
fmt->bt.il_vsync = vsync;
fmt->bt.flags |= V4L2_DV_FL_HALF_LINE;
fmt->bt.interlaced = V4L2_DV_INTERLACED;
}
fmt->bt.pixelclock = pix_clk;
fmt->bt.standards = V4L2_DV_BT_STD_GTF;
if (!default_gtf)
fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
return true;
}
EXPORT_SYMBOL_GPL(v4l2_detect_gtf);
/**
* v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
* 0x15 and 0x16 from the EDID.
* @hor_landscape: byte 0x15 from the EDID.
* @vert_portrait: byte 0x16 from the EDID.
*
* Determines the aspect ratio from the EDID.
* See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2:
* "Horizontal and Vertical Screen Size or Aspect Ratio"
*/
struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
{
struct v4l2_fract aspect = { 16, 9 };
u8 ratio;
/* Nothing filled in, fallback to 16:9 */
if (!hor_landscape && !vert_portrait)
return aspect;
/* Both filled in, so they are interpreted as the screen size in cm */
if (hor_landscape && vert_portrait) {
aspect.numerator = hor_landscape;
aspect.denominator = vert_portrait;
return aspect;
}
/* Only one is filled in, so interpret it as a ratio:
(val + 99) / 100 */
ratio = hor_landscape | vert_portrait;
/* Change some rounded values into the exact aspect ratio */
if (ratio == 79) {
aspect.numerator = 16;
aspect.denominator = 9;
} else if (ratio == 34) {
aspect.numerator = 4;
aspect.denominator = 3;
} else if (ratio == 68) {
aspect.numerator = 15;
aspect.denominator = 9;
} else {
aspect.numerator = hor_landscape + 99;
aspect.denominator = 100;
}
if (hor_landscape)
return aspect;
/* The aspect ratio is for portrait, so swap numerator and denominator */
swap(aspect.denominator, aspect.numerator);
return aspect;
}
EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio);
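/*
* Worked examples (illustrative, not part of the original file): with
* hor_landscape = 79 and vert_portrait = 0 the ratio byte is 79, the
* rounded encoding of (79 + 99) / 100 = 1.78, so the exact 16:9 is
* returned. A 16:10 landscape screen encodes as 61, which is not
* special-cased and yields 160:100, i.e. 16:10.
*/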
/**
* v4l2_hdmi_rx_colorimetry - determine HDMI colorimetry information
* based on various InfoFrames.
* @avi: the AVI InfoFrame
* @hdmi: the HDMI Vendor InfoFrame, may be NULL
* @height: the frame height
*
* Determines the HDMI colorimetry information, i.e. how the HDMI
* pixel color data should be interpreted.
*
* Note that some of the newer features (DCI-P3, HDR) are not yet
* implemented: the hdmi.h header needs to be updated to the HDMI 2.0
* and CTA-861-G standards.
*/
struct v4l2_hdmi_colorimetry
v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
const struct hdmi_vendor_infoframe *hdmi,
unsigned int height)
{
struct v4l2_hdmi_colorimetry c = {
V4L2_COLORSPACE_SRGB,
V4L2_YCBCR_ENC_DEFAULT,
V4L2_QUANTIZATION_FULL_RANGE,
V4L2_XFER_FUNC_SRGB
};
bool is_ce = avi->video_code || (hdmi && hdmi->vic);
bool is_sdtv = height <= 576;
bool default_is_lim_range_rgb = avi->video_code > 1;
switch (avi->colorspace) {
case HDMI_COLORSPACE_RGB:
/* RGB pixel encoding */
switch (avi->colorimetry) {
case HDMI_COLORIMETRY_EXTENDED:
switch (avi->extended_colorimetry) {
case HDMI_EXTENDED_COLORIMETRY_OPRGB:
c.colorspace = V4L2_COLORSPACE_OPRGB;
c.xfer_func = V4L2_XFER_FUNC_OPRGB;
break;
case HDMI_EXTENDED_COLORIMETRY_BT2020:
c.colorspace = V4L2_COLORSPACE_BT2020;
c.xfer_func = V4L2_XFER_FUNC_709;
break;
default:
break;
}
break;
default:
break;
}
switch (avi->quantization_range) {
case HDMI_QUANTIZATION_RANGE_LIMITED:
c.quantization = V4L2_QUANTIZATION_LIM_RANGE;
break;
case HDMI_QUANTIZATION_RANGE_FULL:
break;
default:
if (default_is_lim_range_rgb)
c.quantization = V4L2_QUANTIZATION_LIM_RANGE;
break;
}
break;
default:
/* YCbCr pixel encoding */
c.quantization = V4L2_QUANTIZATION_LIM_RANGE;
switch (avi->colorimetry) {
case HDMI_COLORIMETRY_NONE:
if (!is_ce)
break;
if (is_sdtv) {
c.colorspace = V4L2_COLORSPACE_SMPTE170M;
c.ycbcr_enc = V4L2_YCBCR_ENC_601;
} else {
c.colorspace = V4L2_COLORSPACE_REC709;
c.ycbcr_enc = V4L2_YCBCR_ENC_709;
}
c.xfer_func = V4L2_XFER_FUNC_709;
break;
case HDMI_COLORIMETRY_ITU_601:
c.colorspace = V4L2_COLORSPACE_SMPTE170M;
c.ycbcr_enc = V4L2_YCBCR_ENC_601;
c.xfer_func = V4L2_XFER_FUNC_709;
break;
case HDMI_COLORIMETRY_ITU_709:
c.colorspace = V4L2_COLORSPACE_REC709;
c.ycbcr_enc = V4L2_YCBCR_ENC_709;
c.xfer_func = V4L2_XFER_FUNC_709;
break;
case HDMI_COLORIMETRY_EXTENDED:
switch (avi->extended_colorimetry) {
case HDMI_EXTENDED_COLORIMETRY_XV_YCC_601:
c.colorspace = V4L2_COLORSPACE_REC709;
c.ycbcr_enc = V4L2_YCBCR_ENC_XV709;
c.xfer_func = V4L2_XFER_FUNC_709;
break;
case HDMI_EXTENDED_COLORIMETRY_XV_YCC_709:
c.colorspace = V4L2_COLORSPACE_REC709;
c.ycbcr_enc = V4L2_YCBCR_ENC_XV601;
c.xfer_func = V4L2_XFER_FUNC_709;
break;
case HDMI_EXTENDED_COLORIMETRY_S_YCC_601:
c.colorspace = V4L2_COLORSPACE_SRGB;
c.ycbcr_enc = V4L2_YCBCR_ENC_601;
c.xfer_func = V4L2_XFER_FUNC_SRGB;
break;
case HDMI_EXTENDED_COLORIMETRY_OPYCC_601:
c.colorspace = V4L2_COLORSPACE_OPRGB;
c.ycbcr_enc = V4L2_YCBCR_ENC_601;
c.xfer_func = V4L2_XFER_FUNC_OPRGB;
break;
case HDMI_EXTENDED_COLORIMETRY_BT2020:
c.colorspace = V4L2_COLORSPACE_BT2020;
c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020;
c.xfer_func = V4L2_XFER_FUNC_709;
break;
case HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM:
c.colorspace = V4L2_COLORSPACE_BT2020;
c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020_CONST_LUM;
c.xfer_func = V4L2_XFER_FUNC_709;
break;
default: /* fall back to ITU_709 */
c.colorspace = V4L2_COLORSPACE_REC709;
c.ycbcr_enc = V4L2_YCBCR_ENC_709;
c.xfer_func = V4L2_XFER_FUNC_709;
break;
}
break;
default:
break;
}
/*
* YCC Quantization Range signaling is more-or-less broken,
* let's just ignore this.
*/
break;
}
return c;
}
EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry);
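/*
* Example (illustrative, not part of the original file): an AVI InfoFrame
* signalling RGB with CEA VIC 16 (1080p60) and an unspecified quantization
* range ends up with V4L2_COLORSPACE_SRGB and V4L2_QUANTIZATION_LIM_RANGE,
* since CE video (video_code > 1) defaults to limited range RGB.
*/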
/**
* v4l2_get_edid_phys_addr() - find and return the physical address
*
* @edid: pointer to the EDID data
* @size: size in bytes of the EDID data
* @offset: If not %NULL then the location of the physical address
* bytes in the EDID will be returned here. This is set to 0
* if there is no physical address found.
*
* Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
*/
u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
unsigned int *offset)
{
unsigned int loc = cec_get_edid_spa_location(edid, size);
if (offset)
*offset = loc;
if (loc == 0)
return CEC_PHYS_ADDR_INVALID;
return (edid[loc] << 8) | edid[loc + 1];
}
EXPORT_SYMBOL_GPL(v4l2_get_edid_phys_addr);
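/*
* Worked example (illustrative, not part of the original file): an EDID
* whose CEC Source Physical Address bytes are 0x12 0x00 describes the
* address 1.2.0.0, and the helper returns (0x12 << 8) | 0x00 = 0x1200.
*/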
/**
* v4l2_set_edid_phys_addr() - find and set the physical address
*
* @edid: pointer to the EDID data
* @size: size in bytes of the EDID data
* @phys_addr: the new physical address
*
* This function finds the location of the physical address in the EDID
* and fills in the given physical address and updates the checksum
* at the end of the EDID block. It does nothing if the EDID doesn't
* contain a physical address.
*/
void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
{
unsigned int loc = cec_get_edid_spa_location(edid, size);
u8 sum = 0;
unsigned int i;
if (loc == 0)
return;
edid[loc] = phys_addr >> 8;
edid[loc + 1] = phys_addr & 0xff;
loc &= ~0x7f;
/* update the checksum */
for (i = loc; i < loc + 127; i++)
sum += edid[i];
edid[i] = 256 - sum;
}
EXPORT_SYMBOL_GPL(v4l2_set_edid_phys_addr);
/**
* v4l2_phys_addr_for_input() - calculate the PA for an input
*
* @phys_addr: the physical address of the parent
* @input: the number of the input port, must be between 1 and 15
*
* This function calculates a new physical address based on the input
* port number. For example:
*
* PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
*
* PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
*
* PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
*
* PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
*
* Return: the new physical address or CEC_PHYS_ADDR_INVALID.
*/
u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input)
{
/* Check if input is sane */
if (WARN_ON(input == 0 || input > 0xf))
return CEC_PHYS_ADDR_INVALID;
if (phys_addr == 0)
return input << 12;
if ((phys_addr & 0x0fff) == 0)
return phys_addr | (input << 8);
if ((phys_addr & 0x00ff) == 0)
return phys_addr | (input << 4);
if ((phys_addr & 0x000f) == 0)
return phys_addr | input;
/*
* All nibbles are used so no valid physical addresses can be assigned
* to the input.
*/
return CEC_PHYS_ADDR_INVALID;
}
EXPORT_SYMBOL_GPL(v4l2_phys_addr_for_input);
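/*
* Example (illustrative, not part of the original file): an HDMI receiver
* with physical address 3.0.0.0 (0x3000) that exposes two input ports
* would advertise 3.1.0.0 and 3.2.0.0 on those ports:
*/
static inline u16 example_port_pa(u16 own_pa, u8 port)
{
return v4l2_phys_addr_for_input(own_pa, port); /* 0x3000, 2 -> 0x3200 */
}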
/**
* v4l2_phys_addr_validate() - validate a physical address from an EDID
*
* @phys_addr: the physical address to validate
* @parent: if not %NULL, then this is filled with the parents PA.
* @port: if not %NULL, then this is filled with the input port.
*
* This validates a physical address as read from an EDID. If the
* PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
* then it will return -EINVAL.
*
* The parent PA is passed into %parent and the input port is passed into
* %port. For example:
*
* PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
*
* PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
*
* PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
*
* PA = f.f.f.f: has parent f.f.f.f and input port 0.
*
* Return: 0 if the PA is valid, -EINVAL if not.
*/
int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
{
int i;
if (parent)
*parent = phys_addr;
if (port)
*port = 0;
if (phys_addr == CEC_PHYS_ADDR_INVALID)
return 0;
for (i = 0; i < 16; i += 4)
if (phys_addr & (0xf << i))
break;
if (i == 16)
return 0;
if (parent)
*parent = phys_addr & (0xfff0 << i);
if (port)
*port = (phys_addr >> i) & 0xf;
for (i += 4; i < 16; i += 4)
if ((phys_addr & (0xf << i)) == 0)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_phys_addr_validate);
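/*
* Worked example (illustrative, not part of the original file): validating
* PA 3.2.0.0 (0x3200) succeeds with parent 3.0.0.0 and port 2, while
* 1.0.1.0 (0x1010) returns -EINVAL because a zero nibble may only be
* followed by further zero nibbles.
*/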
| linux-master | drivers/media/v4l2-core/v4l2-dv-timings.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* helper functions for physically contiguous capture buffers
*
* The functions support hardware lacking scatter gather support
* (i.e. the buffers must be linear in physical memory)
*
* Copyright (c) 2008 Magnus Damm
*
* Based on videobuf-vmalloc.c,
* (c) 2007 Mauro Carvalho Chehab, <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>
struct videobuf_dma_contig_memory {
u32 magic;
void *vaddr;
dma_addr_t dma_handle;
unsigned long size;
};
#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should) \
if (unlikely((is) != (should))) { \
pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
BUG(); \
}
static int __videobuf_dc_alloc(struct device *dev,
struct videobuf_dma_contig_memory *mem,
unsigned long size)
{
mem->size = size;
mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle,
GFP_KERNEL);
if (!mem->vaddr) {
dev_err(dev, "memory alloc size %ld failed\n", mem->size);
return -ENOMEM;
}
dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);
return 0;
}
static void __videobuf_dc_free(struct device *dev,
struct videobuf_dma_contig_memory *mem)
{
dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
mem->vaddr = NULL;
}
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
map->count++;
}
static void videobuf_vm_close(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
struct videobuf_queue *q = map->q;
int i;
dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
map->count--;
if (0 == map->count) {
struct videobuf_dma_contig_memory *mem;
dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
videobuf_queue_lock(q);
/* We first need to cancel streams before unmapping */
if (q->streaming)
videobuf_queue_cancel(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
if (q->bufs[i]->map != map)
continue;
mem = q->bufs[i]->priv;
if (mem) {
/* This callback is called only if the kernel has
allocated memory and this memory is mmapped.
In this case the memory must be freed here
so that the mapping can be torn down.
*/
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
/* vfree is not atomic - can't be
called with IRQs disabled
*/
dev_dbg(q->dev, "buf[%d] freeing %p\n",
i, mem->vaddr);
__videobuf_dc_free(q->dev, mem);
mem->vaddr = NULL;
}
q->bufs[i]->map = NULL;
q->bufs[i]->baddr = 0;
}
kfree(map);
videobuf_queue_unlock(q);
}
}
static const struct vm_operations_struct videobuf_vm_ops = {
.open = videobuf_vm_open,
.close = videobuf_vm_close,
};
/**
* videobuf_dma_contig_user_put() - reset pointer to user space buffer
* @mem: per-buffer private videobuf-dma-contig data
*
* This function resets the user space pointer
*/
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
mem->dma_handle = 0;
mem->size = 0;
}
/**
* videobuf_dma_contig_user_get() - setup user space memory pointer
* @mem: per-buffer private videobuf-dma-contig data
* @vb: video buffer to map
*
* This function validates and sets up a pointer to user space memory.
* Only physically contiguous pfn-mapped memory is accepted.
*
* Returns 0 if successful.
*/
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
struct videobuf_buffer *vb)
{
unsigned long untagged_baddr = untagged_addr(vb->baddr);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long prev_pfn, this_pfn;
unsigned long pages_done, user_address;
unsigned int offset;
int ret;
offset = untagged_baddr & ~PAGE_MASK;
mem->size = PAGE_ALIGN(vb->size + offset);
ret = -EINVAL;
mmap_read_lock(mm);
vma = find_vma(mm, untagged_baddr);
if (!vma)
goto out_up;
if ((untagged_baddr + mem->size) > vma->vm_end)
goto out_up;
pages_done = 0;
prev_pfn = 0; /* kill warning */
user_address = untagged_baddr;
while (pages_done < (mem->size >> PAGE_SHIFT)) {
ret = follow_pfn(vma, user_address, &this_pfn);
if (ret)
break;
if (pages_done == 0)
mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
else if (this_pfn != (prev_pfn + 1))
ret = -EFAULT;
if (ret)
break;
prev_pfn = this_pfn;
user_address += PAGE_SIZE;
pages_done++;
}
out_up:
mmap_read_unlock(current->mm);
return ret;
}
static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_buffer *vb;
vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
if (vb) {
vb->priv = ((char *)vb) + size;
mem = vb->priv;
mem->magic = MAGIC_DC_MEM;
}
return vb;
}
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
return mem->vaddr;
}
static int __videobuf_iolock(struct videobuf_queue *q,
struct videobuf_buffer *vb,
struct v4l2_framebuffer *fbuf)
{
struct videobuf_dma_contig_memory *mem = vb->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
switch (vb->memory) {
case V4L2_MEMORY_MMAP:
dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
/* All handling should be done by __videobuf_mmap_mapper() */
if (!mem->vaddr) {
dev_err(q->dev, "memory is not allocated/mmapped.\n");
return -EINVAL;
}
break;
case V4L2_MEMORY_USERPTR:
dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
/* handle pointer from user space */
if (vb->baddr)
return videobuf_dma_contig_user_get(mem, vb);
/* allocate memory for the read() method */
if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size)))
return -ENOMEM;
break;
case V4L2_MEMORY_OVERLAY:
default:
dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
return -EINVAL;
}
return 0;
}
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
struct videobuf_buffer *buf,
struct vm_area_struct *vma)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_mapping *map;
int retval;
dev_dbg(q->dev, "%s\n", __func__);
/* create mapping + update buffer list */
map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (!map)
return -ENOMEM;
buf->map = map;
map->q = q;
buf->baddr = vma->vm_start;
mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize)))
goto error;
/* the "vm_pgoff" is just used in v4l2 to find the
* corresponding buffer data structure which is allocated
* earlier and it does not mean the offset from the physical
* buffer start address as usual. So set it to 0 to pass
* the sanity check in dma_mmap_coherent().
*/
vma->vm_pgoff = 0;
retval = dma_mmap_coherent(q->dev, vma, mem->vaddr, mem->dma_handle,
mem->size);
if (retval) {
dev_err(q->dev, "mmap: remap failed with error %d. ",
retval);
dma_free_coherent(q->dev, mem->size,
mem->vaddr, mem->dma_handle);
goto error;
}
vma->vm_ops = &videobuf_vm_ops;
vm_flags_set(vma, VM_DONTEXPAND);
vma->vm_private_data = map;
dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
map, q, vma->vm_start, vma->vm_end,
(long int)buf->bsize, vma->vm_pgoff, buf->i);
videobuf_vm_open(vma);
return 0;
error:
kfree(map);
return -ENOMEM;
}
static struct videobuf_qtype_ops qops = {
.magic = MAGIC_QTYPE_OPS,
.alloc_vb = __videobuf_alloc,
.iolock = __videobuf_iolock,
.mmap_mapper = __videobuf_mmap_mapper,
.vaddr = __videobuf_to_vaddr,
};
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
void *priv,
struct mutex *ext_lock)
{
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
void videobuf_dma_contig_free(struct videobuf_queue *q,
struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
/* mmapped memory can't be freed here, since the mmapped region
would then be released while still needed. In that case the
memory release must happen inside videobuf_vm_close().
So free the memory here only if it was allocated for the
read() operation.
*/
if (buf->memory != V4L2_MEMORY_USERPTR)
return;
if (!mem)
return;
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
/* handle user space pointer case */
if (buf->baddr) {
videobuf_dma_contig_user_put(mem);
return;
}
/* read() method */
if (mem->vaddr) {
__videobuf_dc_free(q->dev, mem);
mem->vaddr = NULL;
}
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/v4l2-core/videobuf-dma-contig.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* helper functions for vmalloc video4linux capture buffers
*
* The functions expect the hardware being able to scatter gather
* (i.e. the buffers are not linear in physical memory, but fragmented
* into PAGE_SIZE chunks). They also assume the driver does not need
* to touch the video data.
*
* (c) 2007 Mauro Carvalho Chehab <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pgtable.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <asm/page.h>
#include <media/videobuf-vmalloc.h>
#define MAGIC_DMABUF 0x17760309
#define MAGIC_VMAL_MEM 0x18221223
#define MAGIC_CHECK(is, should) \
if (unlikely((is) != (should))) { \
printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
is, should); \
BUG(); \
}
static int debug;
module_param(debug, int, 0644);
MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <[email protected]>");
MODULE_LICENSE("GPL");
#define dprintk(level, fmt, arg...) \
if (debug >= level) \
printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg)
/***************************************************************************/
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
map->count++;
}
static void videobuf_vm_close(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
struct videobuf_queue *q = map->q;
int i;
dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
map->count--;
if (0 == map->count) {
struct videobuf_vmalloc_memory *mem;
dprintk(1, "munmap %p q=%p\n", map, q);
videobuf_queue_lock(q);
/* We first need to cancel streams before unmapping */
if (q->streaming)
videobuf_queue_cancel(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
if (q->bufs[i]->map != map)
continue;
mem = q->bufs[i]->priv;
if (mem) {
/* This callback is called only if the kernel has
allocated memory and this memory is mmapped.
In this case the memory must be freed here
so that the mapping can be torn down.
*/
MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
/* vfree is not atomic - can't be
called with IRQs disabled
*/
dprintk(1, "%s: buf[%d] freeing (%p)\n",
__func__, i, mem->vaddr);
vfree(mem->vaddr);
mem->vaddr = NULL;
}
q->bufs[i]->map = NULL;
q->bufs[i]->baddr = 0;
}
kfree(map);
videobuf_queue_unlock(q);
}
return;
}
static const struct vm_operations_struct videobuf_vm_ops = {
.open = videobuf_vm_open,
.close = videobuf_vm_close,
};
/* ---------------------------------------------------------------------
* vmalloc handlers for the generic methods
*/
/* The allocated area consists of 3 parts:
struct videobuf_buffer
struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
struct videobuf_vmalloc_memory
*/
static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
{
struct videobuf_vmalloc_memory *mem;
struct videobuf_buffer *vb;
vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
if (!vb)
return vb;
mem = vb->priv = ((char *)vb) + size;
mem->magic = MAGIC_VMAL_MEM;
dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
__func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
mem, (long)sizeof(*mem));
return vb;
}
static int __videobuf_iolock(struct videobuf_queue *q,
struct videobuf_buffer *vb,
struct v4l2_framebuffer *fbuf)
{
struct videobuf_vmalloc_memory *mem = vb->priv;
int pages;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
switch (vb->memory) {
case V4L2_MEMORY_MMAP:
dprintk(1, "%s memory method MMAP\n", __func__);
/* All handling should be done by __videobuf_mmap_mapper() */
if (!mem->vaddr) {
printk(KERN_ERR "memory is not allocated/mmapped.\n");
return -EINVAL;
}
break;
case V4L2_MEMORY_USERPTR:
pages = PAGE_ALIGN(vb->size);
dprintk(1, "%s memory method USERPTR\n", __func__);
if (vb->baddr) {
printk(KERN_ERR "USERPTR is currently not supported\n");
return -EINVAL;
}
/* The only USERPTR currently supported is the one needed for
* the read() method.
*/
mem->vaddr = vmalloc_user(pages);
if (!mem->vaddr) {
printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
return -ENOMEM;
}
dprintk(1, "vmalloc is at addr %p (%d pages)\n",
mem->vaddr, pages);
break;
case V4L2_MEMORY_OVERLAY:
default:
dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);
/* V4L2_MEMORY_OVERLAY is not currently supported */
printk(KERN_ERR "Memory method currently unsupported.\n");
return -EINVAL;
}
return 0;
}
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
struct videobuf_buffer *buf,
struct vm_area_struct *vma)
{
struct videobuf_vmalloc_memory *mem;
struct videobuf_mapping *map;
int retval, pages;
dprintk(1, "%s\n", __func__);
/* create mapping + update buffer list */
map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (NULL == map)
return -ENOMEM;
buf->map = map;
map->q = q;
buf->baddr = vma->vm_start;
mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
pages = PAGE_ALIGN(vma->vm_end - vma->vm_start);
mem->vaddr = vmalloc_user(pages);
if (!mem->vaddr) {
printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
goto error;
}
dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages);
/* Try to remap memory */
retval = remap_vmalloc_range(vma, mem->vaddr, 0);
if (retval < 0) {
printk(KERN_ERR "mmap: remap failed with error %d. ", retval);
vfree(mem->vaddr);
goto error;
}
vma->vm_ops = &videobuf_vm_ops;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_private_data = map;
dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
map, q, vma->vm_start, vma->vm_end,
(long int)buf->bsize,
vma->vm_pgoff, buf->i);
videobuf_vm_open(vma);
return 0;
error:
kfree(map);
return -ENOMEM;
}
static struct videobuf_qtype_ops qops = {
.magic = MAGIC_QTYPE_OPS,
.alloc_vb = __videobuf_alloc_vb,
.iolock = __videobuf_iolock,
.mmap_mapper = __videobuf_mmap_mapper,
.vaddr = videobuf_to_vmalloc,
};
void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
void *priv,
struct mutex *ext_lock)
{
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
void *videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
struct videobuf_vmalloc_memory *mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
return mem->vaddr;
}
EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);
void videobuf_vmalloc_free(struct videobuf_buffer *buf)
{
struct videobuf_vmalloc_memory *mem = buf->priv;
/* mmapped memory can't be freed here, otherwise the mmapped region
would be released while still in use. In that case, the memory
release must happen inside videobuf_vm_close().
So, free the memory only if it was allocated for the read()
operation.
*/
if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
return;
if (!mem)
return;
MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
vfree(mem->vaddr);
mem->vaddr = NULL;
return;
}
EXPORT_SYMBOL_GPL(videobuf_vmalloc_free);
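/*
 * Usage sketch (illustrative, not part of this file): a capture driver
 * would typically initialize its videobuf queue with the vmalloc
 * handlers from its open() handler. The ops, dev, lock and priv
 * arguments come from the driver and are assumptions here; real drivers
 * normally pass sizeof() their own buffer struct as msize.
 */
static void __maybe_unused
example_vmalloc_queue_init(struct videobuf_queue *q,
			   const struct videobuf_queue_ops *ops,
			   struct device *dev, spinlock_t *lock, void *priv)
{
	videobuf_queue_vmalloc_init(q, ops, dev, lock,
				    V4L2_BUF_TYPE_VIDEO_CAPTURE,
				    V4L2_FIELD_INTERLACED,
				    sizeof(struct videobuf_buffer),
				    priv, NULL);
}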
| linux-master | drivers/media/v4l2-core/videobuf-vmalloc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Media Controller ancillary functions
*
* Copyright (c) 2016 Mauro Carvalho Chehab <[email protected]>
* Copyright (C) 2016 Shuah Khan <[email protected]>
* Copyright (C) 2006-2010 Nokia Corporation
* Copyright (c) 2016 Intel Corporation.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/usb.h>
#include <media/media-device.h>
#include <media/media-entity.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-core.h>
int v4l2_mc_create_media_graph(struct media_device *mdev)
{
struct media_entity *entity;
struct media_entity *if_vid = NULL, *if_aud = NULL;
struct media_entity *tuner = NULL, *decoder = NULL;
struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL;
bool is_webcam = false;
u32 flags;
int ret, pad_sink, pad_source;
if (!mdev)
return 0;
media_device_for_each_entity(entity, mdev) {
switch (entity->function) {
case MEDIA_ENT_F_IF_VID_DECODER:
if_vid = entity;
break;
case MEDIA_ENT_F_IF_AUD_DECODER:
if_aud = entity;
break;
case MEDIA_ENT_F_TUNER:
tuner = entity;
break;
case MEDIA_ENT_F_ATV_DECODER:
decoder = entity;
break;
case MEDIA_ENT_F_IO_V4L:
io_v4l = entity;
break;
case MEDIA_ENT_F_IO_VBI:
io_vbi = entity;
break;
case MEDIA_ENT_F_IO_SWRADIO:
io_swradio = entity;
break;
case MEDIA_ENT_F_CAM_SENSOR:
is_webcam = true;
break;
}
}
/* It should have at least one I/O entity */
if (!io_v4l && !io_vbi && !io_swradio) {
dev_warn(mdev->dev, "Didn't find any I/O entity\n");
return -EINVAL;
}
/*
* Here, webcams are modelled in a very simple way: the sensor is
* connected directly to the I/O entity. All dirty details, like
* scaler and crop hardware, are hidden. While such a mapping is not
* enough for mc-centric hardware, it is enough for V4L2 interface
* centric consumer hardware.
*/
if (is_webcam) {
if (!io_v4l) {
dev_warn(mdev->dev, "Didn't find a MEDIA_ENT_F_IO_V4L\n");
return -EINVAL;
}
media_device_for_each_entity(entity, mdev) {
if (entity->function != MEDIA_ENT_F_CAM_SENSOR)
continue;
ret = media_create_pad_link(entity, 0,
io_v4l, 0,
MEDIA_LNK_FL_ENABLED);
if (ret) {
dev_warn(mdev->dev, "Failed to create a sensor link\n");
return ret;
}
}
if (!decoder)
return 0;
}
/* The device isn't a webcam, so it should have a decoder */
if (!decoder) {
dev_warn(mdev->dev, "Decoder not found\n");
return -EINVAL;
}
/* Link the tuner and IF video output pads */
if (tuner) {
if (if_vid) {
pad_source = media_get_pad_index(tuner,
MEDIA_PAD_FL_SOURCE,
PAD_SIGNAL_ANALOG);
pad_sink = media_get_pad_index(if_vid,
MEDIA_PAD_FL_SINK,
PAD_SIGNAL_ANALOG);
if (pad_source < 0 || pad_sink < 0) {
dev_warn(mdev->dev, "Couldn't get tuner and/or PLL pad(s): (%d, %d)\n",
pad_source, pad_sink);
return -EINVAL;
}
ret = media_create_pad_link(tuner, pad_source,
if_vid, pad_sink,
MEDIA_LNK_FL_ENABLED);
if (ret) {
dev_warn(mdev->dev, "Couldn't create tuner->PLL link)\n");
return ret;
}
pad_source = media_get_pad_index(if_vid,
MEDIA_PAD_FL_SOURCE,
PAD_SIGNAL_ANALOG);
pad_sink = media_get_pad_index(decoder,
MEDIA_PAD_FL_SINK,
PAD_SIGNAL_ANALOG);
if (pad_source < 0 || pad_sink < 0) {
dev_warn(mdev->dev, "get decoder and/or PLL pad(s): (%d, %d)\n",
pad_source, pad_sink);
return -EINVAL;
}
ret = media_create_pad_link(if_vid, pad_source,
decoder, pad_sink,
MEDIA_LNK_FL_ENABLED);
if (ret) {
dev_warn(mdev->dev, "couldn't link PLL to decoder\n");
return ret;
}
} else {
pad_source = media_get_pad_index(tuner,
MEDIA_PAD_FL_SOURCE,
PAD_SIGNAL_ANALOG);
pad_sink = media_get_pad_index(decoder,
MEDIA_PAD_FL_SINK,
PAD_SIGNAL_ANALOG);
if (pad_source < 0 || pad_sink < 0) {
dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s): (%d, %d)\n",
pad_source, pad_sink);
return -EINVAL;
}
ret = media_create_pad_link(tuner, pad_source,
decoder, pad_sink,
MEDIA_LNK_FL_ENABLED);
if (ret)
return ret;
}
if (if_aud) {
pad_source = media_get_pad_index(tuner,
MEDIA_PAD_FL_SOURCE,
PAD_SIGNAL_AUDIO);
pad_sink = media_get_pad_index(if_aud,
MEDIA_PAD_FL_SINK,
PAD_SIGNAL_AUDIO);
if (pad_source < 0 || pad_sink < 0) {
dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s) for audio: (%d, %d)\n",
pad_source, pad_sink);
return -EINVAL;
}
ret = media_create_pad_link(tuner, pad_source,
if_aud, pad_sink,
MEDIA_LNK_FL_ENABLED);
if (ret) {
dev_warn(mdev->dev, "couldn't link tuner->audio PLL\n");
return ret;
}
} else {
if_aud = tuner;
}
}
/* Create demod to V4L, VBI and SDR radio links */
if (io_v4l) {
pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
PAD_SIGNAL_DV);
if (pad_source < 0) {
dev_warn(mdev->dev, "couldn't get decoder output pad for V4L I/O\n");
return -EINVAL;
}
ret = media_create_pad_link(decoder, pad_source,
io_v4l, 0,
MEDIA_LNK_FL_ENABLED);
if (ret) {
dev_warn(mdev->dev, "couldn't link decoder output to V4L I/O\n");
return ret;
}
}
if (io_swradio) {
pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
PAD_SIGNAL_DV);
if (pad_source < 0) {
dev_warn(mdev->dev, "couldn't get decoder output pad for SDR\n");
return -EINVAL;
}
ret = media_create_pad_link(decoder, pad_source,
io_swradio, 0,
MEDIA_LNK_FL_ENABLED);
if (ret) {
dev_warn(mdev->dev, "couldn't link decoder output to SDR\n");
return ret;
}
}
if (io_vbi) {
pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
PAD_SIGNAL_DV);
if (pad_source < 0) {
dev_warn(mdev->dev, "couldn't get decoder output pad for VBI\n");
return -EINVAL;
}
ret = media_create_pad_link(decoder, pad_source,
io_vbi, 0,
MEDIA_LNK_FL_ENABLED);
if (ret) {
dev_warn(mdev->dev, "couldn't link decoder output to VBI\n");
return ret;
}
}
/* Create links for the media connectors */
flags = MEDIA_LNK_FL_ENABLED;
media_device_for_each_entity(entity, mdev) {
switch (entity->function) {
case MEDIA_ENT_F_CONN_RF:
if (!tuner)
continue;
pad_sink = media_get_pad_index(tuner, MEDIA_PAD_FL_SINK,
PAD_SIGNAL_ANALOG);
if (pad_sink < 0) {
dev_warn(mdev->dev, "couldn't get tuner analog pad sink\n");
return -EINVAL;
}
ret = media_create_pad_link(entity, 0, tuner,
pad_sink,
flags);
break;
case MEDIA_ENT_F_CONN_SVIDEO:
case MEDIA_ENT_F_CONN_COMPOSITE:
pad_sink = media_get_pad_index(decoder,
MEDIA_PAD_FL_SINK,
PAD_SIGNAL_ANALOG);
if (pad_sink < 0) {
dev_warn(mdev->dev, "couldn't get decoder analog pad sink\n");
return -EINVAL;
}
ret = media_create_pad_link(entity, 0, decoder,
pad_sink,
flags);
break;
default:
continue;
}
if (ret)
return ret;
flags = 0;
}
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_mc_create_media_graph);
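/*
 * Usage sketch (illustrative, not part of this file): a bridge driver
 * calls this helper once all entities have been registered with its
 * media_device, typically at the end of probe(). The function name is
 * an assumption.
 */
static int __maybe_unused example_finish_graph(struct media_device *mdev)
{
	int ret = v4l2_mc_create_media_graph(mdev);

	if (ret)
		dev_warn(mdev->dev, "failed to create media graph\n");
	return ret;
}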
int v4l_enable_media_source(struct video_device *vdev)
{
struct media_device *mdev = vdev->entity.graph_obj.mdev;
int ret = 0, err;
if (!mdev)
return 0;
mutex_lock(&mdev->graph_mutex);
if (!mdev->enable_source)
goto end;
err = mdev->enable_source(&vdev->entity, &vdev->pipe);
if (err)
ret = -EBUSY;
end:
mutex_unlock(&mdev->graph_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(v4l_enable_media_source);
void v4l_disable_media_source(struct video_device *vdev)
{
struct media_device *mdev = vdev->entity.graph_obj.mdev;
if (mdev) {
mutex_lock(&mdev->graph_mutex);
if (mdev->disable_source)
mdev->disable_source(&vdev->entity);
mutex_unlock(&mdev->graph_mutex);
}
}
EXPORT_SYMBOL_GPL(v4l_disable_media_source);
int v4l_vb2q_enable_media_source(struct vb2_queue *q)
{
struct v4l2_fh *fh = q->owner;
if (fh && fh->vdev)
return v4l_enable_media_source(fh->vdev);
return 0;
}
EXPORT_SYMBOL_GPL(v4l_vb2q_enable_media_source);
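/*
 * Usage sketch (illustrative, not part of this file): drivers bracket
 * streaming with the enable/disable helpers, e.g. from their start and
 * stop streaming paths. The function names are assumptions.
 */
static int __maybe_unused example_start_streaming(struct video_device *vdev)
{
	return v4l_enable_media_source(vdev);
}

static void __maybe_unused example_stop_streaming(struct video_device *vdev)
{
	v4l_disable_media_source(vdev);
}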
int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
struct media_pad *sink, u32 flags)
{
struct fwnode_handle *endpoint;
if (!(sink->flags & MEDIA_PAD_FL_SINK))
return -EINVAL;
fwnode_graph_for_each_endpoint(dev_fwnode(src_sd->dev), endpoint) {
struct fwnode_handle *remote_ep;
int src_idx, sink_idx, ret;
struct media_pad *src;
src_idx = media_entity_get_fwnode_pad(&src_sd->entity,
endpoint,
MEDIA_PAD_FL_SOURCE);
if (src_idx < 0)
continue;
remote_ep = fwnode_graph_get_remote_endpoint(endpoint);
if (!remote_ep)
continue;
/*
* ask the sink to verify it owns the remote endpoint,
* and translate to a sink pad.
*/
sink_idx = media_entity_get_fwnode_pad(sink->entity,
remote_ep,
MEDIA_PAD_FL_SINK);
fwnode_handle_put(remote_ep);
if (sink_idx < 0 || sink_idx != sink->index)
continue;
/*
* the source endpoint corresponds to one of its source pads,
* the source endpoint connects to an endpoint at the sink
* entity, and the sink endpoint corresponds to the sink
* pad requested, so we have found an endpoint connection
* that works, create the media link for it.
*/
src = &src_sd->entity.pads[src_idx];
/* skip if link already exists */
if (media_entity_find_link(src, sink))
continue;
dev_dbg(src_sd->dev, "creating link %s:%d -> %s:%d\n",
src_sd->entity.name, src_idx,
sink->entity->name, sink_idx);
ret = media_create_pad_link(&src_sd->entity, src_idx,
sink->entity, sink_idx, flags);
if (ret) {
dev_err(src_sd->dev,
"link %s:%d -> %s:%d failed with %d\n",
src_sd->entity.name, src_idx,
sink->entity->name, sink_idx, ret);
fwnode_handle_put(endpoint);
return ret;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links_to_pad);
int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd,
struct v4l2_subdev *sink_sd)
{
unsigned int i;
for (i = 0; i < sink_sd->entity.num_pads; i++) {
struct media_pad *pad = &sink_sd->entity.pads[i];
int ret;
if (!(pad->flags & MEDIA_PAD_FL_SINK))
continue;
ret = v4l2_create_fwnode_links_to_pad(src_sd, pad, 0);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links);
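/*
 * Usage sketch (illustrative, not part of this file): an async notifier
 * .bound() handler can use this helper to create all fwnode-described
 * links from a newly bound source subdev to the notifier's own subdev.
 * The function name is an assumption.
 */
static int __maybe_unused
example_notifier_bound(struct v4l2_subdev *src_sd,
		       struct v4l2_subdev *sink_sd)
{
	return v4l2_create_fwnode_links(src_sd, sink_sd);
}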
/* -----------------------------------------------------------------------------
* Pipeline power management
*
* Entities must be powered up when part of a pipeline that contains at least
* one open video device node.
*
* To achieve this use the entity use_count field to track the number of users.
* For entities corresponding to video device nodes the use_count field stores
* the users count of the node. For entities corresponding to subdevs the
* use_count field stores the total number of users of all video device nodes
* in the pipeline.
*
* The v4l2_pipeline_pm_{get, put}() functions must be called in the open() and
* close() handlers of video device nodes. It increments or decrements the use
* count of all subdev entities in the pipeline.
*
* To react to link management on powered pipelines, the link setup notification
* callback updates the use count of all entities in the source and sink sides
* of the link.
*/
/*
* pipeline_pm_use_count - Count the number of users of a pipeline
* @entity: The entity
*
* Return the total number of users of all video device nodes in the pipeline.
*/
static int pipeline_pm_use_count(struct media_entity *entity,
struct media_graph *graph)
{
int use = 0;
media_graph_walk_start(graph, entity);
while ((entity = media_graph_walk_next(graph))) {
if (is_media_entity_v4l2_video_device(entity))
use += entity->use_count;
}
return use;
}
/*
* pipeline_pm_power_one - Apply power change to an entity
* @entity: The entity
* @change: Use count change
*
* Change the entity use count by @change. If the entity is a subdev update its
* power state by calling the core::s_power operation when the use count goes
* from 0 to != 0 or from != 0 to 0.
*
* Return 0 on success or a negative error code on failure.
*/
static int pipeline_pm_power_one(struct media_entity *entity, int change)
{
struct v4l2_subdev *subdev;
int ret;
subdev = is_media_entity_v4l2_subdev(entity)
? media_entity_to_v4l2_subdev(entity) : NULL;
if (entity->use_count == 0 && change > 0 && subdev != NULL) {
ret = v4l2_subdev_call(subdev, core, s_power, 1);
if (ret < 0 && ret != -ENOIOCTLCMD)
return ret;
}
entity->use_count += change;
WARN_ON(entity->use_count < 0);
if (entity->use_count == 0 && change < 0 && subdev != NULL)
v4l2_subdev_call(subdev, core, s_power, 0);
return 0;
}
/*
* pipeline_pm_power - Apply power change to all entities in a pipeline
* @entity: The entity
* @change: Use count change
*
* Walk the pipeline to update the use count and the power state of all non-node
* entities.
*
* Return 0 on success or a negative error code on failure.
*/
static int pipeline_pm_power(struct media_entity *entity, int change,
struct media_graph *graph)
{
struct media_entity *first = entity;
int ret = 0;
if (!change)
return 0;
media_graph_walk_start(graph, entity);
while (!ret && (entity = media_graph_walk_next(graph)))
if (is_media_entity_v4l2_subdev(entity))
ret = pipeline_pm_power_one(entity, change);
if (!ret)
return ret;
media_graph_walk_start(graph, first);
while ((first = media_graph_walk_next(graph))
&& first != entity)
if (is_media_entity_v4l2_subdev(first))
pipeline_pm_power_one(first, -change);
return ret;
}
static int v4l2_pipeline_pm_use(struct media_entity *entity, unsigned int use)
{
struct media_device *mdev = entity->graph_obj.mdev;
int change = use ? 1 : -1;
int ret;
mutex_lock(&mdev->graph_mutex);
/* Apply use count to node. */
entity->use_count += change;
WARN_ON(entity->use_count < 0);
/* Apply power change to connected non-nodes. */
ret = pipeline_pm_power(entity, change, &mdev->pm_count_walk);
if (ret < 0)
entity->use_count -= change;
mutex_unlock(&mdev->graph_mutex);
return ret;
}
int v4l2_pipeline_pm_get(struct media_entity *entity)
{
return v4l2_pipeline_pm_use(entity, 1);
}
EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_get);
void v4l2_pipeline_pm_put(struct media_entity *entity)
{
/* Powering off entities shouldn't fail. */
WARN_ON(v4l2_pipeline_pm_use(entity, 0));
}
EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_put);
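/*
 * Usage sketch (illustrative, not part of this file): video device node
 * open() and release() handlers typically bracket the pipeline power
 * state like this. The function names are assumptions.
 */
static int __maybe_unused example_node_open(struct video_device *vdev)
{
	return v4l2_pipeline_pm_get(&vdev->entity);
}

static void __maybe_unused example_node_release(struct video_device *vdev)
{
	v4l2_pipeline_pm_put(&vdev->entity);
}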
int v4l2_pipeline_link_notify(struct media_link *link, u32 flags,
unsigned int notification)
{
struct media_graph *graph = &link->graph_obj.mdev->pm_count_walk;
struct media_entity *source = link->source->entity;
struct media_entity *sink = link->sink->entity;
int source_use;
int sink_use;
int ret = 0;
source_use = pipeline_pm_use_count(source, graph);
sink_use = pipeline_pm_use_count(sink, graph);
if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
!(flags & MEDIA_LNK_FL_ENABLED)) {
/* Powering off entities is assumed to never fail. */
pipeline_pm_power(source, -sink_use, graph);
pipeline_pm_power(sink, -source_use, graph);
return 0;
}
if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
(flags & MEDIA_LNK_FL_ENABLED)) {
ret = pipeline_pm_power(source, sink_use, graph);
if (ret < 0)
return ret;
ret = pipeline_pm_power(sink, source_use, graph);
if (ret < 0)
pipeline_pm_power(source, -sink_use, graph);
}
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_pipeline_link_notify);
| linux-master | drivers/media/v4l2-core/v4l2-mc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* v4l2-i2c - I2C helpers for Video4Linux2
*/
#include <linux/i2c.h>
#include <linux/module.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
/*
* We need to unregister the i2c client
* explicitly. We cannot rely on
* i2c_del_adapter to always unregister
* clients for us, since if the i2c bus is a
* platform bus, then it is never deleted.
*
* Device tree or ACPI based devices must not
* be unregistered as they have not been
* registered by us, and would not be
* re-created by just probing the V4L2 driver.
*/
if (client && !client->dev.of_node && !client->dev.fwnode)
i2c_unregister_device(client);
}
void v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd,
struct i2c_client *client,
const char *devname, const char *postfix)
{
if (!devname)
devname = client->dev.driver->name;
if (!postfix)
postfix = "";
snprintf(sd->name, sizeof(sd->name), "%s%s %d-%04x", devname, postfix,
i2c_adapter_id(client->adapter), client->addr);
}
EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_set_name);
void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
const struct v4l2_subdev_ops *ops)
{
v4l2_subdev_init(sd, ops);
sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
/* the owner is the same as the i2c_client's driver owner */
sd->owner = client->dev.driver->owner;
sd->dev = &client->dev;
/* i2c_client and v4l2_subdev point to one another */
v4l2_set_subdevdata(sd, client);
i2c_set_clientdata(client, sd);
v4l2_i2c_subdev_set_name(sd, client, NULL, NULL);
}
EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
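/*
 * Usage sketch (illustrative, not part of this file): an I2C sub-device
 * driver calls v4l2_i2c_subdev_init() from its probe() routine.
 * example_subdev_ops stands in for the driver's real v4l2_subdev_ops;
 * real drivers usually embed the subdev in their own state struct.
 */
static const struct v4l2_subdev_ops example_subdev_ops;

static int __maybe_unused example_probe(struct i2c_client *client)
{
	struct v4l2_subdev *sd;

	sd = devm_kzalloc(&client->dev, sizeof(*sd), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	v4l2_i2c_subdev_init(sd, client, &example_subdev_ops);
	return 0;
}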
/* Load an i2c sub-device. */
struct v4l2_subdev
*v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
struct i2c_adapter *adapter,
struct i2c_board_info *info,
const unsigned short *probe_addrs)
{
struct v4l2_subdev *sd = NULL;
struct i2c_client *client;
if (!v4l2_dev)
return NULL;
request_module(I2C_MODULE_PREFIX "%s", info->type);
/* Create the i2c client */
if (info->addr == 0 && probe_addrs)
client = i2c_new_scanned_device(adapter, info, probe_addrs,
NULL);
else
client = i2c_new_client_device(adapter, info);
/*
* Note: by loading the module first we are certain that the client's
* driver will be bound if the driver was found. If the module was not
* loaded first, then the i2c core tries to delay-load the module for
* us, and the client's driver is still NULL until the module is
* finally loaded. This delay-load mechanism doesn't work if other
* drivers want to use the i2c device, so explicitly loading the
* module is the best alternative.
*/
if (!i2c_client_has_driver(client))
goto error;
/* Lock the module so we can safely get the v4l2_subdev pointer */
if (!try_module_get(client->dev.driver->owner))
goto error;
sd = i2c_get_clientdata(client);
/*
* Register with the v4l2_device which increases the module's
* use count as well.
*/
if (v4l2_device_register_subdev(v4l2_dev, sd))
sd = NULL;
/* Decrease the module use count to match the first try_module_get. */
module_put(client->dev.driver->owner);
error:
/*
* If we have a client but no subdev, then something went wrong and
* we must unregister the client.
*/
if (!IS_ERR(client) && !sd)
i2c_unregister_device(client);
return sd;
}
EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
struct i2c_adapter *adapter,
const char *client_type,
u8 addr,
const unsigned short *probe_addrs)
{
struct i2c_board_info info;
/*
* Setup the i2c board info with the device type and
* the device address.
*/
memset(&info, 0, sizeof(info));
strscpy(info.type, client_type, sizeof(info.type));
info.addr = addr;
return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info,
probe_addrs);
}
EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
/* Return i2c client address of v4l2_subdev. */
unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
return client ? client->addr : I2C_CLIENT_END;
}
EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr);
/*
* Return a list of I2C tuner addresses to probe. Use only if the tuner
* addresses are unknown.
*/
const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
{
static const unsigned short radio_addrs[] = {
#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
0x10,
#endif
0x60,
I2C_CLIENT_END
};
static const unsigned short demod_addrs[] = {
0x42, 0x43, 0x4a, 0x4b,
I2C_CLIENT_END
};
static const unsigned short tv_addrs[] = {
0x42, 0x43, 0x4a, 0x4b, /* tda8290 */
0x60, 0x61, 0x62, 0x63, 0x64,
I2C_CLIENT_END
};
switch (type) {
case ADDRS_RADIO:
return radio_addrs;
case ADDRS_DEMOD:
return demod_addrs;
case ADDRS_TV:
return tv_addrs;
case ADDRS_TV_WITH_DEMOD:
return tv_addrs + 4;
}
return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
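/*
 * Usage sketch (illustrative, not part of this file): a bridge driver
 * would load a TV tuner sub-device by probing the usual addresses like
 * this. v4l2_dev and adap come from the bridge driver; the "tuner"
 * client type is the standard one, the function name is an assumption.
 */
static struct v4l2_subdev * __maybe_unused
example_load_tuner(struct v4l2_device *v4l2_dev, struct i2c_adapter *adap)
{
	return v4l2_i2c_new_subdev(v4l2_dev, adap, "tuner", 0,
				   v4l2_i2c_tuner_addrs(ADDRS_TV));
}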
| linux-master | drivers/media/v4l2-core/v4l2-i2c.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* V4L2 flash LED sub-device registration helpers.
*
* Copyright (C) 2015 Samsung Electronics Co., Ltd
* Author: Jacek Anaszewski <[email protected]>
*/
#include <linux/led-class-flash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <media/v4l2-flash-led-class.h>
#define has_flash_op(v4l2_flash, op) \
(v4l2_flash && v4l2_flash->ops && v4l2_flash->ops->op)
#define call_flash_op(v4l2_flash, op, arg) \
(has_flash_op(v4l2_flash, op) ? \
v4l2_flash->ops->op(v4l2_flash, arg) : \
-EINVAL)
enum ctrl_init_data_id {
LED_MODE,
TORCH_INTENSITY,
FLASH_INTENSITY,
INDICATOR_INTENSITY,
FLASH_TIMEOUT,
STROBE_SOURCE,
/*
* Only the above values are applicable to
* the 'ctrls' array in struct v4l2_flash.
*/
FLASH_STROBE,
STROBE_STOP,
STROBE_STATUS,
FLASH_FAULT,
NUM_FLASH_CTRLS,
};
static enum led_brightness __intensity_to_led_brightness(
struct v4l2_ctrl *ctrl, s32 intensity)
{
intensity -= ctrl->minimum;
intensity /= (u32) ctrl->step;
/*
* Indicator LEDs, unlike torch LEDs, are turned on/off based on
* the state of the V4L2_CID_FLASH_INDICATOR_INTENSITY control only.
* Therefore it must be possible to set it to level 0, which in
* the LED subsystem reflects the LED_OFF state.
*/
if (ctrl->minimum)
++intensity;
return intensity;
}
static s32 __led_brightness_to_intensity(struct v4l2_ctrl *ctrl,
enum led_brightness brightness)
{
/*
* Indicator LEDs, unlike torch LEDs, are turned on/off based on
* the state of the V4L2_CID_FLASH_INDICATOR_INTENSITY control only.
* Do not decrement the brightness read from the LED subsystem for
* an indicator LED as it may equal 0. For torch LEDs this function
* is called only when V4L2_FLASH_LED_MODE_TORCH is set and the
* brightness read is guaranteed to be greater than 0. In the
* V4L2_FLASH_LED_MODE_NONE mode the cached torch intensity value is used.
*/
if (ctrl->id != V4L2_CID_FLASH_INDICATOR_INTENSITY)
--brightness;
return (brightness * ctrl->step) + ctrl->minimum;
}
static int v4l2_flash_set_led_brightness(struct v4l2_flash *v4l2_flash,
struct v4l2_ctrl *ctrl)
{
struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
struct led_classdev *led_cdev;
enum led_brightness brightness;
if (has_flash_op(v4l2_flash, intensity_to_led_brightness))
brightness = call_flash_op(v4l2_flash,
intensity_to_led_brightness,
ctrl->val);
else
brightness = __intensity_to_led_brightness(ctrl, ctrl->val);
/*
* In case a LED Flash class driver provides ops for custom
* brightness <-> intensity conversion, it also must have defined
* related v4l2 control step == 1. In such a case a backward conversion
* from led brightness to v4l2 intensity is required to find out the
* aligned intensity value.
*/
if (has_flash_op(v4l2_flash, led_brightness_to_intensity))
ctrl->val = call_flash_op(v4l2_flash,
led_brightness_to_intensity,
brightness);
if (ctrl == ctrls[TORCH_INTENSITY]) {
if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH)
return 0;
if (WARN_ON_ONCE(!v4l2_flash->fled_cdev))
return -EINVAL;
led_cdev = &v4l2_flash->fled_cdev->led_cdev;
} else {
if (WARN_ON_ONCE(!v4l2_flash->iled_cdev))
return -EINVAL;
led_cdev = v4l2_flash->iled_cdev;
}
return led_set_brightness_sync(led_cdev, brightness);
}
static int v4l2_flash_update_led_brightness(struct v4l2_flash *v4l2_flash,
struct v4l2_ctrl *ctrl)
{
struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
struct led_classdev *led_cdev;
int ret;
if (ctrl == ctrls[TORCH_INTENSITY]) {
/*
* Update torch brightness only if in TORCH_MODE. In other modes
* the torch LED is turned off, so updating would spuriously inform
* user space that the V4L2_CID_FLASH_TORCH_INTENSITY control value
* has changed to 0.
*/
if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH)
return 0;
if (WARN_ON_ONCE(!v4l2_flash->fled_cdev))
return -EINVAL;
led_cdev = &v4l2_flash->fled_cdev->led_cdev;
} else {
if (WARN_ON_ONCE(!v4l2_flash->iled_cdev))
return -EINVAL;
led_cdev = v4l2_flash->iled_cdev;
}
ret = led_update_brightness(led_cdev);
if (ret < 0)
return ret;
if (has_flash_op(v4l2_flash, led_brightness_to_intensity))
ctrl->val = call_flash_op(v4l2_flash,
led_brightness_to_intensity,
led_cdev->brightness);
else
ctrl->val = __led_brightness_to_intensity(ctrl,
led_cdev->brightness);
return 0;
}
static int v4l2_flash_g_volatile_ctrl(struct v4l2_ctrl *c)
{
struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c);
struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
bool is_strobing;
int ret;
switch (c->id) {
case V4L2_CID_FLASH_TORCH_INTENSITY:
case V4L2_CID_FLASH_INDICATOR_INTENSITY:
return v4l2_flash_update_led_brightness(v4l2_flash, c);
}
if (!fled_cdev)
return -EINVAL;
switch (c->id) {
case V4L2_CID_FLASH_INTENSITY:
ret = led_update_flash_brightness(fled_cdev);
if (ret < 0)
return ret;
/*
* No conversion is needed as LED Flash class also uses
* microamperes for flash intensity units.
*/
c->val = fled_cdev->brightness.val;
return 0;
case V4L2_CID_FLASH_STROBE_STATUS:
ret = led_get_flash_strobe(fled_cdev, &is_strobing);
if (ret < 0)
return ret;
c->val = is_strobing;
return 0;
case V4L2_CID_FLASH_FAULT:
/* LED faults map directly to V4L2 flash faults */
return led_get_flash_fault(fled_cdev, &c->val);
default:
return -EINVAL;
}
}
static bool __software_strobe_mode_inactive(struct v4l2_ctrl **ctrls)
{
return ((ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH) ||
(ctrls[STROBE_SOURCE] && (ctrls[STROBE_SOURCE]->val !=
V4L2_FLASH_STROBE_SOURCE_SOFTWARE)));
}
static int v4l2_flash_s_ctrl(struct v4l2_ctrl *c)
{
struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c);
struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
struct led_classdev *led_cdev;
struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
bool external_strobe;
int ret = 0;
switch (c->id) {
case V4L2_CID_FLASH_TORCH_INTENSITY:
case V4L2_CID_FLASH_INDICATOR_INTENSITY:
return v4l2_flash_set_led_brightness(v4l2_flash, c);
}
if (!fled_cdev)
return -EINVAL;
led_cdev = &fled_cdev->led_cdev;
switch (c->id) {
case V4L2_CID_FLASH_LED_MODE:
switch (c->val) {
case V4L2_FLASH_LED_MODE_NONE:
led_set_brightness_sync(led_cdev, LED_OFF);
return led_set_flash_strobe(fled_cdev, false);
case V4L2_FLASH_LED_MODE_FLASH:
/* Turn the torch LED off */
led_set_brightness_sync(led_cdev, LED_OFF);
if (ctrls[STROBE_SOURCE]) {
external_strobe = (ctrls[STROBE_SOURCE]->val ==
V4L2_FLASH_STROBE_SOURCE_EXTERNAL);
ret = call_flash_op(v4l2_flash,
external_strobe_set,
external_strobe);
}
return ret;
case V4L2_FLASH_LED_MODE_TORCH:
if (ctrls[STROBE_SOURCE]) {
ret = call_flash_op(v4l2_flash,
external_strobe_set,
false);
if (ret < 0)
return ret;
}
/* Stop flash strobing */
ret = led_set_flash_strobe(fled_cdev, false);
if (ret < 0)
return ret;
return v4l2_flash_set_led_brightness(v4l2_flash,
ctrls[TORCH_INTENSITY]);
}
break;
case V4L2_CID_FLASH_STROBE_SOURCE:
external_strobe = (c->val == V4L2_FLASH_STROBE_SOURCE_EXTERNAL);
/*
* For some hardware arrangements setting strobe source may
* affect torch mode. Therefore, if not in the flash mode,
* cache only this setting. It will be applied upon switching
* to flash mode.
*/
if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH)
return 0;
return call_flash_op(v4l2_flash, external_strobe_set,
external_strobe);
case V4L2_CID_FLASH_STROBE:
if (__software_strobe_mode_inactive(ctrls))
return -EBUSY;
return led_set_flash_strobe(fled_cdev, true);
case V4L2_CID_FLASH_STROBE_STOP:
if (__software_strobe_mode_inactive(ctrls))
return -EBUSY;
return led_set_flash_strobe(fled_cdev, false);
case V4L2_CID_FLASH_TIMEOUT:
/*
* No conversion is needed as LED Flash class also uses
* microseconds for flash timeout units.
*/
return led_set_flash_timeout(fled_cdev, c->val);
case V4L2_CID_FLASH_INTENSITY:
/*
* No conversion is needed as LED Flash class also uses
* microamperes for flash intensity units.
*/
return led_set_flash_brightness(fled_cdev, c->val);
}
return -EINVAL;
}
static const struct v4l2_ctrl_ops v4l2_flash_ctrl_ops = {
.g_volatile_ctrl = v4l2_flash_g_volatile_ctrl,
.s_ctrl = v4l2_flash_s_ctrl,
};
static void __lfs_to_v4l2_ctrl_config(struct led_flash_setting *s,
struct v4l2_ctrl_config *c)
{
c->min = s->min;
c->max = s->max;
c->step = s->step;
c->def = s->val;
}
static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash,
struct v4l2_flash_config *flash_cfg,
struct v4l2_flash_ctrl_data *ctrl_init_data)
{
struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL;
struct v4l2_ctrl_config *ctrl_cfg;
u32 mask;
/* Init INDICATOR_INTENSITY ctrl data */
if (v4l2_flash->iled_cdev) {
ctrl_init_data[INDICATOR_INTENSITY].cid =
V4L2_CID_FLASH_INDICATOR_INTENSITY;
ctrl_cfg = &ctrl_init_data[INDICATOR_INTENSITY].config;
__lfs_to_v4l2_ctrl_config(&flash_cfg->intensity,
ctrl_cfg);
ctrl_cfg->id = V4L2_CID_FLASH_INDICATOR_INTENSITY;
ctrl_cfg->min = 0;
ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
}
if (!led_cdev || WARN_ON(!(led_cdev->flags & LED_DEV_CAP_FLASH)))
return;
/* Init FLASH_FAULT ctrl data */
if (flash_cfg->flash_faults) {
ctrl_init_data[FLASH_FAULT].cid = V4L2_CID_FLASH_FAULT;
ctrl_cfg = &ctrl_init_data[FLASH_FAULT].config;
ctrl_cfg->id = V4L2_CID_FLASH_FAULT;
ctrl_cfg->max = flash_cfg->flash_faults;
ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
V4L2_CTRL_FLAG_READ_ONLY;
}
/* Init FLASH_LED_MODE ctrl data */
mask = 1 << V4L2_FLASH_LED_MODE_NONE |
1 << V4L2_FLASH_LED_MODE_TORCH;
if (led_cdev->flags & LED_DEV_CAP_FLASH)
mask |= 1 << V4L2_FLASH_LED_MODE_FLASH;
ctrl_init_data[LED_MODE].cid = V4L2_CID_FLASH_LED_MODE;
ctrl_cfg = &ctrl_init_data[LED_MODE].config;
ctrl_cfg->id = V4L2_CID_FLASH_LED_MODE;
ctrl_cfg->max = V4L2_FLASH_LED_MODE_TORCH;
ctrl_cfg->menu_skip_mask = ~mask;
ctrl_cfg->def = V4L2_FLASH_LED_MODE_NONE;
ctrl_cfg->flags = 0;
/* Init TORCH_INTENSITY ctrl data */
ctrl_init_data[TORCH_INTENSITY].cid = V4L2_CID_FLASH_TORCH_INTENSITY;
ctrl_cfg = &ctrl_init_data[TORCH_INTENSITY].config;
__lfs_to_v4l2_ctrl_config(&flash_cfg->intensity, ctrl_cfg);
ctrl_cfg->id = V4L2_CID_FLASH_TORCH_INTENSITY;
ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
/* Init FLASH_STROBE ctrl data */
ctrl_init_data[FLASH_STROBE].cid = V4L2_CID_FLASH_STROBE;
ctrl_cfg = &ctrl_init_data[FLASH_STROBE].config;
ctrl_cfg->id = V4L2_CID_FLASH_STROBE;
/* Init STROBE_STOP ctrl data */
ctrl_init_data[STROBE_STOP].cid = V4L2_CID_FLASH_STROBE_STOP;
ctrl_cfg = &ctrl_init_data[STROBE_STOP].config;
ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STOP;
/* Init FLASH_STROBE_SOURCE ctrl data */
if (flash_cfg->has_external_strobe) {
mask = (1 << V4L2_FLASH_STROBE_SOURCE_SOFTWARE) |
(1 << V4L2_FLASH_STROBE_SOURCE_EXTERNAL);
ctrl_init_data[STROBE_SOURCE].cid =
V4L2_CID_FLASH_STROBE_SOURCE;
ctrl_cfg = &ctrl_init_data[STROBE_SOURCE].config;
ctrl_cfg->id = V4L2_CID_FLASH_STROBE_SOURCE;
ctrl_cfg->max = V4L2_FLASH_STROBE_SOURCE_EXTERNAL;
ctrl_cfg->menu_skip_mask = ~mask;
ctrl_cfg->def = V4L2_FLASH_STROBE_SOURCE_SOFTWARE;
}
/* Init STROBE_STATUS ctrl data */
if (has_flash_op(fled_cdev, strobe_get)) {
ctrl_init_data[STROBE_STATUS].cid =
V4L2_CID_FLASH_STROBE_STATUS;
ctrl_cfg = &ctrl_init_data[STROBE_STATUS].config;
ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STATUS;
ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
V4L2_CTRL_FLAG_READ_ONLY;
}
/* Init FLASH_TIMEOUT ctrl data */
if (has_flash_op(fled_cdev, timeout_set)) {
ctrl_init_data[FLASH_TIMEOUT].cid = V4L2_CID_FLASH_TIMEOUT;
ctrl_cfg = &ctrl_init_data[FLASH_TIMEOUT].config;
__lfs_to_v4l2_ctrl_config(&fled_cdev->timeout, ctrl_cfg);
ctrl_cfg->id = V4L2_CID_FLASH_TIMEOUT;
}
/* Init FLASH_INTENSITY ctrl data */
if (has_flash_op(fled_cdev, flash_brightness_set)) {
ctrl_init_data[FLASH_INTENSITY].cid = V4L2_CID_FLASH_INTENSITY;
ctrl_cfg = &ctrl_init_data[FLASH_INTENSITY].config;
__lfs_to_v4l2_ctrl_config(&fled_cdev->brightness, ctrl_cfg);
ctrl_cfg->id = V4L2_CID_FLASH_INTENSITY;
ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
}
}
static int v4l2_flash_init_controls(struct v4l2_flash *v4l2_flash,
struct v4l2_flash_config *flash_cfg)
{
struct v4l2_flash_ctrl_data *ctrl_init_data;
struct v4l2_ctrl *ctrl;
struct v4l2_ctrl_config *ctrl_cfg;
int i, ret, num_ctrls = 0;
v4l2_flash->ctrls = devm_kcalloc(v4l2_flash->sd.dev,
STROBE_SOURCE + 1,
sizeof(*v4l2_flash->ctrls),
GFP_KERNEL);
if (!v4l2_flash->ctrls)
return -ENOMEM;
/* allocate memory dynamically so as not to exceed stack frame size */
ctrl_init_data = kcalloc(NUM_FLASH_CTRLS, sizeof(*ctrl_init_data),
GFP_KERNEL);
if (!ctrl_init_data)
return -ENOMEM;
__fill_ctrl_init_data(v4l2_flash, flash_cfg, ctrl_init_data);
for (i = 0; i < NUM_FLASH_CTRLS; ++i)
if (ctrl_init_data[i].cid)
++num_ctrls;
v4l2_ctrl_handler_init(&v4l2_flash->hdl, num_ctrls);
for (i = 0; i < NUM_FLASH_CTRLS; ++i) {
ctrl_cfg = &ctrl_init_data[i].config;
if (!ctrl_init_data[i].cid)
continue;
if (ctrl_cfg->id == V4L2_CID_FLASH_LED_MODE ||
ctrl_cfg->id == V4L2_CID_FLASH_STROBE_SOURCE)
ctrl = v4l2_ctrl_new_std_menu(&v4l2_flash->hdl,
&v4l2_flash_ctrl_ops,
ctrl_cfg->id,
ctrl_cfg->max,
ctrl_cfg->menu_skip_mask,
ctrl_cfg->def);
else
ctrl = v4l2_ctrl_new_std(&v4l2_flash->hdl,
&v4l2_flash_ctrl_ops,
ctrl_cfg->id,
ctrl_cfg->min,
ctrl_cfg->max,
ctrl_cfg->step,
ctrl_cfg->def);
if (ctrl)
ctrl->flags |= ctrl_cfg->flags;
if (i <= STROBE_SOURCE)
v4l2_flash->ctrls[i] = ctrl;
}
kfree(ctrl_init_data);
if (v4l2_flash->hdl.error) {
ret = v4l2_flash->hdl.error;
goto error_free_handler;
}
v4l2_ctrl_handler_setup(&v4l2_flash->hdl);
v4l2_flash->sd.ctrl_handler = &v4l2_flash->hdl;
return 0;
error_free_handler:
v4l2_ctrl_handler_free(&v4l2_flash->hdl);
return ret;
}
static int __sync_device_with_v4l2_controls(struct v4l2_flash *v4l2_flash)
{
struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
int ret = 0;
if (ctrls[TORCH_INTENSITY]) {
ret = v4l2_flash_set_led_brightness(v4l2_flash,
ctrls[TORCH_INTENSITY]);
if (ret < 0)
return ret;
}
if (ctrls[INDICATOR_INTENSITY]) {
ret = v4l2_flash_set_led_brightness(v4l2_flash,
ctrls[INDICATOR_INTENSITY]);
if (ret < 0)
return ret;
}
if (ctrls[FLASH_TIMEOUT]) {
if (WARN_ON_ONCE(!fled_cdev))
return -EINVAL;
ret = led_set_flash_timeout(fled_cdev,
ctrls[FLASH_TIMEOUT]->val);
if (ret < 0)
return ret;
}
if (ctrls[FLASH_INTENSITY]) {
if (WARN_ON_ONCE(!fled_cdev))
return -EINVAL;
ret = led_set_flash_brightness(fled_cdev,
ctrls[FLASH_INTENSITY]->val);
if (ret < 0)
return ret;
}
/*
* For some hardware arrangements setting strobe source may affect
* torch mode. Synchronize strobe source setting only if not in torch
* mode. For torch mode case it will get synchronized upon switching
* to flash mode.
*/
if (ctrls[STROBE_SOURCE] &&
ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH)
ret = call_flash_op(v4l2_flash, external_strobe_set,
ctrls[STROBE_SOURCE]->val);
return ret;
}
/*
* V4L2 subdev internal operations
*/
static int v4l2_flash_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd);
struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL;
struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev;
int ret = 0;
if (!v4l2_fh_is_singular(&fh->vfh))
return 0;
if (led_cdev) {
mutex_lock(&led_cdev->led_access);
led_sysfs_disable(led_cdev);
led_trigger_remove(led_cdev);
mutex_unlock(&led_cdev->led_access);
}
if (led_cdev_ind) {
mutex_lock(&led_cdev_ind->led_access);
led_sysfs_disable(led_cdev_ind);
led_trigger_remove(led_cdev_ind);
mutex_unlock(&led_cdev_ind->led_access);
}
ret = __sync_device_with_v4l2_controls(v4l2_flash);
if (ret < 0)
goto out_sync_device;
return 0;
out_sync_device:
if (led_cdev) {
mutex_lock(&led_cdev->led_access);
led_sysfs_enable(led_cdev);
mutex_unlock(&led_cdev->led_access);
}
if (led_cdev_ind) {
mutex_lock(&led_cdev_ind->led_access);
led_sysfs_enable(led_cdev_ind);
mutex_unlock(&led_cdev_ind->led_access);
}
return ret;
}
static int v4l2_flash_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd);
struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL;
struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev;
int ret = 0;
if (!v4l2_fh_is_singular(&fh->vfh))
return 0;
if (led_cdev) {
mutex_lock(&led_cdev->led_access);
if (v4l2_flash->ctrls[STROBE_SOURCE])
ret = v4l2_ctrl_s_ctrl(
v4l2_flash->ctrls[STROBE_SOURCE],
V4L2_FLASH_STROBE_SOURCE_SOFTWARE);
led_sysfs_enable(led_cdev);
mutex_unlock(&led_cdev->led_access);
}
if (led_cdev_ind) {
mutex_lock(&led_cdev_ind->led_access);
led_sysfs_enable(led_cdev_ind);
mutex_unlock(&led_cdev_ind->led_access);
}
return ret;
}
static const struct v4l2_subdev_internal_ops v4l2_flash_subdev_internal_ops = {
.open = v4l2_flash_open,
.close = v4l2_flash_close,
};
static const struct v4l2_subdev_ops v4l2_flash_subdev_ops;
static struct v4l2_flash *__v4l2_flash_init(
struct device *dev, struct fwnode_handle *fwn,
struct led_classdev_flash *fled_cdev, struct led_classdev *iled_cdev,
const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config)
{
struct v4l2_flash *v4l2_flash;
struct v4l2_subdev *sd;
int ret;
if (!config)
return ERR_PTR(-EINVAL);
v4l2_flash = devm_kzalloc(dev, sizeof(*v4l2_flash), GFP_KERNEL);
if (!v4l2_flash)
return ERR_PTR(-ENOMEM);
sd = &v4l2_flash->sd;
v4l2_flash->fled_cdev = fled_cdev;
v4l2_flash->iled_cdev = iled_cdev;
v4l2_flash->ops = ops;
sd->dev = dev;
sd->fwnode = fwn ? fwn : dev_fwnode(dev);
v4l2_subdev_init(sd, &v4l2_flash_subdev_ops);
sd->internal_ops = &v4l2_flash_subdev_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
strscpy(sd->name, config->dev_name, sizeof(sd->name));
ret = media_entity_pads_init(&sd->entity, 0, NULL);
if (ret < 0)
return ERR_PTR(ret);
sd->entity.function = MEDIA_ENT_F_FLASH;
ret = v4l2_flash_init_controls(v4l2_flash, config);
if (ret < 0)
goto err_init_controls;
fwnode_handle_get(sd->fwnode);
ret = v4l2_async_register_subdev(sd);
if (ret < 0)
goto err_async_register_sd;
return v4l2_flash;
err_async_register_sd:
fwnode_handle_put(sd->fwnode);
v4l2_ctrl_handler_free(sd->ctrl_handler);
err_init_controls:
media_entity_cleanup(&sd->entity);
return ERR_PTR(ret);
}
struct v4l2_flash *v4l2_flash_init(
struct device *dev, struct fwnode_handle *fwn,
struct led_classdev_flash *fled_cdev,
const struct v4l2_flash_ops *ops,
struct v4l2_flash_config *config)
{
return __v4l2_flash_init(dev, fwn, fled_cdev, NULL, ops, config);
}
EXPORT_SYMBOL_GPL(v4l2_flash_init);
struct v4l2_flash *v4l2_flash_indicator_init(
struct device *dev, struct fwnode_handle *fwn,
struct led_classdev *iled_cdev,
struct v4l2_flash_config *config)
{
return __v4l2_flash_init(dev, fwn, NULL, iled_cdev, NULL, config);
}
EXPORT_SYMBOL_GPL(v4l2_flash_indicator_init);
void v4l2_flash_release(struct v4l2_flash *v4l2_flash)
{
struct v4l2_subdev *sd;
if (IS_ERR_OR_NULL(v4l2_flash))
return;
sd = &v4l2_flash->sd;
v4l2_async_unregister_subdev(sd);
fwnode_handle_put(sd->fwnode);
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
}
EXPORT_SYMBOL_GPL(v4l2_flash_release);
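/*
 * Usage sketch (illustrative, not part of this file): an LED flash
 * driver registers the v4l2_flash sub-device after filling in its
 * v4l2_flash_config, and calls v4l2_flash_release() on removal. The
 * function name is an assumption; ops may be NULL when the driver
 * needs no external strobe handling or custom intensity conversion.
 */
static struct v4l2_flash * __maybe_unused
example_register_flash(struct device *dev,
		       struct led_classdev_flash *fled_cdev,
		       const struct v4l2_flash_ops *ops,
		       struct v4l2_flash_config *cfg)
{
	return v4l2_flash_init(dev, NULL, fled_cdev, ops, cfg);
}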
MODULE_AUTHOR("Jacek Anaszewski <[email protected]>");
MODULE_DESCRIPTION("V4L2 Flash sub-device helpers");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/media/v4l2-core/v4l2-flash-led-class.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* V4L2 controls framework Request API implementation.
*
* Copyright (C) 2018-2021 Hans Verkuil <[email protected]>
*/
#define pr_fmt(fmt) "v4l2-ctrls: " fmt
#include <linux/export.h>
#include <linux/slab.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include "v4l2-ctrls-priv.h"
/* Initialize the request-related fields in a control handler */
void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl)
{
INIT_LIST_HEAD(&hdl->requests);
INIT_LIST_HEAD(&hdl->requests_queued);
hdl->request_is_queued = false;
media_request_object_init(&hdl->req_obj);
}
/* Free the request-related fields in a control handler */
void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl)
{
struct v4l2_ctrl_handler *req, *next_req;
/*
* Do nothing if this isn't the main handler or the main
* handler is not used in any request.
*
* The main handler can be identified by having a NULL ops pointer in
* the request object.
*/
if (hdl->req_obj.ops || list_empty(&hdl->requests))
return;
/*
* If the main handler is freed and it is used by handler objects in
* outstanding requests, then unbind and put those objects before
* freeing the main handler.
*/
list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
media_request_object_unbind(&req->req_obj);
media_request_object_put(&req->req_obj);
}
}
static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_handler *from)
{
struct v4l2_ctrl_ref *ref;
int err = 0;
if (WARN_ON(!hdl || hdl == from))
return -EINVAL;
if (hdl->error)
return hdl->error;
WARN_ON(hdl->lock != &hdl->_lock);
mutex_lock(from->lock);
list_for_each_entry(ref, &from->ctrl_refs, node) {
struct v4l2_ctrl *ctrl = ref->ctrl;
struct v4l2_ctrl_ref *new_ref;
/* Skip refs inherited from other devices */
if (ref->from_other_dev)
continue;
err = handler_new_ref(hdl, ctrl, &new_ref, false, true);
if (err)
break;
}
mutex_unlock(from->lock);
return err;
}
static void v4l2_ctrl_request_queue(struct media_request_object *obj)
{
struct v4l2_ctrl_handler *hdl =
container_of(obj, struct v4l2_ctrl_handler, req_obj);
struct v4l2_ctrl_handler *main_hdl = obj->priv;
mutex_lock(main_hdl->lock);
list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
hdl->request_is_queued = true;
mutex_unlock(main_hdl->lock);
}
static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
{
struct v4l2_ctrl_handler *hdl =
container_of(obj, struct v4l2_ctrl_handler, req_obj);
struct v4l2_ctrl_handler *main_hdl = obj->priv;
mutex_lock(main_hdl->lock);
list_del_init(&hdl->requests);
if (hdl->request_is_queued) {
list_del_init(&hdl->requests_queued);
hdl->request_is_queued = false;
}
mutex_unlock(main_hdl->lock);
}
static void v4l2_ctrl_request_release(struct media_request_object *obj)
{
struct v4l2_ctrl_handler *hdl =
container_of(obj, struct v4l2_ctrl_handler, req_obj);
v4l2_ctrl_handler_free(hdl);
kfree(hdl);
}
static const struct media_request_object_ops req_ops = {
.queue = v4l2_ctrl_request_queue,
.unbind = v4l2_ctrl_request_unbind,
.release = v4l2_ctrl_request_release,
};
struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
struct v4l2_ctrl_handler *parent)
{
struct media_request_object *obj;
if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING &&
req->state != MEDIA_REQUEST_STATE_QUEUED))
return NULL;
obj = media_request_object_find(req, &req_ops, parent);
if (obj)
return container_of(obj, struct v4l2_ctrl_handler, req_obj);
return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find);
struct v4l2_ctrl *
v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
{
struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
return (ref && ref->p_req_valid) ? ref->ctrl : NULL;
}
EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
static int v4l2_ctrl_request_bind(struct media_request *req,
struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl_handler *from)
{
int ret;
ret = v4l2_ctrl_request_clone(hdl, from);
if (!ret) {
ret = media_request_object_bind(req, &req_ops,
from, false, &hdl->req_obj);
if (!ret) {
mutex_lock(from->lock);
list_add_tail(&hdl->requests, &from->requests);
mutex_unlock(from->lock);
}
}
return ret;
}
static struct media_request_object *
v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl,
struct media_request *req, bool set)
{
struct media_request_object *obj;
struct v4l2_ctrl_handler *new_hdl;
int ret;
if (IS_ERR(req))
return ERR_CAST(req);
if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
return ERR_PTR(-EBUSY);
obj = media_request_object_find(req, &req_ops, hdl);
if (obj)
return obj;
/*
* If there are no controls in this completed request,
* then that can only happen if:
*
* 1) no controls were present in the queued request, and
* 2) v4l2_ctrl_request_complete() could not allocate a
* control handler object to store the completed state in.
*
* So return ENOMEM to indicate that there was an out-of-memory
* error.
*/
if (!set)
return ERR_PTR(-ENOMEM);
new_hdl = kzalloc(sizeof(*new_hdl), GFP_KERNEL);
if (!new_hdl)
return ERR_PTR(-ENOMEM);
obj = &new_hdl->req_obj;
ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8);
if (!ret)
ret = v4l2_ctrl_request_bind(req, new_hdl, hdl);
if (ret) {
v4l2_ctrl_handler_free(new_hdl);
kfree(new_hdl);
return ERR_PTR(ret);
}
media_request_object_get(obj);
return obj;
}
int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
struct media_device *mdev, struct v4l2_ext_controls *cs)
{
struct media_request_object *obj = NULL;
struct media_request *req = NULL;
int ret;
if (!mdev || cs->request_fd < 0)
return -EINVAL;
req = media_request_get_by_fd(mdev, cs->request_fd);
if (IS_ERR(req))
return PTR_ERR(req);
if (req->state != MEDIA_REQUEST_STATE_COMPLETE) {
media_request_put(req);
return -EACCES;
}
ret = media_request_lock_for_access(req);
if (ret) {
media_request_put(req);
return ret;
}
obj = v4l2_ctrls_find_req_obj(hdl, req, false);
if (IS_ERR(obj)) {
media_request_unlock_for_access(req);
media_request_put(req);
return PTR_ERR(obj);
}
hdl = container_of(obj, struct v4l2_ctrl_handler,
req_obj);
ret = v4l2_g_ext_ctrls_common(hdl, cs, vdev);
media_request_unlock_for_access(req);
media_request_object_put(obj);
media_request_put(req);
return ret;
}
int try_set_ext_ctrls_request(struct v4l2_fh *fh,
struct v4l2_ctrl_handler *hdl,
struct video_device *vdev,
struct media_device *mdev,
struct v4l2_ext_controls *cs, bool set)
{
struct media_request_object *obj = NULL;
struct media_request *req = NULL;
int ret;
if (!mdev) {
dprintk(vdev, "%s: missing media device\n",
video_device_node_name(vdev));
return -EINVAL;
}
if (cs->request_fd < 0) {
dprintk(vdev, "%s: invalid request fd %d\n",
video_device_node_name(vdev), cs->request_fd);
return -EINVAL;
}
req = media_request_get_by_fd(mdev, cs->request_fd);
if (IS_ERR(req)) {
dprintk(vdev, "%s: cannot find request fd %d\n",
video_device_node_name(vdev), cs->request_fd);
return PTR_ERR(req);
}
ret = media_request_lock_for_update(req);
if (ret) {
dprintk(vdev, "%s: cannot lock request fd %d\n",
video_device_node_name(vdev), cs->request_fd);
media_request_put(req);
return ret;
}
obj = v4l2_ctrls_find_req_obj(hdl, req, set);
if (IS_ERR(obj)) {
dprintk(vdev,
"%s: cannot find request object for request fd %d\n",
video_device_node_name(vdev),
cs->request_fd);
media_request_unlock_for_update(req);
media_request_put(req);
return PTR_ERR(obj);
}
hdl = container_of(obj, struct v4l2_ctrl_handler,
req_obj);
ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set);
if (ret)
dprintk(vdev,
"%s: try_set_ext_ctrls_common failed (%d)\n",
video_device_node_name(vdev), ret);
media_request_unlock_for_update(req);
media_request_object_put(obj);
media_request_put(req);
return ret;
}
void v4l2_ctrl_request_complete(struct media_request *req,
struct v4l2_ctrl_handler *main_hdl)
{
struct media_request_object *obj;
struct v4l2_ctrl_handler *hdl;
struct v4l2_ctrl_ref *ref;
if (!req || !main_hdl)
return;
/*
* Note that it is valid if nothing was found. It means
* that this request doesn't have any controls and so just
* wants to leave the controls unchanged.
*/
obj = media_request_object_find(req, &req_ops, main_hdl);
if (!obj) {
int ret;
/* Create a new request so the driver can return controls */
hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
if (!hdl)
return;
ret = v4l2_ctrl_handler_init(hdl, (main_hdl->nr_of_buckets - 1) * 8);
if (!ret)
ret = v4l2_ctrl_request_bind(req, hdl, main_hdl);
if (ret) {
v4l2_ctrl_handler_free(hdl);
kfree(hdl);
return;
}
hdl->request_is_queued = true;
obj = media_request_object_find(req, &req_ops, main_hdl);
}
hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
list_for_each_entry(ref, &hdl->ctrl_refs, node) {
struct v4l2_ctrl *ctrl = ref->ctrl;
struct v4l2_ctrl *master = ctrl->cluster[0];
unsigned int i;
if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
v4l2_ctrl_lock(master);
/* g_volatile_ctrl will update the current control values */
for (i = 0; i < master->ncontrols; i++)
cur_to_new(master->cluster[i]);
call_op(master, g_volatile_ctrl);
new_to_req(ref);
v4l2_ctrl_unlock(master);
continue;
}
if (ref->p_req_valid)
continue;
/* Copy the current control value into the request */
v4l2_ctrl_lock(ctrl);
cur_to_req(ref);
v4l2_ctrl_unlock(ctrl);
}
mutex_lock(main_hdl->lock);
WARN_ON(!hdl->request_is_queued);
list_del_init(&hdl->requests_queued);
hdl->request_is_queued = false;
mutex_unlock(main_hdl->lock);
media_request_object_complete(obj);
media_request_object_put(obj);
}
EXPORT_SYMBOL(v4l2_ctrl_request_complete);
int v4l2_ctrl_request_setup(struct media_request *req,
struct v4l2_ctrl_handler *main_hdl)
{
struct media_request_object *obj;
struct v4l2_ctrl_handler *hdl;
struct v4l2_ctrl_ref *ref;
int ret = 0;
if (!req || !main_hdl)
return 0;
if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
return -EBUSY;
/*
* Note that it is valid if nothing was found. It means
* that this request doesn't have any controls and so just
* wants to leave the controls unchanged.
*/
obj = media_request_object_find(req, &req_ops, main_hdl);
if (!obj)
return 0;
if (obj->completed) {
media_request_object_put(obj);
return -EBUSY;
}
hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
list_for_each_entry(ref, &hdl->ctrl_refs, node)
ref->req_done = false;
list_for_each_entry(ref, &hdl->ctrl_refs, node) {
struct v4l2_ctrl *ctrl = ref->ctrl;
struct v4l2_ctrl *master = ctrl->cluster[0];
bool have_new_data = false;
int i;
/*
* Skip if this control was already handled by a cluster.
* Skip button controls and read-only controls.
*/
if (ref->req_done || (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
continue;
v4l2_ctrl_lock(master);
for (i = 0; i < master->ncontrols; i++) {
if (master->cluster[i]) {
struct v4l2_ctrl_ref *r =
find_ref(hdl, master->cluster[i]->id);
if (r->p_req_valid) {
have_new_data = true;
break;
}
}
}
if (!have_new_data) {
v4l2_ctrl_unlock(master);
continue;
}
for (i = 0; i < master->ncontrols; i++) {
if (master->cluster[i]) {
struct v4l2_ctrl_ref *r =
find_ref(hdl, master->cluster[i]->id);
ret = req_to_new(r);
if (ret) {
v4l2_ctrl_unlock(master);
goto error;
}
master->cluster[i]->is_new = 1;
r->req_done = true;
}
}
/*
* For volatile autoclusters that are currently in auto mode
* we need to discover if it will be set to manual mode.
* If so, then we have to copy the current volatile values
* first since those will become the new manual values (which
* may be overwritten by explicit new values from this set
* of controls).
*/
if (master->is_auto && master->has_volatiles &&
!is_cur_manual(master)) {
s32 new_auto_val = *master->p_new.p_s32;
/*
* If the new value == the manual value, then copy
* the current volatile values.
*/
if (new_auto_val == master->manual_mode_value)
update_from_auto_cluster(master);
}
ret = try_or_set_cluster(NULL, master, true, 0);
v4l2_ctrl_unlock(master);
if (ret)
break;
}
error:
media_request_object_put(obj);
return ret;
}
EXPORT_SYMBOL(v4l2_ctrl_request_setup);
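/*
 * Usage sketch (illustrative, not part of this file): a stateless codec
 * driver applies the request controls before programming the hardware
 * and completes them when the job is done, typically from its m2m
 * device_run() and job-finish paths. The function name is an assumption.
 */
static void __maybe_unused
example_device_run(struct media_request *req,
		   struct v4l2_ctrl_handler *main_hdl)
{
	v4l2_ctrl_request_setup(req, main_hdl);
	/* ... program the hardware from the new control values ... */
	v4l2_ctrl_request_complete(req, main_hdl);
}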
| linux-master | drivers/media/v4l2-core/v4l2-ctrls-request.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MIPI Camera Control Interface (CCI) register access helpers.
*
* Copyright (C) 2023 Hans de Goede <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dev_printk.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/types.h>
#include <asm/unaligned.h>
#include <media/v4l2-cci.h>
int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
{
unsigned int len;
u8 buf[8];
int ret;
if (err && *err)
return *err;
len = FIELD_GET(CCI_REG_WIDTH_MASK, reg);
reg = FIELD_GET(CCI_REG_ADDR_MASK, reg);
ret = regmap_bulk_read(map, reg, buf, len);
if (ret) {
dev_err(regmap_get_device(map), "Error reading reg 0x%4x: %d\n",
reg, ret);
goto out;
}
switch (len) {
case 1:
*val = buf[0];
break;
case 2:
*val = get_unaligned_be16(buf);
break;
case 3:
*val = get_unaligned_be24(buf);
break;
case 4:
*val = get_unaligned_be32(buf);
break;
case 8:
*val = get_unaligned_be64(buf);
break;
default:
dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
len, reg);
ret = -EINVAL;
break;
}
out:
if (ret && err)
*err = ret;
return ret;
}
EXPORT_SYMBOL_GPL(cci_read);
int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
{
unsigned int len;
u8 buf[8];
int ret;
if (err && *err)
return *err;
len = FIELD_GET(CCI_REG_WIDTH_MASK, reg);
reg = FIELD_GET(CCI_REG_ADDR_MASK, reg);
switch (len) {
case 1:
buf[0] = val;
break;
case 2:
put_unaligned_be16(val, buf);
break;
case 3:
put_unaligned_be24(val, buf);
break;
case 4:
put_unaligned_be32(val, buf);
break;
case 8:
put_unaligned_be64(val, buf);
break;
default:
dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
len, reg);
ret = -EINVAL;
goto out;
}
ret = regmap_bulk_write(map, reg, buf, len);
if (ret)
dev_err(regmap_get_device(map), "Error writing reg 0x%4x: %d\n",
reg, ret);
out:
if (ret && err)
*err = ret;
return ret;
}
EXPORT_SYMBOL_GPL(cci_write);
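/*
 * Usage sketch (illustrative, not part of this file): reads and writes
 * can be chained through the optional err pointer so only the first
 * error is acted upon. The register addresses below are hypothetical.
 */
static int __maybe_unused example_cci_access(struct regmap *map)
{
	u64 id;
	int ret = 0;

	cci_read(map, CCI_REG16(0x0016), &id, &ret);	/* hypothetical ID reg */
	cci_write(map, CCI_REG8(0x0100), 0x01, &ret);	/* hypothetical mode reg */
	return ret;
}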
int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err)
{
u64 readval;
int ret;
ret = cci_read(map, reg, &readval, err);
if (ret)
return ret;
val = (readval & ~mask) | (val & mask);
return cci_write(map, reg, val, err);
}
EXPORT_SYMBOL_GPL(cci_update_bits);
int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs,
unsigned int num_regs, int *err)
{
unsigned int i;
int ret;
for (i = 0; i < num_regs; i++) {
ret = cci_write(map, regs[i].reg, regs[i].val, err);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(cci_multi_reg_write);
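/*
 * Usage sketch (illustrative, not part of this file): sensor init
 * sequences are usually kept in a table and written with one call.
 * The registers and values below are hypothetical.
 */
static const struct cci_reg_sequence example_init_regs[] = {
	{ CCI_REG8(0x0100), 0x00 },	/* hypothetical: stream off */
	{ CCI_REG16(0x0340), 0x0450 },	/* hypothetical: frame length */
};

static int __maybe_unused example_apply_init(struct regmap *map)
{
	return cci_multi_reg_write(map, example_init_regs,
				   ARRAY_SIZE(example_init_regs), NULL);
}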
#if IS_ENABLED(CONFIG_V4L2_CCI_I2C)
struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client,
int reg_addr_bits)
{
struct regmap_config config = {
.reg_bits = reg_addr_bits,
.val_bits = 8,
.reg_format_endian = REGMAP_ENDIAN_BIG,
.disable_locking = true,
};
return devm_regmap_init_i2c(client, &config);
}
EXPORT_SYMBOL_GPL(devm_cci_regmap_init_i2c);
#endif
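#if IS_ENABLED(CONFIG_V4L2_CCI_I2C)
/*
 * Usage sketch (illustrative, not part of this file): a sensor driver
 * creates its CCI regmap in probe(); 16-bit register addressing is the
 * common case. The function name is an assumption.
 */
static struct regmap * __maybe_unused
example_init_cci_regmap(struct i2c_client *client)
{
	return devm_cci_regmap_init_i2c(client, 16);
}
#endif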
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hans de Goede <[email protected]>");
MODULE_DESCRIPTION("MIPI Camera Control Interface (CCI) support");
| linux-master | drivers/media/v4l2-core/v4l2-cci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* V4L2 H264 helpers.
*
* Copyright (C) 2019 Collabora, Ltd.
*
* Author: Boris Brezillon <[email protected]>
*/
#include <linux/module.h>
#include <linux/sort.h>
#include <media/v4l2-h264.h>
/*
* Size of the temporary buffer allocated when printing reference lists. The
* output will be truncated if the size is too small.
*/
static const int tmp_str_size = 1024;
/**
* v4l2_h264_init_reflist_builder() - Initialize a P/B0/B1 reference list
* builder
*
* @b: the builder context to initialize
* @dec_params: decode parameters control
* @sps: SPS control
* @dpb: DPB to use when creating the reference list
*/
void
v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b,
const struct v4l2_ctrl_h264_decode_params *dec_params,
const struct v4l2_ctrl_h264_sps *sps,
const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES])
{
int cur_frame_num, max_frame_num;
unsigned int i;
max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
cur_frame_num = dec_params->frame_num;
memset(b, 0, sizeof(*b));
if (!(dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)) {
b->cur_pic_order_count = min(dec_params->bottom_field_order_cnt,
dec_params->top_field_order_cnt);
b->cur_pic_fields = V4L2_H264_FRAME_REF;
} else if (dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD) {
b->cur_pic_order_count = dec_params->bottom_field_order_cnt;
b->cur_pic_fields = V4L2_H264_BOTTOM_FIELD_REF;
} else {
b->cur_pic_order_count = dec_params->top_field_order_cnt;
b->cur_pic_fields = V4L2_H264_TOP_FIELD_REF;
}
for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) {
if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
continue;
if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
b->refs[i].longterm = true;
/*
* Handle frame_num wraparound as described in section
* '8.2.4.1 Decoding process for picture numbers' of the spec.
* For long term references, frame_num is set to
* long_term_frame_idx which requires no wrapping.
*/
if (!b->refs[i].longterm && dpb[i].frame_num > cur_frame_num)
b->refs[i].frame_num = (int)dpb[i].frame_num -
max_frame_num;
else
b->refs[i].frame_num = dpb[i].frame_num;
b->refs[i].top_field_order_cnt = dpb[i].top_field_order_cnt;
b->refs[i].bottom_field_order_cnt = dpb[i].bottom_field_order_cnt;
if (b->cur_pic_fields == V4L2_H264_FRAME_REF) {
u8 fields = V4L2_H264_FRAME_REF;
b->unordered_reflist[b->num_valid].index = i;
b->unordered_reflist[b->num_valid].fields = fields;
b->num_valid++;
continue;
}
if (dpb[i].fields & V4L2_H264_TOP_FIELD_REF) {
u8 fields = V4L2_H264_TOP_FIELD_REF;
b->unordered_reflist[b->num_valid].index = i;
b->unordered_reflist[b->num_valid].fields = fields;
b->num_valid++;
}
if (dpb[i].fields & V4L2_H264_BOTTOM_FIELD_REF) {
u8 fields = V4L2_H264_BOTTOM_FIELD_REF;
b->unordered_reflist[b->num_valid].index = i;
b->unordered_reflist[b->num_valid].fields = fields;
b->num_valid++;
}
}
for (i = b->num_valid; i < ARRAY_SIZE(b->unordered_reflist); i++)
b->unordered_reflist[i].index = i;
}
EXPORT_SYMBOL_GPL(v4l2_h264_init_reflist_builder);
static s32 v4l2_h264_get_poc(const struct v4l2_h264_reflist_builder *b,
const struct v4l2_h264_reference *ref)
{
switch (ref->fields) {
case V4L2_H264_FRAME_REF:
return min(b->refs[ref->index].top_field_order_cnt,
b->refs[ref->index].bottom_field_order_cnt);
case V4L2_H264_TOP_FIELD_REF:
return b->refs[ref->index].top_field_order_cnt;
case V4L2_H264_BOTTOM_FIELD_REF:
return b->refs[ref->index].bottom_field_order_cnt;
}
/* not reached */
return 0;
}
static int v4l2_h264_p_ref_list_cmp(const void *ptra, const void *ptrb,
const void *data)
{
const struct v4l2_h264_reflist_builder *builder = data;
u8 idxa, idxb;
idxa = ((struct v4l2_h264_reference *)ptra)->index;
idxb = ((struct v4l2_h264_reference *)ptrb)->index;
if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES ||
idxb >= V4L2_H264_NUM_DPB_ENTRIES))
return 1;
if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) {
/* Short term pics first. */
if (!builder->refs[idxa].longterm)
return -1;
else
return 1;
}
/*
* For frames, short term pics are in descending pic num order and long
* term ones in ascending order. For fields, the same direction is used
* but with frame_num (wrapped). For frames, the value of pic_num and
* frame_num are the same (see formula (8-28) and (8-29)). For this
* reason we can use frame_num only and share this function between
* frames and fields reflist.
*/
if (!builder->refs[idxa].longterm)
return builder->refs[idxb].frame_num <
builder->refs[idxa].frame_num ?
-1 : 1;
return builder->refs[idxa].frame_num < builder->refs[idxb].frame_num ?
-1 : 1;
}
static int v4l2_h264_b0_ref_list_cmp(const void *ptra, const void *ptrb,
const void *data)
{
const struct v4l2_h264_reflist_builder *builder = data;
s32 poca, pocb;
u8 idxa, idxb;
idxa = ((struct v4l2_h264_reference *)ptra)->index;
idxb = ((struct v4l2_h264_reference *)ptrb)->index;
if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES ||
idxb >= V4L2_H264_NUM_DPB_ENTRIES))
return 1;
if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) {
/* Short term pics first. */
if (!builder->refs[idxa].longterm)
return -1;
else
return 1;
}
/* Long term pics in ascending frame num order. */
if (builder->refs[idxa].longterm)
return builder->refs[idxa].frame_num <
builder->refs[idxb].frame_num ?
-1 : 1;
poca = v4l2_h264_get_poc(builder, ptra);
pocb = v4l2_h264_get_poc(builder, ptrb);
/*
* Short term pics with POC < cur POC first in POC descending order
* followed by short term pics with POC > cur POC in POC ascending
* order.
*/
if ((poca < builder->cur_pic_order_count) !=
(pocb < builder->cur_pic_order_count))
return poca < pocb ? -1 : 1;
else if (poca < builder->cur_pic_order_count)
return pocb < poca ? -1 : 1;
return poca < pocb ? -1 : 1;
}
static int v4l2_h264_b1_ref_list_cmp(const void *ptra, const void *ptrb,
const void *data)
{
const struct v4l2_h264_reflist_builder *builder = data;
s32 poca, pocb;
u8 idxa, idxb;
idxa = ((struct v4l2_h264_reference *)ptra)->index;
idxb = ((struct v4l2_h264_reference *)ptrb)->index;
if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES ||
idxb >= V4L2_H264_NUM_DPB_ENTRIES))
return 1;
if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) {
/* Short term pics first. */
if (!builder->refs[idxa].longterm)
return -1;
else
return 1;
}
/* Long term pics in ascending frame num order. */
if (builder->refs[idxa].longterm)
return builder->refs[idxa].frame_num <
builder->refs[idxb].frame_num ?
-1 : 1;
poca = v4l2_h264_get_poc(builder, ptra);
pocb = v4l2_h264_get_poc(builder, ptrb);
/*
* Short term pics with POC > cur POC first in POC ascending order
* followed by short term pics with POC < cur POC in POC descending
* order.
*/
if ((poca < builder->cur_pic_order_count) !=
(pocb < builder->cur_pic_order_count))
return pocb < poca ? -1 : 1;
else if (poca < builder->cur_pic_order_count)
return pocb < poca ? -1 : 1;
return poca < pocb ? -1 : 1;
}
/*
* The references need to be reordered so that references are alternating
* between top and bottom field references starting with the current picture
* parity. This has to be done for short term and long term references
* separately.
*/
static void reorder_field_reflist(const struct v4l2_h264_reflist_builder *b,
struct v4l2_h264_reference *reflist)
{
struct v4l2_h264_reference tmplist[V4L2_H264_REF_LIST_LEN];
u8 lt, i = 0, j = 0, k = 0;
memcpy(tmplist, reflist, sizeof(tmplist[0]) * b->num_valid);
for (lt = 0; lt <= 1; lt++) {
do {
for (; i < b->num_valid && b->refs[tmplist[i].index].longterm == lt; i++) {
if (tmplist[i].fields == b->cur_pic_fields) {
reflist[k++] = tmplist[i++];
break;
}
}
for (; j < b->num_valid && b->refs[tmplist[j].index].longterm == lt; j++) {
if (tmplist[j].fields != b->cur_pic_fields) {
reflist[k++] = tmplist[j++];
break;
}
}
} while ((i < b->num_valid && b->refs[tmplist[i].index].longterm == lt) ||
(j < b->num_valid && b->refs[tmplist[j].index].longterm == lt));
}
}
static char ref_type_to_char(u8 ref_type)
{
switch (ref_type) {
case V4L2_H264_FRAME_REF:
return 'f';
case V4L2_H264_TOP_FIELD_REF:
return 't';
case V4L2_H264_BOTTOM_FIELD_REF:
return 'b';
}
return '?';
}
static const char *format_ref_list_p(const struct v4l2_h264_reflist_builder *builder,
struct v4l2_h264_reference *reflist,
char **out_str)
{
int n = 0, i;
*out_str = kmalloc(tmp_str_size, GFP_KERNEL);
if (!(*out_str))
return NULL;
n += snprintf(*out_str + n, tmp_str_size - n, "|");
for (i = 0; i < builder->num_valid; i++) {
/* this is pic_num for frame and frame_num (wrapped) for field,
* but for frame pic_num is equal to frame_num (wrapped).
*/
int frame_num = builder->refs[reflist[i].index].frame_num;
bool longterm = builder->refs[reflist[i].index].longterm;
n += scnprintf(*out_str + n, tmp_str_size - n, "%i%c%c|",
frame_num, longterm ? 'l' : 's',
ref_type_to_char(reflist[i].fields));
}
return *out_str;
}
static void print_ref_list_p(const struct v4l2_h264_reflist_builder *builder,
struct v4l2_h264_reference *reflist)
{
char *buf = NULL;
pr_debug("ref_pic_list_p (cur_poc %u%c) %s\n",
builder->cur_pic_order_count,
ref_type_to_char(builder->cur_pic_fields),
format_ref_list_p(builder, reflist, &buf));
kfree(buf);
}
static const char *format_ref_list_b(const struct v4l2_h264_reflist_builder *builder,
struct v4l2_h264_reference *reflist,
char **out_str)
{
int n = 0, i;
*out_str = kmalloc(tmp_str_size, GFP_KERNEL);
if (!(*out_str))
return NULL;
n += snprintf(*out_str + n, tmp_str_size - n, "|");
for (i = 0; i < builder->num_valid; i++) {
int frame_num = builder->refs[reflist[i].index].frame_num;
u32 poc = v4l2_h264_get_poc(builder, reflist + i);
bool longterm = builder->refs[reflist[i].index].longterm;
n += scnprintf(*out_str + n, tmp_str_size - n, "%i%c%c|",
longterm ? frame_num : poc,
longterm ? 'l' : 's',
ref_type_to_char(reflist[i].fields));
}
return *out_str;
}
static void print_ref_list_b(const struct v4l2_h264_reflist_builder *builder,
struct v4l2_h264_reference *reflist, u8 list_num)
{
char *buf = NULL;
pr_debug("ref_pic_list_b%u (cur_poc %u%c) %s",
list_num, builder->cur_pic_order_count,
ref_type_to_char(builder->cur_pic_fields),
format_ref_list_b(builder, reflist, &buf));
kfree(buf);
}
/**
* v4l2_h264_build_p_ref_list() - Build the P reference list
*
* @builder: reference list builder context
* @reflist: 32 sized array used to store the P reference list. Each entry
* is a v4l2_h264_reference structure
*
* This function builds the P reference list. This procedure is described in
* section '8.2.4 Decoding process for reference picture lists construction'
* of the H264 spec. This function can be used by H264 decoder drivers that
* need to pass a P reference list to the hardware.
*/
void
v4l2_h264_build_p_ref_list(const struct v4l2_h264_reflist_builder *builder,
struct v4l2_h264_reference *reflist)
{
memcpy(reflist, builder->unordered_reflist,
sizeof(builder->unordered_reflist[0]) * builder->num_valid);
sort_r(reflist, builder->num_valid, sizeof(*reflist),
v4l2_h264_p_ref_list_cmp, NULL, builder);
if (builder->cur_pic_fields != V4L2_H264_FRAME_REF)
reorder_field_reflist(builder, reflist);
print_ref_list_p(builder, reflist);
}
EXPORT_SYMBOL_GPL(v4l2_h264_build_p_ref_list);
/**
* v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists
*
* @builder: reference list builder context
* @b0_reflist: 32 sized array used to store the B0 reference list. Each entry
* is a v4l2_h264_reference structure
* @b1_reflist: 32 sized array used to store the B1 reference list. Each entry
* is a v4l2_h264_reference structure
*
* This function builds the B0/B1 reference lists. This procedure is described
* in section '8.2.4 Decoding process for reference picture lists construction'
* of the H264 spec. This function can be used by H264 decoder drivers that
* need to pass B0/B1 reference lists to the hardware.
*/
void
v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder,
struct v4l2_h264_reference *b0_reflist,
struct v4l2_h264_reference *b1_reflist)
{
memcpy(b0_reflist, builder->unordered_reflist,
sizeof(builder->unordered_reflist[0]) * builder->num_valid);
sort_r(b0_reflist, builder->num_valid, sizeof(*b0_reflist),
v4l2_h264_b0_ref_list_cmp, NULL, builder);
memcpy(b1_reflist, builder->unordered_reflist,
sizeof(builder->unordered_reflist[0]) * builder->num_valid);
sort_r(b1_reflist, builder->num_valid, sizeof(*b1_reflist),
v4l2_h264_b1_ref_list_cmp, NULL, builder);
if (builder->cur_pic_fields != V4L2_H264_FRAME_REF) {
reorder_field_reflist(builder, b0_reflist);
reorder_field_reflist(builder, b1_reflist);
}
/* The lists are identical only if all num_valid entries match. */
if (builder->num_valid > 1 &&
    !memcmp(b1_reflist, b0_reflist,
	    builder->num_valid * sizeof(*b0_reflist)))
swap(b1_reflist[0], b1_reflist[1]);
print_ref_list_b(builder, b0_reflist, 0);
print_ref_list_b(builder, b1_reflist, 1);
}
EXPORT_SYMBOL_GPL(v4l2_h264_build_b_ref_lists);
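/*
 * Illustrative usage sketch (not part of the file above): a stateless
 * H264 decoder driver typically builds all three lists per slice from
 * the decode-parameters control and programs them into the hardware.
 */
static void example_build_reflists(const struct v4l2_ctrl_h264_decode_params *dec_params,
				   const struct v4l2_ctrl_h264_sps *sps,
				   const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES])
{
	struct v4l2_h264_reflist_builder b;
	struct v4l2_h264_reference p_list[V4L2_H264_REF_LIST_LEN];
	struct v4l2_h264_reference b0_list[V4L2_H264_REF_LIST_LEN];
	struct v4l2_h264_reference b1_list[V4L2_H264_REF_LIST_LEN];

	v4l2_h264_init_reflist_builder(&b, dec_params, sps, dpb);
	v4l2_h264_build_p_ref_list(&b, p_list);
	v4l2_h264_build_b_ref_lists(&b, b0_list, b1_list);
	/* ... hand p_list/b0_list/b1_list to the hardware here ... */
}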
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("V4L2 H264 Helpers");
MODULE_AUTHOR("Boris Brezillon <[email protected]>");
| linux-master | drivers/media/v4l2-core/v4l2-h264.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* V4L2 fwnode binding parsing library
*
* The origins of the V4L2 fwnode library are in V4L2 OF library that
* formerly was located in v4l2-of.c.
*
* Copyright (c) 2016 Intel Corporation.
* Author: Sakari Ailus <[email protected]>
*
* Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki <[email protected]>
*
* Copyright (C) 2012 Renesas Electronics Corp.
* Author: Guennadi Liakhovetski <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <media/v4l2-async.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#include "v4l2-subdev-priv.h"
static const struct v4l2_fwnode_bus_conv {
enum v4l2_fwnode_bus_type fwnode_bus_type;
enum v4l2_mbus_type mbus_type;
const char *name;
} buses[] = {
{
V4L2_FWNODE_BUS_TYPE_GUESS,
V4L2_MBUS_UNKNOWN,
"not specified",
}, {
V4L2_FWNODE_BUS_TYPE_CSI2_CPHY,
V4L2_MBUS_CSI2_CPHY,
"MIPI CSI-2 C-PHY",
}, {
V4L2_FWNODE_BUS_TYPE_CSI1,
V4L2_MBUS_CSI1,
"MIPI CSI-1",
}, {
V4L2_FWNODE_BUS_TYPE_CCP2,
V4L2_MBUS_CCP2,
"compact camera port 2",
}, {
V4L2_FWNODE_BUS_TYPE_CSI2_DPHY,
V4L2_MBUS_CSI2_DPHY,
"MIPI CSI-2 D-PHY",
}, {
V4L2_FWNODE_BUS_TYPE_PARALLEL,
V4L2_MBUS_PARALLEL,
"parallel",
}, {
V4L2_FWNODE_BUS_TYPE_BT656,
V4L2_MBUS_BT656,
"Bt.656",
}, {
V4L2_FWNODE_BUS_TYPE_DPI,
V4L2_MBUS_DPI,
"DPI",
}
};
static const struct v4l2_fwnode_bus_conv *
get_v4l2_fwnode_bus_conv_by_fwnode_bus(enum v4l2_fwnode_bus_type type)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(buses); i++)
if (buses[i].fwnode_bus_type == type)
return &buses[i];
return NULL;
}
static enum v4l2_mbus_type
v4l2_fwnode_bus_type_to_mbus(enum v4l2_fwnode_bus_type type)
{
const struct v4l2_fwnode_bus_conv *conv =
get_v4l2_fwnode_bus_conv_by_fwnode_bus(type);
return conv ? conv->mbus_type : V4L2_MBUS_INVALID;
}
static const char *
v4l2_fwnode_bus_type_to_string(enum v4l2_fwnode_bus_type type)
{
const struct v4l2_fwnode_bus_conv *conv =
get_v4l2_fwnode_bus_conv_by_fwnode_bus(type);
return conv ? conv->name : "not found";
}
static const struct v4l2_fwnode_bus_conv *
get_v4l2_fwnode_bus_conv_by_mbus(enum v4l2_mbus_type type)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(buses); i++)
if (buses[i].mbus_type == type)
return &buses[i];
return NULL;
}
static const char *
v4l2_fwnode_mbus_type_to_string(enum v4l2_mbus_type type)
{
const struct v4l2_fwnode_bus_conv *conv =
get_v4l2_fwnode_bus_conv_by_mbus(type);
return conv ? conv->name : "not found";
}
static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep,
enum v4l2_mbus_type bus_type)
{
struct v4l2_mbus_config_mipi_csi2 *bus = &vep->bus.mipi_csi2;
bool have_clk_lane = false, have_data_lanes = false,
have_lane_polarities = false;
unsigned int flags = 0, lanes_used = 0;
u32 array[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES];
u32 clock_lane = 0;
unsigned int num_data_lanes = 0;
bool use_default_lane_mapping = false;
unsigned int i;
u32 v;
int rval;
if (bus_type == V4L2_MBUS_CSI2_DPHY ||
bus_type == V4L2_MBUS_CSI2_CPHY) {
use_default_lane_mapping = true;
num_data_lanes = min_t(u32, bus->num_data_lanes,
V4L2_MBUS_CSI2_MAX_DATA_LANES);
clock_lane = bus->clock_lane;
if (clock_lane)
use_default_lane_mapping = false;
for (i = 0; i < num_data_lanes; i++) {
array[i] = bus->data_lanes[i];
if (array[i])
use_default_lane_mapping = false;
}
if (use_default_lane_mapping)
pr_debug("no lane mapping given, using defaults\n");
}
rval = fwnode_property_count_u32(fwnode, "data-lanes");
if (rval > 0) {
num_data_lanes =
min_t(int, V4L2_MBUS_CSI2_MAX_DATA_LANES, rval);
fwnode_property_read_u32_array(fwnode, "data-lanes", array,
num_data_lanes);
have_data_lanes = true;
if (use_default_lane_mapping) {
pr_debug("data-lanes property exists; disabling default mapping\n");
use_default_lane_mapping = false;
}
}
for (i = 0; i < num_data_lanes; i++) {
if (lanes_used & BIT(array[i])) {
if (have_data_lanes || !use_default_lane_mapping)
pr_warn("duplicated lane %u in data-lanes, using defaults\n",
array[i]);
use_default_lane_mapping = true;
}
lanes_used |= BIT(array[i]);
if (have_data_lanes)
pr_debug("lane %u position %u\n", i, array[i]);
}
rval = fwnode_property_count_u32(fwnode, "lane-polarities");
if (rval > 0) {
if (rval != 1 + num_data_lanes /* clock+data */) {
pr_warn("invalid number of lane-polarities entries (need %u, got %u)\n",
1 + num_data_lanes, rval);
return -EINVAL;
}
have_lane_polarities = true;
}
if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) {
clock_lane = v;
pr_debug("clock lane position %u\n", v);
have_clk_lane = true;
}
if (have_clk_lane && lanes_used & BIT(clock_lane) &&
!use_default_lane_mapping) {
pr_warn("duplicated lane %u in clock-lanes, using defaults\n",
v);
use_default_lane_mapping = true;
}
if (fwnode_property_present(fwnode, "clock-noncontinuous")) {
flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
pr_debug("non-continuous clock\n");
}
if (bus_type == V4L2_MBUS_CSI2_DPHY ||
bus_type == V4L2_MBUS_CSI2_CPHY ||
lanes_used || have_clk_lane || flags) {
/* Only D-PHY has a clock lane. */
unsigned int dfl_data_lane_index =
bus_type == V4L2_MBUS_CSI2_DPHY;
bus->flags = flags;
if (bus_type == V4L2_MBUS_UNKNOWN)
vep->bus_type = V4L2_MBUS_CSI2_DPHY;
bus->num_data_lanes = num_data_lanes;
if (use_default_lane_mapping) {
bus->clock_lane = 0;
for (i = 0; i < num_data_lanes; i++)
bus->data_lanes[i] = dfl_data_lane_index + i;
} else {
bus->clock_lane = clock_lane;
for (i = 0; i < num_data_lanes; i++)
bus->data_lanes[i] = array[i];
}
if (have_lane_polarities) {
fwnode_property_read_u32_array(fwnode,
"lane-polarities", array,
1 + num_data_lanes);
for (i = 0; i < 1 + num_data_lanes; i++) {
bus->lane_polarities[i] = array[i];
pr_debug("lane %u polarity %sinverted",
i, array[i] ? "" : "not ");
}
} else {
pr_debug("no lane polarities defined, assuming not inverted\n");
}
}
return 0;
}
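/*
 * For reference, an illustrative DT endpoint the CSI-2 parser above
 * understands (property values are made up):
 *
 *	endpoint {
 *		clock-lanes = <0>;
 *		data-lanes = <1 2 3 4>;
 *		lane-polarities = <0 0 0 0 0>;	// clock lane + data lanes
 *		clock-noncontinuous;
 *	};
 */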
#define PARALLEL_MBUS_FLAGS (V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
V4L2_MBUS_HSYNC_ACTIVE_LOW | \
V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
V4L2_MBUS_VSYNC_ACTIVE_LOW | \
V4L2_MBUS_FIELD_EVEN_HIGH | \
V4L2_MBUS_FIELD_EVEN_LOW)
static void
v4l2_fwnode_endpoint_parse_parallel_bus(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep,
enum v4l2_mbus_type bus_type)
{
struct v4l2_mbus_config_parallel *bus = &vep->bus.parallel;
unsigned int flags = 0;
u32 v;
if (bus_type == V4L2_MBUS_PARALLEL || bus_type == V4L2_MBUS_BT656)
flags = bus->flags;
if (!fwnode_property_read_u32(fwnode, "hsync-active", &v)) {
flags &= ~(V4L2_MBUS_HSYNC_ACTIVE_HIGH |
V4L2_MBUS_HSYNC_ACTIVE_LOW);
flags |= v ? V4L2_MBUS_HSYNC_ACTIVE_HIGH :
V4L2_MBUS_HSYNC_ACTIVE_LOW;
pr_debug("hsync-active %s\n", v ? "high" : "low");
}
if (!fwnode_property_read_u32(fwnode, "vsync-active", &v)) {
flags &= ~(V4L2_MBUS_VSYNC_ACTIVE_HIGH |
V4L2_MBUS_VSYNC_ACTIVE_LOW);
flags |= v ? V4L2_MBUS_VSYNC_ACTIVE_HIGH :
V4L2_MBUS_VSYNC_ACTIVE_LOW;
pr_debug("vsync-active %s\n", v ? "high" : "low");
}
if (!fwnode_property_read_u32(fwnode, "field-even-active", &v)) {
flags &= ~(V4L2_MBUS_FIELD_EVEN_HIGH |
V4L2_MBUS_FIELD_EVEN_LOW);
flags |= v ? V4L2_MBUS_FIELD_EVEN_HIGH :
V4L2_MBUS_FIELD_EVEN_LOW;
pr_debug("field-even-active %s\n", v ? "high" : "low");
}
if (!fwnode_property_read_u32(fwnode, "pclk-sample", &v)) {
flags &= ~(V4L2_MBUS_PCLK_SAMPLE_RISING |
V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_PCLK_SAMPLE_DUALEDGE);
switch (v) {
case 0:
flags |= V4L2_MBUS_PCLK_SAMPLE_FALLING;
pr_debug("pclk-sample low\n");
break;
case 1:
flags |= V4L2_MBUS_PCLK_SAMPLE_RISING;
pr_debug("pclk-sample high\n");
break;
case 2:
flags |= V4L2_MBUS_PCLK_SAMPLE_DUALEDGE;
pr_debug("pclk-sample dual edge\n");
break;
default:
pr_warn("invalid argument for pclk-sample");
break;
}
}
if (!fwnode_property_read_u32(fwnode, "data-active", &v)) {
flags &= ~(V4L2_MBUS_DATA_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_LOW);
flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
V4L2_MBUS_DATA_ACTIVE_LOW;
pr_debug("data-active %s\n", v ? "high" : "low");
}
if (fwnode_property_present(fwnode, "slave-mode")) {
pr_debug("slave mode\n");
flags &= ~V4L2_MBUS_MASTER;
flags |= V4L2_MBUS_SLAVE;
} else {
flags &= ~V4L2_MBUS_SLAVE;
flags |= V4L2_MBUS_MASTER;
}
if (!fwnode_property_read_u32(fwnode, "bus-width", &v)) {
bus->bus_width = v;
pr_debug("bus-width %u\n", v);
}
if (!fwnode_property_read_u32(fwnode, "data-shift", &v)) {
bus->data_shift = v;
pr_debug("data-shift %u\n", v);
}
if (!fwnode_property_read_u32(fwnode, "sync-on-green-active", &v)) {
flags &= ~(V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH |
V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW);
flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH :
V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW;
pr_debug("sync-on-green-active %s\n", v ? "high" : "low");
}
if (!fwnode_property_read_u32(fwnode, "data-enable-active", &v)) {
flags &= ~(V4L2_MBUS_DATA_ENABLE_HIGH |
V4L2_MBUS_DATA_ENABLE_LOW);
flags |= v ? V4L2_MBUS_DATA_ENABLE_HIGH :
V4L2_MBUS_DATA_ENABLE_LOW;
pr_debug("data-enable-active %s\n", v ? "high" : "low");
}
switch (bus_type) {
default:
bus->flags = flags;
if (flags & PARALLEL_MBUS_FLAGS)
vep->bus_type = V4L2_MBUS_PARALLEL;
else
vep->bus_type = V4L2_MBUS_BT656;
break;
case V4L2_MBUS_PARALLEL:
vep->bus_type = V4L2_MBUS_PARALLEL;
bus->flags = flags;
break;
case V4L2_MBUS_BT656:
vep->bus_type = V4L2_MBUS_BT656;
bus->flags = flags & ~PARALLEL_MBUS_FLAGS;
break;
}
}
static void
v4l2_fwnode_endpoint_parse_csi1_bus(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep,
enum v4l2_mbus_type bus_type)
{
struct v4l2_mbus_config_mipi_csi1 *bus = &vep->bus.mipi_csi1;
u32 v;
if (!fwnode_property_read_u32(fwnode, "clock-inv", &v)) {
bus->clock_inv = v;
pr_debug("clock-inv %u\n", v);
}
if (!fwnode_property_read_u32(fwnode, "strobe", &v)) {
bus->strobe = v;
pr_debug("strobe %u\n", v);
}
if (!fwnode_property_read_u32(fwnode, "data-lanes", &v)) {
bus->data_lane = v;
pr_debug("data-lanes %u\n", v);
}
if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) {
bus->clock_lane = v;
pr_debug("clock-lanes %u\n", v);
}
if (bus_type == V4L2_MBUS_CCP2)
vep->bus_type = V4L2_MBUS_CCP2;
else
vep->bus_type = V4L2_MBUS_CSI1;
}
static int __v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep)
{
u32 bus_type = V4L2_FWNODE_BUS_TYPE_GUESS;
enum v4l2_mbus_type mbus_type;
int rval;
pr_debug("===== begin parsing endpoint %pfw\n", fwnode);
fwnode_property_read_u32(fwnode, "bus-type", &bus_type);
pr_debug("fwnode video bus type %s (%u), mbus type %s (%u)\n",
v4l2_fwnode_bus_type_to_string(bus_type), bus_type,
v4l2_fwnode_mbus_type_to_string(vep->bus_type),
vep->bus_type);
mbus_type = v4l2_fwnode_bus_type_to_mbus(bus_type);
if (mbus_type == V4L2_MBUS_INVALID) {
pr_debug("unsupported bus type %u\n", bus_type);
return -EINVAL;
}
if (vep->bus_type != V4L2_MBUS_UNKNOWN) {
if (mbus_type != V4L2_MBUS_UNKNOWN &&
vep->bus_type != mbus_type) {
pr_debug("expecting bus type %s\n",
v4l2_fwnode_mbus_type_to_string(vep->bus_type));
return -ENXIO;
}
} else {
vep->bus_type = mbus_type;
}
switch (vep->bus_type) {
case V4L2_MBUS_UNKNOWN:
rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep,
V4L2_MBUS_UNKNOWN);
if (rval)
return rval;
if (vep->bus_type == V4L2_MBUS_UNKNOWN)
v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep,
V4L2_MBUS_UNKNOWN);
pr_debug("assuming media bus type %s (%u)\n",
v4l2_fwnode_mbus_type_to_string(vep->bus_type),
vep->bus_type);
break;
case V4L2_MBUS_CCP2:
case V4L2_MBUS_CSI1:
v4l2_fwnode_endpoint_parse_csi1_bus(fwnode, vep, vep->bus_type);
break;
case V4L2_MBUS_CSI2_DPHY:
case V4L2_MBUS_CSI2_CPHY:
rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep,
vep->bus_type);
if (rval)
return rval;
break;
case V4L2_MBUS_PARALLEL:
case V4L2_MBUS_BT656:
v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep,
vep->bus_type);
break;
default:
pr_warn("unsupported bus type %u\n", mbus_type);
return -EINVAL;
}
fwnode_graph_parse_endpoint(fwnode, &vep->base);
return 0;
}
int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep)
{
int ret;
ret = __v4l2_fwnode_endpoint_parse(fwnode, vep);
pr_debug("===== end parsing endpoint %pfw\n", fwnode);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_parse);
void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
{
if (IS_ERR_OR_NULL(vep))
return;
kfree(vep->link_frequencies);
vep->link_frequencies = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free);
int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep)
{
int rval;
rval = __v4l2_fwnode_endpoint_parse(fwnode, vep);
if (rval < 0)
return rval;
rval = fwnode_property_count_u64(fwnode, "link-frequencies");
if (rval > 0) {
unsigned int i;
vep->link_frequencies =
kmalloc_array(rval, sizeof(*vep->link_frequencies),
GFP_KERNEL);
if (!vep->link_frequencies)
return -ENOMEM;
vep->nr_of_link_frequencies = rval;
rval = fwnode_property_read_u64_array(fwnode,
"link-frequencies",
vep->link_frequencies,
vep->nr_of_link_frequencies);
if (rval < 0) {
v4l2_fwnode_endpoint_free(vep);
return rval;
}
for (i = 0; i < vep->nr_of_link_frequencies; i++)
pr_debug("link-frequencies %u value %llu\n", i,
vep->link_frequencies[i]);
}
pr_debug("===== end parsing endpoint %pfw\n", fwnode);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse);
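/*
 * Illustrative usage sketch (not part of the file above): parse an
 * endpoint that is expected to be CSI-2 D-PHY, including its
 * link-frequencies property, then release the allocated data.
 */
static int example_parse_csi2_endpoint(struct fwnode_handle *ep)
{
	struct v4l2_fwnode_endpoint vep = {
		.bus_type = V4L2_MBUS_CSI2_DPHY,	/* fail on other buses */
	};
	int ret;

	ret = v4l2_fwnode_endpoint_alloc_parse(ep, &vep);
	if (ret)
		return ret;

	pr_debug("%u data lanes, %u link frequencies\n",
		 vep.bus.mipi_csi2.num_data_lanes,
		 vep.nr_of_link_frequencies);

	v4l2_fwnode_endpoint_free(&vep);
	return 0;
}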
int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode,
struct v4l2_fwnode_link *link)
{
struct fwnode_endpoint fwep;
memset(link, 0, sizeof(*link));
fwnode_graph_parse_endpoint(fwnode, &fwep);
link->local_id = fwep.id;
link->local_port = fwep.port;
link->local_node = fwnode_graph_get_port_parent(fwnode);
if (!link->local_node)
return -ENOLINK;
fwnode = fwnode_graph_get_remote_endpoint(fwnode);
if (!fwnode)
goto err_put_local_node;
fwnode_graph_parse_endpoint(fwnode, &fwep);
link->remote_id = fwep.id;
link->remote_port = fwep.port;
link->remote_node = fwnode_graph_get_port_parent(fwnode);
if (!link->remote_node)
goto err_put_remote_endpoint;
/* Both port parents are now held; drop the remote endpoint reference. */
fwnode_handle_put(fwnode);
return 0;
err_put_remote_endpoint:
fwnode_handle_put(fwnode);
err_put_local_node:
fwnode_handle_put(link->local_node);
return -ENOLINK;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link);
void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link)
{
fwnode_handle_put(link->local_node);
fwnode_handle_put(link->remote_node);
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link);
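/*
 * Illustrative usage sketch (not part of the file above): resolve the
 * device node on the other end of an endpoint and drop the references
 * once done with them.
 */
static int example_follow_link(struct fwnode_handle *ep)
{
	struct v4l2_fwnode_link link;
	int ret;

	ret = v4l2_fwnode_parse_link(ep, &link);
	if (ret)
		return ret;

	pr_debug("port %u/%u -> remote port %u/%u\n",
		 link.local_port, link.local_id,
		 link.remote_port, link.remote_id);

	v4l2_fwnode_put_link(&link);
	return 0;
}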
static const struct v4l2_fwnode_connector_conv {
enum v4l2_connector_type type;
const char *compatible;
} connectors[] = {
{
.type = V4L2_CONN_COMPOSITE,
.compatible = "composite-video-connector",
}, {
.type = V4L2_CONN_SVIDEO,
.compatible = "svideo-connector",
},
};
static enum v4l2_connector_type
v4l2_fwnode_string_to_connector_type(const char *con_str)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(connectors); i++)
if (!strcmp(con_str, connectors[i].compatible))
return connectors[i].type;
return V4L2_CONN_UNKNOWN;
}
static void
v4l2_fwnode_connector_parse_analog(struct fwnode_handle *fwnode,
struct v4l2_fwnode_connector *vc)
{
u32 stds;
int ret;
ret = fwnode_property_read_u32(fwnode, "sdtv-standards", &stds);
/* The property is optional. */
vc->connector.analog.sdtv_stds = ret ? V4L2_STD_ALL : stds;
}
void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector)
{
struct v4l2_connector_link *link, *tmp;
if (IS_ERR_OR_NULL(connector) || connector->type == V4L2_CONN_UNKNOWN)
return;
list_for_each_entry_safe(link, tmp, &connector->links, head) {
v4l2_fwnode_put_link(&link->fwnode_link);
list_del(&link->head);
kfree(link);
}
kfree(connector->label);
connector->label = NULL;
connector->type = V4L2_CONN_UNKNOWN;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_free);
static enum v4l2_connector_type
v4l2_fwnode_get_connector_type(struct fwnode_handle *fwnode)
{
const char *type_name;
int err;
if (!fwnode)
return V4L2_CONN_UNKNOWN;
/* The connector-type is stored within the compatible string. */
err = fwnode_property_read_string(fwnode, "compatible", &type_name);
if (err)
return V4L2_CONN_UNKNOWN;
return v4l2_fwnode_string_to_connector_type(type_name);
}
int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode,
struct v4l2_fwnode_connector *connector)
{
struct fwnode_handle *connector_node;
enum v4l2_connector_type connector_type;
const char *label;
int err;
if (!fwnode)
return -EINVAL;
memset(connector, 0, sizeof(*connector));
INIT_LIST_HEAD(&connector->links);
connector_node = fwnode_graph_get_port_parent(fwnode);
connector_type = v4l2_fwnode_get_connector_type(connector_node);
if (connector_type == V4L2_CONN_UNKNOWN) {
fwnode_handle_put(connector_node);
connector_node = fwnode_graph_get_remote_port_parent(fwnode);
connector_type = v4l2_fwnode_get_connector_type(connector_node);
}
if (connector_type == V4L2_CONN_UNKNOWN) {
pr_err("Unknown connector type\n");
err = -ENOTCONN;
goto out;
}
connector->type = connector_type;
connector->name = fwnode_get_name(connector_node);
err = fwnode_property_read_string(connector_node, "label", &label);
connector->label = err ? NULL : kstrdup_const(label, GFP_KERNEL);
/* Parse the connector specific properties. */
switch (connector->type) {
case V4L2_CONN_COMPOSITE:
case V4L2_CONN_SVIDEO:
v4l2_fwnode_connector_parse_analog(connector_node, connector);
break;
/* Avoid compiler warnings */
case V4L2_CONN_UNKNOWN:
break;
}
out:
fwnode_handle_put(connector_node);
return err;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_parse);
int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode,
struct v4l2_fwnode_connector *connector)
{
struct fwnode_handle *connector_ep;
struct v4l2_connector_link *link;
int err;
if (!fwnode || !connector || connector->type == V4L2_CONN_UNKNOWN)
return -EINVAL;
connector_ep = fwnode_graph_get_remote_endpoint(fwnode);
if (!connector_ep)
return -ENOTCONN;
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link) {
err = -ENOMEM;
goto err;
}
err = v4l2_fwnode_parse_link(connector_ep, &link->fwnode_link);
if (err)
goto err;
fwnode_handle_put(connector_ep);
list_add(&link->head, &connector->links);
connector->nr_of_links++;
return 0;
err:
kfree(link);
fwnode_handle_put(connector_ep);
return err;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_add_link);
int v4l2_fwnode_device_parse(struct device *dev,
struct v4l2_fwnode_device_properties *props)
{
struct fwnode_handle *fwnode = dev_fwnode(dev);
u32 val;
int ret;
memset(props, 0, sizeof(*props));
props->orientation = V4L2_FWNODE_PROPERTY_UNSET;
ret = fwnode_property_read_u32(fwnode, "orientation", &val);
if (!ret) {
switch (val) {
case V4L2_FWNODE_ORIENTATION_FRONT:
case V4L2_FWNODE_ORIENTATION_BACK:
case V4L2_FWNODE_ORIENTATION_EXTERNAL:
break;
default:
dev_warn(dev, "Unsupported device orientation: %u\n", val);
return -EINVAL;
}
props->orientation = val;
dev_dbg(dev, "device orientation: %u\n", val);
}
props->rotation = V4L2_FWNODE_PROPERTY_UNSET;
ret = fwnode_property_read_u32(fwnode, "rotation", &val);
if (!ret) {
if (val >= 360) {
dev_warn(dev, "Unsupported device rotation: %u\n", val);
return -EINVAL;
}
props->rotation = val;
dev_dbg(dev, "device rotation: %u\n", val);
}
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_device_parse);
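/*
 * Illustrative usage sketch (not part of the file above): a sensor
 * driver reads the standard properties in probe and would then feed
 * them to v4l2_ctrl_new_fwnode_properties() to expose the matching
 * controls.
 */
static int example_parse_sensor_props(struct device *dev)
{
	struct v4l2_fwnode_device_properties props;
	int ret;

	ret = v4l2_fwnode_device_parse(dev, &props);
	if (ret)
		return ret;

	if (props.rotation != V4L2_FWNODE_PROPERTY_UNSET)
		dev_dbg(dev, "rotation: %u degrees\n", props.rotation);

	return 0;
}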
/*
* v4l2_fwnode_reference_parse - parse references for async sub-devices
* @dev: the device node the properties of which are parsed for references
* @notifier: the async notifier where the async subdevs will be added
* @prop: the name of the property
*
* Return: 0 on success
* -ENOENT if no entries were found
* -ENOMEM if memory allocation failed
* -EINVAL if property parsing failed
*/
static int v4l2_fwnode_reference_parse(struct device *dev,
struct v4l2_async_notifier *notifier,
const char *prop)
{
struct fwnode_reference_args args;
unsigned int index;
int ret;
for (index = 0;
!(ret = fwnode_property_get_reference_args(dev_fwnode(dev), prop,
NULL, 0, index, &args));
index++) {
struct v4l2_async_connection *asd;
asd = v4l2_async_nf_add_fwnode(notifier, args.fwnode,
struct v4l2_async_connection);
fwnode_handle_put(args.fwnode);
if (IS_ERR(asd)) {
/* not an error if asd already exists */
if (PTR_ERR(asd) == -EEXIST)
continue;
return PTR_ERR(asd);
}
}
/* -ENOENT here means successful parsing */
if (ret != -ENOENT)
return ret;
/* Return -ENOENT if no references were found */
return index ? 0 : -ENOENT;
}
/*
* v4l2_fwnode_reference_get_int_prop - parse a reference with integer
* arguments
* @fwnode: fwnode to read @prop from
* @notifier: notifier for @dev
* @prop: the name of the property
* @index: the index of the reference to get
* @props: the array of integer property names
* @nprops: the number of integer property names in @props
*
* First find an fwnode referred to by the reference at @index in @prop.
*
* Then, starting from that fwnode, descend through child nodes @nprops
* times: at each level, pick the child whose property named by the
* corresponding entry in @props has a value matching the reference's
* integer argument at the same index.
*
* The child fwnode reached at the end of the iteration is then returned to the
* caller.
*
* The core reason for this is that you cannot refer to just any node in ACPI.
* So to refer to an endpoint (easy in DT) you need to refer to a device, then
* provide a list of (property name, property value) tuples where each tuple
* uniquely identifies a child node. The first tuple identifies a child directly
* underneath the device fwnode, the next tuple identifies a child node
* underneath the fwnode identified by the previous tuple, etc. until you
* reach the fwnode you need.
*
* THIS EXAMPLE EXISTS MERELY TO DOCUMENT THIS FUNCTION. DO NOT USE IT AS A
* REFERENCE IN HOW ACPI TABLES SHOULD BE WRITTEN!! See documentation under
* Documentation/firmware-guide/acpi/dsd/ instead and especially graph.txt,
* data-node-references.txt and leds.txt .
*
* Scope (\_SB.PCI0.I2C2)
* {
* Device (CAM0)
* {
* Name (_DSD, Package () {
* ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
* Package () {
* Package () {
* "compatible",
* Package () { "nokia,smia" }
* },
* },
* ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
* Package () {
* Package () { "port0", "PRT0" },
* }
* })
* Name (PRT0, Package() {
* ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
* Package () {
* Package () { "port", 0 },
* },
* ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
* Package () {
* Package () { "endpoint0", "EP00" },
* }
* })
* Name (EP00, Package() {
* ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
* Package () {
* Package () { "endpoint", 0 },
* Package () {
* "remote-endpoint",
* Package() {
* \_SB.PCI0.ISP, 4, 0
* }
* },
* }
* })
* }
* }
*
* Scope (\_SB.PCI0)
* {
* Device (ISP)
* {
* Name (_DSD, Package () {
* ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
* Package () {
* Package () { "port4", "PRT4" },
* }
* })
*
* Name (PRT4, Package() {
* ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
* Package () {
* Package () { "port", 4 },
* },
* ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
* Package () {
* Package () { "endpoint0", "EP40" },
* }
* })
*
* Name (EP40, Package() {
* ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
* Package () {
* Package () { "endpoint", 0 },
* Package () {
* "remote-endpoint",
* Package () {
* \_SB.PCI0.I2C2.CAM0,
* 0, 0
* }
* },
* }
* })
* }
* }
*
* From the EP40 node under ISP device, you could parse the graph remote
* endpoint using v4l2_fwnode_reference_get_int_prop with these arguments:
*
* @fwnode: fwnode referring to EP40 under ISP.
* @prop: "remote-endpoint"
* @index: 0
* @props: "port", "endpoint"
* @nprops: 2
*
* And you'd get back fwnode referring to EP00 under CAM0.
*
* The same works the other way around: if you use EP00 under CAM0 as the
* fwnode, you'll get fwnode referring to EP40 under ISP.
*
* The same example in DT syntax would look like this:
*
* cam: cam0 {
* compatible = "nokia,smia";
*
* port {
* port = <0>;
* endpoint {
* endpoint = <0>;
* remote-endpoint = <&isp 4 0>;
* };
* };
* };
*
* isp: isp {
* ports {
* port@4 {
* port = <4>;
* endpoint {
* endpoint = <0>;
* remote-endpoint = <&cam 0 0>;
* };
* };
* };
* };
*
* Return: the matching fwnode handle on success
*         ERR_PTR(-ENOENT) if no entries (or the property itself) were found
*         ERR_PTR(-EINVAL) if property parsing otherwise failed
*         ERR_PTR(-ENOMEM) if memory allocation failed
*/
static struct fwnode_handle *
v4l2_fwnode_reference_get_int_prop(struct fwnode_handle *fwnode,
const char *prop,
unsigned int index,
const char * const *props,
unsigned int nprops)
{
struct fwnode_reference_args fwnode_args;
u64 *args = fwnode_args.args;
struct fwnode_handle *child;
int ret;
/*
* Obtain remote fwnode as well as the integer arguments.
*
* Note that right now both -ENODATA and -ENOENT may signal
* out-of-bounds access. Return -ENOENT in that case.
*/
ret = fwnode_property_get_reference_args(fwnode, prop, NULL, nprops,
index, &fwnode_args);
if (ret)
return ERR_PTR(ret == -ENODATA ? -ENOENT : ret);
/*
* Find a node in the tree under the referred fwnode corresponding to
* the integer arguments.
*/
fwnode = fwnode_args.fwnode;
while (nprops--) {
u32 val;
/* Loop over all child nodes under fwnode. */
fwnode_for_each_child_node(fwnode, child) {
if (fwnode_property_read_u32(child, *props, &val))
continue;
/* Found property, see if its value matches. */
if (val == *args)
break;
}
fwnode_handle_put(fwnode);
/* No property found; return an error here. */
if (!child) {
fwnode = ERR_PTR(-ENOENT);
break;
}
props++;
args++;
fwnode = child;
}
return fwnode;
}
struct v4l2_fwnode_int_props {
const char *name;
const char * const *props;
unsigned int nprops;
};
/*
* v4l2_fwnode_reference_parse_int_props - parse references for async
* sub-devices
* @dev: struct device pointer
* @notifier: notifier for @dev
* @prop: the name of the property
* @props: the array of integer property names
* @nprops: the number of integer properties
*
* Use v4l2_fwnode_reference_get_int_prop to find fwnodes through reference in
* property @prop with integer arguments with child nodes matching in properties
* @props. Then, set up V4L2 async sub-devices for those fwnodes in the notifier
* accordingly.
*
* While it is technically possible to use this function on DT, it is only
* meaningful on ACPI. In Device Tree you can refer to any node in the tree but
* on ACPI the references are limited to devices.
*
* Return: 0 on success
* -ENOENT if no entries (or the property itself) were found
* -EINVAL if property parsing otherwise failed
* -ENOMEM if memory allocation failed
*/
static int
v4l2_fwnode_reference_parse_int_props(struct device *dev,
struct v4l2_async_notifier *notifier,
const struct v4l2_fwnode_int_props *p)
{
struct fwnode_handle *fwnode;
unsigned int index;
int ret;
const char *prop = p->name;
const char * const *props = p->props;
unsigned int nprops = p->nprops;
index = 0;
do {
fwnode = v4l2_fwnode_reference_get_int_prop(dev_fwnode(dev),
prop, index,
props, nprops);
if (IS_ERR(fwnode)) {
/*
* Note that right now both -ENODATA and -ENOENT may
* signal out-of-bounds access. Return the error in
* cases other than that.
*/
if (PTR_ERR(fwnode) != -ENOENT &&
PTR_ERR(fwnode) != -ENODATA)
return PTR_ERR(fwnode);
break;
}
fwnode_handle_put(fwnode);
index++;
} while (1);
for (index = 0;
!IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(dev_fwnode(dev),
prop, index,
props,
nprops)));
index++) {
struct v4l2_async_connection *asd;
asd = v4l2_async_nf_add_fwnode(notifier, fwnode,
struct v4l2_async_connection);
fwnode_handle_put(fwnode);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
/* not an error if asd already exists */
if (ret == -EEXIST)
continue;
return PTR_ERR(asd);
}
}
return !fwnode || PTR_ERR(fwnode) == -ENOENT ? 0 : PTR_ERR(fwnode);
}
/**
* v4l2_async_nf_parse_fwnode_sensor - parse common references on
* sensors for async sub-devices
* @dev: the device node the properties of which are parsed for references
* @notifier: the async notifier where the async subdevs will be added
*
* Parse common sensor properties for remote devices related to the
* sensor and set up async sub-devices for them.
*
* Any notifier populated using this function must be released with a call to
* v4l2_async_nf_release() after it has been unregistered and the async
* sub-devices are no longer in use, even in the case the function returned an
* error.
*
* Return: 0 on success
* -ENOMEM if memory allocation failed
* -EINVAL if property parsing failed
*/
static int
v4l2_async_nf_parse_fwnode_sensor(struct device *dev,
struct v4l2_async_notifier *notifier)
{
static const char * const led_props[] = { "led" };
static const struct v4l2_fwnode_int_props props[] = {
{ "flash-leds", led_props, ARRAY_SIZE(led_props) },
{ "lens-focus", NULL, 0 },
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(props); i++) {
int ret;
if (props[i].props && is_acpi_node(dev_fwnode(dev)))
ret = v4l2_fwnode_reference_parse_int_props(dev,
notifier,
&props[i]);
else
ret = v4l2_fwnode_reference_parse(dev, notifier,
props[i].name);
if (ret && ret != -ENOENT) {
dev_warn(dev, "parsing property \"%s\" failed (%d)\n",
props[i].name, ret);
return ret;
}
}
return 0;
}
int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd)
{
struct v4l2_async_notifier *notifier;
int ret;
if (WARN_ON(!sd->dev))
return -ENODEV;
notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
if (!notifier)
return -ENOMEM;
v4l2_async_subdev_nf_init(notifier, sd);
ret = v4l2_subdev_get_privacy_led(sd);
if (ret < 0)
goto out_cleanup;
ret = v4l2_async_nf_parse_fwnode_sensor(sd->dev, notifier);
if (ret < 0)
goto out_cleanup;
ret = v4l2_async_nf_register(notifier);
if (ret < 0)
goto out_cleanup;
ret = v4l2_async_register_subdev(sd);
if (ret < 0)
goto out_unregister;
sd->subdev_notifier = notifier;
return 0;
out_unregister:
v4l2_async_nf_unregister(notifier);
out_cleanup:
v4l2_subdev_put_privacy_led(sd);
v4l2_async_nf_cleanup(notifier);
kfree(notifier);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_async_register_subdev_sensor);
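/*
 * Illustrative usage sketch (not part of the file above): a sensor
 * driver calls the helper at the end of probe and pairs it with
 * v4l2_async_unregister_subdev() in remove. On failure the helper has
 * already unwound its notifier, so the caller only propagates the error.
 */
static int example_sensor_probe_tail(struct v4l2_subdev *sd)
{
	return v4l2_async_register_subdev_sensor(sd);
}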
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sakari Ailus <[email protected]>");
MODULE_AUTHOR("Sylwester Nawrocki <[email protected]>");
MODULE_AUTHOR("Guennadi Liakhovetski <[email protected]>");
| linux-master | drivers/media/v4l2-core/v4l2-fwnode.c |
// SPDX-License-Identifier: GPL-2.0
#include <media/v4l2-common.h>
#include <media/v4l2-fh.h>
#include <media/videobuf2-v4l2.h>
#define CREATE_TRACE_POINTS
#include <trace/events/v4l2.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_dqbuf);
EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_qbuf);
| linux-master | drivers/media/v4l2-core/v4l2-trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* generic helper functions for handling video4linux capture buffers
*
* (c) 2007 Mauro Carvalho Chehab, <[email protected]>
*
* Highly based on video-buf written originally by:
* (c) 2001,02 Gerd Knorr <[email protected]>
* (c) 2006 Mauro Carvalho Chehab, <[email protected]>
* (c) 2006 Ted Walther and John Sokol
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <media/videobuf-core.h>
#include <media/v4l2-common.h>
#define MAGIC_BUFFER 0x20070728
#define MAGIC_CHECK(is, should) \
do { \
if (unlikely((is) != (should))) { \
printk(KERN_ERR \
"magic mismatch: %x (expected %x)\n", \
is, should); \
BUG(); \
} \
} while (0)
static int debug;
module_param(debug, int, 0644);
MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <[email protected]>");
MODULE_LICENSE("GPL");
#define dprintk(level, fmt, arg...) \
do { \
if (debug >= level) \
printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
} while (0)
/* --------------------------------------------------------------------- */
#define CALL(q, f, arg...) \
((q->int_ops->f) ? q->int_ops->f(arg) : 0)
#define CALLPTR(q, f, arg...) \
((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
{
struct videobuf_buffer *vb;
BUG_ON(q->msize < sizeof(*vb));
if (!q->int_ops || !q->int_ops->alloc_vb) {
printk(KERN_ERR "No specific ops defined!\n");
BUG();
}
vb = q->int_ops->alloc_vb(q->msize);
if (NULL != vb) {
init_waitqueue_head(&vb->done);
vb->magic = MAGIC_BUFFER;
}
return vb;
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
static int state_neither_active_nor_queued(struct videobuf_queue *q,
struct videobuf_buffer *vb)
{
unsigned long flags;
bool rc;
spin_lock_irqsave(q->irqlock, flags);
rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
spin_unlock_irqrestore(q->irqlock, flags);
return rc;
}
int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
int non_blocking, int intr)
{
bool is_ext_locked;
int ret = 0;
MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
if (non_blocking) {
if (state_neither_active_nor_queued(q, vb))
return 0;
return -EAGAIN;
}
is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);
/* Release vdev lock to prevent this wait from blocking outside access to
the device. */
if (is_ext_locked)
mutex_unlock(q->ext_lock);
if (intr)
ret = wait_event_interruptible(vb->done,
state_neither_active_nor_queued(q, vb));
else
wait_event(vb->done, state_neither_active_nor_queued(q, vb));
/* Relock */
if (is_ext_locked)
mutex_lock(q->ext_lock);
return ret;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);
int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
struct v4l2_framebuffer *fbuf)
{
MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
return CALL(q, iolock, q, vb, fbuf);
}
EXPORT_SYMBOL_GPL(videobuf_iolock);
void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
struct videobuf_buffer *buf)
{
if (q->int_ops->vaddr)
return q->int_ops->vaddr(buf);
return NULL;
}
EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
/* --------------------------------------------------------------------- */
void videobuf_queue_core_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
void *priv,
struct videobuf_qtype_ops *int_ops,
struct mutex *ext_lock)
{
BUG_ON(!q);
memset(q, 0, sizeof(*q));
q->irqlock = irqlock;
q->ext_lock = ext_lock;
q->dev = dev;
q->type = type;
q->field = field;
q->msize = msize;
q->ops = ops;
q->priv_data = priv;
q->int_ops = int_ops;
/* All buffer operations are mandatory */
BUG_ON(!q->ops->buf_setup);
BUG_ON(!q->ops->buf_prepare);
BUG_ON(!q->ops->buf_queue);
BUG_ON(!q->ops->buf_release);
/* Lock is mandatory for queue_cancel to work */
BUG_ON(!irqlock);
/* Having implementations for abstract methods are mandatory */
BUG_ON(!q->int_ops);
mutex_init(&q->vb_lock);
init_waitqueue_head(&q->wait);
INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
/* Locking: only used by bttv; this is unsafe and should eventually be removed */
int videobuf_queue_is_busy(struct videobuf_queue *q)
{
int i;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
if (q->streaming) {
dprintk(1, "busy: streaming active\n");
return 1;
}
if (q->reading) {
dprintk(1, "busy: pending read #1\n");
return 1;
}
if (q->read_buf) {
dprintk(1, "busy: pending read #2\n");
return 1;
}
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
if (q->bufs[i]->map) {
dprintk(1, "busy: buffer #%d mapped\n", i);
return 1;
}
if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
dprintk(1, "busy: buffer #%d queued\n", i);
return 1;
}
if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
dprintk(1, "busy: buffer #%d active\n", i);
return 1;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
/*
* __videobuf_free() - free all the buffers and their control structures
*
* This function can only be called if streaming/reading is off, i.e. no buffers
* are under control of the driver.
*/
/* Locking: Caller holds q->vb_lock */
static int __videobuf_free(struct videobuf_queue *q)
{
int i;
dprintk(1, "%s\n", __func__);
if (!q)
return 0;
if (q->streaming || q->reading) {
dprintk(1, "Cannot free buffers when streaming or reading\n");
return -EBUSY;
}
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
for (i = 0; i < VIDEO_MAX_FRAME; i++)
if (q->bufs[i] && q->bufs[i]->map) {
dprintk(1, "Cannot free mmapped buffers\n");
return -EBUSY;
}
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
q->ops->buf_release(q, q->bufs[i]);
kfree(q->bufs[i]);
q->bufs[i] = NULL;
}
return 0;
}
/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
unsigned long flags = 0;
int i;
q->streaming = 0;
q->reading = 0;
wake_up_interruptible_sync(&q->wait);
/* remove queued buffers from list */
spin_lock_irqsave(q->irqlock, flags);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
list_del(&q->bufs[i]->queue);
q->bufs[i]->state = VIDEOBUF_ERROR;
wake_up_all(&q->bufs[i]->done);
}
}
spin_unlock_irqrestore(q->irqlock, flags);
/* free all buffers + clear queue */
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
q->ops->buf_release(q, q->bufs[i]);
}
INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
/* --------------------------------------------------------------------- */
/* Locking: Caller holds q->vb_lock */
enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
{
enum v4l2_field field = q->field;
BUG_ON(V4L2_FIELD_ANY == field);
if (V4L2_FIELD_ALTERNATE == field) {
if (V4L2_FIELD_TOP == q->last) {
field = V4L2_FIELD_BOTTOM;
q->last = V4L2_FIELD_BOTTOM;
} else {
field = V4L2_FIELD_TOP;
q->last = V4L2_FIELD_TOP;
}
}
return field;
}
EXPORT_SYMBOL_GPL(videobuf_next_field);
/* Locking: Caller holds q->vb_lock */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
struct videobuf_buffer *vb, enum v4l2_buf_type type)
{
MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
b->index = vb->i;
b->type = type;
b->memory = vb->memory;
switch (b->memory) {
case V4L2_MEMORY_MMAP:
b->m.offset = vb->boff;
b->length = vb->bsize;
break;
case V4L2_MEMORY_USERPTR:
b->m.userptr = vb->baddr;
b->length = vb->bsize;
break;
case V4L2_MEMORY_OVERLAY:
b->m.offset = vb->boff;
break;
case V4L2_MEMORY_DMABUF:
/* DMABUF is not handled in videobuf framework */
break;
}
b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
if (vb->map)
b->flags |= V4L2_BUF_FLAG_MAPPED;
switch (vb->state) {
case VIDEOBUF_PREPARED:
case VIDEOBUF_QUEUED:
case VIDEOBUF_ACTIVE:
b->flags |= V4L2_BUF_FLAG_QUEUED;
break;
case VIDEOBUF_ERROR:
b->flags |= V4L2_BUF_FLAG_ERROR;
fallthrough;
case VIDEOBUF_DONE:
b->flags |= V4L2_BUF_FLAG_DONE;
break;
case VIDEOBUF_NEEDS_INIT:
case VIDEOBUF_IDLE:
/* nothing */
break;
}
b->field = vb->field;
v4l2_buffer_set_timestamp(b, vb->ts);
b->bytesused = vb->size;
b->sequence = vb->field_count >> 1;
}
int videobuf_mmap_free(struct videobuf_queue *q)
{
int ret;
videobuf_queue_lock(q);
ret = __videobuf_free(q);
videobuf_queue_unlock(q);
return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);
/* Locking: Caller holds q->vb_lock */
int __videobuf_mmap_setup(struct videobuf_queue *q,
unsigned int bcount, unsigned int bsize,
enum v4l2_memory memory)
{
unsigned int i;
int err;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
err = __videobuf_free(q);
if (0 != err)
return err;
/* Allocate and initialize buffers */
for (i = 0; i < bcount; i++) {
q->bufs[i] = videobuf_alloc_vb(q);
if (NULL == q->bufs[i])
break;
q->bufs[i]->i = i;
q->bufs[i]->memory = memory;
q->bufs[i]->bsize = bsize;
switch (memory) {
case V4L2_MEMORY_MMAP:
q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
break;
case V4L2_MEMORY_USERPTR:
case V4L2_MEMORY_OVERLAY:
case V4L2_MEMORY_DMABUF:
/* nothing */
break;
}
}
if (!i)
return -ENOMEM;
dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);
return i;
}
EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
int videobuf_mmap_setup(struct videobuf_queue *q,
unsigned int bcount, unsigned int bsize,
enum v4l2_memory memory)
{
int ret;
videobuf_queue_lock(q);
ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
videobuf_queue_unlock(q);
return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
int videobuf_reqbufs(struct videobuf_queue *q,
struct v4l2_requestbuffers *req)
{
unsigned int size, count;
int retval;
if (req->memory != V4L2_MEMORY_MMAP &&
req->memory != V4L2_MEMORY_USERPTR &&
req->memory != V4L2_MEMORY_OVERLAY) {
dprintk(1, "reqbufs: memory type invalid\n");
return -EINVAL;
}
videobuf_queue_lock(q);
if (req->type != q->type) {
dprintk(1, "reqbufs: queue type invalid\n");
retval = -EINVAL;
goto done;
}
if (q->streaming) {
dprintk(1, "reqbufs: streaming already exists\n");
retval = -EBUSY;
goto done;
}
if (!list_empty(&q->stream)) {
dprintk(1, "reqbufs: stream running\n");
retval = -EBUSY;
goto done;
}
if (req->count == 0) {
dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
retval = __videobuf_free(q);
goto done;
}
count = req->count;
if (count > VIDEO_MAX_FRAME)
count = VIDEO_MAX_FRAME;
size = 0;
q->ops->buf_setup(q, &count, &size);
dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
count, size,
(unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));
retval = __videobuf_mmap_setup(q, count, size, req->memory);
if (retval < 0) {
dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
goto done;
}
req->count = retval;
retval = 0;
done:
videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);
int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
int ret = -EINVAL;
videobuf_queue_lock(q);
if (unlikely(b->type != q->type)) {
dprintk(1, "querybuf: Wrong type.\n");
goto done;
}
if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
dprintk(1, "querybuf: index out of range.\n");
goto done;
}
if (unlikely(NULL == q->bufs[b->index])) {
dprintk(1, "querybuf: buffer is null.\n");
goto done;
}
videobuf_status(q, b, q->bufs[b->index], q->type);
ret = 0;
done:
videobuf_queue_unlock(q);
return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
struct videobuf_buffer *buf;
enum v4l2_field field;
unsigned long flags = 0;
int retval;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
if (b->memory == V4L2_MEMORY_MMAP)
mmap_read_lock(current->mm);
videobuf_queue_lock(q);
retval = -EBUSY;
if (q->reading) {
dprintk(1, "qbuf: Reading running...\n");
goto done;
}
retval = -EINVAL;
if (b->type != q->type) {
dprintk(1, "qbuf: Wrong type.\n");
goto done;
}
if (b->index >= VIDEO_MAX_FRAME) {
dprintk(1, "qbuf: index out of range.\n");
goto done;
}
buf = q->bufs[b->index];
if (NULL == buf) {
dprintk(1, "qbuf: buffer is null.\n");
goto done;
}
MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
if (buf->memory != b->memory) {
dprintk(1, "qbuf: memory type is wrong.\n");
goto done;
}
if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
dprintk(1, "qbuf: buffer is already queued or active.\n");
goto done;
}
switch (b->memory) {
case V4L2_MEMORY_MMAP:
if (0 == buf->baddr) {
dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
goto done;
}
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
|| q->type == V4L2_BUF_TYPE_VBI_OUTPUT
|| q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
|| q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
buf->size = b->bytesused;
buf->field = b->field;
buf->ts = v4l2_buffer_get_timestamp(b);
}
break;
case V4L2_MEMORY_USERPTR:
if (b->length < buf->bsize) {
dprintk(1, "qbuf: buffer length is not enough\n");
goto done;
}
if (VIDEOBUF_NEEDS_INIT != buf->state &&
buf->baddr != b->m.userptr)
q->ops->buf_release(q, buf);
buf->baddr = b->m.userptr;
break;
case V4L2_MEMORY_OVERLAY:
buf->boff = b->m.offset;
break;
default:
dprintk(1, "qbuf: wrong memory type\n");
goto done;
}
dprintk(1, "qbuf: requesting next field\n");
field = videobuf_next_field(q);
retval = q->ops->buf_prepare(q, buf, field);
if (0 != retval) {
dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
goto done;
}
list_add_tail(&buf->stream, &q->stream);
if (q->streaming) {
spin_lock_irqsave(q->irqlock, flags);
q->ops->buf_queue(q, buf);
spin_unlock_irqrestore(q->irqlock, flags);
}
dprintk(1, "qbuf: succeeded\n");
retval = 0;
wake_up_interruptible_sync(&q->wait);
done:
videobuf_queue_unlock(q);
if (b->memory == V4L2_MEMORY_MMAP)
mmap_read_unlock(current->mm);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_qbuf);
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
{
int retval;
checks:
if (!q->streaming) {
dprintk(1, "next_buffer: Not streaming\n");
retval = -EINVAL;
goto done;
}
if (list_empty(&q->stream)) {
if (noblock) {
retval = -EAGAIN;
dprintk(2, "next_buffer: no buffers to dequeue\n");
goto done;
} else {
dprintk(2, "next_buffer: waiting on buffer\n");
/* Drop lock to avoid deadlock with qbuf */
videobuf_queue_unlock(q);
/* Checking list_empty and streaming without the lock
* is safe here: we jump back to the "checks" label and
* re-validate both under the lock before proceeding */
retval = wait_event_interruptible(q->wait,
!list_empty(&q->stream) || !q->streaming);
videobuf_queue_lock(q);
if (retval)
goto done;
goto checks;
}
}
retval = 0;
done:
return retval;
}
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer(struct videobuf_queue *q,
struct videobuf_buffer **vb, int nonblocking)
{
int retval;
struct videobuf_buffer *buf = NULL;
retval = stream_next_buffer_check_queue(q, nonblocking);
if (retval)
goto done;
buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
retval = videobuf_waiton(q, buf, nonblocking, 1);
if (retval < 0)
goto done;
*vb = buf;
done:
return retval;
}
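/*
 * Dequeue the next finished buffer: wait for it to complete, sync it,
 * report its status to userspace and mark it idle again.
 */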
int videobuf_dqbuf(struct videobuf_queue *q,
struct v4l2_buffer *b, int nonblocking)
{
struct videobuf_buffer *buf = NULL;
int retval;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
memset(b, 0, sizeof(*b));
videobuf_queue_lock(q);
retval = stream_next_buffer(q, &buf, nonblocking);
if (retval < 0) {
dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
goto done;
}
switch (buf->state) {
case VIDEOBUF_ERROR:
dprintk(1, "dqbuf: state is error\n");
break;
case VIDEOBUF_DONE:
dprintk(1, "dqbuf: state is done\n");
break;
default:
dprintk(1, "dqbuf: state invalid\n");
retval = -EINVAL;
goto done;
}
CALL(q, sync, q, buf);
videobuf_status(q, b, buf, q->type);
list_del(&buf->stream);
buf->state = VIDEOBUF_IDLE;
b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);
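/* Start streaming: queue all already-prepared buffers to the hardware. */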
int videobuf_streamon(struct videobuf_queue *q)
{
struct videobuf_buffer *buf;
unsigned long flags = 0;
int retval;
videobuf_queue_lock(q);
retval = -EBUSY;
if (q->reading)
goto done;
retval = 0;
if (q->streaming)
goto done;
q->streaming = 1;
spin_lock_irqsave(q->irqlock, flags);
list_for_each_entry(buf, &q->stream, stream)
if (buf->state == VIDEOBUF_PREPARED)
q->ops->buf_queue(q, buf);
spin_unlock_irqrestore(q->irqlock, flags);
wake_up_interruptible_sync(&q->wait);
done:
videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);
/* Locking: Caller holds q->vb_lock */
static int __videobuf_streamoff(struct videobuf_queue *q)
{
if (!q->streaming)
return -EINVAL;
videobuf_queue_cancel(q);
return 0;
}
int videobuf_streamoff(struct videobuf_queue *q)
{
int retval;
videobuf_queue_lock(q);
retval = __videobuf_streamoff(q);
videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamoff);
/* Locking: Caller holds q->vb_lock */
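/*
 * Zero-copy read(): capture a single frame directly into the
 * user-supplied buffer by treating it as a temporary USERPTR buffer.
 */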
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
char __user *data,
size_t count, loff_t *ppos)
{
enum v4l2_field field;
unsigned long flags = 0;
int retval;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
/* setup stuff */
q->read_buf = videobuf_alloc_vb(q);
if (NULL == q->read_buf)
return -ENOMEM;
q->read_buf->memory = V4L2_MEMORY_USERPTR;
q->read_buf->baddr = (unsigned long)data;
q->read_buf->bsize = count;
field = videobuf_next_field(q);
retval = q->ops->buf_prepare(q, q->read_buf, field);
if (0 != retval)
goto done;
/* start capture & wait */
spin_lock_irqsave(q->irqlock, flags);
q->ops->buf_queue(q, q->read_buf);
spin_unlock_irqrestore(q->irqlock, flags);
retval = videobuf_waiton(q, q->read_buf, 0, 0);
if (0 == retval) {
CALL(q, sync, q, q->read_buf);
if (VIDEOBUF_ERROR == q->read_buf->state)
retval = -EIO;
else
retval = q->read_buf->size;
}
done:
/* cleanup */
q->ops->buf_release(q, q->read_buf);
kfree(q->read_buf);
q->read_buf = NULL;
return retval;
}
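/* Copy the next chunk of the buffer to userspace, starting at q->read_off. */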
static int __videobuf_copy_to_user(struct videobuf_queue *q,
struct videobuf_buffer *buf,
char __user *data, size_t count,
int nonblocking)
{
void *vaddr = CALLPTR(q, vaddr, buf);
/* copy to userspace */
if (count > buf->size - q->read_off)
count = buf->size - q->read_off;
if (copy_to_user(data, vaddr + q->read_off, count))
return -EFAULT;
return count;
}
static int __videobuf_copy_stream(struct videobuf_queue *q,
struct videobuf_buffer *buf,
char __user *data, size_t count, size_t pos,
int vbihack, int nonblocking)
{
unsigned int *fc = CALLPTR(q, vaddr, buf);
if (vbihack) {
/* dirty, undocumented hack -- pass the frame counter
* within the last four bytes of each vbi data block.
* We need it to maintain backward compatibility
* with all the vbi decoding software out there ... */
fc += (buf->size >> 2) - 1;
*fc = buf->field_count >> 1;
dprintk(1, "vbihack: %d\n", *fc);
}
/* copy stuff using the common method */
count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
if ((count == -EFAULT) && (pos == 0))
return -EFAULT;
return count;
}
ssize_t videobuf_read_one(struct videobuf_queue *q,
char __user *data, size_t count, loff_t *ppos,
int nonblocking)
{
enum v4l2_field field;
unsigned long flags = 0;
unsigned size = 0, nbufs = 1;
int retval;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
videobuf_queue_lock(q);
q->ops->buf_setup(q, &nbufs, &size);
if (NULL == q->read_buf &&
count >= size &&
!nonblocking) {
retval = videobuf_read_zerocopy(q, data, count, ppos);
if (retval >= 0 || retval == -EIO)
/* ok, all done */
goto done;
/* fall back to the kernel bounce buffer on failure */
}
if (NULL == q->read_buf) {
/* need to capture a new frame */
retval = -ENOMEM;
q->read_buf = videobuf_alloc_vb(q);
dprintk(1, "video alloc=0x%p\n", q->read_buf);
if (NULL == q->read_buf)
goto done;
q->read_buf->memory = V4L2_MEMORY_USERPTR;
q->read_buf->bsize = count; /* preferred size */
field = videobuf_next_field(q);
retval = q->ops->buf_prepare(q, q->read_buf, field);
if (0 != retval) {
kfree(q->read_buf);
q->read_buf = NULL;
goto done;
}
spin_lock_irqsave(q->irqlock, flags);
q->ops->buf_queue(q, q->read_buf);
spin_unlock_irqrestore(q->irqlock, flags);
q->read_off = 0;
}
/* wait until capture is done */
retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
if (0 != retval)
goto done;
CALL(q, sync, q, q->read_buf);
if (VIDEOBUF_ERROR == q->read_buf->state) {
/* catch I/O errors */
q->ops->buf_release(q, q->read_buf);
kfree(q->read_buf);
q->read_buf = NULL;
retval = -EIO;
goto done;
}
/* Copy to userspace */
retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
if (retval < 0)
goto done;
q->read_off += retval;
if (q->read_off == q->read_buf->size) {
/* all data copied, cleanup */
q->ops->buf_release(q, q->read_buf);
kfree(q->read_buf);
q->read_buf = NULL;
}
done:
videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);
/* Locking: Caller holds q->vb_lock */
static int __videobuf_read_start(struct videobuf_queue *q)
{
enum v4l2_field field;
unsigned long flags = 0;
unsigned int count = 0, size = 0;
int err, i;
q->ops->buf_setup(q, &count, &size);
if (count < 2)
count = 2;
if (count > VIDEO_MAX_FRAME)
count = VIDEO_MAX_FRAME;
size = PAGE_ALIGN(size);
err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
if (err < 0)
return err;
count = err;
for (i = 0; i < count; i++) {
field = videobuf_next_field(q);
err = q->ops->buf_prepare(q, q->bufs[i], field);
if (err)
return err;
list_add_tail(&q->bufs[i]->stream, &q->stream);
}
spin_lock_irqsave(q->irqlock, flags);
for (i = 0; i < count; i++)
q->ops->buf_queue(q, q->bufs[i]);
spin_unlock_irqrestore(q->irqlock, flags);
q->reading = 1;
return 0;
}
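/* Locking: Caller holds q->vb_lock */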
static void __videobuf_read_stop(struct videobuf_queue *q)
{
int i;
videobuf_queue_cancel(q);
__videobuf_free(q);
INIT_LIST_HEAD(&q->stream);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
kfree(q->bufs[i]);
q->bufs[i] = NULL;
}
q->read_buf = NULL;
}
int videobuf_read_start(struct videobuf_queue *q)
{
int rc;
videobuf_queue_lock(q);
rc = __videobuf_read_start(q);
videobuf_queue_unlock(q);
return rc;
}
EXPORT_SYMBOL_GPL(videobuf_read_start);
void videobuf_read_stop(struct videobuf_queue *q)
{
videobuf_queue_lock(q);
__videobuf_read_stop(q);
videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);
void videobuf_stop(struct videobuf_queue *q)
{
videobuf_queue_lock(q);
if (q->streaming)
__videobuf_streamoff(q);
if (q->reading)
__videobuf_read_stop(q);
videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_stop);
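/*
 * Streaming read(): keep a ring of buffers queued to the hardware and
 * copy them out to userspace one by one, requeueing each buffer once it
 * has been fully consumed.
 */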
ssize_t videobuf_read_stream(struct videobuf_queue *q,
char __user *data, size_t count, loff_t *ppos,
int vbihack, int nonblocking)
{
int rc, retval;
unsigned long flags = 0;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
dprintk(2, "%s\n", __func__);
videobuf_queue_lock(q);
retval = -EBUSY;
if (q->streaming)
goto done;
if (!q->reading) {
retval = __videobuf_read_start(q);
if (retval < 0)
goto done;
}
retval = 0;
while (count > 0) {
/* get / wait for data */
if (NULL == q->read_buf) {
q->read_buf = list_entry(q->stream.next,
struct videobuf_buffer,
stream);
list_del(&q->read_buf->stream);
q->read_off = 0;
}
rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
if (rc < 0) {
if (0 == retval)
retval = rc;
break;
}
if (q->read_buf->state == VIDEOBUF_DONE) {
rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
retval, vbihack, nonblocking);
if (rc < 0) {
retval = rc;
break;
}
retval += rc;
count -= rc;
q->read_off += rc;
} else {
/* some error */
q->read_off = q->read_buf->size;
if (0 == retval)
retval = -EIO;
}
/* requeue buffer when done with copying */
if (q->read_off == q->read_buf->size) {
list_add_tail(&q->read_buf->stream,
&q->stream);
spin_lock_irqsave(q->irqlock, flags);
q->ops->buf_queue(q, q->read_buf);
spin_unlock_irqrestore(q->irqlock, flags);
q->read_buf = NULL;
}
if (retval < 0)
break;
}
done:
videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);
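/*
 * Poll helper: wait on the next buffer in the stream (or on the internal
 * read buffer in read() mode) and report readiness.
 */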
__poll_t videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
poll_table *wait)
{
__poll_t req_events = poll_requested_events(wait);
struct videobuf_buffer *buf = NULL;
__poll_t rc = 0;
videobuf_queue_lock(q);
if (q->streaming) {
if (!list_empty(&q->stream))
buf = list_entry(q->stream.next,
struct videobuf_buffer, stream);
} else if (req_events & (EPOLLIN | EPOLLRDNORM)) {
if (!q->reading)
__videobuf_read_start(q);
if (!q->reading) {
rc = EPOLLERR;
} else if (NULL == q->read_buf) {
q->read_buf = list_entry(q->stream.next,
struct videobuf_buffer,
stream);
list_del(&q->read_buf->stream);
q->read_off = 0;
}
buf = q->read_buf;
}
if (buf)
poll_wait(file, &buf->done, wait);
else
rc = EPOLLERR;
if (0 == rc) {
if (buf->state == VIDEOBUF_DONE ||
buf->state == VIDEOBUF_ERROR) {
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
case V4L2_BUF_TYPE_VBI_OUTPUT:
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
case V4L2_BUF_TYPE_SDR_OUTPUT:
rc = EPOLLOUT | EPOLLWRNORM;
break;
default:
rc = EPOLLIN | EPOLLRDNORM;
break;
}
}
}
videobuf_queue_unlock(q);
return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);
int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
int rc = -EINVAL;
int i;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
return -EINVAL;
}
videobuf_queue_lock(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
struct videobuf_buffer *buf = q->bufs[i];
if (buf && buf->memory == V4L2_MEMORY_MMAP &&
buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
rc = CALL(q, mmap_mapper, q, buf, vma);
break;
}
}
videobuf_queue_unlock(q);
return rc;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
| linux-master | drivers/media/v4l2-core/videobuf-core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* V4L2 asynchronous subdevice registration API
*
* Copyright (C) 2012-2013, Guennadi Liakhovetski <[email protected]>
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#include "v4l2-subdev-priv.h"
static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
struct v4l2_subdev *subdev,
struct v4l2_async_connection *asc)
{
if (!n->ops || !n->ops->bound)
return 0;
return n->ops->bound(n, subdev, asc);
}
static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
struct v4l2_subdev *subdev,
struct v4l2_async_connection *asc)
{
if (!n->ops || !n->ops->unbind)
return;
n->ops->unbind(n, subdev, asc);
}
static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
{
if (!n->ops || !n->ops->complete)
return 0;
return n->ops->complete(n);
}
static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n,
struct v4l2_async_connection *asc)
{
if (!n->ops || !n->ops->destroy)
return;
n->ops->destroy(asc);
}
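/* Match a sub-device against an I2C match descriptor (adapter number and address). */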
static bool match_i2c(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_match_desc *match)
{
#if IS_ENABLED(CONFIG_I2C)
struct i2c_client *client = i2c_verify_client(sd->dev);
return client &&
match->i2c.adapter_id == client->adapter->nr &&
match->i2c.address == client->addr;
#else
return false;
#endif
}
static struct device *notifier_dev(struct v4l2_async_notifier *notifier)
{
if (notifier->sd)
return notifier->sd->dev;
if (notifier->v4l2_dev)
return notifier->v4l2_dev->dev;
return NULL;
}
static bool
match_fwnode_one(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode,
struct v4l2_async_match_desc *match)
{
struct fwnode_handle *asd_dev_fwnode;
bool ret;
dev_dbg(notifier_dev(notifier),
"v4l2-async: fwnode match: need %pfw, trying %pfw\n",
sd_fwnode, match->fwnode);
if (sd_fwnode == match->fwnode) {
dev_dbg(notifier_dev(notifier),
"v4l2-async: direct match found\n");
return true;
}
if (!fwnode_graph_is_endpoint(match->fwnode)) {
dev_dbg(notifier_dev(notifier),
"v4l2-async: direct match not found\n");
return false;
}
asd_dev_fwnode = fwnode_graph_get_port_parent(match->fwnode);
ret = sd_fwnode == asd_dev_fwnode;
fwnode_handle_put(asd_dev_fwnode);
dev_dbg(notifier_dev(notifier),
"v4l2-async: device--endpoint match %sfound\n",
ret ? "" : "not ");
return ret;
}
static bool match_fwnode(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_match_desc *match)
{
dev_dbg(notifier_dev(notifier),
"v4l2-async: matching for notifier %pfw, sd fwnode %pfw\n",
dev_fwnode(notifier_dev(notifier)), sd->fwnode);
if (!list_empty(&sd->async_subdev_endpoint_list)) {
struct v4l2_async_subdev_endpoint *ase;
dev_dbg(sd->dev,
"v4l2-async: endpoint fwnode list available, looking for %pfw\n",
match->fwnode);
list_for_each_entry(ase, &sd->async_subdev_endpoint_list,
async_subdev_endpoint_entry) {
bool matched = ase->endpoint == match->fwnode;
dev_dbg(sd->dev,
"v4l2-async: endpoint-endpoint match %sfound with %pfw\n",
matched ? "" : "not ", ase->endpoint);
if (matched)
return true;
}
dev_dbg(sd->dev, "v4l2-async: no endpoint matched\n");
return false;
}
if (match_fwnode_one(notifier, sd, sd->fwnode, match))
return true;
/* Also check the secondary fwnode. */
if (IS_ERR_OR_NULL(sd->fwnode->secondary))
return false;
dev_dbg(notifier_dev(notifier),
"v4l2-async: trying secondary fwnode match\n");
return match_fwnode_one(notifier, sd, sd->fwnode->secondary, match);
}
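/* Registered sub-devices and notifiers; both lists are protected by list_lock. */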
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);
static struct v4l2_async_connection *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd)
{
bool (*match)(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_match_desc *match);
struct v4l2_async_connection *asc;
list_for_each_entry(asc, ¬ifier->waiting_list, asc_entry) {
/* match->type has been verified valid before */
switch (asc->match.type) {
case V4L2_ASYNC_MATCH_TYPE_I2C:
match = match_i2c;
break;
case V4L2_ASYNC_MATCH_TYPE_FWNODE:
match = match_fwnode;
break;
default:
/* Cannot happen, unless someone breaks us */
WARN_ON(true);
return NULL;
}
/* match cannot be NULL here */
if (match(notifier, sd, &asc->match))
return asc;
}
return NULL;
}
/* Compare two async match descriptors for equivalence */
static bool v4l2_async_match_equal(struct v4l2_async_match_desc *match1,
struct v4l2_async_match_desc *match2)
{
if (match1->type != match2->type)
return false;
switch (match1->type) {
case V4L2_ASYNC_MATCH_TYPE_I2C:
return match1->i2c.adapter_id == match2->i2c.adapter_id &&
match1->i2c.address == match2->i2c.address;
case V4L2_ASYNC_MATCH_TYPE_FWNODE:
return match1->fwnode == match2->fwnode;
default:
break;
}
return false;
}
/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
struct v4l2_async_notifier *n;
list_for_each_entry(n, ¬ifier_list, notifier_entry)
if (n->sd == sd)
return n;
return NULL;
}
/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
while (notifier->parent)
notifier = notifier->parent;
return notifier->v4l2_dev;
}
/*
* Return true if all child sub-device notifiers are complete, false otherwise.
*/
static bool
v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
{
struct v4l2_async_connection *asc;
if (!list_empty(¬ifier->waiting_list))
return false;
list_for_each_entry(asc, ¬ifier->done_list, asc_entry) {
struct v4l2_async_notifier *subdev_notifier =
v4l2_async_find_subdev_notifier(asc->sd);
if (subdev_notifier &&
!v4l2_async_nf_can_complete(subdev_notifier))
return false;
}
return true;
}
/*
* Complete the master notifier if possible. This is done when all async
* sub-devices have been bound; v4l2_device is also available then.
*/
static int
v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
{
struct v4l2_async_notifier *__notifier = notifier;
/* Quick check whether there are still more sub-devices here. */
if (!list_empty(¬ifier->waiting_list))
return 0;
if (notifier->sd)
dev_dbg(notifier_dev(notifier),
"v4l2-async: trying to complete\n");
/* Check the entire notifier tree; find the root notifier first. */
while (notifier->parent)
notifier = notifier->parent;
/* This is root if it has v4l2_dev. */
if (!notifier->v4l2_dev) {
dev_dbg(notifier_dev(__notifier),
"v4l2-async: V4L2 device not available\n");
return 0;
}
/* Is everything ready? */
if (!v4l2_async_nf_can_complete(notifier))
return 0;
dev_dbg(notifier_dev(__notifier), "v4l2-async: complete\n");
return v4l2_async_nf_call_complete(notifier);
}
static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);
static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
struct v4l2_subdev *sd)
{
struct media_link *link = NULL;
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
if (sd->entity.function != MEDIA_ENT_F_LENS &&
sd->entity.function != MEDIA_ENT_F_FLASH)
return 0;
link = media_create_ancillary_link(&n->sd->entity, &sd->entity);
#endif
return IS_ERR(link) ? PTR_ERR(link) : 0;
}
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
struct v4l2_device *v4l2_dev,
struct v4l2_subdev *sd,
struct v4l2_async_connection *asc)
{
struct v4l2_async_notifier *subdev_notifier;
bool registered = false;
int ret;
if (list_empty(&sd->asc_list)) {
ret = v4l2_device_register_subdev(v4l2_dev, sd);
if (ret < 0)
return ret;
registered = true;
}
ret = v4l2_async_nf_call_bound(notifier, sd, asc);
if (ret < 0) {
if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
dev_dbg(notifier_dev(notifier),
"failed binding %pfw (%d)\n",
asc->match.fwnode, ret);
goto err_unregister_subdev;
}
if (registered) {
/*
* Depending on the function of the entities involved, we may
* want to create links between them (for example between a
* sensor and its lens or between a sensor's source pad and the
* connected device's sink pad).
*/
ret = v4l2_async_create_ancillary_links(notifier, sd);
if (ret) {
if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
dev_dbg(notifier_dev(notifier),
"failed creating links for %pfw (%d)\n",
asc->match.fwnode, ret);
goto err_call_unbind;
}
}
list_add(&asc->asc_subdev_entry, &sd->asc_list);
asc->sd = sd;
/* Move from the waiting list to notifier's done */
list_move(&asc->asc_entry, ¬ifier->done_list);
dev_dbg(notifier_dev(notifier), "v4l2-async: %s bound (ret %d)\n",
dev_name(sd->dev), ret);
/*
* See if the sub-device has a notifier. If not, return here.
*/
subdev_notifier = v4l2_async_find_subdev_notifier(sd);
if (!subdev_notifier || subdev_notifier->parent)
return 0;
/*
* Proceed with checking for the sub-device notifier's async
* sub-devices, and return the result. The error will be handled by the
* caller.
*/
subdev_notifier->parent = notifier;
return v4l2_async_nf_try_all_subdevs(subdev_notifier);
err_call_unbind:
v4l2_async_nf_call_unbind(notifier, sd, asc);
list_del(&asc->asc_subdev_entry);
err_unregister_subdev:
if (registered)
v4l2_device_unregister_subdev(sd);
return ret;
}
/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
struct v4l2_device *v4l2_dev =
v4l2_async_nf_find_v4l2_dev(notifier);
struct v4l2_subdev *sd;
if (!v4l2_dev)
return 0;
dev_dbg(notifier_dev(notifier), "v4l2-async: trying all sub-devices\n");
again:
list_for_each_entry(sd, &subdev_list, async_list) {
struct v4l2_async_connection *asc;
int ret;
asc = v4l2_async_find_match(notifier, sd);
if (!asc)
continue;
dev_dbg(notifier_dev(notifier),
"v4l2-async: match found, subdev %s\n", sd->name);
ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asc);
if (ret < 0)
return ret;
/*
* v4l2_async_match_notify() may lead to registering a
* new notifier and thus changing the async subdevs
* list. In order to proceed safely from here, restart
* parsing the list from the beginning.
*/
goto again;
}
return 0;
}
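/*
 * Unbind a single connection: move it back to the notifier's waiting
 * list and, if it was the sub-device's last connection, unbind and
 * unregister the sub-device itself.
 */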
static void v4l2_async_unbind_subdev_one(struct v4l2_async_notifier *notifier,
struct v4l2_async_connection *asc)
{
list_move_tail(&asc->asc_entry, ¬ifier->waiting_list);
if (list_is_singular(&asc->asc_subdev_entry)) {
v4l2_async_nf_call_unbind(notifier, asc->sd, asc);
v4l2_device_unregister_subdev(asc->sd);
asc->sd = NULL;
}
list_del(&asc->asc_subdev_entry);
}
/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
struct v4l2_async_connection *asc, *asc_tmp;
list_for_each_entry_safe(asc, asc_tmp, ¬ifier->done_list,
asc_entry) {
struct v4l2_async_notifier *subdev_notifier =
v4l2_async_find_subdev_notifier(asc->sd);
if (subdev_notifier)
v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
v4l2_async_unbind_subdev_one(notifier, asc);
}
notifier->parent = NULL;
}
/* See if an async sub-device can be found in a notifier's lists. */
static bool
v4l2_async_nf_has_async_match_entry(struct v4l2_async_notifier *notifier,
struct v4l2_async_match_desc *match)
{
struct v4l2_async_connection *asc;
list_for_each_entry(asc, ¬ifier->waiting_list, asc_entry)
if (v4l2_async_match_equal(&asc->match, match))
return true;
list_for_each_entry(asc, ¬ifier->done_list, asc_entry)
if (v4l2_async_match_equal(&asc->match, match))
return true;
return false;
}
/*
* Find out whether an async sub-device was set up already or whether it exists
* in a given notifier.
*/
static bool
v4l2_async_nf_has_async_match(struct v4l2_async_notifier *notifier,
struct v4l2_async_match_desc *match)
{
struct list_head *heads[] = {
¬ifier->waiting_list,
¬ifier->done_list,
};
unsigned int i;
lockdep_assert_held(&list_lock);
/* Check that an asc is not being added more than once. */
for (i = 0; i < ARRAY_SIZE(heads); i++) {
struct v4l2_async_connection *asc;
list_for_each_entry(asc, heads[i], asc_entry) {
if (&asc->match == match)
continue;
if (v4l2_async_match_equal(&asc->match, match))
return true;
}
}
/* Check that an asc does not exist in other notifiers. */
list_for_each_entry(notifier, ¬ifier_list, notifier_entry)
if (v4l2_async_nf_has_async_match_entry(notifier, match))
return true;
return false;
}
static int v4l2_async_nf_match_valid(struct v4l2_async_notifier *notifier,
struct v4l2_async_match_desc *match)
{
struct device *dev = notifier_dev(notifier);
switch (match->type) {
case V4L2_ASYNC_MATCH_TYPE_I2C:
case V4L2_ASYNC_MATCH_TYPE_FWNODE:
if (v4l2_async_nf_has_async_match(notifier, match)) {
dev_dbg(dev, "v4l2-async: match descriptor already listed in a notifier\n");
return -EEXIST;
}
break;
default:
dev_err(dev, "v4l2-async: Invalid match type %u on %p\n",
match->type, match);
return -EINVAL;
}
return 0;
}
void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
struct v4l2_device *v4l2_dev)
{
INIT_LIST_HEAD(¬ifier->waiting_list);
INIT_LIST_HEAD(¬ifier->done_list);
notifier->v4l2_dev = v4l2_dev;
}
EXPORT_SYMBOL(v4l2_async_nf_init);
void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd)
{
INIT_LIST_HEAD(¬ifier->waiting_list);
INIT_LIST_HEAD(¬ifier->done_list);
notifier->sd = sd;
}
EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init);
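/*
 * Typical bridge-driver usage, as an illustrative sketch only
 * ("my_nf_ops" and the local variables here are hypothetical):
 *
 *	v4l2_async_nf_init(&notifier, v4l2_dev);
 *	asc = v4l2_async_nf_add_fwnode_remote(&notifier, endpoint,
 *					      struct v4l2_async_connection);
 *	notifier.ops = &my_nf_ops;
 *	ret = v4l2_async_nf_register(&notifier);
 */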
static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
struct v4l2_async_connection *asc;
int ret;
mutex_lock(&list_lock);
list_for_each_entry(asc, ¬ifier->waiting_list, asc_entry) {
ret = v4l2_async_nf_match_valid(notifier, &asc->match);
if (ret)
goto err_unlock;
}
ret = v4l2_async_nf_try_all_subdevs(notifier);
if (ret < 0)
goto err_unbind;
ret = v4l2_async_nf_try_complete(notifier);
if (ret < 0)
goto err_unbind;
/* Keep completed notifiers on the list as well */
list_add(¬ifier->notifier_entry, ¬ifier_list);
mutex_unlock(&list_lock);
return 0;
err_unbind:
/*
* On failure, unbind all sub-devices registered through this notifier.
*/
v4l2_async_nf_unbind_all_subdevs(notifier);
err_unlock:
mutex_unlock(&list_lock);
return ret;
}
int v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
int ret;
if (WARN_ON(!notifier->v4l2_dev == !notifier->sd))
return -EINVAL;
ret = __v4l2_async_nf_register(notifier);
if (ret)
notifier->v4l2_dev = NULL;
return ret;
}
EXPORT_SYMBOL(v4l2_async_nf_register);
static void
__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
return;
v4l2_async_nf_unbind_all_subdevs(notifier);
list_del(¬ifier->notifier_entry);
}
void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
mutex_lock(&list_lock);
__v4l2_async_nf_unregister(notifier);
mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_nf_unregister);
static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
struct v4l2_async_connection *asc, *tmp;
if (!notifier || !notifier->waiting_list.next)
return;
WARN_ON(!list_empty(¬ifier->done_list));
list_for_each_entry_safe(asc, tmp, ¬ifier->waiting_list, asc_entry) {
list_del(&asc->asc_entry);
v4l2_async_nf_call_destroy(notifier, asc);
if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
fwnode_handle_put(asc->match.fwnode);
kfree(asc);
}
notifier->sd = NULL;
notifier->v4l2_dev = NULL;
}
void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
mutex_lock(&list_lock);
__v4l2_async_nf_cleanup(notifier);
mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);
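/* Append a new connection to the notifier's waiting list. */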
static void __v4l2_async_nf_add_connection(struct v4l2_async_notifier *notifier,
struct v4l2_async_connection *asc)
{
mutex_lock(&list_lock);
list_add_tail(&asc->asc_entry, ¬ifier->waiting_list);
mutex_unlock(&list_lock);
}
struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
struct fwnode_handle *fwnode,
unsigned int asc_struct_size)
{
struct v4l2_async_connection *asc;
asc = kzalloc(asc_struct_size, GFP_KERNEL);
if (!asc)
return ERR_PTR(-ENOMEM);
asc->notifier = notifier;
asc->match.type = V4L2_ASYNC_MATCH_TYPE_FWNODE;
asc->match.fwnode = fwnode_handle_get(fwnode);
__v4l2_async_nf_add_connection(notifier, asc);
return asc;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);
struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
struct fwnode_handle *endpoint,
unsigned int asc_struct_size)
{
struct v4l2_async_connection *asc;
struct fwnode_handle *remote;
remote = fwnode_graph_get_remote_endpoint(endpoint);
if (!remote)
return ERR_PTR(-ENOTCONN);
asc = __v4l2_async_nf_add_fwnode(notif, remote, asc_struct_size);
/*
* Calling __v4l2_async_nf_add_fwnode grabs a refcount,
* so drop the one we got in fwnode_graph_get_remote_endpoint.
*/
fwnode_handle_put(remote);
return asc;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);
struct v4l2_async_connection *
__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
unsigned short address, unsigned int asc_struct_size)
{
struct v4l2_async_connection *asc;
asc = kzalloc(asc_struct_size, GFP_KERNEL);
if (!asc)
return ERR_PTR(-ENOMEM);
asc->notifier = notifier;
asc->match.type = V4L2_ASYNC_MATCH_TYPE_I2C;
asc->match.i2c.adapter_id = adapter_id;
asc->match.i2c.address = address;
__v4l2_async_nf_add_connection(notifier, asc);
return asc;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);
int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd,
struct fwnode_handle *fwnode)
{
struct v4l2_async_subdev_endpoint *ase;
ase = kmalloc(sizeof(*ase), GFP_KERNEL);
if (!ase)
return -ENOMEM;
ase->endpoint = fwnode;
list_add(&ase->async_subdev_endpoint_entry,
&sd->async_subdev_endpoint_list);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_async_subdev_endpoint_add);
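/*
 * Return the sub-device's sole async connection, or NULL if it has
 * none or more than one.
 */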
struct v4l2_async_connection *
v4l2_async_connection_unique(struct v4l2_subdev *sd)
{
if (!list_is_singular(&sd->asc_list))
return NULL;
return list_first_entry(&sd->asc_list,
struct v4l2_async_connection, asc_subdev_entry);
}
EXPORT_SYMBOL_GPL(v4l2_async_connection_unique);
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
struct v4l2_async_notifier *subdev_notifier;
struct v4l2_async_notifier *notifier;
struct v4l2_async_connection *asc;
int ret;
INIT_LIST_HEAD(&sd->asc_list);
/*
* No reference taken. The reference is held by the device (struct
* v4l2_subdev.dev), and async sub-device does not exist independently
* of the device at any point of time.
*
* The async sub-device shall always be registered for its device node,
* not the endpoint node.
*/
if (!sd->fwnode && sd->dev) {
sd->fwnode = dev_fwnode(sd->dev);
} else if (fwnode_graph_is_endpoint(sd->fwnode)) {
dev_warn(sd->dev, "sub-device fwnode is an endpoint!\n");
return -EINVAL;
}
mutex_lock(&list_lock);
list_for_each_entry(notifier, ¬ifier_list, notifier_entry) {
struct v4l2_device *v4l2_dev =
v4l2_async_nf_find_v4l2_dev(notifier);
if (!v4l2_dev)
continue;
while ((asc = v4l2_async_find_match(notifier, sd))) {
ret = v4l2_async_match_notify(notifier, v4l2_dev, sd,
asc);
if (ret)
goto err_unbind;
ret = v4l2_async_nf_try_complete(notifier);
if (ret)
goto err_unbind;
}
}
/* None matched, wait for hot-plugging */
list_add(&sd->async_list, &subdev_list);
mutex_unlock(&list_lock);
return 0;
err_unbind:
/*
* Complete failed. Unbind the sub-devices bound through registering
* this async sub-device.
*/
subdev_notifier = v4l2_async_find_subdev_notifier(sd);
if (subdev_notifier)
v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
if (asc)
v4l2_async_unbind_subdev_one(notifier, asc);
mutex_unlock(&list_lock);
return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
struct v4l2_async_connection *asc, *asc_tmp;
if (!sd->async_list.next)
return;
v4l2_subdev_put_privacy_led(sd);
mutex_lock(&list_lock);
__v4l2_async_nf_unregister(sd->subdev_notifier);
__v4l2_async_nf_cleanup(sd->subdev_notifier);
kfree(sd->subdev_notifier);
sd->subdev_notifier = NULL;
if (sd->asc_list.next) {
list_for_each_entry_safe(asc, asc_tmp, &sd->asc_list,
asc_subdev_entry) {
list_move(&asc->asc_entry,
&asc->notifier->waiting_list);
v4l2_async_unbind_subdev_one(asc->notifier, asc);
list_del(&asc->asc_subdev_entry);
}
}
list_del(&sd->async_list);
sd->async_list.next = NULL;
mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
static void print_waiting_match(struct seq_file *s,
struct v4l2_async_match_desc *match)
{
switch (match->type) {
case V4L2_ASYNC_MATCH_TYPE_I2C:
seq_printf(s, " [i2c] dev=%d-%04x\n", match->i2c.adapter_id,
match->i2c.address);
break;
case V4L2_ASYNC_MATCH_TYPE_FWNODE: {
struct fwnode_handle *devnode, *fwnode = match->fwnode;
devnode = fwnode_graph_is_endpoint(fwnode) ?
fwnode_graph_get_port_parent(fwnode) :
fwnode_handle_get(fwnode);
seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
devnode->dev ? dev_name(devnode->dev) : "nil",
fwnode);
fwnode_handle_put(devnode);
break;
}
}
}
static const char *
v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
{
if (notifier->v4l2_dev)
return notifier->v4l2_dev->name;
else if (notifier->sd)
return notifier->sd->name;
else
return "nil";
}
static int pending_subdevs_show(struct seq_file *s, void *data)
{
struct v4l2_async_notifier *notif;
struct v4l2_async_connection *asc;
mutex_lock(&list_lock);
list_for_each_entry(notif, ¬ifier_list, notifier_entry) {
seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
list_for_each_entry(asc, ¬if->waiting_list, asc_entry)
print_waiting_match(s, &asc->match);
}
mutex_unlock(&list_lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);
static struct dentry *v4l2_async_debugfs_dir;
static int __init v4l2_async_init(void)
{
v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
debugfs_create_file("pending_async_subdevices", 0444,
v4l2_async_debugfs_dir, NULL,
&pending_subdevs_fops);
return 0;
}
static void __exit v4l2_async_exit(void)
{
debugfs_remove_recursive(v4l2_async_debugfs_dir);
}
subsys_initcall(v4l2_async_init);
module_exit(v4l2_async_exit);
MODULE_AUTHOR("Guennadi Liakhovetski <[email protected]>");
MODULE_AUTHOR("Sakari Ailus <[email protected]>");
MODULE_AUTHOR("Ezequiel Garcia <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/v4l2-core/v4l2-async.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* i2c tv tuner chip device driver
* driver core, i.e. kernel interfaces, registering and so on
*
* Copyright(c) by Ralph Metzler, Gerd Knorr, Gunther Mayer
*
* Copyright(c) 2005-2011 by Mauro Carvalho Chehab
* - Added support for a separate Radio tuner
* - Major rework and cleanups at the code
*
* This driver supports many devices and the idea is to let the driver
* detect which device is present. So rather than listing all supported
* devices here, we pretend to support a single, fake device type that will
* handle both radio and analog TV tuning.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/i2c.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/videodev2.h>
#include <media/tuner.h>
#include <media/tuner-types.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include "mt20xx.h"
#include "tda8290.h"
#include "tea5761.h"
#include "tea5767.h"
#include "xc2028.h"
#include "tuner-simple.h"
#include "tda9887.h"
#include "xc5000.h"
#include "tda18271.h"
#include "xc4000.h"
#define UNSET (-1U)
/*
* Driver modprobe parameters
*/
/* insmod options used at init time => read/only */
static unsigned int addr;
static unsigned int no_autodetect;
static unsigned int show_i2c;
module_param(addr, int, 0444);
module_param(no_autodetect, int, 0444);
module_param(show_i2c, int, 0444);
/* insmod options used at runtime => read/write */
static int tuner_debug;
static unsigned int tv_range[2] = { 44, 958 };
static unsigned int radio_range[2] = { 65, 108 };
static char pal[] = "--";
static char secam[] = "--";
static char ntsc[] = "-";
module_param_named(debug, tuner_debug, int, 0644);
module_param_array(tv_range, int, NULL, 0644);
module_param_array(radio_range, int, NULL, 0644);
module_param_string(pal, pal, sizeof(pal), 0644);
module_param_string(secam, secam, sizeof(secam), 0644);
module_param_string(ntsc, ntsc, sizeof(ntsc), 0644);
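/*
 * Example (illustrative only; the values are hypothetical):
 *
 *	modprobe tuner debug=1 pal=b radio_range=65,108
 */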
/*
* Static vars
*/
static LIST_HEAD(tuner_list);
static const struct v4l2_subdev_ops tuner_ops;
/*
* Debug macros
*/
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": %d-%04x: " fmt, \
i2c_adapter_id(t->i2c->adapter), t->i2c->addr
#define dprintk(fmt, arg...) do { \
if (tuner_debug) \
printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg); \
} while (0)
/*
* Internal enums/struct used inside the driver
*/
/**
* enum tuner_pad_index - tuner pad index for MEDIA_ENT_F_TUNER
*
* @TUNER_PAD_RF_INPUT:
* Radiofrequency (RF) sink pad, usually linked to a RF connector entity.
* @TUNER_PAD_OUTPUT:
* tuner video output source pad. Contains the video chrominance
* and luminance or the whole bandwidth of the signal converted to
* an Intermediate Frequency (IF) or to baseband (on zero-IF tuners).
* @TUNER_PAD_AUD_OUT:
* Tuner audio output source pad. Tuners used to decode analog TV
* signals have an extra pad for audio output. Old tuners use an
* analog stage with a saw filter for the audio IF frequency. The
* output of the pad is, in this case, the audio IF, which should be
* decoded either by the bridge chipset (that's the case of cx2388x
* chipsets) or by an external IF sound processor, like
* msp34xx. On modern silicon tuners, the audio IF decoder is usually
* incorporated into the tuner. In that case, the output of this pad
* is sampled audio data.
* @TUNER_NUM_PADS:
* Number of pads of the tuner.
*/
enum tuner_pad_index {
TUNER_PAD_RF_INPUT,
TUNER_PAD_OUTPUT,
TUNER_PAD_AUD_OUT,
TUNER_NUM_PADS
};
/**
* enum if_vid_dec_pad_index - video IF-PLL pad index
* for MEDIA_ENT_F_IF_VID_DECODER
*
* @IF_VID_DEC_PAD_IF_INPUT:
* video Intermediate Frequency (IF) sink pad
* @IF_VID_DEC_PAD_OUT:
* IF-PLL video output source pad. Contains the video chrominance
* and luminance IF signals.
* @IF_VID_DEC_PAD_NUM_PADS:
* Number of pads of the video IF-PLL.
*/
enum if_vid_dec_pad_index {
IF_VID_DEC_PAD_IF_INPUT,
IF_VID_DEC_PAD_OUT,
IF_VID_DEC_PAD_NUM_PADS
};
struct tuner {
/* device */
struct dvb_frontend fe;
struct i2c_client *i2c;
struct v4l2_subdev sd;
struct list_head list;
/* keep track of the current settings */
v4l2_std_id std;
unsigned int tv_freq;
unsigned int radio_freq;
unsigned int audmode;
enum v4l2_tuner_type mode;
unsigned int mode_mask; /* Combination of allowable modes */
bool standby; /* Standby mode */
unsigned int type; /* chip type id */
void *config;
const char *name;
#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_pad pad[TUNER_NUM_PADS];
#endif
};
/*
* Function prototypes
*/
static void set_tv_freq(struct i2c_client *c, unsigned int freq);
static void set_radio_freq(struct i2c_client *c, unsigned int freq);
/*
* tuner attach/detach logic
*/
/* This macro allows us to probe dynamically, avoiding static links */
#ifdef CONFIG_MEDIA_ATTACH
#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \
int __r = -EINVAL; \
typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
if (__a) { \
__r = (int) __a(ARGS); \
symbol_put(FUNCTION); \
} else { \
printk(KERN_ERR "TUNER: Unable to find " \
"symbol "#FUNCTION"()\n"); \
} \
__r; \
})
static void tuner_detach(struct dvb_frontend *fe)
{
if (fe->ops.tuner_ops.release) {
fe->ops.tuner_ops.release(fe);
symbol_put_addr(fe->ops.tuner_ops.release);
}
if (fe->ops.analog_ops.release) {
fe->ops.analog_ops.release(fe);
symbol_put_addr(fe->ops.analog_ops.release);
}
}
#else
#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \
FUNCTION(ARGS); \
})
static void tuner_detach(struct dvb_frontend *fe)
{
if (fe->ops.tuner_ops.release)
fe->ops.tuner_ops.release(fe);
if (fe->ops.analog_ops.release)
fe->ops.analog_ops.release(fe);
}
#endif
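/* Retrieve the driver's tuner state from the embedded v4l2_subdev. */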
static inline struct tuner *to_tuner(struct v4l2_subdev *sd)
{
return container_of(sd, struct tuner, sd);
}
/*
* struct analog_demod_ops callbacks
*/
static void fe_set_params(struct dvb_frontend *fe,
struct analog_parameters *params)
{
struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
struct tuner *t = fe->analog_demod_priv;
if (NULL == fe_tuner_ops->set_analog_params) {
pr_warn("Tuner frontend module has no way to set freq\n");
return;
}
fe_tuner_ops->set_analog_params(fe, params);
}
static void fe_standby(struct dvb_frontend *fe)
{
struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
if (fe_tuner_ops->sleep)
fe_tuner_ops->sleep(fe);
}
static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg)
{
struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
struct tuner *t = fe->analog_demod_priv;
if (fe_tuner_ops->set_config)
return fe_tuner_ops->set_config(fe, priv_cfg);
pr_warn("Tuner frontend module has no way to set config\n");
return 0;
}
static void tuner_status(struct dvb_frontend *fe);
static const struct analog_demod_ops tuner_analog_ops = {
.set_params = fe_set_params,
.standby = fe_standby,
.set_config = fe_set_config,
.tuner_status = tuner_status
};
/*
* Functions to select between radio and TV and tuner probe/remove functions
*/
/**
* set_type - Sets the tuner type for a given device
*
* @c: i2c_client descriptor
* @type: type of the tuner (e. g. tuner number)
* @new_mode_mask: Indicates if tuner supports TV and/or Radio
* @new_config: an optional parameter used by a few tuners to adjust
* internal parameters, like LNA mode
* @tuner_callback: an optional function to be called when switching
* to analog mode
*
* This function applies the tuner config to the tuner specified by the
* tun_setup structure. It contains several pieces of per-tuner
* initialization "magic".
*/
static void set_type(struct i2c_client *c, unsigned int type,
unsigned int new_mode_mask, void *new_config,
int (*tuner_callback) (void *dev, int component, int cmd, int arg))
{
struct tuner *t = to_tuner(i2c_get_clientdata(c));
struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
unsigned char buffer[4];
int tune_now = 1;
if (type == UNSET || type == TUNER_ABSENT) {
dprintk("tuner 0x%02x: Tuner type absent\n", c->addr);
return;
}
t->type = type;
t->config = new_config;
if (tuner_callback != NULL) {
dprintk("defining GPIO callback\n");
t->fe.callback = tuner_callback;
}
/* discard private data, in case set_type() was previously called */
tuner_detach(&t->fe);
t->fe.analog_demod_priv = NULL;
switch (t->type) {
case TUNER_MT2032:
if (!dvb_attach(microtune_attach,
&t->fe, t->i2c->adapter, t->i2c->addr))
goto attach_failed;
break;
case TUNER_PHILIPS_TDA8290:
{
if (!dvb_attach(tda829x_attach, &t->fe, t->i2c->adapter,
t->i2c->addr, t->config))
goto attach_failed;
break;
}
case TUNER_TEA5767:
if (!dvb_attach(tea5767_attach, &t->fe,
t->i2c->adapter, t->i2c->addr))
goto attach_failed;
t->mode_mask = T_RADIO;
break;
case TUNER_TEA5761:
if (!dvb_attach(tea5761_attach, &t->fe,
t->i2c->adapter, t->i2c->addr))
goto attach_failed;
t->mode_mask = T_RADIO;
break;
case TUNER_PHILIPS_FMD1216ME_MK3:
case TUNER_PHILIPS_FMD1216MEX_MK3:
buffer[0] = 0x0b;
buffer[1] = 0xdc;
buffer[2] = 0x9c;
buffer[3] = 0x60;
i2c_master_send(c, buffer, 4);
mdelay(1);
buffer[2] = 0x86;
buffer[3] = 0x54;
i2c_master_send(c, buffer, 4);
if (!dvb_attach(simple_tuner_attach, &t->fe,
t->i2c->adapter, t->i2c->addr, t->type))
goto attach_failed;
break;
case TUNER_PHILIPS_TD1316:
buffer[0] = 0x0b;
buffer[1] = 0xdc;
buffer[2] = 0x86;
buffer[3] = 0xa4;
i2c_master_send(c, buffer, 4);
if (!dvb_attach(simple_tuner_attach, &t->fe,
t->i2c->adapter, t->i2c->addr, t->type))
goto attach_failed;
break;
case TUNER_XC2028:
{
struct xc2028_config cfg = {
.i2c_adap = t->i2c->adapter,
.i2c_addr = t->i2c->addr,
};
if (!dvb_attach(xc2028_attach, &t->fe, &cfg))
goto attach_failed;
tune_now = 0;
break;
}
case TUNER_TDA9887:
if (!dvb_attach(tda9887_attach,
&t->fe, t->i2c->adapter, t->i2c->addr))
goto attach_failed;
break;
case TUNER_XC5000:
{
struct xc5000_config xc5000_cfg = {
.i2c_address = t->i2c->addr,
/* if_khz will be set at dvb_attach() */
.if_khz = 0,
};
if (!dvb_attach(xc5000_attach,
&t->fe, t->i2c->adapter, &xc5000_cfg))
goto attach_failed;
tune_now = 0;
break;
}
case TUNER_XC5000C:
{
struct xc5000_config xc5000c_cfg = {
.i2c_address = t->i2c->addr,
/* if_khz will be set at dvb_attach() */
.if_khz = 0,
.chip_id = XC5000C,
};
if (!dvb_attach(xc5000_attach,
&t->fe, t->i2c->adapter, &xc5000c_cfg))
goto attach_failed;
tune_now = 0;
break;
}
case TUNER_NXP_TDA18271:
{
struct tda18271_config cfg = {
.small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
};
if (!dvb_attach(tda18271_attach, &t->fe, t->i2c->addr,
t->i2c->adapter, &cfg))
goto attach_failed;
tune_now = 0;
break;
}
case TUNER_XC4000:
{
struct xc4000_config xc4000_cfg = {
.i2c_address = t->i2c->addr,
/* FIXME: the correct parameters will be set */
/* only when the digital dvb_attach() occurs */
.default_pm = 0,
.dvb_amplitude = 0,
.set_smoothedcvbs = 0,
.if_khz = 0
};
if (!dvb_attach(xc4000_attach,
&t->fe, t->i2c->adapter, &xc4000_cfg))
goto attach_failed;
tune_now = 0;
break;
}
default:
if (!dvb_attach(simple_tuner_attach, &t->fe,
t->i2c->adapter, t->i2c->addr, t->type))
goto attach_failed;
break;
}
if ((NULL == analog_ops->set_params) &&
(fe_tuner_ops->set_analog_params)) {
t->name = fe_tuner_ops->info.name;
t->fe.analog_demod_priv = t;
memcpy(analog_ops, &tuner_analog_ops,
sizeof(struct analog_demod_ops));
if (fe_tuner_ops->get_rf_strength)
analog_ops->has_signal = fe_tuner_ops->get_rf_strength;
if (fe_tuner_ops->get_afc)
analog_ops->get_afc = fe_tuner_ops->get_afc;
} else {
t->name = analog_ops->info.name;
}
#ifdef CONFIG_MEDIA_CONTROLLER
t->sd.entity.name = t->name;
#endif
dprintk("type set to %s\n", t->name);
t->mode_mask = new_mode_mask;
/* Some tuners require more initialization before use, such as a
firmware download or device calibration; trying to set a frequency
here would just fail.
FIXME: better to move set_freq to the tuner code. This is needed
on analog tuners for the PLL to work properly.
*/
if (tune_now) {
if (V4L2_TUNER_RADIO == t->mode)
set_radio_freq(c, t->radio_freq);
else
set_tv_freq(c, t->tv_freq);
}
dprintk("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
c->adapter->name, c->dev.driver->name, c->addr << 1, type,
t->mode_mask);
return;
attach_failed:
dprintk("Tuner attach for type = %d failed.\n", t->type);
t->type = TUNER_ABSENT;
return;
}
/**
* tuner_s_type_addr - Sets the tuner type for a device
*
* @sd: subdev descriptor
* @tun_setup: type to be associated to a given tuner i2c address
*
* This function applies the tuner config to the tuner specified by the
* tun_setup structure.
* If the tuner I2C address is UNSET, it will only set up the device
* if the tuner supports the mode specified in the call.
* If the address is specified, the change will be applied only if
* the tuner I2C address matches.
* The call can change the tuner number and the tuner mode.
*/
static int tuner_s_type_addr(struct v4l2_subdev *sd,
struct tuner_setup *tun_setup)
{
struct tuner *t = to_tuner(sd);
struct i2c_client *c = v4l2_get_subdevdata(sd);
dprintk("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n",
tun_setup->type,
tun_setup->addr,
tun_setup->mode_mask,
tun_setup->config);
if ((t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) &&
(t->mode_mask & tun_setup->mode_mask))) ||
(tun_setup->addr == c->addr)) {
set_type(c, tun_setup->type, tun_setup->mode_mask,
tun_setup->config, tun_setup->tuner_callback);
} else
dprintk("set addr discarded for type %i, mask %x. Asked to change tuner at addr 0x%02x, with mask %x\n",
t->type, t->mode_mask,
tun_setup->addr, tun_setup->mode_mask);
return 0;
}
/**
* tuner_s_config - Sets tuner configuration
*
* @sd: subdev descriptor
* @cfg: tuner configuration
*
* Calls tuner set_config() private function to set some tuner-internal
* parameters
*/
static int tuner_s_config(struct v4l2_subdev *sd,
const struct v4l2_priv_tun_config *cfg)
{
struct tuner *t = to_tuner(sd);
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
if (t->type != cfg->tuner)
return 0;
if (analog_ops->set_config) {
analog_ops->set_config(&t->fe, cfg->priv);
return 0;
}
dprintk("Tuner frontend module has no way to set config\n");
return 0;
}
/**
* tuner_lookup - Seek for tuner adapters
*
* @adap: i2c_adapter struct
* @radio: pointer to be filled if the adapter is radio
* @tv: pointer to be filled if the adapter is TV
*
* Search for existing radio and/or TV tuners on the given I2C adapter,
* discarding demod-only adapters (tda9887).
*
* Note that when this function is called from tuner_probe you can be
* certain no other devices will be added/deleted at the same time, I2C
* core protects against that.
*/
static void tuner_lookup(struct i2c_adapter *adap,
struct tuner **radio, struct tuner **tv)
{
struct tuner *pos;
*radio = NULL;
*tv = NULL;
list_for_each_entry(pos, &tuner_list, list) {
int mode_mask;
if (pos->i2c->adapter != adap ||
strcmp(pos->i2c->dev.driver->name, "tuner"))
continue;
mode_mask = pos->mode_mask;
if (*radio == NULL && mode_mask == T_RADIO)
*radio = pos;
/* Note: currently TDA9887 is the only demod-only
device. If other devices appear then we need to
make this test more general. */
else if (*tv == NULL && pos->type != TUNER_TDA9887 &&
(pos->mode_mask & T_ANALOG_TV))
*tv = pos;
}
}
/**
* tuner_probe - Probes the existing tuners on an I2C bus
*
* @client: i2c_client descriptor
*
* This routine probes for tuners at the expected I2C addresses. In most
* cases, if a device answers at a given I2C address, the driver assumes
* that the device is a tuner. In a few cases, however, additional logic
* is needed to double-check whether the device really is a tuner, or to
* identify the tuner type, as with tea5767/5761 devices.
*
* During client attach, set_type is called by the adapter's attach_inform callback.
* set_type must then be completed by tuner_probe.
*/
static int tuner_probe(struct i2c_client *client)
{
struct tuner *t;
struct tuner *radio;
struct tuner *tv;
#ifdef CONFIG_MEDIA_CONTROLLER
int ret;
#endif
t = kzalloc(sizeof(struct tuner), GFP_KERNEL);
if (NULL == t)
return -ENOMEM;
v4l2_i2c_subdev_init(&t->sd, client, &tuner_ops);
t->i2c = client;
t->name = "(tuner unset)";
t->type = UNSET;
t->audmode = V4L2_TUNER_MODE_STEREO;
t->standby = true;
t->radio_freq = 87.5 * 16000; /* Initial freq range */
t->tv_freq = 400 * 16; /* Sets freq to VHF High - needed for some PLL's to properly start */
if (show_i2c) {
unsigned char buffer[16];
int rc;
memset(buffer, 0, sizeof(buffer));
rc = i2c_master_recv(client, buffer, sizeof(buffer));
if (rc >= 0)
pr_info("I2C RECV = %*ph\n", rc, buffer);
}
/* autodetection code based on the i2c addr */
if (!no_autodetect) {
switch (client->addr) {
case 0x10:
if (tuner_symbol_probe(tea5761_autodetection,
t->i2c->adapter,
t->i2c->addr) >= 0) {
t->type = TUNER_TEA5761;
t->mode_mask = T_RADIO;
tuner_lookup(t->i2c->adapter, &radio, &tv);
if (tv)
tv->mode_mask &= ~T_RADIO;
goto register_client;
}
kfree(t);
return -ENODEV;
case 0x42:
case 0x43:
case 0x4a:
case 0x4b:
/* If the chip is not a tda8290, don't register it here,
since it may be a tda9887 */
if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter,
t->i2c->addr) >= 0) {
dprintk("tda829x detected\n");
} else {
/* Default is being tda9887 */
t->type = TUNER_TDA9887;
t->mode_mask = T_RADIO | T_ANALOG_TV;
goto register_client;
}
break;
case 0x60:
if (tuner_symbol_probe(tea5767_autodetection,
t->i2c->adapter, t->i2c->addr)
>= 0) {
t->type = TUNER_TEA5767;
t->mode_mask = T_RADIO;
/* Sets freq to FM range */
tuner_lookup(t->i2c->adapter, &radio, &tv);
if (tv)
tv->mode_mask &= ~T_RADIO;
goto register_client;
}
break;
}
}
/* Initializes only the first TV tuner on this adapter. Why only the
first? Because there are some devices (notably the ones with TI
tuners) that have more than one i2c address for the *same* device.
Experience shows that, except for just one case, the first
address is the right one. The exception is a Russian tuner
(ACORP_Y878F). So, the desired behavior is just to enable the
first found TV tuner. */
tuner_lookup(t->i2c->adapter, &radio, &tv);
if (tv == NULL) {
t->mode_mask = T_ANALOG_TV;
if (radio == NULL)
t->mode_mask |= T_RADIO;
dprintk("Setting mode_mask to 0x%02x\n", t->mode_mask);
}
/* Should be just before return */
register_client:
#if defined(CONFIG_MEDIA_CONTROLLER)
t->sd.entity.name = t->name;
/*
* Handle the special case where the tuner has actually
* two stages: the PLL to tune into a frequency and the
* IF-PLL demodulator (tda988x).
*/
if (t->type == TUNER_TDA9887) {
t->pad[IF_VID_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
t->pad[IF_VID_DEC_PAD_IF_INPUT].sig_type = PAD_SIGNAL_ANALOG;
t->pad[IF_VID_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
t->pad[IF_VID_DEC_PAD_OUT].sig_type = PAD_SIGNAL_ANALOG;
ret = media_entity_pads_init(&t->sd.entity,
IF_VID_DEC_PAD_NUM_PADS,
&t->pad[0]);
t->sd.entity.function = MEDIA_ENT_F_IF_VID_DECODER;
} else {
t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
t->pad[TUNER_PAD_RF_INPUT].sig_type = PAD_SIGNAL_ANALOG;
t->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
t->pad[TUNER_PAD_OUTPUT].sig_type = PAD_SIGNAL_ANALOG;
t->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE;
t->pad[TUNER_PAD_AUD_OUT].sig_type = PAD_SIGNAL_AUDIO;
ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS,
&t->pad[0]);
t->sd.entity.function = MEDIA_ENT_F_TUNER;
}
if (ret < 0) {
pr_err("failed to initialize media entity!\n");
kfree(t);
return ret;
}
#endif
/* Sets a default mode */
if (t->mode_mask & T_ANALOG_TV)
t->mode = V4L2_TUNER_ANALOG_TV;
else
t->mode = V4L2_TUNER_RADIO;
set_type(client, t->type, t->mode_mask, t->config, t->fe.callback);
list_add_tail(&t->list, &tuner_list);
pr_info("Tuner %d found with type(s)%s%s.\n",
t->type,
t->mode_mask & T_RADIO ? " Radio" : "",
t->mode_mask & T_ANALOG_TV ? " TV" : "");
return 0;
}
/**
* tuner_remove - detaches a tuner
*
* @client: i2c_client descriptor
*/
static void tuner_remove(struct i2c_client *client)
{
struct tuner *t = to_tuner(i2c_get_clientdata(client));
v4l2_device_unregister_subdev(&t->sd);
tuner_detach(&t->fe);
t->fe.analog_demod_priv = NULL;
list_del(&t->list);
kfree(t);
}
/*
* Functions to switch between Radio and TV
*
* A few cards have a separate I2C tuner for radio. Those routines
* take care of switching between TV/Radio mode, filtering only the
* commands that apply to the Radio or TV tuner.
*/
/**
* check_mode - Verify if tuner supports the requested mode
* @t: a pointer to the module's internal struct_tuner
* @mode: mode of the tuner, as defined by &enum v4l2_tuner_type.
*
* This function checks if the tuner is capable of tuning analog TV,
* digital TV or radio, depending on what the caller wants. If the
* tuner can't support that mode, it returns -EINVAL. Otherwise, it
* returns 0.
* This function is needed for boards that have a separate tuner for
* radio (like devices with tea5767).
*
* NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
* select a TV frequency. So, t_mode = T_ANALOG_TV could actually
* be used to represent a Digital TV too.
*/
static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode)
{
int t_mode;
if (mode == V4L2_TUNER_RADIO)
t_mode = T_RADIO;
else
t_mode = T_ANALOG_TV;
if ((t_mode & t->mode_mask) == 0)
return -EINVAL;
return 0;
}
/**
* set_mode - Switch tuner to other mode.
* @t: a pointer to the module's internal struct_tuner
* @mode: enum v4l2_type (radio or TV)
*
* If the tuner doesn't support the requested mode (radio or TV), this
* prints a debug message, puts the tuner into standby and returns
* -EINVAL. Otherwise, it changes the mode and returns 0.
*/
static int set_mode(struct tuner *t, enum v4l2_tuner_type mode)
{
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
if (mode != t->mode) {
if (check_mode(t, mode) == -EINVAL) {
dprintk("Tuner doesn't support mode %d. Putting tuner to sleep\n",
mode);
t->standby = true;
if (analog_ops->standby)
analog_ops->standby(&t->fe);
return -EINVAL;
}
t->mode = mode;
dprintk("Changing to mode %d\n", mode);
}
return 0;
}
/**
* set_freq - Set the tuner to the desired frequency.
* @t: a pointer to the module's internal struct_tuner
* @freq: frequency to set (0 means to use the current frequency)
*/
static void set_freq(struct tuner *t, unsigned int freq)
{
struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
if (t->mode == V4L2_TUNER_RADIO) {
if (!freq)
freq = t->radio_freq;
set_radio_freq(client, freq);
} else {
if (!freq)
freq = t->tv_freq;
set_tv_freq(client, freq);
}
}
/*
* Functions that are specific for TV mode
*/
/**
* set_tv_freq - Set tuner frequency, freq in units of 62.5 kHz = 1/16 MHz
*
* @c: i2c_client descriptor
* @freq: frequency
*/
static void set_tv_freq(struct i2c_client *c, unsigned int freq)
{
struct tuner *t = to_tuner(i2c_get_clientdata(c));
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
struct analog_parameters params = {
.mode = t->mode,
.audmode = t->audmode,
.std = t->std
};
if (t->type == UNSET) {
pr_warn("tuner type not set\n");
return;
}
if (NULL == analog_ops->set_params) {
pr_warn("Tuner has no way to set tv freq\n");
return;
}
if (freq < tv_range[0] * 16 || freq > tv_range[1] * 16) {
dprintk("TV freq (%d.%02d) out of range (%d-%d)\n",
freq / 16, freq % 16 * 100 / 16, tv_range[0],
tv_range[1]);
/* V4L2 spec: if the freq is not possible then the closest
possible value should be selected */
if (freq < tv_range[0] * 16)
freq = tv_range[0] * 16;
else
freq = tv_range[1] * 16;
}
params.frequency = freq;
dprintk("tv freq set to %d.%02d\n",
freq / 16, freq % 16 * 100 / 16);
t->tv_freq = freq;
t->standby = false;
	analog_ops->set_params(&t->fe, &params);
}
/**
* tuner_fixup_std - force a given video standard variant
*
* @t: tuner internal struct
* @std: TV standard
*
 * A few devices or drivers have problems detecting some standard variations.
 * On other operating systems, the drivers generally have a per-country
 * code, and some logic to apply per-country hacks. The V4L2 API doesn't
 * provide such hacks. Instead, it relies on a proper video standard
 * selection by the userspace application. However, as some apps are buggy
 * and don't allow distinguishing all video standard variations, a modprobe
 * parameter can be used to force a video standard match.
*/
static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
{
if (pal[0] != '-' && (std & V4L2_STD_PAL) == V4L2_STD_PAL) {
switch (pal[0]) {
case '6':
return V4L2_STD_PAL_60;
case 'b':
case 'B':
case 'g':
case 'G':
return V4L2_STD_PAL_BG;
case 'i':
case 'I':
return V4L2_STD_PAL_I;
case 'd':
case 'D':
case 'k':
case 'K':
return V4L2_STD_PAL_DK;
case 'M':
case 'm':
return V4L2_STD_PAL_M;
case 'N':
case 'n':
if (pal[1] == 'c' || pal[1] == 'C')
return V4L2_STD_PAL_Nc;
return V4L2_STD_PAL_N;
default:
pr_warn("pal= argument not recognised\n");
break;
}
}
if (secam[0] != '-' && (std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
switch (secam[0]) {
case 'b':
case 'B':
case 'g':
case 'G':
case 'h':
case 'H':
return V4L2_STD_SECAM_B |
V4L2_STD_SECAM_G |
V4L2_STD_SECAM_H;
case 'd':
case 'D':
case 'k':
case 'K':
return V4L2_STD_SECAM_DK;
case 'l':
case 'L':
if ((secam[1] == 'C') || (secam[1] == 'c'))
return V4L2_STD_SECAM_LC;
return V4L2_STD_SECAM_L;
default:
pr_warn("secam= argument not recognised\n");
break;
}
}
if (ntsc[0] != '-' && (std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
switch (ntsc[0]) {
case 'm':
case 'M':
return V4L2_STD_NTSC_M;
case 'j':
case 'J':
return V4L2_STD_NTSC_M_JP;
case 'k':
case 'K':
return V4L2_STD_NTSC_M_KR;
default:
pr_info("ntsc= argument not recognised\n");
break;
}
}
return std;
}
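/*
 * Editorial usage sketch for the fixup above (the command line is an
 * example, not taken from this file): loading the module with
 * "modprobe tuner pal=i" makes a PAL standard request from a buggy
 * application resolve to V4L2_STD_PAL_I via the switch table above.
 * When the parameter starts with '-' (the module default), the guards
 * above leave the requested standard untouched.
 */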
/*
* Functions that are specific for Radio mode
*/
/**
 * set_radio_freq - Set tuner frequency, freq in units of 62.5 Hz (= 1/16 kHz)
*
* @c: i2c_client descriptor
* @freq: frequency
*/
static void set_radio_freq(struct i2c_client *c, unsigned int freq)
{
struct tuner *t = to_tuner(i2c_get_clientdata(c));
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
struct analog_parameters params = {
.mode = t->mode,
.audmode = t->audmode,
.std = t->std
};
if (t->type == UNSET) {
pr_warn("tuner type not set\n");
return;
}
if (NULL == analog_ops->set_params) {
pr_warn("tuner has no way to set radio frequency\n");
return;
}
if (freq < radio_range[0] * 16000 || freq > radio_range[1] * 16000) {
dprintk("radio freq (%d.%02d) out of range (%d-%d)\n",
freq / 16000, freq % 16000 * 100 / 16000,
radio_range[0], radio_range[1]);
/* V4L2 spec: if the freq is not possible then the closest
possible value should be selected */
if (freq < radio_range[0] * 16000)
freq = radio_range[0] * 16000;
else
freq = radio_range[1] * 16000;
}
params.frequency = freq;
dprintk("radio freq set to %d.%02d\n",
freq / 16000, freq % 16000 * 100 / 16000);
t->radio_freq = freq;
t->standby = false;
	analog_ops->set_params(&t->fe, &params);
/*
* The tuner driver might decide to change the audmode if it only
* supports stereo, so update t->audmode.
*/
t->audmode = params.audmode;
}
/*
* Debug function for reporting tuner status to userspace
*/
/**
 * tuner_status - Dumps the current tuner status to dmesg
* @fe: pointer to struct dvb_frontend
*
 * This callback is used only for driver debugging purposes, in response to
 * VIDIOC_LOG_STATUS. It should not change any state.
*/
static void tuner_status(struct dvb_frontend *fe)
{
struct tuner *t = fe->analog_demod_priv;
unsigned long freq, freq_fraction;
struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
struct analog_demod_ops *analog_ops = &fe->ops.analog_ops;
const char *p;
switch (t->mode) {
case V4L2_TUNER_RADIO:
p = "radio";
break;
case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */
p = "digital TV";
break;
case V4L2_TUNER_ANALOG_TV:
default:
p = "analog TV";
break;
}
if (t->mode == V4L2_TUNER_RADIO) {
freq = t->radio_freq / 16000;
freq_fraction = (t->radio_freq % 16000) * 100 / 16000;
} else {
freq = t->tv_freq / 16;
freq_fraction = (t->tv_freq % 16) * 100 / 16;
}
pr_info("Tuner mode: %s%s\n", p,
t->standby ? " on standby mode" : "");
pr_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
pr_info("Standard: 0x%08lx\n", (unsigned long)t->std);
if (t->mode != V4L2_TUNER_RADIO)
return;
if (fe_tuner_ops->get_status) {
u32 tuner_status = 0;
fe_tuner_ops->get_status(&t->fe, &tuner_status);
if (tuner_status & TUNER_STATUS_LOCKED)
pr_info("Tuner is locked.\n");
if (tuner_status & TUNER_STATUS_STEREO)
pr_info("Stereo: yes\n");
}
if (analog_ops->has_signal) {
u16 signal;
if (!analog_ops->has_signal(fe, &signal))
pr_info("Signal strength: %hu\n", signal);
}
}
/*
 * Function to explicitly change the mode to radio. Probably not needed anymore.
*/
static int tuner_s_radio(struct v4l2_subdev *sd)
{
struct tuner *t = to_tuner(sd);
if (set_mode(t, V4L2_TUNER_RADIO) == 0)
set_freq(t, 0);
return 0;
}
/*
* Tuner callbacks to handle userspace ioctl's
*/
/**
* tuner_standby - places the tuner in standby mode
* @sd: pointer to struct v4l2_subdev
*/
static int tuner_standby(struct v4l2_subdev *sd)
{
struct tuner *t = to_tuner(sd);
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
dprintk("Putting tuner to sleep\n");
t->standby = true;
if (analog_ops->standby)
analog_ops->standby(&t->fe);
return 0;
}
static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct tuner *t = to_tuner(sd);
if (set_mode(t, V4L2_TUNER_ANALOG_TV))
return 0;
t->std = tuner_fixup_std(t, std);
if (t->std != std)
dprintk("Fixup standard %llx to %llx\n", std, t->std);
set_freq(t, 0);
return 0;
}
static int tuner_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequency *f)
{
struct tuner *t = to_tuner(sd);
if (set_mode(t, f->type) == 0)
set_freq(t, f->frequency);
return 0;
}
/**
* tuner_g_frequency - Get the tuned frequency for the tuner
* @sd: pointer to struct v4l2_subdev
* @f: pointer to struct v4l2_frequency
*
 * On return, the structure f will be filled with the tuner frequency
 * if the tuner matches f->type.
 * Note: f->type should be initialized before calling this function.
 * This is done by either video_ioctl2 or by the bridge driver.
*/
static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
{
struct tuner *t = to_tuner(sd);
struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
if (check_mode(t, f->type) == -EINVAL)
return 0;
if (f->type == t->mode && fe_tuner_ops->get_frequency && !t->standby) {
u32 abs_freq;
fe_tuner_ops->get_frequency(&t->fe, &abs_freq);
f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
DIV_ROUND_CLOSEST(abs_freq * 2, 125) :
DIV_ROUND_CLOSEST(abs_freq, 62500);
} else {
f->frequency = (V4L2_TUNER_RADIO == f->type) ?
t->radio_freq : t->tv_freq;
}
return 0;
}
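/*
 * Editorial note on the conversions above: fe_tuner_ops->get_frequency()
 * reports Hz, while V4L2 expects units of 62.5 Hz for radio and 62.5 kHz
 * for TV. Dividing by 62.5 is done as "* 2 / 125" to stay in integer
 * arithmetic, e.g. 91900000 Hz * 2 / 125 = 1470400, while a TV frequency
 * of 217250000 Hz / 62500 = 3476.
 */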
/**
* tuner_g_tuner - Fill in tuner information
* @sd: pointer to struct v4l2_subdev
* @vt: pointer to struct v4l2_tuner
*
 * On return, the structure vt will be filled with tuner information
 * if the tuner matches vt->type.
 * Note: vt->type should be initialized before calling this function.
 * This is done by either video_ioctl2 or by the bridge driver.
*/
static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct tuner *t = to_tuner(sd);
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
if (check_mode(t, vt->type) == -EINVAL)
return 0;
if (vt->type == t->mode && analog_ops->get_afc)
analog_ops->get_afc(&t->fe, &vt->afc);
if (vt->type == t->mode && analog_ops->has_signal) {
u16 signal = (u16)vt->signal;
if (!analog_ops->has_signal(&t->fe, &signal))
vt->signal = signal;
}
if (vt->type != V4L2_TUNER_RADIO) {
vt->capability |= V4L2_TUNER_CAP_NORM;
vt->rangelow = tv_range[0] * 16;
vt->rangehigh = tv_range[1] * 16;
return 0;
}
/* radio mode */
if (vt->type == t->mode) {
vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
if (fe_tuner_ops->get_status) {
u32 tuner_status = 0;
fe_tuner_ops->get_status(&t->fe, &tuner_status);
vt->rxsubchans =
(tuner_status & TUNER_STATUS_STEREO) ?
V4L2_TUNER_SUB_STEREO :
V4L2_TUNER_SUB_MONO;
}
vt->audmode = t->audmode;
}
vt->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
vt->rangelow = radio_range[0] * 16000;
vt->rangehigh = radio_range[1] * 16000;
return 0;
}
/**
* tuner_s_tuner - Set the tuner's audio mode
* @sd: pointer to struct v4l2_subdev
* @vt: pointer to struct v4l2_tuner
*
* Sets the audio mode if the tuner matches vt->type.
 * Note: vt->type should be initialized before calling this function.
 * This is done by either video_ioctl2 or by the bridge driver.
*/
static int tuner_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
{
struct tuner *t = to_tuner(sd);
if (set_mode(t, vt->type))
return 0;
if (t->mode == V4L2_TUNER_RADIO) {
t->audmode = vt->audmode;
/*
* For radio audmode can only be mono or stereo. Map any
* other values to stereo. The actual tuner driver that is
* called in set_radio_freq can decide to limit the audmode to
* mono if only mono is supported.
*/
if (t->audmode != V4L2_TUNER_MODE_MONO &&
t->audmode != V4L2_TUNER_MODE_STEREO)
t->audmode = V4L2_TUNER_MODE_STEREO;
}
set_freq(t, 0);
return 0;
}
static int tuner_log_status(struct v4l2_subdev *sd)
{
struct tuner *t = to_tuner(sd);
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
if (analog_ops->tuner_status)
analog_ops->tuner_status(&t->fe);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int tuner_suspend(struct device *dev)
{
struct i2c_client *c = to_i2c_client(dev);
struct tuner *t = to_tuner(i2c_get_clientdata(c));
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
dprintk("suspend\n");
if (t->fe.ops.tuner_ops.suspend)
t->fe.ops.tuner_ops.suspend(&t->fe);
else if (!t->standby && analog_ops->standby)
analog_ops->standby(&t->fe);
return 0;
}
static int tuner_resume(struct device *dev)
{
struct i2c_client *c = to_i2c_client(dev);
struct tuner *t = to_tuner(i2c_get_clientdata(c));
dprintk("resume\n");
if (t->fe.ops.tuner_ops.resume)
t->fe.ops.tuner_ops.resume(&t->fe);
else if (!t->standby)
if (set_mode(t, t->mode) == 0)
set_freq(t, 0);
return 0;
}
#endif
static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
/* TUNER_SET_CONFIG is still called by tuner-simple.c, so we have
to handle it here.
There must be a better way of doing this... */
switch (cmd) {
case TUNER_SET_CONFIG:
return tuner_s_config(sd, arg);
}
return -ENOIOCTLCMD;
}
/*
* Callback structs
*/
static const struct v4l2_subdev_core_ops tuner_core_ops = {
.log_status = tuner_log_status,
};
static const struct v4l2_subdev_tuner_ops tuner_tuner_ops = {
.standby = tuner_standby,
.s_radio = tuner_s_radio,
.g_tuner = tuner_g_tuner,
.s_tuner = tuner_s_tuner,
.s_frequency = tuner_s_frequency,
.g_frequency = tuner_g_frequency,
.s_type_addr = tuner_s_type_addr,
.s_config = tuner_s_config,
};
static const struct v4l2_subdev_video_ops tuner_video_ops = {
.s_std = tuner_s_std,
};
static const struct v4l2_subdev_ops tuner_ops = {
.core = &tuner_core_ops,
.tuner = &tuner_tuner_ops,
.video = &tuner_video_ops,
};
/*
* I2C structs and module init functions
*/
static const struct dev_pm_ops tuner_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tuner_suspend, tuner_resume)
};
static const struct i2c_device_id tuner_id[] = {
{ "tuner", }, /* autodetect */
{ }
};
MODULE_DEVICE_TABLE(i2c, tuner_id);
static struct i2c_driver tuner_driver = {
.driver = {
.name = "tuner",
.pm = &tuner_pm_ops,
},
.probe = tuner_probe,
.remove = tuner_remove,
.command = tuner_command,
.id_table = tuner_id,
};
module_i2c_driver(tuner_driver);
MODULE_DESCRIPTION("device driver for various TV and TV+FM radio tuners");
MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/v4l2-core/tuner-core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* v4l2-fh.c
*
* V4L2 file handles.
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
* Contact: Sakari Ailus <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
{
fh->vdev = vdev;
/* Inherit from video_device. May be overridden by the driver. */
fh->ctrl_handler = vdev->ctrl_handler;
INIT_LIST_HEAD(&fh->list);
set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
/*
* determine_valid_ioctls() does not know if struct v4l2_fh
* is used by this driver, but here we do. So enable the
* prio ioctls here.
*/
set_bit(_IOC_NR(VIDIOC_G_PRIORITY), vdev->valid_ioctls);
set_bit(_IOC_NR(VIDIOC_S_PRIORITY), vdev->valid_ioctls);
fh->prio = V4L2_PRIORITY_UNSET;
init_waitqueue_head(&fh->wait);
INIT_LIST_HEAD(&fh->available);
INIT_LIST_HEAD(&fh->subscribed);
fh->sequence = -1;
mutex_init(&fh->subscribe_lock);
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
void v4l2_fh_add(struct v4l2_fh *fh)
{
unsigned long flags;
v4l2_prio_open(fh->vdev->prio, &fh->prio);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
list_add(&fh->list, &fh->vdev->fh_list);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_fh_add);
int v4l2_fh_open(struct file *filp)
{
struct video_device *vdev = video_devdata(filp);
struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
filp->private_data = fh;
if (fh == NULL)
return -ENOMEM;
v4l2_fh_init(fh, vdev);
v4l2_fh_add(fh);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fh_open);
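/*
 * Editorial usage sketch, not part of the original file: drivers that
 * need no per-file state beyond the file handle can plug the helpers
 * above directly into their file operations. "my_fops" is a
 * hypothetical name.
 */
#if 0
static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= v4l2_fh_release,
	.unlocked_ioctl	= video_ioctl2,
};
#endif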
void v4l2_fh_del(struct v4l2_fh *fh)
{
unsigned long flags;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
list_del_init(&fh->list);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
v4l2_prio_close(fh->vdev->prio, fh->prio);
}
EXPORT_SYMBOL_GPL(v4l2_fh_del);
void v4l2_fh_exit(struct v4l2_fh *fh)
{
if (fh->vdev == NULL)
return;
v4l_disable_media_source(fh->vdev);
v4l2_event_unsubscribe_all(fh);
mutex_destroy(&fh->subscribe_lock);
fh->vdev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
int v4l2_fh_release(struct file *filp)
{
struct v4l2_fh *fh = filp->private_data;
if (fh) {
v4l2_fh_del(fh);
v4l2_fh_exit(fh);
kfree(fh);
filp->private_data = NULL;
}
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fh_release);
int v4l2_fh_is_singular(struct v4l2_fh *fh)
{
unsigned long flags;
int is_singular;
if (fh == NULL || fh->vdev == NULL)
return 0;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
is_singular = list_is_singular(&fh->list);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
return is_singular;
}
EXPORT_SYMBOL_GPL(v4l2_fh_is_singular);
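/*
 * Editorial sketch of a common pattern built on the helper above:
 * power up the hardware only when the first file handle is opened.
 * "my_open" and "my_power_on" are hypothetical names.
 */
#if 0
static int my_open(struct file *filp)
{
	int ret = v4l2_fh_open(filp);

	if (ret)
		return ret;
	/* Only the first open of the device node powers it up. */
	if (v4l2_fh_is_singular(filp->private_data))
		my_power_on();
	return 0;
}
#endif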
| linux-master | drivers/media/v4l2-core/v4l2-fh.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Video for Linux Two
*
* A generic video device interface for the LINUX operating system
* using a set of device structures/vectors for low level operations.
*
* This file replaces the videodev.c file that comes with the
* regular kernel distribution.
*
* Author: Bill Dirks <[email protected]>
* based on code by Alan Cox, <[email protected]>
*/
/*
* Video capture interface for Linux
*
* A generic video device interface for the LINUX operating system
* using a set of device structures/vectors for low level operations.
*
* Author: Alan Cox, <[email protected]>
*
* Fixes:
*/
/*
* Video4linux 1/2 integration by Justin Schoeman
* <[email protected]>
* 2.4 PROCFS support ported from 2.4 kernels by
* Iñaki García Etxebarria <[email protected]>
* Makefile fix by "W. Michael Petullo" <[email protected]>
* 2.4 devfs support ported from 2.4 kernels by
* Dan Merillat <[email protected]>
* Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <linux/videodev2.h>
/*
*
* V 4 L 2 D R I V E R H E L P E R A P I
*
*/
/*
* Video Standard Operations (contributed by Michael Schimek)
*/
/* Helper functions for control handling */
/* Fill in a struct v4l2_queryctrl */
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
{
const char *name;
s64 min = _min;
s64 max = _max;
u64 step = _step;
s64 def = _def;
v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
&min, &max, &step, &def, &qctrl->flags);
if (name == NULL)
return -EINVAL;
qctrl->minimum = min;
qctrl->maximum = max;
qctrl->step = step;
qctrl->default_value = def;
qctrl->reserved[0] = qctrl->reserved[1] = 0;
strscpy(qctrl->name, name, sizeof(qctrl->name));
return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
/* Clamp x to be between min and max, aligned to a multiple of 2^align. min
* and max don't have to be aligned, but there must be at least one valid
* value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
* of 16 between 17 and 31. */
static unsigned int clamp_align(unsigned int x, unsigned int min,
unsigned int max, unsigned int align)
{
/* Bits that must be zero to be aligned */
unsigned int mask = ~((1 << align) - 1);
/* Clamp to aligned min and max */
x = clamp(x, (min + ~mask) & mask, max & mask);
/* Round to nearest aligned value */
if (align)
x = (x + (1 << (align - 1))) & mask;
return x;
}
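/*
 * Editorial worked example: clamp_align(19, 16, 40, 2) clamps 19 to the
 * aligned bounds [16, 40] and then rounds it to the nearest multiple of
 * 2^2 = 4, giving 20. With align == 0 the mask is all ones and the value
 * is only clamped, never rounded.
 */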
static unsigned int clamp_roundup(unsigned int x, unsigned int min,
unsigned int max, unsigned int alignment)
{
x = clamp(x, min, max);
if (alignment)
x = round_up(x, alignment);
return x;
}
void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
unsigned int walign,
u32 *h, unsigned int hmin, unsigned int hmax,
unsigned int halign, unsigned int salign)
{
*w = clamp_align(*w, wmin, wmax, walign);
*h = clamp_align(*h, hmin, hmax, halign);
/* Usually we don't need to align the size and are done now. */
if (!salign)
return;
/* How much alignment do we have? */
walign = __ffs(*w);
halign = __ffs(*h);
/* Enough to satisfy the image alignment? */
if (walign + halign < salign) {
/* Max walign where there is still a valid width */
unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
/* Max halign where there is still a valid height */
unsigned int hmaxa = __fls(hmax ^ (hmin - 1));
/* up the smaller alignment until we have enough */
do {
if (halign >= hmaxa ||
(walign <= halign && walign < wmaxa)) {
*w = clamp_align(*w, wmin, wmax, walign + 1);
walign = __ffs(*w);
} else {
*h = clamp_align(*h, hmin, hmax, halign + 1);
halign = __ffs(*h);
}
} while (halign + walign < salign);
}
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
const void *
__v4l2_find_nearest_size(const void *array, size_t array_size,
size_t entry_size, size_t width_offset,
size_t height_offset, s32 width, s32 height)
{
u32 error, min_error = U32_MAX;
const void *best = NULL;
unsigned int i;
if (!array)
return NULL;
for (i = 0; i < array_size; i++, array += entry_size) {
const u32 *entry_width = array + width_offset;
const u32 *entry_height = array + height_offset;
error = abs(*entry_width - width) + abs(*entry_height - height);
if (error > min_error)
continue;
min_error = error;
best = array;
if (!error)
break;
}
return best;
}
EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size);
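/*
 * Editorial usage sketch: drivers normally reach the function above
 * through the v4l2_find_nearest_size() macro from <media/v4l2-common.h>,
 * which derives the entry size and member offsets from a format array.
 * The array and variable names below are hypothetical.
 */
#if 0
static const struct my_frmsize {
	u32 width;
	u32 height;
} my_sizes[] = {
	{  640,  480 },
	{ 1280,  720 },
	{ 1920, 1080 },
};

/* Picks { 1280, 720 } for a request of 1200x700 (smallest |dw| + |dh|). */
const struct my_frmsize *best =
	v4l2_find_nearest_size(my_sizes, ARRAY_SIZE(my_sizes),
			       width, height, 1200, 700);
#endif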
int v4l2_g_parm_cap(struct video_device *vdev,
struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
struct v4l2_subdev_frame_interval ival = { 0 };
int ret;
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
if (vdev->device_caps & V4L2_CAP_READWRITE)
a->parm.capture.readbuffers = 2;
if (v4l2_subdev_has_op(sd, video, g_frame_interval))
a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
ret = v4l2_subdev_call(sd, video, g_frame_interval, &ival);
if (!ret)
a->parm.capture.timeperframe = ival.interval;
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_g_parm_cap);
int v4l2_s_parm_cap(struct video_device *vdev,
struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
struct v4l2_subdev_frame_interval ival = {
.interval = a->parm.capture.timeperframe
};
int ret;
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
memset(&a->parm, 0, sizeof(a->parm));
if (vdev->device_caps & V4L2_CAP_READWRITE)
a->parm.capture.readbuffers = 2;
else
a->parm.capture.readbuffers = 0;
if (v4l2_subdev_has_op(sd, video, g_frame_interval))
a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
ret = v4l2_subdev_call(sd, video, s_frame_interval, &ival);
if (!ret)
a->parm.capture.timeperframe = ival.interval;
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_s_parm_cap);
const struct v4l2_format_info *v4l2_format_info(u32 format)
{
static const struct v4l2_format_info formats[] = {
/* RGB formats */
{ .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
/* YUV packed formats */
{ .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_MT2110T, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
.block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
{ .format = V4L2_PIX_FMT_MT2110R, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
.block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
/* YUV planar formats */
{ .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_P012, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
{ .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
{ .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
/* Tiled YUV formats */
{ .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_NV15_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
.block_w = { 4, 2, 0, 0 }, .block_h = { 1, 1, 0, 0 }},
{ .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
/* YUV planar formats, non contiguous variant */
{ .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_P012M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
/* Bayer RGB formats */
{ .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(formats); ++i)
if (formats[i].format == format)
return &formats[i];
return NULL;
}
EXPORT_SYMBOL(v4l2_format_info);
static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane)
{
if (!info->block_w[plane])
return 1;
return info->block_w[plane];
}
static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane)
{
if (!info->block_h[plane])
return 1;
return info->block_h[plane];
}
void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
const struct v4l2_frmsize_stepwise *frmsize)
{
if (!frmsize)
return;
/*
* Clamp width/height to meet min/max constraints and round it up to
* macroblock alignment.
*/
*width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width,
frmsize->step_width);
*height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height,
frmsize->step_height);
}
EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints);
int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
u32 pixelformat, u32 width, u32 height)
{
const struct v4l2_format_info *info;
struct v4l2_plane_pix_format *plane;
int i;
info = v4l2_format_info(pixelformat);
if (!info)
return -EINVAL;
pixfmt->width = width;
pixfmt->height = height;
pixfmt->pixelformat = pixelformat;
pixfmt->num_planes = info->mem_planes;
if (info->mem_planes == 1) {
plane = &pixfmt->plane_fmt[0];
plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
plane->sizeimage = 0;
for (i = 0; i < info->comp_planes; i++) {
unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
unsigned int aligned_width;
unsigned int aligned_height;
aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
aligned_height = ALIGN(height, v4l2_format_block_height(info, i));
plane->sizeimage += info->bpp[i] *
DIV_ROUND_UP(aligned_width, hdiv) *
DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
}
} else {
for (i = 0; i < info->comp_planes; i++) {
unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
unsigned int aligned_width;
unsigned int aligned_height;
aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
aligned_height = ALIGN(height, v4l2_format_block_height(info, i));
plane = &pixfmt->plane_fmt[i];
plane->bytesperline =
info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv) / info->bpp_div[i];
plane->sizeimage =
plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv);
}
}
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);
int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
u32 width, u32 height)
{
const struct v4l2_format_info *info;
int i;
info = v4l2_format_info(pixelformat);
if (!info)
return -EINVAL;
/* Single planar API cannot be used for multi plane formats. */
if (info->mem_planes > 1)
return -EINVAL;
pixfmt->width = width;
pixfmt->height = height;
pixfmt->pixelformat = pixelformat;
pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
pixfmt->sizeimage = 0;
for (i = 0; i < info->comp_planes; i++) {
unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
unsigned int aligned_width;
unsigned int aligned_height;
aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
aligned_height = ALIGN(height, v4l2_format_block_height(info, i));
pixfmt->sizeimage += info->bpp[i] *
DIV_ROUND_UP(aligned_width, hdiv) *
DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
}
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
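/*
 * Editorial worked example: for V4L2_PIX_FMT_NV12 at 1920x1080 the table
 * above gives bpp = { 1, 2 }, hdiv = vdiv = 2, so v4l2_fill_pixfmt()
 * computes bytesperline = 1920 and sizeimage =
 * 1920 * 1080 + 2 * (1920 / 2) * (1080 / 2) = 3110400, i.e. the usual
 * 1.5 bytes per pixel of a 4:2:0 format.
 */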
s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
unsigned int div)
{
struct v4l2_ctrl *ctrl;
s64 freq;
ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ);
if (ctrl) {
struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
int ret;
qm.index = v4l2_ctrl_g_ctrl(ctrl);
ret = v4l2_querymenu(handler, &qm);
if (ret)
return -ENOENT;
freq = qm.value;
} else {
if (!mul || !div)
return -ENOENT;
ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE);
if (!ctrl)
return -ENOENT;
freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);
pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
__func__);
pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
__func__);
}
return freq > 0 ? freq : -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
/*
* Simplify a fraction using a simple continued fraction decomposition. The
* idea here is to convert fractions such as 333333/10000000 to 1/30 using
* 32 bit arithmetic only. The algorithm is not perfect and relies upon two
 * arbitrary parameters to remove non-significant terms from the simple
* continued fraction decomposition. Using 8 and 333 for n_terms and threshold
* respectively seems to give nice results.
*/
void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
unsigned int n_terms, unsigned int threshold)
{
u32 *an;
u32 x, y, r;
unsigned int i, n;
an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL);
if (an == NULL)
return;
/*
* Convert the fraction to a simple continued fraction. See
* https://en.wikipedia.org/wiki/Continued_fraction
* Stop if the current term is bigger than or equal to the given
* threshold.
*/
x = *numerator;
y = *denominator;
for (n = 0; n < n_terms && y != 0; ++n) {
an[n] = x / y;
if (an[n] >= threshold) {
if (n < 2)
n++;
break;
}
r = x - an[n] * y;
x = y;
y = r;
}
/* Expand the simple continued fraction back to an integer fraction. */
x = 0;
y = 1;
for (i = n; i > 0; --i) {
r = y;
y = an[i-1] * y + x;
x = r;
}
*numerator = y;
*denominator = x;
kfree(an);
}
EXPORT_SYMBOL_GPL(v4l2_simplify_fraction);
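/*
 * Editorial worked example for the function above, with the suggested
 * n_terms = 8 and threshold = 333: for 333333/10000000 the loop computes
 * a0 = 333333 / 10000000 = 0 and a1 = 10000000 / 333333 = 30; the next
 * term, 333333 / 10 = 33333, exceeds the threshold, so the expansion
 * stops at [0; 30], which rebuilds to the fraction 1/30.
 */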
/*
* Convert a fraction to a frame interval in 100ns multiples. The idea here is
* to compute numerator / denominator * 10000000 using 32 bit fixed point
* arithmetic only.
*/
u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator)
{
u32 multiplier;
/* Saturate the result if the operation would overflow. */
if (denominator == 0 ||
numerator/denominator >= ((u32)-1)/10000000)
return (u32)-1;
/*
* Divide both the denominator and the multiplier by two until
* numerator * multiplier doesn't overflow. If anyone knows a better
* algorithm please let me know.
*/
multiplier = 10000000;
while (numerator > ((u32)-1)/multiplier) {
multiplier /= 2;
denominator /= 2;
}
return denominator ? numerator * multiplier / denominator : 0;
}
EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval);
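/*
 * Editorial worked example: a 30 fps frame interval of 1/30 becomes
 * 1 * 10000000 / 30 = 333333, i.e. 33.3333 ms expressed in 100 ns units,
 * the inverse of the v4l2_simplify_fraction() example above.
 */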
| linux-master | drivers/media/v4l2-core/v4l2-common.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Memory-to-memory device framework for Video for Linux 2 and vb2.
*
* Helper functions for devices that use vb2 buffers for both their
* source and destination.
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* Pawel Osciak, <[email protected]>
* Marek Szyprowski, <[email protected]>
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
MODULE_DESCRIPTION("Mem to mem device framework for vb2");
MODULE_AUTHOR("Pawel Osciak, <[email protected]>");
MODULE_LICENSE("GPL");
static bool debug;
module_param(debug, bool, 0644);
#define dprintk(fmt, arg...) \
do { \
if (debug) \
printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
} while (0)
/* Instance is already queued on the job_queue */
#define TRANS_QUEUED (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING (1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT (1 << 2)
/* The job queue is not running new jobs */
#define QUEUE_PAUSED (1 << 0)
/* Offset base for buffers on the destination queue - used to distinguish
* between source and destination buffers when mmapping - they receive the same
* offsets but for different queues */
#define DST_QUEUE_OFF_BASE (1 << 30)
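/*
 * Editorial note on the offset base above: an mmap() offset below 1 << 30
 * addresses the OUTPUT (source) queue, while an offset at or above it
 * addresses the CAPTURE (destination) queue. v4l2_m2m_adjust_mem_offset()
 * below adds the base when reporting CAPTURE buffers to userspace, and
 * the matching m2m mmap helper (later in this file) subtracts it again
 * before calling into vb2.
 */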
enum v4l2_m2m_entity_type {
MEM2MEM_ENT_TYPE_SOURCE,
MEM2MEM_ENT_TYPE_SINK,
MEM2MEM_ENT_TYPE_PROC
};
static const char * const m2m_entity_name[] = {
"source",
"sink",
"proc"
};
/**
* struct v4l2_m2m_dev - per-device context
* @source: &struct media_entity pointer with the source entity
* Used only when the M2M device is registered via
* v4l2_m2m_register_media_controller().
* @source_pad: &struct media_pad with the source pad.
* Used only when the M2M device is registered via
* v4l2_m2m_register_media_controller().
* @sink: &struct media_entity pointer with the sink entity
* Used only when the M2M device is registered via
* v4l2_m2m_register_media_controller().
* @sink_pad: &struct media_pad with the sink pad.
* Used only when the M2M device is registered via
* v4l2_m2m_register_media_controller().
* @proc: &struct media_entity pointer with the M2M device itself.
* @proc_pads: &struct media_pad with the @proc pads.
* Used only when the M2M device is registered via
 *		v4l2_m2m_register_media_controller().
 * @intf_devnode: &struct media_intf devnode pointer with the interface
 *		that controls the M2M device.
* @curr_ctx: currently running instance
* @job_queue: instances queued to run
* @job_spinlock: protects job_queue
* @job_work: worker to run queued jobs.
* @job_queue_flags: flags of the queue status, %QUEUE_PAUSED.
* @m2m_ops: driver callbacks
*/
struct v4l2_m2m_dev {
struct v4l2_m2m_ctx *curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
struct media_entity *source;
struct media_pad source_pad;
struct media_entity sink;
struct media_pad sink_pad;
struct media_entity proc;
struct media_pad proc_pads[2];
struct media_intf_devnode *intf_devnode;
#endif
struct list_head job_queue;
spinlock_t job_spinlock;
struct work_struct job_work;
unsigned long job_queue_flags;
const struct v4l2_m2m_ops *m2m_ops;
};
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
if (V4L2_TYPE_IS_OUTPUT(type))
return &m2m_ctx->out_q_ctx;
else
return &m2m_ctx->cap_q_ctx;
}
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
struct v4l2_m2m_queue_ctx *q_ctx;
q_ctx = get_queue_ctx(m2m_ctx, type);
if (!q_ctx)
return NULL;
return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
struct v4l2_m2m_buffer *b;
unsigned long flags;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
if (list_empty(&q_ctx->rdy_queue)) {
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
return NULL;
}
b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
struct v4l2_m2m_buffer *b;
unsigned long flags;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
if (list_empty(&q_ctx->rdy_queue)) {
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
return NULL;
}
b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
struct v4l2_m2m_buffer *b;
unsigned long flags;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
if (list_empty(&q_ctx->rdy_queue)) {
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
return NULL;
}
b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
list_del(&b->list);
q_ctx->num_rdy--;
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
struct vb2_v4l2_buffer *vbuf)
{
struct v4l2_m2m_buffer *b;
unsigned long flags;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
list_del(&b->list);
q_ctx->num_rdy--;
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
struct v4l2_m2m_buffer *b, *tmp;
struct vb2_v4l2_buffer *ret = NULL;
unsigned long flags;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
if (b->vb.vb2_buf.index == idx) {
list_del(&b->list);
q_ctx->num_rdy--;
ret = &b->vb;
break;
}
}
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
/*
* Scheduling handlers
*/
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
unsigned long flags;
void *ret = NULL;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
if (m2m_dev->curr_ctx)
ret = m2m_dev->curr_ctx->priv;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
/**
* v4l2_m2m_try_run() - select next job to perform and run it if possible
* @m2m_dev: per-device context
*
* Get next transaction (if present) from the waiting jobs list and run it.
*
* Note that this function can run on a given v4l2_m2m_ctx context,
* but call .device_run for another context.
*/
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
unsigned long flags;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
if (NULL != m2m_dev->curr_ctx) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("Another instance is running, won't run now\n");
return;
}
if (list_empty(&m2m_dev->job_queue)) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("No job pending\n");
return;
}
if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("Running new jobs is paused\n");
return;
}
m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
struct v4l2_m2m_ctx, queue);
m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
/*
* __v4l2_m2m_try_queue() - queue a job
* @m2m_dev: m2m device
* @m2m_ctx: m2m context
*
* Check if this context is ready to queue a job.
*
* This function can run in interrupt context.
*/
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx)
{
unsigned long flags_job;
struct vb2_v4l2_buffer *dst, *src;
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
if (!m2m_ctx->out_q_ctx.q.streaming
|| !m2m_ctx->cap_q_ctx.q.streaming) {
dprintk("Streaming needs to be on for both queues\n");
return;
}
spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
/* If the context is aborted then don't schedule it */
if (m2m_ctx->job_flags & TRANS_ABORT) {
dprintk("Aborted context\n");
goto job_unlock;
}
if (m2m_ctx->job_flags & TRANS_QUEUED) {
dprintk("On job queue already\n");
goto job_unlock;
}
src = v4l2_m2m_next_src_buf(m2m_ctx);
dst = v4l2_m2m_next_dst_buf(m2m_ctx);
if (!src && !m2m_ctx->out_q_ctx.buffered) {
dprintk("No input buffers available\n");
goto job_unlock;
}
if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
dprintk("No output buffers available\n");
goto job_unlock;
}
m2m_ctx->new_frame = true;
if (src && dst && dst->is_held &&
dst->vb2_buf.copied_timestamp &&
dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
dprintk("Timestamp mismatch, returning held capture buffer\n");
dst->is_held = false;
v4l2_m2m_dst_buf_remove(m2m_ctx);
v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
dst = v4l2_m2m_next_dst_buf(m2m_ctx);
if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
dprintk("No output buffers available after returning held buffer\n");
goto job_unlock;
}
}
if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
dst->vb2_buf.timestamp != src->vb2_buf.timestamp;
if (m2m_ctx->has_stopped) {
dprintk("Device has stopped\n");
goto job_unlock;
}
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
dprintk("Driver not ready\n");
goto job_unlock;
}
list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
m2m_ctx->job_flags |= TRANS_QUEUED;
job_unlock:
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}
/**
* v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
* @m2m_ctx: m2m context
*
* Check if this context is ready to queue a job. If suitable,
* run the next queued job on the mem2mem device.
*
* This function shouldn't run in interrupt context.
*
* Note that v4l2_m2m_try_schedule() can schedule one job for this context,
* and then run another job for another context.
*/
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;
__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
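/*
 * Editorial sketch of the scheduling flow implemented by the helpers
 * above: the driver's qbuf/streamon paths call v4l2_m2m_try_schedule(),
 * the framework invokes the driver's .device_run() once both queues have
 * buffers, and the driver signals completion (typically from its
 * interrupt handler) so the next queued job can run.
 * "my_device_run", "my_irq_handler" and the my_* types are hypothetical.
 */
#if 0
static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;

	/* Program the hardware with the next source/destination pair. */
	my_hw_start(ctx,
		    v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx),
		    v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx));
}

static irqreturn_t my_irq_handler(int irq, void *priv)
{
	struct my_dev *dev = priv;
	struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);

	v4l2_m2m_buf_done(v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx),
			  VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx),
			  VB2_BUF_STATE_DONE);
	/* Lets the framework schedule the next queued job. */
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
	return IRQ_HANDLED;
}
#endif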
/**
* v4l2_m2m_device_run_work() - run pending jobs for the context
* @work: Work structure used for scheduling the execution of this function.
*/
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
struct v4l2_m2m_dev *m2m_dev =
container_of(work, struct v4l2_m2m_dev, job_work);
v4l2_m2m_try_run(m2m_dev);
}
/**
* v4l2_m2m_cancel_job() - cancel pending jobs for the context
* @m2m_ctx: m2m context with jobs to be canceled
*
* In case of streamoff or release called on any context,
* 1] If the context is currently running, then abort job will be called
* 2] If the context is queued, then the context will be removed from
* the job_queue
*/
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
struct v4l2_m2m_dev *m2m_dev;
unsigned long flags;
m2m_dev = m2m_ctx->m2m_dev;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
m2m_ctx->job_flags |= TRANS_ABORT;
if (m2m_ctx->job_flags & TRANS_RUNNING) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
if (m2m_dev->m2m_ops->job_abort)
m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
wait_event(m2m_ctx->finished,
!(m2m_ctx->job_flags & TRANS_RUNNING));
} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
list_del(&m2m_ctx->queue);
m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("m2m_ctx: %p had been on queue and was removed\n",
m2m_ctx);
} else {
/* Do nothing, was not on queue/running */
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
}
}
/*
* Schedule the next job, called from v4l2_m2m_job_finish() or
* v4l2_m2m_buf_done_and_job_finish().
*/
static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx)
{
/*
* This instance might have more buffers ready, but since we do not
* allow more than one job on the job_queue per instance, each has
* to be scheduled separately after the previous one finishes.
*/
__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
/*
* We might be running in atomic context,
* but the job must be run in non-atomic context.
*/
schedule_work(&m2m_dev->job_work);
}
/*
* Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
* v4l2_m2m_buf_done_and_job_finish().
*/
static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx)
{
if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
dprintk("Called by an instance not currently running\n");
return false;
}
list_del(&m2m_dev->curr_ctx->queue);
m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
wake_up(&m2m_dev->curr_ctx->finished);
m2m_dev->curr_ctx = NULL;
return true;
}
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx)
{
unsigned long flags;
bool schedule_next;
/*
* This function should not be used for drivers that support
* holding capture buffers. Those should use
* v4l2_m2m_buf_done_and_job_finish() instead.
*/
WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
if (schedule_next)
v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx,
enum vb2_buffer_state state)
{
struct vb2_v4l2_buffer *src_buf, *dst_buf;
bool schedule_next = false;
unsigned long flags;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
if (WARN_ON(!src_buf || !dst_buf))
goto unlock;
dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
if (!dst_buf->is_held) {
v4l2_m2m_dst_buf_remove(m2m_ctx);
v4l2_m2m_buf_done(dst_buf, state);
}
/*
* If the request API is being used, returning the OUTPUT
* (src) buffer will wake-up any process waiting on the
* request file descriptor.
*
* Therefore, return the CAPTURE (dst) buffer first,
* to avoid signalling the request file descriptor
* before the CAPTURE buffer is done.
*/
v4l2_m2m_buf_done(src_buf, state);
schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
unlock:
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
if (schedule_next)
v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
{
unsigned long flags;
struct v4l2_m2m_ctx *curr_ctx;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
m2m_dev->job_queue_flags |= QUEUE_PAUSED;
curr_ctx = m2m_dev->curr_ctx;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
if (curr_ctx)
wait_event(curr_ctx->finished,
!(curr_ctx->job_flags & TRANS_RUNNING));
}
EXPORT_SYMBOL(v4l2_m2m_suspend);
void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
{
unsigned long flags;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
m2m_dev->job_queue_flags &= ~QUEUE_PAUSED;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_resume);
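/*
 * Editorial usage sketch (hypothetical driver; my_dev and the PM callback
 * names are assumptions): the suspend/resume pair above is meant to
 * bracket system sleep in the driver's dev_pm_ops:
 *
 *	static int my_pm_suspend(struct device *d)
 *	{
 *		struct my_dev *dev = dev_get_drvdata(d);
 *
 *		v4l2_m2m_suspend(dev->m2m_dev); // waits for the running job
 *		...                             // then power down hardware
 *		return 0;
 *	}
 *
 *	static int my_pm_resume(struct device *d)
 *	{
 *		struct my_dev *dev = dev_get_drvdata(d);
 *
 *		...                             // power up hardware first
 *		v4l2_m2m_resume(dev->m2m_dev);  // lets queued jobs run again
 *		return 0;
 *	}
 */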
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs)
{
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
ret = vb2_reqbufs(vq, reqbufs);
/* If count == 0, then the owner has released all buffers and is
no longer the owner of the queue. Otherwise we have an owner. */
if (ret == 0)
vq->owner = reqbufs->count ? file->private_data : NULL;
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
struct v4l2_buffer *buf)
{
/* Adjust MMAP memory offsets for the CAPTURE queue */
if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {
if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
unsigned int i;
for (i = 0; i < buf->length; ++i)
buf->m.planes[i].m.mem_offset
+= DST_QUEUE_OFF_BASE;
} else {
buf->m.offset += DST_QUEUE_OFF_BASE;
}
}
}
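/*
 * Editorial note with a worked example: a mem2mem device exposes both vb2
 * queues through a single video node, and vb2 numbers the mmap offsets of
 * each queue independently from 0, so the two ranges would collide. The
 * helper above therefore shifts all CAPTURE offsets by DST_QUEUE_OFF_BASE
 * (1 << 30): userspace sees e.g. OUTPUT buffer 0 at offset 0x0 and CAPTURE
 * buffer 0 at offset 0x40000000, and v4l2_m2m_mmap() later subtracts the
 * base again to pick the right queue.
 */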
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
ret = vb2_querybuf(vq, buf);
if (ret)
return ret;
/* Adjust MMAP memory offsets for the CAPTURE queue */
v4l2_m2m_adjust_mem_offset(vq, buf);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
/*
* This will add the LAST flag and mark the buffer management
* state as stopped.
* This is called when the last capture buffer must be flagged as LAST
* in draining mode from the encoder/decoder driver buf_queue() callback
* or from v4l2_update_last_buf_state() when a capture buffer is available.
*/
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
struct vb2_v4l2_buffer *vbuf)
{
vbuf->flags |= V4L2_BUF_FLAG_LAST;
vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
v4l2_m2m_mark_stopped(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done);
/* When stop command is issued, update buffer management state */
static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx)
{
struct vb2_v4l2_buffer *next_dst_buf;
if (m2m_ctx->is_draining)
return -EBUSY;
if (m2m_ctx->has_stopped)
return 0;
m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
m2m_ctx->is_draining = true;
/*
* The processing of the last output buffer queued before
* the STOP command is expected to mark the buffer management
* state as stopped with v4l2_m2m_mark_stopped().
*/
if (m2m_ctx->last_src_buf)
return 0;
/*
* In case the output queue is empty, try to mark the last capture
* buffer as LAST.
*/
next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
if (!next_dst_buf) {
/*
* Wait for the next queued one in encoder/decoder driver
* buf_queue() callback using the v4l2_m2m_dst_buf_is_last()
* helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet
* streaming.
*/
m2m_ctx->next_buf_last = true;
return 0;
}
v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf);
return 0;
}
/*
* Updates the encoding/decoding buffer management state, should
* be called from the encoder/decoder driver's start_streaming()
*/
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
struct vb2_queue *q)
{
/* If streaming starts again, untag the last output buffer */
if (V4L2_TYPE_IS_OUTPUT(q->type))
m2m_ctx->last_src_buf = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state);
/*
* Updates the encoding/decoding buffer management state, should
* be called from the encoder/decoder driver's stop_streaming()
*/
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
struct vb2_queue *q)
{
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
/*
* If in draining state, either mark the next dst buffer as done, or
* flag the next one to be marked as done, either in the encoder/decoder
* driver buf_queue() callback using the v4l2_m2m_dst_buf_is_last()
* helper or in v4l2_m2m_qbuf() if the encoder/decoder is not yet
* streaming
*/
if (m2m_ctx->is_draining) {
struct vb2_v4l2_buffer *next_dst_buf;
m2m_ctx->last_src_buf = NULL;
next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
if (!next_dst_buf)
m2m_ctx->next_buf_last = true;
else
v4l2_m2m_last_buffer_done(m2m_ctx,
next_dst_buf);
}
} else {
v4l2_m2m_clear_state(m2m_ctx);
}
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state);
static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx,
struct vb2_queue *q)
{
struct vb2_buffer *vb;
struct vb2_v4l2_buffer *vbuf;
unsigned int i;
if (WARN_ON(q->is_output))
return;
if (list_empty(&q->queued_list))
return;
vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry);
for (i = 0; i < vb->num_planes; i++)
vb2_set_plane_payload(vb, i, 0);
/*
* Since the buffer hasn't been queued to the ready queue,
* mark it active and owned before marking it LAST and DONE
*/
vb->state = VB2_BUF_STATE_ACTIVE;
atomic_inc(&q->owned_by_drv_count);
vbuf = to_vb2_v4l2_buffer(vb);
vbuf->field = V4L2_FIELD_NONE;
v4l2_m2m_last_buffer_done(m2m_ctx, vbuf);
}
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
struct video_device *vdev = video_devdata(file);
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
(buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
dprintk("%s: requests cannot be used with capture buffers\n",
__func__);
return -EPERM;
}
ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
if (ret)
return ret;
/* Adjust MMAP memory offsets for the CAPTURE queue */
v4l2_m2m_adjust_mem_offset(vq, buf);
/*
* If the capture queue is streaming, but streaming hasn't started
* on the device and the device was asked to stop, mark the previously
* queued buffer as DONE with the LAST flag since it won't be queued
* on the device.
*/
if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) &&
(v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx)))
v4l2_m2m_force_last_buf_done(m2m_ctx, vq);
else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
v4l2_m2m_try_schedule(m2m_ctx);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
if (ret)
return ret;
/* Adjust MMAP memory offsets for the CAPTURE queue */
v4l2_m2m_adjust_mem_offset(vq, buf);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
struct video_device *vdev = video_devdata(file);
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
if (ret)
return ret;
/* Adjust MMAP memory offsets for the CAPTURE queue */
v4l2_m2m_adjust_mem_offset(vq, buf);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_create_buffers *create)
{
struct vb2_queue *vq;
vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_exportbuffer *eb)
{
struct vb2_queue *vq;
vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, type);
ret = vb2_streamon(vq, type);
if (!ret)
v4l2_m2m_try_schedule(m2m_ctx);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
struct v4l2_m2m_dev *m2m_dev;
struct v4l2_m2m_queue_ctx *q_ctx;
unsigned long flags_job, flags;
int ret;
/* wait until the current context is dequeued from job_queue */
v4l2_m2m_cancel_job(m2m_ctx);
q_ctx = get_queue_ctx(m2m_ctx, type);
ret = vb2_streamoff(&q_ctx->q, type);
if (ret)
return ret;
m2m_dev = m2m_ctx->m2m_dev;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
/* We should not be scheduled anymore, since we're dropping a queue. */
if (m2m_ctx->job_flags & TRANS_QUEUED)
list_del(&m2m_ctx->queue);
m2m_ctx->job_flags = 0;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
/* Drop queue, since streamoff returns device to the same state as after
* calling reqbufs. */
INIT_LIST_HEAD(&q_ctx->rdy_queue);
q_ctx->num_rdy = 0;
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
if (m2m_dev->curr_ctx == m2m_ctx) {
m2m_dev->curr_ctx = NULL;
wake_up(&m2m_ctx->finished);
}
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
static __poll_t v4l2_m2m_poll_for_data(struct file *file,
struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait)
{
struct vb2_queue *src_q, *dst_q;
__poll_t rc = 0;
unsigned long flags;
src_q = v4l2_m2m_get_src_vq(m2m_ctx);
dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
/*
* There has to be at least one buffer queued on each queued_list, which
* means a buffer is either already in the driver or waiting for the
* driver to claim it and start processing.
*/
if ((!vb2_is_streaming(src_q) || src_q->error ||
list_empty(&src_q->queued_list)) &&
(!vb2_is_streaming(dst_q) || dst_q->error ||
(list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued)))
return EPOLLERR;
spin_lock_irqsave(&src_q->done_lock, flags);
if (!list_empty(&src_q->done_list))
rc |= EPOLLOUT | EPOLLWRNORM;
spin_unlock_irqrestore(&src_q->done_lock, flags);
spin_lock_irqsave(&dst_q->done_lock, flags);
/*
* If the last buffer was dequeued from the capture queue, signal
* userspace. DQBUF(CAPTURE) will return -EPIPE.
*/
if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued)
rc |= EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&dst_q->done_lock, flags);
return rc;
}
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait)
{
struct video_device *vfd = video_devdata(file);
struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx);
struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
__poll_t req_events = poll_requested_events(wait);
__poll_t rc = 0;
/*
* poll_wait() MUST be called on the first invocation on all the
* potential queues of interest, even if we are not interested in their
* events during this first call. Failure to do so will result in the
* queue's events being ignored, because the poll_table won't be able
* to add new wait queues thereafter.
*/
poll_wait(file, &src_q->done_wq, wait);
poll_wait(file, &dst_q->done_wq, wait);
if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
struct v4l2_fh *fh = file->private_data;
poll_wait(file, &fh->wait, wait);
if (v4l2_event_pending(fh))
rc |= EPOLLPRI;
}
return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct vm_area_struct *vma)
{
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
struct vb2_queue *vq;
if (offset < DST_QUEUE_OFF_BASE) {
vq = v4l2_m2m_get_src_vq(m2m_ctx);
} else {
vq = v4l2_m2m_get_dst_vq(m2m_ctx);
vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
}
return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
#ifndef CONFIG_MMU
unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct v4l2_fh *fh = file->private_data;
unsigned long offset = pgoff << PAGE_SHIFT;
struct vb2_queue *vq;
if (offset < DST_QUEUE_OFF_BASE) {
vq = v4l2_m2m_get_src_vq(fh->m2m_ctx);
} else {
vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx);
pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
}
return vb2_get_unmapped_area(vq, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area);
#endif
#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
media_remove_intf_links(&m2m_dev->intf_devnode->intf);
media_devnode_remove(m2m_dev->intf_devnode);
media_entity_remove_links(m2m_dev->source);
media_entity_remove_links(&m2m_dev->sink);
media_entity_remove_links(&m2m_dev->proc);
media_device_unregister_entity(m2m_dev->source);
media_device_unregister_entity(&m2m_dev->sink);
media_device_unregister_entity(&m2m_dev->proc);
kfree(m2m_dev->source->name);
kfree(m2m_dev->sink.name);
kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);
static int v4l2_m2m_register_entity(struct media_device *mdev,
struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
struct video_device *vdev, int function)
{
struct media_entity *entity;
struct media_pad *pads;
char *name;
unsigned int len;
int num_pads;
int ret;
switch (type) {
case MEM2MEM_ENT_TYPE_SOURCE:
entity = m2m_dev->source;
pads = &m2m_dev->source_pad;
pads[0].flags = MEDIA_PAD_FL_SOURCE;
num_pads = 1;
break;
case MEM2MEM_ENT_TYPE_SINK:
entity = &m2m_dev->sink;
pads = &m2m_dev->sink_pad;
pads[0].flags = MEDIA_PAD_FL_SINK;
num_pads = 1;
break;
case MEM2MEM_ENT_TYPE_PROC:
entity = &m2m_dev->proc;
pads = m2m_dev->proc_pads;
pads[0].flags = MEDIA_PAD_FL_SINK;
pads[1].flags = MEDIA_PAD_FL_SOURCE;
num_pads = 2;
break;
default:
return -EINVAL;
}
entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
if (type != MEM2MEM_ENT_TYPE_PROC) {
entity->info.dev.major = VIDEO_MAJOR;
entity->info.dev.minor = vdev->minor;
}
len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
name = kmalloc(len, GFP_KERNEL);
if (!name)
return -ENOMEM;
snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
entity->name = name;
entity->function = function;
ret = media_entity_pads_init(entity, num_pads, pads);
if (ret)
return ret;
ret = media_device_register_entity(mdev, entity);
if (ret)
return ret;
return 0;
}
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
struct video_device *vdev, int function)
{
struct media_device *mdev = vdev->v4l2_dev->mdev;
struct media_link *link;
int ret;
if (!mdev)
return 0;
/* A memory-to-memory device consists of two DMA engine entities
 * and one video processing entity. The DMA engine entities are
 * linked to a V4L interface.
 */
/* Create the three entities with their pads */
m2m_dev->source = &vdev->entity;
ret = v4l2_m2m_register_entity(mdev, m2m_dev,
MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
if (ret)
return ret;
ret = v4l2_m2m_register_entity(mdev, m2m_dev,
MEM2MEM_ENT_TYPE_PROC, vdev, function);
if (ret)
goto err_rel_entity0;
ret = v4l2_m2m_register_entity(mdev, m2m_dev,
MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
if (ret)
goto err_rel_entity1;
/* Connect the three entities */
ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (ret)
goto err_rel_entity2;
ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (ret)
goto err_rm_links0;
/* Create video interface */
m2m_dev->intf_devnode = media_devnode_create(mdev,
MEDIA_INTF_T_V4L_VIDEO, 0,
VIDEO_MAJOR, vdev->minor);
if (!m2m_dev->intf_devnode) {
ret = -ENOMEM;
goto err_rm_links1;
}
/* Connect the two DMA engines to the interface */
link = media_create_intf_link(m2m_dev->source,
&m2m_dev->intf_devnode->intf,
MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (!link) {
ret = -ENOMEM;
goto err_rm_devnode;
}
link = media_create_intf_link(&m2m_dev->sink,
&m2m_dev->intf_devnode->intf,
MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (!link) {
ret = -ENOMEM;
goto err_rm_intf_link;
}
return 0;
err_rm_intf_link:
media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
media_entity_remove_links(&m2m_dev->proc);
media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
media_device_unregister_entity(&m2m_dev->proc);
kfree(m2m_dev->proc.name);
err_rel_entity1:
media_device_unregister_entity(&m2m_dev->sink);
kfree(m2m_dev->sink.name);
err_rel_entity0:
media_device_unregister_entity(m2m_dev->source);
kfree(m2m_dev->source->name);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
struct v4l2_m2m_dev *m2m_dev;
if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
return ERR_PTR(-EINVAL);
m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
if (!m2m_dev)
return ERR_PTR(-ENOMEM);
m2m_dev->curr_ctx = NULL;
m2m_dev->m2m_ops = m2m_ops;
INIT_LIST_HEAD(&m2m_dev->job_queue);
spin_lock_init(&m2m_dev->job_spinlock);
INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);
return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
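/*
 * Editorial usage sketch (hypothetical driver; my_device_run and friends
 * are assumptions): the m2m device is typically created once at probe time
 * and freed at remove time:
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run, // mandatory, see WARN_ON above
 *		.job_ready	= my_job_ready,  // optional
 *		.job_abort	= my_job_abort,  // optional
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 *	...
 *	v4l2_m2m_release(dev->m2m_dev);          // on remove
 */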
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
void *drv_priv,
int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
struct v4l2_m2m_ctx *m2m_ctx;
struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
int ret;
m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
if (!m2m_ctx)
return ERR_PTR(-ENOMEM);
m2m_ctx->priv = drv_priv;
m2m_ctx->m2m_dev = m2m_dev;
init_waitqueue_head(&m2m_ctx->finished);
out_q_ctx = &m2m_ctx->out_q_ctx;
cap_q_ctx = &m2m_ctx->cap_q_ctx;
INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
spin_lock_init(&out_q_ctx->rdy_spinlock);
spin_lock_init(&cap_q_ctx->rdy_spinlock);
INIT_LIST_HEAD(&m2m_ctx->queue);
ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
if (ret)
goto err;
/*
* Both queues should use the same mutex to lock the m2m context.
* This lock is used in some v4l2_m2m_* helpers.
*/
if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
ret = -EINVAL;
goto err;
}
m2m_ctx->q_lock = out_q_ctx->q.lock;
return m2m_ctx;
err:
kfree(m2m_ctx);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
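/*
 * Editorial usage sketch (hypothetical driver open() path; struct my_ctx
 * embedding a struct v4l2_fh and my_queue_init are assumptions). Note that
 * my_queue_init must give both vb2 queues the same mutex, as checked above:
 *
 *	static int my_open(struct file *file)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *		struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *		int ret;
 *
 *		if (!ctx)
 *			return -ENOMEM;
 *		v4l2_fh_init(&ctx->fh, video_devdata(file));
 *		file->private_data = &ctx->fh;
 *		ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
 *						    my_queue_init);
 *		if (IS_ERR(ctx->fh.m2m_ctx)) {
 *			ret = PTR_ERR(ctx->fh.m2m_ctx);
 *			v4l2_fh_exit(&ctx->fh);
 *			kfree(ctx);
 *			return ret;
 *		}
 *		v4l2_fh_add(&ctx->fh);
 *		return 0;
 *	}
 */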
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
/* wait until the current context is dequeued from job_queue */
v4l2_m2m_cancel_job(m2m_ctx);
vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
vb2_queue_release(&m2m_ctx->out_q_ctx.q);
kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
struct vb2_v4l2_buffer *vbuf)
{
struct v4l2_m2m_buffer *b = container_of(vbuf,
struct v4l2_m2m_buffer, vb);
struct v4l2_m2m_queue_ctx *q_ctx;
unsigned long flags;
q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
if (!q_ctx)
return;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
list_add_tail(&b->list, &q_ctx->rdy_queue);
q_ctx->num_rdy++;
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
struct vb2_v4l2_buffer *cap_vb,
bool copy_frame_flags)
{
u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
if (copy_frame_flags)
mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
V4L2_BUF_FLAG_BFRAME;
cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;
if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
cap_vb->timecode = out_vb->timecode;
cap_vb->field = out_vb->field;
cap_vb->flags &= ~mask;
cap_vb->flags |= out_vb->flags & mask;
cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
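/*
 * Editorial usage sketch: stateful codec drivers usually propagate the
 * OUTPUT buffer's timestamp (and optionally the frame flags) to the
 * CAPTURE buffer from their device_run() callback, before starting the
 * hardware (ctx->fh.m2m_ctx is a driver-side assumption):
 *
 *	struct vb2_v4l2_buffer *src, *dst;
 *
 *	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *	v4l2_m2m_buf_copy_metadata(src, dst, true);
 */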
void v4l2_m2m_request_queue(struct media_request *req)
{
struct media_request_object *obj, *obj_safe;
struct v4l2_m2m_ctx *m2m_ctx = NULL;
/*
* Queue all objects. Note that buffer objects are at the end of the
* objects list, after all other object types. Once buffer objects
* are queued, the driver might delete them immediately (if the driver
* processes the buffer at once), so we have to use
* list_for_each_entry_safe() to handle the case where the object we
* queue is deleted.
*/
list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
struct v4l2_m2m_ctx *m2m_ctx_obj;
struct vb2_buffer *vb;
if (!obj->ops->queue)
continue;
if (vb2_request_object_is_buffer(obj)) {
/* Sanity checks */
vb = container_of(obj, struct vb2_buffer, req_obj);
WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
m2m_ctx_obj = container_of(vb->vb2_queue,
struct v4l2_m2m_ctx,
out_q_ctx.q);
WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
m2m_ctx = m2m_ctx_obj;
}
/*
* The buffer we queue here can in theory be immediately
* unbound, hence the use of list_for_each_entry_safe()
* above and why we call the queue op last.
*/
obj->ops->queue(obj);
}
WARN_ON(!m2m_ctx);
if (m2m_ctx)
v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
/* Videobuf2 ioctl helpers */
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
struct v4l2_fh *fh = file->private_data;
return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
struct v4l2_create_buffers *create)
{
struct v4l2_fh *fh = file->private_data;
return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct v4l2_fh *fh = file->private_data;
return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct v4l2_fh *fh = file->private_data;
return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct v4l2_fh *fh = file->private_data;
return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct v4l2_fh *fh = file->private_data;
return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
struct v4l2_exportbuffer *eb)
{
struct v4l2_fh *fh = file->private_data;
return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct v4l2_fh *fh = file->private_data;
return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct v4l2_fh *fh = file->private_data;
return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *ec)
{
if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
return -EINVAL;
ec->flags = 0;
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *dc)
{
if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
return -EINVAL;
dc->flags = 0;
if (dc->cmd == V4L2_DEC_CMD_STOP) {
dc->stop.pts = 0;
} else if (dc->cmd == V4L2_DEC_CMD_START) {
dc->start.speed = 0;
dc->start.format = V4L2_DEC_START_FMT_NONE;
}
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
/*
* Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START
* Should be called from the encoder driver's encoder_cmd() callback
*/
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_encoder_cmd *ec)
{
if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
return -EINVAL;
if (ec->cmd == V4L2_ENC_CMD_STOP)
return v4l2_update_last_buf_state(m2m_ctx);
if (m2m_ctx->is_draining)
return -EBUSY;
if (m2m_ctx->has_stopped)
m2m_ctx->has_stopped = false;
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd);
/*
* Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START
* Should be called from the decoder driver's decoder_cmd() callback
*/
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_decoder_cmd *dc)
{
if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
return -EINVAL;
if (dc->cmd == V4L2_DEC_CMD_STOP)
return v4l2_update_last_buf_state(m2m_ctx);
if (m2m_ctx->is_draining)
return -EBUSY;
if (m2m_ctx->has_stopped)
m2m_ctx->has_stopped = false;
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);
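/*
 * Editorial usage sketch: a stateful decoder normally wires the helper
 * above through its ioctl ops, mirroring v4l2_m2m_ioctl_decoder_cmd()
 * below but leaving room for device-specific flushing (the function name
 * is hypothetical):
 *
 *	static int my_decoder_cmd(struct file *file, void *priv,
 *				  struct v4l2_decoder_cmd *dc)
 *	{
 *		struct v4l2_fh *fh = file->private_data;
 *		int ret;
 *
 *		ret = v4l2_m2m_ioctl_try_decoder_cmd(file, priv, dc);
 *		if (ret)
 *			return ret;
 *		ret = v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
 *		if (ret)
 *			return ret;
 *		// a device-specific drain trigger would go here
 *		return 0;
 *	}
 */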
int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec)
{
struct v4l2_fh *fh = file->private_data;
return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);
int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc)
{
struct v4l2_fh *fh = file->private_data;
return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);
int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *dc)
{
if (dc->cmd != V4L2_DEC_CMD_FLUSH)
return -EINVAL;
dc->flags = 0;
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc)
{
struct v4l2_fh *fh = file->private_data;
struct vb2_v4l2_buffer *out_vb, *cap_vb;
struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
unsigned long flags;
int ret;
ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
if (ret < 0)
return ret;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);
/*
* If there is an out buffer pending, then clear any HOLD flag.
*
* By clearing this flag we ensure that when this output
* buffer is processed any held capture buffer will be released.
*/
if (out_vb) {
out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
} else if (cap_vb && cap_vb->is_held) {
/*
* If there were no output buffers, but there is a
* capture buffer that is held, then release that
* buffer.
*/
cap_vb->is_held = false;
v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
}
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);
/*
* v4l2_file_operations helpers. It is assumed here that the same lock
* is used for the output and the capture buffer queues.
*/
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
struct v4l2_fh *fh = file->private_data;
return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
struct v4l2_fh *fh = file->private_data;
struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
__poll_t ret;
if (m2m_ctx->q_lock)
mutex_lock(m2m_ctx->q_lock);
ret = v4l2_m2m_poll(file, m2m_ctx, wait);
if (m2m_ctx->q_lock)
mutex_unlock(m2m_ctx->q_lock);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
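/*
 * Editorial usage sketch: with the helpers above, a mem2mem driver's file
 * operations can be almost entirely generic (my_open/my_release are
 * hypothetical):
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */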
| linux-master | drivers/media/v4l2-core/v4l2-mem2mem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
* Separated from fs stuff by Arnd Bergmann <[email protected]>
*
* Copyright (C) 1997-2000 Jakub Jelinek ([email protected])
* Copyright (C) 1998 Eddie C. Dost ([email protected])
* Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
* Copyright (C) 2003 Pavel Machek ([email protected])
* Copyright (C) 2005 Philippe De Muyter ([email protected])
* Copyright (C) 2008 Hans Verkuil <[email protected]>
*
* These routines maintain argument size conversion between 32bit and 64bit
* ioctls.
*/
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/videodev2.h>
#include <linux/v4l2-subdev.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-ioctl.h>
/*
* Per-ioctl data copy handlers.
*
* Those come in pairs, with a get_v4l2_foo() and a put_v4l2_foo() routine,
* where "v4l2_foo" is the name of the V4L2 struct.
*
* They basically get two __user pointers, one to a 32-bit struct that
* came from the userspace call and one to a 64-bit struct, also
* allocated in userspace, but filled internally by do_video_ioctl().
*
* For ioctls that have pointers inside it, the functions will also
* receive an ancillary buffer with extra space, used to pass extra
* data to the routine.
*/
struct v4l2_window32 {
struct v4l2_rect w;
__u32 field; /* enum v4l2_field */
__u32 chromakey;
compat_caddr_t clips; /* always NULL */
__u32 clipcount; /* always 0 */
compat_caddr_t bitmap; /* always NULL */
__u8 global_alpha;
};
static int get_v4l2_window32(struct v4l2_window *p64,
struct v4l2_window32 __user *p32)
{
struct v4l2_window32 w32;
if (copy_from_user(&w32, p32, sizeof(w32)))
return -EFAULT;
*p64 = (struct v4l2_window) {
.w = w32.w,
.field = w32.field,
.chromakey = w32.chromakey,
.clips = NULL,
.clipcount = 0,
.bitmap = NULL,
.global_alpha = w32.global_alpha,
};
return 0;
}
static int put_v4l2_window32(struct v4l2_window *p64,
struct v4l2_window32 __user *p32)
{
struct v4l2_window32 w32;
memset(&w32, 0, sizeof(w32));
w32 = (struct v4l2_window32) {
.w = p64->w,
.field = p64->field,
.chromakey = p64->chromakey,
.clips = 0,
.clipcount = 0,
.bitmap = 0,
.global_alpha = p64->global_alpha,
};
if (copy_to_user(p32, &w32, sizeof(w32)))
return -EFAULT;
return 0;
}
struct v4l2_format32 {
__u32 type; /* enum v4l2_buf_type */
union {
struct v4l2_pix_format pix;
struct v4l2_pix_format_mplane pix_mp;
struct v4l2_window32 win;
struct v4l2_vbi_format vbi;
struct v4l2_sliced_vbi_format sliced;
struct v4l2_sdr_format sdr;
struct v4l2_meta_format meta;
__u8 raw_data[200]; /* user-defined */
} fmt;
};
/**
* struct v4l2_create_buffers32 - VIDIOC_CREATE_BUFS32 argument
* @index: on return, index of the first created buffer
* @count: entry: number of requested buffers,
* return: number of created buffers
* @memory: buffer memory type
* @format: frame format, for which buffers are requested
* @capabilities: capabilities of this buffer type.
* @flags: additional buffer management attributes (ignored unless the
* queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability and
* configured for MMAP streaming I/O).
* @reserved: future extensions
*/
struct v4l2_create_buffers32 {
__u32 index;
__u32 count;
__u32 memory; /* enum v4l2_memory */
struct v4l2_format32 format;
__u32 capabilities;
__u32 flags;
__u32 reserved[6];
};
static int get_v4l2_format32(struct v4l2_format *p64,
struct v4l2_format32 __user *p32)
{
if (get_user(p64->type, &p32->type))
return -EFAULT;
switch (p64->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return copy_from_user(&p64->fmt.pix, &p32->fmt.pix,
sizeof(p64->fmt.pix)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
return copy_from_user(&p64->fmt.pix_mp, &p32->fmt.pix_mp,
sizeof(p64->fmt.pix_mp)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
return get_v4l2_window32(&p64->fmt.win, &p32->fmt.win);
case V4L2_BUF_TYPE_VBI_CAPTURE:
case V4L2_BUF_TYPE_VBI_OUTPUT:
return copy_from_user(&p64->fmt.vbi, &p32->fmt.vbi,
sizeof(p64->fmt.vbi)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
return copy_from_user(&p64->fmt.sliced, &p32->fmt.sliced,
sizeof(p64->fmt.sliced)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_SDR_CAPTURE:
case V4L2_BUF_TYPE_SDR_OUTPUT:
return copy_from_user(&p64->fmt.sdr, &p32->fmt.sdr,
sizeof(p64->fmt.sdr)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_META_CAPTURE:
case V4L2_BUF_TYPE_META_OUTPUT:
return copy_from_user(&p64->fmt.meta, &p32->fmt.meta,
sizeof(p64->fmt.meta)) ? -EFAULT : 0;
default:
return -EINVAL;
}
}
static int get_v4l2_create32(struct v4l2_create_buffers *p64,
struct v4l2_create_buffers32 __user *p32)
{
if (copy_from_user(p64, p32,
offsetof(struct v4l2_create_buffers32, format)))
return -EFAULT;
if (copy_from_user(&p64->flags, &p32->flags, sizeof(p32->flags)))
return -EFAULT;
return get_v4l2_format32(&p64->format, &p32->format);
}
static int put_v4l2_format32(struct v4l2_format *p64,
struct v4l2_format32 __user *p32)
{
switch (p64->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return copy_to_user(&p32->fmt.pix, &p64->fmt.pix,
sizeof(p64->fmt.pix)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
return copy_to_user(&p32->fmt.pix_mp, &p64->fmt.pix_mp,
sizeof(p64->fmt.pix_mp)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
return put_v4l2_window32(&p64->fmt.win, &p32->fmt.win);
case V4L2_BUF_TYPE_VBI_CAPTURE:
case V4L2_BUF_TYPE_VBI_OUTPUT:
return copy_to_user(&p32->fmt.vbi, &p64->fmt.vbi,
sizeof(p64->fmt.vbi)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
return copy_to_user(&p32->fmt.sliced, &p64->fmt.sliced,
sizeof(p64->fmt.sliced)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_SDR_CAPTURE:
case V4L2_BUF_TYPE_SDR_OUTPUT:
return copy_to_user(&p32->fmt.sdr, &p64->fmt.sdr,
sizeof(p64->fmt.sdr)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_META_CAPTURE:
case V4L2_BUF_TYPE_META_OUTPUT:
return copy_to_user(&p32->fmt.meta, &p64->fmt.meta,
sizeof(p64->fmt.meta)) ? -EFAULT : 0;
default:
return -EINVAL;
}
}
static int put_v4l2_create32(struct v4l2_create_buffers *p64,
struct v4l2_create_buffers32 __user *p32)
{
if (copy_to_user(p32, p64,
offsetof(struct v4l2_create_buffers32, format)) ||
put_user(p64->capabilities, &p32->capabilities) ||
put_user(p64->flags, &p32->flags) ||
copy_to_user(p32->reserved, p64->reserved, sizeof(p64->reserved)))
return -EFAULT;
return put_v4l2_format32(&p64->format, &p32->format);
}
struct v4l2_standard32 {
__u32 index;
compat_u64 id;
__u8 name[24];
struct v4l2_fract frameperiod; /* Frames, not fields */
__u32 framelines;
__u32 reserved[4];
};
static int get_v4l2_standard32(struct v4l2_standard *p64,
struct v4l2_standard32 __user *p32)
{
/* other fields are not set by the user, nor used by the driver */
return get_user(p64->index, &p32->index);
}
static int put_v4l2_standard32(struct v4l2_standard *p64,
struct v4l2_standard32 __user *p32)
{
if (put_user(p64->index, &p32->index) ||
put_user(p64->id, &p32->id) ||
copy_to_user(p32->name, p64->name, sizeof(p32->name)) ||
copy_to_user(&p32->frameperiod, &p64->frameperiod,
sizeof(p32->frameperiod)) ||
put_user(p64->framelines, &p32->framelines) ||
copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved)))
return -EFAULT;
return 0;
}
struct v4l2_plane32 {
__u32 bytesused;
__u32 length;
union {
__u32 mem_offset;
compat_long_t userptr;
__s32 fd;
} m;
__u32 data_offset;
__u32 reserved[11];
};
/*
* This is correct for all architectures including i386, but not x32,
* which has different alignment requirements for timestamp
*/
struct v4l2_buffer32 {
__u32 index;
__u32 type; /* enum v4l2_buf_type */
__u32 bytesused;
__u32 flags;
__u32 field; /* enum v4l2_field */
struct {
compat_s64 tv_sec;
compat_s64 tv_usec;
} timestamp;
struct v4l2_timecode timecode;
__u32 sequence;
/* memory location */
__u32 memory; /* enum v4l2_memory */
union {
__u32 offset;
compat_long_t userptr;
compat_caddr_t planes;
__s32 fd;
} m;
__u32 length;
__u32 reserved2;
__s32 request_fd;
};
#ifdef CONFIG_COMPAT_32BIT_TIME
struct v4l2_buffer32_time32 {
__u32 index;
__u32 type; /* enum v4l2_buf_type */
__u32 bytesused;
__u32 flags;
__u32 field; /* enum v4l2_field */
struct old_timeval32 timestamp;
struct v4l2_timecode timecode;
__u32 sequence;
/* memory location */
__u32 memory; /* enum v4l2_memory */
union {
__u32 offset;
compat_long_t userptr;
compat_caddr_t planes;
__s32 fd;
} m;
__u32 length;
__u32 reserved2;
__s32 request_fd;
};
#endif
static int get_v4l2_plane32(struct v4l2_plane *p64,
struct v4l2_plane32 __user *p32,
enum v4l2_memory memory)
{
struct v4l2_plane32 plane32;
typeof(p64->m) m = {};
if (copy_from_user(&plane32, p32, sizeof(plane32)))
return -EFAULT;
switch (memory) {
case V4L2_MEMORY_MMAP:
case V4L2_MEMORY_OVERLAY:
m.mem_offset = plane32.m.mem_offset;
break;
case V4L2_MEMORY_USERPTR:
m.userptr = (unsigned long)compat_ptr(plane32.m.userptr);
break;
case V4L2_MEMORY_DMABUF:
m.fd = plane32.m.fd;
break;
}
memset(p64, 0, sizeof(*p64));
*p64 = (struct v4l2_plane) {
.bytesused = plane32.bytesused,
.length = plane32.length,
.m = m,
.data_offset = plane32.data_offset,
};
return 0;
}
static int put_v4l2_plane32(struct v4l2_plane *p64,
struct v4l2_plane32 __user *p32,
enum v4l2_memory memory)
{
struct v4l2_plane32 plane32;
memset(&plane32, 0, sizeof(plane32));
plane32 = (struct v4l2_plane32) {
.bytesused = p64->bytesused,
.length = p64->length,
.data_offset = p64->data_offset,
};
switch (memory) {
case V4L2_MEMORY_MMAP:
case V4L2_MEMORY_OVERLAY:
plane32.m.mem_offset = p64->m.mem_offset;
break;
case V4L2_MEMORY_USERPTR:
plane32.m.userptr = (uintptr_t)(p64->m.userptr);
break;
case V4L2_MEMORY_DMABUF:
plane32.m.fd = p64->m.fd;
break;
}
if (copy_to_user(p32, &plane32, sizeof(plane32)))
return -EFAULT;
return 0;
}
static int get_v4l2_buffer32(struct v4l2_buffer *vb,
struct v4l2_buffer32 __user *arg)
{
struct v4l2_buffer32 vb32;
if (copy_from_user(&vb32, arg, sizeof(vb32)))
return -EFAULT;
memset(vb, 0, sizeof(*vb));
*vb = (struct v4l2_buffer) {
.index = vb32.index,
.type = vb32.type,
.bytesused = vb32.bytesused,
.flags = vb32.flags,
.field = vb32.field,
.timestamp.tv_sec = vb32.timestamp.tv_sec,
.timestamp.tv_usec = vb32.timestamp.tv_usec,
.timecode = vb32.timecode,
.sequence = vb32.sequence,
.memory = vb32.memory,
.m.offset = vb32.m.offset,
.length = vb32.length,
.request_fd = vb32.request_fd,
};
switch (vb->memory) {
case V4L2_MEMORY_MMAP:
case V4L2_MEMORY_OVERLAY:
vb->m.offset = vb32.m.offset;
break;
case V4L2_MEMORY_USERPTR:
vb->m.userptr = (unsigned long)compat_ptr(vb32.m.userptr);
break;
case V4L2_MEMORY_DMABUF:
vb->m.fd = vb32.m.fd;
break;
}
if (V4L2_TYPE_IS_MULTIPLANAR(vb->type))
vb->m.planes = (void __force *)
compat_ptr(vb32.m.planes);
return 0;
}
#ifdef CONFIG_COMPAT_32BIT_TIME
static int get_v4l2_buffer32_time32(struct v4l2_buffer *vb,
struct v4l2_buffer32_time32 __user *arg)
{
struct v4l2_buffer32_time32 vb32;
if (copy_from_user(&vb32, arg, sizeof(vb32)))
return -EFAULT;
memset(vb, 0, sizeof(*vb));
*vb = (struct v4l2_buffer) {
.index = vb32.index,
.type = vb32.type,
.bytesused = vb32.bytesused,
.flags = vb32.flags,
.field = vb32.field,
.timestamp.tv_sec = vb32.timestamp.tv_sec,
.timestamp.tv_usec = vb32.timestamp.tv_usec,
.timecode = vb32.timecode,
.sequence = vb32.sequence,
.memory = vb32.memory,
.m.offset = vb32.m.offset,
.length = vb32.length,
.request_fd = vb32.request_fd,
};
switch (vb->memory) {
case V4L2_MEMORY_MMAP:
case V4L2_MEMORY_OVERLAY:
vb->m.offset = vb32.m.offset;
break;
case V4L2_MEMORY_USERPTR:
vb->m.userptr = (unsigned long)compat_ptr(vb32.m.userptr);
break;
case V4L2_MEMORY_DMABUF:
vb->m.fd = vb32.m.fd;
break;
}
if (V4L2_TYPE_IS_MULTIPLANAR(vb->type))
vb->m.planes = (void __force *)
compat_ptr(vb32.m.planes);
return 0;
}
#endif
static int put_v4l2_buffer32(struct v4l2_buffer *vb,
struct v4l2_buffer32 __user *arg)
{
struct v4l2_buffer32 vb32;
memset(&vb32, 0, sizeof(vb32));
vb32 = (struct v4l2_buffer32) {
.index = vb->index,
.type = vb->type,
.bytesused = vb->bytesused,
.flags = vb->flags,
.field = vb->field,
.timestamp.tv_sec = vb->timestamp.tv_sec,
.timestamp.tv_usec = vb->timestamp.tv_usec,
.timecode = vb->timecode,
.sequence = vb->sequence,
.memory = vb->memory,
.m.offset = vb->m.offset,
.length = vb->length,
.request_fd = vb->request_fd,
};
switch (vb->memory) {
case V4L2_MEMORY_MMAP:
case V4L2_MEMORY_OVERLAY:
vb32.m.offset = vb->m.offset;
break;
case V4L2_MEMORY_USERPTR:
vb32.m.userptr = (uintptr_t)(vb->m.userptr);
break;
case V4L2_MEMORY_DMABUF:
vb32.m.fd = vb->m.fd;
break;
}
if (V4L2_TYPE_IS_MULTIPLANAR(vb->type))
vb32.m.planes = (uintptr_t)vb->m.planes;
if (copy_to_user(arg, &vb32, sizeof(vb32)))
return -EFAULT;
return 0;
}
#ifdef CONFIG_COMPAT_32BIT_TIME
static int put_v4l2_buffer32_time32(struct v4l2_buffer *vb,
struct v4l2_buffer32_time32 __user *arg)
{
struct v4l2_buffer32_time32 vb32;
memset(&vb32, 0, sizeof(vb32));
vb32 = (struct v4l2_buffer32_time32) {
.index = vb->index,
.type = vb->type,
.bytesused = vb->bytesused,
.flags = vb->flags,
.field = vb->field,
.timestamp.tv_sec = vb->timestamp.tv_sec,
.timestamp.tv_usec = vb->timestamp.tv_usec,
.timecode = vb->timecode,
.sequence = vb->sequence,
.memory = vb->memory,
.m.offset = vb->m.offset,
.length = vb->length,
.request_fd = vb->request_fd,
};
switch (vb->memory) {
case V4L2_MEMORY_MMAP:
case V4L2_MEMORY_OVERLAY:
vb32.m.offset = vb->m.offset;
break;
case V4L2_MEMORY_USERPTR:
vb32.m.userptr = (uintptr_t)(vb->m.userptr);
break;
case V4L2_MEMORY_DMABUF:
vb32.m.fd = vb->m.fd;
break;
}
if (V4L2_TYPE_IS_MULTIPLANAR(vb->type))
vb32.m.planes = (uintptr_t)vb->m.planes;
if (copy_to_user(arg, &vb32, sizeof(vb32)))
return -EFAULT;
return 0;
}
#endif
struct v4l2_framebuffer32 {
__u32 capability;
__u32 flags;
compat_caddr_t base;
struct {
__u32 width;
__u32 height;
__u32 pixelformat;
__u32 field;
__u32 bytesperline;
__u32 sizeimage;
__u32 colorspace;
__u32 priv;
} fmt;
};
static int get_v4l2_framebuffer32(struct v4l2_framebuffer *p64,
struct v4l2_framebuffer32 __user *p32)
{
if (get_user(p64->capability, &p32->capability) ||
get_user(p64->flags, &p32->flags) ||
copy_from_user(&p64->fmt, &p32->fmt, sizeof(p64->fmt)))
return -EFAULT;
p64->base = NULL;
return 0;
}
static int put_v4l2_framebuffer32(struct v4l2_framebuffer *p64,
struct v4l2_framebuffer32 __user *p32)
{
if (put_user((uintptr_t)p64->base, &p32->base) ||
put_user(p64->capability, &p32->capability) ||
put_user(p64->flags, &p32->flags) ||
copy_to_user(&p32->fmt, &p64->fmt, sizeof(p64->fmt)))
return -EFAULT;
return 0;
}
struct v4l2_input32 {
__u32 index; /* Which input */
__u8 name[32]; /* Label */
__u32 type; /* Type of input */
__u32 audioset; /* Associated audios (bitfield) */
__u32 tuner; /* Associated tuner */
compat_u64 std;
__u32 status;
__u32 capabilities;
__u32 reserved[3];
};
/*
* The 64-bit v4l2_input struct has extra padding at the end of the struct.
* Otherwise it is identical to the 32-bit version.
*/
static inline int get_v4l2_input32(struct v4l2_input *p64,
struct v4l2_input32 __user *p32)
{
if (copy_from_user(p64, p32, sizeof(*p32)))
return -EFAULT;
return 0;
}
static inline int put_v4l2_input32(struct v4l2_input *p64,
struct v4l2_input32 __user *p32)
{
if (copy_to_user(p32, p64, sizeof(*p32)))
return -EFAULT;
return 0;
}
struct v4l2_ext_controls32 {
__u32 which;
__u32 count;
__u32 error_idx;
__s32 request_fd;
__u32 reserved[1];
compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
};
struct v4l2_ext_control32 {
__u32 id;
__u32 size;
__u32 reserved2[1];
union {
__s32 value;
__s64 value64;
compat_caddr_t string; /* actually char * */
};
} __attribute__ ((packed));
/* Return true if this control is a pointer type. */
static inline bool ctrl_is_pointer(struct file *file, u32 id)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_fh *fh = NULL;
struct v4l2_ctrl_handler *hdl = NULL;
struct v4l2_query_ext_ctrl qec = { id };
const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
fh = file->private_data;
if (fh && fh->ctrl_handler)
hdl = fh->ctrl_handler;
else if (vdev->ctrl_handler)
hdl = vdev->ctrl_handler;
if (hdl) {
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id);
return ctrl && ctrl->is_ptr;
}
if (!ops || !ops->vidioc_query_ext_ctrl)
return false;
return !ops->vidioc_query_ext_ctrl(file, fh, &qec) &&
(qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD);
}
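/*
 * Editorial example: a 32-bit caller passing a string control hands the
 * kernel a packed v4l2_ext_control32 whose union member .string holds a
 * 32-bit user pointer. ctrl_is_pointer() is what tells the array
 * conversion helpers below to widen that member with compat_ptr() instead
 * of treating the union as a 64-bit scalar value:
 *
 *	struct v4l2_ext_control32 c32 = {
 *		.id	= V4L2_CID_RDS_TX_PS_NAME, // a string control
 *		.size	= 9,
 *		.string	= (compat_caddr_t)user_buf32,
 *	};
 *
 * V4L2_CID_RDS_TX_PS_NAME serves only as a well-known example of a
 * pointer (string) control; user_buf32 is hypothetical.
 */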
static int get_v4l2_ext_controls32(struct v4l2_ext_controls *p64,
struct v4l2_ext_controls32 __user *p32)
{
struct v4l2_ext_controls32 ec32;
if (copy_from_user(&ec32, p32, sizeof(ec32)))
return -EFAULT;
*p64 = (struct v4l2_ext_controls) {
.which = ec32.which,
.count = ec32.count,
.error_idx = ec32.error_idx,
.request_fd = ec32.request_fd,
.reserved[0] = ec32.reserved[0],
.controls = (void __force *)compat_ptr(ec32.controls),
};
return 0;
}
static int put_v4l2_ext_controls32(struct v4l2_ext_controls *p64,
struct v4l2_ext_controls32 __user *p32)
{
struct v4l2_ext_controls32 ec32;
memset(&ec32, 0, sizeof(ec32));
ec32 = (struct v4l2_ext_controls32) {
.which = p64->which,
.count = p64->count,
.error_idx = p64->error_idx,
.request_fd = p64->request_fd,
.reserved[0] = p64->reserved[0],
.controls = (uintptr_t)p64->controls,
};
if (copy_to_user(p32, &ec32, sizeof(ec32)))
return -EFAULT;
return 0;
}
#ifdef CONFIG_X86_64
/*
* x86 is the only compat architecture with different struct alignment
* between 32-bit and 64-bit tasks.
*/
struct v4l2_event32 {
__u32 type;
union {
compat_s64 value64;
__u8 data[64];
} u;
__u32 pending;
__u32 sequence;
struct {
compat_s64 tv_sec;
compat_s64 tv_nsec;
} timestamp;
__u32 id;
__u32 reserved[8];
};
static int put_v4l2_event32(struct v4l2_event *p64,
struct v4l2_event32 __user *p32)
{
if (put_user(p64->type, &p32->type) ||
copy_to_user(&p32->u, &p64->u, sizeof(p64->u)) ||
put_user(p64->pending, &p32->pending) ||
put_user(p64->sequence, &p32->sequence) ||
put_user(p64->timestamp.tv_sec, &p32->timestamp.tv_sec) ||
put_user(p64->timestamp.tv_nsec, &p32->timestamp.tv_nsec) ||
put_user(p64->id, &p32->id) ||
copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved)))
return -EFAULT;
return 0;
}
#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
struct v4l2_event32_time32 {
__u32 type;
union {
compat_s64 value64;
__u8 data[64];
} u;
__u32 pending;
__u32 sequence;
struct old_timespec32 timestamp;
__u32 id;
__u32 reserved[8];
};
static int put_v4l2_event32_time32(struct v4l2_event *p64,
struct v4l2_event32_time32 __user *p32)
{
if (put_user(p64->type, &p32->type) ||
copy_to_user(&p32->u, &p64->u, sizeof(p64->u)) ||
put_user(p64->pending, &p32->pending) ||
put_user(p64->sequence, &p32->sequence) ||
put_user(p64->timestamp.tv_sec, &p32->timestamp.tv_sec) ||
put_user(p64->timestamp.tv_nsec, &p32->timestamp.tv_nsec) ||
put_user(p64->id, &p32->id) ||
copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved)))
return -EFAULT;
return 0;
}
#endif
struct v4l2_edid32 {
__u32 pad;
__u32 start_block;
__u32 blocks;
__u32 reserved[5];
compat_caddr_t edid;
};
static int get_v4l2_edid32(struct v4l2_edid *p64,
struct v4l2_edid32 __user *p32)
{
compat_uptr_t edid;
if (copy_from_user(p64, p32, offsetof(struct v4l2_edid32, edid)) ||
get_user(edid, &p32->edid))
return -EFAULT;
p64->edid = (void __force *)compat_ptr(edid);
return 0;
}
static int put_v4l2_edid32(struct v4l2_edid *p64,
struct v4l2_edid32 __user *p32)
{
if (copy_to_user(p32, p64, offsetof(struct v4l2_edid32, edid)))
return -EFAULT;
return 0;
}
/*
* List of ioctls that require 32-bits/64-bits conversion
*
* The V4L2 ioctls that aren't listed here don't have pointer arguments
* and the struct size is identical for both 32 and 64 bits versions, so
* they don't need translations.
*/
#define VIDIOC_G_FMT32 _IOWR('V', 4, struct v4l2_format32)
#define VIDIOC_S_FMT32 _IOWR('V', 5, struct v4l2_format32)
#define VIDIOC_QUERYBUF32 _IOWR('V', 9, struct v4l2_buffer32)
#define VIDIOC_G_FBUF32 _IOR ('V', 10, struct v4l2_framebuffer32)
#define VIDIOC_S_FBUF32 _IOW ('V', 11, struct v4l2_framebuffer32)
#define VIDIOC_QBUF32 _IOWR('V', 15, struct v4l2_buffer32)
#define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32)
#define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32)
#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32)
#define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32)
#define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32)
#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32)
#define VIDIOC_DQEVENT32 _IOR ('V', 89, struct v4l2_event32)
#define VIDIOC_CREATE_BUFS32 _IOWR('V', 92, struct v4l2_create_buffers32)
#define VIDIOC_PREPARE_BUF32 _IOWR('V', 93, struct v4l2_buffer32)
#ifdef CONFIG_COMPAT_32BIT_TIME
#define VIDIOC_QUERYBUF32_TIME32 _IOWR('V', 9, struct v4l2_buffer32_time32)
#define VIDIOC_QBUF32_TIME32 _IOWR('V', 15, struct v4l2_buffer32_time32)
#define VIDIOC_DQBUF32_TIME32 _IOWR('V', 17, struct v4l2_buffer32_time32)
#define VIDIOC_DQEVENT32_TIME32 _IOR ('V', 89, struct v4l2_event32_time32)
#define VIDIOC_PREPARE_BUF32_TIME32 _IOWR('V', 93, struct v4l2_buffer32_time32)
#endif
unsigned int v4l2_compat_translate_cmd(unsigned int cmd)
{
switch (cmd) {
case VIDIOC_G_FMT32:
return VIDIOC_G_FMT;
case VIDIOC_S_FMT32:
return VIDIOC_S_FMT;
case VIDIOC_TRY_FMT32:
return VIDIOC_TRY_FMT;
case VIDIOC_G_FBUF32:
return VIDIOC_G_FBUF;
case VIDIOC_S_FBUF32:
return VIDIOC_S_FBUF;
#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_QUERYBUF32_TIME32:
return VIDIOC_QUERYBUF;
case VIDIOC_QBUF32_TIME32:
return VIDIOC_QBUF;
case VIDIOC_DQBUF32_TIME32:
return VIDIOC_DQBUF;
case VIDIOC_PREPARE_BUF32_TIME32:
return VIDIOC_PREPARE_BUF;
#endif
case VIDIOC_QUERYBUF32:
return VIDIOC_QUERYBUF;
case VIDIOC_QBUF32:
return VIDIOC_QBUF;
case VIDIOC_DQBUF32:
return VIDIOC_DQBUF;
case VIDIOC_CREATE_BUFS32:
return VIDIOC_CREATE_BUFS;
case VIDIOC_G_EXT_CTRLS32:
return VIDIOC_G_EXT_CTRLS;
case VIDIOC_S_EXT_CTRLS32:
return VIDIOC_S_EXT_CTRLS;
case VIDIOC_TRY_EXT_CTRLS32:
return VIDIOC_TRY_EXT_CTRLS;
case VIDIOC_PREPARE_BUF32:
return VIDIOC_PREPARE_BUF;
case VIDIOC_ENUMSTD32:
return VIDIOC_ENUMSTD;
case VIDIOC_ENUMINPUT32:
return VIDIOC_ENUMINPUT;
case VIDIOC_G_EDID32:
return VIDIOC_G_EDID;
case VIDIOC_S_EDID32:
return VIDIOC_S_EDID;
#ifdef CONFIG_X86_64
case VIDIOC_DQEVENT32:
return VIDIOC_DQEVENT;
#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_DQEVENT32_TIME32:
return VIDIOC_DQEVENT;
#endif
}
return cmd;
}
int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd)
{
switch (cmd) {
case VIDIOC_G_FMT32:
case VIDIOC_S_FMT32:
case VIDIOC_TRY_FMT32:
return get_v4l2_format32(parg, arg);
case VIDIOC_S_FBUF32:
return get_v4l2_framebuffer32(parg, arg);
#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_QUERYBUF32_TIME32:
case VIDIOC_QBUF32_TIME32:
case VIDIOC_DQBUF32_TIME32:
case VIDIOC_PREPARE_BUF32_TIME32:
return get_v4l2_buffer32_time32(parg, arg);
#endif
case VIDIOC_QUERYBUF32:
case VIDIOC_QBUF32:
case VIDIOC_DQBUF32:
case VIDIOC_PREPARE_BUF32:
return get_v4l2_buffer32(parg, arg);
case VIDIOC_G_EXT_CTRLS32:
case VIDIOC_S_EXT_CTRLS32:
case VIDIOC_TRY_EXT_CTRLS32:
return get_v4l2_ext_controls32(parg, arg);
case VIDIOC_CREATE_BUFS32:
return get_v4l2_create32(parg, arg);
case VIDIOC_ENUMSTD32:
return get_v4l2_standard32(parg, arg);
case VIDIOC_ENUMINPUT32:
return get_v4l2_input32(parg, arg);
case VIDIOC_G_EDID32:
case VIDIOC_S_EDID32:
return get_v4l2_edid32(parg, arg);
}
return 0;
}
int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd)
{
switch (cmd) {
case VIDIOC_G_FMT32:
case VIDIOC_S_FMT32:
case VIDIOC_TRY_FMT32:
return put_v4l2_format32(parg, arg);
case VIDIOC_G_FBUF32:
return put_v4l2_framebuffer32(parg, arg);
#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_QUERYBUF32_TIME32:
case VIDIOC_QBUF32_TIME32:
case VIDIOC_DQBUF32_TIME32:
case VIDIOC_PREPARE_BUF32_TIME32:
return put_v4l2_buffer32_time32(parg, arg);
#endif
case VIDIOC_QUERYBUF32:
case VIDIOC_QBUF32:
case VIDIOC_DQBUF32:
case VIDIOC_PREPARE_BUF32:
return put_v4l2_buffer32(parg, arg);
case VIDIOC_G_EXT_CTRLS32:
case VIDIOC_S_EXT_CTRLS32:
case VIDIOC_TRY_EXT_CTRLS32:
return put_v4l2_ext_controls32(parg, arg);
case VIDIOC_CREATE_BUFS32:
return put_v4l2_create32(parg, arg);
case VIDIOC_ENUMSTD32:
return put_v4l2_standard32(parg, arg);
case VIDIOC_ENUMINPUT32:
return put_v4l2_input32(parg, arg);
case VIDIOC_G_EDID32:
case VIDIOC_S_EDID32:
return put_v4l2_edid32(parg, arg);
#ifdef CONFIG_X86_64
case VIDIOC_DQEVENT32:
return put_v4l2_event32(parg, arg);
#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_DQEVENT32_TIME32:
return put_v4l2_event32_time32(parg, arg);
#endif
}
return 0;
}
int v4l2_compat_get_array_args(struct file *file, void *mbuf,
void __user *user_ptr, size_t array_size,
unsigned int cmd, void *arg)
{
int err = 0;
memset(mbuf, 0, array_size);
switch (cmd) {
#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_QUERYBUF32_TIME32:
case VIDIOC_QBUF32_TIME32:
case VIDIOC_DQBUF32_TIME32:
case VIDIOC_PREPARE_BUF32_TIME32:
#endif
case VIDIOC_QUERYBUF32:
case VIDIOC_QBUF32:
case VIDIOC_DQBUF32:
case VIDIOC_PREPARE_BUF32: {
struct v4l2_buffer *b64 = arg;
struct v4l2_plane *p64 = mbuf;
struct v4l2_plane32 __user *p32 = user_ptr;
if (V4L2_TYPE_IS_MULTIPLANAR(b64->type)) {
u32 num_planes = b64->length;
if (num_planes == 0)
return 0;
while (num_planes--) {
err = get_v4l2_plane32(p64, p32, b64->memory);
if (err)
return err;
++p64;
++p32;
}
}
break;
}
case VIDIOC_G_EXT_CTRLS32:
case VIDIOC_S_EXT_CTRLS32:
case VIDIOC_TRY_EXT_CTRLS32: {
struct v4l2_ext_controls *ecs64 = arg;
struct v4l2_ext_control *ec64 = mbuf;
struct v4l2_ext_control32 __user *ec32 = user_ptr;
int n;
for (n = 0; n < ecs64->count; n++) {
if (copy_from_user(ec64, ec32, sizeof(*ec32)))
return -EFAULT;
if (ctrl_is_pointer(file, ec64->id)) {
compat_uptr_t p;
if (get_user(p, &ec32->string))
return -EFAULT;
ec64->string = compat_ptr(p);
}
ec32++;
ec64++;
}
break;
}
default:
if (copy_from_user(mbuf, user_ptr, array_size))
err = -EFAULT;
break;
}
return err;
}
int v4l2_compat_put_array_args(struct file *file, void __user *user_ptr,
void *mbuf, size_t array_size,
unsigned int cmd, void *arg)
{
int err = 0;
switch (cmd) {
#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_QUERYBUF32_TIME32:
case VIDIOC_QBUF32_TIME32:
case VIDIOC_DQBUF32_TIME32:
case VIDIOC_PREPARE_BUF32_TIME32:
#endif
case VIDIOC_QUERYBUF32:
case VIDIOC_QBUF32:
case VIDIOC_DQBUF32:
case VIDIOC_PREPARE_BUF32: {
struct v4l2_buffer *b64 = arg;
struct v4l2_plane *p64 = mbuf;
struct v4l2_plane32 __user *p32 = user_ptr;
if (V4L2_TYPE_IS_MULTIPLANAR(b64->type)) {
u32 num_planes = b64->length;
if (num_planes == 0)
return 0;
while (num_planes--) {
err = put_v4l2_plane32(p64, p32, b64->memory);
if (err)
return err;
++p64;
++p32;
}
}
break;
}
case VIDIOC_G_EXT_CTRLS32:
case VIDIOC_S_EXT_CTRLS32:
case VIDIOC_TRY_EXT_CTRLS32: {
struct v4l2_ext_controls *ecs64 = arg;
struct v4l2_ext_control *ec64 = mbuf;
struct v4l2_ext_control32 __user *ec32 = user_ptr;
int n;
for (n = 0; n < ecs64->count; n++) {
unsigned int size = sizeof(*ec32);
/*
* Do not modify the pointer when copying a pointer
* control. What the pointer refers to was changed,
* not the pointer itself.
* The structures are otherwise compatible.
*/
if (ctrl_is_pointer(file, ec64->id))
size -= sizeof(ec32->value64);
if (copy_to_user(ec32, ec64, size))
return -EFAULT;
ec32++;
ec64++;
}
break;
}
default:
if (copy_to_user(user_ptr, mbuf, array_size))
err = -EFAULT;
break;
}
return err;
}
/**
* v4l2_compat_ioctl32() - Handles a compat32 ioctl call
*
* @file: pointer to &struct file with the file handler
* @cmd: ioctl to be called
* @arg: arguments passed from/to the ioctl handler
*
* This function is meant to be used as the .compat_ioctl fops in
* v4l2-dev.c in order to handle 32-bit ioctl calls on a 64-bit kernel.
*
* Non-private V4L2 ioctls are forwarded to the file's unlocked_ioctl
* handler; private ioctls are passed to vdev->fops->compat_ioctl32
* instead.
*/
long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
{
struct video_device *vdev = video_devdata(file);
long ret = -ENOIOCTLCMD;
if (!file->f_op->unlocked_ioctl)
return ret;
if (!video_is_registered(vdev))
return -ENODEV;
if (_IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)
ret = file->f_op->unlocked_ioctl(file, cmd,
(unsigned long)compat_ptr(arg));
else if (vdev->fops->compat_ioctl32)
ret = vdev->fops->compat_ioctl32(file, cmd, arg);
if (ret == -ENOIOCTLCMD)
pr_debug("compat_ioctl32: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
_IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_compat_ioctl32);
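/*
 * Editorial note: drivers do not call v4l2_compat_ioctl32() directly.
 * v4l2-dev.c installs it in the video device node's file_operations,
 * roughly (sketch of that wiring):
 *
 *	static const struct file_operations v4l2_fops = {
 *		...
 *		.unlocked_ioctl = v4l2_ioctl,
 *	#ifdef CONFIG_COMPAT
 *		.compat_ioctl = v4l2_compat_ioctl32,
 *	#endif
 *		...
 *	};
 *
 * A driver only needs to provide fops->compat_ioctl32 if it implements
 * private ioctls.
 */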
| linux-master | drivers/media/v4l2-core/v4l2-compat-ioctl32.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Video capture interface for Linux version 2
*
* A generic video device interface for the LINUX operating system
* using a set of device structures/vectors for low level operations.
*
* Authors: Alan Cox, <[email protected]> (version 1)
* Mauro Carvalho Chehab <[email protected]> (version 2)
*
* Fixes: 20000516 Claudio Matsuoka <[email protected]>
* - Added procfs support
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#define VIDEO_NUM_DEVICES 256
#define VIDEO_NAME "video4linux"
#define dprintk(fmt, arg...) do { \
printk(KERN_DEBUG pr_fmt("%s: " fmt), \
__func__, ##arg); \
} while (0)
/*
* sysfs stuff
*/
static ssize_t index_show(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(cd);
return sprintf(buf, "%i\n", vdev->index);
}
static DEVICE_ATTR_RO(index);
static ssize_t dev_debug_show(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(cd);
return sprintf(buf, "%i\n", vdev->dev_debug);
}
static ssize_t dev_debug_store(struct device *cd, struct device_attribute *attr,
const char *buf, size_t len)
{
struct video_device *vdev = to_video_device(cd);
int res = 0;
u16 value;
res = kstrtou16(buf, 0, &value);
if (res)
return res;
vdev->dev_debug = value;
return len;
}
static DEVICE_ATTR_RW(dev_debug);
static ssize_t name_show(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(cd);
return sprintf(buf, "%.*s\n", (int)sizeof(vdev->name), vdev->name);
}
static DEVICE_ATTR_RO(name);
static struct attribute *video_device_attrs[] = {
&dev_attr_name.attr,
&dev_attr_dev_debug.attr,
&dev_attr_index.attr,
NULL,
};
ATTRIBUTE_GROUPS(video_device);
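/*
 * Editor's note: once these attribute groups are attached to the video
 * class below, every registered node exposes read-only "name" and
 * "index" files and a writable "dev_debug" file under
 * /sys/class/video4linux/<node>/. Writing a debug bitmask to dev_debug
 * enables the V4L2_DEV_DEBUG_* logging used by the fop wrappers later
 * in this file.
 */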
/*
* Active devices
*/
static struct video_device *video_devices[VIDEO_NUM_DEVICES];
static DEFINE_MUTEX(videodev_lock);
static DECLARE_BITMAP(devnode_nums[VFL_TYPE_MAX], VIDEO_NUM_DEVICES);
/* Device node utility functions */
/* Note: these utility functions all assume that vfl_type is in the range
[0, VFL_TYPE_MAX-1]. */
#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
/* Return the bitmap corresponding to vfl_type. */
static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type)
{
	/* Any types not assigned to fixed minor ranges must be mapped to
	   a single bitmap for the purposes of finding a free node number,
	   since all those unassigned types use the same minor range. */
int idx = (vfl_type > VFL_TYPE_RADIO) ? VFL_TYPE_MAX - 1 : vfl_type;
return devnode_nums[idx];
}
#else
/* Return the bitmap corresponding to vfl_type. */
static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type)
{
return devnode_nums[vfl_type];
}
#endif
/* Mark device node number vdev->num as used */
static inline void devnode_set(struct video_device *vdev)
{
set_bit(vdev->num, devnode_bits(vdev->vfl_type));
}
/* Mark device node number vdev->num as unused */
static inline void devnode_clear(struct video_device *vdev)
{
clear_bit(vdev->num, devnode_bits(vdev->vfl_type));
}
/* Try to find a free device node number in the range [from, to) */
static inline int devnode_find(struct video_device *vdev, int from, int to)
{
return find_next_zero_bit(devnode_bits(vdev->vfl_type), to, from);
}
struct video_device *video_device_alloc(void)
{
return kzalloc(sizeof(struct video_device), GFP_KERNEL);
}
EXPORT_SYMBOL(video_device_alloc);
void video_device_release(struct video_device *vdev)
{
kfree(vdev);
}
EXPORT_SYMBOL(video_device_release);
void video_device_release_empty(struct video_device *vdev)
{
	/* Do nothing */
	/* Only valid when the video_device struct is statically allocated. */
}
EXPORT_SYMBOL(video_device_release_empty);
static inline void video_get(struct video_device *vdev)
{
get_device(&vdev->dev);
}
static inline void video_put(struct video_device *vdev)
{
put_device(&vdev->dev);
}
/* Called when the last user of the video device exits. */
static void v4l2_device_release(struct device *cd)
{
struct video_device *vdev = to_video_device(cd);
struct v4l2_device *v4l2_dev = vdev->v4l2_dev;
mutex_lock(&videodev_lock);
if (WARN_ON(video_devices[vdev->minor] != vdev)) {
/* should not happen */
mutex_unlock(&videodev_lock);
return;
}
/* Free up this device for reuse */
video_devices[vdev->minor] = NULL;
/* Delete the cdev on this minor as well */
cdev_del(vdev->cdev);
/* Just in case some driver tries to access this from
the release() callback. */
vdev->cdev = NULL;
/* Mark device node number as free */
devnode_clear(vdev);
mutex_unlock(&videodev_lock);
#if defined(CONFIG_MEDIA_CONTROLLER)
if (v4l2_dev->mdev && vdev->vfl_dir != VFL_DIR_M2M) {
/* Remove interfaces and interface links */
media_devnode_remove(vdev->intf_devnode);
if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN)
media_device_unregister_entity(&vdev->entity);
}
#endif
/* Do not call v4l2_device_put if there is no release callback set.
* Drivers that have no v4l2_device release callback might free the
* v4l2_dev instance in the video_device release callback below, so we
* must perform this check here.
*
* TODO: In the long run all drivers that use v4l2_device should use the
* v4l2_device release callback. This check will then be unnecessary.
*/
if (v4l2_dev->release == NULL)
v4l2_dev = NULL;
/* Release video_device and perform other
cleanups as needed. */
vdev->release(vdev);
/* Decrease v4l2_device refcount */
if (v4l2_dev)
v4l2_device_put(v4l2_dev);
}
static struct class video_class = {
.name = VIDEO_NAME,
.dev_groups = video_device_groups,
};
struct video_device *video_devdata(struct file *file)
{
return video_devices[iminor(file_inode(file))];
}
EXPORT_SYMBOL(video_devdata);
/* Priority handling */
static inline bool prio_is_valid(enum v4l2_priority prio)
{
return prio == V4L2_PRIORITY_BACKGROUND ||
prio == V4L2_PRIORITY_INTERACTIVE ||
prio == V4L2_PRIORITY_RECORD;
}
void v4l2_prio_init(struct v4l2_prio_state *global)
{
memset(global, 0, sizeof(*global));
}
EXPORT_SYMBOL(v4l2_prio_init);
int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local,
enum v4l2_priority new)
{
if (!prio_is_valid(new))
return -EINVAL;
if (*local == new)
return 0;
atomic_inc(&global->prios[new]);
if (prio_is_valid(*local))
atomic_dec(&global->prios[*local]);
*local = new;
return 0;
}
EXPORT_SYMBOL(v4l2_prio_change);
void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local)
{
v4l2_prio_change(global, local, V4L2_PRIORITY_DEFAULT);
}
EXPORT_SYMBOL(v4l2_prio_open);
void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local)
{
if (prio_is_valid(local))
atomic_dec(&global->prios[local]);
}
EXPORT_SYMBOL(v4l2_prio_close);
enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global)
{
if (atomic_read(&global->prios[V4L2_PRIORITY_RECORD]) > 0)
return V4L2_PRIORITY_RECORD;
if (atomic_read(&global->prios[V4L2_PRIORITY_INTERACTIVE]) > 0)
return V4L2_PRIORITY_INTERACTIVE;
if (atomic_read(&global->prios[V4L2_PRIORITY_BACKGROUND]) > 0)
return V4L2_PRIORITY_BACKGROUND;
return V4L2_PRIORITY_UNSET;
}
EXPORT_SYMBOL(v4l2_prio_max);
int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local)
{
return (local < v4l2_prio_max(global)) ? -EBUSY : 0;
}
EXPORT_SYMBOL(v4l2_prio_check);
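/*
 * Illustrative sketch (editor's addition, hypothetical driver code):
 * a driver typically checks the caller's priority before honouring a
 * state-changing ioctl. struct v4l2_fh carries a per-filehandle prio.
 */
#if 0	/* example only */
static int mydrv_s_std(struct file *file, void *fh, v4l2_std_id std)
{
	struct mydrv *dev = video_drvdata(file);
	struct v4l2_fh *vfh = fh;
	int ret;
	ret = v4l2_prio_check(&dev->prio_state, vfh->prio);
	if (ret)	/* -EBUSY: a higher-priority filehandle exists */
		return ret;
	/* ... apply the new standard ... */
	return 0;
}
#endif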
static ssize_t v4l2_read(struct file *filp, char __user *buf,
size_t sz, loff_t *off)
{
struct video_device *vdev = video_devdata(filp);
int ret = -ENODEV;
if (!vdev->fops->read)
return -EINVAL;
if (video_is_registered(vdev))
ret = vdev->fops->read(filp, buf, sz, off);
if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) &&
(vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING))
dprintk("%s: read: %zd (%d)\n",
video_device_node_name(vdev), sz, ret);
return ret;
}
static ssize_t v4l2_write(struct file *filp, const char __user *buf,
size_t sz, loff_t *off)
{
struct video_device *vdev = video_devdata(filp);
int ret = -ENODEV;
if (!vdev->fops->write)
return -EINVAL;
if (video_is_registered(vdev))
ret = vdev->fops->write(filp, buf, sz, off);
if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) &&
(vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING))
dprintk("%s: write: %zd (%d)\n",
video_device_node_name(vdev), sz, ret);
return ret;
}
static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll)
{
struct video_device *vdev = video_devdata(filp);
__poll_t res = EPOLLERR | EPOLLHUP | EPOLLPRI;
if (video_is_registered(vdev)) {
if (!vdev->fops->poll)
res = DEFAULT_POLLMASK;
else
res = vdev->fops->poll(filp, poll);
}
if (vdev->dev_debug & V4L2_DEV_DEBUG_POLL)
dprintk("%s: poll: %08x %08x\n",
video_device_node_name(vdev), res,
poll_requested_events(poll));
return res;
}
static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct video_device *vdev = video_devdata(filp);
int ret = -ENODEV;
if (vdev->fops->unlocked_ioctl) {
if (video_is_registered(vdev))
ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
} else
ret = -ENOTTY;
return ret;
}
#ifdef CONFIG_MMU
#define v4l2_get_unmapped_area NULL
#else
static unsigned long v4l2_get_unmapped_area(struct file *filp,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct video_device *vdev = video_devdata(filp);
int ret;
if (!vdev->fops->get_unmapped_area)
return -ENOSYS;
if (!video_is_registered(vdev))
return -ENODEV;
ret = vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
dprintk("%s: get_unmapped_area (%d)\n",
video_device_node_name(vdev), ret);
return ret;
}
#endif
static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
{
struct video_device *vdev = video_devdata(filp);
int ret = -ENODEV;
if (!vdev->fops->mmap)
return -ENODEV;
if (video_is_registered(vdev))
ret = vdev->fops->mmap(filp, vm);
if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
dprintk("%s: mmap (%d)\n",
video_device_node_name(vdev), ret);
return ret;
}
/* Override for the open function */
static int v4l2_open(struct inode *inode, struct file *filp)
{
struct video_device *vdev;
int ret = 0;
/* Check if the video device is available */
mutex_lock(&videodev_lock);
vdev = video_devdata(filp);
	/* Return -ENODEV if the video device has already been removed. */
if (vdev == NULL || !video_is_registered(vdev)) {
mutex_unlock(&videodev_lock);
return -ENODEV;
}
/* and increase the device refcount */
video_get(vdev);
mutex_unlock(&videodev_lock);
if (vdev->fops->open) {
if (video_is_registered(vdev))
ret = vdev->fops->open(filp);
else
ret = -ENODEV;
}
if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
dprintk("%s: open (%d)\n",
video_device_node_name(vdev), ret);
/* decrease the refcount in case of an error */
if (ret)
video_put(vdev);
return ret;
}
/* Override for the release function */
static int v4l2_release(struct inode *inode, struct file *filp)
{
struct video_device *vdev = video_devdata(filp);
int ret = 0;
/*
* We need to serialize the release() with queueing new requests.
* The release() may trigger the cancellation of a streaming
* operation, and that should not be mixed with queueing a new
* request at the same time.
*/
if (vdev->fops->release) {
if (v4l2_device_supports_requests(vdev->v4l2_dev)) {
mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex);
ret = vdev->fops->release(filp);
mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex);
} else {
ret = vdev->fops->release(filp);
}
}
if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
dprintk("%s: release\n",
video_device_node_name(vdev));
/* decrease the refcount unconditionally since the release()
return value is ignored. */
video_put(vdev);
return ret;
}
static const struct file_operations v4l2_fops = {
.owner = THIS_MODULE,
.read = v4l2_read,
.write = v4l2_write,
.open = v4l2_open,
.get_unmapped_area = v4l2_get_unmapped_area,
.mmap = v4l2_mmap,
.unlocked_ioctl = v4l2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = v4l2_compat_ioctl32,
#endif
.release = v4l2_release,
.poll = v4l2_poll,
.llseek = no_llseek,
};
/**
* get_index - assign stream index number based on v4l2_dev
* @vdev: video_device to assign index number to, vdev->v4l2_dev should be assigned
*
* Note that when this is called the new device has not yet been registered
* in the video_device array, but it was able to obtain a minor number.
*
 * This means that we can always obtain a free stream index number, since
 * in the worst case VIDEO_NUM_DEVICES - 1 slots of the video_device array
 * are in use.
*
* Returns a free index number.
*/
static int get_index(struct video_device *vdev)
{
/* This can be static since this function is called with the global
videodev_lock held. */
static DECLARE_BITMAP(used, VIDEO_NUM_DEVICES);
int i;
bitmap_zero(used, VIDEO_NUM_DEVICES);
for (i = 0; i < VIDEO_NUM_DEVICES; i++) {
if (video_devices[i] != NULL &&
video_devices[i]->v4l2_dev == vdev->v4l2_dev) {
__set_bit(video_devices[i]->index, used);
}
}
return find_first_zero_bit(used, VIDEO_NUM_DEVICES);
}
#define SET_VALID_IOCTL(ops, cmd, op) \
do { if ((ops)->op) __set_bit(_IOC_NR(cmd), valid_ioctls); } while (0)
/* This determines which ioctls are actually implemented in the driver.
It's a one-time thing which simplifies video_ioctl2 as it can just do
a bit test.
Note that drivers can override this by setting bits to 1 in
vdev->valid_ioctls. If an ioctl is marked as 1 when this function is
called, then that ioctl will actually be marked as unimplemented.
It does that by first setting up the local valid_ioctls bitmap, and
   at the end doing:
vdev->valid_ioctls = valid_ioctls & ~(vdev->valid_ioctls)
*/
static void determine_valid_ioctls(struct video_device *vdev)
{
const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VIDEO_CAPTURE_MPLANE |
V4L2_CAP_VIDEO_OUTPUT |
V4L2_CAP_VIDEO_OUTPUT_MPLANE |
V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
const u32 meta_caps = V4L2_CAP_META_CAPTURE |
V4L2_CAP_META_OUTPUT;
DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
bool is_vid = vdev->vfl_type == VFL_TYPE_VIDEO &&
(vdev->device_caps & vid_caps);
bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH;
bool is_meta = vdev->vfl_type == VFL_TYPE_VIDEO &&
(vdev->device_caps & meta_caps);
bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
bool is_io_mc = vdev->device_caps & V4L2_CAP_IO_MC;
bool has_streaming = vdev->device_caps & V4L2_CAP_STREAMING;
bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE);
/* vfl_type and vfl_dir independent ioctls */
SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap);
__set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls);
__set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
	/* Note: the control handler can also be passed through the filehandle,
	   and that can't be tested here. If the bit for these control ioctls
	   is set, then the ioctl is valid. But if it is 0, then it can still
	   be valid if the control handler was set through the filehandle. */
if (vdev->ctrl_handler || ops->vidioc_queryctrl)
__set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl)
__set_bit(_IOC_NR(VIDIOC_QUERY_EXT_CTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_g_ctrl || ops->vidioc_g_ext_ctrls)
__set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_s_ctrl || ops->vidioc_s_ext_ctrls)
__set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls)
__set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls)
__set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_try_ext_ctrls)
__set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_querymenu)
__set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
if (!is_tch) {
SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
}
SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status);
#ifdef CONFIG_VIDEO_ADV_DEBUG
__set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls);
__set_bit(_IOC_NR(VIDIOC_DBG_G_REGISTER), valid_ioctls);
__set_bit(_IOC_NR(VIDIOC_DBG_S_REGISTER), valid_ioctls);
#endif
/* yes, really vidioc_subscribe_event */
SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event);
if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
__set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
if (is_vid) {
/* video specific ioctls */
if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
ops->vidioc_enum_fmt_vid_overlay)) ||
(is_tx && ops->vidioc_enum_fmt_vid_out))
__set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_g_fmt_vid_cap ||
ops->vidioc_g_fmt_vid_cap_mplane ||
ops->vidioc_g_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_g_fmt_vid_out ||
ops->vidioc_g_fmt_vid_out_mplane ||
ops->vidioc_g_fmt_vid_out_overlay)))
__set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_s_fmt_vid_cap ||
ops->vidioc_s_fmt_vid_cap_mplane ||
ops->vidioc_s_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_s_fmt_vid_out ||
ops->vidioc_s_fmt_vid_out_mplane ||
ops->vidioc_s_fmt_vid_out_overlay)))
__set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_try_fmt_vid_cap ||
ops->vidioc_try_fmt_vid_cap_mplane ||
ops->vidioc_try_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_try_fmt_vid_out ||
ops->vidioc_try_fmt_vid_out_mplane ||
ops->vidioc_try_fmt_vid_out_overlay)))
__set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
SET_VALID_IOCTL(ops, VIDIOC_S_FBUF, vidioc_s_fbuf);
SET_VALID_IOCTL(ops, VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp);
SET_VALID_IOCTL(ops, VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp);
SET_VALID_IOCTL(ops, VIDIOC_G_ENC_INDEX, vidioc_g_enc_index);
SET_VALID_IOCTL(ops, VIDIOC_ENCODER_CMD, vidioc_encoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
if (ops->vidioc_g_selection) {
__set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
__set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
}
if (ops->vidioc_s_selection)
__set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
}
if (is_meta && is_rx) {
/* metadata capture specific ioctls */
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_cap);
SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_cap);
SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_cap);
SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_cap);
} else if (is_meta && is_tx) {
/* metadata output specific ioctls */
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_out);
SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_out);
SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_out);
SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_out);
}
if (is_vbi) {
/* vbi specific ioctls */
if ((is_rx && (ops->vidioc_g_fmt_vbi_cap ||
ops->vidioc_g_fmt_sliced_vbi_cap)) ||
(is_tx && (ops->vidioc_g_fmt_vbi_out ||
ops->vidioc_g_fmt_sliced_vbi_out)))
__set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_s_fmt_vbi_cap ||
ops->vidioc_s_fmt_sliced_vbi_cap)) ||
(is_tx && (ops->vidioc_s_fmt_vbi_out ||
ops->vidioc_s_fmt_sliced_vbi_out)))
__set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_try_fmt_vbi_cap ||
ops->vidioc_try_fmt_sliced_vbi_cap)) ||
(is_tx && (ops->vidioc_try_fmt_vbi_out ||
ops->vidioc_try_fmt_sliced_vbi_out)))
__set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
} else if (is_tch) {
/* touch specific ioctls */
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_vid_cap);
SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_vid_cap);
SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_vid_cap);
SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_vid_cap);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
SET_VALID_IOCTL(ops, VIDIOC_G_PARM, vidioc_g_parm);
SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
} else if (is_sdr && is_rx) {
/* SDR receiver specific ioctls */
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_cap);
SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_cap);
SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_cap);
SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_cap);
} else if (is_sdr && is_tx) {
/* SDR transmitter specific ioctls */
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_out);
SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_out);
SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_out);
SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_out);
}
if (has_streaming) {
/* ioctls valid for streaming I/O */
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
}
if (is_vid || is_vbi || is_meta) {
/* ioctls valid for video, vbi and metadata */
if (ops->vidioc_s_std)
__set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
SET_VALID_IOCTL(ops, VIDIOC_G_STD, vidioc_g_std);
if (is_rx) {
SET_VALID_IOCTL(ops, VIDIOC_QUERYSTD, vidioc_querystd);
if (is_io_mc) {
__set_bit(_IOC_NR(VIDIOC_ENUMINPUT), valid_ioctls);
__set_bit(_IOC_NR(VIDIOC_G_INPUT), valid_ioctls);
__set_bit(_IOC_NR(VIDIOC_S_INPUT), valid_ioctls);
} else {
SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
}
SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDIO, vidioc_enumaudio);
SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio);
SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio);
SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid);
}
if (is_tx) {
if (is_io_mc) {
__set_bit(_IOC_NR(VIDIOC_ENUMOUTPUT), valid_ioctls);
__set_bit(_IOC_NR(VIDIOC_G_OUTPUT), valid_ioctls);
__set_bit(_IOC_NR(VIDIOC_S_OUTPUT), valid_ioctls);
} else {
SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output);
}
SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDOUT, vidioc_enumaudout);
SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout);
SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
}
if (ops->vidioc_g_parm || ops->vidioc_g_std)
__set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap);
SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid);
}
if (is_tx && (is_radio || is_sdr)) {
/* radio transmitter only ioctls */
SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);
SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);
}
if (is_rx && !is_tch) {
/* receiver only ioctls */
SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner);
SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek);
}
bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls,
BASE_VIDIOC_PRIVATE);
}
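/*
 * Illustrative sketch (editor's addition): because of the final
 * bitmap_andnot() above, a driver can mask out an ioctl that its ops
 * table would otherwise advertise by setting the bit *before*
 * registration; v4l2_disable_ioctl() is the helper for that.
 */
#if 0	/* example only */
	/* in the hypothetical driver's probe(), before registering: */
	v4l2_disable_ioctl(vdev, VIDIOC_S_STD);	/* pre-set bit in valid_ioctls */
	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
#endif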
static int video_register_media_controller(struct video_device *vdev)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
u32 intf_type;
int ret;
	/* Memory-to-memory devices are more complex and use
	 * their own function to register their MC entities.
	 */
if (!vdev->v4l2_dev->mdev || vdev->vfl_dir == VFL_DIR_M2M)
return 0;
vdev->entity.obj_type = MEDIA_ENTITY_TYPE_VIDEO_DEVICE;
vdev->entity.function = MEDIA_ENT_F_UNKNOWN;
switch (vdev->vfl_type) {
case VFL_TYPE_VIDEO:
intf_type = MEDIA_INTF_T_V4L_VIDEO;
vdev->entity.function = MEDIA_ENT_F_IO_V4L;
break;
case VFL_TYPE_VBI:
intf_type = MEDIA_INTF_T_V4L_VBI;
vdev->entity.function = MEDIA_ENT_F_IO_VBI;
break;
case VFL_TYPE_SDR:
intf_type = MEDIA_INTF_T_V4L_SWRADIO;
vdev->entity.function = MEDIA_ENT_F_IO_SWRADIO;
break;
case VFL_TYPE_TOUCH:
intf_type = MEDIA_INTF_T_V4L_TOUCH;
vdev->entity.function = MEDIA_ENT_F_IO_V4L;
break;
case VFL_TYPE_RADIO:
intf_type = MEDIA_INTF_T_V4L_RADIO;
/*
* Radio doesn't have an entity at the V4L2 side to represent
* radio input or output. Instead, the audio input/output goes
* via either physical wires or ALSA.
*/
break;
case VFL_TYPE_SUBDEV:
intf_type = MEDIA_INTF_T_V4L_SUBDEV;
/* Entity will be created via v4l2_device_register_subdev() */
break;
default:
return 0;
}
if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) {
vdev->entity.name = vdev->name;
/* Needed just for backward compatibility with legacy MC API */
vdev->entity.info.dev.major = VIDEO_MAJOR;
vdev->entity.info.dev.minor = vdev->minor;
ret = media_device_register_entity(vdev->v4l2_dev->mdev,
&vdev->entity);
if (ret < 0) {
pr_warn("%s: media_device_register_entity failed\n",
__func__);
return ret;
}
}
vdev->intf_devnode = media_devnode_create(vdev->v4l2_dev->mdev,
intf_type,
0, VIDEO_MAJOR,
vdev->minor);
if (!vdev->intf_devnode) {
media_device_unregister_entity(&vdev->entity);
return -ENOMEM;
}
if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) {
struct media_link *link;
link = media_create_intf_link(&vdev->entity,
&vdev->intf_devnode->intf,
MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE);
if (!link) {
media_devnode_remove(vdev->intf_devnode);
media_device_unregister_entity(&vdev->entity);
return -ENOMEM;
}
}
/* FIXME: how to create the other interface links? */
#endif
return 0;
}
int __video_register_device(struct video_device *vdev,
enum vfl_devnode_type type,
int nr, int warn_if_nr_in_use,
struct module *owner)
{
int i = 0;
int ret;
int minor_offset = 0;
int minor_cnt = VIDEO_NUM_DEVICES;
const char *name_base;
/* A minor value of -1 marks this video device as never
having been registered */
vdev->minor = -1;
/* the release callback MUST be present */
if (WARN_ON(!vdev->release))
return -EINVAL;
/* the v4l2_dev pointer MUST be present */
if (WARN_ON(!vdev->v4l2_dev))
return -EINVAL;
/* the device_caps field MUST be set for all but subdevs */
if (WARN_ON(type != VFL_TYPE_SUBDEV && !vdev->device_caps))
return -EINVAL;
/* v4l2_fh support */
spin_lock_init(&vdev->fh_lock);
INIT_LIST_HEAD(&vdev->fh_list);
/* Part 1: check device type */
switch (type) {
case VFL_TYPE_VIDEO:
name_base = "video";
break;
case VFL_TYPE_VBI:
name_base = "vbi";
break;
case VFL_TYPE_RADIO:
name_base = "radio";
break;
case VFL_TYPE_SUBDEV:
name_base = "v4l-subdev";
break;
case VFL_TYPE_SDR:
/* Use device name 'swradio' because 'sdr' was already taken. */
name_base = "swradio";
break;
case VFL_TYPE_TOUCH:
name_base = "v4l-touch";
break;
default:
pr_err("%s called with unknown type: %d\n",
__func__, type);
return -EINVAL;
}
vdev->vfl_type = type;
vdev->cdev = NULL;
if (vdev->dev_parent == NULL)
vdev->dev_parent = vdev->v4l2_dev->dev;
if (vdev->ctrl_handler == NULL)
vdev->ctrl_handler = vdev->v4l2_dev->ctrl_handler;
/* If the prio state pointer is NULL, then use the v4l2_device
prio state. */
if (vdev->prio == NULL)
vdev->prio = &vdev->v4l2_dev->prio;
/* Part 2: find a free minor, device node number and device index. */
#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
	/* Keep the ranges for the first three types for historical
	 * reasons.
	 * Newer devices (not yet in place) should use the range
	 * of 128-191 and just pick the first free minor there
	 * (new style). */
switch (type) {
case VFL_TYPE_VIDEO:
minor_offset = 0;
minor_cnt = 64;
break;
case VFL_TYPE_RADIO:
minor_offset = 64;
minor_cnt = 64;
break;
case VFL_TYPE_VBI:
minor_offset = 224;
minor_cnt = 32;
break;
default:
minor_offset = 128;
minor_cnt = 64;
break;
}
#endif
/* Pick a device node number */
mutex_lock(&videodev_lock);
nr = devnode_find(vdev, nr == -1 ? 0 : nr, minor_cnt);
if (nr == minor_cnt)
nr = devnode_find(vdev, 0, minor_cnt);
if (nr == minor_cnt) {
pr_err("could not get a free device node number\n");
mutex_unlock(&videodev_lock);
return -ENFILE;
}
#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
/* 1-on-1 mapping of device node number to minor number */
i = nr;
#else
/* The device node number and minor numbers are independent, so
we just find the first free minor number. */
for (i = 0; i < VIDEO_NUM_DEVICES; i++)
if (video_devices[i] == NULL)
break;
if (i == VIDEO_NUM_DEVICES) {
mutex_unlock(&videodev_lock);
pr_err("could not get a free minor\n");
return -ENFILE;
}
#endif
vdev->minor = i + minor_offset;
vdev->num = nr;
/* Should not happen since we thought this minor was free */
if (WARN_ON(video_devices[vdev->minor])) {
mutex_unlock(&videodev_lock);
pr_err("video_device not empty!\n");
return -ENFILE;
}
devnode_set(vdev);
vdev->index = get_index(vdev);
video_devices[vdev->minor] = vdev;
mutex_unlock(&videodev_lock);
if (vdev->ioctl_ops)
determine_valid_ioctls(vdev);
/* Part 3: Initialize the character device */
vdev->cdev = cdev_alloc();
if (vdev->cdev == NULL) {
ret = -ENOMEM;
goto cleanup;
}
vdev->cdev->ops = &v4l2_fops;
vdev->cdev->owner = owner;
ret = cdev_add(vdev->cdev, MKDEV(VIDEO_MAJOR, vdev->minor), 1);
if (ret < 0) {
pr_err("%s: cdev_add failed\n", __func__);
kfree(vdev->cdev);
vdev->cdev = NULL;
goto cleanup;
}
/* Part 4: register the device with sysfs */
vdev->dev.class = &video_class;
vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
vdev->dev.parent = vdev->dev_parent;
dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
ret = device_register(&vdev->dev);
if (ret < 0) {
pr_err("%s: device_register failed\n", __func__);
goto cleanup;
}
/* Register the release callback that will be called when the last
reference to the device goes away. */
vdev->dev.release = v4l2_device_release;
if (nr != -1 && nr != vdev->num && warn_if_nr_in_use)
pr_warn("%s: requested %s%d, got %s\n", __func__,
name_base, nr, video_device_node_name(vdev));
/* Increase v4l2_device refcount */
v4l2_device_get(vdev->v4l2_dev);
/* Part 5: Register the entity. */
ret = video_register_media_controller(vdev);
/* Part 6: Activate this minor. The char device can now be used. */
set_bit(V4L2_FL_REGISTERED, &vdev->flags);
return 0;
cleanup:
mutex_lock(&videodev_lock);
if (vdev->cdev)
cdev_del(vdev->cdev);
video_devices[vdev->minor] = NULL;
devnode_clear(vdev);
mutex_unlock(&videodev_lock);
/* Mark this video device as never having been registered. */
vdev->minor = -1;
return ret;
}
EXPORT_SYMBOL(__video_register_device);
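/*
 * Illustrative sketch (editor's addition): the minimal fields the
 * WARN_ON() checks above require before video_register_device() (the
 * wrapper around __video_register_device()) can succeed. Names other
 * than the v4l2 core symbols are hypothetical.
 */
#if 0	/* example only */
	vdev->v4l2_dev = &dev->v4l2_dev;		/* MUST be set */
	vdev->release = video_device_release_empty;	/* MUST be set */
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE |
			    V4L2_CAP_STREAMING;		/* MUST for non-subdevs */
	vdev->fops = &mydrv_fops;
	vdev->ioctl_ops = &mydrv_ioctl_ops;
	strscpy(vdev->name, "mydrv", sizeof(vdev->name));
	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1 /* any nr */);
	if (ret)
		return ret;
#endif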
/**
* video_unregister_device - unregister a video4linux device
* @vdev: the device to unregister
*
* This unregisters the passed device. Future open calls will
* be met with errors.
*/
void video_unregister_device(struct video_device *vdev)
{
/* Check if vdev was ever registered at all */
if (!vdev || !video_is_registered(vdev))
return;
mutex_lock(&videodev_lock);
/* This must be in a critical section to prevent a race with v4l2_open.
* Once this bit has been cleared video_get may never be called again.
*/
clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
mutex_unlock(&videodev_lock);
if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
v4l2_event_wake_all(vdev);
device_unregister(&vdev->dev);
}
EXPORT_SYMBOL(video_unregister_device);
#if defined(CONFIG_MEDIA_CONTROLLER)
__must_check int video_device_pipeline_start(struct video_device *vdev,
struct media_pipeline *pipe)
{
struct media_entity *entity = &vdev->entity;
if (entity->num_pads != 1)
return -ENODEV;
return media_pipeline_start(&entity->pads[0], pipe);
}
EXPORT_SYMBOL_GPL(video_device_pipeline_start);
__must_check int __video_device_pipeline_start(struct video_device *vdev,
struct media_pipeline *pipe)
{
struct media_entity *entity = &vdev->entity;
if (entity->num_pads != 1)
return -ENODEV;
return __media_pipeline_start(&entity->pads[0], pipe);
}
EXPORT_SYMBOL_GPL(__video_device_pipeline_start);
void video_device_pipeline_stop(struct video_device *vdev)
{
struct media_entity *entity = &vdev->entity;
if (WARN_ON(entity->num_pads != 1))
return;
return media_pipeline_stop(&entity->pads[0]);
}
EXPORT_SYMBOL_GPL(video_device_pipeline_stop);
void __video_device_pipeline_stop(struct video_device *vdev)
{
struct media_entity *entity = &vdev->entity;
if (WARN_ON(entity->num_pads != 1))
return;
return __media_pipeline_stop(&entity->pads[0]);
}
EXPORT_SYMBOL_GPL(__video_device_pipeline_stop);
__must_check int video_device_pipeline_alloc_start(struct video_device *vdev)
{
struct media_entity *entity = &vdev->entity;
if (entity->num_pads != 1)
return -ENODEV;
return media_pipeline_alloc_start(&entity->pads[0]);
}
EXPORT_SYMBOL_GPL(video_device_pipeline_alloc_start);
struct media_pipeline *video_device_pipeline(struct video_device *vdev)
{
struct media_entity *entity = &vdev->entity;
if (WARN_ON(entity->num_pads != 1))
return NULL;
return media_pad_pipeline(&entity->pads[0]);
}
EXPORT_SYMBOL_GPL(video_device_pipeline);
#endif /* CONFIG_MEDIA_CONTROLLER */
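/*
 * Illustrative sketch (editor's addition): the helpers above are meant
 * to bracket streaming on a video node with a single pad, typically
 * from a vb2 queue's start/stop callbacks. Names other than the
 * pipeline helpers and vb2 accessors are hypothetical.
 */
#if 0	/* example only */
static int mydrv_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct mydrv *dev = vb2_get_drv_priv(q);
	int ret;
	ret = video_device_pipeline_start(dev->vdev, &dev->pipe);
	if (ret)
		return ret;
	/* ... start the hardware ... */
	return 0;
}
static void mydrv_stop_streaming(struct vb2_queue *q)
{
	struct mydrv *dev = vb2_get_drv_priv(q);
	/* ... stop the hardware ... */
	video_device_pipeline_stop(dev->vdev);
}
#endif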
/*
* Initialise video for linux
*/
static int __init videodev_init(void)
{
dev_t dev = MKDEV(VIDEO_MAJOR, 0);
int ret;
pr_info("Linux video capture interface: v2.00\n");
ret = register_chrdev_region(dev, VIDEO_NUM_DEVICES, VIDEO_NAME);
if (ret < 0) {
pr_warn("videodev: unable to get major %d\n",
VIDEO_MAJOR);
return ret;
}
ret = class_register(&video_class);
if (ret < 0) {
unregister_chrdev_region(dev, VIDEO_NUM_DEVICES);
pr_warn("video_dev: class_register failed\n");
return -EIO;
}
return 0;
}
static void __exit videodev_exit(void)
{
dev_t dev = MKDEV(VIDEO_MAJOR, 0);
class_unregister(&video_class);
unregister_chrdev_region(dev, VIDEO_NUM_DEVICES);
}
subsys_initcall(videodev_init);
module_exit(videodev_exit);
MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <[email protected]>, Bill Dirks, Justin Schoeman, Gerd Knorr");
MODULE_DESCRIPTION("Video4Linux2 core driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR);
| linux-master | drivers/media/v4l2-core/v4l2-dev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* V4L2 sub-device
*
* Copyright (C) 2010 Nokia Corporation
*
* Contact: Laurent Pinchart <[email protected]>
* Sakari Ailus <[email protected]>
*/
#include <linux/export.h>
#include <linux/ioctl.h>
#include <linux/leds.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/videodev2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
* The Streams API is an experimental feature. To use the Streams API, set
* 'v4l2_subdev_enable_streams_api' to 1 below.
*/
static bool v4l2_subdev_enable_streams_api;
#endif
/*
* Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set
* of streams.
*
 * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: it restricts the total
 * number of streams in a pad, although the stream ID itself is not
 * restricted.
*/
#define V4L2_SUBDEV_MAX_STREAM_ID 63
#include "v4l2-subdev-priv.h"
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
struct v4l2_subdev_state *state;
static struct lock_class_key key;
state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
if (IS_ERR(state))
return PTR_ERR(state);
fh->state = state;
return 0;
}
static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
__v4l2_subdev_state_free(fh->state);
fh->state = NULL;
}
static int subdev_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
struct v4l2_subdev_fh *subdev_fh;
int ret;
subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
if (subdev_fh == NULL)
return -ENOMEM;
ret = subdev_fh_init(subdev_fh, sd);
if (ret) {
kfree(subdev_fh);
return ret;
}
v4l2_fh_init(&subdev_fh->vfh, vdev);
v4l2_fh_add(&subdev_fh->vfh);
file->private_data = &subdev_fh->vfh;
if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
struct module *owner;
owner = sd->entity.graph_obj.mdev->dev->driver->owner;
if (!try_module_get(owner)) {
ret = -EBUSY;
goto err;
}
subdev_fh->owner = owner;
}
if (sd->internal_ops && sd->internal_ops->open) {
ret = sd->internal_ops->open(sd, subdev_fh);
if (ret < 0)
goto err;
}
return 0;
err:
module_put(subdev_fh->owner);
v4l2_fh_del(&subdev_fh->vfh);
v4l2_fh_exit(&subdev_fh->vfh);
subdev_fh_free(subdev_fh);
kfree(subdev_fh);
return ret;
}
static int subdev_close(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
struct v4l2_fh *vfh = file->private_data;
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
if (sd->internal_ops && sd->internal_ops->close)
sd->internal_ops->close(sd, subdev_fh);
module_put(subdev_fh->owner);
v4l2_fh_del(vfh);
v4l2_fh_exit(vfh);
subdev_fh_free(subdev_fh);
kfree(subdev_fh);
file->private_data = NULL;
return 0;
}
#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static int subdev_open(struct file *file)
{
return -ENODEV;
}
static int subdev_close(struct file *file)
{
return -ENODEV;
}
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static inline int check_which(u32 which)
{
if (which != V4L2_SUBDEV_FORMAT_TRY &&
which != V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
return 0;
}
static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
if (sd->entity.num_pads) {
if (pad >= sd->entity.num_pads)
return -EINVAL;
return 0;
}
#endif
/* allow pad 0 on subdevices not registered as media entities */
if (pad > 0)
return -EINVAL;
return 0;
}
static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
u32 which, u32 pad, u32 stream)
{
if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
if (!v4l2_subdev_state_get_stream_format(state, pad, stream))
return -EINVAL;
return 0;
#else
return -EINVAL;
#endif
}
if (stream != 0)
return -EINVAL;
if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
return -EINVAL;
return 0;
}
static inline int check_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
if (!format)
return -EINVAL;
return check_which(format->which) ? : check_pad(sd, format->pad) ? :
check_state(sd, state, format->which, format->pad, format->stream);
}
static int call_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
return check_format(sd, state, format) ? :
sd->ops->pad->get_fmt(sd, state, format);
}
static int call_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
return check_format(sd, state, format) ? :
sd->ops->pad->set_fmt(sd, state, format);
}
static int call_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (!code)
return -EINVAL;
return check_which(code->which) ? : check_pad(sd, code->pad) ? :
check_state(sd, state, code->which, code->pad, code->stream) ? :
sd->ops->pad->enum_mbus_code(sd, state, code);
}
static int call_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (!fse)
return -EINVAL;
return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
sd->ops->pad->enum_frame_size(sd, state, fse);
}
static inline int check_frame_interval(struct v4l2_subdev *sd,
struct v4l2_subdev_frame_interval *fi)
{
if (!fi)
return -EINVAL;
return check_pad(sd, fi->pad);
}
static int call_g_frame_interval(struct v4l2_subdev *sd,
struct v4l2_subdev_frame_interval *fi)
{
return check_frame_interval(sd, fi) ? :
sd->ops->video->g_frame_interval(sd, fi);
}
static int call_s_frame_interval(struct v4l2_subdev *sd,
struct v4l2_subdev_frame_interval *fi)
{
return check_frame_interval(sd, fi) ? :
sd->ops->video->s_frame_interval(sd, fi);
}
static int call_enum_frame_interval(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_interval_enum *fie)
{
if (!fie)
return -EINVAL;
return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
sd->ops->pad->enum_frame_interval(sd, state, fie);
}
static inline int check_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel)
{
if (!sel)
return -EINVAL;
return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
check_state(sd, state, sel->which, sel->pad, sel->stream);
}
static int call_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel)
{
return check_selection(sd, state, sel) ? :
sd->ops->pad->get_selection(sd, state, sel);
}
static int call_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel)
{
return check_selection(sd, state, sel) ? :
sd->ops->pad->set_selection(sd, state, sel);
}
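/*
 * Editor's note: the "a ? : b" form used throughout these wrappers is
 * the GNU C conditional-with-omitted-middle extension: it evaluates its
 * first operand once and yields it if nonzero, else the second operand.
 * Chained, it returns the first nonzero error code, or the final call's
 * result:
 */
#if 0	/* example only */
	/* equivalent to: err = check_a(); if (!err) err = check_b(); ... */
	err = check_a() ?: check_b() ?: do_the_work();
#endif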
static inline int check_edid(struct v4l2_subdev *sd,
struct v4l2_subdev_edid *edid)
{
if (!edid)
return -EINVAL;
if (edid->blocks && edid->edid == NULL)
return -EINVAL;
return check_pad(sd, edid->pad);
}
static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
}
static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
}
static int call_dv_timings_cap(struct v4l2_subdev *sd,
struct v4l2_dv_timings_cap *cap)
{
if (!cap)
return -EINVAL;
return check_pad(sd, cap->pad) ? :
sd->ops->pad->dv_timings_cap(sd, cap);
}
static int call_enum_dv_timings(struct v4l2_subdev *sd,
struct v4l2_enum_dv_timings *dvt)
{
if (!dvt)
return -EINVAL;
return check_pad(sd, dvt->pad) ? :
sd->ops->pad->enum_dv_timings(sd, dvt);
}
static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_config *config)
{
return check_pad(sd, pad) ? :
sd->ops->pad->get_mbus_config(sd, pad, config);
}
static int call_s_stream(struct v4l2_subdev *sd, int enable)
{
int ret;
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
if (!IS_ERR_OR_NULL(sd->privacy_led)) {
if (enable)
led_set_brightness(sd->privacy_led,
sd->privacy_led->max_brightness);
else
led_set_brightness(sd->privacy_led, 0);
}
#endif
ret = sd->ops->video->s_stream(sd, enable);
if (!enable && ret < 0) {
dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
return 0;
}
return ret;
}
#ifdef CONFIG_MEDIA_CONTROLLER
/*
* Create state-management wrapper for pad ops dealing with subdev state. The
* wrapper handles the case where the caller does not provide the called
* subdev's state. This should be removed when all the callers are fixed.
*/
#define DEFINE_STATE_WRAPPER(f, arg_type) \
static int call_##f##_state(struct v4l2_subdev *sd, \
struct v4l2_subdev_state *_state, \
arg_type *arg) \
{ \
struct v4l2_subdev_state *state = _state; \
int ret; \
if (!_state) \
state = v4l2_subdev_lock_and_get_active_state(sd); \
ret = call_##f(sd, state, arg); \
if (!_state && state) \
v4l2_subdev_unlock_state(state); \
return ret; \
}
#else /* CONFIG_MEDIA_CONTROLLER */
#define DEFINE_STATE_WRAPPER(f, arg_type) \
static int call_##f##_state(struct v4l2_subdev *sd, \
struct v4l2_subdev_state *state, \
arg_type *arg) \
{ \
return call_##f(sd, state, arg); \
}
#endif /* CONFIG_MEDIA_CONTROLLER */
DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);
static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
.get_fmt = call_get_fmt_state,
.set_fmt = call_set_fmt_state,
.enum_mbus_code = call_enum_mbus_code_state,
.enum_frame_size = call_enum_frame_size_state,
.enum_frame_interval = call_enum_frame_interval_state,
.get_selection = call_get_selection_state,
.set_selection = call_set_selection_state,
.get_edid = call_get_edid,
.set_edid = call_set_edid,
.dv_timings_cap = call_dv_timings_cap,
.enum_dv_timings = call_enum_dv_timings,
.get_mbus_config = call_get_mbus_config,
};
static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
.g_frame_interval = call_g_frame_interval,
.s_frame_interval = call_s_frame_interval,
.s_stream = call_s_stream,
};
const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
.pad = &v4l2_subdev_call_pad_wrappers,
.video = &v4l2_subdev_call_video_wrappers,
};
EXPORT_SYMBOL(v4l2_subdev_call_wrappers);
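/*
 * Illustrative sketch (editor's addition): a bridge driver querying a
 * subdev's active format through v4l2_subdev_call(). With the wrappers
 * above, passing a NULL state makes the wrapper lock and use the
 * subdev's active state. The sensor_sd pointer is hypothetical.
 */
#if 0	/* example only */
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = 0,
	};
	int ret;
	ret = v4l2_subdev_call(sensor_sd, pad, get_fmt, NULL, &fmt);
	if (!ret)
		pr_debug("sensor: %ux%u\n", fmt.format.width,
			 fmt.format.height);
#endif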
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static struct v4l2_subdev_state *
subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
unsigned int cmd, void *arg)
{
u32 which;
switch (cmd) {
default:
return NULL;
case VIDIOC_SUBDEV_G_FMT:
case VIDIOC_SUBDEV_S_FMT:
which = ((struct v4l2_subdev_format *)arg)->which;
break;
case VIDIOC_SUBDEV_G_CROP:
case VIDIOC_SUBDEV_S_CROP:
which = ((struct v4l2_subdev_crop *)arg)->which;
break;
case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
break;
case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
break;
case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
break;
case VIDIOC_SUBDEV_G_SELECTION:
case VIDIOC_SUBDEV_S_SELECTION:
which = ((struct v4l2_subdev_selection *)arg)->which;
break;
case VIDIOC_SUBDEV_G_ROUTING:
case VIDIOC_SUBDEV_S_ROUTING:
which = ((struct v4l2_subdev_routing *)arg)->which;
break;
}
return which == V4L2_SUBDEV_FORMAT_TRY ?
subdev_fh->state :
v4l2_subdev_get_unlocked_active_state(sd);
}
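/*
 * Illustrative sketch (editor's addition): from userspace, the "which"
 * field selects between the per-filehandle TRY state returned above and
 * the device's active state. A hypothetical format negotiation against
 * the scratch TRY state might look like:
 */
#if 0	/* userspace example only */
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_TRY,	/* scratch state */
		.pad = 0,
	};
	ioctl(subdev_fd, VIDIOC_SUBDEV_G_FMT, &fmt);
	fmt.format.width = 1920;
	fmt.format.height = 1080;
	ioctl(subdev_fd, VIDIOC_SUBDEV_S_FMT, &fmt);	/* no hw change */
#endif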
static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
struct v4l2_subdev_state *state)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
struct v4l2_fh *vfh = file->private_data;
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
bool client_supports_streams = subdev_fh->client_caps &
V4L2_SUBDEV_CLIENT_CAP_STREAMS;
int rval;
switch (cmd) {
case VIDIOC_SUBDEV_QUERYCAP: {
struct v4l2_subdev_capability *cap = arg;
memset(cap->reserved, 0, sizeof(cap->reserved));
cap->version = LINUX_VERSION_CODE;
cap->capabilities =
(ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
(streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);
return 0;
}
case VIDIOC_QUERYCTRL:
/*
* TODO: this really should be folded into v4l2_queryctrl (this
* currently returns -EINVAL for NULL control handlers).
* However, v4l2_queryctrl() is still called directly by
* drivers as well and until that has been addressed I believe
* it is safer to do the check here. The same is true for the
* other control ioctls below.
*/
if (!vfh->ctrl_handler)
return -ENOTTY;
return v4l2_queryctrl(vfh->ctrl_handler, arg);
case VIDIOC_QUERY_EXT_CTRL:
if (!vfh->ctrl_handler)
return -ENOTTY;
return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);
case VIDIOC_QUERYMENU:
if (!vfh->ctrl_handler)
return -ENOTTY;
return v4l2_querymenu(vfh->ctrl_handler, arg);
case VIDIOC_G_CTRL:
if (!vfh->ctrl_handler)
return -ENOTTY;
return v4l2_g_ctrl(vfh->ctrl_handler, arg);
case VIDIOC_S_CTRL:
if (!vfh->ctrl_handler)
return -ENOTTY;
return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);
case VIDIOC_G_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
return v4l2_g_ext_ctrls(vfh->ctrl_handler,
vdev, sd->v4l2_dev->mdev, arg);
case VIDIOC_S_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
vdev, sd->v4l2_dev->mdev, arg);
case VIDIOC_TRY_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
return v4l2_try_ext_ctrls(vfh->ctrl_handler,
vdev, sd->v4l2_dev->mdev, arg);
case VIDIOC_DQEVENT:
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
return -ENOIOCTLCMD;
return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
case VIDIOC_SUBSCRIBE_EVENT:
return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
case VIDIOC_UNSUBSCRIBE_EVENT:
return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
#ifdef CONFIG_VIDEO_ADV_DEBUG
case VIDIOC_DBG_G_REGISTER:
{
struct v4l2_dbg_register *p = arg;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return v4l2_subdev_call(sd, core, g_register, p);
}
case VIDIOC_DBG_S_REGISTER:
{
struct v4l2_dbg_register *p = arg;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return v4l2_subdev_call(sd, core, s_register, p);
}
case VIDIOC_DBG_G_CHIP_INFO:
{
struct v4l2_dbg_chip_info *p = arg;
if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
return -EINVAL;
if (sd->ops->core && sd->ops->core->s_register)
p->flags |= V4L2_CHIP_FL_WRITABLE;
if (sd->ops->core && sd->ops->core->g_register)
p->flags |= V4L2_CHIP_FL_READABLE;
strscpy(p->name, sd->name, sizeof(p->name));
return 0;
}
#endif
case VIDIOC_LOG_STATUS: {
int ret;
pr_info("%s: ================= START STATUS =================\n",
sd->name);
ret = v4l2_subdev_call(sd, core, log_status);
pr_info("%s: ================== END STATUS ==================\n",
sd->name);
return ret;
}
case VIDIOC_SUBDEV_G_FMT: {
struct v4l2_subdev_format *format = arg;
if (!client_supports_streams)
format->stream = 0;
memset(format->reserved, 0, sizeof(format->reserved));
memset(format->format.reserved, 0, sizeof(format->format.reserved));
return v4l2_subdev_call(sd, pad, get_fmt, state, format);
}
case VIDIOC_SUBDEV_S_FMT: {
struct v4l2_subdev_format *format = arg;
if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
return -EPERM;
if (!client_supports_streams)
format->stream = 0;
memset(format->reserved, 0, sizeof(format->reserved));
memset(format->format.reserved, 0, sizeof(format->format.reserved));
return v4l2_subdev_call(sd, pad, set_fmt, state, format);
}
case VIDIOC_SUBDEV_G_CROP: {
struct v4l2_subdev_crop *crop = arg;
struct v4l2_subdev_selection sel;
if (!client_supports_streams)
crop->stream = 0;
memset(crop->reserved, 0, sizeof(crop->reserved));
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
sel.pad = crop->pad;
sel.target = V4L2_SEL_TGT_CROP;
rval = v4l2_subdev_call(
sd, pad, get_selection, state, &sel);
crop->rect = sel.r;
return rval;
}
case VIDIOC_SUBDEV_S_CROP: {
struct v4l2_subdev_crop *crop = arg;
struct v4l2_subdev_selection sel;
if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
return -EPERM;
if (!client_supports_streams)
crop->stream = 0;
memset(crop->reserved, 0, sizeof(crop->reserved));
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
sel.pad = crop->pad;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = crop->rect;
rval = v4l2_subdev_call(
sd, pad, set_selection, state, &sel);
crop->rect = sel.r;
return rval;
}
case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
struct v4l2_subdev_mbus_code_enum *code = arg;
if (!client_supports_streams)
code->stream = 0;
memset(code->reserved, 0, sizeof(code->reserved));
return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
code);
}
case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
struct v4l2_subdev_frame_size_enum *fse = arg;
if (!client_supports_streams)
fse->stream = 0;
memset(fse->reserved, 0, sizeof(fse->reserved));
return v4l2_subdev_call(sd, pad, enum_frame_size, state,
fse);
}
case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
struct v4l2_subdev_frame_interval *fi = arg;
if (!client_supports_streams)
fi->stream = 0;
memset(fi->reserved, 0, sizeof(fi->reserved));
return v4l2_subdev_call(sd, video, g_frame_interval, arg);
}
case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
struct v4l2_subdev_frame_interval *fi = arg;
if (ro_subdev)
return -EPERM;
if (!client_supports_streams)
fi->stream = 0;
memset(fi->reserved, 0, sizeof(fi->reserved));
return v4l2_subdev_call(sd, video, s_frame_interval, arg);
}
case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
struct v4l2_subdev_frame_interval_enum *fie = arg;
if (!client_supports_streams)
fie->stream = 0;
memset(fie->reserved, 0, sizeof(fie->reserved));
return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
fie);
}
case VIDIOC_SUBDEV_G_SELECTION: {
struct v4l2_subdev_selection *sel = arg;
if (!client_supports_streams)
sel->stream = 0;
memset(sel->reserved, 0, sizeof(sel->reserved));
return v4l2_subdev_call(
sd, pad, get_selection, state, sel);
}
case VIDIOC_SUBDEV_S_SELECTION: {
struct v4l2_subdev_selection *sel = arg;
if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
return -EPERM;
if (!client_supports_streams)
sel->stream = 0;
memset(sel->reserved, 0, sizeof(sel->reserved));
return v4l2_subdev_call(
sd, pad, set_selection, state, sel);
}
case VIDIOC_G_EDID: {
struct v4l2_subdev_edid *edid = arg;
return v4l2_subdev_call(sd, pad, get_edid, edid);
}
case VIDIOC_S_EDID: {
struct v4l2_subdev_edid *edid = arg;
return v4l2_subdev_call(sd, pad, set_edid, edid);
}
case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
struct v4l2_dv_timings_cap *cap = arg;
return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
}
case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
struct v4l2_enum_dv_timings *dvt = arg;
return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
}
case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
return v4l2_subdev_call(sd, video, query_dv_timings, arg);
case VIDIOC_SUBDEV_G_DV_TIMINGS:
return v4l2_subdev_call(sd, video, g_dv_timings, arg);
case VIDIOC_SUBDEV_S_DV_TIMINGS:
if (ro_subdev)
return -EPERM;
return v4l2_subdev_call(sd, video, s_dv_timings, arg);
case VIDIOC_SUBDEV_G_STD:
return v4l2_subdev_call(sd, video, g_std, arg);
case VIDIOC_SUBDEV_S_STD: {
v4l2_std_id *std = arg;
if (ro_subdev)
return -EPERM;
return v4l2_subdev_call(sd, video, s_std, *std);
}
case VIDIOC_SUBDEV_ENUMSTD: {
struct v4l2_standard *p = arg;
v4l2_std_id id;
if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
return -EINVAL;
return v4l_video_std_enumstd(p, id);
}
case VIDIOC_SUBDEV_QUERYSTD:
return v4l2_subdev_call(sd, video, querystd, arg);
case VIDIOC_SUBDEV_G_ROUTING: {
struct v4l2_subdev_routing *routing = arg;
struct v4l2_subdev_krouting *krouting;
if (!v4l2_subdev_enable_streams_api)
return -ENOIOCTLCMD;
if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
return -ENOIOCTLCMD;
memset(routing->reserved, 0, sizeof(routing->reserved));
krouting = &state->routing;
if (routing->num_routes < krouting->num_routes) {
routing->num_routes = krouting->num_routes;
return -ENOSPC;
}
memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
krouting->routes,
krouting->num_routes * sizeof(*krouting->routes));
routing->num_routes = krouting->num_routes;
return 0;
}
case VIDIOC_SUBDEV_S_ROUTING: {
struct v4l2_subdev_routing *routing = arg;
struct v4l2_subdev_route *routes =
(struct v4l2_subdev_route *)(uintptr_t)routing->routes;
struct v4l2_subdev_krouting krouting = {};
unsigned int i;
if (!v4l2_subdev_enable_streams_api)
return -ENOIOCTLCMD;
if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
return -ENOIOCTLCMD;
if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
return -EPERM;
memset(routing->reserved, 0, sizeof(routing->reserved));
for (i = 0; i < routing->num_routes; ++i) {
const struct v4l2_subdev_route *route = &routes[i];
const struct media_pad *pads = sd->entity.pads;
if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
return -EINVAL;
if (route->sink_pad >= sd->entity.num_pads)
return -EINVAL;
if (!(pads[route->sink_pad].flags &
MEDIA_PAD_FL_SINK))
return -EINVAL;
if (route->source_pad >= sd->entity.num_pads)
return -EINVAL;
if (!(pads[route->source_pad].flags &
MEDIA_PAD_FL_SOURCE))
return -EINVAL;
}
krouting.num_routes = routing->num_routes;
krouting.routes = routes;
return v4l2_subdev_call(sd, pad, set_routing, state,
routing->which, &krouting);
}
case VIDIOC_SUBDEV_G_CLIENT_CAP: {
struct v4l2_subdev_client_capability *client_cap = arg;
client_cap->capabilities = subdev_fh->client_caps;
return 0;
}
case VIDIOC_SUBDEV_S_CLIENT_CAP: {
struct v4l2_subdev_client_capability *client_cap = arg;
/*
* Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not
* enabled. Remove this when streams API is no longer
* experimental.
*/
if (!v4l2_subdev_enable_streams_api)
client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS;
/* Filter out unsupported capabilities */
client_cap->capabilities &= V4L2_SUBDEV_CLIENT_CAP_STREAMS;
subdev_fh->client_caps = client_cap->capabilities;
return 0;
}
default:
return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
}
return 0;
}
static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
{
struct video_device *vdev = video_devdata(file);
struct mutex *lock = vdev->lock;
long ret = -ENODEV;
if (lock && mutex_lock_interruptible(lock))
return -ERESTARTSYS;
if (video_is_registered(vdev)) {
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
struct v4l2_fh *vfh = file->private_data;
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
struct v4l2_subdev_state *state;
state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);
if (state)
v4l2_subdev_lock_state(state);
ret = subdev_do_ioctl(file, cmd, arg, state);
if (state)
v4l2_subdev_unlock_state(state);
}
if (lock)
mutex_unlock(lock);
return ret;
}
static long subdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
}
#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
}
#endif
#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static long subdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return -ENODEV;
}
#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
unsigned long arg)
{
return -ENODEV;
}
#endif
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
struct v4l2_fh *fh = file->private_data;
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
return EPOLLERR;
poll_wait(file, &fh->wait, wait);
if (v4l2_event_pending(fh))
return EPOLLPRI;
return 0;
}
const struct v4l2_file_operations v4l2_subdev_fops = {
.owner = THIS_MODULE,
.open = subdev_open,
.unlocked_ioctl = subdev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl32 = subdev_compat_ioctl32,
#endif
.release = subdev_close,
.poll = subdev_poll,
};
#ifdef CONFIG_MEDIA_CONTROLLER
int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
struct fwnode_endpoint *endpoint)
{
struct fwnode_handle *fwnode;
struct v4l2_subdev *sd;
if (!is_media_entity_v4l2_subdev(entity))
return -EINVAL;
sd = media_entity_to_v4l2_subdev(entity);
fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
fwnode_handle_put(fwnode);
if (device_match_fwnode(sd->dev, fwnode))
return endpoint->port;
return -ENXIO;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);
int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
struct media_link *link,
struct v4l2_subdev_format *source_fmt,
struct v4l2_subdev_format *sink_fmt)
{
bool pass = true;
/* The width, height and code must match. */
if (source_fmt->format.width != sink_fmt->format.width) {
dev_dbg(sd->entity.graph_obj.mdev->dev,
"%s: width does not match (source %u, sink %u)\n",
__func__,
source_fmt->format.width, sink_fmt->format.width);
pass = false;
}
if (source_fmt->format.height != sink_fmt->format.height) {
dev_dbg(sd->entity.graph_obj.mdev->dev,
"%s: height does not match (source %u, sink %u)\n",
__func__,
source_fmt->format.height, sink_fmt->format.height);
pass = false;
}
if (source_fmt->format.code != sink_fmt->format.code) {
dev_dbg(sd->entity.graph_obj.mdev->dev,
"%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
__func__,
source_fmt->format.code, sink_fmt->format.code);
pass = false;
}
/* The field order must match, or the sink field order must be NONE
* to support interlaced hardware connected to bridges that support
* progressive formats only.
*/
if (source_fmt->format.field != sink_fmt->format.field &&
sink_fmt->format.field != V4L2_FIELD_NONE) {
dev_dbg(sd->entity.graph_obj.mdev->dev,
"%s: field does not match (source %u, sink %u)\n",
__func__,
source_fmt->format.field, sink_fmt->format.field);
pass = false;
}
if (pass)
return 0;
dev_dbg(sd->entity.graph_obj.mdev->dev,
"%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
link->source->entity->name, link->source->index,
link->sink->entity->name, link->sink->index);
return -EPIPE;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream,
struct v4l2_subdev_format *fmt,
bool states_locked)
{
struct v4l2_subdev_state *state;
struct v4l2_subdev *sd;
int ret;
if (!is_media_entity_v4l2_subdev(pad->entity)) {
WARN(pad->entity->function != MEDIA_ENT_F_IO_V4L,
"Driver bug! Wrong media entity type 0x%08x, entity %s\n",
pad->entity->function, pad->entity->name);
return -EINVAL;
}
sd = media_entity_to_v4l2_subdev(pad->entity);
fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
fmt->pad = pad->index;
fmt->stream = stream;
if (states_locked)
state = v4l2_subdev_get_locked_active_state(sd);
else
state = v4l2_subdev_lock_and_get_active_state(sd);
ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt);
if (!states_locked && state)
v4l2_subdev_unlock_state(state);
return ret;
}
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static void __v4l2_link_validate_get_streams(struct media_pad *pad,
u64 *streams_mask,
bool states_locked)
{
struct v4l2_subdev_route *route;
struct v4l2_subdev_state *state;
struct v4l2_subdev *subdev;
subdev = media_entity_to_v4l2_subdev(pad->entity);
*streams_mask = 0;
if (states_locked)
state = v4l2_subdev_get_locked_active_state(subdev);
else
state = v4l2_subdev_lock_and_get_active_state(subdev);
if (WARN_ON(!state))
return;
for_each_active_route(&state->routing, route) {
u32 route_pad;
u32 route_stream;
if (pad->flags & MEDIA_PAD_FL_SOURCE) {
route_pad = route->source_pad;
route_stream = route->source_stream;
} else {
route_pad = route->sink_pad;
route_stream = route->sink_stream;
}
if (route_pad != pad->index)
continue;
*streams_mask |= BIT_ULL(route_stream);
}
if (!states_locked)
v4l2_subdev_unlock_state(state);
}
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static void v4l2_link_validate_get_streams(struct media_pad *pad,
u64 *streams_mask,
bool states_locked)
{
struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity);
if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) {
/* Non-streams subdevs have an implicit stream 0 */
*streams_mask = BIT_ULL(0);
return;
}
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
__v4l2_link_validate_get_streams(pad, streams_mask, states_locked);
#else
/* This shouldn't happen */
*streams_mask = 0;
#endif
}
static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked)
{
struct v4l2_subdev *sink_subdev =
media_entity_to_v4l2_subdev(link->sink->entity);
struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
u64 source_streams_mask;
u64 sink_streams_mask;
u64 dangling_sink_streams;
u32 stream;
int ret;
dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
link->source->entity->name, link->source->index,
link->sink->entity->name, link->sink->index);
v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked);
v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked);
/*
* It is ok to have more source streams than sink streams as extra
* source streams can just be ignored by the receiver, but having extra
* sink streams is an error as streams must have a source.
*/
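	/*
	 * Example: source_streams_mask = 0x3 (streams 0 and 1) and
	 * sink_streams_mask = 0x5 (streams 0 and 2) give
	 * dangling_sink_streams = (0x3 ^ 0x5) & 0x5 = 0x4, i.e. sink
	 * stream 2 has no source.
	 */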
dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
sink_streams_mask;
if (dangling_sink_streams) {
dev_err(dev, "Dangling sink streams: mask %#llx\n",
dangling_sink_streams);
return -EINVAL;
}
/* Validate source and sink stream formats */
for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) {
struct v4l2_subdev_format sink_fmt, source_fmt;
if (!(sink_streams_mask & BIT_ULL(stream)))
continue;
dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
link->source->entity->name, link->source->index, stream,
link->sink->entity->name, link->sink->index, stream);
ret = v4l2_subdev_link_validate_get_format(link->source, stream,
&source_fmt, states_locked);
if (ret < 0) {
dev_dbg(dev,
"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
link->source->entity->name, link->source->index,
stream);
continue;
}
ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
&sink_fmt, states_locked);
if (ret < 0) {
dev_dbg(dev,
"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
link->sink->entity->name, link->sink->index,
stream);
continue;
}
/* TODO: add stream number to link_validate() */
ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
&source_fmt, &sink_fmt);
if (!ret)
continue;
if (ret != -ENOIOCTLCMD)
return ret;
ret = v4l2_subdev_link_validate_default(sink_subdev, link,
&source_fmt, &sink_fmt);
if (ret)
return ret;
}
return 0;
}
int v4l2_subdev_link_validate(struct media_link *link)
{
struct v4l2_subdev *source_sd, *sink_sd;
struct v4l2_subdev_state *source_state, *sink_state;
bool states_locked;
int ret;
if (!is_media_entity_v4l2_subdev(link->sink->entity) ||
!is_media_entity_v4l2_subdev(link->source->entity)) {
pr_warn_once("%s of link '%s':%u->'%s':%u is not a V4L2 sub-device, driver bug!\n",
!is_media_entity_v4l2_subdev(link->sink->entity) ?
"sink" : "source",
link->source->entity->name, link->source->index,
link->sink->entity->name, link->sink->index);
return 0;
}
sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
source_sd = media_entity_to_v4l2_subdev(link->source->entity);
sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd);
source_state = v4l2_subdev_get_unlocked_active_state(source_sd);
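	/*
	 * Lock both active states up front only when both subdevs have
	 * one; otherwise each format/stream lookup below locks the
	 * relevant state on demand.
	 */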
states_locked = sink_state && source_state;
if (states_locked) {
v4l2_subdev_lock_state(sink_state);
v4l2_subdev_lock_state(source_state);
}
ret = v4l2_subdev_link_validate_locked(link, states_locked);
if (states_locked) {
v4l2_subdev_unlock_state(sink_state);
v4l2_subdev_unlock_state(source_state);
}
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
unsigned int pad0, unsigned int pad1)
{
struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
struct v4l2_subdev_krouting *routing;
struct v4l2_subdev_state *state;
unsigned int i;
state = v4l2_subdev_lock_and_get_active_state(sd);
routing = &state->routing;
for (i = 0; i < routing->num_routes; ++i) {
struct v4l2_subdev_route *route = &routing->routes[i];
if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
continue;
if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
(route->source_pad == pad0 && route->sink_pad == pad1)) {
v4l2_subdev_unlock_state(state);
return true;
}
}
v4l2_subdev_unlock_state(state);
return false;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep);
struct v4l2_subdev_state *
__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
struct lock_class_key *lock_key)
{
struct v4l2_subdev_state *state;
int ret;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return ERR_PTR(-ENOMEM);
__mutex_init(&state->_lock, lock_name, lock_key);
if (sd->state_lock)
state->lock = sd->state_lock;
else
state->lock = &state->_lock;
/* Drivers that support streams do not need the legacy pad config */
if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
state->pads = kvcalloc(sd->entity.num_pads,
sizeof(*state->pads), GFP_KERNEL);
if (!state->pads) {
ret = -ENOMEM;
goto err;
}
}
/*
* There can be no race at this point, but we lock the state anyway to
* satisfy lockdep checks.
*/
v4l2_subdev_lock_state(state);
ret = v4l2_subdev_call(sd, pad, init_cfg, state);
v4l2_subdev_unlock_state(state);
if (ret < 0 && ret != -ENOIOCTLCMD)
goto err;
return state;
err:
if (state && state->pads)
kvfree(state->pads);
kfree(state);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);
void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
{
if (!state)
return;
mutex_destroy(&state->_lock);
kfree(state->routing.routes);
kvfree(state->stream_configs.configs);
kvfree(state->pads);
kfree(state);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);
int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
struct lock_class_key *key)
{
struct v4l2_subdev_state *state;
state = __v4l2_subdev_state_alloc(sd, name, key);
if (IS_ERR(state))
return PTR_ERR(state);
sd->active_state = state;
return 0;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);
void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
{
struct v4l2_async_subdev_endpoint *ase, *ase_tmp;
__v4l2_subdev_state_free(sd->active_state);
sd->active_state = NULL;
if (list_empty(&sd->async_subdev_endpoint_list))
return;
list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list,
async_subdev_endpoint_entry) {
list_del(&ase->async_subdev_endpoint_entry);
kfree(ase);
}
}
EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int
v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
const struct v4l2_subdev_krouting *routing)
{
struct v4l2_subdev_stream_configs new_configs = { 0 };
struct v4l2_subdev_route *route;
u32 idx;
/* Count number of formats needed */
for_each_active_route(routing, route) {
/*
* Each route needs a format on both ends of the route.
*/
new_configs.num_configs += 2;
}
if (new_configs.num_configs) {
new_configs.configs = kvcalloc(new_configs.num_configs,
sizeof(*new_configs.configs),
GFP_KERNEL);
if (!new_configs.configs)
return -ENOMEM;
}
/*
	 * Fill in the 'pad' and 'stream' values for each item in the array
	 * from the routing table.
*/
idx = 0;
for_each_active_route(routing, route) {
new_configs.configs[idx].pad = route->sink_pad;
new_configs.configs[idx].stream = route->sink_stream;
idx++;
new_configs.configs[idx].pad = route->source_pad;
new_configs.configs[idx].stream = route->source_stream;
idx++;
}
kvfree(stream_configs->configs);
*stream_configs = new_configs;
return 0;
}
int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt;
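	/*
	 * Streams-aware subdevs store formats per (pad, stream) pair,
	 * others use the legacy per-pad format where only stream 0 is
	 * valid.
	 */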
if (sd->flags & V4L2_SUBDEV_FL_STREAMS)
fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
format->stream);
else if (format->pad < sd->entity.num_pads && format->stream == 0)
fmt = v4l2_subdev_get_pad_format(sd, state, format->pad);
else
fmt = NULL;
if (!fmt)
return -EINVAL;
format->format = *fmt;
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);
int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
const struct v4l2_subdev_krouting *routing)
{
struct v4l2_subdev_krouting *dst = &state->routing;
const struct v4l2_subdev_krouting *src = routing;
struct v4l2_subdev_krouting new_routing = { 0 };
size_t bytes;
int r;
if (unlikely(check_mul_overflow((size_t)src->num_routes,
sizeof(*src->routes), &bytes)))
return -EOVERFLOW;
lockdep_assert_held(state->lock);
if (src->num_routes > 0) {
new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL);
if (!new_routing.routes)
return -ENOMEM;
}
new_routing.num_routes = src->num_routes;
r = v4l2_subdev_init_stream_configs(&state->stream_configs,
&new_routing);
if (r) {
kfree(new_routing.routes);
return r;
}
kfree(dst->routes);
*dst = new_routing;
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);
struct v4l2_subdev_route *
__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
struct v4l2_subdev_route *route)
{
if (route)
++route;
else
route = &routing->routes[0];
for (; route < routing->routes + routing->num_routes; ++route) {
if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
continue;
return route;
}
return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);
int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
const struct v4l2_subdev_krouting *routing,
const struct v4l2_mbus_framefmt *fmt)
{
struct v4l2_subdev_stream_configs *stream_configs;
unsigned int i;
int ret;
ret = v4l2_subdev_set_routing(sd, state, routing);
if (ret)
return ret;
stream_configs = &state->stream_configs;
for (i = 0; i < stream_configs->num_configs; ++i)
stream_configs->configs[i].fmt = *fmt;
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);
struct v4l2_mbus_framefmt *
v4l2_subdev_state_get_stream_format(struct v4l2_subdev_state *state,
unsigned int pad, u32 stream)
{
struct v4l2_subdev_stream_configs *stream_configs;
unsigned int i;
lockdep_assert_held(state->lock);
stream_configs = &state->stream_configs;
for (i = 0; i < stream_configs->num_configs; ++i) {
if (stream_configs->configs[i].pad == pad &&
stream_configs->configs[i].stream == stream)
return &stream_configs->configs[i].fmt;
}
return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_format);
struct v4l2_rect *
v4l2_subdev_state_get_stream_crop(struct v4l2_subdev_state *state,
unsigned int pad, u32 stream)
{
struct v4l2_subdev_stream_configs *stream_configs;
unsigned int i;
lockdep_assert_held(state->lock);
stream_configs = &state->stream_configs;
for (i = 0; i < stream_configs->num_configs; ++i) {
if (stream_configs->configs[i].pad == pad &&
stream_configs->configs[i].stream == stream)
return &stream_configs->configs[i].crop;
}
return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_crop);
struct v4l2_rect *
v4l2_subdev_state_get_stream_compose(struct v4l2_subdev_state *state,
unsigned int pad, u32 stream)
{
struct v4l2_subdev_stream_configs *stream_configs;
unsigned int i;
lockdep_assert_held(state->lock);
stream_configs = &state->stream_configs;
for (i = 0; i < stream_configs->num_configs; ++i) {
if (stream_configs->configs[i].pad == pad &&
stream_configs->configs[i].stream == stream)
return &stream_configs->configs[i].compose;
}
return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_compose);
int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
u32 pad, u32 stream, u32 *other_pad,
u32 *other_stream)
{
unsigned int i;
for (i = 0; i < routing->num_routes; ++i) {
struct v4l2_subdev_route *route = &routing->routes[i];
if (route->source_pad == pad &&
route->source_stream == stream) {
if (other_pad)
*other_pad = route->sink_pad;
if (other_stream)
*other_stream = route->sink_stream;
return 0;
}
if (route->sink_pad == pad && route->sink_stream == stream) {
if (other_pad)
*other_pad = route->source_pad;
if (other_stream)
*other_stream = route->source_stream;
return 0;
}
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end);
struct v4l2_mbus_framefmt *
v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
u32 pad, u32 stream)
{
u32 other_pad, other_stream;
int ret;
ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
pad, stream,
&other_pad, &other_stream);
if (ret)
return NULL;
return v4l2_subdev_state_get_stream_format(state, other_pad,
other_stream);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format);
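/*
 * Translate a mask of streams on pad0 to the corresponding streams on
 * pad1 by following the active routes, clearing unrouted bits from
 * *streams. For example, if the only active route connects stream 2 on
 * pad0 to stream 0 on pad1, passing *streams = BIT_ULL(2) returns
 * BIT_ULL(0) and leaves *streams = BIT_ULL(2).
 */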
u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
u32 pad0, u32 pad1, u64 *streams)
{
const struct v4l2_subdev_krouting *routing = &state->routing;
struct v4l2_subdev_route *route;
u64 streams0 = 0;
u64 streams1 = 0;
for_each_active_route(routing, route) {
if (route->sink_pad == pad0 && route->source_pad == pad1 &&
(*streams & BIT_ULL(route->sink_stream))) {
streams0 |= BIT_ULL(route->sink_stream);
streams1 |= BIT_ULL(route->source_stream);
}
if (route->source_pad == pad0 && route->sink_pad == pad1 &&
(*streams & BIT_ULL(route->source_stream))) {
streams0 |= BIT_ULL(route->source_stream);
streams1 |= BIT_ULL(route->sink_stream);
}
}
*streams = streams0;
return streams1;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams);
int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
const struct v4l2_subdev_krouting *routing,
enum v4l2_subdev_routing_restriction disallow)
{
u32 *remote_pads = NULL;
unsigned int i, j;
int ret = -EINVAL;
if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX |
V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) {
remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads),
GFP_KERNEL);
if (!remote_pads)
return -ENOMEM;
for (i = 0; i < sd->entity.num_pads; ++i)
remote_pads[i] = U32_MAX;
}
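	/*
	 * remote_pads[] tracks, for each pad, the opposite pad of the
	 * first route using it (U32_MAX meaning unused so far); later
	 * routes are compared against it by the stream mixing and
	 * multiplexing checks below.
	 */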
for (i = 0; i < routing->num_routes; ++i) {
const struct v4l2_subdev_route *route = &routing->routes[i];
/* Validate the sink and source pad numbers. */
if (route->sink_pad >= sd->entity.num_pads ||
!(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) {
dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n",
i, route->sink_pad);
goto out;
}
if (route->source_pad >= sd->entity.num_pads ||
!(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) {
dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n",
i, route->source_pad);
goto out;
}
/*
* V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a
* sink pad must be routed to a single source pad.
*/
if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) {
if (remote_pads[route->sink_pad] != U32_MAX &&
remote_pads[route->sink_pad] != route->source_pad) {
dev_dbg(sd->dev,
"route %u attempts to mix %s streams\n",
i, "sink");
goto out;
}
}
/*
* V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a
* source pad must originate from a single sink pad.
*/
if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) {
if (remote_pads[route->source_pad] != U32_MAX &&
remote_pads[route->source_pad] != route->sink_pad) {
dev_dbg(sd->dev,
"route %u attempts to mix %s streams\n",
i, "source");
goto out;
}
}
/*
* V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink
	 * side cannot do stream multiplexing, i.e. there can be only
* a single stream in a sink pad.
*/
if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) {
if (remote_pads[route->sink_pad] != U32_MAX) {
dev_dbg(sd->dev,
"route %u attempts to multiplex on %s pad %u\n",
i, "sink", route->sink_pad);
goto out;
}
}
/*
* V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the
	 * source side cannot do stream multiplexing, i.e. there can
* be only a single stream in a source pad.
*/
if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) {
if (remote_pads[route->source_pad] != U32_MAX) {
dev_dbg(sd->dev,
"route %u attempts to multiplex on %s pad %u\n",
i, "source", route->source_pad);
goto out;
}
}
if (remote_pads) {
remote_pads[route->sink_pad] = route->source_pad;
remote_pads[route->source_pad] = route->sink_pad;
}
for (j = i + 1; j < routing->num_routes; ++j) {
const struct v4l2_subdev_route *r = &routing->routes[j];
/*
* V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can
* originate from the same (sink) stream.
*/
if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
route->sink_pad == r->sink_pad &&
route->sink_stream == r->sink_stream) {
dev_dbg(sd->dev,
"routes %u and %u originate from same sink (%u/%u)\n",
i, j, route->sink_pad,
route->sink_stream);
goto out;
}
/*
* V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end
* at the same (source) stream.
*/
if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
route->source_pad == r->source_pad &&
route->source_stream == r->source_stream) {
dev_dbg(sd->dev,
"routes %u and %u end at same source (%u/%u)\n",
i, j, route->source_pad,
route->source_stream);
goto out;
}
}
}
ret = 0;
out:
kfree(remote_pads);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate);
static int v4l2_subdev_enable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
u64 streams_mask)
{
struct device *dev = sd->entity.graph_obj.mdev->dev;
unsigned int i;
int ret;
/*
	 * The subdev doesn't implement pad-based stream enable, so fall back
* on the .s_stream() operation. This can only be done for subdevs that
* have a single source pad, as sd->enabled_streams is global to the
* subdev.
*/
if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
return -EOPNOTSUPP;
for (i = 0; i < sd->entity.num_pads; ++i) {
if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
return -EOPNOTSUPP;
}
if (sd->enabled_streams & streams_mask) {
dev_dbg(dev, "set of streams %#llx already enabled on %s:%u\n",
streams_mask, sd->entity.name, pad);
return -EALREADY;
}
/* Start streaming when the first streams are enabled. */
if (!sd->enabled_streams) {
ret = v4l2_subdev_call(sd, video, s_stream, 1);
if (ret)
return ret;
}
sd->enabled_streams |= streams_mask;
return 0;
}
int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
u64 streams_mask)
{
struct device *dev = sd->entity.graph_obj.mdev->dev;
struct v4l2_subdev_state *state;
u64 found_streams = 0;
unsigned int i;
int ret;
/* A few basic sanity checks first. */
if (pad >= sd->entity.num_pads)
return -EINVAL;
if (!streams_mask)
return 0;
/* Fallback on .s_stream() if .enable_streams() isn't available. */
if (!sd->ops->pad || !sd->ops->pad->enable_streams)
return v4l2_subdev_enable_streams_fallback(sd, pad,
streams_mask);
state = v4l2_subdev_lock_and_get_active_state(sd);
/*
* Verify that the requested streams exist and that they are not
* already enabled.
*/
for (i = 0; i < state->stream_configs.num_configs; ++i) {
struct v4l2_subdev_stream_config *cfg =
&state->stream_configs.configs[i];
if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
continue;
found_streams |= BIT_ULL(cfg->stream);
if (cfg->enabled) {
dev_dbg(dev, "stream %u already enabled on %s:%u\n",
cfg->stream, sd->entity.name, pad);
ret = -EALREADY;
goto done;
}
}
if (found_streams != streams_mask) {
dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
streams_mask & ~found_streams, sd->entity.name, pad);
ret = -EINVAL;
goto done;
}
dev_dbg(dev, "enable streams %u:%#llx\n", pad, streams_mask);
/* Call the .enable_streams() operation. */
ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
streams_mask);
if (ret) {
dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad,
streams_mask, ret);
goto done;
}
/* Mark the streams as enabled. */
for (i = 0; i < state->stream_configs.num_configs; ++i) {
struct v4l2_subdev_stream_config *cfg =
&state->stream_configs.configs[i];
if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
cfg->enabled = true;
}
done:
v4l2_subdev_unlock_state(state);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams);
static int v4l2_subdev_disable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
u64 streams_mask)
{
struct device *dev = sd->entity.graph_obj.mdev->dev;
unsigned int i;
int ret;
/*
	 * If the subdev doesn't implement pad-based stream disable, fall back
* on the .s_stream() operation. This can only be done for subdevs that
* have a single source pad, as sd->enabled_streams is global to the
* subdev.
*/
if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
return -EOPNOTSUPP;
for (i = 0; i < sd->entity.num_pads; ++i) {
if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
return -EOPNOTSUPP;
}
if ((sd->enabled_streams & streams_mask) != streams_mask) {
dev_dbg(dev, "set of streams %#llx already disabled on %s:%u\n",
streams_mask, sd->entity.name, pad);
return -EALREADY;
}
/* Stop streaming when the last streams are disabled. */
if (!(sd->enabled_streams & ~streams_mask)) {
ret = v4l2_subdev_call(sd, video, s_stream, 0);
if (ret)
return ret;
}
sd->enabled_streams &= ~streams_mask;
return 0;
}
int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
u64 streams_mask)
{
struct device *dev = sd->entity.graph_obj.mdev->dev;
struct v4l2_subdev_state *state;
u64 found_streams = 0;
unsigned int i;
int ret;
/* A few basic sanity checks first. */
if (pad >= sd->entity.num_pads)
return -EINVAL;
if (!streams_mask)
return 0;
/* Fallback on .s_stream() if .disable_streams() isn't available. */
if (!sd->ops->pad || !sd->ops->pad->disable_streams)
return v4l2_subdev_disable_streams_fallback(sd, pad,
streams_mask);
state = v4l2_subdev_lock_and_get_active_state(sd);
/*
* Verify that the requested streams exist and that they are not
* already disabled.
*/
for (i = 0; i < state->stream_configs.num_configs; ++i) {
struct v4l2_subdev_stream_config *cfg =
&state->stream_configs.configs[i];
if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
continue;
found_streams |= BIT_ULL(cfg->stream);
if (!cfg->enabled) {
dev_dbg(dev, "stream %u already disabled on %s:%u\n",
cfg->stream, sd->entity.name, pad);
ret = -EALREADY;
goto done;
}
}
if (found_streams != streams_mask) {
dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
streams_mask & ~found_streams, sd->entity.name, pad);
ret = -EINVAL;
goto done;
}
dev_dbg(dev, "disable streams %u:%#llx\n", pad, streams_mask);
/* Call the .disable_streams() operation. */
ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
streams_mask);
if (ret) {
dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad,
streams_mask, ret);
goto done;
}
/* Mark the streams as disabled. */
for (i = 0; i < state->stream_configs.num_configs; ++i) {
struct v4l2_subdev_stream_config *cfg =
&state->stream_configs.configs[i];
if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
cfg->enabled = false;
}
done:
v4l2_subdev_unlock_state(state);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams);
int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
{
struct v4l2_subdev_state *state;
struct v4l2_subdev_route *route;
struct media_pad *pad;
u64 source_mask = 0;
int pad_index = -1;
/*
* Find the source pad. This helper is meant for subdevs that have a
* single source pad, so failures shouldn't happen, but catch them
* loudly nonetheless as they indicate a driver bug.
*/
media_entity_for_each_pad(&sd->entity, pad) {
if (pad->flags & MEDIA_PAD_FL_SOURCE) {
pad_index = pad->index;
break;
}
}
if (WARN_ON(pad_index == -1))
return -EINVAL;
/*
* As there's a single source pad, just collect all the source streams.
*/
state = v4l2_subdev_lock_and_get_active_state(sd);
for_each_active_route(&state->routing, route)
source_mask |= BIT_ULL(route->source_stream);
v4l2_subdev_unlock_state(state);
if (enable)
return v4l2_subdev_enable_streams(sd, pad_index, source_mask);
else
return v4l2_subdev_disable_streams(sd, pad_index, source_mask);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper);
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
#endif /* CONFIG_MEDIA_CONTROLLER */
void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
{
INIT_LIST_HEAD(&sd->list);
BUG_ON(!ops);
sd->ops = ops;
sd->v4l2_dev = NULL;
sd->flags = 0;
sd->name[0] = '\0';
sd->grp_id = 0;
sd->dev_priv = NULL;
sd->host_priv = NULL;
sd->privacy_led = NULL;
INIT_LIST_HEAD(&sd->async_subdev_endpoint_list);
#if defined(CONFIG_MEDIA_CONTROLLER)
sd->entity.name = sd->name;
sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
#endif
}
EXPORT_SYMBOL(v4l2_subdev_init);
void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
const struct v4l2_event *ev)
{
v4l2_event_queue(sd->devnode, ev);
v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
sd->privacy_led = led_get(sd->dev, "privacy-led");
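	/* A missing privacy LED (-ENOENT) is not an error: it is optional. */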
if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
"getting privacy LED\n");
if (!IS_ERR_OR_NULL(sd->privacy_led)) {
mutex_lock(&sd->privacy_led->led_access);
led_sysfs_disable(sd->privacy_led);
led_trigger_remove(sd->privacy_led);
led_set_brightness(sd->privacy_led, 0);
mutex_unlock(&sd->privacy_led->led_access);
}
#endif
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led);
void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
if (!IS_ERR_OR_NULL(sd->privacy_led)) {
mutex_lock(&sd->privacy_led->led_access);
led_sysfs_enable(sd->privacy_led);
mutex_unlock(&sd->privacy_led->led_access);
led_put(sd->privacy_led);
}
#endif
}
EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);
| linux-master | drivers/media/v4l2-core/v4l2-subdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* v4l2-event.c
*
* V4L2 events.
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
* Contact: Sakari Ailus <[email protected]>
*/
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
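/*
 * sev->events is used as a ring buffer of sev->elems entries, with
 * sev->first indexing the oldest queued event; sev_pos() maps a logical
 * index onto the ring. For example, with elems = 4 and first = 3,
 * idx = 2 maps to (3 + 2) - 4 = 1.
 */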
static unsigned int sev_pos(const struct v4l2_subscribed_event *sev, unsigned int idx)
{
idx += sev->first;
return idx >= sev->elems ? idx - sev->elems : idx;
}
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
struct v4l2_kevent *kev;
struct timespec64 ts;
unsigned long flags;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
if (list_empty(&fh->available)) {
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
return -ENOENT;
}
WARN_ON(fh->navailable == 0);
kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
list_del(&kev->list);
fh->navailable--;
kev->event.pending = fh->navailable;
*event = kev->event;
ts = ns_to_timespec64(kev->ts);
event->timestamp.tv_sec = ts.tv_sec;
event->timestamp.tv_nsec = ts.tv_nsec;
kev->sev->first = sev_pos(kev->sev, 1);
kev->sev->in_use--;
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
return 0;
}
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
int nonblocking)
{
int ret;
if (nonblocking)
return __v4l2_event_dequeue(fh, event);
/* Release the vdev lock while waiting */
if (fh->vdev->lock)
mutex_unlock(fh->vdev->lock);
do {
ret = wait_event_interruptible(fh->wait,
fh->navailable != 0);
if (ret < 0)
break;
ret = __v4l2_event_dequeue(fh, event);
} while (ret == -ENOENT);
if (fh->vdev->lock)
mutex_lock(fh->vdev->lock);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
struct v4l2_fh *fh, u32 type, u32 id)
{
struct v4l2_subscribed_event *sev;
assert_spin_locked(&fh->vdev->fh_lock);
list_for_each_entry(sev, &fh->subscribed, list)
if (sev->type == type && sev->id == id)
return sev;
return NULL;
}
static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
const struct v4l2_event *ev, u64 ts)
{
struct v4l2_subscribed_event *sev;
struct v4l2_kevent *kev;
bool copy_payload = true;
/* Are we subscribed? */
sev = v4l2_event_subscribed(fh, ev->type, ev->id);
if (sev == NULL)
return;
/* Increase event sequence number on fh. */
fh->sequence++;
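	/*
	 * If the queue is full the oldest event is dropped. A single-slot
	 * subscription may overwrite it in place via ops->replace(), while
	 * larger queues can fold it into the next oldest via ops->merge()
	 * (see v4l2_event_src_replace/merge below).
	 */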
/* Do we have any free events? */
if (sev->in_use == sev->elems) {
/* no, remove the oldest one */
kev = sev->events + sev_pos(sev, 0);
list_del(&kev->list);
sev->in_use--;
sev->first = sev_pos(sev, 1);
fh->navailable--;
if (sev->elems == 1) {
if (sev->ops && sev->ops->replace) {
sev->ops->replace(&kev->event, ev);
copy_payload = false;
}
} else if (sev->ops && sev->ops->merge) {
struct v4l2_kevent *second_oldest =
sev->events + sev_pos(sev, 0);
sev->ops->merge(&kev->event, &second_oldest->event);
}
}
/* Take one and fill it. */
kev = sev->events + sev_pos(sev, sev->in_use);
kev->event.type = ev->type;
if (copy_payload)
kev->event.u = ev->u;
kev->event.id = ev->id;
kev->ts = ts;
kev->event.sequence = fh->sequence;
sev->in_use++;
list_add_tail(&kev->list, &fh->available);
fh->navailable++;
wake_up_all(&fh->wait);
}
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
struct v4l2_fh *fh;
unsigned long flags;
u64 ts;
if (vdev == NULL)
return;
ts = ktime_get_ns();
spin_lock_irqsave(&vdev->fh_lock, flags);
list_for_each_entry(fh, &vdev->fh_list, list)
__v4l2_event_queue_fh(fh, ev, ts);
spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
unsigned long flags;
u64 ts = ktime_get_ns();
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
__v4l2_event_queue_fh(fh, ev, ts);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
int v4l2_event_pending(struct v4l2_fh *fh)
{
return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
void v4l2_event_wake_all(struct video_device *vdev)
{
struct v4l2_fh *fh;
unsigned long flags;
if (!vdev)
return;
spin_lock_irqsave(&vdev->fh_lock, flags);
list_for_each_entry(fh, &vdev->fh_list, list)
wake_up_all(&fh->wait);
spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_wake_all);
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
struct v4l2_fh *fh = sev->fh;
unsigned int i;
lockdep_assert_held(&fh->subscribe_lock);
assert_spin_locked(&fh->vdev->fh_lock);
/* Remove any pending events for this subscription */
for (i = 0; i < sev->in_use; i++) {
list_del(&sev->events[sev_pos(sev, i)].list);
fh->navailable--;
}
list_del(&sev->list);
}
int v4l2_event_subscribe(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub, unsigned int elems,
const struct v4l2_subscribed_event_ops *ops)
{
struct v4l2_subscribed_event *sev, *found_ev;
unsigned long flags;
unsigned int i;
int ret = 0;
if (sub->type == V4L2_EVENT_ALL)
return -EINVAL;
if (elems < 1)
elems = 1;
sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
if (!sev)
return -ENOMEM;
for (i = 0; i < elems; i++)
sev->events[i].sev = sev;
sev->type = sub->type;
sev->id = sub->id;
sev->flags = sub->flags;
sev->fh = fh;
sev->ops = ops;
sev->elems = elems;
mutex_lock(&fh->subscribe_lock);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
if (!found_ev)
list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
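	/*
	 * sev is published on the subscribed list before ops->add() runs,
	 * so events arriving meanwhile are already queued; on failure the
	 * subscription is unwound under the same spinlock.
	 */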
if (found_ev) {
/* Already listening */
kvfree(sev);
} else if (sev->ops && sev->ops->add) {
ret = sev->ops->add(sev, elems);
if (ret) {
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
__v4l2_event_unsubscribe(sev);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
kvfree(sev);
}
}
mutex_unlock(&fh->subscribe_lock);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
struct v4l2_event_subscription sub;
struct v4l2_subscribed_event *sev;
unsigned long flags;
do {
sev = NULL;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
if (!list_empty(&fh->subscribed)) {
sev = list_first_entry(&fh->subscribed,
struct v4l2_subscribed_event, list);
sub.type = sev->type;
sub.id = sev->id;
}
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (sev)
v4l2_event_unsubscribe(fh, &sub);
} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
struct v4l2_subscribed_event *sev;
unsigned long flags;
if (sub->type == V4L2_EVENT_ALL) {
v4l2_event_unsubscribe_all(fh);
return 0;
}
mutex_lock(&fh->subscribe_lock);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
if (sev != NULL)
__v4l2_event_unsubscribe(sev);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (sev && sev->ops && sev->ops->del)
sev->ops->del(sev);
mutex_unlock(&fh->subscribe_lock);
kvfree(sev);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
{
return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
static void v4l2_event_src_replace(struct v4l2_event *old,
const struct v4l2_event *new)
{
u32 old_changes = old->u.src_change.changes;
old->u.src_change = new->u.src_change;
old->u.src_change.changes |= old_changes;
}
static void v4l2_event_src_merge(const struct v4l2_event *old,
struct v4l2_event *new)
{
new->u.src_change.changes |= old->u.src_change.changes;
}
static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
.replace = v4l2_event_src_replace,
.merge = v4l2_event_src_merge,
};
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);
int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);
| linux-master | drivers/media/v4l2-core/v4l2-event.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* V4L2 controls framework core implementation.
*
* Copyright (C) 2010-2021 Hans Verkuil <[email protected]>
*/
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include "v4l2-ctrls-priv.h"
static const union v4l2_ctrl_ptr ptr_null;
static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl,
u32 changes)
{
memset(ev, 0, sizeof(*ev));
ev->type = V4L2_EVENT_CTRL;
ev->id = ctrl->id;
ev->u.ctrl.changes = changes;
ev->u.ctrl.type = ctrl->type;
ev->u.ctrl.flags = user_flags(ctrl);
if (ctrl->is_ptr)
ev->u.ctrl.value64 = 0;
else
ev->u.ctrl.value64 = *ctrl->p_cur.p_s64;
ev->u.ctrl.minimum = ctrl->minimum;
ev->u.ctrl.maximum = ctrl->maximum;
if (ctrl->type == V4L2_CTRL_TYPE_MENU
|| ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
ev->u.ctrl.step = 1;
else
ev->u.ctrl.step = ctrl->step;
ev->u.ctrl.default_value = ctrl->default_value;
}
void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl)
{
struct v4l2_event ev;
u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY))
changes |= V4L2_EVENT_CTRL_CH_VALUE;
fill_event(&ev, ctrl, changes);
v4l2_event_queue_fh(fh, &ev);
}
void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
{
struct v4l2_event ev;
struct v4l2_subscribed_event *sev;
if (list_empty(&ctrl->ev_subs))
return;
fill_event(&ev, ctrl, changes);
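	/*
	 * Deliver to all subscribers except the fh that triggered the
	 * change (if any), unless it subscribed with
	 * V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK.
	 */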
list_for_each_entry(sev, &ctrl->ev_subs, node)
if (sev->fh != fh ||
(sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))
v4l2_event_queue_fh(sev->fh, &ev);
}
bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl,
union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2)
{
unsigned int i;
switch (ctrl->type) {
case V4L2_CTRL_TYPE_BUTTON:
return false;
case V4L2_CTRL_TYPE_STRING:
for (i = 0; i < ctrl->elems; i++) {
unsigned int idx = i * ctrl->elem_size;
/* strings are always 0-terminated */
if (strcmp(ptr1.p_char + idx, ptr2.p_char + idx))
return false;
}
return true;
default:
return !memcmp(ptr1.p_const, ptr2.p_const,
ctrl->elems * ctrl->elem_size);
}
}
EXPORT_SYMBOL(v4l2_ctrl_type_op_equal);
/* Default intra MPEG-2 quantisation coefficients, from the specification. */
static const u8 mpeg2_intra_quant_matrix[64] = {
8, 16, 16, 19, 16, 19, 22, 22,
22, 22, 22, 22, 26, 24, 26, 27,
27, 27, 26, 26, 26, 26, 27, 27,
27, 29, 29, 29, 34, 34, 34, 29,
29, 29, 27, 27, 29, 29, 32, 32,
34, 34, 37, 38, 37, 35, 35, 34,
35, 38, 38, 40, 40, 40, 48, 48,
46, 46, 56, 56, 58, 69, 69, 83
};
static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
union v4l2_ctrl_ptr ptr)
{
struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quant;
struct v4l2_ctrl_vp8_frame *p_vp8_frame;
struct v4l2_ctrl_vp9_frame *p_vp9_frame;
struct v4l2_ctrl_fwht_params *p_fwht_params;
struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix;
struct v4l2_ctrl_av1_sequence *p_av1_sequence;
void *p = ptr.p + idx * ctrl->elem_size;
if (ctrl->p_def.p_const)
memcpy(p, ctrl->p_def.p_const, ctrl->elem_size);
else
memset(p, 0, ctrl->elem_size);
switch ((u32)ctrl->type) {
case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
p_mpeg2_sequence = p;
/* 4:2:0 */
p_mpeg2_sequence->chroma_format = 1;
break;
case V4L2_CTRL_TYPE_MPEG2_PICTURE:
p_mpeg2_picture = p;
/* interlaced top field */
p_mpeg2_picture->picture_structure = V4L2_MPEG2_PIC_TOP_FIELD;
p_mpeg2_picture->picture_coding_type =
V4L2_MPEG2_PIC_CODING_TYPE_I;
break;
case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
p_mpeg2_quant = p;
memcpy(p_mpeg2_quant->intra_quantiser_matrix,
mpeg2_intra_quant_matrix,
		       sizeof(mpeg2_intra_quant_matrix));
/*
* The default non-intra MPEG-2 quantisation
* coefficients are all 16, as per the specification.
*/
memset(p_mpeg2_quant->non_intra_quantiser_matrix, 16,
sizeof(p_mpeg2_quant->non_intra_quantiser_matrix));
break;
case V4L2_CTRL_TYPE_VP8_FRAME:
p_vp8_frame = p;
p_vp8_frame->num_dct_parts = 1;
break;
case V4L2_CTRL_TYPE_VP9_FRAME:
p_vp9_frame = p;
p_vp9_frame->profile = 0;
p_vp9_frame->bit_depth = 8;
p_vp9_frame->flags |= V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING |
V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING;
break;
case V4L2_CTRL_TYPE_AV1_SEQUENCE:
p_av1_sequence = p;
p_av1_sequence->bit_depth = 8;
break;
case V4L2_CTRL_TYPE_FWHT_PARAMS:
p_fwht_params = p;
p_fwht_params->version = V4L2_FWHT_VERSION;
p_fwht_params->width = 1280;
p_fwht_params->height = 720;
p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
(2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
break;
case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
p_h264_scaling_matrix = p;
/*
* The default (flat) H.264 scaling matrix when none are
* specified in the bitstream, this is according to formulas
* (7-8) and (7-9) of the specification.
*/
memset(p_h264_scaling_matrix, 16, sizeof(*p_h264_scaling_matrix));
break;
}
}
void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
union v4l2_ctrl_ptr ptr)
{
unsigned int i;
u32 tot_elems = ctrl->elems;
u32 elems = tot_elems - from_idx;
if (from_idx >= tot_elems)
return;
switch (ctrl->type) {
case V4L2_CTRL_TYPE_STRING:
for (i = from_idx; i < tot_elems; i++) {
unsigned int offset = i * ctrl->elem_size;
memset(ptr.p_char + offset, ' ', ctrl->minimum);
ptr.p_char[offset + ctrl->minimum] = '\0';
}
break;
case V4L2_CTRL_TYPE_INTEGER64:
if (ctrl->default_value) {
for (i = from_idx; i < tot_elems; i++)
ptr.p_s64[i] = ctrl->default_value;
} else {
memset(ptr.p_s64 + from_idx, 0, elems * sizeof(s64));
}
break;
case V4L2_CTRL_TYPE_INTEGER:
case V4L2_CTRL_TYPE_INTEGER_MENU:
case V4L2_CTRL_TYPE_MENU:
case V4L2_CTRL_TYPE_BITMASK:
case V4L2_CTRL_TYPE_BOOLEAN:
if (ctrl->default_value) {
for (i = from_idx; i < tot_elems; i++)
ptr.p_s32[i] = ctrl->default_value;
} else {
memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32));
}
break;
case V4L2_CTRL_TYPE_BUTTON:
case V4L2_CTRL_TYPE_CTRL_CLASS:
memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32));
break;
case V4L2_CTRL_TYPE_U8:
memset(ptr.p_u8 + from_idx, ctrl->default_value, elems);
break;
case V4L2_CTRL_TYPE_U16:
if (ctrl->default_value) {
for (i = from_idx; i < tot_elems; i++)
ptr.p_u16[i] = ctrl->default_value;
} else {
memset(ptr.p_u16 + from_idx, 0, elems * sizeof(u16));
}
break;
case V4L2_CTRL_TYPE_U32:
if (ctrl->default_value) {
for (i = from_idx; i < tot_elems; i++)
ptr.p_u32[i] = ctrl->default_value;
} else {
memset(ptr.p_u32 + from_idx, 0, elems * sizeof(u32));
}
break;
default:
for (i = from_idx; i < tot_elems; i++)
std_init_compound(ctrl, i, ptr);
break;
}
}
EXPORT_SYMBOL(v4l2_ctrl_type_op_init);
void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl)
{
union v4l2_ctrl_ptr ptr = ctrl->p_cur;
if (ctrl->is_array) {
unsigned i;
for (i = 0; i < ctrl->nr_of_dims; i++)
pr_cont("[%u]", ctrl->dims[i]);
pr_cont(" ");
}
switch (ctrl->type) {
case V4L2_CTRL_TYPE_INTEGER:
pr_cont("%d", *ptr.p_s32);
break;
case V4L2_CTRL_TYPE_BOOLEAN:
pr_cont("%s", *ptr.p_s32 ? "true" : "false");
break;
case V4L2_CTRL_TYPE_MENU:
pr_cont("%s", ctrl->qmenu[*ptr.p_s32]);
break;
case V4L2_CTRL_TYPE_INTEGER_MENU:
pr_cont("%lld", ctrl->qmenu_int[*ptr.p_s32]);
break;
case V4L2_CTRL_TYPE_BITMASK:
pr_cont("0x%08x", *ptr.p_s32);
break;
case V4L2_CTRL_TYPE_INTEGER64:
pr_cont("%lld", *ptr.p_s64);
break;
case V4L2_CTRL_TYPE_STRING:
pr_cont("%s", ptr.p_char);
break;
case V4L2_CTRL_TYPE_U8:
pr_cont("%u", (unsigned)*ptr.p_u8);
break;
case V4L2_CTRL_TYPE_U16:
pr_cont("%u", (unsigned)*ptr.p_u16);
break;
case V4L2_CTRL_TYPE_U32:
pr_cont("%u", (unsigned)*ptr.p_u32);
break;
case V4L2_CTRL_TYPE_H264_SPS:
pr_cont("H264_SPS");
break;
case V4L2_CTRL_TYPE_H264_PPS:
pr_cont("H264_PPS");
break;
case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
pr_cont("H264_SCALING_MATRIX");
break;
case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
pr_cont("H264_SLICE_PARAMS");
break;
case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
pr_cont("H264_DECODE_PARAMS");
break;
case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
pr_cont("H264_PRED_WEIGHTS");
break;
case V4L2_CTRL_TYPE_FWHT_PARAMS:
pr_cont("FWHT_PARAMS");
break;
case V4L2_CTRL_TYPE_VP8_FRAME:
pr_cont("VP8_FRAME");
break;
case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
pr_cont("HDR10_CLL_INFO");
break;
case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
pr_cont("HDR10_MASTERING_DISPLAY");
break;
case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
pr_cont("MPEG2_QUANTISATION");
break;
case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
pr_cont("MPEG2_SEQUENCE");
break;
case V4L2_CTRL_TYPE_MPEG2_PICTURE:
pr_cont("MPEG2_PICTURE");
break;
case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR:
pr_cont("VP9_COMPRESSED_HDR");
break;
case V4L2_CTRL_TYPE_VP9_FRAME:
pr_cont("VP9_FRAME");
break;
case V4L2_CTRL_TYPE_HEVC_SPS:
pr_cont("HEVC_SPS");
break;
case V4L2_CTRL_TYPE_HEVC_PPS:
pr_cont("HEVC_PPS");
break;
case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
pr_cont("HEVC_SLICE_PARAMS");
break;
case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
pr_cont("HEVC_SCALING_MATRIX");
break;
case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
pr_cont("HEVC_DECODE_PARAMS");
break;
case V4L2_CTRL_TYPE_AV1_SEQUENCE:
pr_cont("AV1_SEQUENCE");
break;
case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY:
pr_cont("AV1_TILE_GROUP_ENTRY");
break;
case V4L2_CTRL_TYPE_AV1_FRAME:
pr_cont("AV1_FRAME");
break;
case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
pr_cont("AV1_FILM_GRAIN");
break;
default:
pr_cont("unknown type %d", ctrl->type);
break;
}
}
EXPORT_SYMBOL(v4l2_ctrl_type_op_log);
/*
* Round towards the closest legal value. Be careful when we are
* close to the maximum range of the control type to prevent
* wrap-arounds.
*/
#define ROUND_TO_RANGE(val, offset_type, ctrl) \
({ \
offset_type offset; \
if ((ctrl)->maximum >= 0 && \
val >= (ctrl)->maximum - (s32)((ctrl)->step / 2)) \
val = (ctrl)->maximum; \
else \
val += (s32)((ctrl)->step / 2); \
val = clamp_t(typeof(val), val, \
(ctrl)->minimum, (ctrl)->maximum); \
offset = (val) - (ctrl)->minimum; \
offset = (ctrl)->step * (offset / (u32)(ctrl)->step); \
val = (ctrl)->minimum + offset; \
0; \
})
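/*
 * Example: with minimum = 0, maximum = 252 and step = 4,
 * ROUND_TO_RANGE() maps val = 6 to 8 (6 + step / 2 = 8, already on the
 * step grid) and snaps val = 251 to the maximum, 252.
 */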
/* Validate a new control */
#define zero_padding(s) \
memset(&(s).padding, 0, sizeof((s).padding))
#define zero_reserved(s) \
memset(&(s).reserved, 0, sizeof((s).reserved))
static int
validate_vp9_lf_params(struct v4l2_vp9_loop_filter *lf)
{
unsigned int i;
if (lf->flags & ~(V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED |
V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE))
return -EINVAL;
	/* Check that all values are in the accepted range. */
if (lf->level > GENMASK(5, 0))
return -EINVAL;
if (lf->sharpness > GENMASK(2, 0))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++)
if (lf->ref_deltas[i] < -63 || lf->ref_deltas[i] > 63)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++)
if (lf->mode_deltas[i] < -63 || lf->mode_deltas[i] > 63)
return -EINVAL;
zero_reserved(*lf);
return 0;
}
static int
validate_vp9_quant_params(struct v4l2_vp9_quantization *quant)
{
if (quant->delta_q_y_dc < -15 || quant->delta_q_y_dc > 15 ||
quant->delta_q_uv_dc < -15 || quant->delta_q_uv_dc > 15 ||
quant->delta_q_uv_ac < -15 || quant->delta_q_uv_ac > 15)
return -EINVAL;
zero_reserved(*quant);
return 0;
}
static int
validate_vp9_seg_params(struct v4l2_vp9_segmentation *seg)
{
unsigned int i, j;
if (seg->flags & ~(V4L2_VP9_SEGMENTATION_FLAG_ENABLED |
V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP |
V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE |
V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA |
V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(seg->feature_enabled); i++) {
if (seg->feature_enabled[i] &
~V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK)
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(seg->feature_data); i++) {
static const int range[] = { 255, 63, 3, 0 };
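		/* Max magnitudes for ALT_Q, ALT_L, REF_FRAME and SKIP. */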
		for (j = 0; j < ARRAY_SIZE(seg->feature_data[i]); j++) {
if (seg->feature_data[i][j] < -range[j] ||
seg->feature_data[i][j] > range[j])
return -EINVAL;
}
}
zero_reserved(*seg);
return 0;
}
static int
validate_vp9_compressed_hdr(struct v4l2_ctrl_vp9_compressed_hdr *hdr)
{
if (hdr->tx_mode > V4L2_VP9_TX_MODE_SELECT)
return -EINVAL;
return 0;
}
static int
validate_vp9_frame(struct v4l2_ctrl_vp9_frame *frame)
{
int ret;
/* Make sure we're not passed invalid flags. */
if (frame->flags & ~(V4L2_VP9_FRAME_FLAG_KEY_FRAME |
V4L2_VP9_FRAME_FLAG_SHOW_FRAME |
V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT |
V4L2_VP9_FRAME_FLAG_INTRA_ONLY |
V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV |
V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX |
V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE |
V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING |
V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING |
V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING))
return -EINVAL;
if (frame->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT &&
frame->flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX)
return -EINVAL;
if (frame->profile > V4L2_VP9_PROFILE_MAX)
return -EINVAL;
if (frame->reset_frame_context > V4L2_VP9_RESET_FRAME_CTX_ALL)
return -EINVAL;
if (frame->frame_context_idx >= V4L2_VP9_NUM_FRAME_CTX)
return -EINVAL;
/*
* Profiles 0 and 1 only support 8-bit depth, profiles 2 and 3 only 10
* and 12 bit depths.
*/
if ((frame->profile < 2 && frame->bit_depth != 8) ||
(frame->profile >= 2 &&
(frame->bit_depth != 10 && frame->bit_depth != 12)))
return -EINVAL;
/* Profile 0 and 2 only accept YUV 4:2:0. */
if ((frame->profile == 0 || frame->profile == 2) &&
(!(frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) ||
!(frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING)))
return -EINVAL;
/* Profile 1 and 3 only accept YUV 4:2:2, 4:4:0 and 4:4:4. */
if ((frame->profile == 1 || frame->profile == 3) &&
((frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) &&
(frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING)))
return -EINVAL;
if (frame->interpolation_filter > V4L2_VP9_INTERP_FILTER_SWITCHABLE)
return -EINVAL;
/*
* According to the spec, tile_cols_log2 shall be less than or equal
* to 6.
*/
if (frame->tile_cols_log2 > 6)
return -EINVAL;
if (frame->reference_mode > V4L2_VP9_REFERENCE_MODE_SELECT)
return -EINVAL;
ret = validate_vp9_lf_params(&frame->lf);
if (ret)
return ret;
ret = validate_vp9_quant_params(&frame->quant);
if (ret)
return ret;
ret = validate_vp9_seg_params(&frame->seg);
if (ret)
return ret;
zero_reserved(*frame);
return 0;
}
static int validate_av1_quantization(struct v4l2_av1_quantization *q)
{
if (q->flags > GENMASK(2, 0))
return -EINVAL;
if (q->delta_q_y_dc < -64 || q->delta_q_y_dc > 63 ||
q->delta_q_u_dc < -64 || q->delta_q_u_dc > 63 ||
q->delta_q_v_dc < -64 || q->delta_q_v_dc > 63 ||
q->delta_q_u_ac < -64 || q->delta_q_u_ac > 63 ||
q->delta_q_v_ac < -64 || q->delta_q_v_ac > 63 ||
q->delta_q_res > GENMASK(1, 0))
return -EINVAL;
if (q->qm_y > GENMASK(3, 0) ||
q->qm_u > GENMASK(3, 0) ||
q->qm_v > GENMASK(3, 0))
return -EINVAL;
return 0;
}
static int validate_av1_segmentation(struct v4l2_av1_segmentation *s)
{
u32 i;
u32 j;
if (s->flags > GENMASK(4, 0))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(s->feature_data); i++) {
static const int segmentation_feature_signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 };
static const int segmentation_feature_max[] = { 255, 63, 63, 63, 63, 7, 0, 0};
		for (j = 0; j < ARRAY_SIZE(s->feature_data[i]); j++) {
s32 limit = segmentation_feature_max[j];
if (segmentation_feature_signed[j]) {
if (s->feature_data[i][j] < -limit ||
s->feature_data[i][j] > limit)
return -EINVAL;
} else {
if (s->feature_data[i][j] < 0 || s->feature_data[i][j] > limit)
return -EINVAL;
}
}
}
return 0;
}
static int validate_av1_loop_filter(struct v4l2_av1_loop_filter *lf)
{
u32 i;
if (lf->flags > GENMASK(3, 0))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(lf->level); i++) {
if (lf->level[i] > GENMASK(5, 0))
return -EINVAL;
}
if (lf->sharpness > GENMASK(2, 0))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++) {
if (lf->ref_deltas[i] < -64 || lf->ref_deltas[i] > 63)
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) {
if (lf->mode_deltas[i] < -64 || lf->mode_deltas[i] > 63)
return -EINVAL;
}
return 0;
}
static int validate_av1_cdef(struct v4l2_av1_cdef *cdef)
{
u32 i;
if (cdef->damping_minus_3 > GENMASK(1, 0) ||
cdef->bits > GENMASK(1, 0))
return -EINVAL;
for (i = 0; i < 1 << cdef->bits; i++) {
if (cdef->y_pri_strength[i] > GENMASK(3, 0) ||
cdef->y_sec_strength[i] > 4 ||
cdef->uv_pri_strength[i] > GENMASK(3, 0) ||
cdef->uv_sec_strength[i] > 4)
return -EINVAL;
}
return 0;
}
static int validate_av1_loop_restoration(struct v4l2_av1_loop_restoration *lr)
{
if (lr->lr_unit_shift > 3 || lr->lr_uv_shift > 1)
return -EINVAL;
return 0;
}
static int validate_av1_film_grain(struct v4l2_ctrl_av1_film_grain *fg)
{
u32 i;
if (fg->flags > GENMASK(4, 0))
return -EINVAL;
if (fg->film_grain_params_ref_idx > GENMASK(2, 0) ||
fg->num_y_points > 14 ||
fg->num_cb_points > 10 ||
	    fg->num_cr_points > 10 ||
fg->grain_scaling_minus_8 > GENMASK(1, 0) ||
fg->ar_coeff_lag > GENMASK(1, 0) ||
fg->ar_coeff_shift_minus_6 > GENMASK(1, 0) ||
fg->grain_scale_shift > GENMASK(1, 0))
return -EINVAL;
if (!(fg->flags & V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN))
return 0;
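	/* The scaling points of each plane must be strictly increasing. */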
for (i = 1; i < fg->num_y_points; i++)
if (fg->point_y_value[i] <= fg->point_y_value[i - 1])
return -EINVAL;
for (i = 1; i < fg->num_cb_points; i++)
if (fg->point_cb_value[i] <= fg->point_cb_value[i - 1])
return -EINVAL;
for (i = 1; i < fg->num_cr_points; i++)
if (fg->point_cr_value[i] <= fg->point_cr_value[i - 1])
return -EINVAL;
return 0;
}
static int validate_av1_frame(struct v4l2_ctrl_av1_frame *f)
{
int ret = 0;
ret = validate_av1_quantization(&f->quantization);
if (ret)
return ret;
ret = validate_av1_segmentation(&f->segmentation);
if (ret)
return ret;
ret = validate_av1_loop_filter(&f->loop_filter);
if (ret)
return ret;
ret = validate_av1_cdef(&f->cdef);
if (ret)
return ret;
	ret = validate_av1_loop_restoration(&f->loop_restoration);
if (ret)
return ret;
if (f->flags &
~(V4L2_AV1_FRAME_FLAG_SHOW_FRAME |
V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME |
V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE |
V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE |
V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS |
V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV |
V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC |
V4L2_AV1_FRAME_FLAG_USE_SUPERRES |
V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV |
V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE |
V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS |
V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF |
V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION |
V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT |
V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET |
V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED |
V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT |
V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE |
V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT |
V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING))
return -EINVAL;
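	/* superres_denom is SUPERRES_DENOM_MIN (9) plus a 3-bit coded value. */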
if (f->superres_denom > GENMASK(2, 0) + 9)
return -EINVAL;
return 0;
}
static int validate_av1_sequence(struct v4l2_ctrl_av1_sequence *s)
{
if (s->flags &
~(V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE |
V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK |
V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA |
V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER |
V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND |
V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND |
V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION |
V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER |
V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT |
V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP |
V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS |
V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES |
V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF |
V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION |
V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME |
V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE |
V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X |
V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y |
V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT |
V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q))
return -EINVAL;
if (s->seq_profile == 1 && s->flags & V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME)
return -EINVAL;
/* reserved */
if (s->seq_profile > 2)
return -EINVAL;
/* TODO: PROFILES */
return 0;
}
/*
* Compound controls validation requires setting unused fields/flags to zero
* in order to properly detect unchanged controls with v4l2_ctrl_type_op_equal's
* memcmp.
*/
static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
union v4l2_ctrl_ptr ptr)
{
struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
struct v4l2_ctrl_vp8_frame *p_vp8_frame;
struct v4l2_ctrl_fwht_params *p_fwht_params;
struct v4l2_ctrl_h264_sps *p_h264_sps;
struct v4l2_ctrl_h264_pps *p_h264_pps;
struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights;
struct v4l2_ctrl_h264_slice_params *p_h264_slice_params;
struct v4l2_ctrl_h264_decode_params *p_h264_dec_params;
struct v4l2_ctrl_hevc_sps *p_hevc_sps;
struct v4l2_ctrl_hevc_pps *p_hevc_pps;
struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering;
struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params;
struct v4l2_area *area;
void *p = ptr.p + idx * ctrl->elem_size;
unsigned int i;
switch ((u32)ctrl->type) {
case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
p_mpeg2_sequence = p;
switch (p_mpeg2_sequence->chroma_format) {
case 1: /* 4:2:0 */
case 2: /* 4:2:2 */
case 3: /* 4:4:4 */
break;
default:
return -EINVAL;
}
break;
case V4L2_CTRL_TYPE_MPEG2_PICTURE:
p_mpeg2_picture = p;
switch (p_mpeg2_picture->intra_dc_precision) {
case 0: /* 8 bits */
case 1: /* 9 bits */
case 2: /* 10 bits */
case 3: /* 11 bits */
break;
default:
return -EINVAL;
}
switch (p_mpeg2_picture->picture_structure) {
case V4L2_MPEG2_PIC_TOP_FIELD:
case V4L2_MPEG2_PIC_BOTTOM_FIELD:
case V4L2_MPEG2_PIC_FRAME:
break;
default:
return -EINVAL;
}
switch (p_mpeg2_picture->picture_coding_type) {
case V4L2_MPEG2_PIC_CODING_TYPE_I:
case V4L2_MPEG2_PIC_CODING_TYPE_P:
case V4L2_MPEG2_PIC_CODING_TYPE_B:
break;
default:
return -EINVAL;
}
zero_reserved(*p_mpeg2_picture);
break;
case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
break;
case V4L2_CTRL_TYPE_FWHT_PARAMS:
p_fwht_params = p;
if (p_fwht_params->version < V4L2_FWHT_VERSION)
return -EINVAL;
if (!p_fwht_params->width || !p_fwht_params->height)
return -EINVAL;
break;
case V4L2_CTRL_TYPE_H264_SPS:
p_h264_sps = p;
/* Some syntax elements are only conditionally valid */
if (p_h264_sps->pic_order_cnt_type != 0) {
p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 = 0;
} else if (p_h264_sps->pic_order_cnt_type != 1) {
p_h264_sps->num_ref_frames_in_pic_order_cnt_cycle = 0;
p_h264_sps->offset_for_non_ref_pic = 0;
p_h264_sps->offset_for_top_to_bottom_field = 0;
memset(&p_h264_sps->offset_for_ref_frame, 0,
sizeof(p_h264_sps->offset_for_ref_frame));
}
if (!V4L2_H264_SPS_HAS_CHROMA_FORMAT(p_h264_sps)) {
p_h264_sps->chroma_format_idc = 1;
p_h264_sps->bit_depth_luma_minus8 = 0;
p_h264_sps->bit_depth_chroma_minus8 = 0;
p_h264_sps->flags &=
~V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS;
if (p_h264_sps->chroma_format_idc < 3)
p_h264_sps->flags &=
~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE;
}
if (p_h264_sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)
p_h264_sps->flags &=
~V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD;
/*
		 * Chroma 4:2:2 format requires at least the High 4:2:2 profile.
*
* The H264 specification and well-known parser implementations
* use profile-idc values directly, as that is clearer and
* less ambiguous. We do the same here.
*/
if (p_h264_sps->profile_idc < 122 &&
p_h264_sps->chroma_format_idc > 1)
return -EINVAL;
		/* Chroma 4:4:4 format requires at least High 4:4:4 Predictive profile */
if (p_h264_sps->profile_idc < 244 &&
p_h264_sps->chroma_format_idc > 2)
return -EINVAL;
if (p_h264_sps->chroma_format_idc > 3)
return -EINVAL;
if (p_h264_sps->bit_depth_luma_minus8 > 6)
return -EINVAL;
if (p_h264_sps->bit_depth_chroma_minus8 > 6)
return -EINVAL;
if (p_h264_sps->log2_max_frame_num_minus4 > 12)
return -EINVAL;
if (p_h264_sps->pic_order_cnt_type > 2)
return -EINVAL;
if (p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 > 12)
return -EINVAL;
if (p_h264_sps->max_num_ref_frames > V4L2_H264_REF_LIST_LEN)
return -EINVAL;
break;
case V4L2_CTRL_TYPE_H264_PPS:
p_h264_pps = p;
if (p_h264_pps->num_slice_groups_minus1 > 7)
return -EINVAL;
if (p_h264_pps->num_ref_idx_l0_default_active_minus1 >
(V4L2_H264_REF_LIST_LEN - 1))
return -EINVAL;
if (p_h264_pps->num_ref_idx_l1_default_active_minus1 >
(V4L2_H264_REF_LIST_LEN - 1))
return -EINVAL;
if (p_h264_pps->weighted_bipred_idc > 2)
return -EINVAL;
/*
* pic_init_qp_minus26 shall be in the range of
* -(26 + QpBdOffset_y) to +25, inclusive,
* where QpBdOffset_y is 6 * bit_depth_luma_minus8
*/
if (p_h264_pps->pic_init_qp_minus26 < -62 ||
p_h264_pps->pic_init_qp_minus26 > 25)
return -EINVAL;
if (p_h264_pps->pic_init_qs_minus26 < -26 ||
p_h264_pps->pic_init_qs_minus26 > 25)
return -EINVAL;
if (p_h264_pps->chroma_qp_index_offset < -12 ||
p_h264_pps->chroma_qp_index_offset > 12)
return -EINVAL;
if (p_h264_pps->second_chroma_qp_index_offset < -12 ||
p_h264_pps->second_chroma_qp_index_offset > 12)
return -EINVAL;
break;
case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
break;
case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
p_h264_pred_weights = p;
if (p_h264_pred_weights->luma_log2_weight_denom > 7)
return -EINVAL;
if (p_h264_pred_weights->chroma_log2_weight_denom > 7)
return -EINVAL;
break;
case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
p_h264_slice_params = p;
if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B)
p_h264_slice_params->flags &=
~V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED;
if (p_h264_slice_params->colour_plane_id > 2)
return -EINVAL;
if (p_h264_slice_params->cabac_init_idc > 2)
return -EINVAL;
if (p_h264_slice_params->disable_deblocking_filter_idc > 2)
return -EINVAL;
if (p_h264_slice_params->slice_alpha_c0_offset_div2 < -6 ||
p_h264_slice_params->slice_alpha_c0_offset_div2 > 6)
return -EINVAL;
if (p_h264_slice_params->slice_beta_offset_div2 < -6 ||
p_h264_slice_params->slice_beta_offset_div2 > 6)
return -EINVAL;
if (p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_I ||
p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_SI)
p_h264_slice_params->num_ref_idx_l0_active_minus1 = 0;
if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B)
p_h264_slice_params->num_ref_idx_l1_active_minus1 = 0;
if (p_h264_slice_params->num_ref_idx_l0_active_minus1 >
(V4L2_H264_REF_LIST_LEN - 1))
return -EINVAL;
if (p_h264_slice_params->num_ref_idx_l1_active_minus1 >
(V4L2_H264_REF_LIST_LEN - 1))
return -EINVAL;
zero_reserved(*p_h264_slice_params);
break;
case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
p_h264_dec_params = p;
if (p_h264_dec_params->nal_ref_idc > 3)
return -EINVAL;
for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) {
struct v4l2_h264_dpb_entry *dpb_entry =
&p_h264_dec_params->dpb[i];
zero_reserved(*dpb_entry);
}
zero_reserved(*p_h264_dec_params);
break;
case V4L2_CTRL_TYPE_VP8_FRAME:
p_vp8_frame = p;
switch (p_vp8_frame->num_dct_parts) {
case 1:
case 2:
case 4:
case 8:
break;
default:
return -EINVAL;
}
zero_padding(p_vp8_frame->segment);
zero_padding(p_vp8_frame->lf);
zero_padding(p_vp8_frame->quant);
zero_padding(p_vp8_frame->entropy);
zero_padding(p_vp8_frame->coder_state);
break;
case V4L2_CTRL_TYPE_HEVC_SPS:
p_hevc_sps = p;
if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED)) {
p_hevc_sps->pcm_sample_bit_depth_luma_minus1 = 0;
p_hevc_sps->pcm_sample_bit_depth_chroma_minus1 = 0;
p_hevc_sps->log2_min_pcm_luma_coding_block_size_minus3 = 0;
p_hevc_sps->log2_diff_max_min_pcm_luma_coding_block_size = 0;
}
if (!(p_hevc_sps->flags &
V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT))
p_hevc_sps->num_long_term_ref_pics_sps = 0;
break;
case V4L2_CTRL_TYPE_HEVC_PPS:
p_hevc_pps = p;
if (!(p_hevc_pps->flags &
V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED))
p_hevc_pps->diff_cu_qp_delta_depth = 0;
if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED)) {
p_hevc_pps->num_tile_columns_minus1 = 0;
p_hevc_pps->num_tile_rows_minus1 = 0;
memset(&p_hevc_pps->column_width_minus1, 0,
sizeof(p_hevc_pps->column_width_minus1));
memset(&p_hevc_pps->row_height_minus1, 0,
sizeof(p_hevc_pps->row_height_minus1));
p_hevc_pps->flags &=
~V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED;
}
if (p_hevc_pps->flags &
V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER) {
p_hevc_pps->pps_beta_offset_div2 = 0;
p_hevc_pps->pps_tc_offset_div2 = 0;
}
break;
case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
p_hevc_decode_params = p;
if (p_hevc_decode_params->num_active_dpb_entries >
V4L2_HEVC_DPB_ENTRIES_NUM_MAX)
return -EINVAL;
break;
case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
break;
case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
break;
case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
p_hdr10_mastering = p;
for (i = 0; i < 3; ++i) {
if (p_hdr10_mastering->display_primaries_x[i] <
V4L2_HDR10_MASTERING_PRIMARIES_X_LOW ||
p_hdr10_mastering->display_primaries_x[i] >
V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH ||
p_hdr10_mastering->display_primaries_y[i] <
V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW ||
p_hdr10_mastering->display_primaries_y[i] >
V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH)
return -EINVAL;
}
if (p_hdr10_mastering->white_point_x <
V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW ||
p_hdr10_mastering->white_point_x >
V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH ||
p_hdr10_mastering->white_point_y <
V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW ||
p_hdr10_mastering->white_point_y >
V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH)
return -EINVAL;
if (p_hdr10_mastering->max_display_mastering_luminance <
V4L2_HDR10_MASTERING_MAX_LUMA_LOW ||
p_hdr10_mastering->max_display_mastering_luminance >
V4L2_HDR10_MASTERING_MAX_LUMA_HIGH ||
p_hdr10_mastering->min_display_mastering_luminance <
V4L2_HDR10_MASTERING_MIN_LUMA_LOW ||
p_hdr10_mastering->min_display_mastering_luminance >
V4L2_HDR10_MASTERING_MIN_LUMA_HIGH)
return -EINVAL;
/* The following restriction comes from ITU-T Rec. H.265 spec */
if (p_hdr10_mastering->max_display_mastering_luminance ==
V4L2_HDR10_MASTERING_MAX_LUMA_LOW &&
p_hdr10_mastering->min_display_mastering_luminance ==
V4L2_HDR10_MASTERING_MIN_LUMA_HIGH)
return -EINVAL;
break;
case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
break;
case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR:
return validate_vp9_compressed_hdr(p);
case V4L2_CTRL_TYPE_VP9_FRAME:
return validate_vp9_frame(p);
case V4L2_CTRL_TYPE_AV1_FRAME:
return validate_av1_frame(p);
case V4L2_CTRL_TYPE_AV1_SEQUENCE:
return validate_av1_sequence(p);
case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY:
break;
case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
return validate_av1_film_grain(p);
case V4L2_CTRL_TYPE_AREA:
area = p;
if (!area->width || !area->height)
return -EINVAL;
break;
default:
return -EINVAL;
}
return 0;
}
static int std_validate_elem(const struct v4l2_ctrl *ctrl, u32 idx,
union v4l2_ctrl_ptr ptr)
{
size_t len;
u64 offset;
s64 val;
switch ((u32)ctrl->type) {
case V4L2_CTRL_TYPE_INTEGER:
return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl);
case V4L2_CTRL_TYPE_INTEGER64:
/*
* We can't use the ROUND_TO_RANGE define here due to
* the u64 divide that needs special care.
*/
val = ptr.p_s64[idx];
if (ctrl->maximum >= 0 && val >= ctrl->maximum - (s64)(ctrl->step / 2))
val = ctrl->maximum;
else
val += (s64)(ctrl->step / 2);
val = clamp_t(s64, val, ctrl->minimum, ctrl->maximum);
offset = val - ctrl->minimum;
do_div(offset, ctrl->step);
ptr.p_s64[idx] = ctrl->minimum + offset * ctrl->step;
return 0;
case V4L2_CTRL_TYPE_U8:
return ROUND_TO_RANGE(ptr.p_u8[idx], u8, ctrl);
case V4L2_CTRL_TYPE_U16:
return ROUND_TO_RANGE(ptr.p_u16[idx], u16, ctrl);
case V4L2_CTRL_TYPE_U32:
return ROUND_TO_RANGE(ptr.p_u32[idx], u32, ctrl);
case V4L2_CTRL_TYPE_BOOLEAN:
ptr.p_s32[idx] = !!ptr.p_s32[idx];
return 0;
case V4L2_CTRL_TYPE_MENU:
case V4L2_CTRL_TYPE_INTEGER_MENU:
if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum)
return -ERANGE;
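		/* A set bit in menu_skip_mask disables the matching item. */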
if (ptr.p_s32[idx] < BITS_PER_LONG_LONG &&
(ctrl->menu_skip_mask & BIT_ULL(ptr.p_s32[idx])))
return -EINVAL;
if (ctrl->type == V4L2_CTRL_TYPE_MENU &&
ctrl->qmenu[ptr.p_s32[idx]][0] == '\0')
return -EINVAL;
return 0;
case V4L2_CTRL_TYPE_BITMASK:
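		/* For bitmask controls the maximum is the mask of valid bits. */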
ptr.p_s32[idx] &= ctrl->maximum;
return 0;
case V4L2_CTRL_TYPE_BUTTON:
case V4L2_CTRL_TYPE_CTRL_CLASS:
ptr.p_s32[idx] = 0;
return 0;
case V4L2_CTRL_TYPE_STRING:
idx *= ctrl->elem_size;
len = strlen(ptr.p_char + idx);
if (len < ctrl->minimum)
return -ERANGE;
if ((len - (u32)ctrl->minimum) % (u32)ctrl->step)
return -ERANGE;
return 0;
default:
return std_validate_compound(ctrl, idx, ptr);
}
}
int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl,
union v4l2_ctrl_ptr ptr)
{
unsigned int i;
int ret = 0;
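	/* Fast path: full-range unsigned arrays need no per-element checks. */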
switch ((u32)ctrl->type) {
case V4L2_CTRL_TYPE_U8:
if (ctrl->maximum == 0xff && ctrl->minimum == 0 && ctrl->step == 1)
return 0;
break;
case V4L2_CTRL_TYPE_U16:
if (ctrl->maximum == 0xffff && ctrl->minimum == 0 && ctrl->step == 1)
return 0;
break;
case V4L2_CTRL_TYPE_U32:
if (ctrl->maximum == 0xffffffff && ctrl->minimum == 0 && ctrl->step == 1)
return 0;
break;
case V4L2_CTRL_TYPE_BUTTON:
case V4L2_CTRL_TYPE_CTRL_CLASS:
memset(ptr.p_s32, 0, ctrl->new_elems * sizeof(s32));
return 0;
}
for (i = 0; !ret && i < ctrl->new_elems; i++)
ret = std_validate_elem(ctrl, i, ptr);
return ret;
}
EXPORT_SYMBOL(v4l2_ctrl_type_op_validate);
static const struct v4l2_ctrl_type_ops std_type_ops = {
.equal = v4l2_ctrl_type_op_equal,
.init = v4l2_ctrl_type_op_init,
.log = v4l2_ctrl_type_op_log,
.validate = v4l2_ctrl_type_op_validate,
};
void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
{
if (!ctrl)
return;
if (!notify) {
ctrl->call_notify = 0;
return;
}
if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify))
return;
ctrl->handler->notify = notify;
ctrl->handler->notify_priv = priv;
ctrl->call_notify = 1;
}
EXPORT_SYMBOL(v4l2_ctrl_notify);
/* Copy one control value to another. */
static void ptr_to_ptr(struct v4l2_ctrl *ctrl,
union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to,
unsigned int elems)
{
if (ctrl == NULL)
return;
memcpy(to.p, from.p_const, elems * ctrl->elem_size);
}
/* Copy the new value to the current value. */
void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
{
bool changed;
if (ctrl == NULL)
return;
/* has_changed is set by cluster_changed */
changed = ctrl->has_changed;
if (changed) {
if (ctrl->is_dyn_array)
ctrl->elems = ctrl->new_elems;
ptr_to_ptr(ctrl, ctrl->p_new, ctrl->p_cur, ctrl->elems);
}
if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) {
/* Note: CH_FLAGS is only set for auto clusters. */
ctrl->flags &=
~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE);
if (!is_cur_manual(ctrl->cluster[0])) {
ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
if (ctrl->cluster[0]->has_volatiles)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
}
fh = NULL;
}
if (changed || ch_flags) {
/* If a control was changed that was not one of the controls
modified by the application, then send the event to all. */
if (!ctrl->is_new)
fh = NULL;
send_event(fh, ctrl,
(changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags);
if (ctrl->call_notify && changed && ctrl->handler->notify)
ctrl->handler->notify(ctrl, ctrl->handler->notify_priv);
}
}
/* Copy the current value to the new value */
void cur_to_new(struct v4l2_ctrl *ctrl)
{
if (ctrl == NULL)
return;
if (ctrl->is_dyn_array)
ctrl->new_elems = ctrl->elems;
ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems);
}
static bool req_alloc_array(struct v4l2_ctrl_ref *ref, u32 elems)
{
void *tmp;
if (elems == ref->p_req_array_alloc_elems)
return true;
if (ref->ctrl->is_dyn_array &&
elems < ref->p_req_array_alloc_elems)
return true;
tmp = kvmalloc(elems * ref->ctrl->elem_size, GFP_KERNEL);
if (!tmp) {
ref->p_req_array_enomem = true;
return false;
}
ref->p_req_array_enomem = false;
kvfree(ref->p_req.p);
ref->p_req.p = tmp;
ref->p_req_array_alloc_elems = elems;
return true;
}
/* Copy the new value to the request value */
void new_to_req(struct v4l2_ctrl_ref *ref)
{
struct v4l2_ctrl *ctrl;
if (!ref)
return;
ctrl = ref->ctrl;
if (ctrl->is_array && !req_alloc_array(ref, ctrl->new_elems))
return;
ref->p_req_elems = ctrl->new_elems;
ptr_to_ptr(ctrl, ctrl->p_new, ref->p_req, ref->p_req_elems);
ref->p_req_valid = true;
}
/* Copy the current value to the request value */
void cur_to_req(struct v4l2_ctrl_ref *ref)
{
struct v4l2_ctrl *ctrl;
if (!ref)
return;
ctrl = ref->ctrl;
if (ctrl->is_array && !req_alloc_array(ref, ctrl->elems))
return;
ref->p_req_elems = ctrl->elems;
ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req, ctrl->elems);
ref->p_req_valid = true;
}
/* Copy the request value to the new value */
int req_to_new(struct v4l2_ctrl_ref *ref)
{
struct v4l2_ctrl *ctrl;
if (!ref)
return 0;
ctrl = ref->ctrl;
/*
* This control was never set in the request, so just use the current
* value.
*/
if (!ref->p_req_valid) {
if (ctrl->is_dyn_array)
ctrl->new_elems = ctrl->elems;
ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems);
return 0;
}
/* Not an array, so just copy the request value */
if (!ctrl->is_array) {
ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems);
return 0;
}
/* Sanity check, should never happen */
if (WARN_ON(!ref->p_req_array_alloc_elems))
return -ENOMEM;
if (!ctrl->is_dyn_array &&
ref->p_req_elems != ctrl->p_array_alloc_elems)
return -ENOMEM;
/*
* Check if the number of elements in the request is more than the
* elements in ctrl->p_array. If so, attempt to realloc ctrl->p_array.
* Note that p_array is allocated with twice the number of elements
* in the dynamic array since it has to store both the current and
* new value of such a control.
*/
if (ref->p_req_elems > ctrl->p_array_alloc_elems) {
unsigned int sz = ref->p_req_elems * ctrl->elem_size;
void *old = ctrl->p_array;
void *tmp = kvzalloc(2 * sz, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size);
memcpy(tmp + sz, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size);
ctrl->p_new.p = tmp;
ctrl->p_cur.p = tmp + sz;
ctrl->p_array = tmp;
ctrl->p_array_alloc_elems = ref->p_req_elems;
kvfree(old);
}
ctrl->new_elems = ref->p_req_elems;
ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems);
return 0;
}
/* Control range checking */
int check_range(enum v4l2_ctrl_type type,
s64 min, s64 max, u64 step, s64 def)
{
switch (type) {
case V4L2_CTRL_TYPE_BOOLEAN:
if (step != 1 || max > 1 || min < 0)
return -ERANGE;
fallthrough;
case V4L2_CTRL_TYPE_U8:
case V4L2_CTRL_TYPE_U16:
case V4L2_CTRL_TYPE_U32:
case V4L2_CTRL_TYPE_INTEGER:
case V4L2_CTRL_TYPE_INTEGER64:
if (step == 0 || min > max || def < min || def > max)
return -ERANGE;
return 0;
case V4L2_CTRL_TYPE_BITMASK:
if (step || min || !max || (def & ~max))
return -ERANGE;
return 0;
case V4L2_CTRL_TYPE_MENU:
case V4L2_CTRL_TYPE_INTEGER_MENU:
if (min > max || def < min || def > max)
return -ERANGE;
/* Note: step == menu_skip_mask for menu controls.
So here we check if the default value is masked out. */
if (step && ((1 << def) & step))
return -EINVAL;
return 0;
case V4L2_CTRL_TYPE_STRING:
if (min > max || min < 0 || step < 1 || def)
return -ERANGE;
return 0;
default:
return 0;
}
}
/* Set the handler's error code if it wasn't set earlier already */
static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err)
{
if (hdl->error == 0)
hdl->error = err;
return err;
}
/* Initialize the handler */
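/*
 * Typical driver usage (a sketch only, error handling elided):
 *
 *	v4l2_ctrl_handler_init(&hdl, 2);
 *	v4l2_ctrl_new_std(&hdl, &ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
 *	if (hdl.error)
 *		...
 *
 * v4l2_ctrl_handler_init() is the lockdep wrapper around this function.
 */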
int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
unsigned nr_of_controls_hint,
struct lock_class_key *key, const char *name)
{
mutex_init(&hdl->_lock);
hdl->lock = &hdl->_lock;
lockdep_set_class_and_name(hdl->lock, key, name);
INIT_LIST_HEAD(&hdl->ctrls);
INIT_LIST_HEAD(&hdl->ctrl_refs);
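	/* Size the hash: roughly one bucket per eight expected controls. */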
hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
hdl->buckets = kvcalloc(hdl->nr_of_buckets, sizeof(hdl->buckets[0]),
GFP_KERNEL);
hdl->error = hdl->buckets ? 0 : -ENOMEM;
v4l2_ctrl_handler_init_request(hdl);
return hdl->error;
}
EXPORT_SYMBOL(v4l2_ctrl_handler_init_class);
/* Free all controls and control refs */
void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
{
struct v4l2_ctrl_ref *ref, *next_ref;
struct v4l2_ctrl *ctrl, *next_ctrl;
struct v4l2_subscribed_event *sev, *next_sev;
if (hdl == NULL || hdl->buckets == NULL)
return;
v4l2_ctrl_handler_free_request(hdl);
mutex_lock(hdl->lock);
/* Free all nodes */
list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
list_del(&ref->node);
if (ref->p_req_array_alloc_elems)
kvfree(ref->p_req.p);
kfree(ref);
}
/* Free all controls owned by the handler */
list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) {
list_del(&ctrl->node);
list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
list_del(&sev->node);
kvfree(ctrl->p_array);
kvfree(ctrl);
}
kvfree(hdl->buckets);
hdl->buckets = NULL;
hdl->cached = NULL;
hdl->error = 0;
mutex_unlock(hdl->lock);
mutex_destroy(&hdl->_lock);
}
EXPORT_SYMBOL(v4l2_ctrl_handler_free);
/* For backwards compatibility: V4L2_CID_PRIVATE_BASE should no longer
be used except in G_CTRL, S_CTRL, QUERYCTRL and QUERYMENU when dealing
with applications that do not use the NEXT_CTRL flag.
We just find the n-th private user control. It's O(N), but that should not
be an issue in this particular case. */
static struct v4l2_ctrl_ref *find_private_ref(
struct v4l2_ctrl_handler *hdl, u32 id)
{
struct v4l2_ctrl_ref *ref;
id -= V4L2_CID_PRIVATE_BASE;
list_for_each_entry(ref, &hdl->ctrl_refs, node) {
/* Search for private user controls that are compatible with
VIDIOC_G/S_CTRL. */
if (V4L2_CTRL_ID2WHICH(ref->ctrl->id) == V4L2_CTRL_CLASS_USER &&
V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) {
if (!ref->ctrl->is_int)
continue;
if (id == 0)
return ref;
id--;
}
}
return NULL;
}
/* Find a control with the given ID. */
struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id)
{
struct v4l2_ctrl_ref *ref;
int bucket;
id &= V4L2_CTRL_ID_MASK;
/* Old-style private controls need special handling */
if (id >= V4L2_CID_PRIVATE_BASE)
return find_private_ref(hdl, id);
bucket = id % hdl->nr_of_buckets;
/* Simple optimization: cache the last control found */
if (hdl->cached && hdl->cached->ctrl->id == id)
return hdl->cached;
/* Not in cache, search the hash */
ref = hdl->buckets ? hdl->buckets[bucket] : NULL;
while (ref && ref->ctrl->id != id)
ref = ref->next;
if (ref)
hdl->cached = ref; /* cache it! */
return ref;
}
/* Find a control with the given ID. Take the handler's lock first. */
struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id)
{
struct v4l2_ctrl_ref *ref = NULL;
if (hdl) {
mutex_lock(hdl->lock);
ref = find_ref(hdl, id);
mutex_unlock(hdl->lock);
}
return ref;
}
/* Find a control with the given ID. */
struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
{
struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
return ref ? ref->ctrl : NULL;
}
EXPORT_SYMBOL(v4l2_ctrl_find);
/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */
int handler_new_ref(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl *ctrl,
struct v4l2_ctrl_ref **ctrl_ref,
bool from_other_dev, bool allocate_req)
{
struct v4l2_ctrl_ref *ref;
struct v4l2_ctrl_ref *new_ref;
u32 id = ctrl->id;
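	/* The control class pseudo-control has ID (class base | 1). */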
u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1;
int bucket = id % hdl->nr_of_buckets; /* which bucket to use */
unsigned int size_extra_req = 0;
if (ctrl_ref)
*ctrl_ref = NULL;
/*
* Automatically add the control class if it is not yet present and
* the new control is not a compound control.
*/
if (ctrl->type < V4L2_CTRL_COMPOUND_TYPES &&
id != class_ctrl && find_ref_lock(hdl, class_ctrl) == NULL)
if (!v4l2_ctrl_new_std(hdl, NULL, class_ctrl, 0, 0, 0, 0))
return hdl->error;
if (hdl->error)
return hdl->error;
if (allocate_req && !ctrl->is_array)
size_extra_req = ctrl->elems * ctrl->elem_size;
new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL);
if (!new_ref)
return handler_set_err(hdl, -ENOMEM);
new_ref->ctrl = ctrl;
new_ref->from_other_dev = from_other_dev;
if (size_extra_req)
new_ref->p_req.p = &new_ref[1];
INIT_LIST_HEAD(&new_ref->node);
mutex_lock(hdl->lock);
/* Add immediately at the end of the list if the list is empty, or if
the last element in the list has a lower ID.
This ensures that when elements are added in ascending order the
insertion is an O(1) operation. */
if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) {
list_add_tail(&new_ref->node, &hdl->ctrl_refs);
goto insert_in_hash;
}
/* Find insert position in sorted list */
list_for_each_entry(ref, &hdl->ctrl_refs, node) {
if (ref->ctrl->id < id)
continue;
/* Don't add duplicates */
if (ref->ctrl->id == id) {
kfree(new_ref);
goto unlock;
}
list_add(&new_ref->node, ref->node.prev);
break;
}
insert_in_hash:
/* Insert the control node in the hash */
new_ref->next = hdl->buckets[bucket];
hdl->buckets[bucket] = new_ref;
if (ctrl_ref)
*ctrl_ref = new_ref;
if (ctrl->handler == hdl) {
/* By default each control starts in a cluster of its own.
* new_ref->ctrl is basically a cluster array with one
* element, so that's perfect to use as the cluster pointer.
* But only do this for the handler that owns the control.
*/
ctrl->cluster = &new_ref->ctrl;
ctrl->ncontrols = 1;
}
unlock:
mutex_unlock(hdl->lock);
return 0;
}
/* Add a new control */
static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
const struct v4l2_ctrl_type_ops *type_ops,
u32 id, const char *name, enum v4l2_ctrl_type type,
s64 min, s64 max, u64 step, s64 def,
const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size,
u32 flags, const char * const *qmenu,
const s64 *qmenu_int, const union v4l2_ctrl_ptr p_def,
void *priv)
{
struct v4l2_ctrl *ctrl;
unsigned sz_extra;
unsigned nr_of_dims = 0;
unsigned elems = 1;
bool is_array;
unsigned tot_ctrl_size;
void *data;
int err;
if (hdl->error)
return NULL;
while (dims && dims[nr_of_dims]) {
elems *= dims[nr_of_dims];
nr_of_dims++;
if (nr_of_dims == V4L2_CTRL_MAX_DIMS)
break;
}
is_array = nr_of_dims > 0;
/* Prefill elem_size for all types handled by std_type_ops */
switch ((u32)type) {
case V4L2_CTRL_TYPE_INTEGER64:
elem_size = sizeof(s64);
break;
case V4L2_CTRL_TYPE_STRING:
elem_size = max + 1;
break;
case V4L2_CTRL_TYPE_U8:
elem_size = sizeof(u8);
break;
case V4L2_CTRL_TYPE_U16:
elem_size = sizeof(u16);
break;
case V4L2_CTRL_TYPE_U32:
elem_size = sizeof(u32);
break;
case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
elem_size = sizeof(struct v4l2_ctrl_mpeg2_sequence);
break;
case V4L2_CTRL_TYPE_MPEG2_PICTURE:
elem_size = sizeof(struct v4l2_ctrl_mpeg2_picture);
break;
case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantisation);
break;
case V4L2_CTRL_TYPE_FWHT_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_fwht_params);
break;
case V4L2_CTRL_TYPE_H264_SPS:
elem_size = sizeof(struct v4l2_ctrl_h264_sps);
break;
case V4L2_CTRL_TYPE_H264_PPS:
elem_size = sizeof(struct v4l2_ctrl_h264_pps);
break;
case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
elem_size = sizeof(struct v4l2_ctrl_h264_scaling_matrix);
break;
case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_h264_slice_params);
break;
case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_h264_decode_params);
break;
case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
elem_size = sizeof(struct v4l2_ctrl_h264_pred_weights);
break;
case V4L2_CTRL_TYPE_VP8_FRAME:
elem_size = sizeof(struct v4l2_ctrl_vp8_frame);
break;
case V4L2_CTRL_TYPE_HEVC_SPS:
elem_size = sizeof(struct v4l2_ctrl_hevc_sps);
break;
case V4L2_CTRL_TYPE_HEVC_PPS:
elem_size = sizeof(struct v4l2_ctrl_hevc_pps);
break;
case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params);
break;
case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
elem_size = sizeof(struct v4l2_ctrl_hevc_scaling_matrix);
break;
case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_hevc_decode_params);
break;
case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
elem_size = sizeof(struct v4l2_ctrl_hdr10_cll_info);
break;
case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
elem_size = sizeof(struct v4l2_ctrl_hdr10_mastering_display);
break;
case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR:
elem_size = sizeof(struct v4l2_ctrl_vp9_compressed_hdr);
break;
case V4L2_CTRL_TYPE_VP9_FRAME:
elem_size = sizeof(struct v4l2_ctrl_vp9_frame);
break;
case V4L2_CTRL_TYPE_AV1_SEQUENCE:
elem_size = sizeof(struct v4l2_ctrl_av1_sequence);
break;
case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY:
elem_size = sizeof(struct v4l2_ctrl_av1_tile_group_entry);
break;
case V4L2_CTRL_TYPE_AV1_FRAME:
elem_size = sizeof(struct v4l2_ctrl_av1_frame);
break;
case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
elem_size = sizeof(struct v4l2_ctrl_av1_film_grain);
break;
case V4L2_CTRL_TYPE_AREA:
elem_size = sizeof(struct v4l2_area);
break;
default:
if (type < V4L2_CTRL_COMPOUND_TYPES)
elem_size = sizeof(s32);
break;
}
/* Sanity checks */
if (id == 0 || name == NULL || !elem_size ||
id >= V4L2_CID_PRIVATE_BASE ||
(type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
(type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) {
handler_set_err(hdl, -ERANGE);
return NULL;
}
err = check_range(type, min, max, step, def);
if (err) {
handler_set_err(hdl, err);
return NULL;
}
if (is_array &&
(type == V4L2_CTRL_TYPE_BUTTON ||
type == V4L2_CTRL_TYPE_CTRL_CLASS)) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
if (flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY) {
/*
* For now only support this for one-dimensional arrays only.
*
* This can be relaxed in the future, but this will
* require more effort.
*/
if (nr_of_dims != 1) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
/* Start with just 1 element */
elems = 1;
}
tot_ctrl_size = elem_size * elems;
sz_extra = 0;
if (type == V4L2_CTRL_TYPE_BUTTON)
flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
else if (type == V4L2_CTRL_TYPE_CTRL_CLASS)
flags |= V4L2_CTRL_FLAG_READ_ONLY;
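	/* Non-array pointer controls store current + new values inline. */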
else if (!is_array &&
(type == V4L2_CTRL_TYPE_INTEGER64 ||
type == V4L2_CTRL_TYPE_STRING ||
type >= V4L2_CTRL_COMPOUND_TYPES))
sz_extra += 2 * tot_ctrl_size;
if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const)
sz_extra += elem_size;
ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
if (ctrl == NULL) {
handler_set_err(hdl, -ENOMEM);
return NULL;
}
INIT_LIST_HEAD(&ctrl->node);
INIT_LIST_HEAD(&ctrl->ev_subs);
ctrl->handler = hdl;
ctrl->ops = ops;
ctrl->type_ops = type_ops ? type_ops : &std_type_ops;
ctrl->id = id;
ctrl->name = name;
ctrl->type = type;
ctrl->flags = flags;
ctrl->minimum = min;
ctrl->maximum = max;
ctrl->step = step;
ctrl->default_value = def;
ctrl->is_string = !is_array && type == V4L2_CTRL_TYPE_STRING;
ctrl->is_ptr = is_array || type >= V4L2_CTRL_COMPOUND_TYPES || ctrl->is_string;
ctrl->is_int = !ctrl->is_ptr && type != V4L2_CTRL_TYPE_INTEGER64;
ctrl->is_array = is_array;
ctrl->is_dyn_array = !!(flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY);
ctrl->elems = elems;
ctrl->new_elems = elems;
ctrl->nr_of_dims = nr_of_dims;
if (nr_of_dims)
memcpy(ctrl->dims, dims, nr_of_dims * sizeof(dims[0]));
ctrl->elem_size = elem_size;
if (type == V4L2_CTRL_TYPE_MENU)
ctrl->qmenu = qmenu;
else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
ctrl->qmenu_int = qmenu_int;
ctrl->priv = priv;
ctrl->cur.val = ctrl->val = def;
data = &ctrl[1];
if (ctrl->is_array) {
ctrl->p_array_alloc_elems = elems;
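		/* A single buffer holds both the new and the current value. */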
ctrl->p_array = kvzalloc(2 * elems * elem_size, GFP_KERNEL);
if (!ctrl->p_array) {
kvfree(ctrl);
return NULL;
}
data = ctrl->p_array;
}
if (!ctrl->is_int) {
ctrl->p_new.p = data;
ctrl->p_cur.p = data + tot_ctrl_size;
} else {
ctrl->p_new.p = &ctrl->val;
ctrl->p_cur.p = &ctrl->cur.val;
}
if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) {
if (ctrl->is_array)
ctrl->p_def.p = &ctrl[1];
else
ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size;
memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
}
ctrl->type_ops->init(ctrl, 0, ctrl->p_cur);
cur_to_new(ctrl);
if (handler_new_ref(hdl, ctrl, NULL, false, false)) {
kvfree(ctrl->p_array);
kvfree(ctrl);
return NULL;
}
mutex_lock(hdl->lock);
list_add_tail(&ctrl->node, &hdl->ctrls);
mutex_unlock(hdl->lock);
return ctrl;
}
struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_config *cfg, void *priv)
{
bool is_menu;
struct v4l2_ctrl *ctrl;
const char *name = cfg->name;
const char * const *qmenu = cfg->qmenu;
const s64 *qmenu_int = cfg->qmenu_int;
enum v4l2_ctrl_type type = cfg->type;
u32 flags = cfg->flags;
s64 min = cfg->min;
s64 max = cfg->max;
u64 step = cfg->step;
s64 def = cfg->def;
if (name == NULL)
v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
&def, &flags);
is_menu = (type == V4L2_CTRL_TYPE_MENU ||
type == V4L2_CTRL_TYPE_INTEGER_MENU);
if (is_menu)
WARN_ON(step);
else
WARN_ON(cfg->menu_skip_mask);
if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
qmenu = v4l2_ctrl_get_menu(cfg->id);
} else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->type_ops, cfg->id, name,
type, min, max,
is_menu ? cfg->menu_skip_mask : step, def,
cfg->dims, cfg->elem_size,
flags, qmenu, qmenu_int, cfg->p_def, priv);
if (ctrl)
ctrl->is_private = cfg->is_private;
return ctrl;
}
EXPORT_SYMBOL(v4l2_ctrl_new_custom);
/* Helper function for standard non-menu controls */
struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, s64 min, s64 max, u64 step, s64 def)
{
const char *name;
enum v4l2_ctrl_type type;
u32 flags;
v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
if (type == V4L2_CTRL_TYPE_MENU ||
type == V4L2_CTRL_TYPE_INTEGER_MENU ||
type >= V4L2_CTRL_COMPOUND_TYPES) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
min, max, step, def, NULL, 0,
flags, NULL, NULL, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std);
/* Helper function for standard menu controls */
struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, u8 _max, u64 mask, u8 _def)
{
const char * const *qmenu = NULL;
const s64 *qmenu_int = NULL;
unsigned int qmenu_int_len = 0;
const char *name;
enum v4l2_ctrl_type type;
s64 min;
s64 max = _max;
s64 def = _def;
u64 step;
u32 flags;
v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
if (type == V4L2_CTRL_TYPE_MENU)
qmenu = v4l2_ctrl_get_menu(id);
else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len);
if ((!qmenu && !qmenu_int) || (qmenu_int && max >= qmenu_int_len)) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
flags, qmenu, qmenu_int, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
/* Helper function for standard menu controls with driver defined menu */
struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops, u32 id, u8 _max,
u64 mask, u8 _def, const char * const *qmenu)
{
enum v4l2_ctrl_type type;
const char *name;
u32 flags;
u64 step;
s64 min;
s64 max = _max;
s64 def = _def;
/* v4l2_ctrl_new_std_menu_items() should only be called for
* standard controls without a standard menu.
*/
if (v4l2_ctrl_get_menu(id)) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
if (type != V4L2_CTRL_TYPE_MENU || qmenu == NULL) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
flags, qmenu, NULL, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
/* Helper function for standard compound controls */
struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops, u32 id,
const union v4l2_ctrl_ptr p_def)
{
const char *name;
enum v4l2_ctrl_type type;
u32 flags;
s64 min, max, step, def;
v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
if (type < V4L2_CTRL_COMPOUND_TYPES) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
min, max, step, def, NULL, 0,
flags, NULL, NULL, p_def, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_compound);
/* Helper function for standard integer menu controls */
struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, u8 _max, u8 _def, const s64 *qmenu_int)
{
const char *name;
enum v4l2_ctrl_type type;
s64 min;
u64 step;
s64 max = _max;
s64 def = _def;
u32 flags;
v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
if (type != V4L2_CTRL_TYPE_INTEGER_MENU) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, 0, def, NULL, 0,
flags, NULL, qmenu_int, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
/* Add the controls from another handler to our own. */
int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl_handler *add,
bool (*filter)(const struct v4l2_ctrl *ctrl),
bool from_other_dev)
{
struct v4l2_ctrl_ref *ref;
int ret = 0;
/* Do nothing if either handler is NULL or if they are the same */
if (!hdl || !add || hdl == add)
return 0;
if (hdl->error)
return hdl->error;
mutex_lock(add->lock);
list_for_each_entry(ref, &add->ctrl_refs, node) {
struct v4l2_ctrl *ctrl = ref->ctrl;
/* Skip handler-private controls. */
if (ctrl->is_private)
continue;
/* And control classes */
if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
continue;
/* Filter any unwanted controls */
if (filter && !filter(ctrl))
continue;
ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false);
if (ret)
break;
}
mutex_unlock(add->lock);
return ret;
}
EXPORT_SYMBOL(v4l2_ctrl_add_handler);
bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl)
{
if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_TX)
return true;
if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_RX)
return true;
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
case V4L2_CID_AUDIO_VOLUME:
case V4L2_CID_AUDIO_BALANCE:
case V4L2_CID_AUDIO_BASS:
case V4L2_CID_AUDIO_TREBLE:
case V4L2_CID_AUDIO_LOUDNESS:
return true;
default:
break;
}
return false;
}
EXPORT_SYMBOL(v4l2_ctrl_radio_filter);
/* Cluster controls */
void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
{
bool has_volatiles = false;
int i;
/* The first control is the master control and it must not be NULL */
if (WARN_ON(ncontrols == 0 || controls[0] == NULL))
return;
for (i = 0; i < ncontrols; i++) {
if (controls[i]) {
controls[i]->cluster = controls;
controls[i]->ncontrols = ncontrols;
if (controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE)
has_volatiles = true;
}
}
controls[0]->has_volatiles = has_volatiles;
}
EXPORT_SYMBOL(v4l2_ctrl_cluster);
void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
u8 manual_val, bool set_volatile)
{
struct v4l2_ctrl *master = controls[0];
u32 flag = 0;
int i;
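	/* controls[0] must be the auto control that drives the cluster. */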
v4l2_ctrl_cluster(ncontrols, controls);
WARN_ON(ncontrols <= 1);
WARN_ON(manual_val < master->minimum || manual_val > master->maximum);
WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl));
master->is_auto = true;
master->has_volatiles = set_volatile;
master->manual_mode_value = manual_val;
master->flags |= V4L2_CTRL_FLAG_UPDATE;
if (!is_cur_manual(master))
flag = V4L2_CTRL_FLAG_INACTIVE |
(set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0);
for (i = 1; i < ncontrols; i++)
if (controls[i])
controls[i]->flags |= flag;
}
EXPORT_SYMBOL(v4l2_ctrl_auto_cluster);
/*
* Obtain the current volatile values of an autocluster and mark them
* as new.
*/
void update_from_auto_cluster(struct v4l2_ctrl *master)
{
int i;
for (i = 1; i < master->ncontrols; i++)
cur_to_new(master->cluster[i]);
if (!call_op(master, g_volatile_ctrl))
for (i = 1; i < master->ncontrols; i++)
if (master->cluster[i])
master->cluster[i]->is_new = 1;
}
/*
* Return non-zero if one or more of the controls in the cluster has a new
* value that differs from the current value.
*/
static int cluster_changed(struct v4l2_ctrl *master)
{
bool changed = false;
int i;
for (i = 0; i < master->ncontrols; i++) {
struct v4l2_ctrl *ctrl = master->cluster[i];
bool ctrl_changed = false;
if (!ctrl)
continue;
if (ctrl->flags & V4L2_CTRL_FLAG_EXECUTE_ON_WRITE) {
changed = true;
ctrl_changed = true;
}
/*
* Set has_changed to false to avoid generating
* the event V4L2_EVENT_CTRL_CH_VALUE
*/
if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
ctrl->has_changed = false;
continue;
}
if (ctrl->elems != ctrl->new_elems)
ctrl_changed = true;
if (!ctrl_changed)
ctrl_changed = !ctrl->type_ops->equal(ctrl,
ctrl->p_cur, ctrl->p_new);
ctrl->has_changed = ctrl_changed;
changed |= ctrl->has_changed;
}
return changed;
}
/*
* Core function that calls try/s_ctrl and ensures that the new value is
* copied to the current value on a set.
* Must be called with ctrl->handler->lock held.
*/
int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
bool set, u32 ch_flags)
{
bool update_flag;
int ret;
int i;
/*
* Go through the cluster and either validate the new value or
* (if no new value was set), copy the current value to the new
* value, ensuring a consistent view for the control ops when
* called.
*/
for (i = 0; i < master->ncontrols; i++) {
struct v4l2_ctrl *ctrl = master->cluster[i];
if (!ctrl)
continue;
if (!ctrl->is_new) {
cur_to_new(ctrl);
continue;
}
/*
* Check again: it may have changed since the
* previous check in try_or_set_ext_ctrls().
*/
if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
return -EBUSY;
}
ret = call_op(master, try_ctrl);
/* Don't set if there is no change */
if (ret || !set || !cluster_changed(master))
return ret;
ret = call_op(master, s_ctrl);
if (ret)
return ret;
/* If OK, then make the new values permanent. */
update_flag = is_cur_manual(master) != is_new_manual(master);
for (i = 0; i < master->ncontrols; i++) {
/*
* If we switch from auto to manual mode, and this cluster
* contains volatile controls, then all non-master controls
* have to be marked as changed. The 'new' value contains
* the volatile value (obtained by update_from_auto_cluster),
* which now has to become the current value.
*/
if (i && update_flag && is_new_manual(master) &&
master->has_volatiles && master->cluster[i])
master->cluster[i]->has_changed = true;
new_to_cur(fh, master->cluster[i], ch_flags |
((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
}
return 0;
}
/* Activate/deactivate a control. */
void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
{
/* invert since the actual flag is called 'inactive' */
bool inactive = !active;
bool old;
if (ctrl == NULL)
return;
if (inactive)
/* set V4L2_CTRL_FLAG_INACTIVE */
old = test_and_set_bit(4, &ctrl->flags);
else
/* clear V4L2_CTRL_FLAG_INACTIVE */
old = test_and_clear_bit(4, &ctrl->flags);
if (old != inactive)
send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
}
EXPORT_SYMBOL(v4l2_ctrl_activate);
void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
{
bool old;
if (ctrl == NULL)
return;
lockdep_assert_held(ctrl->handler->lock);
if (grabbed)
/* set V4L2_CTRL_FLAG_GRABBED */
old = test_and_set_bit(1, &ctrl->flags);
else
/* clear V4L2_CTRL_FLAG_GRABBED */
old = test_and_clear_bit(1, &ctrl->flags);
if (old != grabbed)
send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
}
EXPORT_SYMBOL(__v4l2_ctrl_grab);
/* Call s_ctrl for all controls owned by the handler */
int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
{
struct v4l2_ctrl *ctrl;
int ret = 0;
if (hdl == NULL)
return 0;
lockdep_assert_held(hdl->lock);
list_for_each_entry(ctrl, &hdl->ctrls, node)
ctrl->done = false;
list_for_each_entry(ctrl, &hdl->ctrls, node) {
struct v4l2_ctrl *master = ctrl->cluster[0];
int i;
/* Skip if this control was already handled by a cluster. */
/* Skip button controls and read-only controls. */
if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
(ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
continue;
for (i = 0; i < master->ncontrols; i++) {
if (master->cluster[i]) {
cur_to_new(master->cluster[i]);
master->cluster[i]->is_new = 1;
master->cluster[i]->done = true;
}
}
ret = call_op(master, s_ctrl);
if (ret)
break;
}
return ret;
}
EXPORT_SYMBOL_GPL(__v4l2_ctrl_handler_setup);
int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
{
int ret;
if (hdl == NULL)
return 0;
mutex_lock(hdl->lock);
ret = __v4l2_ctrl_handler_setup(hdl);
mutex_unlock(hdl->lock);
return ret;
}
EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
/* Log the control name and value */
static void log_ctrl(const struct v4l2_ctrl *ctrl,
const char *prefix, const char *colon)
{
if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
return;
if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
return;
pr_info("%s%s%s: ", prefix, colon, ctrl->name);
ctrl->type_ops->log(ctrl);
if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE |
V4L2_CTRL_FLAG_GRABBED |
V4L2_CTRL_FLAG_VOLATILE)) {
if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
pr_cont(" inactive");
if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)
pr_cont(" grabbed");
if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE)
pr_cont(" volatile");
}
pr_cont("\n");
}
/* Log all controls owned by the handler */
void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
const char *prefix)
{
struct v4l2_ctrl *ctrl;
const char *colon = "";
int len;
if (!hdl)
return;
if (!prefix)
prefix = "";
len = strlen(prefix);
if (len && prefix[len - 1] != ' ')
colon = ": ";
mutex_lock(hdl->lock);
list_for_each_entry(ctrl, &hdl->ctrls, node)
if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
log_ctrl(ctrl, prefix, colon);
mutex_unlock(hdl->lock);
}
EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ctrl_ops,
const struct v4l2_fwnode_device_properties *p)
{
if (p->orientation != V4L2_FWNODE_PROPERTY_UNSET) {
u32 orientation_ctrl;
switch (p->orientation) {
case V4L2_FWNODE_ORIENTATION_FRONT:
orientation_ctrl = V4L2_CAMERA_ORIENTATION_FRONT;
break;
case V4L2_FWNODE_ORIENTATION_BACK:
orientation_ctrl = V4L2_CAMERA_ORIENTATION_BACK;
break;
case V4L2_FWNODE_ORIENTATION_EXTERNAL:
orientation_ctrl = V4L2_CAMERA_ORIENTATION_EXTERNAL;
break;
default:
return -EINVAL;
}
if (!v4l2_ctrl_new_std_menu(hdl, ctrl_ops,
V4L2_CID_CAMERA_ORIENTATION,
V4L2_CAMERA_ORIENTATION_EXTERNAL, 0,
orientation_ctrl))
return hdl->error;
}
if (p->rotation != V4L2_FWNODE_PROPERTY_UNSET) {
if (!v4l2_ctrl_new_std(hdl, ctrl_ops,
V4L2_CID_CAMERA_SENSOR_ROTATION,
p->rotation, p->rotation, 1,
p->rotation))
return hdl->error;
}
return hdl->error;
}
EXPORT_SYMBOL(v4l2_ctrl_new_fwnode_properties);
| linux-master | drivers/media/v4l2-core/v4l2-ctrls-core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* V4L2 controls framework control definitions.
*
* Copyright (C) 2010-2021 Hans Verkuil <[email protected]>
*/
#include <linux/export.h>
#include <media/v4l2-ctrls.h>
/*
* Returns NULL or a character pointer array containing the menu for
* the given control ID. The pointer array ends with a NULL pointer.
* An empty string signifies a menu entry that is invalid. This allows
 * drivers to disable certain options if they are not supported.
*/
const char * const *v4l2_ctrl_get_menu(u32 id)
{
static const char * const mpeg_audio_sampling_freq[] = {
"44.1 kHz",
"48 kHz",
"32 kHz",
NULL
};
static const char * const mpeg_audio_encoding[] = {
"MPEG-1/2 Layer I",
"MPEG-1/2 Layer II",
"MPEG-1/2 Layer III",
"MPEG-2/4 AAC",
"AC-3",
NULL
};
static const char * const mpeg_audio_l1_bitrate[] = {
"32 kbps",
"64 kbps",
"96 kbps",
"128 kbps",
"160 kbps",
"192 kbps",
"224 kbps",
"256 kbps",
"288 kbps",
"320 kbps",
"352 kbps",
"384 kbps",
"416 kbps",
"448 kbps",
NULL
};
static const char * const mpeg_audio_l2_bitrate[] = {
"32 kbps",
"48 kbps",
"56 kbps",
"64 kbps",
"80 kbps",
"96 kbps",
"112 kbps",
"128 kbps",
"160 kbps",
"192 kbps",
"224 kbps",
"256 kbps",
"320 kbps",
"384 kbps",
NULL
};
static const char * const mpeg_audio_l3_bitrate[] = {
"32 kbps",
"40 kbps",
"48 kbps",
"56 kbps",
"64 kbps",
"80 kbps",
"96 kbps",
"112 kbps",
"128 kbps",
"160 kbps",
"192 kbps",
"224 kbps",
"256 kbps",
"320 kbps",
NULL
};
static const char * const mpeg_audio_ac3_bitrate[] = {
"32 kbps",
"40 kbps",
"48 kbps",
"56 kbps",
"64 kbps",
"80 kbps",
"96 kbps",
"112 kbps",
"128 kbps",
"160 kbps",
"192 kbps",
"224 kbps",
"256 kbps",
"320 kbps",
"384 kbps",
"448 kbps",
"512 kbps",
"576 kbps",
"640 kbps",
NULL
};
static const char * const mpeg_audio_mode[] = {
"Stereo",
"Joint Stereo",
"Dual",
"Mono",
NULL
};
static const char * const mpeg_audio_mode_extension[] = {
"Bound 4",
"Bound 8",
"Bound 12",
"Bound 16",
NULL
};
static const char * const mpeg_audio_emphasis[] = {
"No Emphasis",
"50/15 us",
"CCITT J17",
NULL
};
static const char * const mpeg_audio_crc[] = {
"No CRC",
"16-bit CRC",
NULL
};
static const char * const mpeg_audio_dec_playback[] = {
"Auto",
"Stereo",
"Left",
"Right",
"Mono",
"Swapped Stereo",
NULL
};
static const char * const mpeg_video_encoding[] = {
"MPEG-1",
"MPEG-2",
"MPEG-4 AVC",
NULL
};
static const char * const mpeg_video_aspect[] = {
"1x1",
"4x3",
"16x9",
"2.21x1",
NULL
};
static const char * const mpeg_video_bitrate_mode[] = {
"Variable Bitrate",
"Constant Bitrate",
"Constant Quality",
NULL
};
static const char * const mpeg_stream_type[] = {
"MPEG-2 Program Stream",
"MPEG-2 Transport Stream",
"MPEG-1 System Stream",
"MPEG-2 DVD-compatible Stream",
"MPEG-1 VCD-compatible Stream",
"MPEG-2 SVCD-compatible Stream",
NULL
};
static const char * const mpeg_stream_vbi_fmt[] = {
"No VBI",
"Private Packet, IVTV Format",
NULL
};
static const char * const camera_power_line_frequency[] = {
"Disabled",
"50 Hz",
"60 Hz",
"Auto",
NULL
};
static const char * const camera_exposure_auto[] = {
"Auto Mode",
"Manual Mode",
"Shutter Priority Mode",
"Aperture Priority Mode",
NULL
};
static const char * const camera_exposure_metering[] = {
"Average",
"Center Weighted",
"Spot",
"Matrix",
NULL
};
static const char * const camera_auto_focus_range[] = {
"Auto",
"Normal",
"Macro",
"Infinity",
NULL
};
static const char * const colorfx[] = {
"None",
"Black & White",
"Sepia",
"Negative",
"Emboss",
"Sketch",
"Sky Blue",
"Grass Green",
"Skin Whiten",
"Vivid",
"Aqua",
"Art Freeze",
"Silhouette",
"Solarization",
"Antique",
"Set Cb/Cr",
NULL
};
static const char * const auto_n_preset_white_balance[] = {
"Manual",
"Auto",
"Incandescent",
"Fluorescent",
"Fluorescent H",
"Horizon",
"Daylight",
"Flash",
"Cloudy",
"Shade",
NULL,
};
static const char * const camera_iso_sensitivity_auto[] = {
"Manual",
"Auto",
NULL
};
static const char * const scene_mode[] = {
"None",
"Backlight",
"Beach/Snow",
"Candle Light",
"Dusk/Dawn",
"Fall Colors",
"Fireworks",
"Landscape",
"Night",
"Party/Indoor",
"Portrait",
"Sports",
"Sunset",
"Text",
NULL
};
static const char * const tune_emphasis[] = {
"None",
"50 Microseconds",
"75 Microseconds",
NULL,
};
static const char * const header_mode[] = {
"Separate Buffer",
"Joined With 1st Frame",
NULL,
};
static const char * const multi_slice[] = {
"Single",
"Max Macroblocks",
"Max Bytes",
NULL,
};
static const char * const entropy_mode[] = {
"CAVLC",
"CABAC",
NULL,
};
static const char * const mpeg_h264_level[] = {
"1",
"1b",
"1.1",
"1.2",
"1.3",
"2",
"2.1",
"2.2",
"3",
"3.1",
"3.2",
"4",
"4.1",
"4.2",
"5",
"5.1",
"5.2",
"6.0",
"6.1",
"6.2",
NULL,
};
static const char * const h264_loop_filter[] = {
"Enabled",
"Disabled",
"Disabled at Slice Boundary",
NULL,
};
static const char * const h264_profile[] = {
"Baseline",
"Constrained Baseline",
"Main",
"Extended",
"High",
"High 10",
"High 422",
"High 444 Predictive",
"High 10 Intra",
"High 422 Intra",
"High 444 Intra",
"CAVLC 444 Intra",
"Scalable Baseline",
"Scalable High",
"Scalable High Intra",
"Stereo High",
"Multiview High",
"Constrained High",
NULL,
};
static const char * const vui_sar_idc[] = {
"Unspecified",
"1:1",
"12:11",
"10:11",
"16:11",
"40:33",
"24:11",
"20:11",
"32:11",
"80:33",
"18:11",
"15:11",
"64:33",
"160:99",
"4:3",
"3:2",
"2:1",
"Extended SAR",
NULL,
};
static const char * const h264_fp_arrangement_type[] = {
"Checkerboard",
"Column",
"Row",
"Side by Side",
"Top Bottom",
"Temporal",
NULL,
};
static const char * const h264_fmo_map_type[] = {
"Interleaved Slices",
"Scattered Slices",
"Foreground with Leftover",
"Box Out",
"Raster Scan",
"Wipe Scan",
"Explicit",
NULL,
};
static const char * const h264_decode_mode[] = {
"Slice-Based",
"Frame-Based",
NULL,
};
static const char * const h264_start_code[] = {
"No Start Code",
"Annex B Start Code",
NULL,
};
static const char * const h264_hierarchical_coding_type[] = {
"Hier Coding B",
"Hier Coding P",
NULL,
};
static const char * const mpeg_mpeg2_level[] = {
"Low",
"Main",
"High 1440",
"High",
NULL,
};
static const char * const mpeg2_profile[] = {
"Simple",
"Main",
"SNR Scalable",
"Spatially Scalable",
"High",
NULL,
};
static const char * const mpeg_mpeg4_level[] = {
"0",
"0b",
"1",
"2",
"3",
"3b",
"4",
"5",
NULL,
};
static const char * const mpeg4_profile[] = {
"Simple",
"Advanced Simple",
"Core",
"Simple Scalable",
"Advanced Coding Efficiency",
NULL,
};
static const char * const vpx_golden_frame_sel[] = {
"Use Previous Frame",
"Use Previous Specific Frame",
NULL,
};
static const char * const vp8_profile[] = {
"0",
"1",
"2",
"3",
NULL,
};
static const char * const vp9_profile[] = {
"0",
"1",
"2",
"3",
NULL,
};
static const char * const vp9_level[] = {
"1",
"1.1",
"2",
"2.1",
"3",
"3.1",
"4",
"4.1",
"5",
"5.1",
"5.2",
"6",
"6.1",
"6.2",
NULL,
};
static const char * const flash_led_mode[] = {
"Off",
"Flash",
"Torch",
NULL,
};
static const char * const flash_strobe_source[] = {
"Software",
"External",
NULL,
};
static const char * const jpeg_chroma_subsampling[] = {
"4:4:4",
"4:2:2",
"4:2:0",
"4:1:1",
"4:1:0",
"Gray",
NULL,
};
static const char * const dv_tx_mode[] = {
"DVI-D",
"HDMI",
NULL,
};
static const char * const dv_rgb_range[] = {
"Automatic",
"RGB Limited Range (16-235)",
"RGB Full Range (0-255)",
NULL,
};
static const char * const dv_it_content_type[] = {
"Graphics",
"Photo",
"Cinema",
"Game",
"No IT Content",
NULL,
};
static const char * const detect_md_mode[] = {
"Disabled",
"Global",
"Threshold Grid",
"Region Grid",
NULL,
};
static const char * const av1_profile[] = {
"Main",
"High",
"Professional",
NULL,
};
static const char * const av1_level[] = {
"2.0",
"2.1",
"2.2",
"2.3",
"3.0",
"3.1",
"3.2",
"3.3",
"4.0",
"4.1",
"4.2",
"4.3",
"5.0",
"5.1",
"5.2",
"5.3",
"6.0",
"6.1",
"6.2",
"6.3",
"7.0",
"7.1",
"7.2",
"7.3",
NULL,
};
static const char * const hevc_profile[] = {
"Main",
"Main Still Picture",
"Main 10",
NULL,
};
static const char * const hevc_level[] = {
"1",
"2",
"2.1",
"3",
"3.1",
"4",
"4.1",
"5",
"5.1",
"5.2",
"6",
"6.1",
"6.2",
NULL,
};
static const char * const hevc_hierarchical_coding_type[] = {
"B",
"P",
NULL,
};
static const char * const hevc_refresh_type[] = {
"None",
"CRA",
"IDR",
NULL,
};
static const char * const hevc_size_of_length_field[] = {
"0",
"1",
"2",
"4",
NULL,
};
static const char * const hevc_tier[] = {
"Main",
"High",
NULL,
};
static const char * const hevc_loop_filter_mode[] = {
"Disabled",
"Enabled",
"Disabled at slice boundary",
"NULL",
};
static const char * const hevc_decode_mode[] = {
"Slice-Based",
"Frame-Based",
NULL,
};
static const char * const hevc_start_code[] = {
"No Start Code",
"Annex B Start Code",
NULL,
};
static const char * const camera_orientation[] = {
"Front",
"Back",
"External",
NULL,
};
static const char * const mpeg_video_frame_skip[] = {
"Disabled",
"Level Limit",
"VBV/CPB Limit",
NULL,
};
static const char * const intra_refresh_period_type[] = {
"Random",
"Cyclic",
NULL,
};
switch (id) {
case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
return mpeg_audio_sampling_freq;
case V4L2_CID_MPEG_AUDIO_ENCODING:
return mpeg_audio_encoding;
case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
return mpeg_audio_l1_bitrate;
case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
return mpeg_audio_l2_bitrate;
case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
return mpeg_audio_l3_bitrate;
case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
return mpeg_audio_ac3_bitrate;
case V4L2_CID_MPEG_AUDIO_MODE:
return mpeg_audio_mode;
case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
return mpeg_audio_mode_extension;
case V4L2_CID_MPEG_AUDIO_EMPHASIS:
return mpeg_audio_emphasis;
case V4L2_CID_MPEG_AUDIO_CRC:
return mpeg_audio_crc;
case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
return mpeg_audio_dec_playback;
case V4L2_CID_MPEG_VIDEO_ENCODING:
return mpeg_video_encoding;
case V4L2_CID_MPEG_VIDEO_ASPECT:
return mpeg_video_aspect;
case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
return mpeg_video_bitrate_mode;
case V4L2_CID_MPEG_STREAM_TYPE:
return mpeg_stream_type;
case V4L2_CID_MPEG_STREAM_VBI_FMT:
return mpeg_stream_vbi_fmt;
case V4L2_CID_POWER_LINE_FREQUENCY:
return camera_power_line_frequency;
case V4L2_CID_EXPOSURE_AUTO:
return camera_exposure_auto;
case V4L2_CID_EXPOSURE_METERING:
return camera_exposure_metering;
case V4L2_CID_AUTO_FOCUS_RANGE:
return camera_auto_focus_range;
case V4L2_CID_COLORFX:
return colorfx;
case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
return auto_n_preset_white_balance;
case V4L2_CID_ISO_SENSITIVITY_AUTO:
return camera_iso_sensitivity_auto;
case V4L2_CID_SCENE_MODE:
return scene_mode;
case V4L2_CID_TUNE_PREEMPHASIS:
return tune_emphasis;
case V4L2_CID_TUNE_DEEMPHASIS:
return tune_emphasis;
case V4L2_CID_FLASH_LED_MODE:
return flash_led_mode;
case V4L2_CID_FLASH_STROBE_SOURCE:
return flash_strobe_source;
case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
return header_mode;
case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
return mpeg_video_frame_skip;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
return multi_slice;
case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
return entropy_mode;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
return mpeg_h264_level;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
return h264_loop_filter;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
return h264_profile;
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
return vui_sar_idc;
case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
return h264_fp_arrangement_type;
case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
return h264_fmo_map_type;
case V4L2_CID_STATELESS_H264_DECODE_MODE:
return h264_decode_mode;
case V4L2_CID_STATELESS_H264_START_CODE:
return h264_start_code;
case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE:
return h264_hierarchical_coding_type;
case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
return mpeg_mpeg2_level;
case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
return mpeg2_profile;
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
return mpeg_mpeg4_level;
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
return mpeg4_profile;
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
return vpx_golden_frame_sel;
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
return vp8_profile;
case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
return vp9_profile;
case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
return vp9_level;
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
return jpeg_chroma_subsampling;
case V4L2_CID_DV_TX_MODE:
return dv_tx_mode;
case V4L2_CID_DV_TX_RGB_RANGE:
case V4L2_CID_DV_RX_RGB_RANGE:
return dv_rgb_range;
case V4L2_CID_DV_TX_IT_CONTENT_TYPE:
case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
return dv_it_content_type;
case V4L2_CID_DETECT_MD_MODE:
return detect_md_mode;
case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
return hevc_profile;
case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
return hevc_level;
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
return hevc_hierarchical_coding_type;
case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE:
return hevc_refresh_type;
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
return hevc_size_of_length_field;
case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
return hevc_tier;
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
return hevc_loop_filter_mode;
case V4L2_CID_MPEG_VIDEO_AV1_PROFILE:
return av1_profile;
case V4L2_CID_MPEG_VIDEO_AV1_LEVEL:
return av1_level;
case V4L2_CID_STATELESS_HEVC_DECODE_MODE:
return hevc_decode_mode;
case V4L2_CID_STATELESS_HEVC_START_CODE:
return hevc_start_code;
case V4L2_CID_CAMERA_ORIENTATION:
return camera_orientation;
case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE:
return intra_refresh_period_type;
default:
return NULL;
}
}
EXPORT_SYMBOL(v4l2_ctrl_get_menu);
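/*
* Illustrative sketch (not part of the original file): the returned menu
* is a NULL-terminated string array, so a caller can walk it like this:
*
*	const char * const *menu =
*		v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
*	unsigned int i;
*
*	for (i = 0; menu && menu[i]; i++)
*		pr_info("menu item %u: %s\n", i, menu[i]);
*/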
#define __v4l2_qmenu_int_len(arr, len) ({ *(len) = ARRAY_SIZE(arr); (arr); })
/*
* Returns NULL or an s64 type array containing the menu for given
* control ID. The total number of the menu items is returned in @len.
*/
const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
{
static const s64 qmenu_int_vpx_num_partitions[] = {
1, 2, 4, 8,
};
static const s64 qmenu_int_vpx_num_ref_frames[] = {
1, 2, 3,
};
switch (id) {
case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
return __v4l2_qmenu_int_len(qmenu_int_vpx_num_partitions, len);
case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
return __v4l2_qmenu_int_len(qmenu_int_vpx_num_ref_frames, len);
default:
*len = 0;
return NULL;
}
}
EXPORT_SYMBOL(v4l2_ctrl_get_int_menu);
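/*
* Illustrative sketch (not part of the original file): integer menus also
* report their length through @len:
*
*	u32 len;
*	const s64 *items =
*		v4l2_ctrl_get_int_menu(V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS,
*				       &len);
*
*	// here items[0..3] == {1, 2, 4, 8} and len == 4
*/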
/* Return the control name. */
const char *v4l2_ctrl_get_name(u32 id)
{
switch (id) {
/* USER controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
case V4L2_CID_USER_CLASS: return "User Controls";
case V4L2_CID_BRIGHTNESS: return "Brightness";
case V4L2_CID_CONTRAST: return "Contrast";
case V4L2_CID_SATURATION: return "Saturation";
case V4L2_CID_HUE: return "Hue";
case V4L2_CID_AUDIO_VOLUME: return "Volume";
case V4L2_CID_AUDIO_BALANCE: return "Balance";
case V4L2_CID_AUDIO_BASS: return "Bass";
case V4L2_CID_AUDIO_TREBLE: return "Treble";
case V4L2_CID_AUDIO_MUTE: return "Mute";
case V4L2_CID_AUDIO_LOUDNESS: return "Loudness";
case V4L2_CID_BLACK_LEVEL: return "Black Level";
case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic";
case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance";
case V4L2_CID_RED_BALANCE: return "Red Balance";
case V4L2_CID_BLUE_BALANCE: return "Blue Balance";
case V4L2_CID_GAMMA: return "Gamma";
case V4L2_CID_EXPOSURE: return "Exposure";
case V4L2_CID_AUTOGAIN: return "Gain, Automatic";
case V4L2_CID_GAIN: return "Gain";
case V4L2_CID_HFLIP: return "Horizontal Flip";
case V4L2_CID_VFLIP: return "Vertical Flip";
case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency";
case V4L2_CID_HUE_AUTO: return "Hue, Automatic";
case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
case V4L2_CID_SHARPNESS: return "Sharpness";
case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation";
case V4L2_CID_CHROMA_AGC: return "Chroma AGC";
case V4L2_CID_COLOR_KILLER: return "Color Killer";
case V4L2_CID_COLORFX: return "Color Effects";
case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic";
case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter";
case V4L2_CID_ROTATE: return "Rotate";
case V4L2_CID_BG_COLOR: return "Background Color";
case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers";
case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr";
case V4L2_CID_COLORFX_RGB: return "Color Effects, RGB";
/*
* Codec controls
*
* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical.
*
* Keep the order of the 'case's the same as in videodev2.h!
*/
case V4L2_CID_CODEC_CLASS: return "Codec Controls";
case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type";
case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID";
case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID";
case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID";
case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID";
case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID";
case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID";
case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format";
case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency";
case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding";
case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate";
case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate";
case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate";
case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode";
case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension";
case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis";
case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC";
case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute";
case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate";
case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate";
case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: return "Audio Playback";
case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return "Audio Multilingual Playback";
case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding";
case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect";
case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames";
case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size";
case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure";
case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown";
case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode";
case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY: return "Constant Quality";
case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate";
case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate";
case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute";
case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs";
case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: return "Intra Refresh Period Type";
case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: return "Intra Refresh Period";
case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics";
case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: return "Frame Skip Mode";
case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: return "Display Delay";
case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: return "Display Delay Enable";
case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: return "Generate Access Unit Delimiters";
case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode";
case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period";
case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode";
case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile";
case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR";
case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR";
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING: return "H264 Enable Frame Packing SEI";
case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0: return "H264 Set Curr. Frame as Frame0";
case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return "H264 FP Arrangement Type";
case V4L2_CID_MPEG_VIDEO_H264_FMO: return "H264 Flexible MB Ordering";
case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return "H264 Map Type for FMO";
case V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP: return "H264 FMO Number of Slice Groups";
case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION: return "H264 FMO Direction of Change";
case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE: return "H264 FMO Size of 1st Slice Grp";
case V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH: return "H264 FMO No. of Consecutive MBs";
case V4L2_CID_MPEG_VIDEO_H264_ASO: return "H264 Arbitrary Slice Ordering";
case V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER: return "H264 ASO Slice Order";
case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING: return "Enable H264 Hierarchical Coding";
case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return "H264 Hierarchical Coding Type";
case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:return "H264 Number of HC Layers";
case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP:
return "H264 Set QP Value for HC Layers";
case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION:
return "H264 Constrained Intra Pred";
case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: return "H264 Chroma QP Index Offset";
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP: return "H264 I-Frame Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: return "H264 I-Frame Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: return "H264 P-Frame Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: return "H264 P-Frame Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP: return "H264 B-Frame Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP: return "H264 B-Frame Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR: return "H264 Hierarchical Lay 0 Bitrate";
case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR: return "H264 Hierarchical Lay 1 Bitrate";
case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR: return "H264 Hierarchical Lay 2 Bitrate";
case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR: return "H264 Hierarchical Lay 3 Bitrate";
case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR: return "H264 Hierarchical Lay 4 Bitrate";
case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR: return "H264 Hierarchical Lay 5 Bitrate";
case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR: return "H264 Hierarchical Lay 6 Bitrate";
case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level";
case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile";
case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice";
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice";
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method";
case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS";
case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count";
case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: return "Video Decoder Conceal Color";
case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control";
case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range";
case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range";
case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame";
case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID: return "Base Layer Priority ID";
case V4L2_CID_MPEG_VIDEO_LTR_COUNT: return "LTR Count";
case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: return "Frame LTR Index";
case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: return "Use LTR Frames";
case V4L2_CID_FWHT_I_FRAME_QP: return "FWHT I-Frame QP Value";
case V4L2_CID_FWHT_P_FRAME_QP: return "FWHT P-Frame QP Value";
/* VPX controls */
case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions";
case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4: return "VPX Intra Mode Decision Disable";
case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return "VPX No. of Refs for P Frame";
case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL: return "VPX Loop Filter Level Range";
case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control";
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period";
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator";
case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile";
case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile";
case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: return "VP9 Level";
/* HEVC controls */
case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: return "HEVC I-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP: return "HEVC P-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP: return "HEVC B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP: return "HEVC Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP: return "HEVC Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP: return "HEVC I-Frame Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP: return "HEVC I-Frame Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP: return "HEVC P-Frame Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP: return "HEVC P-Frame Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP: return "HEVC B-Frame Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP: return "HEVC B-Frame Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: return "HEVC Profile";
case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: return "HEVC Level";
case V4L2_CID_MPEG_VIDEO_HEVC_TIER: return "HEVC Tier";
case V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION: return "HEVC Frame Rate Resolution";
case V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH: return "HEVC Maximum Coding Unit Depth";
case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: return "HEVC Refresh Type";
case V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED: return "HEVC Constant Intra Prediction";
case V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU: return "HEVC Lossless Encoding";
case V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT: return "HEVC Wavefront";
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: return "HEVC Loop Filter";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP: return "HEVC QP Values";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: return "HEVC Hierarchical Coding Type";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER: return "HEVC Hierarchical Coding Layer";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP: return "HEVC Hierarchical Layer 0 QP";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP: return "HEVC Hierarchical Layer 1 QP";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP: return "HEVC Hierarchical Layer 2 QP";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP: return "HEVC Hierarchical Layer 3 QP";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP: return "HEVC Hierarchical Layer 4 QP";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP: return "HEVC Hierarchical Layer 5 QP";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP: return "HEVC Hierarchical Layer 6 QP";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR: return "HEVC Hierarchical Lay 0 BitRate";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR: return "HEVC Hierarchical Lay 1 BitRate";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR: return "HEVC Hierarchical Lay 2 BitRate";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR: return "HEVC Hierarchical Lay 3 BitRate";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR: return "HEVC Hierarchical Lay 4 BitRate";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR: return "HEVC Hierarchical Lay 5 BitRate";
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR: return "HEVC Hierarchical Lay 6 BitRate";
case V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB: return "HEVC General PB";
case V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID: return "HEVC Temporal ID";
case V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING: return "HEVC Strong Intra Smoothing";
case V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT: return "HEVC Intra PU Split";
case V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION: return "HEVC TMV Prediction";
case V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1: return "HEVC Max Num of Candidate MVs";
case V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE: return "HEVC ENC Without Startcode";
case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD: return "HEVC Num of I-Frame b/w 2 IDR";
case V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2: return "HEVC Loop Filter Beta Offset";
case V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2: return "HEVC Loop Filter TC Offset";
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field";
case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame";
case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR";
/* AV1 controls */
case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: return "AV1 Profile";
case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: return "AV1 Level";
/* CAMERA controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
case V4L2_CID_CAMERA_CLASS: return "Camera Controls";
case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure";
case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute";
case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate";
case V4L2_CID_PAN_RELATIVE: return "Pan, Relative";
case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative";
case V4L2_CID_PAN_RESET: return "Pan, Reset";
case V4L2_CID_TILT_RESET: return "Tilt, Reset";
case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute";
case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute";
case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous";
case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
case V4L2_CID_PRIVACY: return "Privacy";
case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias";
case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset";
case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range";
case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization";
case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity";
case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto";
case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode";
case V4L2_CID_SCENE_MODE: return "Scene Mode";
case V4L2_CID_3A_LOCK: return "3A Lock";
case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start";
case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop";
case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status";
case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
case V4L2_CID_PAN_SPEED: return "Pan, Speed";
case V4L2_CID_TILT_SPEED: return "Tilt, Speed";
case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size";
case V4L2_CID_CAMERA_ORIENTATION: return "Camera Orientation";
case V4L2_CID_CAMERA_SENSOR_ROTATION: return "Camera Sensor Rotation";
case V4L2_CID_HDR_SENSOR_MODE: return "HDR Sensor Mode";
/* FM Radio Modulator controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls";
case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation";
case V4L2_CID_RDS_TX_PI: return "RDS Program ID";
case V4L2_CID_RDS_TX_PTY: return "RDS Program Type";
case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name";
case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text";
case V4L2_CID_RDS_TX_MONO_STEREO: return "RDS Stereo";
case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: return "RDS Artificial Head";
case V4L2_CID_RDS_TX_COMPRESSED: return "RDS Compressed";
case V4L2_CID_RDS_TX_DYNAMIC_PTY: return "RDS Dynamic PTY";
case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement";
case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: return "RDS Traffic Program";
case V4L2_CID_RDS_TX_MUSIC_SPEECH: return "RDS Music";
case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: return "RDS Enable Alt Frequencies";
case V4L2_CID_RDS_TX_ALT_FREQS: return "RDS Alternate Frequencies";
case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled";
case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation";
case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain";
case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time";
case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled";
case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation";
case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency";
case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis";
case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
/* Flash controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
case V4L2_CID_FLASH_CLASS: return "Flash Controls";
case V4L2_CID_FLASH_LED_MODE: return "LED Mode";
case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source";
case V4L2_CID_FLASH_STROBE: return "Strobe";
case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe";
case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status";
case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout";
case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode";
case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode";
case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
case V4L2_CID_FLASH_FAULT: return "Faults";
case V4L2_CID_FLASH_CHARGE: return "Charge";
case V4L2_CID_FLASH_READY: return "Ready to Strobe";
/* JPEG encoder controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
case V4L2_CID_JPEG_CLASS: return "JPEG Compression Controls";
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return "Chroma Subsampling";
case V4L2_CID_JPEG_RESTART_INTERVAL: return "Restart Interval";
case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality";
case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers";
/* Image source controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls";
case V4L2_CID_VBLANK: return "Vertical Blanking";
case V4L2_CID_HBLANK: return "Horizontal Blanking";
case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain";
case V4L2_CID_TEST_PATTERN_RED: return "Red Pixel Value";
case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value";
case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value";
case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value";
case V4L2_CID_NOTIFY_GAINS: return "Notify Gains";
/* Image processing controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls";
case V4L2_CID_LINK_FREQ: return "Link Frequency";
case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
case V4L2_CID_TEST_PATTERN: return "Test Pattern";
case V4L2_CID_DEINTERLACING_MODE: return "Deinterlacing Mode";
case V4L2_CID_DIGITAL_GAIN: return "Digital Gain";
/* DV controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
case V4L2_CID_DV_CLASS: return "Digital Video Controls";
case V4L2_CID_DV_TX_HOTPLUG: return "Hotplug Present";
case V4L2_CID_DV_TX_RXSENSE: return "RxSense Present";
case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present";
case V4L2_CID_DV_TX_MODE: return "Transmit Mode";
case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range";
case V4L2_CID_DV_TX_IT_CONTENT_TYPE: return "Tx IT Content Type";
case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present";
case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range";
case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return "Rx IT Content Type";
case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls";
case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis";
case V4L2_CID_RDS_RECEPTION: return "RDS Reception";
case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls";
case V4L2_CID_RF_TUNER_RF_GAIN: return "RF Gain";
case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto";
case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain";
case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto";
case V4L2_CID_RF_TUNER_MIXER_GAIN: return "Mixer Gain";
case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: return "IF Gain, Auto";
case V4L2_CID_RF_TUNER_IF_GAIN: return "IF Gain";
case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: return "Bandwidth, Auto";
case V4L2_CID_RF_TUNER_BANDWIDTH: return "Bandwidth";
case V4L2_CID_RF_TUNER_PLL_LOCK: return "PLL Lock";
case V4L2_CID_RDS_RX_PTY: return "RDS Program Type";
case V4L2_CID_RDS_RX_PS_NAME: return "RDS PS Name";
case V4L2_CID_RDS_RX_RADIO_TEXT: return "RDS Radio Text";
case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement";
case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: return "RDS Traffic Program";
case V4L2_CID_RDS_RX_MUSIC_SPEECH: return "RDS Music";
/* Detection controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
case V4L2_CID_DETECT_CLASS: return "Detection Controls";
case V4L2_CID_DETECT_MD_MODE: return "Motion Detection Mode";
case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: return "MD Global Threshold";
case V4L2_CID_DETECT_MD_THRESHOLD_GRID: return "MD Threshold Grid";
case V4L2_CID_DETECT_MD_REGION_GRID: return "MD Region Grid";
/* Stateless Codec controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
case V4L2_CID_CODEC_STATELESS_CLASS: return "Stateless Codec Controls";
case V4L2_CID_STATELESS_H264_DECODE_MODE: return "H264 Decode Mode";
case V4L2_CID_STATELESS_H264_START_CODE: return "H264 Start Code";
case V4L2_CID_STATELESS_H264_SPS: return "H264 Sequence Parameter Set";
case V4L2_CID_STATELESS_H264_PPS: return "H264 Picture Parameter Set";
case V4L2_CID_STATELESS_H264_SCALING_MATRIX: return "H264 Scaling Matrix";
case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: return "H264 Prediction Weight Table";
case V4L2_CID_STATELESS_H264_SLICE_PARAMS: return "H264 Slice Parameters";
case V4L2_CID_STATELESS_H264_DECODE_PARAMS: return "H264 Decode Parameters";
case V4L2_CID_STATELESS_FWHT_PARAMS: return "FWHT Stateless Parameters";
case V4L2_CID_STATELESS_VP8_FRAME: return "VP8 Frame Parameters";
case V4L2_CID_STATELESS_MPEG2_SEQUENCE: return "MPEG-2 Sequence Header";
case V4L2_CID_STATELESS_MPEG2_PICTURE: return "MPEG-2 Picture Header";
case V4L2_CID_STATELESS_MPEG2_QUANTISATION: return "MPEG-2 Quantisation Matrices";
case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: return "VP9 Probabilities Updates";
case V4L2_CID_STATELESS_VP9_FRAME: return "VP9 Frame Decode Parameters";
case V4L2_CID_STATELESS_HEVC_SPS: return "HEVC Sequence Parameter Set";
case V4L2_CID_STATELESS_HEVC_PPS: return "HEVC Picture Parameter Set";
case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: return "HEVC Scaling Matrix";
case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: return "HEVC Decode Parameters";
case V4L2_CID_STATELESS_HEVC_DECODE_MODE: return "HEVC Decode Mode";
case V4L2_CID_STATELESS_HEVC_START_CODE: return "HEVC Start Code";
case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: return "HEVC Entry Point Offsets";
case V4L2_CID_STATELESS_AV1_SEQUENCE: return "AV1 Sequence Parameters";
case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: return "AV1 Tile Group Entry";
case V4L2_CID_STATELESS_AV1_FRAME: return "AV1 Frame Parameters";
case V4L2_CID_STATELESS_AV1_FILM_GRAIN: return "AV1 Film Grain";
/* Colorimetry controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
case V4L2_CID_COLORIMETRY_CLASS: return "Colorimetry Controls";
case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: return "HDR10 Content Light Info";
case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: return "HDR10 Mastering Display";
default:
return NULL;
}
}
EXPORT_SYMBOL(v4l2_ctrl_get_name);
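/*
* Fill in the name, type, min/max/step/default and flags for a given
* control ID. Editorial note: drivers typically reach this through the
* v4l2_ctrl_new_std*() helpers rather than calling it directly.
*/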
void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags)
{
*name = v4l2_ctrl_get_name(id);
*flags = 0;
switch (id) {
case V4L2_CID_AUDIO_MUTE:
case V4L2_CID_AUDIO_LOUDNESS:
case V4L2_CID_AUTO_WHITE_BALANCE:
case V4L2_CID_AUTOGAIN:
case V4L2_CID_HFLIP:
case V4L2_CID_VFLIP:
case V4L2_CID_HUE_AUTO:
case V4L2_CID_CHROMA_AGC:
case V4L2_CID_COLOR_KILLER:
case V4L2_CID_AUTOBRIGHTNESS:
case V4L2_CID_MPEG_AUDIO_MUTE:
case V4L2_CID_MPEG_VIDEO_MUTE:
case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
case V4L2_CID_MPEG_VIDEO_PULLDOWN:
case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
case V4L2_CID_FOCUS_AUTO:
case V4L2_CID_PRIVACY:
case V4L2_CID_AUDIO_LIMITER_ENABLED:
case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
case V4L2_CID_PILOT_TONE_ENABLED:
case V4L2_CID_ILLUMINATORS_1:
case V4L2_CID_ILLUMINATORS_2:
case V4L2_CID_FLASH_STROBE_STATUS:
case V4L2_CID_FLASH_CHARGE:
case V4L2_CID_FLASH_READY:
case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER:
case V4L2_CID_MPEG_VIDEO_AU_DELIMITER:
case V4L2_CID_WIDE_DYNAMIC_RANGE:
case V4L2_CID_IMAGE_STABILIZATION:
case V4L2_CID_RDS_RECEPTION:
case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO:
case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO:
case V4L2_CID_RF_TUNER_IF_GAIN_AUTO:
case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
case V4L2_CID_RF_TUNER_PLL_LOCK:
case V4L2_CID_RDS_TX_MONO_STEREO:
case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD:
case V4L2_CID_RDS_TX_COMPRESSED:
case V4L2_CID_RDS_TX_DYNAMIC_PTY:
case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT:
case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM:
case V4L2_CID_RDS_TX_MUSIC_SPEECH:
case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE:
case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT:
case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM:
case V4L2_CID_RDS_RX_MUSIC_SPEECH:
*type = V4L2_CTRL_TYPE_BOOLEAN;
*min = 0;
*max = *step = 1;
break;
case V4L2_CID_ROTATE:
*type = V4L2_CTRL_TYPE_INTEGER;
*flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
break;
case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:
case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY:
case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
case V4L2_CID_MPEG_VIDEO_LTR_COUNT:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX:
*type = V4L2_CTRL_TYPE_INTEGER;
*flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
break;
case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES:
*type = V4L2_CTRL_TYPE_BITMASK;
*flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
break;
case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
case V4L2_CID_PAN_RESET:
case V4L2_CID_TILT_RESET:
case V4L2_CID_FLASH_STROBE:
case V4L2_CID_FLASH_STROBE_STOP:
case V4L2_CID_AUTO_FOCUS_START:
case V4L2_CID_AUTO_FOCUS_STOP:
case V4L2_CID_DO_WHITE_BALANCE:
*type = V4L2_CTRL_TYPE_BUTTON;
*flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
*min = *max = *step = *def = 0;
break;
case V4L2_CID_POWER_LINE_FREQUENCY:
case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
case V4L2_CID_MPEG_AUDIO_ENCODING:
case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
case V4L2_CID_MPEG_AUDIO_MODE:
case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
case V4L2_CID_MPEG_AUDIO_EMPHASIS:
case V4L2_CID_MPEG_AUDIO_CRC:
case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
case V4L2_CID_MPEG_VIDEO_ENCODING:
case V4L2_CID_MPEG_VIDEO_ASPECT:
case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
case V4L2_CID_MPEG_STREAM_TYPE:
case V4L2_CID_MPEG_STREAM_VBI_FMT:
case V4L2_CID_EXPOSURE_AUTO:
case V4L2_CID_AUTO_FOCUS_RANGE:
case V4L2_CID_COLORFX:
case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
case V4L2_CID_TUNE_PREEMPHASIS:
case V4L2_CID_FLASH_LED_MODE:
case V4L2_CID_FLASH_STROBE_SOURCE:
case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE:
case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
case V4L2_CID_ISO_SENSITIVITY_AUTO:
case V4L2_CID_EXPOSURE_METERING:
case V4L2_CID_SCENE_MODE:
case V4L2_CID_DV_TX_MODE:
case V4L2_CID_DV_TX_RGB_RANGE:
case V4L2_CID_DV_TX_IT_CONTENT_TYPE:
case V4L2_CID_DV_RX_RGB_RANGE:
case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
case V4L2_CID_TEST_PATTERN:
case V4L2_CID_DEINTERLACING_MODE:
case V4L2_CID_TUNE_DEEMPHASIS:
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
case V4L2_CID_DETECT_MD_MODE:
case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE:
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
case V4L2_CID_MPEG_VIDEO_AV1_PROFILE:
case V4L2_CID_MPEG_VIDEO_AV1_LEVEL:
case V4L2_CID_STATELESS_HEVC_DECODE_MODE:
case V4L2_CID_STATELESS_HEVC_START_CODE:
case V4L2_CID_STATELESS_H264_DECODE_MODE:
case V4L2_CID_STATELESS_H264_START_CODE:
case V4L2_CID_CAMERA_ORIENTATION:
case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE:
case V4L2_CID_HDR_SENSOR_MODE:
*type = V4L2_CTRL_TYPE_MENU;
break;
case V4L2_CID_LINK_FREQ:
*type = V4L2_CTRL_TYPE_INTEGER_MENU;
break;
case V4L2_CID_RDS_TX_PS_NAME:
case V4L2_CID_RDS_TX_RADIO_TEXT:
case V4L2_CID_RDS_RX_PS_NAME:
case V4L2_CID_RDS_RX_RADIO_TEXT:
*type = V4L2_CTRL_TYPE_STRING;
break;
case V4L2_CID_ISO_SENSITIVITY:
case V4L2_CID_AUTO_EXPOSURE_BIAS:
case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
*type = V4L2_CTRL_TYPE_INTEGER_MENU;
break;
case V4L2_CID_USER_CLASS:
case V4L2_CID_CAMERA_CLASS:
case V4L2_CID_CODEC_CLASS:
case V4L2_CID_FM_TX_CLASS:
case V4L2_CID_FLASH_CLASS:
case V4L2_CID_JPEG_CLASS:
case V4L2_CID_IMAGE_SOURCE_CLASS:
case V4L2_CID_IMAGE_PROC_CLASS:
case V4L2_CID_DV_CLASS:
case V4L2_CID_FM_RX_CLASS:
case V4L2_CID_RF_TUNER_CLASS:
case V4L2_CID_DETECT_CLASS:
case V4L2_CID_CODEC_STATELESS_CLASS:
case V4L2_CID_COLORIMETRY_CLASS:
*type = V4L2_CTRL_TYPE_CTRL_CLASS;
/* You can neither read nor write these */
*flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
*min = *max = *step = *def = 0;
break;
case V4L2_CID_BG_COLOR:
case V4L2_CID_COLORFX_RGB:
*type = V4L2_CTRL_TYPE_INTEGER;
*step = 1;
*min = 0;
/* Max is calculated as RGB888 that is 2^24 - 1 */
*max = 0xffffff;
break;
case V4L2_CID_COLORFX_CBCR:
*type = V4L2_CTRL_TYPE_INTEGER;
*step = 1;
*min = 0;
*max = 0xffff;
break;
case V4L2_CID_FLASH_FAULT:
case V4L2_CID_JPEG_ACTIVE_MARKER:
case V4L2_CID_3A_LOCK:
case V4L2_CID_AUTO_FOCUS_STATUS:
case V4L2_CID_DV_TX_HOTPLUG:
case V4L2_CID_DV_TX_RXSENSE:
case V4L2_CID_DV_TX_EDID_PRESENT:
case V4L2_CID_DV_RX_POWER_PRESENT:
*type = V4L2_CTRL_TYPE_BITMASK;
break;
case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
*type = V4L2_CTRL_TYPE_INTEGER;
*flags |= V4L2_CTRL_FLAG_READ_ONLY;
break;
case V4L2_CID_MPEG_VIDEO_DEC_PTS:
*type = V4L2_CTRL_TYPE_INTEGER64;
*flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY;
*min = *def = 0;
*max = 0x1ffffffffLL;
*step = 1;
break;
case V4L2_CID_MPEG_VIDEO_DEC_FRAME:
*type = V4L2_CTRL_TYPE_INTEGER64;
*flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY;
*min = *def = 0;
*max = 0x7fffffffffffffffLL;
*step = 1;
break;
case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR:
*type = V4L2_CTRL_TYPE_INTEGER64;
*min = 0;
/* default for 8 bit black, luma is 16, chroma is 128 */
*def = 0x8000800010LL;
*max = 0xffffffffffffLL;
*step = 1;
break;
case V4L2_CID_PIXEL_RATE:
*type = V4L2_CTRL_TYPE_INTEGER64;
*flags |= V4L2_CTRL_FLAG_READ_ONLY;
break;
case V4L2_CID_DETECT_MD_REGION_GRID:
*type = V4L2_CTRL_TYPE_U8;
break;
case V4L2_CID_DETECT_MD_THRESHOLD_GRID:
*type = V4L2_CTRL_TYPE_U16;
break;
case V4L2_CID_RDS_TX_ALT_FREQS:
*type = V4L2_CTRL_TYPE_U32;
break;
case V4L2_CID_STATELESS_MPEG2_SEQUENCE:
*type = V4L2_CTRL_TYPE_MPEG2_SEQUENCE;
break;
case V4L2_CID_STATELESS_MPEG2_PICTURE:
*type = V4L2_CTRL_TYPE_MPEG2_PICTURE;
break;
case V4L2_CID_STATELESS_MPEG2_QUANTISATION:
*type = V4L2_CTRL_TYPE_MPEG2_QUANTISATION;
break;
case V4L2_CID_STATELESS_FWHT_PARAMS:
*type = V4L2_CTRL_TYPE_FWHT_PARAMS;
break;
case V4L2_CID_STATELESS_H264_SPS:
*type = V4L2_CTRL_TYPE_H264_SPS;
break;
case V4L2_CID_STATELESS_H264_PPS:
*type = V4L2_CTRL_TYPE_H264_PPS;
break;
case V4L2_CID_STATELESS_H264_SCALING_MATRIX:
*type = V4L2_CTRL_TYPE_H264_SCALING_MATRIX;
break;
case V4L2_CID_STATELESS_H264_SLICE_PARAMS:
*type = V4L2_CTRL_TYPE_H264_SLICE_PARAMS;
break;
case V4L2_CID_STATELESS_H264_DECODE_PARAMS:
*type = V4L2_CTRL_TYPE_H264_DECODE_PARAMS;
break;
case V4L2_CID_STATELESS_H264_PRED_WEIGHTS:
*type = V4L2_CTRL_TYPE_H264_PRED_WEIGHTS;
break;
case V4L2_CID_STATELESS_VP8_FRAME:
*type = V4L2_CTRL_TYPE_VP8_FRAME;
break;
case V4L2_CID_STATELESS_HEVC_SPS:
*type = V4L2_CTRL_TYPE_HEVC_SPS;
break;
case V4L2_CID_STATELESS_HEVC_PPS:
*type = V4L2_CTRL_TYPE_HEVC_PPS;
break;
case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS:
*type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS;
*flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY;
break;
case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX:
*type = V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX;
break;
case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS:
*type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS;
break;
case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS:
*type = V4L2_CTRL_TYPE_U32;
*flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY;
break;
case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR:
*type = V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR;
break;
case V4L2_CID_STATELESS_VP9_FRAME:
*type = V4L2_CTRL_TYPE_VP9_FRAME;
break;
case V4L2_CID_STATELESS_AV1_SEQUENCE:
*type = V4L2_CTRL_TYPE_AV1_SEQUENCE;
break;
case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY:
*type = V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY;
*flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY;
break;
case V4L2_CID_STATELESS_AV1_FRAME:
*type = V4L2_CTRL_TYPE_AV1_FRAME;
break;
case V4L2_CID_STATELESS_AV1_FILM_GRAIN:
*type = V4L2_CTRL_TYPE_AV1_FILM_GRAIN;
break;
case V4L2_CID_UNIT_CELL_SIZE:
*type = V4L2_CTRL_TYPE_AREA;
*flags |= V4L2_CTRL_FLAG_READ_ONLY;
break;
case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO:
*type = V4L2_CTRL_TYPE_HDR10_CLL_INFO;
break;
case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY:
*type = V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY;
break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
}
switch (id) {
case V4L2_CID_MPEG_AUDIO_ENCODING:
case V4L2_CID_MPEG_AUDIO_MODE:
case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
case V4L2_CID_MPEG_VIDEO_B_FRAMES:
case V4L2_CID_MPEG_STREAM_TYPE:
*flags |= V4L2_CTRL_FLAG_UPDATE;
break;
case V4L2_CID_AUDIO_VOLUME:
case V4L2_CID_AUDIO_BALANCE:
case V4L2_CID_AUDIO_BASS:
case V4L2_CID_AUDIO_TREBLE:
case V4L2_CID_BRIGHTNESS:
case V4L2_CID_CONTRAST:
case V4L2_CID_SATURATION:
case V4L2_CID_HUE:
case V4L2_CID_RED_BALANCE:
case V4L2_CID_BLUE_BALANCE:
case V4L2_CID_GAMMA:
case V4L2_CID_SHARPNESS:
case V4L2_CID_CHROMA_GAIN:
case V4L2_CID_RDS_TX_DEVIATION:
case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
case V4L2_CID_AUDIO_LIMITER_DEVIATION:
case V4L2_CID_AUDIO_COMPRESSION_GAIN:
case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD:
case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME:
case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME:
case V4L2_CID_PILOT_TONE_DEVIATION:
case V4L2_CID_PILOT_TONE_FREQUENCY:
case V4L2_CID_TUNE_POWER_LEVEL:
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
case V4L2_CID_RF_TUNER_RF_GAIN:
case V4L2_CID_RF_TUNER_LNA_GAIN:
case V4L2_CID_RF_TUNER_MIXER_GAIN:
case V4L2_CID_RF_TUNER_IF_GAIN:
case V4L2_CID_RF_TUNER_BANDWIDTH:
case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD:
*flags |= V4L2_CTRL_FLAG_SLIDER;
break;
case V4L2_CID_PAN_RELATIVE:
case V4L2_CID_TILT_RELATIVE:
case V4L2_CID_FOCUS_RELATIVE:
case V4L2_CID_IRIS_RELATIVE:
case V4L2_CID_ZOOM_RELATIVE:
*flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
break;
case V4L2_CID_FLASH_STROBE_STATUS:
case V4L2_CID_AUTO_FOCUS_STATUS:
case V4L2_CID_FLASH_READY:
case V4L2_CID_DV_TX_HOTPLUG:
case V4L2_CID_DV_TX_RXSENSE:
case V4L2_CID_DV_TX_EDID_PRESENT:
case V4L2_CID_DV_RX_POWER_PRESENT:
case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
case V4L2_CID_RDS_RX_PTY:
case V4L2_CID_RDS_RX_PS_NAME:
case V4L2_CID_RDS_RX_RADIO_TEXT:
case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT:
case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM:
case V4L2_CID_RDS_RX_MUSIC_SPEECH:
case V4L2_CID_CAMERA_ORIENTATION:
case V4L2_CID_CAMERA_SENSOR_ROTATION:
*flags |= V4L2_CTRL_FLAG_READ_ONLY;
break;
case V4L2_CID_RF_TUNER_PLL_LOCK:
*flags |= V4L2_CTRL_FLAG_VOLATILE;
break;
}
}
EXPORT_SYMBOL(v4l2_ctrl_fill);
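/*
* Illustrative sketch (not part of the original file): querying the
* framework defaults for a control ID with v4l2_ctrl_fill():
*
*	const char *name;
*	enum v4l2_ctrl_type type;
*	s64 min, max, def;
*	u64 step;
*	u32 flags;
*
*	v4l2_ctrl_fill(V4L2_CID_AUDIO_MUTE, &name, &type,
*		       &min, &max, &step, &def, &flags);
*	// name == "Mute", type == V4L2_CTRL_TYPE_BOOLEAN,
*	// min == 0, max == 1, step == 1
*/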
| linux-master | drivers/media/v4l2-core/v4l2-ctrls-defs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* V4L2 controls framework uAPI implementation:
*
* Copyright (C) 2010-2021 Hans Verkuil <[email protected]>
*/
#define pr_fmt(fmt) "v4l2-ctrls: " fmt
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include "v4l2-ctrls-priv.h"
/* Internal temporary helper struct, one for each v4l2_ext_control */
struct v4l2_ctrl_helper {
/* Pointer to the control reference of the master control */
struct v4l2_ctrl_ref *mref;
/* The control ref corresponding to the v4l2_ext_control ID field. */
struct v4l2_ctrl_ref *ref;
/*
* v4l2_ext_control index of the next control belonging to the
* same cluster, or 0 if there isn't any.
*/
u32 next;
};
/*
* Helper functions to copy control payload data from kernel space to
* user space and vice versa.
*/
/* Helper function: copy the given control value back to the caller */
static int ptr_to_user(struct v4l2_ext_control *c,
struct v4l2_ctrl *ctrl,
union v4l2_ctrl_ptr ptr)
{
u32 len;
if (ctrl->is_ptr && !ctrl->is_string)
return copy_to_user(c->ptr, ptr.p_const, c->size) ?
-EFAULT : 0;
switch (ctrl->type) {
case V4L2_CTRL_TYPE_STRING:
len = strlen(ptr.p_char);
if (c->size < len + 1) {
c->size = ctrl->elem_size;
return -ENOSPC;
}
return copy_to_user(c->string, ptr.p_char, len + 1) ?
-EFAULT : 0;
case V4L2_CTRL_TYPE_INTEGER64:
c->value64 = *ptr.p_s64;
break;
default:
c->value = *ptr.p_s32;
break;
}
return 0;
}
/* Helper function: copy the current control value back to the caller */
static int cur_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
return ptr_to_user(c, ctrl, ctrl->p_cur);
}
/* Helper function: copy the new control value back to the caller */
static int new_to_user(struct v4l2_ext_control *c,
struct v4l2_ctrl *ctrl)
{
return ptr_to_user(c, ctrl, ctrl->p_new);
}
/* Helper function: copy the request value back to the caller */
static int req_to_user(struct v4l2_ext_control *c,
struct v4l2_ctrl_ref *ref)
{
return ptr_to_user(c, ref->ctrl, ref->p_req);
}
/* Helper function: copy the initial control value back to the caller */
static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
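/* Reset p_new to the control's default value, then copy that out. */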
ctrl->type_ops->init(ctrl, 0, ctrl->p_new);
return ptr_to_user(c, ctrl, ctrl->p_new);
}
/* Helper function: copy the caller-provided value as the new control value */
static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
int ret;
u32 size;
ctrl->is_new = 0;
if (ctrl->is_dyn_array &&
c->size > ctrl->p_array_alloc_elems * ctrl->elem_size) {
void *old = ctrl->p_array;
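/*
* The array buffer stores two copies of the control value:
* p_new at offset 0 and p_cur at offset c->size, hence the
* 2 * c->size allocation below.
*/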
void *tmp = kvzalloc(2 * c->size, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size);
memcpy(tmp + c->size, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size);
ctrl->p_new.p = tmp;
ctrl->p_cur.p = tmp + c->size;
ctrl->p_array = tmp;
ctrl->p_array_alloc_elems = c->size / ctrl->elem_size;
kvfree(old);
}
if (ctrl->is_ptr && !ctrl->is_string) {
unsigned int elems = c->size / ctrl->elem_size;
if (copy_from_user(ctrl->p_new.p, c->ptr, c->size))
return -EFAULT;
ctrl->is_new = 1;
if (ctrl->is_dyn_array)
ctrl->new_elems = elems;
else if (ctrl->is_array)
ctrl->type_ops->init(ctrl, elems, ctrl->p_new);
return 0;
}
switch (ctrl->type) {
case V4L2_CTRL_TYPE_INTEGER64:
*ctrl->p_new.p_s64 = c->value64;
break;
case V4L2_CTRL_TYPE_STRING:
size = c->size;
if (size == 0)
return -ERANGE;
if (size > ctrl->maximum + 1)
size = ctrl->maximum + 1;
ret = copy_from_user(ctrl->p_new.p_char, c->string, size) ? -EFAULT : 0;
if (!ret) {
char last = ctrl->p_new.p_char[size - 1];
ctrl->p_new.p_char[size - 1] = 0;
/*
* If the string was longer than ctrl->maximum,
* then return an error.
*/
if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last)
return -ERANGE;
ctrl->is_new = 1;
}
return ret;
default:
*ctrl->p_new.p_s32 = c->value;
break;
}
ctrl->is_new = 1;
return 0;
}
/*
* VIDIOC_G/TRY/S_EXT_CTRLS implementation
*/
/*
* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
*
* It is not a fully atomic operation, just best-effort only. After all, if
* multiple controls have to be set through multiple i2c writes (for example)
* then some initial writes may succeed while others fail. Thus leaving the
* system in an inconsistent state. The question is how much effort you are
* willing to spend on trying to make something atomic that really isn't.
*
* From the point of view of an application the main requirement is that
* when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an
* error should be returned without actually affecting any controls.
*
* If all the values are correct, then it is acceptable to just give up
* in case of low-level errors.
*
* It is important though that the application can tell when only a partial
* configuration was done. The way we do that is through the error_idx field
* of struct v4l2_ext_controls: if that is equal to the count field then no
* controls were affected. Otherwise all controls before that index were
* successful in performing their 'get' or 'set' operation, the control at
* the given index failed, and you don't know what happened with the controls
* after the failed one: if they were part of a control cluster they
* could have been successfully processed (if a cluster member was encountered
* at index < error_idx), they could have failed (if a cluster member was at
* error_idx), or they may not have been processed yet (if the first cluster
* member appeared after error_idx).
*
* It is all fairly theoretical, though. In practice all you can do is to
* bail out. If error_idx == count, then it is an application bug. If
* error_idx < count then it is only an application bug if the error code was
* EBUSY. That usually means that something started streaming just when you
* tried to set the controls. In all other cases it is a driver/hardware
* problem and all you can do is to retry or bail out.
*
* Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that
* never modifies controls the error_idx is just set to whatever control
* has an invalid value.
*/
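/*
* Illustrative (userspace-style) sketch of the error_idx rule above; the
* ioctl name is standard uAPI, the surrounding code is hypothetical:
*
*	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &cs)) {
*		if (cs.error_idx == cs.count)
*			; // no controls were touched, fix the request
*		else
*			; // partial: controls before error_idx were set
*	}
*/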
/*
* Prepare for the extended g/s/try functions.
* Find the controls in the control array and do some basic checks.
*/
static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
struct v4l2_ext_controls *cs,
struct v4l2_ctrl_helper *helpers,
struct video_device *vdev,
bool get)
{
struct v4l2_ctrl_helper *h;
bool have_clusters = false;
u32 i;
for (i = 0, h = helpers; i < cs->count; i++, h++) {
struct v4l2_ext_control *c = &cs->controls[i];
struct v4l2_ctrl_ref *ref;
struct v4l2_ctrl *ctrl;
u32 id = c->id & V4L2_CTRL_ID_MASK;
cs->error_idx = i;
if (cs->which &&
cs->which != V4L2_CTRL_WHICH_DEF_VAL &&
cs->which != V4L2_CTRL_WHICH_REQUEST_VAL &&
V4L2_CTRL_ID2WHICH(id) != cs->which) {
dprintk(vdev,
"invalid which 0x%x or control id 0x%x\n",
cs->which, id);
return -EINVAL;
}
/*
* Old-style private controls are not allowed for
* extended controls.
*/
if (id >= V4L2_CID_PRIVATE_BASE) {
dprintk(vdev,
"old-style private controls not allowed\n");
return -EINVAL;
}
ref = find_ref_lock(hdl, id);
if (!ref) {
dprintk(vdev, "cannot find control id 0x%x\n", id);
return -EINVAL;
}
h->ref = ref;
ctrl = ref->ctrl;
if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) {
dprintk(vdev, "control id 0x%x is disabled\n", id);
return -EINVAL;
}
if (ctrl->cluster[0]->ncontrols > 1)
have_clusters = true;
if (ctrl->cluster[0] != ctrl)
ref = find_ref_lock(hdl, ctrl->cluster[0]->id);
if (ctrl->is_dyn_array) {
unsigned int max_size = ctrl->dims[0] * ctrl->elem_size;
unsigned int tot_size = ctrl->elem_size;
if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL)
tot_size *= ref->p_req_elems;
else
tot_size *= ctrl->elems;
c->size = ctrl->elem_size * (c->size / ctrl->elem_size);
if (get) {
if (c->size < tot_size) {
c->size = tot_size;
return -ENOSPC;
}
c->size = tot_size;
} else {
if (c->size > max_size) {
c->size = max_size;
return -ENOSPC;
}
if (!c->size)
return -EFAULT;
}
} else if (ctrl->is_ptr && !ctrl->is_string) {
unsigned int tot_size = ctrl->elems * ctrl->elem_size;
if (c->size < tot_size) {
/*
* In the get case the application first
* queries to obtain the size of the control.
*/
if (get) {
c->size = tot_size;
return -ENOSPC;
}
dprintk(vdev,
"pointer control id 0x%x size too small, %d bytes but %d bytes needed\n",
id, c->size, tot_size);
return -EFAULT;
}
c->size = tot_size;
}
/* Store the ref to the master control of the cluster */
h->mref = ref;
/*
* Initially set next to 0, meaning that there is no other
* control in this helper array belonging to the same
* cluster.
*/
h->next = 0;
}
/*
* We are done if there were no controls that belong to a multi-
* control cluster.
*/
if (!have_clusters)
return 0;
/*
* The code below figures out in O(n) time which controls in the list
* belong to the same cluster.
*/
/* This has to be done with the handler lock taken. */
mutex_lock(hdl->lock);
/* First zero the helper field in the master control references */
for (i = 0; i < cs->count; i++)
helpers[i].mref->helper = NULL;
for (i = 0, h = helpers; i < cs->count; i++, h++) {
struct v4l2_ctrl_ref *mref = h->mref;
/*
* If the mref->helper is set, then it points to an earlier
* helper that belongs to the same cluster.
*/
if (mref->helper) {
/*
* Set the next field of mref->helper to the current
* index: this means that the earlier helper now
* points to the next helper in the same cluster.
*/
mref->helper->next = i;
/*
* mref should be set only for the first helper in the
* cluster, clear the others.
*/
h->mref = NULL;
}
/* Point the mref helper to the current helper struct. */
mref->helper = h;
}
mutex_unlock(hdl->lock);
return 0;
}
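/*
 * Worked example (illustrative): suppose cs->controls holds four controls
 * A1, B, A2 and A3, where the A* controls belong to one cluster and B
 * stands alone. After the loop above the helpers array looks like:
 *
 *	helpers[0] (A1): mref = A-master, next = 2
 *	helpers[1] (B):  mref = B-master, next = 0
 *	helpers[2] (A2): mref = NULL,     next = 3
 *	helpers[3] (A3): mref = NULL,     next = 0
 *
 * Each cluster thus forms a chain that starts at the helper that kept its
 * mref and is terminated by next == 0 (index 0 can never be a successor,
 * since it is always the first member of its own chain).
 */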
/*
 * Handles the corner case where cs->count == 0: it checks whether the
 * specified control class exists. For a class ID of 0 and for the special
 * V4L2_CTRL_WHICH_DEF_VAL and V4L2_CTRL_WHICH_REQUEST_VAL values there is
 * nothing to check, so 0 is returned.
 */
static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
{
if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL ||
which == V4L2_CTRL_WHICH_REQUEST_VAL)
return 0;
return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
}
/*
* Get extended controls. Allocates the helpers array if needed.
*
* Note that v4l2_g_ext_ctrls_common() with 'which' set to
* V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
* completed, and in that case p_req_valid is true for all controls.
*/
int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
struct v4l2_ext_controls *cs,
struct video_device *vdev)
{
struct v4l2_ctrl_helper helper[4];
struct v4l2_ctrl_helper *helpers = helper;
int ret;
int i, j;
bool is_default, is_request;
is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
cs->error_idx = cs->count;
cs->which = V4L2_CTRL_ID2WHICH(cs->which);
if (!hdl)
return -EINVAL;
if (cs->count == 0)
return class_check(hdl, cs->which);
if (cs->count > ARRAY_SIZE(helper)) {
helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
GFP_KERNEL);
if (!helpers)
return -ENOMEM;
}
ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, true);
cs->error_idx = cs->count;
for (i = 0; !ret && i < cs->count; i++)
if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
ret = -EACCES;
for (i = 0; !ret && i < cs->count; i++) {
struct v4l2_ctrl *master;
bool is_volatile = false;
u32 idx = i;
if (!helpers[i].mref)
continue;
master = helpers[i].mref->ctrl;
cs->error_idx = i;
v4l2_ctrl_lock(master);
/*
* g_volatile_ctrl will update the new control values.
* This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
* V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
* it is v4l2_ctrl_request_complete() that copies the
* volatile controls at the time of request completion
* to the request, so you don't want to do that again.
*/
if (!is_default && !is_request &&
((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
(master->has_volatiles && !is_cur_manual(master)))) {
for (j = 0; j < master->ncontrols; j++)
cur_to_new(master->cluster[j]);
ret = call_op(master, g_volatile_ctrl);
is_volatile = true;
}
if (ret) {
v4l2_ctrl_unlock(master);
break;
}
/*
* Copy the default value (if is_default is true), the
* request value (if is_request is true and p_req is valid),
* the new volatile value (if is_volatile is true) or the
* current value.
*/
do {
struct v4l2_ctrl_ref *ref = helpers[idx].ref;
if (is_default)
ret = def_to_user(cs->controls + idx, ref->ctrl);
else if (is_request && ref->p_req_array_enomem)
ret = -ENOMEM;
else if (is_request && ref->p_req_valid)
ret = req_to_user(cs->controls + idx, ref);
else if (is_volatile)
ret = new_to_user(cs->controls + idx, ref->ctrl);
else
ret = cur_to_user(cs->controls + idx, ref->ctrl);
idx = helpers[idx].next;
} while (!ret && idx);
v4l2_ctrl_unlock(master);
}
if (cs->count > ARRAY_SIZE(helper))
kvfree(helpers);
return ret;
}
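/*
 * Example (illustrative userspace sketch; 'fd' and 'some_compound_ctrl_id'
 * are hypothetical): reading a pointer control usually takes two calls.
 * The first call, with size == 0, fails with ENOSPC and the driver fills
 * in the required size, as implemented in prepare_ext_ctrls() above:
 *
 *	struct v4l2_ext_control c = { .id = some_compound_ctrl_id };
 *	struct v4l2_ext_controls cs = { .count = 1, .controls = &c };
 *
 *	if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &cs) < 0 && errno == ENOSPC) {
 *		c.ptr = malloc(c.size);	// c.size was filled in
 *		ioctl(fd, VIDIOC_G_EXT_CTRLS, &cs);
 *	}
 */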
int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
struct media_device *mdev, struct v4l2_ext_controls *cs)
{
if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL)
return v4l2_g_ext_ctrls_request(hdl, vdev, mdev, cs);
return v4l2_g_ext_ctrls_common(hdl, cs, vdev);
}
EXPORT_SYMBOL(v4l2_g_ext_ctrls);
/* Validate a new control */
static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
{
return ctrl->type_ops->validate(ctrl, p_new);
}
/* Validate controls. */
static int validate_ctrls(struct v4l2_ext_controls *cs,
struct v4l2_ctrl_helper *helpers,
struct video_device *vdev,
bool set)
{
unsigned int i;
int ret = 0;
cs->error_idx = cs->count;
for (i = 0; i < cs->count; i++) {
struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl;
union v4l2_ctrl_ptr p_new;
cs->error_idx = i;
if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) {
dprintk(vdev,
"control id 0x%x is read-only\n",
ctrl->id);
return -EACCES;
}
/*
* This test is also done in try_set_control_cluster() which
* is called in atomic context, so that has the final say,
* but it makes sense to do an up-front check as well. Once
* an error occurs in try_set_control_cluster() some other
* controls may have been set already and we want to do a
* best-effort to avoid that.
*/
if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) {
dprintk(vdev,
"control id 0x%x is grabbed, cannot set\n",
ctrl->id);
return -EBUSY;
}
/*
* Skip validation for now if the payload needs to be copied
* from userspace into kernelspace. We'll validate those later.
*/
if (ctrl->is_ptr)
continue;
if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
p_new.p_s64 = &cs->controls[i].value64;
else
p_new.p_s32 = &cs->controls[i].value;
ret = validate_new(ctrl, p_new);
if (ret)
return ret;
}
return 0;
}
/* Try or try-and-set controls */
int try_set_ext_ctrls_common(struct v4l2_fh *fh,
struct v4l2_ctrl_handler *hdl,
struct v4l2_ext_controls *cs,
struct video_device *vdev, bool set)
{
struct v4l2_ctrl_helper helper[4];
struct v4l2_ctrl_helper *helpers = helper;
unsigned int i, j;
int ret;
cs->error_idx = cs->count;
/* Default value cannot be changed */
if (cs->which == V4L2_CTRL_WHICH_DEF_VAL) {
dprintk(vdev, "%s: cannot change default value\n",
video_device_node_name(vdev));
return -EINVAL;
}
cs->which = V4L2_CTRL_ID2WHICH(cs->which);
if (!hdl) {
dprintk(vdev, "%s: invalid null control handler\n",
video_device_node_name(vdev));
return -EINVAL;
}
if (cs->count == 0)
return class_check(hdl, cs->which);
if (cs->count > ARRAY_SIZE(helper)) {
helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
GFP_KERNEL);
if (!helpers)
return -ENOMEM;
}
ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, false);
if (!ret)
ret = validate_ctrls(cs, helpers, vdev, set);
if (ret && set)
cs->error_idx = cs->count;
for (i = 0; !ret && i < cs->count; i++) {
struct v4l2_ctrl *master;
u32 idx = i;
if (!helpers[i].mref)
continue;
cs->error_idx = i;
master = helpers[i].mref->ctrl;
v4l2_ctrl_lock(master);
/* Reset the 'is_new' flags of the cluster */
for (j = 0; j < master->ncontrols; j++)
if (master->cluster[j])
master->cluster[j]->is_new = 0;
/*
* For volatile autoclusters that are currently in auto mode
* we need to discover if it will be set to manual mode.
* If so, then we have to copy the current volatile values
* first since those will become the new manual values (which
* may be overwritten by explicit new values from this set
* of controls).
*/
if (master->is_auto && master->has_volatiles &&
!is_cur_manual(master)) {
/* Pick an initial non-manual value */
s32 new_auto_val = master->manual_mode_value + 1;
u32 tmp_idx = idx;
do {
/*
* Check if the auto control is part of the
* list, and remember the new value.
*/
if (helpers[tmp_idx].ref->ctrl == master)
new_auto_val = cs->controls[tmp_idx].value;
tmp_idx = helpers[tmp_idx].next;
} while (tmp_idx);
/*
* If the new value == the manual value, then copy
* the current volatile values.
*/
if (new_auto_val == master->manual_mode_value)
update_from_auto_cluster(master);
}
/*
* Copy the new caller-supplied control values.
* user_to_new() sets 'is_new' to 1.
*/
do {
struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl;
ret = user_to_new(cs->controls + idx, ctrl);
if (!ret && ctrl->is_ptr) {
ret = validate_new(ctrl, ctrl->p_new);
if (ret)
dprintk(vdev,
"failed to validate control %s (%d)\n",
v4l2_ctrl_get_name(ctrl->id), ret);
}
idx = helpers[idx].next;
} while (!ret && idx);
if (!ret)
ret = try_or_set_cluster(fh, master,
!hdl->req_obj.req && set, 0);
if (!ret && hdl->req_obj.req && set) {
for (j = 0; j < master->ncontrols; j++) {
struct v4l2_ctrl_ref *ref =
find_ref(hdl, master->cluster[j]->id);
new_to_req(ref);
}
}
/* Copy the new values back to userspace. */
if (!ret) {
idx = i;
do {
ret = new_to_user(cs->controls + idx,
helpers[idx].ref->ctrl);
idx = helpers[idx].next;
} while (!ret && idx);
}
v4l2_ctrl_unlock(master);
}
if (cs->count > ARRAY_SIZE(helper))
kvfree(helpers);
return ret;
}
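/*
 * Worked example (illustrative): take the classic autogain/gain cluster
 * where gain is volatile while autogain is on. If one S_EXT_CTRLS call
 * sets { autogain = manual, gain = X }, the loop above detects that the
 * cluster is still in auto mode but is about to switch to manual, so
 * update_from_auto_cluster() first latches the current hardware gain into
 * the cluster's new values. user_to_new() then overwrites gain with X, so
 * the explicit value wins; had the call only set autogain = manual, the
 * latched hardware gain would have become the initial manual gain.
 */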
static int try_set_ext_ctrls(struct v4l2_fh *fh,
struct v4l2_ctrl_handler *hdl,
struct video_device *vdev,
struct media_device *mdev,
struct v4l2_ext_controls *cs, bool set)
{
int ret;
if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL)
return try_set_ext_ctrls_request(fh, hdl, vdev, mdev, cs, set);
ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set);
if (ret)
dprintk(vdev,
"%s: try_set_ext_ctrls_common failed (%d)\n",
video_device_node_name(vdev), ret);
return ret;
}
int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl,
struct video_device *vdev,
struct media_device *mdev,
struct v4l2_ext_controls *cs)
{
return try_set_ext_ctrls(NULL, hdl, vdev, mdev, cs, false);
}
EXPORT_SYMBOL(v4l2_try_ext_ctrls);
int v4l2_s_ext_ctrls(struct v4l2_fh *fh,
struct v4l2_ctrl_handler *hdl,
struct video_device *vdev,
struct media_device *mdev,
struct v4l2_ext_controls *cs)
{
return try_set_ext_ctrls(fh, hdl, vdev, mdev, cs, true);
}
EXPORT_SYMBOL(v4l2_s_ext_ctrls);
/*
* VIDIOC_G/S_CTRL implementation
*/
/* Helper function to get a single control */
static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
{
struct v4l2_ctrl *master = ctrl->cluster[0];
int ret = 0;
int i;
/* Compound controls are not supported. The new_to_user() and
* cur_to_user() calls below would need to be modified not to access
* userspace memory when called from get_ctrl().
*/
if (!ctrl->is_int && ctrl->type != V4L2_CTRL_TYPE_INTEGER64)
return -EINVAL;
if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
return -EACCES;
v4l2_ctrl_lock(master);
/* g_volatile_ctrl will update the current control values */
if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
for (i = 0; i < master->ncontrols; i++)
cur_to_new(master->cluster[i]);
ret = call_op(master, g_volatile_ctrl);
new_to_user(c, ctrl);
} else {
cur_to_user(c, ctrl);
}
v4l2_ctrl_unlock(master);
return ret;
}
int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
struct v4l2_ext_control c;
int ret;
if (!ctrl || !ctrl->is_int)
return -EINVAL;
ret = get_ctrl(ctrl, &c);
control->value = c.value;
return ret;
}
EXPORT_SYMBOL(v4l2_g_ctrl);
/* Helper function for VIDIOC_S_CTRL compatibility */
static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
{
struct v4l2_ctrl *master = ctrl->cluster[0];
int ret;
int i;
/* Reset the 'is_new' flags of the cluster */
for (i = 0; i < master->ncontrols; i++)
if (master->cluster[i])
master->cluster[i]->is_new = 0;
ret = validate_new(ctrl, ctrl->p_new);
if (ret)
return ret;
/*
* For autoclusters with volatiles that are switched from auto to
* manual mode we have to update the current volatile values since
* those will become the initial manual values after such a switch.
*/
if (master->is_auto && master->has_volatiles && ctrl == master &&
!is_cur_manual(master) && ctrl->val == master->manual_mode_value)
update_from_auto_cluster(master);
ctrl->is_new = 1;
return try_or_set_cluster(fh, master, true, ch_flags);
}
/* Helper function for VIDIOC_S_CTRL compatibility */
static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
struct v4l2_ext_control *c)
{
int ret;
v4l2_ctrl_lock(ctrl);
user_to_new(c, ctrl);
ret = set_ctrl(fh, ctrl, 0);
if (!ret)
cur_to_user(c, ctrl);
v4l2_ctrl_unlock(ctrl);
return ret;
}
int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
struct v4l2_control *control)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
struct v4l2_ext_control c = { control->id };
int ret;
if (!ctrl || !ctrl->is_int)
return -EINVAL;
if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
return -EACCES;
c.value = control->value;
ret = set_ctrl_lock(fh, ctrl, &c);
control->value = c.value;
return ret;
}
EXPORT_SYMBOL(v4l2_s_ctrl);
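/*
 * Example (illustrative userspace sketch, hypothetical 'fd'): the
 * old-style single-control API implemented by the two functions above:
 *
 *	struct v4l2_control ctrl = {
 *		.id = V4L2_CID_BRIGHTNESS,
 *		.value = 128,
 *	};
 *
 *	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) == 0)
 *		printf("brightness set to %d\n", ctrl.value);
 */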
/*
* Helper functions for drivers to get/set controls.
*/
s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_ext_control c;
/* It's a driver bug if this happens. */
if (WARN_ON(!ctrl->is_int))
return 0;
c.value = 0;
get_ctrl(ctrl, &c);
return c.value;
}
EXPORT_SYMBOL(v4l2_ctrl_g_ctrl);
s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl)
{
struct v4l2_ext_control c;
/* It's a driver bug if this happens. */
if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64))
return 0;
c.value64 = 0;
get_ctrl(ctrl, &c);
return c.value64;
}
EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64);
int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
{
lockdep_assert_held(ctrl->handler->lock);
/* It's a driver bug if this happens. */
if (WARN_ON(!ctrl->is_int))
return -EINVAL;
ctrl->val = val;
return set_ctrl(NULL, ctrl, 0);
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl);
int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
{
lockdep_assert_held(ctrl->handler->lock);
/* It's a driver bug if this happens. */
if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64))
return -EINVAL;
*ctrl->p_new.p_s64 = val;
return set_ctrl(NULL, ctrl, 0);
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64);
int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
{
lockdep_assert_held(ctrl->handler->lock);
/* It's a driver bug if this happens. */
if (WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING))
return -EINVAL;
strscpy(ctrl->p_new.p_char, s, ctrl->maximum + 1);
return set_ctrl(NULL, ctrl, 0);
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl,
enum v4l2_ctrl_type type, const void *p)
{
lockdep_assert_held(ctrl->handler->lock);
/* It's a driver bug if this happens. */
if (WARN_ON(ctrl->type != type))
return -EINVAL;
/* Setting dynamic arrays is not (yet?) supported. */
if (WARN_ON(ctrl->is_dyn_array))
return -EINVAL;
memcpy(ctrl->p_new.p, p, ctrl->elems * ctrl->elem_size);
return set_ctrl(NULL, ctrl, 0);
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_compound);
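/*
 * Example (illustrative driver-side sketch, hypothetical controls): the
 * __v4l2_ctrl_* setters above require the handler lock to be held; the
 * v4l2_ctrl_s_ctrl() and friends declared in media/v4l2-ctrls.h are inline
 * wrappers that take the lock themselves. So a driver can either do:
 *
 *	v4l2_ctrl_s_ctrl(my_gain_ctrl, 42);
 *
 * or, assuming both controls belong to the same handler, update several
 * controls atomically:
 *
 *	v4l2_ctrl_lock(my_gain_ctrl);
 *	__v4l2_ctrl_s_ctrl(my_gain_ctrl, 42);
 *	__v4l2_ctrl_s_ctrl_int64(my_exposure_ctrl, 10000);
 *	v4l2_ctrl_unlock(my_gain_ctrl);
 */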
/*
* Modify the range of a control.
*/
int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
s64 min, s64 max, u64 step, s64 def)
{
bool value_changed;
bool range_changed = false;
int ret;
lockdep_assert_held(ctrl->handler->lock);
switch (ctrl->type) {
case V4L2_CTRL_TYPE_INTEGER:
case V4L2_CTRL_TYPE_INTEGER64:
case V4L2_CTRL_TYPE_BOOLEAN:
case V4L2_CTRL_TYPE_MENU:
case V4L2_CTRL_TYPE_INTEGER_MENU:
case V4L2_CTRL_TYPE_BITMASK:
case V4L2_CTRL_TYPE_U8:
case V4L2_CTRL_TYPE_U16:
case V4L2_CTRL_TYPE_U32:
if (ctrl->is_array)
return -EINVAL;
ret = check_range(ctrl->type, min, max, step, def);
if (ret)
return ret;
break;
default:
return -EINVAL;
}
if (ctrl->minimum != min || ctrl->maximum != max ||
ctrl->step != step || ctrl->default_value != def) {
range_changed = true;
ctrl->minimum = min;
ctrl->maximum = max;
ctrl->step = step;
ctrl->default_value = def;
}
cur_to_new(ctrl);
if (validate_new(ctrl, ctrl->p_new)) {
if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
*ctrl->p_new.p_s64 = def;
else
*ctrl->p_new.p_s32 = def;
}
if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
value_changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64;
else
value_changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32;
if (value_changed)
ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
else if (range_changed)
send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
return ret;
}
EXPORT_SYMBOL(__v4l2_ctrl_modify_range);
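/*
 * Example (illustrative driver-side sketch, hypothetical 'exposure_ctrl'):
 * a sensor driver shrinking the exposure range after a mode change, using
 * the v4l2_ctrl_modify_range() inline wrapper from media/v4l2-ctrls.h,
 * which takes the handler lock around the function above:
 *
 *	ret = v4l2_ctrl_modify_range(exposure_ctrl, 1, new_max, 1, new_def);
 */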
int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl,
u32 dims[V4L2_CTRL_MAX_DIMS])
{
unsigned int elems = 1;
unsigned int i;
void *p_array;
lockdep_assert_held(ctrl->handler->lock);
if (!ctrl->is_array || ctrl->is_dyn_array)
return -EINVAL;
for (i = 0; i < ctrl->nr_of_dims; i++)
elems *= dims[i];
if (elems == 0)
return -EINVAL;
p_array = kvzalloc(2 * elems * ctrl->elem_size, GFP_KERNEL);
if (!p_array)
return -ENOMEM;
kvfree(ctrl->p_array);
ctrl->p_array_alloc_elems = elems;
ctrl->elems = elems;
ctrl->new_elems = elems;
ctrl->p_array = p_array;
ctrl->p_new.p = p_array;
ctrl->p_cur.p = p_array + elems * ctrl->elem_size;
for (i = 0; i < ctrl->nr_of_dims; i++)
ctrl->dims[i] = dims[i];
ctrl->type_ops->init(ctrl, 0, ctrl->p_cur);
cur_to_new(ctrl);
send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_VALUE |
V4L2_EVENT_CTRL_CH_DIMENSIONS);
return 0;
}
EXPORT_SYMBOL(__v4l2_ctrl_modify_dimensions);
/* Implement VIDIOC_QUERY_EXT_CTRL */
int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc)
{
const unsigned int next_flags = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
u32 id = qc->id & V4L2_CTRL_ID_MASK;
struct v4l2_ctrl_ref *ref;
struct v4l2_ctrl *ctrl;
if (!hdl)
return -EINVAL;
mutex_lock(hdl->lock);
/* Try to find it */
ref = find_ref(hdl, id);
if ((qc->id & next_flags) && !list_empty(&hdl->ctrl_refs)) {
bool is_compound;
/* Match any control that is not hidden */
unsigned int mask = 1;
bool match = false;
if ((qc->id & next_flags) == V4L2_CTRL_FLAG_NEXT_COMPOUND) {
/* Match any hidden control */
match = true;
} else if ((qc->id & next_flags) == next_flags) {
/* Match any control, compound or not */
mask = 0;
}
/* Find the next control with ID > qc->id */
/* Did we reach the end of the control list? */
if (id >= node2id(hdl->ctrl_refs.prev)) {
ref = NULL; /* Yes, so there is no next control */
} else if (ref) {
/*
* We found a control with the given ID, so just get
* the next valid one in the list.
*/
list_for_each_entry_continue(ref, &hdl->ctrl_refs, node) {
is_compound = ref->ctrl->is_array ||
ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
if (id < ref->ctrl->id &&
(is_compound & mask) == match)
break;
}
if (&ref->node == &hdl->ctrl_refs)
ref = NULL;
} else {
/*
* No control with the given ID exists, so start
* searching for the next largest ID. We know there
* is one, otherwise the first 'if' above would have
* been true.
*/
list_for_each_entry(ref, &hdl->ctrl_refs, node) {
is_compound = ref->ctrl->is_array ||
ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
if (id < ref->ctrl->id &&
(is_compound & mask) == match)
break;
}
if (&ref->node == &hdl->ctrl_refs)
ref = NULL;
}
}
mutex_unlock(hdl->lock);
if (!ref)
return -EINVAL;
ctrl = ref->ctrl;
memset(qc, 0, sizeof(*qc));
if (id >= V4L2_CID_PRIVATE_BASE)
qc->id = id;
else
qc->id = ctrl->id;
strscpy(qc->name, ctrl->name, sizeof(qc->name));
qc->flags = user_flags(ctrl);
qc->type = ctrl->type;
qc->elem_size = ctrl->elem_size;
qc->elems = ctrl->elems;
qc->nr_of_dims = ctrl->nr_of_dims;
memcpy(qc->dims, ctrl->dims, qc->nr_of_dims * sizeof(qc->dims[0]));
qc->minimum = ctrl->minimum;
qc->maximum = ctrl->maximum;
qc->default_value = ctrl->default_value;
if (ctrl->type == V4L2_CTRL_TYPE_MENU ||
ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
qc->step = 1;
else
qc->step = ctrl->step;
return 0;
}
EXPORT_SYMBOL(v4l2_query_ext_ctrl);
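/*
 * Example (illustrative userspace sketch, hypothetical 'fd'): enumerating
 * every control, compound or not, using the NEXT flags handled above:
 *
 *	struct v4l2_query_ext_ctrl qc = {
 *		.id = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND,
 *	};
 *
 *	while (ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &qc) == 0) {
 *		printf("0x%08x: %s\n", qc.id, qc.name);
 *		qc.id |= V4L2_CTRL_FLAG_NEXT_CTRL |
 *			 V4L2_CTRL_FLAG_NEXT_COMPOUND;
 *	}
 */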
/* Implement VIDIOC_QUERYCTRL */
int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
{
struct v4l2_query_ext_ctrl qec = { qc->id };
int rc;
rc = v4l2_query_ext_ctrl(hdl, &qec);
if (rc)
return rc;
qc->id = qec.id;
qc->type = qec.type;
qc->flags = qec.flags;
strscpy(qc->name, qec.name, sizeof(qc->name));
switch (qc->type) {
case V4L2_CTRL_TYPE_INTEGER:
case V4L2_CTRL_TYPE_BOOLEAN:
case V4L2_CTRL_TYPE_MENU:
case V4L2_CTRL_TYPE_INTEGER_MENU:
case V4L2_CTRL_TYPE_STRING:
case V4L2_CTRL_TYPE_BITMASK:
qc->minimum = qec.minimum;
qc->maximum = qec.maximum;
qc->step = qec.step;
qc->default_value = qec.default_value;
break;
default:
qc->minimum = 0;
qc->maximum = 0;
qc->step = 0;
qc->default_value = 0;
break;
}
return 0;
}
EXPORT_SYMBOL(v4l2_queryctrl);
/* Implement VIDIOC_QUERYMENU */
int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
{
struct v4l2_ctrl *ctrl;
u32 i = qm->index;
ctrl = v4l2_ctrl_find(hdl, qm->id);
if (!ctrl)
return -EINVAL;
qm->reserved = 0;
/* Sanity checks */
switch (ctrl->type) {
case V4L2_CTRL_TYPE_MENU:
if (!ctrl->qmenu)
return -EINVAL;
break;
case V4L2_CTRL_TYPE_INTEGER_MENU:
if (!ctrl->qmenu_int)
return -EINVAL;
break;
default:
return -EINVAL;
}
if (i < ctrl->minimum || i > ctrl->maximum)
return -EINVAL;
/* Use mask to see if this menu item should be skipped */
if (ctrl->menu_skip_mask & (1ULL << i))
return -EINVAL;
/* Empty menu items should also be skipped */
if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
if (!ctrl->qmenu[i] || ctrl->qmenu[i][0] == '\0')
return -EINVAL;
strscpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
} else {
qm->value = ctrl->qmenu_int[i];
}
return 0;
}
EXPORT_SYMBOL(v4l2_querymenu);
/*
* VIDIOC_LOG_STATUS helpers
*/
int v4l2_ctrl_log_status(struct file *file, void *fh)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_fh *vfh = file->private_data;
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) && vfd->v4l2_dev)
v4l2_ctrl_handler_log_status(vfh->ctrl_handler,
vfd->v4l2_dev->name);
return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_log_status);
int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd)
{
v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name);
return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status);
/*
* VIDIOC_(UN)SUBSCRIBE_EVENT implementation
*/
static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev,
unsigned int elems)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
if (!ctrl)
return -EINVAL;
v4l2_ctrl_lock(ctrl);
list_add_tail(&sev->node, &ctrl->ev_subs);
if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS &&
(sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL))
send_initial_event(sev->fh, ctrl);
v4l2_ctrl_unlock(ctrl);
return 0;
}
static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
if (!ctrl)
return;
v4l2_ctrl_lock(ctrl);
list_del(&sev->node);
v4l2_ctrl_unlock(ctrl);
}
void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new)
{
u32 old_changes = old->u.ctrl.changes;
old->u.ctrl = new->u.ctrl;
old->u.ctrl.changes |= old_changes;
}
EXPORT_SYMBOL(v4l2_ctrl_replace);
void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new)
{
new->u.ctrl.changes |= old->u.ctrl.changes;
}
EXPORT_SYMBOL(v4l2_ctrl_merge);
const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = {
.add = v4l2_ctrl_add_event,
.del = v4l2_ctrl_del_event,
.replace = v4l2_ctrl_replace,
.merge = v4l2_ctrl_merge,
};
EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops);
int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
if (sub->type == V4L2_EVENT_CTRL)
return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
return -EINVAL;
}
EXPORT_SYMBOL(v4l2_ctrl_subscribe_event);
int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
{
if (!sd->ctrl_handler)
return -EINVAL;
return v4l2_ctrl_subscribe_event(fh, sub);
}
EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
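/*
 * Example (illustrative userspace sketch, hypothetical 'fd'): subscribing
 * to change events for one control, with an initial event so the current
 * value is reported right away:
 *
 *	struct v4l2_event_subscription sub = {
 *		.type = V4L2_EVENT_CTRL,
 *		.id = V4L2_CID_BRIGHTNESS,
 *		.flags = V4L2_EVENT_SUB_FL_SEND_INITIAL,
 *	};
 *
 *	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
 *	// then poll() for EPOLLPRI and fetch events with VIDIOC_DQEVENT
 */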
/*
* poll helper
*/
__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
{
struct v4l2_fh *fh = file->private_data;
poll_wait(file, &fh->wait, wait);
if (v4l2_event_pending(fh))
return EPOLLPRI;
return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_poll);
| linux-master | drivers/media/v4l2-core/v4l2-ctrls-api.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* V4L2 JPEG header parser helpers.
*
* Copyright (C) 2019 Pengutronix, Philipp Zabel <[email protected]>
*
* For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) [1]
*
* [1] https://www.w3.org/Graphics/JPEG/itu-t81.pdf
*/
#include <asm/unaligned.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <media/v4l2-jpeg.h>
MODULE_DESCRIPTION("V4L2 JPEG header parser helpers");
MODULE_AUTHOR("Philipp Zabel <[email protected]>");
MODULE_LICENSE("GPL");
/* Table B.1 - Marker code assignments */
#define SOF0 0xffc0 /* start of frame */
#define SOF1 0xffc1
#define SOF2 0xffc2
#define SOF3 0xffc3
#define SOF5 0xffc5
#define SOF7 0xffc7
#define JPG 0xffc8 /* extensions */
#define SOF9 0xffc9
#define SOF11 0xffcb
#define SOF13 0xffcd
#define SOF15 0xffcf
#define DHT 0xffc4 /* huffman table */
#define DAC 0xffcc /* arithmetic coding conditioning */
#define RST0 0xffd0 /* restart */
#define RST7 0xffd7
#define SOI 0xffd8 /* start of image */
#define EOI 0xffd9 /* end of image */
#define SOS 0xffda /* start of stream */
#define DQT 0xffdb /* quantization table */
#define DNL 0xffdc /* number of lines */
#define DRI 0xffdd /* restart interval */
#define DHP 0xffde /* hierarchical progression */
#define EXP 0xffdf /* expand reference */
#define APP0 0xffe0 /* application data */
#define APP14 0xffee /* application data for colour encoding */
#define APP15 0xffef
#define JPG0 0xfff0 /* extensions */
#define JPG13 0xfffd
#define COM 0xfffe /* comment */
#define TEM 0xff01 /* temporary */
/**
* struct jpeg_stream - JPEG byte stream
* @curr: current position in stream
* @end: end position, after last byte
*/
struct jpeg_stream {
u8 *curr;
u8 *end;
};
/* returns a value that fits into u8, or negative error */
static int jpeg_get_byte(struct jpeg_stream *stream)
{
if (stream->curr >= stream->end)
return -EINVAL;
return *stream->curr++;
}
/* returns a value that fits into u16, or negative error */
static int jpeg_get_word_be(struct jpeg_stream *stream)
{
u16 word;
if (stream->curr + sizeof(__be16) > stream->end)
return -EINVAL;
word = get_unaligned_be16(stream->curr);
stream->curr += sizeof(__be16);
return word;
}
static int jpeg_skip(struct jpeg_stream *stream, size_t len)
{
if (stream->curr + len > stream->end)
return -EINVAL;
stream->curr += len;
return 0;
}
static int jpeg_next_marker(struct jpeg_stream *stream)
{
int byte;
u16 marker = 0;
while ((byte = jpeg_get_byte(stream)) >= 0) {
marker = (marker << 8) | byte;
		/* skip stuffing bytes and reserved (RESn) markers */
if (marker == TEM || (marker > 0xffbf && marker < 0xffff))
return marker;
}
return byte;
}
/* this does not advance the current position in the stream */
static int jpeg_reference_segment(struct jpeg_stream *stream,
struct v4l2_jpeg_reference *segment)
{
u16 len;
if (stream->curr + sizeof(__be16) > stream->end)
return -EINVAL;
len = get_unaligned_be16(stream->curr);
if (stream->curr + len > stream->end)
return -EINVAL;
segment->start = stream->curr;
segment->length = len;
return 0;
}
static int v4l2_jpeg_decode_subsampling(u8 nf, u8 h_v)
{
if (nf == 1)
return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
/* no chroma subsampling for 4-component images */
if (nf == 4 && h_v != 0x11)
return -EINVAL;
switch (h_v) {
case 0x11:
return V4L2_JPEG_CHROMA_SUBSAMPLING_444;
case 0x21:
return V4L2_JPEG_CHROMA_SUBSAMPLING_422;
case 0x22:
return V4L2_JPEG_CHROMA_SUBSAMPLING_420;
case 0x41:
return V4L2_JPEG_CHROMA_SUBSAMPLING_411;
default:
return -EINVAL;
}
}
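/*
 * Worked example: h_v packs the sampling factors of the first component as
 * (H << 4) | V. 0x22 therefore means 2x2 luma samples per chroma sample,
 * i.e. 4:2:0, while 0x21 (2x1) is 4:2:2 and 0x11 (1x1) is 4:4:4.
 */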
static int jpeg_parse_frame_header(struct jpeg_stream *stream, u16 sof_marker,
struct v4l2_jpeg_frame_header *frame_header)
{
int len = jpeg_get_word_be(stream);
if (len < 0)
return len;
/* Lf = 8 + 3 * Nf, Nf >= 1 */
if (len < 8 + 3)
return -EINVAL;
if (frame_header) {
/* Table B.2 - Frame header parameter sizes and values */
int p, y, x, nf;
int i;
p = jpeg_get_byte(stream);
if (p < 0)
return p;
/*
* Baseline DCT only supports 8-bit precision.
* Extended sequential DCT also supports 12-bit precision.
*/
if (p != 8 && (p != 12 || sof_marker != SOF1))
return -EINVAL;
y = jpeg_get_word_be(stream);
if (y < 0)
return y;
if (y == 0)
return -EINVAL;
x = jpeg_get_word_be(stream);
if (x < 0)
return x;
if (x == 0)
return -EINVAL;
nf = jpeg_get_byte(stream);
if (nf < 0)
return nf;
/*
* The spec allows 1 <= Nf <= 255, but we only support up to 4
* components.
*/
if (nf < 1 || nf > V4L2_JPEG_MAX_COMPONENTS)
return -EINVAL;
if (len != 8 + 3 * nf)
return -EINVAL;
frame_header->precision = p;
frame_header->height = y;
frame_header->width = x;
frame_header->num_components = nf;
for (i = 0; i < nf; i++) {
struct v4l2_jpeg_frame_component_spec *component;
int c, h_v, tq;
c = jpeg_get_byte(stream);
if (c < 0)
return c;
h_v = jpeg_get_byte(stream);
if (h_v < 0)
return h_v;
if (i == 0) {
int subs;
subs = v4l2_jpeg_decode_subsampling(nf, h_v);
if (subs < 0)
return subs;
frame_header->subsampling = subs;
} else if (h_v != 0x11) {
/* all chroma sampling factors must be 1 */
return -EINVAL;
}
tq = jpeg_get_byte(stream);
if (tq < 0)
return tq;
component = &frame_header->component[i];
component->component_identifier = c;
component->horizontal_sampling_factor =
(h_v >> 4) & 0xf;
component->vertical_sampling_factor = h_v & 0xf;
component->quantization_table_selector = tq;
}
} else {
return jpeg_skip(stream, len - 2);
}
return 0;
}
static int jpeg_parse_scan_header(struct jpeg_stream *stream,
struct v4l2_jpeg_scan_header *scan_header)
{
size_t skip;
int len = jpeg_get_word_be(stream);
if (len < 0)
return len;
	/* Ls = 6 + 2 * Ns, Ns >= 1 */
if (len < 6 + 2)
return -EINVAL;
if (scan_header) {
int ns;
int i;
ns = jpeg_get_byte(stream);
if (ns < 0)
return ns;
if (ns < 1 || ns > 4 || len != 6 + 2 * ns)
return -EINVAL;
scan_header->num_components = ns;
for (i = 0; i < ns; i++) {
struct v4l2_jpeg_scan_component_spec *component;
int cs, td_ta;
cs = jpeg_get_byte(stream);
if (cs < 0)
return cs;
td_ta = jpeg_get_byte(stream);
if (td_ta < 0)
return td_ta;
component = &scan_header->component[i];
component->component_selector = cs;
component->dc_entropy_coding_table_selector =
(td_ta >> 4) & 0xf;
component->ac_entropy_coding_table_selector =
td_ta & 0xf;
}
skip = 3; /* skip Ss, Se, Ah, and Al */
} else {
skip = len - 2;
}
return jpeg_skip(stream, skip);
}
/* B.2.4.1 Quantization table-specification syntax */
static int jpeg_parse_quantization_tables(struct jpeg_stream *stream,
u8 precision,
struct v4l2_jpeg_reference *tables)
{
int len = jpeg_get_word_be(stream);
if (len < 0)
return len;
/* Lq = 2 + n * 65 (for baseline DCT), n >= 1 */
if (len < 2 + 65)
return -EINVAL;
len -= 2;
while (len >= 65) {
u8 pq, tq, *qk;
int ret;
int pq_tq = jpeg_get_byte(stream);
if (pq_tq < 0)
return pq_tq;
/* quantization table element precision */
pq = (pq_tq >> 4) & 0xf;
/*
* Only 8-bit Qk values for 8-bit sample precision. Extended
* sequential DCT with 12-bit sample precision also supports
* 16-bit Qk values.
*/
if (pq != 0 && (pq != 1 || precision != 12))
return -EINVAL;
/* quantization table destination identifier */
tq = pq_tq & 0xf;
if (tq > 3)
return -EINVAL;
/* quantization table element */
qk = stream->curr;
ret = jpeg_skip(stream, pq ? 128 : 64);
if (ret < 0)
return -EINVAL;
if (tables) {
tables[tq].start = qk;
tables[tq].length = pq ? 128 : 64;
}
len -= pq ? 129 : 65;
}
return 0;
}
/* B.2.4.2 Huffman table-specification syntax */
static int jpeg_parse_huffman_tables(struct jpeg_stream *stream,
struct v4l2_jpeg_reference *tables)
{
int mt;
int len = jpeg_get_word_be(stream);
if (len < 0)
return len;
/* Table B.5 - Huffman table specification parameter sizes and values */
if (len < 2 + 17)
return -EINVAL;
for (len -= 2; len >= 17; len -= 17 + mt) {
u8 tc, th, *table;
int tc_th = jpeg_get_byte(stream);
int i, ret;
if (tc_th < 0)
return tc_th;
/* table class - 0 = DC, 1 = AC */
tc = (tc_th >> 4) & 0xf;
if (tc > 1)
return -EINVAL;
/* huffman table destination identifier */
th = tc_th & 0xf;
/* only two Huffman tables for baseline DCT */
if (th > 1)
return -EINVAL;
/* BITS - number of Huffman codes with length i */
table = stream->curr;
mt = 0;
for (i = 0; i < 16; i++) {
int li;
li = jpeg_get_byte(stream);
if (li < 0)
return li;
mt += li;
}
/* HUFFVAL - values associated with each Huffman code */
ret = jpeg_skip(stream, mt);
if (ret < 0)
return ret;
if (tables) {
tables[(tc << 1) | th].start = table;
tables[(tc << 1) | th].length = stream->curr - table;
}
}
	/* skip any remaining bytes of the segment not consumed above */
	return jpeg_skip(stream, len);
}
/* B.2.4.4 Restart interval definition syntax */
static int jpeg_parse_restart_interval(struct jpeg_stream *stream,
u16 *restart_interval)
{
int len = jpeg_get_word_be(stream);
int ri;
if (len < 0)
return len;
if (len != 4)
return -EINVAL;
ri = jpeg_get_word_be(stream);
if (ri < 0)
return ri;
*restart_interval = ri;
return 0;
}
static int jpeg_skip_segment(struct jpeg_stream *stream)
{
int len = jpeg_get_word_be(stream);
if (len < 0)
return len;
if (len < 2)
return -EINVAL;
return jpeg_skip(stream, len - 2);
}
/* Rec. ITU-T T.872 (06/2012) 6.5.3 */
static int jpeg_parse_app14_data(struct jpeg_stream *stream,
enum v4l2_jpeg_app14_tf *tf)
{
int ret;
int lp;
int skip;
lp = jpeg_get_word_be(stream);
if (lp < 0)
return lp;
/* Check for "Adobe\0" in Ap1..6 */
if (stream->curr + 6 > stream->end ||
strncmp(stream->curr, "Adobe\0", 6))
return jpeg_skip(stream, lp - 2);
/* get to Ap12 */
ret = jpeg_skip(stream, 11);
if (ret < 0)
return ret;
ret = jpeg_get_byte(stream);
if (ret < 0)
return ret;
*tf = ret;
	/* skip the rest of the segment; this at least ensures the segment is complete */
skip = lp - 2 - 11 - 1;
return jpeg_skip(stream, skip);
}
/**
* v4l2_jpeg_parse_header - locate marker segments and optionally parse headers
* @buf: address of the JPEG buffer, should start with a SOI marker
* @len: length of the JPEG buffer
* @out: returns marker segment positions and optionally parsed headers
*
* The out->scan_header pointer must be initialized to NULL or point to a valid
* v4l2_jpeg_scan_header structure. The out->huffman_tables and
* out->quantization_tables pointers must be initialized to NULL or point to a
* valid array of 4 v4l2_jpeg_reference structures each.
*
* Returns 0 or negative error if parsing failed.
*/
int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out)
{
struct jpeg_stream stream;
int marker;
int ret = 0;
stream.curr = buf;
stream.end = stream.curr + len;
out->num_dht = 0;
out->num_dqt = 0;
/* the first bytes must be SOI, B.2.1 High-level syntax */
if (jpeg_get_word_be(&stream) != SOI)
return -EINVAL;
/* init value to signal if this marker is not present */
out->app14_tf = V4L2_JPEG_APP14_TF_UNKNOWN;
/* loop through marker segments */
while ((marker = jpeg_next_marker(&stream)) >= 0) {
switch (marker) {
/* baseline DCT, extended sequential DCT */
case SOF0 ... SOF1:
ret = jpeg_reference_segment(&stream, &out->sof);
if (ret < 0)
return ret;
ret = jpeg_parse_frame_header(&stream, marker,
&out->frame);
break;
/* progressive, lossless */
case SOF2 ... SOF3:
/* differential coding */
case SOF5 ... SOF7:
/* arithmetic coding */
case SOF9 ... SOF11:
case SOF13 ... SOF15:
case DAC:
case TEM:
return -EINVAL;
case DHT:
ret = jpeg_reference_segment(&stream,
&out->dht[out->num_dht++ % 4]);
if (ret < 0)
return ret;
if (!out->huffman_tables) {
ret = jpeg_skip_segment(&stream);
break;
}
ret = jpeg_parse_huffman_tables(&stream,
out->huffman_tables);
break;
case DQT:
ret = jpeg_reference_segment(&stream,
&out->dqt[out->num_dqt++ % 4]);
if (ret < 0)
return ret;
if (!out->quantization_tables) {
ret = jpeg_skip_segment(&stream);
break;
}
ret = jpeg_parse_quantization_tables(&stream,
out->frame.precision,
out->quantization_tables);
break;
case DRI:
ret = jpeg_parse_restart_interval(&stream,
&out->restart_interval);
break;
case APP14:
ret = jpeg_parse_app14_data(&stream,
&out->app14_tf);
break;
case SOS:
ret = jpeg_reference_segment(&stream, &out->sos);
if (ret < 0)
return ret;
ret = jpeg_parse_scan_header(&stream, out->scan);
/*
* stop parsing, the scan header marks the beginning of
* the entropy coded segment
*/
out->ecs_offset = stream.curr - (u8 *)buf;
return ret;
/* markers without parameters */
case RST0 ... RST7: /* restart */
case SOI: /* start of image */
case EOI: /* end of image */
break;
/* skip unknown or unsupported marker segments */
default:
ret = jpeg_skip_segment(&stream);
break;
}
if (ret < 0)
return ret;
}
return marker;
}
EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_header);
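/*
 * Example (illustrative driver-side sketch; 'buf' and 'len' are
 * hypothetical): a typical caller that only needs the frame geometry and
 * the offset of the entropy-coded segment. The zero initializer also
 * satisfies the NULL-pointer requirements described in the kernel-doc
 * above:
 *
 *	struct v4l2_jpeg_header header = {};
 *	int ret = v4l2_jpeg_parse_header(buf, len, &header);
 *
 *	if (!ret)
 *		pr_debug("%ux%u, ECS at %zu\n", header.frame.width,
 *			 header.frame.height, header.ecs_offset);
 */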
/**
* v4l2_jpeg_parse_frame_header - parse frame header
* @buf: address of the frame header, after the SOF0 marker
* @len: length of the frame header
* @frame_header: returns the parsed frame header
*
* Returns 0 or negative error if parsing failed.
*/
int v4l2_jpeg_parse_frame_header(void *buf, size_t len,
struct v4l2_jpeg_frame_header *frame_header)
{
struct jpeg_stream stream;
stream.curr = buf;
stream.end = stream.curr + len;
return jpeg_parse_frame_header(&stream, SOF0, frame_header);
}
EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_frame_header);
/**
* v4l2_jpeg_parse_scan_header - parse scan header
* @buf: address of the scan header, after the SOS marker
* @len: length of the scan header
* @scan_header: returns the parsed scan header
*
* Returns 0 or negative error if parsing failed.
*/
int v4l2_jpeg_parse_scan_header(void *buf, size_t len,
struct v4l2_jpeg_scan_header *scan_header)
{
struct jpeg_stream stream;
stream.curr = buf;
stream.end = stream.curr + len;
return jpeg_parse_scan_header(&stream, scan_header);
}
EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_scan_header);
/**
* v4l2_jpeg_parse_quantization_tables - parse quantization tables segment
* @buf: address of the quantization table segment, after the DQT marker
* @len: length of the quantization table segment
* @precision: sample precision (P) in bits per component
* @q_tables: returns four references into the buffer for the
* four possible quantization table destinations
*
* Returns 0 or negative error if parsing failed.
*/
int v4l2_jpeg_parse_quantization_tables(void *buf, size_t len, u8 precision,
struct v4l2_jpeg_reference *q_tables)
{
struct jpeg_stream stream;
stream.curr = buf;
stream.end = stream.curr + len;
return jpeg_parse_quantization_tables(&stream, precision, q_tables);
}
EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_quantization_tables);
/**
* v4l2_jpeg_parse_huffman_tables - parse huffman tables segment
* @buf: address of the Huffman table segment, after the DHT marker
* @len: length of the Huffman table segment
* @huffman_tables: returns four references into the buffer for the
* four possible Huffman table destinations, in
* the order DC0, DC1, AC0, AC1
*
* Returns 0 or negative error if parsing failed.
*/
int v4l2_jpeg_parse_huffman_tables(void *buf, size_t len,
struct v4l2_jpeg_reference *huffman_tables)
{
struct jpeg_stream stream;
stream.curr = buf;
stream.end = stream.curr + len;
return jpeg_parse_huffman_tables(&stream, huffman_tables);
}
EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_huffman_tables);
| linux-master | drivers/media/v4l2-core/v4l2-jpeg.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Video capture interface for Linux version 2
*
* A generic framework to process V4L2 ioctl commands.
*
* Authors: Alan Cox, <[email protected]> (version 1)
* Mauro Carvalho Chehab <[email protected]> (version 2)
*/
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/v4l2-subdev.h>
#include <linux/videodev2.h>
#include <media/media-device.h> /* for media_set_bus_info() */
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-mem2mem.h>
#include <trace/events/v4l2.h>
#define is_valid_ioctl(vfd, cmd) test_bit(_IOC_NR(cmd), (vfd)->valid_ioctls)
struct std_descr {
v4l2_std_id std;
const char *descr;
};
static const struct std_descr standards[] = {
{ V4L2_STD_NTSC, "NTSC" },
{ V4L2_STD_NTSC_M, "NTSC-M" },
{ V4L2_STD_NTSC_M_JP, "NTSC-M-JP" },
{ V4L2_STD_NTSC_M_KR, "NTSC-M-KR" },
{ V4L2_STD_NTSC_443, "NTSC-443" },
{ V4L2_STD_PAL, "PAL" },
{ V4L2_STD_PAL_BG, "PAL-BG" },
{ V4L2_STD_PAL_B, "PAL-B" },
{ V4L2_STD_PAL_B1, "PAL-B1" },
{ V4L2_STD_PAL_G, "PAL-G" },
{ V4L2_STD_PAL_H, "PAL-H" },
{ V4L2_STD_PAL_I, "PAL-I" },
{ V4L2_STD_PAL_DK, "PAL-DK" },
{ V4L2_STD_PAL_D, "PAL-D" },
{ V4L2_STD_PAL_D1, "PAL-D1" },
{ V4L2_STD_PAL_K, "PAL-K" },
{ V4L2_STD_PAL_M, "PAL-M" },
{ V4L2_STD_PAL_N, "PAL-N" },
{ V4L2_STD_PAL_Nc, "PAL-Nc" },
{ V4L2_STD_PAL_60, "PAL-60" },
{ V4L2_STD_SECAM, "SECAM" },
{ V4L2_STD_SECAM_B, "SECAM-B" },
{ V4L2_STD_SECAM_G, "SECAM-G" },
{ V4L2_STD_SECAM_H, "SECAM-H" },
{ V4L2_STD_SECAM_DK, "SECAM-DK" },
{ V4L2_STD_SECAM_D, "SECAM-D" },
{ V4L2_STD_SECAM_K, "SECAM-K" },
{ V4L2_STD_SECAM_K1, "SECAM-K1" },
{ V4L2_STD_SECAM_L, "SECAM-L" },
{ V4L2_STD_SECAM_LC, "SECAM-Lc" },
{ 0, "Unknown" }
};
/* video4linux standard ID conversion to standard name */
const char *v4l2_norm_to_name(v4l2_std_id id)
{
u32 myid = id;
int i;
	/* HACK: the ppc32 architecture doesn't provide a __ucmpdi2 helper for
	   64-bit comparisons, so with some gcc variants compilation fails on
	   that architecture. Truncating to u32 is safe since the largest
	   standard ID currently in use is only 30 bits wide.
	 */
BUG_ON(myid != id);
for (i = 0; standards[i].std; i++)
if (myid == standards[i].std)
break;
return standards[i].descr;
}
EXPORT_SYMBOL(v4l2_norm_to_name);
/* Returns frame period for the given standard */
void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod)
{
if (id & V4L2_STD_525_60) {
frameperiod->numerator = 1001;
frameperiod->denominator = 30000;
} else {
frameperiod->numerator = 1;
frameperiod->denominator = 25;
}
}
EXPORT_SYMBOL(v4l2_video_std_frame_period);
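/*
 * Worked example: for the NTSC family the period is 1001/30000 s per frame,
 * i.e. ~29.97 frames per second; all other standards are treated as exactly
 * 25 frames per second (1/25 s).
 */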
/* Fill in the fields of a v4l2_standard structure according to the
   'id' and 'name' parameters. Returns negative on error. */
int v4l2_video_std_construct(struct v4l2_standard *vs,
int id, const char *name)
{
vs->id = id;
v4l2_video_std_frame_period(id, &vs->frameperiod);
vs->framelines = (id & V4L2_STD_525_60) ? 525 : 625;
strscpy(vs->name, name, sizeof(vs->name));
return 0;
}
EXPORT_SYMBOL(v4l2_video_std_construct);
/* Fill in the fields of a v4l2_standard structure according to the
* 'id' and 'vs->index' parameters. Returns negative on error. */
int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id)
{
v4l2_std_id curr_id = 0;
unsigned int index = vs->index, i, j = 0;
const char *descr = "";
/* Return -ENODATA if the id for the current input
or output is 0, meaning that it doesn't support this API. */
if (id == 0)
return -ENODATA;
/* Return norm array in a canonical way */
for (i = 0; i <= index && id; i++) {
/* last std value in the standards array is 0, so this
while always ends there since (id & 0) == 0. */
while ((id & standards[j].std) != standards[j].std)
j++;
curr_id = standards[j].std;
descr = standards[j].descr;
j++;
if (curr_id == 0)
break;
if (curr_id != V4L2_STD_PAL &&
curr_id != V4L2_STD_SECAM &&
curr_id != V4L2_STD_NTSC)
id &= ~curr_id;
}
if (i <= index)
return -EINVAL;
v4l2_video_std_construct(vs, curr_id, descr);
return 0;
}
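/*
 * Worked example (illustrative): with id = V4L2_STD_NTSC_M | V4L2_STD_PAL_BG
 * the canonical walk above yields "NTSC-M" at index 0 and "PAL-BG" at
 * index 1; index 2 returns -EINVAL since both bits have been consumed.
 * The composite PAL/SECAM/NTSC ids are deliberately never masked out of
 * 'id', so when id is e.g. V4L2_STD_PAL, "PAL" itself is enumerated first
 * and its member standards follow.
 */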
/* ----------------------------------------------------------------- */
/* some arrays for pretty-printing debug messages of enum types */
const char *v4l2_field_names[] = {
[V4L2_FIELD_ANY] = "any",
[V4L2_FIELD_NONE] = "none",
[V4L2_FIELD_TOP] = "top",
[V4L2_FIELD_BOTTOM] = "bottom",
[V4L2_FIELD_INTERLACED] = "interlaced",
[V4L2_FIELD_SEQ_TB] = "seq-tb",
[V4L2_FIELD_SEQ_BT] = "seq-bt",
[V4L2_FIELD_ALTERNATE] = "alternate",
[V4L2_FIELD_INTERLACED_TB] = "interlaced-tb",
[V4L2_FIELD_INTERLACED_BT] = "interlaced-bt",
};
EXPORT_SYMBOL(v4l2_field_names);
const char *v4l2_type_names[] = {
[0] = "0",
[V4L2_BUF_TYPE_VIDEO_CAPTURE] = "vid-cap",
[V4L2_BUF_TYPE_VIDEO_OVERLAY] = "vid-overlay",
[V4L2_BUF_TYPE_VIDEO_OUTPUT] = "vid-out",
[V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap",
[V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out",
[V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap",
[V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out",
[V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "vid-out-overlay",
[V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane",
[V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane",
[V4L2_BUF_TYPE_SDR_CAPTURE] = "sdr-cap",
[V4L2_BUF_TYPE_SDR_OUTPUT] = "sdr-out",
[V4L2_BUF_TYPE_META_CAPTURE] = "meta-cap",
[V4L2_BUF_TYPE_META_OUTPUT] = "meta-out",
};
EXPORT_SYMBOL(v4l2_type_names);
static const char *v4l2_memory_names[] = {
[V4L2_MEMORY_MMAP] = "mmap",
[V4L2_MEMORY_USERPTR] = "userptr",
[V4L2_MEMORY_OVERLAY] = "overlay",
[V4L2_MEMORY_DMABUF] = "dmabuf",
};
#define prt_names(a, arr) (((unsigned)(a)) < ARRAY_SIZE(arr) ? arr[a] : "unknown")
/* ------------------------------------------------------------------ */
/* debug help functions */
static void v4l_print_querycap(const void *arg, bool write_only)
{
const struct v4l2_capability *p = arg;
pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, capabilities=0x%08x, device_caps=0x%08x\n",
(int)sizeof(p->driver), p->driver,
(int)sizeof(p->card), p->card,
(int)sizeof(p->bus_info), p->bus_info,
p->version, p->capabilities, p->device_caps);
}
static void v4l_print_enuminput(const void *arg, bool write_only)
{
const struct v4l2_input *p = arg;
pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
p->tuner, (unsigned long long)p->std, p->status,
p->capabilities);
}
static void v4l_print_enumoutput(const void *arg, bool write_only)
{
const struct v4l2_output *p = arg;
pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
p->modulator, (unsigned long long)p->std, p->capabilities);
}
static void v4l_print_audio(const void *arg, bool write_only)
{
const struct v4l2_audio *p = arg;
if (write_only)
pr_cont("index=%u, mode=0x%x\n", p->index, p->mode);
else
pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n",
p->index, (int)sizeof(p->name), p->name,
p->capability, p->mode);
}
static void v4l_print_audioout(const void *arg, bool write_only)
{
const struct v4l2_audioout *p = arg;
if (write_only)
pr_cont("index=%u\n", p->index);
else
pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n",
p->index, (int)sizeof(p->name), p->name,
p->capability, p->mode);
}
static void v4l_print_fmtdesc(const void *arg, bool write_only)
{
const struct v4l2_fmtdesc *p = arg;
pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%p4cc, mbus_code=0x%04x, description='%.*s'\n",
p->index, prt_names(p->type, v4l2_type_names),
p->flags, &p->pixelformat, p->mbus_code,
(int)sizeof(p->description), p->description);
}
static void v4l_print_format(const void *arg, bool write_only)
{
const struct v4l2_format *p = arg;
const struct v4l2_pix_format *pix;
const struct v4l2_pix_format_mplane *mp;
const struct v4l2_vbi_format *vbi;
const struct v4l2_sliced_vbi_format *sliced;
const struct v4l2_window *win;
const struct v4l2_meta_format *meta;
u32 pixelformat;
u32 planes;
unsigned i;
pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
pix = &p->fmt.pix;
pr_cont(", width=%u, height=%u, pixelformat=%p4cc, field=%s, bytesperline=%u, sizeimage=%u, colorspace=%d, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
pix->width, pix->height, &pix->pixelformat,
prt_names(pix->field, v4l2_field_names),
pix->bytesperline, pix->sizeimage,
pix->colorspace, pix->flags, pix->ycbcr_enc,
pix->quantization, pix->xfer_func);
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
mp = &p->fmt.pix_mp;
pixelformat = mp->pixelformat;
pr_cont(", width=%u, height=%u, format=%p4cc, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
mp->width, mp->height, &pixelformat,
prt_names(mp->field, v4l2_field_names),
mp->colorspace, mp->num_planes, mp->flags,
mp->ycbcr_enc, mp->quantization, mp->xfer_func);
planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
for (i = 0; i < planes; i++)
printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
mp->plane_fmt[i].bytesperline,
mp->plane_fmt[i].sizeimage);
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
win = &p->fmt.win;
pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, global_alpha=0x%02x\n",
win->w.width, win->w.height, win->w.left, win->w.top,
prt_names(win->field, v4l2_field_names),
win->chromakey, win->global_alpha);
break;
case V4L2_BUF_TYPE_VBI_CAPTURE:
case V4L2_BUF_TYPE_VBI_OUTPUT:
vbi = &p->fmt.vbi;
pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, sample_format=%p4cc, start=%u,%u, count=%u,%u\n",
vbi->sampling_rate, vbi->offset,
vbi->samples_per_line, &vbi->sample_format,
vbi->start[0], vbi->start[1],
vbi->count[0], vbi->count[1]);
break;
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
sliced = &p->fmt.sliced;
pr_cont(", service_set=0x%08x, io_size=%d\n",
sliced->service_set, sliced->io_size);
for (i = 0; i < 24; i++)
printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i,
sliced->service_lines[0][i],
sliced->service_lines[1][i]);
break;
case V4L2_BUF_TYPE_SDR_CAPTURE:
case V4L2_BUF_TYPE_SDR_OUTPUT:
pixelformat = p->fmt.sdr.pixelformat;
pr_cont(", pixelformat=%p4cc\n", &pixelformat);
break;
case V4L2_BUF_TYPE_META_CAPTURE:
case V4L2_BUF_TYPE_META_OUTPUT:
meta = &p->fmt.meta;
pixelformat = meta->dataformat;
pr_cont(", dataformat=%p4cc, buffersize=%u\n",
&pixelformat, meta->buffersize);
break;
}
}
static void v4l_print_framebuffer(const void *arg, bool write_only)
{
const struct v4l2_framebuffer *p = arg;
pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, height=%u, pixelformat=%p4cc, bytesperline=%u, sizeimage=%u, colorspace=%d\n",
p->capability, p->flags, p->base, p->fmt.width, p->fmt.height,
&p->fmt.pixelformat, p->fmt.bytesperline, p->fmt.sizeimage,
p->fmt.colorspace);
}
static void v4l_print_buftype(const void *arg, bool write_only)
{
pr_cont("type=%s\n", prt_names(*(u32 *)arg, v4l2_type_names));
}
static void v4l_print_modulator(const void *arg, bool write_only)
{
const struct v4l2_modulator *p = arg;
if (write_only)
pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
else
pr_cont("index=%u, name=%.*s, capability=0x%x, rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
p->index, (int)sizeof(p->name), p->name, p->capability,
p->rangelow, p->rangehigh, p->txsubchans);
}
static void v4l_print_tuner(const void *arg, bool write_only)
{
const struct v4l2_tuner *p = arg;
if (write_only)
pr_cont("index=%u, audmode=%u\n", p->index, p->audmode);
else
pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, rangelow=%u, rangehigh=%u, signal=%u, afc=%d, rxsubchans=0x%x, audmode=%u\n",
p->index, (int)sizeof(p->name), p->name, p->type,
p->capability, p->rangelow,
p->rangehigh, p->signal, p->afc,
p->rxsubchans, p->audmode);
}
static void v4l_print_frequency(const void *arg, bool write_only)
{
const struct v4l2_frequency *p = arg;
pr_cont("tuner=%u, type=%u, frequency=%u\n",
p->tuner, p->type, p->frequency);
}
static void v4l_print_standard(const void *arg, bool write_only)
{
const struct v4l2_standard *p = arg;
pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, framelines=%u\n",
p->index,
(unsigned long long)p->id, (int)sizeof(p->name), p->name,
p->frameperiod.numerator,
p->frameperiod.denominator,
p->framelines);
}
static void v4l_print_std(const void *arg, bool write_only)
{
pr_cont("std=0x%08Lx\n", *(const long long unsigned *)arg);
}
static void v4l_print_hw_freq_seek(const void *arg, bool write_only)
{
const struct v4l2_hw_freq_seek *p = arg;
pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, rangelow=%u, rangehigh=%u\n",
p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing,
p->rangelow, p->rangehigh);
}
static void v4l_print_requestbuffers(const void *arg, bool write_only)
{
const struct v4l2_requestbuffers *p = arg;
pr_cont("count=%d, type=%s, memory=%s\n",
p->count,
prt_names(p->type, v4l2_type_names),
prt_names(p->memory, v4l2_memory_names));
}
static void v4l_print_buffer(const void *arg, bool write_only)
{
const struct v4l2_buffer *p = arg;
const struct v4l2_timecode *tc = &p->timecode;
const struct v4l2_plane *plane;
int i;
pr_cont("%02d:%02d:%02d.%06ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s",
(int)p->timestamp.tv_sec / 3600,
((int)p->timestamp.tv_sec / 60) % 60,
((int)p->timestamp.tv_sec % 60),
(long)p->timestamp.tv_usec,
p->index,
prt_names(p->type, v4l2_type_names), p->request_fd,
p->flags, prt_names(p->field, v4l2_field_names),
p->sequence, prt_names(p->memory, v4l2_memory_names));
if (V4L2_TYPE_IS_MULTIPLANAR(p->type) && p->m.planes) {
pr_cont("\n");
for (i = 0; i < p->length; ++i) {
plane = &p->m.planes[i];
printk(KERN_DEBUG
"plane %d: bytesused=%d, data_offset=0x%08x, offset/userptr=0x%lx, length=%d\n",
i, plane->bytesused, plane->data_offset,
plane->m.userptr, plane->length);
}
} else {
pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n",
p->bytesused, p->m.userptr, p->length);
}
printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, flags=0x%08x, frames=%d, userbits=0x%08x\n",
tc->hours, tc->minutes, tc->seconds,
tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
}
static void v4l_print_exportbuffer(const void *arg, bool write_only)
{
const struct v4l2_exportbuffer *p = arg;
pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n",
p->fd, prt_names(p->type, v4l2_type_names),
p->index, p->plane, p->flags);
}
static void v4l_print_create_buffers(const void *arg, bool write_only)
{
const struct v4l2_create_buffers *p = arg;
pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, ",
p->index, p->count, prt_names(p->memory, v4l2_memory_names),
p->capabilities);
v4l_print_format(&p->format, write_only);
}
static void v4l_print_streamparm(const void *arg, bool write_only)
{
const struct v4l2_streamparm *p = arg;
pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
const struct v4l2_captureparm *c = &p->parm.capture;
pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, extendedmode=%d, readbuffers=%d\n",
c->capability, c->capturemode,
c->timeperframe.numerator, c->timeperframe.denominator,
c->extendedmode, c->readbuffers);
} else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
const struct v4l2_outputparm *c = &p->parm.output;
pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, extendedmode=%d, writebuffers=%d\n",
c->capability, c->outputmode,
c->timeperframe.numerator, c->timeperframe.denominator,
c->extendedmode, c->writebuffers);
} else {
pr_cont("\n");
}
}
static void v4l_print_queryctrl(const void *arg, bool write_only)
{
const struct v4l2_queryctrl *p = arg;
pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, step=%d, default=%d, flags=0x%08x\n",
p->id, p->type, (int)sizeof(p->name), p->name,
p->minimum, p->maximum,
p->step, p->default_value, p->flags);
}
static void v4l_print_query_ext_ctrl(const void *arg, bool write_only)
{
const struct v4l2_query_ext_ctrl *p = arg;
pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, nr_of_dims=%u, dims=%u,%u,%u,%u\n",
p->id, p->type, (int)sizeof(p->name), p->name,
p->minimum, p->maximum,
p->step, p->default_value, p->flags,
p->elem_size, p->elems, p->nr_of_dims,
p->dims[0], p->dims[1], p->dims[2], p->dims[3]);
}
static void v4l_print_querymenu(const void *arg, bool write_only)
{
const struct v4l2_querymenu *p = arg;
pr_cont("id=0x%x, index=%d\n", p->id, p->index);
}
static void v4l_print_control(const void *arg, bool write_only)
{
const struct v4l2_control *p = arg;
const char *name = v4l2_ctrl_get_name(p->id);
if (name)
pr_cont("name=%s, ", name);
pr_cont("id=0x%x, value=%d\n", p->id, p->value);
}
static void v4l_print_ext_controls(const void *arg, bool write_only)
{
const struct v4l2_ext_controls *p = arg;
int i;
pr_cont("which=0x%x, count=%d, error_idx=%d, request_fd=%d",
p->which, p->count, p->error_idx, p->request_fd);
for (i = 0; i < p->count; i++) {
unsigned int id = p->controls[i].id;
const char *name = v4l2_ctrl_get_name(id);
if (name)
pr_cont(", name=%s", name);
if (!p->controls[i].size)
pr_cont(", id/val=0x%x/0x%x", id, p->controls[i].value);
else
pr_cont(", id/size=0x%x/%u", id, p->controls[i].size);
}
pr_cont("\n");
}
static void v4l_print_cropcap(const void *arg, bool write_only)
{
const struct v4l2_cropcap *p = arg;
pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, defrect wxh=%dx%d, x,y=%d,%d, pixelaspect %d/%d\n",
prt_names(p->type, v4l2_type_names),
p->bounds.width, p->bounds.height,
p->bounds.left, p->bounds.top,
p->defrect.width, p->defrect.height,
p->defrect.left, p->defrect.top,
p->pixelaspect.numerator, p->pixelaspect.denominator);
}
static void v4l_print_crop(const void *arg, bool write_only)
{
const struct v4l2_crop *p = arg;
pr_cont("type=%s, wxh=%dx%d, x,y=%d,%d\n",
prt_names(p->type, v4l2_type_names),
p->c.width, p->c.height,
p->c.left, p->c.top);
}
static void v4l_print_selection(const void *arg, bool write_only)
{
const struct v4l2_selection *p = arg;
pr_cont("type=%s, target=%d, flags=0x%x, wxh=%dx%d, x,y=%d,%d\n",
prt_names(p->type, v4l2_type_names),
p->target, p->flags,
p->r.width, p->r.height, p->r.left, p->r.top);
}
static void v4l_print_jpegcompression(const void *arg, bool write_only)
{
const struct v4l2_jpegcompression *p = arg;
pr_cont("quality=%d, APPn=%d, APP_len=%d, COM_len=%d, jpeg_markers=0x%x\n",
p->quality, p->APPn, p->APP_len,
p->COM_len, p->jpeg_markers);
}
static void v4l_print_enc_idx(const void *arg, bool write_only)
{
const struct v4l2_enc_idx *p = arg;
pr_cont("entries=%d, entries_cap=%d\n",
p->entries, p->entries_cap);
}
static void v4l_print_encoder_cmd(const void *arg, bool write_only)
{
const struct v4l2_encoder_cmd *p = arg;
pr_cont("cmd=%d, flags=0x%x\n",
p->cmd, p->flags);
}
static void v4l_print_decoder_cmd(const void *arg, bool write_only)
{
const struct v4l2_decoder_cmd *p = arg;
pr_cont("cmd=%d, flags=0x%x\n", p->cmd, p->flags);
if (p->cmd == V4L2_DEC_CMD_START)
pr_info("speed=%d, format=%u\n",
p->start.speed, p->start.format);
else if (p->cmd == V4L2_DEC_CMD_STOP)
pr_info("pts=%llu\n", p->stop.pts);
}
static void v4l_print_dbg_chip_info(const void *arg, bool write_only)
{
const struct v4l2_dbg_chip_info *p = arg;
pr_cont("type=%u, ", p->match.type);
if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
pr_cont("name=%.*s, ",
(int)sizeof(p->match.name), p->match.name);
else
pr_cont("addr=%u, ", p->match.addr);
pr_cont("name=%.*s\n", (int)sizeof(p->name), p->name);
}
static void v4l_print_dbg_register(const void *arg, bool write_only)
{
const struct v4l2_dbg_register *p = arg;
pr_cont("type=%u, ", p->match.type);
if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
pr_cont("name=%.*s, ",
(int)sizeof(p->match.name), p->match.name);
else
pr_cont("addr=%u, ", p->match.addr);
pr_cont("reg=0x%llx, val=0x%llx\n",
p->reg, p->val);
}
static void v4l_print_dv_timings(const void *arg, bool write_only)
{
const struct v4l2_dv_timings *p = arg;
switch (p->type) {
case V4L2_DV_BT_656_1120:
pr_cont("type=bt-656/1120, interlaced=%u, pixelclock=%llu, width=%u, height=%u, polarities=0x%x, hfrontporch=%u, hsync=%u, hbackporch=%u, vfrontporch=%u, vsync=%u, vbackporch=%u, il_vfrontporch=%u, il_vsync=%u, il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
p->bt.interlaced, p->bt.pixelclock,
p->bt.width, p->bt.height,
p->bt.polarities, p->bt.hfrontporch,
p->bt.hsync, p->bt.hbackporch,
p->bt.vfrontporch, p->bt.vsync,
p->bt.vbackporch, p->bt.il_vfrontporch,
p->bt.il_vsync, p->bt.il_vbackporch,
p->bt.standards, p->bt.flags);
break;
default:
pr_cont("type=%d\n", p->type);
break;
}
}
static void v4l_print_enum_dv_timings(const void *arg, bool write_only)
{
const struct v4l2_enum_dv_timings *p = arg;
pr_cont("index=%u, ", p->index);
v4l_print_dv_timings(&p->timings, write_only);
}
static void v4l_print_dv_timings_cap(const void *arg, bool write_only)
{
const struct v4l2_dv_timings_cap *p = arg;
switch (p->type) {
case V4L2_DV_BT_656_1120:
pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
p->bt.min_width, p->bt.max_width,
p->bt.min_height, p->bt.max_height,
p->bt.min_pixelclock, p->bt.max_pixelclock,
p->bt.standards, p->bt.capabilities);
break;
default:
pr_cont("type=%u\n", p->type);
break;
}
}
static void v4l_print_frmsizeenum(const void *arg, bool write_only)
{
const struct v4l2_frmsizeenum *p = arg;
pr_cont("index=%u, pixelformat=%p4cc, type=%u",
p->index, &p->pixel_format, p->type);
switch (p->type) {
case V4L2_FRMSIZE_TYPE_DISCRETE:
pr_cont(", wxh=%ux%u\n",
p->discrete.width, p->discrete.height);
break;
case V4L2_FRMSIZE_TYPE_STEPWISE:
pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
p->stepwise.min_width,
p->stepwise.min_height,
p->stepwise.max_width,
p->stepwise.max_height,
p->stepwise.step_width,
p->stepwise.step_height);
break;
case V4L2_FRMSIZE_TYPE_CONTINUOUS:
default:
pr_cont("\n");
break;
}
}
static void v4l_print_frmivalenum(const void *arg, bool write_only)
{
const struct v4l2_frmivalenum *p = arg;
pr_cont("index=%u, pixelformat=%p4cc, wxh=%ux%u, type=%u",
p->index, &p->pixel_format, p->width, p->height, p->type);
switch (p->type) {
case V4L2_FRMIVAL_TYPE_DISCRETE:
pr_cont(", fps=%d/%d\n",
p->discrete.numerator,
p->discrete.denominator);
break;
case V4L2_FRMIVAL_TYPE_STEPWISE:
pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n",
p->stepwise.min.numerator,
p->stepwise.min.denominator,
p->stepwise.max.numerator,
p->stepwise.max.denominator,
p->stepwise.step.numerator,
p->stepwise.step.denominator);
break;
case V4L2_FRMIVAL_TYPE_CONTINUOUS:
default:
pr_cont("\n");
break;
}
}
static void v4l_print_event(const void *arg, bool write_only)
{
const struct v4l2_event *p = arg;
const struct v4l2_event_ctrl *c;
pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%llu.%9.9llu\n",
p->type, p->pending, p->sequence, p->id,
p->timestamp.tv_sec, p->timestamp.tv_nsec);
switch (p->type) {
case V4L2_EVENT_VSYNC:
printk(KERN_DEBUG "field=%s\n",
prt_names(p->u.vsync.field, v4l2_field_names));
break;
case V4L2_EVENT_CTRL:
c = &p->u.ctrl;
printk(KERN_DEBUG "changes=0x%x, type=%u, ",
c->changes, c->type);
if (c->type == V4L2_CTRL_TYPE_INTEGER64)
pr_cont("value64=%lld, ", c->value64);
else
pr_cont("value=%d, ", c->value);
pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, default_value=%d\n",
c->flags, c->minimum, c->maximum,
c->step, c->default_value);
break;
case V4L2_EVENT_FRAME_SYNC:
pr_cont("frame_sequence=%u\n",
p->u.frame_sync.frame_sequence);
break;
}
}
static void v4l_print_event_subscription(const void *arg, bool write_only)
{
const struct v4l2_event_subscription *p = arg;
pr_cont("type=0x%x, id=0x%x, flags=0x%x\n",
p->type, p->id, p->flags);
}
static void v4l_print_sliced_vbi_cap(const void *arg, bool write_only)
{
const struct v4l2_sliced_vbi_cap *p = arg;
int i;
pr_cont("type=%s, service_set=0x%08x\n",
prt_names(p->type, v4l2_type_names), p->service_set);
for (i = 0; i < 24; i++)
printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i,
p->service_lines[0][i],
p->service_lines[1][i]);
}
static void v4l_print_freq_band(const void *arg, bool write_only)
{
const struct v4l2_frequency_band *p = arg;
pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, rangelow=%u, rangehigh=%u, modulation=0x%x\n",
p->tuner, p->type, p->index,
p->capability, p->rangelow,
p->rangehigh, p->modulation);
}
static void v4l_print_edid(const void *arg, bool write_only)
{
const struct v4l2_edid *p = arg;
pr_cont("pad=%u, start_block=%u, blocks=%u\n",
p->pad, p->start_block, p->blocks);
}
static void v4l_print_u32(const void *arg, bool write_only)
{
pr_cont("value=%u\n", *(const u32 *)arg);
}
static void v4l_print_newline(const void *arg, bool write_only)
{
pr_cont("\n");
}
static void v4l_print_default(const void *arg, bool write_only)
{
pr_cont("driver-specific ioctl\n");
}
static bool check_ext_ctrls(struct v4l2_ext_controls *c, unsigned long ioctl)
{
__u32 i;
/* zero the reserved fields */
c->reserved[0] = 0;
for (i = 0; i < c->count; i++)
c->controls[i].reserved2[0] = 0;
switch (c->which) {
case V4L2_CID_PRIVATE_BASE:
/*
* V4L2_CID_PRIVATE_BASE cannot be used as control class
* when using extended controls.
* Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL
* is it allowed for backwards compatibility.
*/
if (ioctl == VIDIOC_G_CTRL || ioctl == VIDIOC_S_CTRL)
return false;
break;
case V4L2_CTRL_WHICH_DEF_VAL:
/* Default value cannot be changed */
if (ioctl == VIDIOC_S_EXT_CTRLS ||
ioctl == VIDIOC_TRY_EXT_CTRLS) {
c->error_idx = c->count;
return false;
}
return true;
case V4L2_CTRL_WHICH_CUR_VAL:
return true;
case V4L2_CTRL_WHICH_REQUEST_VAL:
c->error_idx = c->count;
return false;
}
/* Check that all controls are from the same control class. */
for (i = 0; i < c->count; i++) {
if (V4L2_CTRL_ID2WHICH(c->controls[i].id) != c->which) {
c->error_idx = ioctl == VIDIOC_TRY_EXT_CTRLS ? i :
c->count;
return false;
}
}
return true;
}
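
/*
 * Editor's sketch: the class-match loop above relies on
 * V4L2_CTRL_ID2WHICH() masking a control ID down to its control class,
 * so all controls in one request must share the class named by 'which'.
 * Illustration using constants from videodev2.h:
 */
static void __maybe_unused example_ctrl_class_match(void)
{
	/* V4L2_CID_BRIGHTNESS lives in the user control class ... */
	BUILD_BUG_ON(V4L2_CTRL_ID2WHICH(V4L2_CID_BRIGHTNESS) !=
		     V4L2_CTRL_CLASS_USER);
	/* ... so mixing it with a different-class 'which' fails the loop. */
}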
static int check_fmt(struct file *file, enum v4l2_buf_type type)
{
const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VIDEO_CAPTURE_MPLANE |
V4L2_CAP_VIDEO_OUTPUT |
V4L2_CAP_VIDEO_OUTPUT_MPLANE |
V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
const u32 meta_caps = V4L2_CAP_META_CAPTURE |
V4L2_CAP_META_OUTPUT;
struct video_device *vfd = video_devdata(file);
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
bool is_vid = vfd->vfl_type == VFL_TYPE_VIDEO &&
(vfd->device_caps & vid_caps);
bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
bool is_meta = vfd->vfl_type == VFL_TYPE_VIDEO &&
(vfd->device_caps & meta_caps);
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
if (ops == NULL)
return -EINVAL;
switch (type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
if ((is_vid || is_tch) && is_rx &&
(ops->vidioc_g_fmt_vid_cap || ops->vidioc_g_fmt_vid_cap_mplane))
return 0;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
if ((is_vid || is_tch) && is_rx && ops->vidioc_g_fmt_vid_cap_mplane)
return 0;
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (is_vid && is_rx && ops->vidioc_g_fmt_vid_overlay)
return 0;
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (is_vid && is_tx &&
(ops->vidioc_g_fmt_vid_out || ops->vidioc_g_fmt_vid_out_mplane))
return 0;
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_mplane)
return 0;
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_overlay)
return 0;
break;
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (is_vbi && is_rx && ops->vidioc_g_fmt_vbi_cap)
return 0;
break;
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (is_vbi && is_tx && ops->vidioc_g_fmt_vbi_out)
return 0;
break;
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (is_vbi && is_rx && ops->vidioc_g_fmt_sliced_vbi_cap)
return 0;
break;
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (is_vbi && is_tx && ops->vidioc_g_fmt_sliced_vbi_out)
return 0;
break;
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (is_sdr && is_rx && ops->vidioc_g_fmt_sdr_cap)
return 0;
break;
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (is_sdr && is_tx && ops->vidioc_g_fmt_sdr_out)
return 0;
break;
case V4L2_BUF_TYPE_META_CAPTURE:
if (is_meta && is_rx && ops->vidioc_g_fmt_meta_cap)
return 0;
break;
case V4L2_BUF_TYPE_META_OUTPUT:
if (is_meta && is_tx && ops->vidioc_g_fmt_meta_out)
return 0;
break;
default:
break;
}
return -EINVAL;
}
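
/*
 * Editor's sketch (hypothetical driver, never registered): a capture-only
 * video device that fills in just vidioc_g_fmt_vid_cap passes check_fmt()
 * only for V4L2_BUF_TYPE_VIDEO_CAPTURE, provided it was registered as
 * VFL_TYPE_VIDEO with V4L2_CAP_VIDEO_CAPTURE in device_caps. The format
 * fill below is deliberately minimal.
 */
static int example_g_fmt_vid_cap(struct file *file, void *fh,
				 struct v4l2_format *f)
{
	f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;	/* fixed format */
	return 0;
}

static const struct v4l2_ioctl_ops example_capture_ops __maybe_unused = {
	.vidioc_g_fmt_vid_cap = example_g_fmt_vid_cap,
};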
static void v4l_sanitize_colorspace(u32 pixelformat, u32 *colorspace,
u32 *encoding, u32 *quantization,
u32 *xfer_func)
{
bool is_hsv = pixelformat == V4L2_PIX_FMT_HSV24 ||
pixelformat == V4L2_PIX_FMT_HSV32;
if (!v4l2_is_colorspace_valid(*colorspace)) {
*colorspace = V4L2_COLORSPACE_DEFAULT;
*encoding = V4L2_YCBCR_ENC_DEFAULT;
*quantization = V4L2_QUANTIZATION_DEFAULT;
*xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
if ((!is_hsv && !v4l2_is_ycbcr_enc_valid(*encoding)) ||
(is_hsv && !v4l2_is_hsv_enc_valid(*encoding)))
*encoding = V4L2_YCBCR_ENC_DEFAULT;
if (!v4l2_is_quant_valid(*quantization))
*quantization = V4L2_QUANTIZATION_DEFAULT;
if (!v4l2_is_xfer_func_valid(*xfer_func))
*xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
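
/*
 * Editor's sketch: worked example of the defaulting above. An application
 * passing a bogus colorspace gets every colorimetry field reset to its
 * *_DEFAULT value, regardless of what it supplied for the other fields:
 */
static void __maybe_unused example_sanitize_colorspace(void)
{
	u32 cs = 0xdeadbeef, enc = V4L2_YCBCR_ENC_709;
	u32 quant = V4L2_QUANTIZATION_FULL_RANGE, xfer = V4L2_XFER_FUNC_709;

	v4l_sanitize_colorspace(V4L2_PIX_FMT_YUYV, &cs, &enc, &quant, &xfer);
	/* cs, enc, quant and xfer are now all *_DEFAULT */
}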
static void v4l_sanitize_format(struct v4l2_format *fmt)
{
unsigned int offset;
/* Make sure num_planes is not bogus */
if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
fmt->fmt.pix_mp.num_planes = min_t(u32, fmt->fmt.pix_mp.num_planes,
VIDEO_MAX_PLANES);
/*
* The v4l2_pix_format structure has been extended with fields that were
* not previously required to be set to zero by applications. The priv
* field, when set to a magic value, indicates that the extended fields
* are valid. Otherwise they will contain undefined values. To simplify
* the API towards drivers zero the extended fields and set the priv
* field to the magic value when the extended pixel format structure
* isn't used by applications.
*/
if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
if (fmt->fmt.pix.priv != V4L2_PIX_FMT_PRIV_MAGIC) {
fmt->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
offset = offsetof(struct v4l2_pix_format, priv)
+ sizeof(fmt->fmt.pix.priv);
memset(((void *)&fmt->fmt.pix) + offset, 0,
sizeof(fmt->fmt.pix) - offset);
}
}
/* Replace invalid colorspace values with defaults. */
if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
v4l_sanitize_colorspace(fmt->fmt.pix.pixelformat,
&fmt->fmt.pix.colorspace,
&fmt->fmt.pix.ycbcr_enc,
&fmt->fmt.pix.quantization,
&fmt->fmt.pix.xfer_func);
} else if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
u32 ycbcr_enc = fmt->fmt.pix_mp.ycbcr_enc;
u32 quantization = fmt->fmt.pix_mp.quantization;
u32 xfer_func = fmt->fmt.pix_mp.xfer_func;
v4l_sanitize_colorspace(fmt->fmt.pix_mp.pixelformat,
&fmt->fmt.pix_mp.colorspace, &ycbcr_enc,
&quantization, &xfer_func);
fmt->fmt.pix_mp.ycbcr_enc = ycbcr_enc;
fmt->fmt.pix_mp.quantization = quantization;
fmt->fmt.pix_mp.xfer_func = xfer_func;
}
}
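
/*
 * Editor's note: the pix_mp branch above copies ycbcr_enc, quantization
 * and xfer_func into u32 locals because struct v4l2_pix_format_mplane
 * declares them as __u8, while v4l_sanitize_colorspace() takes u32
 * pointers; the members cannot be passed by address directly.
 */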
static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_capability *cap = (struct v4l2_capability *)arg;
struct video_device *vfd = video_devdata(file);
int ret;
cap->version = LINUX_VERSION_CODE;
cap->device_caps = vfd->device_caps;
cap->capabilities = vfd->device_caps | V4L2_CAP_DEVICE_CAPS;
media_set_bus_info(cap->bus_info, sizeof(cap->bus_info),
vfd->dev_parent);
ret = ops->vidioc_querycap(file, fh, cap);
/*
* Drivers must not change device_caps, so check for this and
* warn if this happened.
*/
WARN_ON(cap->device_caps != vfd->device_caps);
/*
* Check that capabilities is a superset of
* vfd->device_caps | V4L2_CAP_DEVICE_CAPS
*/
WARN_ON((cap->capabilities &
(vfd->device_caps | V4L2_CAP_DEVICE_CAPS)) !=
(vfd->device_caps | V4L2_CAP_DEVICE_CAPS));
cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT;
cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT;
return ret;
}
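
/*
 * Editor's sketch (hypothetical driver callback): since the core fills in
 * version, device_caps and capabilities before calling the driver, and
 * warns if the driver changes them, a typical vidioc_querycap only
 * supplies the identity strings:
 */
static int __maybe_unused example_querycap(struct file *file, void *fh,
					   struct v4l2_capability *cap)
{
	strscpy(cap->driver, "example", sizeof(cap->driver));
	strscpy(cap->card, "Example Capture Board", sizeof(cap->card));
	/* bus_info was already set by media_set_bus_info() above */
	return 0;
}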
static int v4l_g_input(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
if (vfd->device_caps & V4L2_CAP_IO_MC) {
*(int *)arg = 0;
return 0;
}
return ops->vidioc_g_input(file, fh, arg);
}
static int v4l_g_output(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
if (vfd->device_caps & V4L2_CAP_IO_MC) {
*(int *)arg = 0;
return 0;
}
return ops->vidioc_g_output(file, fh, arg);
}
static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
int ret;
ret = v4l_enable_media_source(vfd);
if (ret)
return ret;
if (vfd->device_caps & V4L2_CAP_IO_MC)
return *(int *)arg ? -EINVAL : 0;
return ops->vidioc_s_input(file, fh, *(unsigned int *)arg);
}
static int v4l_s_output(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
if (vfd->device_caps & V4L2_CAP_IO_MC)
return *(int *)arg ? -EINVAL : 0;
return ops->vidioc_s_output(file, fh, *(unsigned int *)arg);
}
static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd;
u32 *p = arg;
vfd = video_devdata(file);
*p = v4l2_prio_max(vfd->prio);
return 0;
}
static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd;
struct v4l2_fh *vfh;
u32 *p = arg;
vfd = video_devdata(file);
if (!test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
return -ENOTTY;
vfh = file->private_data;
return v4l2_prio_change(vfd->prio, &vfh->prio, *p);
}
static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_input *p = arg;
	/*
	 * We set the V4L2_IN_CAP_STD flag here based on the ioctl
	 * handlers provided by the driver. If the driver doesn't
	 * support the standards API for a specific input, it must
	 * override the flag.
	 */
if (is_valid_ioctl(vfd, VIDIOC_S_STD))
p->capabilities |= V4L2_IN_CAP_STD;
if (vfd->device_caps & V4L2_CAP_IO_MC) {
if (p->index)
return -EINVAL;
strscpy(p->name, vfd->name, sizeof(p->name));
p->type = V4L2_INPUT_TYPE_CAMERA;
return 0;
}
return ops->vidioc_enum_input(file, fh, p);
}
static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_output *p = arg;
	/*
	 * We set the V4L2_OUT_CAP_STD flag here based on the ioctl
	 * handlers provided by the driver. If the driver doesn't
	 * support the standards API for a specific output, it must
	 * override the flag.
	 */
if (is_valid_ioctl(vfd, VIDIOC_S_STD))
p->capabilities |= V4L2_OUT_CAP_STD;
if (vfd->device_caps & V4L2_CAP_IO_MC) {
if (p->index)
return -EINVAL;
strscpy(p->name, vfd->name, sizeof(p->name));
p->type = V4L2_OUTPUT_TYPE_ANALOG;
return 0;
}
return ops->vidioc_enum_output(file, fh, p);
}
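
/*
 * Editor's sketch (hypothetical callback): a non-media-controller driver
 * with two composite inputs would back VIDIOC_ENUMINPUT like this; devices
 * with V4L2_CAP_IO_MC never reach the callback because of the branch in
 * v4l_enuminput() above.
 */
static int __maybe_unused example_enum_input(struct file *file, void *fh,
					     struct v4l2_input *i)
{
	if (i->index >= 2)
		return -EINVAL;
	snprintf(i->name, sizeof(i->name), "Composite %u", i->index);
	i->type = V4L2_INPUT_TYPE_CAMERA;
	i->std = V4L2_STD_ALL;
	return 0;
}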
static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
{
const unsigned sz = sizeof(fmt->description);
const char *descr = NULL;
u32 flags = 0;
/*
* We depart from the normal coding style here since the descriptions
* should be aligned so it is easy to see which descriptions will be
* longer than 31 characters (the max length for a description).
* And frankly, this is easier to read anyway.
*
* Note that gcc will use O(log N) comparisons to find the right case.
*/
switch (fmt->pixelformat) {
/* Max description length mask: descr = "0123456789012345678901234567890" */
case V4L2_PIX_FMT_RGB332: descr = "8-bit RGB 3-3-2"; break;
case V4L2_PIX_FMT_RGB444: descr = "16-bit A/XRGB 4-4-4-4"; break;
case V4L2_PIX_FMT_ARGB444: descr = "16-bit ARGB 4-4-4-4"; break;
case V4L2_PIX_FMT_XRGB444: descr = "16-bit XRGB 4-4-4-4"; break;
case V4L2_PIX_FMT_RGBA444: descr = "16-bit RGBA 4-4-4-4"; break;
case V4L2_PIX_FMT_RGBX444: descr = "16-bit RGBX 4-4-4-4"; break;
case V4L2_PIX_FMT_ABGR444: descr = "16-bit ABGR 4-4-4-4"; break;
case V4L2_PIX_FMT_XBGR444: descr = "16-bit XBGR 4-4-4-4"; break;
case V4L2_PIX_FMT_BGRA444: descr = "16-bit BGRA 4-4-4-4"; break;
case V4L2_PIX_FMT_BGRX444: descr = "16-bit BGRX 4-4-4-4"; break;
case V4L2_PIX_FMT_RGB555: descr = "16-bit A/XRGB 1-5-5-5"; break;
case V4L2_PIX_FMT_ARGB555: descr = "16-bit ARGB 1-5-5-5"; break;
case V4L2_PIX_FMT_XRGB555: descr = "16-bit XRGB 1-5-5-5"; break;
case V4L2_PIX_FMT_ABGR555: descr = "16-bit ABGR 1-5-5-5"; break;
case V4L2_PIX_FMT_XBGR555: descr = "16-bit XBGR 1-5-5-5"; break;
case V4L2_PIX_FMT_RGBA555: descr = "16-bit RGBA 5-5-5-1"; break;
case V4L2_PIX_FMT_RGBX555: descr = "16-bit RGBX 5-5-5-1"; break;
case V4L2_PIX_FMT_BGRA555: descr = "16-bit BGRA 5-5-5-1"; break;
case V4L2_PIX_FMT_BGRX555: descr = "16-bit BGRX 5-5-5-1"; break;
case V4L2_PIX_FMT_RGB565: descr = "16-bit RGB 5-6-5"; break;
case V4L2_PIX_FMT_RGB555X: descr = "16-bit A/XRGB 1-5-5-5 BE"; break;
case V4L2_PIX_FMT_ARGB555X: descr = "16-bit ARGB 1-5-5-5 BE"; break;
case V4L2_PIX_FMT_XRGB555X: descr = "16-bit XRGB 1-5-5-5 BE"; break;
case V4L2_PIX_FMT_RGB565X: descr = "16-bit RGB 5-6-5 BE"; break;
case V4L2_PIX_FMT_BGR666: descr = "18-bit BGRX 6-6-6-14"; break;
case V4L2_PIX_FMT_BGR24: descr = "24-bit BGR 8-8-8"; break;
case V4L2_PIX_FMT_RGB24: descr = "24-bit RGB 8-8-8"; break;
case V4L2_PIX_FMT_BGR32: descr = "32-bit BGRA/X 8-8-8-8"; break;
case V4L2_PIX_FMT_ABGR32: descr = "32-bit BGRA 8-8-8-8"; break;
case V4L2_PIX_FMT_XBGR32: descr = "32-bit BGRX 8-8-8-8"; break;
case V4L2_PIX_FMT_RGB32: descr = "32-bit A/XRGB 8-8-8-8"; break;
case V4L2_PIX_FMT_ARGB32: descr = "32-bit ARGB 8-8-8-8"; break;
case V4L2_PIX_FMT_XRGB32: descr = "32-bit XRGB 8-8-8-8"; break;
case V4L2_PIX_FMT_BGRA32: descr = "32-bit ABGR 8-8-8-8"; break;
case V4L2_PIX_FMT_BGRX32: descr = "32-bit XBGR 8-8-8-8"; break;
case V4L2_PIX_FMT_RGBA32: descr = "32-bit RGBA 8-8-8-8"; break;
case V4L2_PIX_FMT_RGBX32: descr = "32-bit RGBX 8-8-8-8"; break;
case V4L2_PIX_FMT_RGBX1010102: descr = "32-bit RGBX 10-10-10-2"; break;
case V4L2_PIX_FMT_RGBA1010102: descr = "32-bit RGBA 10-10-10-2"; break;
case V4L2_PIX_FMT_ARGB2101010: descr = "32-bit ARGB 2-10-10-10"; break;
case V4L2_PIX_FMT_BGR48_12: descr = "12-bit Depth BGR"; break;
case V4L2_PIX_FMT_ABGR64_12: descr = "12-bit Depth BGRA"; break;
case V4L2_PIX_FMT_GREY: descr = "8-bit Greyscale"; break;
case V4L2_PIX_FMT_Y4: descr = "4-bit Greyscale"; break;
case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break;
case V4L2_PIX_FMT_Y10: descr = "10-bit Greyscale"; break;
case V4L2_PIX_FMT_Y12: descr = "12-bit Greyscale"; break;
case V4L2_PIX_FMT_Y012: descr = "12-bit Greyscale (bits 15-4)"; break;
case V4L2_PIX_FMT_Y14: descr = "14-bit Greyscale"; break;
case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break;
case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break;
case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break;
case V4L2_PIX_FMT_Y10P: descr = "10-bit Greyscale (MIPI Packed)"; break;
case V4L2_PIX_FMT_IPU3_Y10: descr = "10-bit greyscale (IPU3 Packed)"; break;
case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break;
case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break;
case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break;
case V4L2_PIX_FMT_INZI: descr = "Planar 10:16 Greyscale Depth"; break;
case V4L2_PIX_FMT_CNF4: descr = "4-bit Depth Confidence (Packed)"; break;
case V4L2_PIX_FMT_PAL8: descr = "8-bit Palette"; break;
case V4L2_PIX_FMT_UV8: descr = "8-bit Chrominance UV 4-4"; break;
case V4L2_PIX_FMT_YVU410: descr = "Planar YVU 4:1:0"; break;
case V4L2_PIX_FMT_YVU420: descr = "Planar YVU 4:2:0"; break;
case V4L2_PIX_FMT_YUYV: descr = "YUYV 4:2:2"; break;
case V4L2_PIX_FMT_YYUV: descr = "YYUV 4:2:2"; break;
case V4L2_PIX_FMT_YVYU: descr = "YVYU 4:2:2"; break;
case V4L2_PIX_FMT_UYVY: descr = "UYVY 4:2:2"; break;
case V4L2_PIX_FMT_VYUY: descr = "VYUY 4:2:2"; break;
case V4L2_PIX_FMT_YUV422P: descr = "Planar YUV 4:2:2"; break;
case V4L2_PIX_FMT_YUV411P: descr = "Planar YUV 4:1:1"; break;
case V4L2_PIX_FMT_Y41P: descr = "YUV 4:1:1 (Packed)"; break;
case V4L2_PIX_FMT_YUV444: descr = "16-bit A/XYUV 4-4-4-4"; break;
case V4L2_PIX_FMT_YUV555: descr = "16-bit A/XYUV 1-5-5-5"; break;
case V4L2_PIX_FMT_YUV565: descr = "16-bit YUV 5-6-5"; break;
case V4L2_PIX_FMT_YUV24: descr = "24-bit YUV 4:4:4 8-8-8"; break;
case V4L2_PIX_FMT_YUV32: descr = "32-bit A/XYUV 8-8-8-8"; break;
case V4L2_PIX_FMT_AYUV32: descr = "32-bit AYUV 8-8-8-8"; break;
case V4L2_PIX_FMT_XYUV32: descr = "32-bit XYUV 8-8-8-8"; break;
case V4L2_PIX_FMT_VUYA32: descr = "32-bit VUYA 8-8-8-8"; break;
case V4L2_PIX_FMT_VUYX32: descr = "32-bit VUYX 8-8-8-8"; break;
case V4L2_PIX_FMT_YUVA32: descr = "32-bit YUVA 8-8-8-8"; break;
case V4L2_PIX_FMT_YUVX32: descr = "32-bit YUVX 8-8-8-8"; break;
case V4L2_PIX_FMT_YUV410: descr = "Planar YUV 4:1:0"; break;
case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break;
case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break;
case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break;
case V4L2_PIX_FMT_YUV48_12: descr = "12-bit YUV 4:4:4 Packed"; break;
case V4L2_PIX_FMT_NV12: descr = "Y/UV 4:2:0"; break;
case V4L2_PIX_FMT_NV21: descr = "Y/VU 4:2:0"; break;
case V4L2_PIX_FMT_NV16: descr = "Y/UV 4:2:2"; break;
case V4L2_PIX_FMT_NV61: descr = "Y/VU 4:2:2"; break;
case V4L2_PIX_FMT_NV24: descr = "Y/UV 4:4:4"; break;
case V4L2_PIX_FMT_NV42: descr = "Y/VU 4:4:4"; break;
case V4L2_PIX_FMT_P010: descr = "10-bit Y/UV 4:2:0"; break;
case V4L2_PIX_FMT_P012: descr = "12-bit Y/UV 4:2:0"; break;
case V4L2_PIX_FMT_NV12_4L4: descr = "Y/UV 4:2:0 (4x4 Linear)"; break;
case V4L2_PIX_FMT_NV12_16L16: descr = "Y/UV 4:2:0 (16x16 Linear)"; break;
case V4L2_PIX_FMT_NV12_32L32: descr = "Y/UV 4:2:0 (32x32 Linear)"; break;
case V4L2_PIX_FMT_NV15_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break;
case V4L2_PIX_FMT_P010_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break;
case V4L2_PIX_FMT_NV12M: descr = "Y/UV 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV21M: descr = "Y/VU 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV16M: descr = "Y/UV 4:2:2 (N-C)"; break;
case V4L2_PIX_FMT_NV61M: descr = "Y/VU 4:2:2 (N-C)"; break;
case V4L2_PIX_FMT_NV12MT: descr = "Y/UV 4:2:0 (64x32 MB, N-C)"; break;
case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/UV 4:2:0 (16x16 MB, N-C)"; break;
case V4L2_PIX_FMT_P012M: descr = "12-bit Y/UV 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break;
case V4L2_PIX_FMT_YVU422M: descr = "Planar YVU 4:2:2 (N-C)"; break;
case V4L2_PIX_FMT_YUV444M: descr = "Planar YUV 4:4:4 (N-C)"; break;
case V4L2_PIX_FMT_YVU444M: descr = "Planar YVU 4:4:4 (N-C)"; break;
case V4L2_PIX_FMT_SBGGR8: descr = "8-bit Bayer BGBG/GRGR"; break;
case V4L2_PIX_FMT_SGBRG8: descr = "8-bit Bayer GBGB/RGRG"; break;
case V4L2_PIX_FMT_SGRBG8: descr = "8-bit Bayer GRGR/BGBG"; break;
case V4L2_PIX_FMT_SRGGB8: descr = "8-bit Bayer RGRG/GBGB"; break;
case V4L2_PIX_FMT_SBGGR10: descr = "10-bit Bayer BGBG/GRGR"; break;
case V4L2_PIX_FMT_SGBRG10: descr = "10-bit Bayer GBGB/RGRG"; break;
case V4L2_PIX_FMT_SGRBG10: descr = "10-bit Bayer GRGR/BGBG"; break;
case V4L2_PIX_FMT_SRGGB10: descr = "10-bit Bayer RGRG/GBGB"; break;
case V4L2_PIX_FMT_SBGGR10P: descr = "10-bit Bayer BGBG/GRGR Packed"; break;
case V4L2_PIX_FMT_SGBRG10P: descr = "10-bit Bayer GBGB/RGRG Packed"; break;
case V4L2_PIX_FMT_SGRBG10P: descr = "10-bit Bayer GRGR/BGBG Packed"; break;
case V4L2_PIX_FMT_SRGGB10P: descr = "10-bit Bayer RGRG/GBGB Packed"; break;
case V4L2_PIX_FMT_IPU3_SBGGR10: descr = "10-bit bayer BGGR IPU3 Packed"; break;
case V4L2_PIX_FMT_IPU3_SGBRG10: descr = "10-bit bayer GBRG IPU3 Packed"; break;
case V4L2_PIX_FMT_IPU3_SGRBG10: descr = "10-bit bayer GRBG IPU3 Packed"; break;
case V4L2_PIX_FMT_IPU3_SRGGB10: descr = "10-bit bayer RGGB IPU3 Packed"; break;
case V4L2_PIX_FMT_SBGGR10ALAW8: descr = "8-bit Bayer BGBG/GRGR (A-law)"; break;
case V4L2_PIX_FMT_SGBRG10ALAW8: descr = "8-bit Bayer GBGB/RGRG (A-law)"; break;
case V4L2_PIX_FMT_SGRBG10ALAW8: descr = "8-bit Bayer GRGR/BGBG (A-law)"; break;
case V4L2_PIX_FMT_SRGGB10ALAW8: descr = "8-bit Bayer RGRG/GBGB (A-law)"; break;
case V4L2_PIX_FMT_SBGGR10DPCM8: descr = "8-bit Bayer BGBG/GRGR (DPCM)"; break;
case V4L2_PIX_FMT_SGBRG10DPCM8: descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break;
case V4L2_PIX_FMT_SGRBG10DPCM8: descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break;
case V4L2_PIX_FMT_SRGGB10DPCM8: descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break;
case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break;
case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break;
case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break;
case V4L2_PIX_FMT_SRGGB12: descr = "12-bit Bayer RGRG/GBGB"; break;
case V4L2_PIX_FMT_SBGGR12P: descr = "12-bit Bayer BGBG/GRGR Packed"; break;
case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break;
case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break;
case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break;
case V4L2_PIX_FMT_SBGGR14: descr = "14-bit Bayer BGBG/GRGR"; break;
case V4L2_PIX_FMT_SGBRG14: descr = "14-bit Bayer GBGB/RGRG"; break;
case V4L2_PIX_FMT_SGRBG14: descr = "14-bit Bayer GRGR/BGBG"; break;
case V4L2_PIX_FMT_SRGGB14: descr = "14-bit Bayer RGRG/GBGB"; break;
case V4L2_PIX_FMT_SBGGR14P: descr = "14-bit Bayer BGBG/GRGR Packed"; break;
case V4L2_PIX_FMT_SGBRG14P: descr = "14-bit Bayer GBGB/RGRG Packed"; break;
case V4L2_PIX_FMT_SGRBG14P: descr = "14-bit Bayer GRGR/BGBG Packed"; break;
case V4L2_PIX_FMT_SRGGB14P: descr = "14-bit Bayer RGRG/GBGB Packed"; break;
case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break;
case V4L2_PIX_FMT_SGBRG16: descr = "16-bit Bayer GBGB/RGRG"; break;
case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break;
case V4L2_PIX_FMT_SRGGB16: descr = "16-bit Bayer RGRG/GBGB"; break;
case V4L2_PIX_FMT_SN9C20X_I420: descr = "GSPCA SN9C20X I420"; break;
case V4L2_PIX_FMT_SPCA501: descr = "GSPCA SPCA501"; break;
case V4L2_PIX_FMT_SPCA505: descr = "GSPCA SPCA505"; break;
case V4L2_PIX_FMT_SPCA508: descr = "GSPCA SPCA508"; break;
case V4L2_PIX_FMT_STV0680: descr = "GSPCA STV0680"; break;
case V4L2_PIX_FMT_TM6000: descr = "A/V + VBI Mux Packet"; break;
case V4L2_PIX_FMT_CIT_YYVYUY: descr = "GSPCA CIT YYVYUY"; break;
case V4L2_PIX_FMT_KONICA420: descr = "GSPCA KONICA420"; break;
case V4L2_PIX_FMT_MM21: descr = "Mediatek 8-bit Block Format"; break;
case V4L2_PIX_FMT_HSV24: descr = "24-bit HSV 8-8-8"; break;
case V4L2_PIX_FMT_HSV32: descr = "32-bit XHSV 8-8-8-8"; break;
case V4L2_SDR_FMT_CU8: descr = "Complex U8"; break;
case V4L2_SDR_FMT_CU16LE: descr = "Complex U16LE"; break;
case V4L2_SDR_FMT_CS8: descr = "Complex S8"; break;
case V4L2_SDR_FMT_CS14LE: descr = "Complex S14LE"; break;
case V4L2_SDR_FMT_RU12LE: descr = "Real U12LE"; break;
case V4L2_SDR_FMT_PCU16BE: descr = "Planar Complex U16BE"; break;
case V4L2_SDR_FMT_PCU18BE: descr = "Planar Complex U18BE"; break;
case V4L2_SDR_FMT_PCU20BE: descr = "Planar Complex U20BE"; break;
case V4L2_TCH_FMT_DELTA_TD16: descr = "16-bit Signed Deltas"; break;
case V4L2_TCH_FMT_DELTA_TD08: descr = "8-bit Signed Deltas"; break;
case V4L2_TCH_FMT_TU16: descr = "16-bit Unsigned Touch Data"; break;
case V4L2_TCH_FMT_TU08: descr = "8-bit Unsigned Touch Data"; break;
case V4L2_META_FMT_VSP1_HGO: descr = "R-Car VSP1 1-D Histogram"; break;
case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break;
case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break;
case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break;
case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break;
case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break;
case V4L2_META_FMT_RK_ISP1_STAT_3A: descr = "Rockchip ISP1 3A Statistics"; break;
case V4L2_PIX_FMT_NV12_8L128: descr = "NV12 (8x128 Linear)"; break;
case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break;
case V4L2_PIX_FMT_NV12_10BE_8L128: descr = "10-bit NV12 (8x128 Linear, BE)"; break;
case V4L2_PIX_FMT_NV12M_10BE_8L128: descr = "10-bit NV12M (8x128 Linear, BE)"; break;
case V4L2_PIX_FMT_Y210: descr = "10-bit YUYV Packed"; break;
case V4L2_PIX_FMT_Y212: descr = "12-bit YUYV Packed"; break;
case V4L2_PIX_FMT_Y216: descr = "16-bit YUYV Packed"; break;
default:
/* Compressed formats */
flags = V4L2_FMT_FLAG_COMPRESSED;
switch (fmt->pixelformat) {
/* Max description length mask: descr = "0123456789012345678901234567890" */
case V4L2_PIX_FMT_MJPEG: descr = "Motion-JPEG"; break;
case V4L2_PIX_FMT_JPEG: descr = "JFIF JPEG"; break;
case V4L2_PIX_FMT_DV: descr = "1394"; break;
case V4L2_PIX_FMT_MPEG: descr = "MPEG-1/2/4"; break;
case V4L2_PIX_FMT_H264: descr = "H.264"; break;
case V4L2_PIX_FMT_H264_NO_SC: descr = "H.264 (No Start Codes)"; break;
case V4L2_PIX_FMT_H264_MVC: descr = "H.264 MVC"; break;
case V4L2_PIX_FMT_H264_SLICE: descr = "H.264 Parsed Slice Data"; break;
case V4L2_PIX_FMT_H263: descr = "H.263"; break;
case V4L2_PIX_FMT_MPEG1: descr = "MPEG-1 ES"; break;
case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break;
case V4L2_PIX_FMT_MPEG2_SLICE: descr = "MPEG-2 Parsed Slice Data"; break;
case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 Part 2 ES"; break;
case V4L2_PIX_FMT_XVID: descr = "Xvid"; break;
case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break;
case V4L2_PIX_FMT_VC1_ANNEX_L: descr = "VC-1 (SMPTE 412M Annex L)"; break;
case V4L2_PIX_FMT_VP8: descr = "VP8"; break;
case V4L2_PIX_FMT_VP8_FRAME: descr = "VP8 Frame"; break;
case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
case V4L2_PIX_FMT_VP9_FRAME: descr = "VP9 Frame"; break;
case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */
case V4L2_PIX_FMT_HEVC_SLICE: descr = "HEVC Parsed Slice Data"; break;
case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */
case V4L2_PIX_FMT_FWHT_STATELESS: descr = "FWHT Stateless"; break; /* used in vicodec */
case V4L2_PIX_FMT_SPK: descr = "Sorenson Spark"; break;
case V4L2_PIX_FMT_RV30: descr = "RealVideo 8"; break;
case V4L2_PIX_FMT_RV40: descr = "RealVideo 9 & 10"; break;
case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break;
case V4L2_PIX_FMT_WNVA: descr = "WNVA"; break;
case V4L2_PIX_FMT_SN9C10X: descr = "GSPCA SN9C10X"; break;
case V4L2_PIX_FMT_PWC1: descr = "Raw Philips Webcam Type (Old)"; break;
case V4L2_PIX_FMT_PWC2: descr = "Raw Philips Webcam Type (New)"; break;
case V4L2_PIX_FMT_ET61X251: descr = "GSPCA ET61X251"; break;
case V4L2_PIX_FMT_SPCA561: descr = "GSPCA SPCA561"; break;
case V4L2_PIX_FMT_PAC207: descr = "GSPCA PAC207"; break;
case V4L2_PIX_FMT_MR97310A: descr = "GSPCA MR97310A"; break;
case V4L2_PIX_FMT_JL2005BCD: descr = "GSPCA JL2005BCD"; break;
case V4L2_PIX_FMT_SN9C2028: descr = "GSPCA SN9C2028"; break;
case V4L2_PIX_FMT_SQ905C: descr = "GSPCA SQ905C"; break;
case V4L2_PIX_FMT_PJPG: descr = "GSPCA PJPG"; break;
case V4L2_PIX_FMT_OV511: descr = "GSPCA OV511"; break;
case V4L2_PIX_FMT_OV518: descr = "GSPCA OV518"; break;
case V4L2_PIX_FMT_JPGL: descr = "JPEG Lite"; break;
case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break;
case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break;
case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break;
case V4L2_PIX_FMT_QC08C: descr = "QCOM Compressed 8-bit Format"; break;
case V4L2_PIX_FMT_QC10C: descr = "QCOM Compressed 10-bit Format"; break;
case V4L2_PIX_FMT_AJPG: descr = "Aspeed JPEG"; break;
case V4L2_PIX_FMT_AV1_FRAME: descr = "AV1 Frame"; break;
case V4L2_PIX_FMT_MT2110T: descr = "Mediatek 10bit Tile Mode"; break;
case V4L2_PIX_FMT_MT2110R: descr = "Mediatek 10bit Raster Mode"; break;
default:
if (fmt->description[0])
return;
WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
flags = 0;
snprintf(fmt->description, sz, "%p4cc",
&fmt->pixelformat);
break;
}
}
if (descr)
WARN_ON(strscpy(fmt->description, descr, sz) < 0);
fmt->flags |= flags;
}
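
/*
 * Editor's sketch (hypothetical callback): drivers normally leave
 * description[] and flags zeroed and let v4l_fill_fmtdesc() above supply
 * them; only pixel formats unknown to the core need a driver-provided
 * string.
 */
static int __maybe_unused example_enum_fmt_vid_cap(struct file *file, void *fh,
						   struct v4l2_fmtdesc *f)
{
	static const u32 formats[] = { V4L2_PIX_FMT_YUYV, V4L2_PIX_FMT_NV12 };

	if (f->index >= ARRAY_SIZE(formats))
		return -EINVAL;
	f->pixelformat = formats[f->index];
	/* description and flags stay zeroed: the core fills them in */
	return 0;
}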
static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_fmtdesc *p = arg;
int ret = check_fmt(file, p->type);
u32 mbus_code;
u32 cap_mask;
if (ret)
return ret;
ret = -EINVAL;
if (!(vdev->device_caps & V4L2_CAP_IO_MC))
p->mbus_code = 0;
mbus_code = p->mbus_code;
memset_after(p, 0, type);
p->mbus_code = mbus_code;
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
cap_mask = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
V4L2_CAP_VIDEO_M2M_MPLANE;
if (!!(vdev->device_caps & cap_mask) !=
(p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE))
break;
if (unlikely(!ops->vidioc_enum_fmt_vid_cap))
break;
ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg);
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_enum_fmt_vid_overlay))
break;
ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
cap_mask = V4L2_CAP_VIDEO_OUTPUT_MPLANE |
V4L2_CAP_VIDEO_M2M_MPLANE;
if (!!(vdev->device_caps & cap_mask) !=
(p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
break;
if (unlikely(!ops->vidioc_enum_fmt_vid_out))
break;
ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg);
break;
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_enum_fmt_sdr_cap))
break;
ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg);
break;
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_enum_fmt_sdr_out))
break;
ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg);
break;
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_enum_fmt_meta_cap))
break;
ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg);
break;
case V4L2_BUF_TYPE_META_OUTPUT:
if (unlikely(!ops->vidioc_enum_fmt_meta_out))
break;
ret = ops->vidioc_enum_fmt_meta_out(file, fh, arg);
break;
}
if (ret == 0)
v4l_fill_fmtdesc(p);
return ret;
}
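
/*
 * Editor's sketch: memset_after(p, 0, type) in v4l_enum_fmt() zeroes every
 * byte of *p that follows the named member, which is why mbus_code is
 * saved and restored around it. Ignoring the macro's type checking, the
 * expansion for struct v4l2_fmtdesc is roughly:
 */
static void __maybe_unused example_memset_after_fmtdesc(struct v4l2_fmtdesc *p)
{
	size_t off = offsetof(struct v4l2_fmtdesc, type) + sizeof(p->type);

	memset((u8 *)p + off, 0, sizeof(*p) - off);
}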
static void v4l_pix_format_touch(struct v4l2_pix_format *p)
{
/*
* The v4l2_pix_format structure contains fields that make no sense for
* touch. Set them to default values in this case.
*/
p->field = V4L2_FIELD_NONE;
p->colorspace = V4L2_COLORSPACE_RAW;
p->flags = 0;
p->ycbcr_enc = 0;
p->quantization = 0;
p->xfer_func = 0;
}
static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
int ret = check_fmt(file, p->type);
if (ret)
return ret;
memset(&p->fmt, 0, sizeof(p->fmt));
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
if (unlikely(!ops->vidioc_g_fmt_vid_cap))
break;
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
if (vfd->vfl_type == VFL_TYPE_TOUCH)
v4l_pix_format_touch(&p->fmt.pix);
return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
return ops->vidioc_g_fmt_vid_overlay(file, fh, arg);
case V4L2_BUF_TYPE_VBI_CAPTURE:
return ops->vidioc_g_fmt_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_g_fmt_vid_out))
break;
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
ret = ops->vidioc_g_fmt_vid_out(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
return ret;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg);
case V4L2_BUF_TYPE_VBI_OUTPUT:
return ops->vidioc_g_fmt_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
return ops->vidioc_g_fmt_sdr_cap(file, fh, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
return ops->vidioc_g_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
return ops->vidioc_g_fmt_meta_cap(file, fh, arg);
case V4L2_BUF_TYPE_META_OUTPUT:
return ops->vidioc_g_fmt_meta_out(file, fh, arg);
}
return -EINVAL;
}
static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
int ret = check_fmt(file, p->type);
unsigned int i;
if (ret)
return ret;
ret = v4l_enable_media_source(vfd);
if (ret)
return ret;
v4l_sanitize_format(p);
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_vid_cap))
break;
memset_after(p, 0, fmt.pix);
ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
if (vfd->vfl_type == VFL_TYPE_TOUCH)
v4l_pix_format_touch(&p->fmt.pix);
return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
break;
memset_after(p, 0, fmt.pix_mp.xfer_func);
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
memset_after(&p->fmt.pix_mp.plane_fmt[i],
0, bytesperline);
return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
break;
memset_after(p, 0, fmt.win);
p->fmt.win.clips = NULL;
p->fmt.win.clipcount = 0;
p->fmt.win.bitmap = NULL;
return ops->vidioc_s_fmt_vid_overlay(file, fh, arg);
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
break;
memset_after(p, 0, fmt.vbi.flags);
return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
break;
memset_after(p, 0, fmt.sliced.io_size);
return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vid_out))
break;
memset_after(p, 0, fmt.pix);
ret = ops->vidioc_s_fmt_vid_out(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
return ret;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
break;
memset_after(p, 0, fmt.pix_mp.xfer_func);
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
memset_after(&p->fmt.pix_mp.plane_fmt[i],
0, bytesperline);
return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
break;
memset_after(p, 0, fmt.win);
p->fmt.win.clips = NULL;
p->fmt.win.clipcount = 0;
p->fmt.win.bitmap = NULL;
return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg);
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vbi_out))
break;
memset_after(p, 0, fmt.vbi.flags);
return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
break;
memset_after(p, 0, fmt.sliced.io_size);
return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
break;
memset_after(p, 0, fmt.sdr.buffersize);
return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sdr_out))
break;
memset_after(p, 0, fmt.sdr.buffersize);
return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_meta_cap))
break;
memset_after(p, 0, fmt.meta);
return ops->vidioc_s_fmt_meta_cap(file, fh, arg);
case V4L2_BUF_TYPE_META_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_meta_out))
break;
memset_after(p, 0, fmt.meta);
return ops->vidioc_s_fmt_meta_out(file, fh, arg);
}
return -EINVAL;
}
static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
int ret = check_fmt(file, p->type);
unsigned int i;
if (ret)
return ret;
v4l_sanitize_format(p);
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_vid_cap))
break;
memset_after(p, 0, fmt.pix);
ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
if (vfd->vfl_type == VFL_TYPE_TOUCH)
v4l_pix_format_touch(&p->fmt.pix);
return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
break;
memset_after(p, 0, fmt.pix_mp.xfer_func);
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
memset_after(&p->fmt.pix_mp.plane_fmt[i],
0, bytesperline);
return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
break;
memset_after(p, 0, fmt.win);
p->fmt.win.clips = NULL;
p->fmt.win.clipcount = 0;
p->fmt.win.bitmap = NULL;
return ops->vidioc_try_fmt_vid_overlay(file, fh, arg);
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
break;
memset_after(p, 0, fmt.vbi.flags);
return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
break;
memset_after(p, 0, fmt.sliced.io_size);
return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vid_out))
break;
memset_after(p, 0, fmt.pix);
ret = ops->vidioc_try_fmt_vid_out(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
return ret;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
break;
memset_after(p, 0, fmt.pix_mp.xfer_func);
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
memset_after(&p->fmt.pix_mp.plane_fmt[i],
0, bytesperline);
return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
break;
memset_after(p, 0, fmt.win);
p->fmt.win.clips = NULL;
p->fmt.win.clipcount = 0;
p->fmt.win.bitmap = NULL;
return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg);
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vbi_out))
break;
memset_after(p, 0, fmt.vbi.flags);
return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
break;
memset_after(p, 0, fmt.sliced.io_size);
return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
break;
memset_after(p, 0, fmt.sdr.buffersize);
return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sdr_out))
break;
memset_after(p, 0, fmt.sdr.buffersize);
return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_meta_cap))
break;
memset_after(p, 0, fmt.meta);
return ops->vidioc_try_fmt_meta_cap(file, fh, arg);
case V4L2_BUF_TYPE_META_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_meta_out))
break;
memset_after(p, 0, fmt.meta);
return ops->vidioc_try_fmt_meta_out(file, fh, arg);
}
return -EINVAL;
}
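
/*
 * Editor's sketch (hypothetical callback): a vidioc_try_fmt_vid_cap
 * implementation is expected to adjust the request to something the
 * hardware supports rather than fail it; constants below are invented
 * hardware limits.
 */
static int __maybe_unused example_try_fmt_vid_cap(struct file *file, void *fh,
						  struct v4l2_format *f)
{
	struct v4l2_pix_format *pix = &f->fmt.pix;

	if (pix->pixelformat != V4L2_PIX_FMT_YUYV)
		pix->pixelformat = V4L2_PIX_FMT_YUYV;	/* only format */
	pix->width = clamp(pix->width, 48U, 1920U);
	pix->height = clamp(pix->height, 32U, 1080U);
	pix->field = V4L2_FIELD_NONE;
	pix->bytesperline = pix->width * 2;		/* 2 bytes/pixel */
	pix->sizeimage = pix->bytesperline * pix->height;
	return 0;
}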
static int v4l_streamon(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
return ops->vidioc_streamon(file, fh, *(unsigned int *)arg);
}
static int v4l_streamoff(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
return ops->vidioc_streamoff(file, fh, *(unsigned int *)arg);
}
static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_tuner *p = arg;
int err;
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
err = ops->vidioc_g_tuner(file, fh, p);
if (!err)
p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
return err;
}
static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_tuner *p = arg;
int ret;
ret = v4l_enable_media_source(vfd);
if (ret)
return ret;
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
return ops->vidioc_s_tuner(file, fh, p);
}
static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_modulator *p = arg;
int err;
if (vfd->vfl_type == VFL_TYPE_RADIO)
p->type = V4L2_TUNER_RADIO;
err = ops->vidioc_g_modulator(file, fh, p);
if (!err)
p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
return err;
}
static int v4l_s_modulator(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_modulator *p = arg;
if (vfd->vfl_type == VFL_TYPE_RADIO)
p->type = V4L2_TUNER_RADIO;
return ops->vidioc_s_modulator(file, fh, p);
}
static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_frequency *p = arg;
if (vfd->vfl_type == VFL_TYPE_SDR)
p->type = V4L2_TUNER_SDR;
else
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
return ops->vidioc_g_frequency(file, fh, p);
}
static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
const struct v4l2_frequency *p = arg;
enum v4l2_tuner_type type;
int ret;
ret = v4l_enable_media_source(vfd);
if (ret)
return ret;
if (vfd->vfl_type == VFL_TYPE_SDR) {
if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF)
return -EINVAL;
} else {
type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
if (type != p->type)
return -EINVAL;
}
return ops->vidioc_s_frequency(file, fh, p);
}
static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_standard *p = arg;
return v4l_video_std_enumstd(p, vfd->tvnorms);
}
static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
v4l2_std_id id = *(v4l2_std_id *)arg, norm;
int ret;
ret = v4l_enable_media_source(vfd);
if (ret)
return ret;
norm = id & vfd->tvnorms;
if (vfd->tvnorms && !norm) /* Check if std is supported */
return -EINVAL;
/* Calls the specific handler */
return ops->vidioc_s_std(file, fh, norm);
}
static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
v4l2_std_id *p = arg;
int ret;
ret = v4l_enable_media_source(vfd);
if (ret)
return ret;
/*
* If no signal is detected, then the driver should return
* V4L2_STD_UNKNOWN. Otherwise it should return tvnorms with
* any standards that do not apply removed.
*
* This means that tuners, audio and video decoders can join
* their efforts to improve the standards detection.
*/
*p = vfd->tvnorms;
return ops->vidioc_querystd(file, fh, arg);
}
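
/*
 * Editor's sketch (hypothetical callback): following the contract
 * described above, a PAL-only video decoder narrows the pre-filled mask
 * rather than overwriting it, so several sub-devices can AND their
 * detections together. The hardware status below is invented.
 */
static int __maybe_unused example_querystd(struct file *file, void *fh,
					   v4l2_std_id *std)
{
	bool signal_detected = true;	/* hypothetical hardware status */

	if (!signal_detected)
		*std = V4L2_STD_UNKNOWN;
	else
		*std &= V4L2_STD_PAL;	/* decoder saw a PAL-ish signal */
	return 0;
}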
static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_hw_freq_seek *p = arg;
enum v4l2_tuner_type type;
int ret;
ret = v4l_enable_media_source(vfd);
if (ret)
return ret;
/* s_hw_freq_seek is not supported for SDR for now */
if (vfd->vfl_type == VFL_TYPE_SDR)
return -EINVAL;
type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
if (p->type != type)
return -EINVAL;
return ops->vidioc_s_hw_freq_seek(file, fh, p);
}
static int v4l_s_fbuf(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_framebuffer *p = arg;
p->base = NULL;
return ops->vidioc_s_fbuf(file, fh, p);
}
static int v4l_overlay(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
return ops->vidioc_overlay(file, fh, *(unsigned int *)arg);
}
static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_requestbuffers *p = arg;
int ret = check_fmt(file, p->type);
if (ret)
return ret;
memset_after(p, 0, flags);
return ops->vidioc_reqbufs(file, fh, p);
}
static int v4l_querybuf(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_buffer *p = arg;
int ret = check_fmt(file, p->type);
return ret ? ret : ops->vidioc_querybuf(file, fh, p);
}
static int v4l_qbuf(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_buffer *p = arg;
int ret = check_fmt(file, p->type);
return ret ? ret : ops->vidioc_qbuf(file, fh, p);
}
static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_buffer *p = arg;
int ret = check_fmt(file, p->type);
return ret ? ret : ops->vidioc_dqbuf(file, fh, p);
}
static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_create_buffers *create = arg;
int ret = check_fmt(file, create->format.type);
if (ret)
return ret;
memset_after(create, 0, flags);
v4l_sanitize_format(&create->format);
ret = ops->vidioc_create_bufs(file, fh, create);
if (create->format.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
create->format.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
create->format.fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
return ret;
}
static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_buffer *b = arg;
int ret = check_fmt(file, b->type);
return ret ? ret : ops->vidioc_prepare_buf(file, fh, b);
}
static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_streamparm *p = arg;
v4l2_std_id std;
int ret = check_fmt(file, p->type);
if (ret)
return ret;
if (ops->vidioc_g_parm)
return ops->vidioc_g_parm(file, fh, p);
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
if (vfd->device_caps & V4L2_CAP_READWRITE)
p->parm.capture.readbuffers = 2;
ret = ops->vidioc_g_std(file, fh, &std);
if (ret == 0)
v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe);
return ret;
}
static int v4l_s_parm(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_streamparm *p = arg;
int ret = check_fmt(file, p->type);
if (ret)
return ret;
/* Note: extendedmode is never used in drivers */
if (V4L2_TYPE_IS_OUTPUT(p->type)) {
memset(p->parm.output.reserved, 0,
sizeof(p->parm.output.reserved));
p->parm.output.extendedmode = 0;
p->parm.output.outputmode &= V4L2_MODE_HIGHQUALITY;
} else {
memset(p->parm.capture.reserved, 0,
sizeof(p->parm.capture.reserved));
p->parm.capture.extendedmode = 0;
p->parm.capture.capturemode &= V4L2_MODE_HIGHQUALITY;
}
return ops->vidioc_s_parm(file, fh, p);
}
static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_queryctrl *p = arg;
struct v4l2_fh *vfh =
test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
if (vfh && vfh->ctrl_handler)
return v4l2_queryctrl(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_queryctrl(vfd->ctrl_handler, p);
if (ops->vidioc_queryctrl)
return ops->vidioc_queryctrl(file, fh, p);
return -ENOTTY;
}
static int v4l_query_ext_ctrl(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_query_ext_ctrl *p = arg;
struct v4l2_fh *vfh =
test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
if (vfh && vfh->ctrl_handler)
return v4l2_query_ext_ctrl(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_query_ext_ctrl(vfd->ctrl_handler, p);
if (ops->vidioc_query_ext_ctrl)
return ops->vidioc_query_ext_ctrl(file, fh, p);
return -ENOTTY;
}
static int v4l_querymenu(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_querymenu *p = arg;
struct v4l2_fh *vfh =
test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
if (vfh && vfh->ctrl_handler)
return v4l2_querymenu(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_querymenu(vfd->ctrl_handler, p);
if (ops->vidioc_querymenu)
return ops->vidioc_querymenu(file, fh, p);
return -ENOTTY;
}
static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_control *p = arg;
struct v4l2_fh *vfh =
test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
if (vfh && vfh->ctrl_handler)
return v4l2_g_ctrl(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_g_ctrl(vfd->ctrl_handler, p);
if (ops->vidioc_g_ctrl)
return ops->vidioc_g_ctrl(file, fh, p);
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
ctrls.which = V4L2_CTRL_ID2WHICH(p->id);
ctrls.count = 1;
ctrls.controls = &ctrl;
ctrl.id = p->id;
ctrl.value = p->value;
if (check_ext_ctrls(&ctrls, VIDIOC_G_CTRL)) {
int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls);
if (ret == 0)
p->value = ctrl.value;
return ret;
}
return -EINVAL;
}
static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_control *p = arg;
struct v4l2_fh *vfh =
test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
int ret;
if (vfh && vfh->ctrl_handler)
return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_s_ctrl(NULL, vfd->ctrl_handler, p);
if (ops->vidioc_s_ctrl)
return ops->vidioc_s_ctrl(file, fh, p);
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
ctrls.which = V4L2_CTRL_ID2WHICH(p->id);
ctrls.count = 1;
ctrls.controls = &ctrl;
ctrl.id = p->id;
ctrl.value = p->value;
if (!check_ext_ctrls(&ctrls, VIDIOC_S_CTRL))
return -EINVAL;
ret = ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
p->value = ctrl.value;
return ret;
}
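/*
 * Illustrative userspace sketch (not part of this file): the two helpers
 * above mean a plain VIDIOC_G_CTRL/VIDIOC_S_CTRL call keeps working even
 * when the driver only implements the ext_ctrls operations, since the core
 * wraps the single control in a one-element v4l2_ext_controls array. "fd"
 * is assumed to be an open video node whose driver exposes the brightness
 * control.
 *
 *    struct v4l2_control ctrl = {
 *        .id = V4L2_CID_BRIGHTNESS,
 *        .value = 128,
 *    };
 *
 *    ioctl(fd, VIDIOC_S_CTRL, &ctrl);
 *    ioctl(fd, VIDIOC_G_CTRL, &ctrl);
 */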
static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_ext_controls *p = arg;
struct v4l2_fh *vfh =
test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
return v4l2_g_ext_ctrls(vfh->ctrl_handler,
vfd, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
return v4l2_g_ext_ctrls(vfd->ctrl_handler,
vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, VIDIOC_G_EXT_CTRLS) ?
ops->vidioc_g_ext_ctrls(file, fh, p) : -EINVAL;
}
static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_ext_controls *p = arg;
struct v4l2_fh *vfh =
test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
vfd, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler,
vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, VIDIOC_S_EXT_CTRLS) ?
ops->vidioc_s_ext_ctrls(file, fh, p) : -EINVAL;
}
static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_ext_controls *p = arg;
struct v4l2_fh *vfh =
test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
return v4l2_try_ext_ctrls(vfh->ctrl_handler,
vfd, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
return v4l2_try_ext_ctrls(vfd->ctrl_handler,
vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_try_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, VIDIOC_TRY_EXT_CTRLS) ?
ops->vidioc_try_ext_ctrls(file, fh, p) : -EINVAL;
}
/*
* The selection API specified originally that the _MPLANE buffer types
* shouldn't be used. The reasons for this are lost in the mists of time
* (or just really crappy memories). Regardless, this is really annoying
* for userspace. So to keep things simple we map _MPLANE buffer types
* to their 'regular' counterparts before calling the driver. And we
* restore it afterwards. This way applications can use either buffer
* type and drivers don't need to check for both.
*/
static int v4l_g_selection(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_selection *p = arg;
u32 old_type = p->type;
int ret;
if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
ret = ops->vidioc_g_selection(file, fh, p);
p->type = old_type;
return ret;
}
static int v4l_s_selection(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_selection *p = arg;
u32 old_type = p->type;
int ret;
if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
ret = ops->vidioc_s_selection(file, fh, p);
p->type = old_type;
return ret;
}
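/*
 * Illustrative userspace sketch (not part of this file): thanks to the
 * _MPLANE mapping above, an application using the multiplanar API can pass
 * its own buffer type directly; the driver only ever sees the regular
 * counterpart. "fd" is assumed to be an open multiplanar capture node.
 *
 *    struct v4l2_selection sel = {
 *        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
 *        .target = V4L2_SEL_TGT_CROP,
 *    };
 *
 *    ioctl(fd, VIDIOC_G_SELECTION, &sel);
 */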
static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_crop *p = arg;
struct v4l2_selection s = {
.type = p->type,
};
int ret;
/* simulate capture crop using selection api */
/* crop means compose for output devices */
if (V4L2_TYPE_IS_OUTPUT(p->type))
s.target = V4L2_SEL_TGT_COMPOSE;
else
s.target = V4L2_SEL_TGT_CROP;
if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags))
s.target = s.target == V4L2_SEL_TGT_COMPOSE ?
V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE;
ret = v4l_g_selection(ops, file, fh, &s);
/* copying results to old structure on success */
if (!ret)
p->c = s.r;
return ret;
}
static int v4l_s_crop(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_crop *p = arg;
struct v4l2_selection s = {
.type = p->type,
.r = p->c,
};
/* simulate capture crop using selection api */
/* crop means compose for output devices */
if (V4L2_TYPE_IS_OUTPUT(p->type))
s.target = V4L2_SEL_TGT_COMPOSE;
else
s.target = V4L2_SEL_TGT_CROP;
if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags))
s.target = s.target == V4L2_SEL_TGT_COMPOSE ?
V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE;
return v4l_s_selection(ops, file, fh, &s);
}
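/*
 * Illustrative equivalence (an assumption for documentation, not part of
 * this file): for a capture device without the inverted-crop quirk, the
 * legacy call
 *
 *    struct v4l2_crop crop = {
 *        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *        .c = { .left = 0, .top = 0, .width = 640, .height = 480 },
 *    };
 *    ioctl(fd, VIDIOC_S_CROP, &crop);
 *
 * is forwarded by v4l_s_crop() as a VIDIOC_S_SELECTION call with
 * target == V4L2_SEL_TGT_CROP and r == crop.c.
 */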
static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_cropcap *p = arg;
struct v4l2_selection s = { .type = p->type };
int ret = 0;
/* setting trivial pixelaspect */
p->pixelaspect.numerator = 1;
p->pixelaspect.denominator = 1;
if (s.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
s.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
else if (s.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
s.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
/*
* The determine_valid_ioctls() call should already ensure
* that this can never happen, but just in case...
*/
if (WARN_ON(!ops->vidioc_g_selection))
return -ENOTTY;
if (ops->vidioc_g_pixelaspect)
ret = ops->vidioc_g_pixelaspect(file, fh, s.type,
&p->pixelaspect);
/*
* Ignore ENOTTY or ENOIOCTLCMD error returns, just use the
* square pixel aspect ratio in that case.
*/
if (ret && ret != -ENOTTY && ret != -ENOIOCTLCMD)
return ret;
/* Use g_selection() to fill in the bounds and defrect rectangles */
/* obtaining bounds */
if (V4L2_TYPE_IS_OUTPUT(p->type))
s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS;
else
s.target = V4L2_SEL_TGT_CROP_BOUNDS;
if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags))
s.target = s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS ?
V4L2_SEL_TGT_CROP_BOUNDS : V4L2_SEL_TGT_COMPOSE_BOUNDS;
ret = v4l_g_selection(ops, file, fh, &s);
if (ret)
return ret;
p->bounds = s.r;
/* obtaining defrect */
if (s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS)
s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
else
s.target = V4L2_SEL_TGT_CROP_DEFAULT;
ret = v4l_g_selection(ops, file, fh, &s);
if (ret)
return ret;
p->defrect = s.r;
return 0;
}
static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
int ret;
if (vfd->v4l2_dev)
pr_info("%s: ================= START STATUS =================\n",
vfd->v4l2_dev->name);
ret = ops->vidioc_log_status(file, fh);
if (vfd->v4l2_dev)
pr_info("%s: ================== END STATUS ==================\n",
vfd->v4l2_dev->name);
return ret;
}
static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
struct v4l2_dbg_register *p = arg;
struct video_device *vfd = video_devdata(file);
struct v4l2_subdev *sd;
int idx = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) {
if (vfd->v4l2_dev == NULL)
return -EINVAL;
v4l2_device_for_each_subdev(sd, vfd->v4l2_dev)
if (p->match.addr == idx++)
return v4l2_subdev_call(sd, core, g_register, p);
return -EINVAL;
}
if (ops->vidioc_g_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
(ops->vidioc_g_chip_info || p->match.addr == 0))
return ops->vidioc_g_register(file, fh, p);
return -EINVAL;
#else
return -ENOTTY;
#endif
}
static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
const struct v4l2_dbg_register *p = arg;
struct video_device *vfd = video_devdata(file);
struct v4l2_subdev *sd;
int idx = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) {
if (vfd->v4l2_dev == NULL)
return -EINVAL;
v4l2_device_for_each_subdev(sd, vfd->v4l2_dev)
if (p->match.addr == idx++)
return v4l2_subdev_call(sd, core, s_register, p);
return -EINVAL;
}
if (ops->vidioc_s_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
(ops->vidioc_g_chip_info || p->match.addr == 0))
return ops->vidioc_s_register(file, fh, p);
return -EINVAL;
#else
return -ENOTTY;
#endif
}
static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
struct video_device *vfd = video_devdata(file);
struct v4l2_dbg_chip_info *p = arg;
struct v4l2_subdev *sd;
int idx = 0;
switch (p->match.type) {
case V4L2_CHIP_MATCH_BRIDGE:
if (ops->vidioc_s_register)
p->flags |= V4L2_CHIP_FL_WRITABLE;
if (ops->vidioc_g_register)
p->flags |= V4L2_CHIP_FL_READABLE;
strscpy(p->name, vfd->v4l2_dev->name, sizeof(p->name));
if (ops->vidioc_g_chip_info)
return ops->vidioc_g_chip_info(file, fh, arg);
if (p->match.addr)
return -EINVAL;
return 0;
case V4L2_CHIP_MATCH_SUBDEV:
if (vfd->v4l2_dev == NULL)
break;
v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) {
if (p->match.addr != idx++)
continue;
if (sd->ops->core && sd->ops->core->s_register)
p->flags |= V4L2_CHIP_FL_WRITABLE;
if (sd->ops->core && sd->ops->core->g_register)
p->flags |= V4L2_CHIP_FL_READABLE;
strscpy(p->name, sd->name, sizeof(p->name));
return 0;
}
break;
}
return -EINVAL;
#else
return -ENOTTY;
#endif
}
static int v4l_dqevent(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
return v4l2_event_dequeue(fh, arg, file->f_flags & O_NONBLOCK);
}
static int v4l_subscribe_event(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
return ops->vidioc_subscribe_event(fh, arg);
}
static int v4l_unsubscribe_event(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
return ops->vidioc_unsubscribe_event(fh, arg);
}
static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_sliced_vbi_cap *p = arg;
int ret = check_fmt(file, p->type);
if (ret)
return ret;
/* Clear up to type, everything after type is zeroed already */
memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type));
return ops->vidioc_g_sliced_vbi_cap(file, fh, p);
}
static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_frequency_band *p = arg;
enum v4l2_tuner_type type;
int err;
if (vfd->vfl_type == VFL_TYPE_SDR) {
if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF)
return -EINVAL;
type = p->type;
} else {
type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
if (type != p->type)
return -EINVAL;
}
if (ops->vidioc_enum_freq_bands) {
err = ops->vidioc_enum_freq_bands(file, fh, p);
if (err != -ENOTTY)
return err;
}
if (is_valid_ioctl(vfd, VIDIOC_G_TUNER)) {
struct v4l2_tuner t = {
.index = p->tuner,
.type = type,
};
if (p->index)
return -EINVAL;
err = ops->vidioc_g_tuner(file, fh, &t);
if (err)
return err;
p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS;
p->rangelow = t.rangelow;
p->rangehigh = t.rangehigh;
p->modulation = (type == V4L2_TUNER_RADIO) ?
V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
return 0;
}
if (is_valid_ioctl(vfd, VIDIOC_G_MODULATOR)) {
struct v4l2_modulator m = {
.index = p->tuner,
};
if (type != V4L2_TUNER_RADIO)
return -EINVAL;
if (p->index)
return -EINVAL;
err = ops->vidioc_g_modulator(file, fh, &m);
if (err)
return err;
p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS;
p->rangelow = m.rangelow;
p->rangehigh = m.rangehigh;
p->modulation = (type == V4L2_TUNER_RADIO) ?
V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
return 0;
}
return -ENOTTY;
}
struct v4l2_ioctl_info {
unsigned int ioctl;
u32 flags;
const char * const name;
int (*func)(const struct v4l2_ioctl_ops *ops, struct file *file,
void *fh, void *p);
void (*debug)(const void *arg, bool write_only);
};
/* This ioctl needs a priority check */
#define INFO_FL_PRIO (1 << 0)
/* This ioctl can be valid if the filehandle passes a control handler. */
#define INFO_FL_CTRL (1 << 1)
/* Queuing ioctl */
#define INFO_FL_QUEUE (1 << 2)
/* Always copy back result, even on error */
#define INFO_FL_ALWAYS_COPY (1 << 3)
/* Zero the struct from after the given field to the end */
#define INFO_FL_CLEAR(v4l2_struct, field) \
((offsetof(struct v4l2_struct, field) + \
sizeof_field(struct v4l2_struct, field)) << 16)
#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
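/*
 * Worked example (for documentation only, derived from the macros above):
 * video_get_user() recovers the partial-copy size like this, so for
 * VIDIOC_ENUMSTD only the bytes up to and including "index" are copied in
 * from userspace and the rest of struct v4l2_standard is zeroed:
 *
 *    u32 flags = v4l2_ioctls[_IOC_NR(VIDIOC_ENUMSTD)].flags;
 *    unsigned int n = (flags & INFO_FL_CLEAR_MASK) >> 16;
 *    // n == offsetof(struct v4l2_standard, index) +
 *    //      sizeof_field(struct v4l2_standard, index)
 */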
#define DEFINE_V4L_STUB_FUNC(_vidioc) \
static int v4l_stub_ ## _vidioc( \
const struct v4l2_ioctl_ops *ops, \
struct file *file, void *fh, void *p) \
{ \
return ops->vidioc_ ## _vidioc(file, fh, p); \
}
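/*
 * Expansion example (mechanical, for documentation only):
 * DEFINE_V4L_STUB_FUNC(g_std) produces
 *
 *    static int v4l_stub_g_std(const struct v4l2_ioctl_ops *ops,
 *                              struct file *file, void *fh, void *p)
 *    {
 *        return ops->vidioc_g_std(file, fh, p);
 *    }
 *
 * i.e. a trivial trampoline that forwards the ioctl to the driver
 * operation of the same name.
 */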
#define IOCTL_INFO(_ioctl, _func, _debug, _flags) \
[_IOC_NR(_ioctl)] = { \
.ioctl = _ioctl, \
.flags = _flags, \
.name = #_ioctl, \
.func = _func, \
.debug = _debug, \
}
DEFINE_V4L_STUB_FUNC(g_fbuf)
DEFINE_V4L_STUB_FUNC(expbuf)
DEFINE_V4L_STUB_FUNC(g_std)
DEFINE_V4L_STUB_FUNC(g_audio)
DEFINE_V4L_STUB_FUNC(s_audio)
DEFINE_V4L_STUB_FUNC(g_edid)
DEFINE_V4L_STUB_FUNC(s_edid)
DEFINE_V4L_STUB_FUNC(g_audout)
DEFINE_V4L_STUB_FUNC(s_audout)
DEFINE_V4L_STUB_FUNC(g_jpegcomp)
DEFINE_V4L_STUB_FUNC(s_jpegcomp)
DEFINE_V4L_STUB_FUNC(enumaudio)
DEFINE_V4L_STUB_FUNC(enumaudout)
DEFINE_V4L_STUB_FUNC(enum_framesizes)
DEFINE_V4L_STUB_FUNC(enum_frameintervals)
DEFINE_V4L_STUB_FUNC(g_enc_index)
DEFINE_V4L_STUB_FUNC(encoder_cmd)
DEFINE_V4L_STUB_FUNC(try_encoder_cmd)
DEFINE_V4L_STUB_FUNC(decoder_cmd)
DEFINE_V4L_STUB_FUNC(try_decoder_cmd)
DEFINE_V4L_STUB_FUNC(s_dv_timings)
DEFINE_V4L_STUB_FUNC(g_dv_timings)
DEFINE_V4L_STUB_FUNC(enum_dv_timings)
DEFINE_V4L_STUB_FUNC(query_dv_timings)
DEFINE_V4L_STUB_FUNC(dv_timings_cap)
static const struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0),
IOCTL_INFO(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, 0),
IOCTL_INFO(VIDIOC_G_FMT, v4l_g_fmt, v4l_print_format, 0),
IOCTL_INFO(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers, INFO_FL_PRIO | INFO_FL_QUEUE),
IOCTL_INFO(VIDIOC_QUERYBUF, v4l_querybuf, v4l_print_buffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_buffer, length)),
IOCTL_INFO(VIDIOC_G_FBUF, v4l_stub_g_fbuf, v4l_print_framebuffer, 0),
IOCTL_INFO(VIDIOC_S_FBUF, v4l_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
IOCTL_INFO(VIDIOC_EXPBUF, v4l_stub_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)),
IOCTL_INFO(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
IOCTL_INFO(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
IOCTL_INFO(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
IOCTL_INFO(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm, INFO_FL_CLEAR(v4l2_streamparm, type)),
IOCTL_INFO(VIDIOC_S_PARM, v4l_s_parm, v4l_print_streamparm, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_G_STD, v4l_stub_g_std, v4l_print_std, 0),
IOCTL_INFO(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard, INFO_FL_CLEAR(v4l2_standard, index)),
IOCTL_INFO(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput, INFO_FL_CLEAR(v4l2_input, index)),
IOCTL_INFO(VIDIOC_G_CTRL, v4l_g_ctrl, v4l_print_control, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_control, id)),
IOCTL_INFO(VIDIOC_S_CTRL, v4l_s_ctrl, v4l_print_control, INFO_FL_PRIO | INFO_FL_CTRL),
IOCTL_INFO(VIDIOC_G_TUNER, v4l_g_tuner, v4l_print_tuner, INFO_FL_CLEAR(v4l2_tuner, index)),
IOCTL_INFO(VIDIOC_S_TUNER, v4l_s_tuner, v4l_print_tuner, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_G_AUDIO, v4l_stub_g_audio, v4l_print_audio, 0),
IOCTL_INFO(VIDIOC_S_AUDIO, v4l_stub_s_audio, v4l_print_audio, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_queryctrl, id)),
IOCTL_INFO(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)),
IOCTL_INFO(VIDIOC_G_INPUT, v4l_g_input, v4l_print_u32, 0),
IOCTL_INFO(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_G_EDID, v4l_stub_g_edid, v4l_print_edid, INFO_FL_ALWAYS_COPY),
IOCTL_INFO(VIDIOC_S_EDID, v4l_stub_s_edid, v4l_print_edid, INFO_FL_PRIO | INFO_FL_ALWAYS_COPY),
IOCTL_INFO(VIDIOC_G_OUTPUT, v4l_g_output, v4l_print_u32, 0),
IOCTL_INFO(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)),
IOCTL_INFO(VIDIOC_G_AUDOUT, v4l_stub_g_audout, v4l_print_audioout, 0),
IOCTL_INFO(VIDIOC_S_AUDOUT, v4l_stub_s_audout, v4l_print_audioout, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)),
IOCTL_INFO(VIDIOC_S_MODULATOR, v4l_s_modulator, v4l_print_modulator, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)),
IOCTL_INFO(VIDIOC_S_FREQUENCY, v4l_s_frequency, v4l_print_frequency, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)),
IOCTL_INFO(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)),
IOCTL_INFO(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_G_SELECTION, v4l_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)),
IOCTL_INFO(VIDIOC_S_SELECTION, v4l_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)),
IOCTL_INFO(VIDIOC_G_JPEGCOMP, v4l_stub_g_jpegcomp, v4l_print_jpegcompression, 0),
IOCTL_INFO(VIDIOC_S_JPEGCOMP, v4l_stub_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0),
IOCTL_INFO(VIDIOC_TRY_FMT, v4l_try_fmt, v4l_print_format, 0),
IOCTL_INFO(VIDIOC_ENUMAUDIO, v4l_stub_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)),
IOCTL_INFO(VIDIOC_ENUMAUDOUT, v4l_stub_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)),
IOCTL_INFO(VIDIOC_G_PRIORITY, v4l_g_priority, v4l_print_u32, 0),
IOCTL_INFO(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)),
IOCTL_INFO(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0),
IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, v4l_stub_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)),
IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, v4l_stub_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)),
IOCTL_INFO(VIDIOC_G_ENC_INDEX, v4l_stub_g_enc_index, v4l_print_enc_idx, 0),
IOCTL_INFO(VIDIOC_ENCODER_CMD, v4l_stub_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
IOCTL_INFO(VIDIOC_TRY_ENCODER_CMD, v4l_stub_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
IOCTL_INFO(VIDIOC_DECODER_CMD, v4l_stub_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_TRY_DECODER_CMD, v4l_stub_try_decoder_cmd, v4l_print_decoder_cmd, 0),
IOCTL_INFO(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0),
IOCTL_INFO(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0),
IOCTL_INFO(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_S_DV_TIMINGS, v4l_stub_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_dv_timings, bt.flags)),
IOCTL_INFO(VIDIOC_G_DV_TIMINGS, v4l_stub_g_dv_timings, v4l_print_dv_timings, 0),
IOCTL_INFO(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0),
IOCTL_INFO(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0),
IOCTL_INFO(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0),
IOCTL_INFO(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE),
IOCTL_INFO(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE),
IOCTL_INFO(VIDIOC_ENUM_DV_TIMINGS, v4l_stub_enum_dv_timings, v4l_print_enum_dv_timings, INFO_FL_CLEAR(v4l2_enum_dv_timings, pad)),
IOCTL_INFO(VIDIOC_QUERY_DV_TIMINGS, v4l_stub_query_dv_timings, v4l_print_dv_timings, INFO_FL_ALWAYS_COPY),
IOCTL_INFO(VIDIOC_DV_TIMINGS_CAP, v4l_stub_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, pad)),
IOCTL_INFO(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0),
IOCTL_INFO(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)),
IOCTL_INFO(VIDIOC_QUERY_EXT_CTRL, v4l_query_ext_ctrl, v4l_print_query_ext_ctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_query_ext_ctrl, id)),
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
static bool v4l2_is_known_ioctl(unsigned int cmd)
{
if (_IOC_NR(cmd) >= V4L2_IOCTLS)
return false;
return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd;
}
static struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev,
struct v4l2_fh *vfh, unsigned int cmd,
void *arg)
{
if (_IOC_NR(cmd) >= V4L2_IOCTLS)
return vdev->lock;
if (vfh && vfh->m2m_ctx &&
(v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) {
if (vfh->m2m_ctx->q_lock)
return vfh->m2m_ctx->q_lock;
}
if (vdev->queue && vdev->queue->lock &&
(v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE))
return vdev->queue->lock;
return vdev->lock;
}
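/*
 * Illustrative driver-side sketch (an assumption, not part of this file):
 * a driver can give its vb2 queue a dedicated mutex so that the queue
 * ioctls selected by INFO_FL_QUEUE above serialize on queue->lock while
 * all other ioctls serialize on the device lock:
 *
 *    mutex_init(&priv->dev_lock);
 *    mutex_init(&priv->queue_lock);
 *    priv->queue.lock = &priv->queue_lock;
 *    priv->vdev.queue = &priv->queue;
 *    priv->vdev.lock = &priv->dev_lock;
 */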
/*
 * Common ioctl debug function. This function can be used to log external
 * ioctl messages as well as the internal V4L2 ioctls.
 */
void v4l_printk_ioctl(const char *prefix, unsigned int cmd)
{
const char *dir, *type;
if (prefix)
printk(KERN_DEBUG "%s: ", prefix);
switch (_IOC_TYPE(cmd)) {
case 'd':
type = "v4l2_int";
break;
case 'V':
if (_IOC_NR(cmd) >= V4L2_IOCTLS) {
type = "v4l2";
break;
}
pr_cont("%s", v4l2_ioctls[_IOC_NR(cmd)].name);
return;
default:
type = "unknown";
break;
}
switch (_IOC_DIR(cmd)) {
case _IOC_NONE: dir = "--"; break;
case _IOC_READ: dir = "r-"; break;
case _IOC_WRITE: dir = "-w"; break;
case _IOC_READ | _IOC_WRITE: dir = "rw"; break;
default: dir = "*ERR*"; break;
}
pr_cont("%s ioctl '%c', dir=%s, #%d (0x%08x)",
type, _IOC_TYPE(cmd), dir, _IOC_NR(cmd), cmd);
}
EXPORT_SYMBOL(v4l_printk_ioctl);
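/*
 * Example output (derived from the code above, shown for documentation):
 *
 *    v4l_printk_ioctl("video0", VIDIOC_QUERYCAP);
 *        -> "video0: VIDIOC_QUERYCAP"
 *    v4l_printk_ioctl(NULL, _IOC(_IOC_READ, 'V', 200, 4));
 *        -> "v4l2 ioctl 'V', dir=r-, #200 (0x800456c8)"
 *
 * The exact hex value is illustrative; it depends on the architecture's
 * _IOC() bit layout.
 */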
static long __video_do_ioctl(struct file *file,
unsigned int cmd, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct mutex *req_queue_lock = NULL;
struct mutex *lock; /* ioctl serialization mutex */
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
bool write_only = false;
struct v4l2_ioctl_info default_info;
const struct v4l2_ioctl_info *info;
void *fh = file->private_data;
struct v4l2_fh *vfh = NULL;
int dev_debug = vfd->dev_debug;
long ret = -ENOTTY;
if (ops == NULL) {
pr_warn("%s: has no ioctl_ops.\n",
video_device_node_name(vfd));
return ret;
}
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
vfh = file->private_data;
/*
* We need to serialize streamon/off with queueing new requests.
* These ioctls may trigger the cancellation of a streaming
* operation, and that should not be mixed with queueing a new
* request at the same time.
*/
if (v4l2_device_supports_requests(vfd->v4l2_dev) &&
(cmd == VIDIOC_STREAMON || cmd == VIDIOC_STREAMOFF)) {
req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex;
if (mutex_lock_interruptible(req_queue_lock))
return -ERESTARTSYS;
}
lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg);
if (lock && mutex_lock_interruptible(lock)) {
if (req_queue_lock)
mutex_unlock(req_queue_lock);
return -ERESTARTSYS;
}
if (!video_is_registered(vfd)) {
ret = -ENODEV;
goto unlock;
}
if (v4l2_is_known_ioctl(cmd)) {
info = &v4l2_ioctls[_IOC_NR(cmd)];
if (!test_bit(_IOC_NR(cmd), vfd->valid_ioctls) &&
!((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler))
goto done;
if (vfh && (info->flags & INFO_FL_PRIO)) {
ret = v4l2_prio_check(vfd->prio, vfh->prio);
if (ret)
goto done;
}
} else {
default_info.ioctl = cmd;
default_info.flags = 0;
default_info.debug = v4l_print_default;
info = &default_info;
}
write_only = _IOC_DIR(cmd) == _IOC_WRITE;
if (info != &default_info) {
ret = info->func(ops, file, fh, arg);
} else if (!ops->vidioc_default) {
ret = -ENOTTY;
} else {
ret = ops->vidioc_default(file, fh,
vfh ? v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0,
cmd, arg);
}
done:
if (dev_debug & (V4L2_DEV_DEBUG_IOCTL | V4L2_DEV_DEBUG_IOCTL_ARG)) {
if (!(dev_debug & V4L2_DEV_DEBUG_STREAMING) &&
(cmd == VIDIOC_QBUF || cmd == VIDIOC_DQBUF))
goto unlock;
v4l_printk_ioctl(video_device_node_name(vfd), cmd);
if (ret < 0)
pr_cont(": error %ld", ret);
if (!(dev_debug & V4L2_DEV_DEBUG_IOCTL_ARG))
pr_cont("\n");
else if (_IOC_DIR(cmd) == _IOC_NONE)
info->debug(arg, write_only);
else {
pr_cont(": ");
info->debug(arg, write_only);
}
}
unlock:
if (lock)
mutex_unlock(lock);
if (req_queue_lock)
mutex_unlock(req_queue_lock);
return ret;
}
static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
void __user **user_ptr, void ***kernel_ptr)
{
int ret = 0;
switch (cmd) {
case VIDIOC_PREPARE_BUF:
case VIDIOC_QUERYBUF:
case VIDIOC_QBUF:
case VIDIOC_DQBUF: {
struct v4l2_buffer *buf = parg;
if (V4L2_TYPE_IS_MULTIPLANAR(buf->type) && buf->length > 0) {
if (buf->length > VIDEO_MAX_PLANES) {
ret = -EINVAL;
break;
}
*user_ptr = (void __user *)buf->m.planes;
*kernel_ptr = (void **)&buf->m.planes;
*array_size = sizeof(struct v4l2_plane) * buf->length;
ret = 1;
}
break;
}
case VIDIOC_G_EDID:
case VIDIOC_S_EDID: {
struct v4l2_edid *edid = parg;
if (edid->blocks) {
if (edid->blocks > 256) {
ret = -EINVAL;
break;
}
*user_ptr = (void __user *)edid->edid;
*kernel_ptr = (void **)&edid->edid;
*array_size = edid->blocks * 128;
ret = 1;
}
break;
}
case VIDIOC_S_EXT_CTRLS:
case VIDIOC_G_EXT_CTRLS:
case VIDIOC_TRY_EXT_CTRLS: {
struct v4l2_ext_controls *ctrls = parg;
if (ctrls->count != 0) {
if (ctrls->count > V4L2_CID_MAX_CTRLS) {
ret = -EINVAL;
break;
}
*user_ptr = (void __user *)ctrls->controls;
*kernel_ptr = (void **)&ctrls->controls;
*array_size = sizeof(struct v4l2_ext_control)
* ctrls->count;
ret = 1;
}
break;
}
case VIDIOC_SUBDEV_G_ROUTING:
case VIDIOC_SUBDEV_S_ROUTING: {
struct v4l2_subdev_routing *routing = parg;
if (routing->num_routes > 256)
return -E2BIG;
*user_ptr = u64_to_user_ptr(routing->routes);
*kernel_ptr = (void **)&routing->routes;
*array_size = sizeof(struct v4l2_subdev_route)
* routing->num_routes;
ret = 1;
break;
}
}
return ret;
}
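/*
 * Illustrative userspace view (not part of this file): a multiplanar
 * VIDIOC_QUERYBUF carries a second, variable-sized array that the helper
 * above marshals separately. The application provides the plane array and
 * check_array_args() tells video_usercopy() to bounce it through a kernel
 * buffer:
 *
 *    struct v4l2_plane planes[2] = {};
 *    struct v4l2_buffer buf = {
 *        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
 *        .memory = V4L2_MEMORY_MMAP,
 *        .index = 0,
 *        .length = 2,
 *        .m.planes = planes,
 *    };
 *
 *    ioctl(fd, VIDIOC_QUERYBUF, &buf);
 */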
static unsigned int video_translate_cmd(unsigned int cmd)
{
#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
switch (cmd) {
case VIDIOC_DQEVENT_TIME32:
return VIDIOC_DQEVENT;
case VIDIOC_QUERYBUF_TIME32:
return VIDIOC_QUERYBUF;
case VIDIOC_QBUF_TIME32:
return VIDIOC_QBUF;
case VIDIOC_DQBUF_TIME32:
return VIDIOC_DQBUF;
case VIDIOC_PREPARE_BUF_TIME32:
return VIDIOC_PREPARE_BUF;
}
#endif
if (in_compat_syscall())
return v4l2_compat_translate_cmd(cmd);
return cmd;
}
static int video_get_user(void __user *arg, void *parg,
unsigned int real_cmd, unsigned int cmd,
bool *always_copy)
{
unsigned int n = _IOC_SIZE(real_cmd);
int err = 0;
if (!(_IOC_DIR(cmd) & _IOC_WRITE)) {
/* read-only ioctl */
memset(parg, 0, n);
return 0;
}
/*
* In some cases, only a few fields are used as input,
* i.e. when the app sets "index" and then the driver
* fills in the rest of the structure for the thing
* with that index. We only need to copy up to the
* first non-input field.
*/
if (v4l2_is_known_ioctl(real_cmd)) {
u32 flags = v4l2_ioctls[_IOC_NR(real_cmd)].flags;
if (flags & INFO_FL_CLEAR_MASK)
n = (flags & INFO_FL_CLEAR_MASK) >> 16;
*always_copy = flags & INFO_FL_ALWAYS_COPY;
}
if (cmd == real_cmd) {
if (copy_from_user(parg, (void __user *)arg, n))
err = -EFAULT;
} else if (in_compat_syscall()) {
memset(parg, 0, n);
err = v4l2_compat_get_user(arg, parg, cmd);
} else {
memset(parg, 0, n);
#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
switch (cmd) {
case VIDIOC_QUERYBUF_TIME32:
case VIDIOC_QBUF_TIME32:
case VIDIOC_DQBUF_TIME32:
case VIDIOC_PREPARE_BUF_TIME32: {
struct v4l2_buffer_time32 vb32;
struct v4l2_buffer *vb = parg;
if (copy_from_user(&vb32, arg, sizeof(vb32)))
return -EFAULT;
*vb = (struct v4l2_buffer) {
.index = vb32.index,
.type = vb32.type,
.bytesused = vb32.bytesused,
.flags = vb32.flags,
.field = vb32.field,
.timestamp.tv_sec = vb32.timestamp.tv_sec,
.timestamp.tv_usec = vb32.timestamp.tv_usec,
.timecode = vb32.timecode,
.sequence = vb32.sequence,
.memory = vb32.memory,
.m.userptr = vb32.m.userptr,
.length = vb32.length,
.request_fd = vb32.request_fd,
};
break;
}
}
#endif
}
/* zero out anything we don't copy from userspace */
if (!err && n < _IOC_SIZE(real_cmd))
memset((u8 *)parg + n, 0, _IOC_SIZE(real_cmd) - n);
return err;
}
static int video_put_user(void __user *arg, void *parg,
unsigned int real_cmd, unsigned int cmd)
{
if (!(_IOC_DIR(cmd) & _IOC_READ))
return 0;
if (cmd == real_cmd) {
/* Copy results into user buffer */
if (copy_to_user(arg, parg, _IOC_SIZE(cmd)))
return -EFAULT;
return 0;
}
if (in_compat_syscall())
return v4l2_compat_put_user(arg, parg, cmd);
#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
switch (cmd) {
case VIDIOC_DQEVENT_TIME32: {
struct v4l2_event *ev = parg;
struct v4l2_event_time32 ev32;
memset(&ev32, 0, sizeof(ev32));
ev32.type = ev->type;
ev32.pending = ev->pending;
ev32.sequence = ev->sequence;
ev32.timestamp.tv_sec = ev->timestamp.tv_sec;
ev32.timestamp.tv_nsec = ev->timestamp.tv_nsec;
ev32.id = ev->id;
memcpy(&ev32.u, &ev->u, sizeof(ev->u));
memcpy(&ev32.reserved, &ev->reserved, sizeof(ev->reserved));
if (copy_to_user(arg, &ev32, sizeof(ev32)))
return -EFAULT;
break;
}
case VIDIOC_QUERYBUF_TIME32:
case VIDIOC_QBUF_TIME32:
case VIDIOC_DQBUF_TIME32:
case VIDIOC_PREPARE_BUF_TIME32: {
struct v4l2_buffer *vb = parg;
struct v4l2_buffer_time32 vb32;
memset(&vb32, 0, sizeof(vb32));
vb32.index = vb->index;
vb32.type = vb->type;
vb32.bytesused = vb->bytesused;
vb32.flags = vb->flags;
vb32.field = vb->field;
vb32.timestamp.tv_sec = vb->timestamp.tv_sec;
vb32.timestamp.tv_usec = vb->timestamp.tv_usec;
vb32.timecode = vb->timecode;
vb32.sequence = vb->sequence;
vb32.memory = vb->memory;
vb32.m.userptr = vb->m.userptr;
vb32.length = vb->length;
vb32.request_fd = vb->request_fd;
if (copy_to_user(arg, &vb32, sizeof(vb32)))
return -EFAULT;
break;
}
}
#endif
return 0;
}
long
video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
v4l2_kioctl func)
{
char sbuf[128];
void *mbuf = NULL, *array_buf = NULL;
void *parg = (void *)arg;
long err = -EINVAL;
bool has_array_args;
bool always_copy = false;
size_t array_size = 0;
void __user *user_ptr = NULL;
void **kernel_ptr = NULL;
unsigned int cmd = video_translate_cmd(orig_cmd);
const size_t ioc_size = _IOC_SIZE(cmd);
/* Copy arguments into temp kernel buffer */
if (_IOC_DIR(cmd) != _IOC_NONE) {
if (ioc_size <= sizeof(sbuf)) {
parg = sbuf;
} else {
/* too big to allocate from stack */
mbuf = kmalloc(ioc_size, GFP_KERNEL);
if (NULL == mbuf)
return -ENOMEM;
parg = mbuf;
}
err = video_get_user((void __user *)arg, parg, cmd,
orig_cmd, &always_copy);
if (err)
goto out;
}
err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr);
if (err < 0)
goto out;
has_array_args = err;
if (has_array_args) {
array_buf = kvmalloc(array_size, GFP_KERNEL);
err = -ENOMEM;
if (array_buf == NULL)
goto out;
if (in_compat_syscall())
err = v4l2_compat_get_array_args(file, array_buf,
user_ptr, array_size,
orig_cmd, parg);
else
err = copy_from_user(array_buf, user_ptr, array_size) ?
-EFAULT : 0;
if (err)
goto out;
*kernel_ptr = array_buf;
}
/* Handle the ioctl */
err = func(file, cmd, parg);
if (err == -ENOTTY || err == -ENOIOCTLCMD) {
err = -ENOTTY;
goto out;
}
if (err == 0) {
if (cmd == VIDIOC_DQBUF)
trace_v4l2_dqbuf(video_devdata(file)->minor, parg);
else if (cmd == VIDIOC_QBUF)
trace_v4l2_qbuf(video_devdata(file)->minor, parg);
}
/*
* Some ioctls can return an error, but still have valid
* results that must be returned.
*
* FIXME: subdev IOCTLS are partially handled here and partially in
* v4l2-subdev.c and the 'always_copy' flag can only be set for IOCTLS
* defined here as part of the 'v4l2_ioctls' array. As
* VIDIOC_SUBDEV_G_ROUTING needs to return results to applications even
* in case of failure, but it is not defined here as part of the
* 'v4l2_ioctls' array, insert an ad-hoc check to address that.
*/
if (err < 0 && !always_copy && cmd != VIDIOC_SUBDEV_G_ROUTING)
goto out;
if (has_array_args) {
*kernel_ptr = (void __force *)user_ptr;
if (in_compat_syscall()) {
int put_err;
put_err = v4l2_compat_put_array_args(file, user_ptr,
array_buf,
array_size,
orig_cmd, parg);
if (put_err)
err = put_err;
} else if (copy_to_user(user_ptr, array_buf, array_size)) {
err = -EFAULT;
}
}
if (video_put_user((void __user *)arg, parg, cmd, orig_cmd))
err = -EFAULT;
out:
kvfree(array_buf);
kfree(mbuf);
return err;
}
long video_ioctl2(struct file *file,
unsigned int cmd, unsigned long arg)
{
return video_usercopy(file, cmd, arg, __video_do_ioctl);
}
EXPORT_SYMBOL(video_ioctl2);
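/*
 * Typical driver wiring (a common pattern, shown as an illustration):
 * drivers that use the v4l2_ioctl_ops dispatch above simply point their
 * file operations at video_ioctl2():
 *
 *    static const struct v4l2_file_operations my_fops = {
 *        .owner = THIS_MODULE,
 *        .open = v4l2_fh_open,
 *        .release = vb2_fop_release,
 *        .unlocked_ioctl = video_ioctl2,
 *        .mmap = vb2_fop_mmap,
 *        .poll = vb2_fop_poll,
 *    };
 */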
| linux-master | drivers/media/v4l2-core/v4l2-ioctl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* media-dev-allocator.c - Media Controller Device Allocator API
*
* Copyright (c) 2019 Shuah Khan <[email protected]>
*
* Credits: Suggested by Laurent Pinchart <[email protected]>
*/
/*
* This file adds a global refcounted Media Controller Device Instance API.
* A system-wide global media device list is managed and each media device
* includes a kref count. The last put on the media device releases the
* media device instance.
*/
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <media/media-device.h>
#include <media/media-dev-allocator.h>
static LIST_HEAD(media_device_list);
static DEFINE_MUTEX(media_device_lock);
struct media_device_instance {
struct media_device mdev;
struct module *owner;
struct list_head list;
struct kref refcount;
};
static inline struct media_device_instance *
to_media_device_instance(struct media_device *mdev)
{
return container_of(mdev, struct media_device_instance, mdev);
}
static void media_device_instance_release(struct kref *kref)
{
struct media_device_instance *mdi =
container_of(kref, struct media_device_instance, refcount);
dev_dbg(mdi->mdev.dev, "%s: releasing Media Device\n", __func__);
mutex_lock(&media_device_lock);
media_device_unregister(&mdi->mdev);
media_device_cleanup(&mdi->mdev);
list_del(&mdi->list);
mutex_unlock(&media_device_lock);
kfree(mdi);
}
/* Callers should hold media_device_lock when calling this function */
static struct media_device *__media_device_get(struct device *dev,
const char *module_name,
struct module *owner)
{
struct media_device_instance *mdi;
list_for_each_entry(mdi, &media_device_list, list) {
if (mdi->mdev.dev != dev)
continue;
kref_get(&mdi->refcount);
/* get module reference for the media_device owner */
if (owner != mdi->owner && !try_module_get(mdi->owner))
dev_err(dev,
"%s: module %s get owner reference error\n",
__func__, module_name);
else
dev_dbg(dev, "%s: module %s got owner reference\n",
__func__, module_name);
return &mdi->mdev;
}
mdi = kzalloc(sizeof(*mdi), GFP_KERNEL);
if (!mdi)
return NULL;
mdi->owner = owner;
kref_init(&mdi->refcount);
list_add_tail(&mdi->list, &media_device_list);
dev_dbg(dev, "%s: Allocated media device for owner %s\n",
__func__, module_name);
return &mdi->mdev;
}
struct media_device *media_device_usb_allocate(struct usb_device *udev,
const char *module_name,
struct module *owner)
{
struct media_device *mdev;
mutex_lock(&media_device_lock);
mdev = __media_device_get(&udev->dev, module_name, owner);
if (!mdev) {
mutex_unlock(&media_device_lock);
return ERR_PTR(-ENOMEM);
}
/* check if media device is already initialized */
if (!mdev->dev)
__media_device_usb_init(mdev, udev, udev->product,
module_name);
mutex_unlock(&media_device_lock);
return mdev;
}
EXPORT_SYMBOL_GPL(media_device_usb_allocate);
void media_device_delete(struct media_device *mdev, const char *module_name,
struct module *owner)
{
struct media_device_instance *mdi = to_media_device_instance(mdev);
mutex_lock(&media_device_lock);
/* put module reference for the media_device owner */
if (mdi->owner != owner) {
module_put(mdi->owner);
dev_dbg(mdi->mdev.dev,
"%s: module %s put owner module reference\n",
__func__, module_name);
}
mutex_unlock(&media_device_lock);
kref_put(&mdi->refcount, media_device_instance_release);
}
EXPORT_SYMBOL_GPL(media_device_delete);
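/*
 * Illustrative pairing (an assumption sketch, not part of this file): a
 * USB driver shares one media device between several functions by calling
 * the allocator in probe() and dropping the reference in disconnect():
 *
 *    // probe()
 *    mdev = media_device_usb_allocate(udev, KBUILD_MODNAME, THIS_MODULE);
 *    if (IS_ERR(mdev))
 *        return PTR_ERR(mdev);
 *
 *    // disconnect()
 *    media_device_delete(mdev, KBUILD_MODNAME, THIS_MODULE);
 */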
| linux-master | drivers/media/mc/mc-dev-allocator.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Media device request objects
*
* Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2018 Google, Inc.
*
* Author: Hans Verkuil <[email protected]>
* Author: Sakari Ailus <[email protected]>
*/
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/refcount.h>
#include <media/media-device.h>
#include <media/media-request.h>
static const char * const request_state[] = {
[MEDIA_REQUEST_STATE_IDLE] = "idle",
[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
[MEDIA_REQUEST_STATE_QUEUED] = "queued",
[MEDIA_REQUEST_STATE_COMPLETE] = "complete",
[MEDIA_REQUEST_STATE_CLEANING] = "cleaning",
[MEDIA_REQUEST_STATE_UPDATING] = "updating",
};
static const char *
media_request_state_str(enum media_request_state state)
{
BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);
if (WARN_ON(state >= ARRAY_SIZE(request_state)))
return "invalid";
return request_state[state];
}
static void media_request_clean(struct media_request *req)
{
struct media_request_object *obj, *obj_safe;
/* Just a sanity check. No other code path is allowed to change this. */
WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
WARN_ON(req->updating_count);
WARN_ON(req->access_count);
list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
media_request_object_unbind(obj);
media_request_object_put(obj);
}
req->updating_count = 0;
req->access_count = 0;
WARN_ON(req->num_incomplete_objects);
req->num_incomplete_objects = 0;
wake_up_interruptible_all(&req->poll_wait);
}
static void media_request_release(struct kref *kref)
{
struct media_request *req =
container_of(kref, struct media_request, kref);
struct media_device *mdev = req->mdev;
dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);
/* No other users, no need for a spinlock */
req->state = MEDIA_REQUEST_STATE_CLEANING;
media_request_clean(req);
if (mdev->ops->req_free)
mdev->ops->req_free(req);
else
kfree(req);
}
void media_request_put(struct media_request *req)
{
kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);
static int media_request_close(struct inode *inode, struct file *filp)
{
struct media_request *req = filp->private_data;
media_request_put(req);
return 0;
}
static __poll_t media_request_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct media_request *req = filp->private_data;
unsigned long flags;
__poll_t ret = 0;
if (!(poll_requested_events(wait) & EPOLLPRI))
return 0;
poll_wait(filp, &req->poll_wait, wait);
spin_lock_irqsave(&req->lock, flags);
if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
ret = EPOLLPRI;
goto unlock;
}
if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
ret = EPOLLERR;
goto unlock;
}
unlock:
spin_unlock_irqrestore(&req->lock, flags);
return ret;
}
static long media_request_ioctl_queue(struct media_request *req)
{
struct media_device *mdev = req->mdev;
enum media_request_state state;
unsigned long flags;
int ret;
dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);
/*
* Ensure the request that is validated will be the one that gets queued
* next by serializing the queueing process. This mutex is also used
* to serialize with canceling a vb2 queue and with setting values such
* as controls in a request.
*/
mutex_lock(&mdev->req_queue_mutex);
media_request_get(req);
spin_lock_irqsave(&req->lock, flags);
if (req->state == MEDIA_REQUEST_STATE_IDLE)
req->state = MEDIA_REQUEST_STATE_VALIDATING;
state = req->state;
spin_unlock_irqrestore(&req->lock, flags);
if (state != MEDIA_REQUEST_STATE_VALIDATING) {
dev_dbg(mdev->dev,
"request: unable to queue %s, request in state %s\n",
req->debug_str, media_request_state_str(state));
media_request_put(req);
mutex_unlock(&mdev->req_queue_mutex);
return -EBUSY;
}
ret = mdev->ops->req_validate(req);
/*
* If the req_validate was successful, then we mark the state as QUEUED
* and call req_queue. The reason we set the state first is that this
* allows req_queue to unbind or complete the queued objects in case
* they are immediately 'consumed'. State changes from QUEUED to another
* state can only happen if either the driver changes the state or if
* the user cancels the vb2 queue. The driver can only change the state
* after each object is queued through the req_queue op (and note that
* that op cannot fail), so setting the state to QUEUED up front is
* safe.
*
* The other reason for changing the state is if the vb2 queue is
* canceled, and that uses the req_queue_mutex which is still locked
* while req_queue is called, so that's safe as well.
*/
spin_lock_irqsave(&req->lock, flags);
req->state = ret ? MEDIA_REQUEST_STATE_IDLE
: MEDIA_REQUEST_STATE_QUEUED;
spin_unlock_irqrestore(&req->lock, flags);
if (!ret)
mdev->ops->req_queue(req);
mutex_unlock(&mdev->req_queue_mutex);
if (ret) {
dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
req->debug_str, ret);
media_request_put(req);
}
return ret;
}
static long media_request_ioctl_reinit(struct media_request *req)
{
struct media_device *mdev = req->mdev;
unsigned long flags;
spin_lock_irqsave(&req->lock, flags);
if (req->state != MEDIA_REQUEST_STATE_IDLE &&
req->state != MEDIA_REQUEST_STATE_COMPLETE) {
dev_dbg(mdev->dev,
"request: %s not in idle or complete state, cannot reinit\n",
req->debug_str);
spin_unlock_irqrestore(&req->lock, flags);
return -EBUSY;
}
if (req->access_count) {
dev_dbg(mdev->dev,
"request: %s is being accessed, cannot reinit\n",
req->debug_str);
spin_unlock_irqrestore(&req->lock, flags);
return -EBUSY;
}
req->state = MEDIA_REQUEST_STATE_CLEANING;
spin_unlock_irqrestore(&req->lock, flags);
media_request_clean(req);
spin_lock_irqsave(&req->lock, flags);
req->state = MEDIA_REQUEST_STATE_IDLE;
spin_unlock_irqrestore(&req->lock, flags);
return 0;
}
static long media_request_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct media_request *req = filp->private_data;
switch (cmd) {
case MEDIA_REQUEST_IOC_QUEUE:
return media_request_ioctl_queue(req);
case MEDIA_REQUEST_IOC_REINIT:
return media_request_ioctl_reinit(req);
default:
return -ENOIOCTLCMD;
}
}
static const struct file_operations request_fops = {
.owner = THIS_MODULE,
.poll = media_request_poll,
.unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
.release = media_request_close,
};
struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
struct fd f;
struct media_request *req;
if (!mdev || !mdev->ops ||
!mdev->ops->req_validate || !mdev->ops->req_queue)
return ERR_PTR(-EBADR);
f = fdget(request_fd);
if (!f.file)
goto err_no_req_fd;
if (f.file->f_op != &request_fops)
goto err_fput;
req = f.file->private_data;
if (req->mdev != mdev)
goto err_fput;
/*
* Note: as long as someone has an open filehandle of the request,
* the request can never be released. The fdget() above ensures that
* even if userspace closes the request filehandle, the release()
* fop won't be called, so the media_request_get() always succeeds
* and there is no race condition where the request was released
* before media_request_get() is called.
*/
media_request_get(req);
fdput(f);
return req;
err_fput:
fdput(f);
err_no_req_fd:
dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);
int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
struct media_request *req;
struct file *filp;
int fd;
int ret;
/* Either both are NULL or both are non-NULL */
if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
return -ENOMEM;
if (mdev->ops->req_alloc)
req = mdev->ops->req_alloc(mdev);
else
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto err_free_req;
}
filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
if (IS_ERR(filp)) {
ret = PTR_ERR(filp);
goto err_put_fd;
}
filp->private_data = req;
req->mdev = mdev;
req->state = MEDIA_REQUEST_STATE_IDLE;
req->num_incomplete_objects = 0;
kref_init(&req->kref);
INIT_LIST_HEAD(&req->objects);
spin_lock_init(&req->lock);
init_waitqueue_head(&req->poll_wait);
req->updating_count = 0;
req->access_count = 0;
*alloc_fd = fd;
snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
atomic_inc_return(&mdev->request_id), fd);
dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);
fd_install(fd, filp);
return 0;
err_put_fd:
put_unused_fd(fd);
err_free_req:
if (mdev->ops->req_free)
mdev->ops->req_free(req);
else
kfree(req);
return ret;
}
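/*
 * Illustrative userspace flow (not part of this file; error handling
 * omitted): allocate a request on the media device, queue work against it
 * on a video node, wait for completion, then recycle it:
 *
 *    int req_fd;
 *
 *    ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd);
 *    // associate buffers/controls with the request via req_fd, e.g. by
 *    // setting V4L2_BUF_FLAG_REQUEST_FD in v4l2_buffer.flags
 *    ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE);
 *    poll(&(struct pollfd){ .fd = req_fd, .events = POLLPRI }, 1, -1);
 *    ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);
 */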
static void media_request_object_release(struct kref *kref)
{
struct media_request_object *obj =
container_of(kref, struct media_request_object, kref);
struct media_request *req = obj->req;
if (WARN_ON(req))
media_request_object_unbind(obj);
obj->ops->release(obj);
}
struct media_request_object *
media_request_object_find(struct media_request *req,
const struct media_request_object_ops *ops,
void *priv)
{
struct media_request_object *obj;
struct media_request_object *found = NULL;
unsigned long flags;
if (WARN_ON(!ops || !priv))
return NULL;
spin_lock_irqsave(&req->lock, flags);
list_for_each_entry(obj, &req->objects, list) {
if (obj->ops == ops && obj->priv == priv) {
media_request_object_get(obj);
found = obj;
break;
}
}
spin_unlock_irqrestore(&req->lock, flags);
return found;
}
EXPORT_SYMBOL_GPL(media_request_object_find);
void media_request_object_put(struct media_request_object *obj)
{
kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);
void media_request_object_init(struct media_request_object *obj)
{
obj->ops = NULL;
obj->req = NULL;
obj->priv = NULL;
obj->completed = false;
INIT_LIST_HEAD(&obj->list);
kref_init(&obj->kref);
}
EXPORT_SYMBOL_GPL(media_request_object_init);
int media_request_object_bind(struct media_request *req,
const struct media_request_object_ops *ops,
void *priv, bool is_buffer,
struct media_request_object *obj)
{
unsigned long flags;
int ret = -EBUSY;
if (WARN_ON(!ops->release))
return -EBADR;
spin_lock_irqsave(&req->lock, flags);
if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING &&
req->state != MEDIA_REQUEST_STATE_QUEUED))
goto unlock;
obj->req = req;
obj->ops = ops;
obj->priv = priv;
if (is_buffer)
list_add_tail(&obj->list, &req->objects);
else
list_add(&obj->list, &req->objects);
req->num_incomplete_objects++;
ret = 0;
unlock:
spin_unlock_irqrestore(&req->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);
void media_request_object_unbind(struct media_request_object *obj)
{
struct media_request *req = obj->req;
unsigned long flags;
bool completed = false;
if (WARN_ON(!req))
return;
spin_lock_irqsave(&req->lock, flags);
list_del(&obj->list);
obj->req = NULL;
if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
goto unlock;
if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
goto unlock;
if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
if (!obj->completed)
req->num_incomplete_objects--;
goto unlock;
}
if (WARN_ON(!req->num_incomplete_objects))
goto unlock;
req->num_incomplete_objects--;
if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
!req->num_incomplete_objects) {
req->state = MEDIA_REQUEST_STATE_COMPLETE;
completed = true;
wake_up_interruptible_all(&req->poll_wait);
}
unlock:
spin_unlock_irqrestore(&req->lock, flags);
if (obj->ops->unbind)
obj->ops->unbind(obj);
if (completed)
media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);
void media_request_object_complete(struct media_request_object *obj)
{
struct media_request *req = obj->req;
unsigned long flags;
bool completed = false;
spin_lock_irqsave(&req->lock, flags);
if (obj->completed)
goto unlock;
obj->completed = true;
if (WARN_ON(!req->num_incomplete_objects) ||
WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
goto unlock;
if (!--req->num_incomplete_objects) {
req->state = MEDIA_REQUEST_STATE_COMPLETE;
wake_up_interruptible_all(&req->poll_wait);
completed = true;
}
unlock:
spin_unlock_irqrestore(&req->lock, flags);
if (completed)
media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);
| linux-master | drivers/media/mc/mc-request.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Media entity
*
* Copyright (C) 2010 Nokia Corporation
*
* Contacts: Laurent Pinchart <[email protected]>
* Sakari Ailus <[email protected]>
*/
#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <media/media-entity.h>
#include <media/media-device.h>
static inline const char *intf_type(struct media_interface *intf)
{
switch (intf->type) {
case MEDIA_INTF_T_DVB_FE:
return "dvb-frontend";
case MEDIA_INTF_T_DVB_DEMUX:
return "dvb-demux";
case MEDIA_INTF_T_DVB_DVR:
return "dvb-dvr";
case MEDIA_INTF_T_DVB_CA:
return "dvb-ca";
case MEDIA_INTF_T_DVB_NET:
return "dvb-net";
case MEDIA_INTF_T_V4L_VIDEO:
return "v4l-video";
case MEDIA_INTF_T_V4L_VBI:
return "v4l-vbi";
case MEDIA_INTF_T_V4L_RADIO:
return "v4l-radio";
case MEDIA_INTF_T_V4L_SUBDEV:
return "v4l-subdev";
case MEDIA_INTF_T_V4L_SWRADIO:
return "v4l-swradio";
case MEDIA_INTF_T_V4L_TOUCH:
return "v4l-touch";
default:
return "unknown-intf";
}
}
static inline const char *link_type_name(struct media_link *link)
{
switch (link->flags & MEDIA_LNK_FL_LINK_TYPE) {
case MEDIA_LNK_FL_DATA_LINK:
return "data";
case MEDIA_LNK_FL_INTERFACE_LINK:
return "interface";
case MEDIA_LNK_FL_ANCILLARY_LINK:
return "ancillary";
default:
return "unknown";
}
}
__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum,
struct media_device *mdev)
{
int idx_max;
idx_max = ALIGN(mdev->entity_internal_idx_max + 1, BITS_PER_LONG);
ent_enum->bmap = bitmap_zalloc(idx_max, GFP_KERNEL);
if (!ent_enum->bmap)
return -ENOMEM;
ent_enum->idx_max = idx_max;
return 0;
}
EXPORT_SYMBOL_GPL(media_entity_enum_init);
void media_entity_enum_cleanup(struct media_entity_enum *ent_enum)
{
bitmap_free(ent_enum->bmap);
}
EXPORT_SYMBOL_GPL(media_entity_enum_cleanup);
/**
* dev_dbg_obj - Prints in debug mode a change on some object
*
* @event_name: Name of the event to report. Could be __func__
* @gobj: Pointer to the object
*
* Enabled only if DEBUG or CONFIG_DYNAMIC_DEBUG is defined; otherwise
* it won't produce any code.
*/
static void dev_dbg_obj(const char *event_name, struct media_gobj *gobj)
{
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
switch (media_type(gobj)) {
case MEDIA_GRAPH_ENTITY:
dev_dbg(gobj->mdev->dev,
"%s id %u: entity '%s'\n",
event_name, media_id(gobj),
gobj_to_entity(gobj)->name);
break;
case MEDIA_GRAPH_LINK:
{
struct media_link *link = gobj_to_link(gobj);
dev_dbg(gobj->mdev->dev,
"%s id %u: %s link id %u ==> id %u\n",
event_name, media_id(gobj), link_type_name(link),
media_id(link->gobj0),
media_id(link->gobj1));
break;
}
case MEDIA_GRAPH_PAD:
{
struct media_pad *pad = gobj_to_pad(gobj);
dev_dbg(gobj->mdev->dev,
"%s id %u: %s%spad '%s':%d\n",
event_name, media_id(gobj),
pad->flags & MEDIA_PAD_FL_SINK ? "sink " : "",
pad->flags & MEDIA_PAD_FL_SOURCE ? "source " : "",
pad->entity->name, pad->index);
break;
}
case MEDIA_GRAPH_INTF_DEVNODE:
{
struct media_interface *intf = gobj_to_intf(gobj);
struct media_intf_devnode *devnode = intf_to_devnode(intf);
dev_dbg(gobj->mdev->dev,
"%s id %u: intf_devnode %s - major: %d, minor: %d\n",
event_name, media_id(gobj),
intf_type(intf),
devnode->major, devnode->minor);
break;
}
}
#endif
}
void media_gobj_create(struct media_device *mdev,
enum media_gobj_type type,
struct media_gobj *gobj)
{
BUG_ON(!mdev);
gobj->mdev = mdev;
/* Create a per-type unique object ID */
gobj->id = media_gobj_gen_id(type, ++mdev->id);
switch (type) {
case MEDIA_GRAPH_ENTITY:
list_add_tail(&gobj->list, &mdev->entities);
break;
case MEDIA_GRAPH_PAD:
list_add_tail(&gobj->list, &mdev->pads);
break;
case MEDIA_GRAPH_LINK:
list_add_tail(&gobj->list, &mdev->links);
break;
case MEDIA_GRAPH_INTF_DEVNODE:
list_add_tail(&gobj->list, &mdev->interfaces);
break;
}
mdev->topology_version++;
dev_dbg_obj(__func__, gobj);
}
void media_gobj_destroy(struct media_gobj *gobj)
{
/* Do nothing if the object is not linked. */
if (gobj->mdev == NULL)
return;
dev_dbg_obj(__func__, gobj);
gobj->mdev->topology_version++;
/* Remove the object from mdev list */
list_del(&gobj->list);
gobj->mdev = NULL;
}
/*
* TODO: Get rid of this.
*/
#define MEDIA_ENTITY_MAX_PADS 512
int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
struct media_pad *pads)
{
struct media_device *mdev = entity->graph_obj.mdev;
struct media_pad *iter;
unsigned int i = 0;
if (num_pads >= MEDIA_ENTITY_MAX_PADS)
return -E2BIG;
entity->num_pads = num_pads;
entity->pads = pads;
if (mdev)
mutex_lock(&mdev->graph_mutex);
media_entity_for_each_pad(entity, iter) {
iter->entity = entity;
iter->index = i++;
if (mdev)
media_gobj_create(mdev, MEDIA_GRAPH_PAD,
&iter->graph_obj);
}
if (mdev)
mutex_unlock(&mdev->graph_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(media_entity_pads_init);
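/*
 * Illustrative driver sketch (an assumption, not part of this file): a
 * simple subdev with one sink and one source pad registers them like this;
 * the helper fills in the entity/index fields and creates the pad graph
 * objects:
 *
 *    static struct media_pad pads[2];
 *
 *    pads[0].flags = MEDIA_PAD_FL_SINK;
 *    pads[1].flags = MEDIA_PAD_FL_SOURCE;
 *    ret = media_entity_pads_init(&sd->entity, 2, pads);
 */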
/* -----------------------------------------------------------------------------
* Graph traversal
*/
/**
* media_entity_has_pad_interdep - Check interdependency between two pads
*
* @entity: The entity
* @pad0: The first pad index
* @pad1: The second pad index
*
* This function checks the interdependency inside the entity between @pad0
* and @pad1. If two pads are interdependent they are part of the same pipeline
* and enabling one of the pads means that the other pad will become "locked"
* and doesn't allow configuration changes.
*
* This function uses the &media_entity_operations.has_pad_interdep() operation
* to check the dependency inside the entity between @pad0 and @pad1. If the
* has_pad_interdep operation is not implemented, all pads of the entity are
* considered to be interdependent.
*
* One of @pad0 and @pad1 must be a sink pad and the other one a source pad.
* The function returns false if both pads are sinks or sources.
*
* The caller must hold entity->graph_obj.mdev->mutex.
*
* Return: true if the pads are connected internally and false otherwise.
*/
static bool media_entity_has_pad_interdep(struct media_entity *entity,
unsigned int pad0, unsigned int pad1)
{
if (pad0 >= entity->num_pads || pad1 >= entity->num_pads)
return false;
if (entity->pads[pad0].flags & entity->pads[pad1].flags &
(MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_SOURCE))
return false;
if (!entity->ops || !entity->ops->has_pad_interdep)
return true;
return entity->ops->has_pad_interdep(entity, pad0, pad1);
}
static struct media_entity *
media_entity_other(struct media_entity *entity, struct media_link *link)
{
if (link->source->entity == entity)
return link->sink->entity;
else
return link->source->entity;
}
/* push an entity to traversal stack */
static void stack_push(struct media_graph *graph,
struct media_entity *entity)
{
if (graph->top == MEDIA_ENTITY_ENUM_MAX_DEPTH - 1) {
WARN_ON(1);
return;
}
graph->top++;
graph->stack[graph->top].link = entity->links.next;
graph->stack[graph->top].entity = entity;
}
static struct media_entity *stack_pop(struct media_graph *graph)
{
struct media_entity *entity;
entity = graph->stack[graph->top].entity;
graph->top--;
return entity;
}
#define link_top(en) ((en)->stack[(en)->top].link)
#define stack_top(en) ((en)->stack[(en)->top].entity)
/**
* media_graph_walk_init - Allocate resources for graph walk
* @graph: Media graph structure that will be used to walk the graph
* @mdev: Media device
*
* Reserve resources for graph walk in media device's current
* state. The memory must be released using
* media_graph_walk_cleanup().
*
* Returns error on failure, zero on success.
*/
__must_check int media_graph_walk_init(
struct media_graph *graph, struct media_device *mdev)
{
return media_entity_enum_init(&graph->ent_enum, mdev);
}
EXPORT_SYMBOL_GPL(media_graph_walk_init);
/**
* media_graph_walk_cleanup - Release resources related to graph walking
* @graph: Media graph structure that was used to walk the graph
*/
void media_graph_walk_cleanup(struct media_graph *graph)
{
media_entity_enum_cleanup(&graph->ent_enum);
}
EXPORT_SYMBOL_GPL(media_graph_walk_cleanup);
void media_graph_walk_start(struct media_graph *graph,
struct media_entity *entity)
{
media_entity_enum_zero(&graph->ent_enum);
media_entity_enum_set(&graph->ent_enum, entity);
graph->top = 0;
graph->stack[graph->top].entity = NULL;
stack_push(graph, entity);
dev_dbg(entity->graph_obj.mdev->dev,
"begin graph walk at '%s'\n", entity->name);
}
EXPORT_SYMBOL_GPL(media_graph_walk_start);
static void media_graph_walk_iter(struct media_graph *graph)
{
struct media_entity *entity = stack_top(graph);
struct media_link *link;
struct media_entity *next;
link = list_entry(link_top(graph), typeof(*link), list);
/* If the link is not a data link, don't follow it */
if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) != MEDIA_LNK_FL_DATA_LINK) {
link_top(graph) = link_top(graph)->next;
return;
}
/* The link is not enabled so we do not follow. */
if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
link_top(graph) = link_top(graph)->next;
dev_dbg(entity->graph_obj.mdev->dev,
"walk: skipping disabled link '%s':%u -> '%s':%u\n",
link->source->entity->name, link->source->index,
link->sink->entity->name, link->sink->index);
return;
}
/* Get the entity at the other end of the link. */
next = media_entity_other(entity, link);
/* Has the entity already been visited? */
if (media_entity_enum_test_and_set(&graph->ent_enum, next)) {
link_top(graph) = link_top(graph)->next;
dev_dbg(entity->graph_obj.mdev->dev,
"walk: skipping entity '%s' (already seen)\n",
next->name);
return;
}
/* Push the new entity to stack and start over. */
link_top(graph) = link_top(graph)->next;
stack_push(graph, next);
dev_dbg(entity->graph_obj.mdev->dev, "walk: pushing '%s' on stack\n",
next->name);
lockdep_assert_held(&entity->graph_obj.mdev->graph_mutex);
}
struct media_entity *media_graph_walk_next(struct media_graph *graph)
{
struct media_entity *entity;
if (stack_top(graph) == NULL)
return NULL;
/*
* Depth first search. Push entity to stack and continue from
* top of the stack until no more entities on the level can be
* found.
*/
while (link_top(graph) != &stack_top(graph)->links)
media_graph_walk_iter(graph);
entity = stack_pop(graph);
dev_dbg(entity->graph_obj.mdev->dev,
"walk: returning entity '%s'\n", entity->name);
return entity;
}
EXPORT_SYMBOL_GPL(media_graph_walk_next);
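/*
 * Editor's note: a minimal sketch of the graph walk API lifecycle
 * (hypothetical caller, not part of this file). The walk itself must run
 * with the media device's graph_mutex held.
 */
#if 0 /* illustrative only */
static int my_count_connected_entities(struct media_device *mdev,
				       struct media_entity *start)
{
	struct media_graph graph;
	struct media_entity *entity;
	int count = 0;
	int ret;

	ret = media_graph_walk_init(&graph, mdev);
	if (ret)
		return ret;

	mutex_lock(&mdev->graph_mutex);
	media_graph_walk_start(&graph, start);
	while ((entity = media_graph_walk_next(&graph)))
		count++;
	mutex_unlock(&mdev->graph_mutex);

	media_graph_walk_cleanup(&graph);
	return count;
}
#endif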
/* -----------------------------------------------------------------------------
* Pipeline management
*/
/*
* The pipeline traversal stack stores pads that are reached during graph
* traversal, with a list of links to be visited to continue the traversal.
* When a new pad is reached, an entry is pushed on the top of the stack and
* points to the incoming pad and the first link of the entity.
*
* To find further pads in the pipeline, the traversal algorithm follows
* internal pad dependencies in the entity, and then links in the graph. It
* does so by iterating over all links of the entity, and following enabled
* links that originate from a pad that is internally connected to the incoming
* pad, as reported by the media_entity_has_pad_interdep() function.
*/
/**
* struct media_pipeline_walk_entry - Entry in the pipeline traversal stack
*
* @pad: The media pad being visited
* @links: Links left to be visited
*/
struct media_pipeline_walk_entry {
struct media_pad *pad;
struct list_head *links;
};
/**
* struct media_pipeline_walk - State used by the media pipeline traversal
* algorithm
*
* @mdev: The media device
* @stack: Depth-first search stack
* @stack.size: Number of allocated entries in @stack.entries
* @stack.top: Index of the top stack entry (-1 if the stack is empty)
* @stack.entries: Stack entries
*/
struct media_pipeline_walk {
struct media_device *mdev;
struct {
unsigned int size;
int top;
struct media_pipeline_walk_entry *entries;
} stack;
};
#define MEDIA_PIPELINE_STACK_GROW_STEP 16
static struct media_pipeline_walk_entry *
media_pipeline_walk_top(struct media_pipeline_walk *walk)
{
return &walk->stack.entries[walk->stack.top];
}
static bool media_pipeline_walk_empty(struct media_pipeline_walk *walk)
{
return walk->stack.top == -1;
}
/* Increase the stack size by MEDIA_PIPELINE_STACK_GROW_STEP elements. */
static int media_pipeline_walk_resize(struct media_pipeline_walk *walk)
{
struct media_pipeline_walk_entry *entries;
unsigned int new_size;
/* Safety check, to avoid stack overflows in case of bugs. */
if (walk->stack.size >= 256)
return -E2BIG;
new_size = walk->stack.size + MEDIA_PIPELINE_STACK_GROW_STEP;
entries = krealloc(walk->stack.entries,
new_size * sizeof(*walk->stack.entries),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
walk->stack.entries = entries;
walk->stack.size = new_size;
return 0;
}
/* Push a new entry on the stack. */
static int media_pipeline_walk_push(struct media_pipeline_walk *walk,
struct media_pad *pad)
{
struct media_pipeline_walk_entry *entry;
int ret;
if (walk->stack.top + 1 >= walk->stack.size) {
ret = media_pipeline_walk_resize(walk);
if (ret)
return ret;
}
walk->stack.top++;
entry = media_pipeline_walk_top(walk);
entry->pad = pad;
entry->links = pad->entity->links.next;
dev_dbg(walk->mdev->dev,
"media pipeline: pushed entry %u: '%s':%u\n",
walk->stack.top, pad->entity->name, pad->index);
return 0;
}
/*
* Move the top entry link cursor to the next link. If all links of the entry
* have been visited, pop the entry itself.
*/
static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
{
struct media_pipeline_walk_entry *entry;
if (WARN_ON(walk->stack.top < 0))
return;
entry = media_pipeline_walk_top(walk);
if (entry->links->next == &entry->pad->entity->links) {
dev_dbg(walk->mdev->dev,
"media pipeline: entry %u has no more links, popping\n",
walk->stack.top);
walk->stack.top--;
return;
}
entry->links = entry->links->next;
dev_dbg(walk->mdev->dev,
"media pipeline: moved entry %u to next link\n",
walk->stack.top);
}
/* Free all memory allocated while walking the pipeline. */
static void media_pipeline_walk_destroy(struct media_pipeline_walk *walk)
{
kfree(walk->stack.entries);
}
/* Add a pad to the pipeline and push it to the stack. */
static int media_pipeline_add_pad(struct media_pipeline *pipe,
struct media_pipeline_walk *walk,
struct media_pad *pad)
{
struct media_pipeline_pad *ppad;
list_for_each_entry(ppad, &pipe->pads, list) {
if (ppad->pad == pad) {
dev_dbg(pad->graph_obj.mdev->dev,
"media pipeline: already contains pad '%s':%u\n",
pad->entity->name, pad->index);
return 0;
}
}
ppad = kzalloc(sizeof(*ppad), GFP_KERNEL);
if (!ppad)
return -ENOMEM;
ppad->pipe = pipe;
ppad->pad = pad;
list_add_tail(&ppad->list, &pipe->pads);
dev_dbg(pad->graph_obj.mdev->dev,
"media pipeline: added pad '%s':%u\n",
pad->entity->name, pad->index);
return media_pipeline_walk_push(walk, pad);
}
/* Explore the next link of the entity at the top of the stack. */
static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
struct media_pipeline_walk *walk)
{
struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk);
struct media_pad *pad;
struct media_link *link;
struct media_pad *local;
struct media_pad *remote;
int ret;
pad = entry->pad;
link = list_entry(entry->links, typeof(*link), list);
media_pipeline_walk_pop(walk);
dev_dbg(walk->mdev->dev,
"media pipeline: exploring link '%s':%u -> '%s':%u\n",
link->source->entity->name, link->source->index,
link->sink->entity->name, link->sink->index);
/* Skip links that are not enabled. */
if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
dev_dbg(walk->mdev->dev,
"media pipeline: skipping link (disabled)\n");
return 0;
}
/* Get the local pad and remote pad. */
if (link->source->entity == pad->entity) {
local = link->source;
remote = link->sink;
} else {
local = link->sink;
remote = link->source;
}
/*
* Skip links whose local pad differs from the incoming pad and is not
* connected internally in the entity to the incoming pad.
*/
if (pad != local &&
!media_entity_has_pad_interdep(pad->entity, pad->index, local->index)) {
dev_dbg(walk->mdev->dev,
"media pipeline: skipping link (no route)\n");
return 0;
}
/*
* Add the local and remote pads of the link to the pipeline and push
* them to the stack, if they're not already present.
*/
ret = media_pipeline_add_pad(pipe, walk, local);
if (ret)
return ret;
ret = media_pipeline_add_pad(pipe, walk, remote);
if (ret)
return ret;
return 0;
}
static void media_pipeline_cleanup(struct media_pipeline *pipe)
{
while (!list_empty(&pipe->pads)) {
struct media_pipeline_pad *ppad;
ppad = list_first_entry(&pipe->pads, typeof(*ppad), list);
list_del(&ppad->list);
kfree(ppad);
}
}
static int media_pipeline_populate(struct media_pipeline *pipe,
struct media_pad *pad)
{
struct media_pipeline_walk walk = { };
struct media_pipeline_pad *ppad;
int ret;
/*
* Populate the media pipeline by walking the media graph, starting
* from @pad.
*/
INIT_LIST_HEAD(&pipe->pads);
pipe->mdev = pad->graph_obj.mdev;
walk.mdev = pipe->mdev;
walk.stack.top = -1;
ret = media_pipeline_add_pad(pipe, &walk, pad);
if (ret)
goto done;
/*
* Use a depth-first search algorithm: as long as the stack is not
* empty, explore the next link of the top entry. The
* media_pipeline_explore_next_link() function will either move to the
* next link, pop the entry if fully visited, or add new entries on
* top.
*/
while (!media_pipeline_walk_empty(&walk)) {
ret = media_pipeline_explore_next_link(pipe, &walk);
if (ret)
goto done;
}
dev_dbg(pad->graph_obj.mdev->dev,
"media pipeline populated, found pads:\n");
list_for_each_entry(ppad, &pipe->pads, list)
dev_dbg(pad->graph_obj.mdev->dev, "- '%s':%u\n",
ppad->pad->entity->name, ppad->pad->index);
WARN_ON(walk.stack.top != -1);
ret = 0;
done:
media_pipeline_walk_destroy(&walk);
if (ret)
media_pipeline_cleanup(pipe);
return ret;
}
__must_check int __media_pipeline_start(struct media_pad *pad,
struct media_pipeline *pipe)
{
struct media_device *mdev = pad->graph_obj.mdev;
struct media_pipeline_pad *err_ppad;
struct media_pipeline_pad *ppad;
int ret;
lockdep_assert_held(&mdev->graph_mutex);
/*
* If the pad is already part of a pipeline, that pipeline must be the
* same as the pipe given to media_pipeline_start().
*/
if (WARN_ON(pad->pipe && pad->pipe != pipe))
return -EINVAL;
/*
* If the pipeline has already been started, it is guaranteed to be
* valid, so just increase the start count.
*/
if (pipe->start_count) {
pipe->start_count++;
return 0;
}
/*
* Populate the pipeline. This populates the media_pipeline pads list
* with media_pipeline_pad instances for each pad found during graph
* walk.
*/
ret = media_pipeline_populate(pipe, pad);
if (ret)
return ret;
/*
* Now that all the pads in the pipeline have been gathered, perform
* the validation steps.
*/
list_for_each_entry(ppad, &pipe->pads, list) {
struct media_pad *pad = ppad->pad;
struct media_entity *entity = pad->entity;
bool has_enabled_link = false;
bool has_link = false;
struct media_link *link;
dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name,
pad->index);
/*
* 1. Ensure that the pad doesn't already belong to a different
* pipeline.
*/
if (pad->pipe) {
dev_dbg(mdev->dev, "Failed to start pipeline: pad '%s':%u busy\n",
pad->entity->name, pad->index);
ret = -EBUSY;
goto error;
}
/*
* 2. Validate all active links whose sink is the current pad.
* Validation of the source pads is performed in the context of
* the connected sink pad to avoid duplicating checks.
*/
for_each_media_entity_data_link(entity, link) {
/* Skip links unrelated to the current pad. */
if (link->sink != pad && link->source != pad)
continue;
/* Record if the pad has links and enabled links. */
if (link->flags & MEDIA_LNK_FL_ENABLED)
has_enabled_link = true;
has_link = true;
/*
* Validate the link if it's enabled and has the
* current pad as its sink.
*/
if (!(link->flags & MEDIA_LNK_FL_ENABLED))
continue;
if (link->sink != pad)
continue;
if (!entity->ops || !entity->ops->link_validate)
continue;
ret = entity->ops->link_validate(link);
if (ret) {
dev_dbg(mdev->dev,
"Link '%s':%u -> '%s':%u failed validation: %d\n",
link->source->entity->name,
link->source->index,
link->sink->entity->name,
link->sink->index, ret);
goto error;
}
dev_dbg(mdev->dev,
"Link '%s':%u -> '%s':%u is valid\n",
link->source->entity->name,
link->source->index,
link->sink->entity->name,
link->sink->index);
}
/*
* 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set,
* ensure that it has either no link or an enabled link.
*/
if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && has_link &&
!has_enabled_link) {
dev_dbg(mdev->dev,
"Pad '%s':%u must be connected by an enabled link\n",
pad->entity->name, pad->index);
ret = -ENOLINK;
goto error;
}
/* Validation passed, store the pipe pointer in the pad. */
pad->pipe = pipe;
}
pipe->start_count++;
return 0;
error:
/*
* Link validation on the graph failed. Revert what we did and
* return the error.
*/
list_for_each_entry(err_ppad, &pipe->pads, list) {
if (err_ppad == ppad)
break;
err_ppad->pad->pipe = NULL;
}
media_pipeline_cleanup(pipe);
return ret;
}
EXPORT_SYMBOL_GPL(__media_pipeline_start);
__must_check int media_pipeline_start(struct media_pad *pad,
struct media_pipeline *pipe)
{
struct media_device *mdev = pad->graph_obj.mdev;
int ret;
mutex_lock(&mdev->graph_mutex);
ret = __media_pipeline_start(pad, pipe);
mutex_unlock(&mdev->graph_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(media_pipeline_start);
void __media_pipeline_stop(struct media_pad *pad)
{
struct media_pipeline *pipe = pad->pipe;
struct media_pipeline_pad *ppad;
/*
* If the following check fails, the driver has performed an
* unbalanced call to media_pipeline_stop().
*/
if (WARN_ON(!pipe))
return;
if (--pipe->start_count)
return;
list_for_each_entry(ppad, &pipe->pads, list)
ppad->pad->pipe = NULL;
media_pipeline_cleanup(pipe);
if (pipe->allocated)
kfree(pipe);
}
EXPORT_SYMBOL_GPL(__media_pipeline_stop);
void media_pipeline_stop(struct media_pad *pad)
{
struct media_device *mdev = pad->graph_obj.mdev;
mutex_lock(&mdev->graph_mutex);
__media_pipeline_stop(pad);
mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_pipeline_stop);
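/*
 * Editor's note: a hedged sketch of the usual start/stop pairing around
 * streaming (hypothetical driver code, not part of this file; the
 * my_video_device type and my_hw_start_streaming() helper are assumptions).
 * The pipe object must stay alive for as long as the pipeline is started.
 */
#if 0 /* illustrative only */
static int my_stream_on(struct my_video_device *vdev)
{
	int ret;

	/* Validates the graph and marks every pad with the pipeline. */
	ret = media_pipeline_start(&vdev->pad, &vdev->pipe);
	if (ret)
		return ret;

	ret = my_hw_start_streaming(vdev); /* assumed helper */
	if (ret)
		media_pipeline_stop(&vdev->pad);

	return ret;
}
#endif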
__must_check int media_pipeline_alloc_start(struct media_pad *pad)
{
struct media_device *mdev = pad->graph_obj.mdev;
struct media_pipeline *new_pipe = NULL;
struct media_pipeline *pipe;
int ret;
mutex_lock(&mdev->graph_mutex);
/*
* Is the pad already part of a pipeline? If not, we need to allocate
* a pipe.
*/
pipe = media_pad_pipeline(pad);
if (!pipe) {
new_pipe = kzalloc(sizeof(*new_pipe), GFP_KERNEL);
if (!new_pipe) {
ret = -ENOMEM;
goto out;
}
pipe = new_pipe;
pipe->allocated = true;
}
ret = __media_pipeline_start(pad, pipe);
if (ret)
kfree(new_pipe);
out:
mutex_unlock(&mdev->graph_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(media_pipeline_alloc_start);
struct media_pad *
__media_pipeline_pad_iter_next(struct media_pipeline *pipe,
struct media_pipeline_pad_iter *iter,
struct media_pad *pad)
{
if (!pad)
iter->cursor = pipe->pads.next;
if (iter->cursor == &pipe->pads)
return NULL;
pad = list_entry(iter->cursor, struct media_pipeline_pad, list)->pad;
iter->cursor = iter->cursor->next;
return pad;
}
EXPORT_SYMBOL_GPL(__media_pipeline_pad_iter_next);
int media_pipeline_entity_iter_init(struct media_pipeline *pipe,
struct media_pipeline_entity_iter *iter)
{
return media_entity_enum_init(&iter->ent_enum, pipe->mdev);
}
EXPORT_SYMBOL_GPL(media_pipeline_entity_iter_init);
void media_pipeline_entity_iter_cleanup(struct media_pipeline_entity_iter *iter)
{
media_entity_enum_cleanup(&iter->ent_enum);
}
EXPORT_SYMBOL_GPL(media_pipeline_entity_iter_cleanup);
struct media_entity *
__media_pipeline_entity_iter_next(struct media_pipeline *pipe,
struct media_pipeline_entity_iter *iter,
struct media_entity *entity)
{
if (!entity)
iter->cursor = pipe->pads.next;
while (iter->cursor != &pipe->pads) {
struct media_pipeline_pad *ppad;
struct media_entity *entity;
ppad = list_entry(iter->cursor, struct media_pipeline_pad, list);
entity = ppad->pad->entity;
iter->cursor = iter->cursor->next;
if (!media_entity_enum_test_and_set(&iter->ent_enum, entity))
return entity;
}
return NULL;
}
EXPORT_SYMBOL_GPL(__media_pipeline_entity_iter_next);
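/*
 * Editor's note: a minimal sketch of the pad and entity iterator macros
 * declared in media-entity.h on top of these helpers (hypothetical caller,
 * not part of this file).
 */
#if 0 /* illustrative only */
static int my_dump_pipeline(struct media_pipeline *pipe)
{
	struct media_pipeline_pad_iter pad_iter;
	struct media_pipeline_entity_iter ent_iter;
	struct media_entity *entity;
	struct media_pad *pad;
	int ret;

	media_pipeline_for_each_pad(pipe, &pad_iter, pad)
		pr_info("pad '%s':%u\n", pad->entity->name, pad->index);

	/* The entity iterator needs an enum allocation, hence init/cleanup. */
	ret = media_pipeline_entity_iter_init(pipe, &ent_iter);
	if (ret)
		return ret;

	media_pipeline_for_each_entity(pipe, &ent_iter, entity)
		pr_info("entity '%s'\n", entity->name);

	media_pipeline_entity_iter_cleanup(&ent_iter);
	return 0;
}
#endif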
/* -----------------------------------------------------------------------------
* Links management
*/
static struct media_link *media_add_link(struct list_head *head)
{
struct media_link *link;
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (link == NULL)
return NULL;
list_add_tail(&link->list, head);
return link;
}
static void __media_entity_remove_link(struct media_entity *entity,
struct media_link *link)
{
struct media_link *rlink, *tmp;
struct media_entity *remote;
/* Remove the reverse links for a data link. */
if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == MEDIA_LNK_FL_DATA_LINK) {
if (link->source->entity == entity)
remote = link->sink->entity;
else
remote = link->source->entity;
list_for_each_entry_safe(rlink, tmp, &remote->links, list) {
if (rlink != link->reverse)
continue;
if (link->source->entity == entity)
remote->num_backlinks--;
/* Remove the remote link */
list_del(&rlink->list);
media_gobj_destroy(&rlink->graph_obj);
kfree(rlink);
if (--remote->num_links == 0)
break;
}
}
list_del(&link->list);
media_gobj_destroy(&link->graph_obj);
kfree(link);
}
int media_get_pad_index(struct media_entity *entity, u32 pad_type,
enum media_pad_signal_type sig_type)
{
unsigned int i;
if (!entity)
return -EINVAL;
for (i = 0; i < entity->num_pads; i++) {
if ((entity->pads[i].flags &
(MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_SOURCE)) != pad_type)
continue;
if (entity->pads[i].sig_type == sig_type)
return i;
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(media_get_pad_index);
int
media_create_pad_link(struct media_entity *source, u16 source_pad,
struct media_entity *sink, u16 sink_pad, u32 flags)
{
struct media_link *link;
struct media_link *backlink;
if (WARN_ON(!source || !sink) ||
WARN_ON(source_pad >= source->num_pads) ||
WARN_ON(sink_pad >= sink->num_pads))
return -EINVAL;
if (WARN_ON(!(source->pads[source_pad].flags & MEDIA_PAD_FL_SOURCE)))
return -EINVAL;
if (WARN_ON(!(sink->pads[sink_pad].flags & MEDIA_PAD_FL_SINK)))
return -EINVAL;
link = media_add_link(&source->links);
if (link == NULL)
return -ENOMEM;
link->source = &source->pads[source_pad];
link->sink = &sink->pads[sink_pad];
link->flags = flags & ~MEDIA_LNK_FL_INTERFACE_LINK;
/* Initialize graph object embedded at the new link */
media_gobj_create(source->graph_obj.mdev, MEDIA_GRAPH_LINK,
&link->graph_obj);
/*
 * Create the backlink. Backlinks are used to help graph traversal and
 * are not reported to userspace.
 */
backlink = media_add_link(&sink->links);
if (backlink == NULL) {
__media_entity_remove_link(source, link);
return -ENOMEM;
}
backlink->source = &source->pads[source_pad];
backlink->sink = &sink->pads[sink_pad];
backlink->flags = flags;
backlink->is_backlink = true;
/* Initialize graph object embedded at the new link */
media_gobj_create(sink->graph_obj.mdev, MEDIA_GRAPH_LINK,
&backlink->graph_obj);
link->reverse = backlink;
backlink->reverse = link;
sink->num_backlinks++;
sink->num_links++;
source->num_links++;
return 0;
}
EXPORT_SYMBOL_GPL(media_create_pad_link);
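/*
 * Editor's note: a minimal sketch of wiring two entities together with
 * media_create_pad_link() (hypothetical driver code, not part of this
 * file). The pad indexes are assumptions about the example entities.
 */
#if 0 /* illustrative only */
static int my_link_sensor_to_capture(struct media_entity *sensor,
				     struct media_entity *capture)
{
	/* Sensor pad 0 is a source, capture pad 0 a sink (assumed). */
	return media_create_pad_link(sensor, 0, capture, 0,
				     MEDIA_LNK_FL_ENABLED);
}
#endif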
int media_create_pad_links(const struct media_device *mdev,
const u32 source_function,
struct media_entity *source,
const u16 source_pad,
const u32 sink_function,
struct media_entity *sink,
const u16 sink_pad,
u32 flags,
const bool allow_both_undefined)
{
struct media_entity *entity;
unsigned int function;
int ret;
/* Trivial case: 1:1 relation */
if (source && sink)
return media_create_pad_link(source, source_pad,
sink, sink_pad, flags);
/* Worst-case scenario: n:n relation */
if (!source && !sink) {
if (!allow_both_undefined)
return 0;
media_device_for_each_entity(source, mdev) {
if (source->function != source_function)
continue;
media_device_for_each_entity(sink, mdev) {
if (sink->function != sink_function)
continue;
ret = media_create_pad_link(source, source_pad,
sink, sink_pad,
flags);
if (ret)
return ret;
flags &= ~(MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE);
}
}
return 0;
}
/* Handle 1:n and n:1 cases */
if (source)
function = sink_function;
else
function = source_function;
media_device_for_each_entity(entity, mdev) {
if (entity->function != function)
continue;
if (source)
ret = media_create_pad_link(source, source_pad,
entity, sink_pad, flags);
else
ret = media_create_pad_link(entity, source_pad,
sink, sink_pad, flags);
if (ret)
return ret;
flags &= ~(MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
}
return 0;
}
EXPORT_SYMBOL_GPL(media_create_pad_links);
void __media_entity_remove_links(struct media_entity *entity)
{
struct media_link *link, *tmp;
list_for_each_entry_safe(link, tmp, &entity->links, list)
__media_entity_remove_link(entity, link);
entity->num_links = 0;
entity->num_backlinks = 0;
}
EXPORT_SYMBOL_GPL(__media_entity_remove_links);
void media_entity_remove_links(struct media_entity *entity)
{
struct media_device *mdev = entity->graph_obj.mdev;
/* Do nothing if the entity is not registered. */
if (mdev == NULL)
return;
mutex_lock(&mdev->graph_mutex);
__media_entity_remove_links(entity);
mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_entity_remove_links);
static int __media_entity_setup_link_notify(struct media_link *link, u32 flags)
{
int ret;
/* Notify both entities. */
ret = media_entity_call(link->source->entity, link_setup,
link->source, link->sink, flags);
if (ret < 0 && ret != -ENOIOCTLCMD)
return ret;
ret = media_entity_call(link->sink->entity, link_setup,
link->sink, link->source, flags);
if (ret < 0 && ret != -ENOIOCTLCMD) {
media_entity_call(link->source->entity, link_setup,
link->source, link->sink, link->flags);
return ret;
}
link->flags = flags;
link->reverse->flags = link->flags;
return 0;
}
int __media_entity_setup_link(struct media_link *link, u32 flags)
{
const u32 mask = MEDIA_LNK_FL_ENABLED;
struct media_device *mdev;
struct media_pad *source, *sink;
int ret = -EBUSY;
if (link == NULL)
return -EINVAL;
/* The non-modifiable link flags must not be modified. */
if ((link->flags & ~mask) != (flags & ~mask))
return -EINVAL;
if (link->flags & MEDIA_LNK_FL_IMMUTABLE)
return link->flags == flags ? 0 : -EINVAL;
if (link->flags == flags)
return 0;
source = link->source;
sink = link->sink;
if (!(link->flags & MEDIA_LNK_FL_DYNAMIC) &&
(media_pad_is_streaming(source) || media_pad_is_streaming(sink)))
return -EBUSY;
mdev = source->graph_obj.mdev;
if (mdev->ops && mdev->ops->link_notify) {
ret = mdev->ops->link_notify(link, flags,
MEDIA_DEV_NOTIFY_PRE_LINK_CH);
if (ret < 0)
return ret;
}
ret = __media_entity_setup_link_notify(link, flags);
if (mdev->ops && mdev->ops->link_notify)
mdev->ops->link_notify(link, flags,
MEDIA_DEV_NOTIFY_POST_LINK_CH);
return ret;
}
EXPORT_SYMBOL_GPL(__media_entity_setup_link);
int media_entity_setup_link(struct media_link *link, u32 flags)
{
int ret;
mutex_lock(&link->graph_obj.mdev->graph_mutex);
ret = __media_entity_setup_link(link, flags);
mutex_unlock(&link->graph_obj.mdev->graph_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(media_entity_setup_link);
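/*
 * Editor's note: a hedged sketch of toggling a link at runtime with
 * media_entity_find_link() and media_entity_setup_link() (hypothetical
 * caller, not part of this file). Only the ENABLED bit is modified so the
 * immutable-flags check in __media_entity_setup_link() is satisfied.
 */
#if 0 /* illustrative only */
static int my_enable_link(struct media_pad *source, struct media_pad *sink,
			  bool enable)
{
	struct media_link *link;
	u32 flags;

	link = media_entity_find_link(source, sink);
	if (!link)
		return -ENOLINK;

	flags = enable ? link->flags | MEDIA_LNK_FL_ENABLED
		       : link->flags & ~MEDIA_LNK_FL_ENABLED;

	/* Takes the graph mutex and notifies both entities. */
	return media_entity_setup_link(link, flags);
}
#endif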
struct media_link *
media_entity_find_link(struct media_pad *source, struct media_pad *sink)
{
struct media_link *link;
for_each_media_entity_data_link(source->entity, link) {
if (link->source->entity == source->entity &&
link->source->index == source->index &&
link->sink->entity == sink->entity &&
link->sink->index == sink->index)
return link;
}
return NULL;
}
EXPORT_SYMBOL_GPL(media_entity_find_link);
struct media_pad *media_pad_remote_pad_first(const struct media_pad *pad)
{
struct media_link *link;
for_each_media_entity_data_link(pad->entity, link) {
if (!(link->flags & MEDIA_LNK_FL_ENABLED))
continue;
if (link->source == pad)
return link->sink;
if (link->sink == pad)
return link->source;
}
return NULL;
}
EXPORT_SYMBOL_GPL(media_pad_remote_pad_first);
struct media_pad *
media_entity_remote_pad_unique(const struct media_entity *entity,
unsigned int type)
{
struct media_pad *pad = NULL;
struct media_link *link;
list_for_each_entry(link, &entity->links, list) {
struct media_pad *local_pad;
struct media_pad *remote_pad;
if (((link->flags & MEDIA_LNK_FL_LINK_TYPE) !=
MEDIA_LNK_FL_DATA_LINK) ||
!(link->flags & MEDIA_LNK_FL_ENABLED))
continue;
if (type == MEDIA_PAD_FL_SOURCE) {
local_pad = link->sink;
remote_pad = link->source;
} else {
local_pad = link->source;
remote_pad = link->sink;
}
if (local_pad->entity == entity) {
if (pad)
return ERR_PTR(-ENOTUNIQ);
pad = remote_pad;
}
}
if (!pad)
return ERR_PTR(-ENOLINK);
return pad;
}
EXPORT_SYMBOL_GPL(media_entity_remote_pad_unique);
struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad)
{
struct media_pad *found_pad = NULL;
struct media_link *link;
list_for_each_entry(link, &pad->entity->links, list) {
struct media_pad *remote_pad;
if (!(link->flags & MEDIA_LNK_FL_ENABLED))
continue;
if (link->sink == pad)
remote_pad = link->source;
else if (link->source == pad)
remote_pad = link->sink;
else
continue;
if (found_pad)
return ERR_PTR(-ENOTUNIQ);
found_pad = remote_pad;
}
if (!found_pad)
return ERR_PTR(-ENOLINK);
return found_pad;
}
EXPORT_SYMBOL_GPL(media_pad_remote_pad_unique);
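/*
 * Editor's note: a minimal sketch of resolving the single entity connected
 * to a pad (hypothetical caller, not part of this file). The helper returns
 * ERR_PTR(-ENOLINK) or ERR_PTR(-ENOTUNIQ) rather than NULL on failure.
 */
#if 0 /* illustrative only */
static struct media_entity *my_connected_entity(struct media_pad *pad)
{
	struct media_pad *remote = media_pad_remote_pad_unique(pad);

	if (IS_ERR(remote))
		return ERR_CAST(remote);

	return remote->entity;
}
#endif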
int media_entity_get_fwnode_pad(struct media_entity *entity,
const struct fwnode_handle *fwnode,
unsigned long direction_flags)
{
struct fwnode_endpoint endpoint;
unsigned int i;
int ret;
if (!entity->ops || !entity->ops->get_fwnode_pad) {
for (i = 0; i < entity->num_pads; i++) {
if (entity->pads[i].flags & direction_flags)
return i;
}
return -ENXIO;
}
ret = fwnode_graph_parse_endpoint(fwnode, &endpoint);
if (ret)
return ret;
ret = entity->ops->get_fwnode_pad(entity, &endpoint);
if (ret < 0)
return ret;
if (ret >= entity->num_pads)
return -ENXIO;
if (!(entity->pads[ret].flags & direction_flags))
return -ENXIO;
return ret;
}
EXPORT_SYMBOL_GPL(media_entity_get_fwnode_pad);
struct media_pipeline *media_entity_pipeline(struct media_entity *entity)
{
struct media_pad *pad;
media_entity_for_each_pad(entity, pad) {
if (pad->pipe)
return pad->pipe;
}
return NULL;
}
EXPORT_SYMBOL_GPL(media_entity_pipeline);
struct media_pipeline *media_pad_pipeline(struct media_pad *pad)
{
return pad->pipe;
}
EXPORT_SYMBOL_GPL(media_pad_pipeline);
static void media_interface_init(struct media_device *mdev,
struct media_interface *intf,
u32 gobj_type,
u32 intf_type, u32 flags)
{
intf->type = intf_type;
intf->flags = flags;
INIT_LIST_HEAD(&intf->links);
media_gobj_create(mdev, gobj_type, &intf->graph_obj);
}
/* Functions related to the media interface via device nodes */
struct media_intf_devnode *media_devnode_create(struct media_device *mdev,
u32 type, u32 flags,
u32 major, u32 minor)
{
struct media_intf_devnode *devnode;
devnode = kzalloc(sizeof(*devnode), GFP_KERNEL);
if (!devnode)
return NULL;
devnode->major = major;
devnode->minor = minor;
media_interface_init(mdev, &devnode->intf, MEDIA_GRAPH_INTF_DEVNODE,
type, flags);
return devnode;
}
EXPORT_SYMBOL_GPL(media_devnode_create);
void media_devnode_remove(struct media_intf_devnode *devnode)
{
media_remove_intf_links(&devnode->intf);
media_gobj_destroy(&devnode->intf.graph_obj);
kfree(devnode);
}
EXPORT_SYMBOL_GPL(media_devnode_remove);
struct media_link *media_create_intf_link(struct media_entity *entity,
struct media_interface *intf,
u32 flags)
{
struct media_link *link;
link = media_add_link(&intf->links);
if (link == NULL)
return NULL;
link->intf = intf;
link->entity = entity;
link->flags = flags | MEDIA_LNK_FL_INTERFACE_LINK;
/* Initialize graph object embedded at the new link */
media_gobj_create(intf->graph_obj.mdev, MEDIA_GRAPH_LINK,
&link->graph_obj);
return link;
}
EXPORT_SYMBOL_GPL(media_create_intf_link);
void __media_remove_intf_link(struct media_link *link)
{
list_del(&link->list);
media_gobj_destroy(&link->graph_obj);
kfree(link);
}
EXPORT_SYMBOL_GPL(__media_remove_intf_link);
void media_remove_intf_link(struct media_link *link)
{
struct media_device *mdev = link->graph_obj.mdev;
/* Do nothing if the intf is not registered. */
if (mdev == NULL)
return;
mutex_lock(&mdev->graph_mutex);
__media_remove_intf_link(link);
mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_remove_intf_link);
void __media_remove_intf_links(struct media_interface *intf)
{
struct media_link *link, *tmp;
list_for_each_entry_safe(link, tmp, &intf->links, list)
__media_remove_intf_link(link);
}
EXPORT_SYMBOL_GPL(__media_remove_intf_links);
void media_remove_intf_links(struct media_interface *intf)
{
struct media_device *mdev = intf->graph_obj.mdev;
/* Do nothing if the intf is not registered. */
if (mdev == NULL)
return;
mutex_lock(&mdev->graph_mutex);
__media_remove_intf_links(intf);
mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_remove_intf_links);
struct media_link *media_create_ancillary_link(struct media_entity *primary,
struct media_entity *ancillary)
{
struct media_link *link;
link = media_add_link(&primary->links);
if (!link)
return ERR_PTR(-ENOMEM);
link->gobj0 = &primary->graph_obj;
link->gobj1 = &ancillary->graph_obj;
link->flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_ANCILLARY_LINK;
/* Initialize graph object embedded in the new link */
media_gobj_create(primary->graph_obj.mdev, MEDIA_GRAPH_LINK,
&link->graph_obj);
return link;
}
EXPORT_SYMBOL_GPL(media_create_ancillary_link);
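/*
 * Editor's note: a minimal sketch of pairing a sensor with an ancillary
 * device such as a lens controller (hypothetical driver code, not part of
 * this file). Ancillary links are created immutable and enabled.
 */
#if 0 /* illustrative only */
static int my_bind_lens(struct media_entity *sensor,
			struct media_entity *lens)
{
	struct media_link *link;

	link = media_create_ancillary_link(sensor, lens);

	return PTR_ERR_OR_ZERO(link);
}
#endif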
struct media_link *__media_entity_next_link(struct media_entity *entity,
struct media_link *link,
unsigned long link_type)
{
link = link ? list_next_entry(link, list)
: list_first_entry(&entity->links, typeof(*link), list);
list_for_each_entry_from(link, &entity->links, list)
if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == link_type)
return link;
return NULL;
}
EXPORT_SYMBOL_GPL(__media_entity_next_link);
| linux-master | drivers/media/mc/mc-entity.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Media device node
*
* Copyright (C) 2010 Nokia Corporation
*
* Based on drivers/media/video/v4l2_dev.c code authored by
* Mauro Carvalho Chehab <[email protected]> (version 2)
* Alan Cox, <[email protected]> (version 1)
*
* Contacts: Laurent Pinchart <[email protected]>
* Sakari Ailus <[email protected]>
*
* --
*
* Generic media device node infrastructure to register and unregister
* character devices using a dynamic major number and proper reference
* counting.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <media/media-devnode.h>
#include <media/media-device.h>
#define MEDIA_NUM_DEVICES 256
#define MEDIA_NAME "media"
static dev_t media_dev_t;
/*
* Active devices
*/
static DEFINE_MUTEX(media_devnode_lock);
static DECLARE_BITMAP(media_devnode_nums, MEDIA_NUM_DEVICES);
/* Called when the last user of the media device exits. */
static void media_devnode_release(struct device *cd)
{
struct media_devnode *devnode = to_media_devnode(cd);
mutex_lock(&media_devnode_lock);
/* Mark device node number as free */
clear_bit(devnode->minor, media_devnode_nums);
mutex_unlock(&media_devnode_lock);
/* Release media_devnode and perform other cleanups as needed. */
if (devnode->release)
devnode->release(devnode);
kfree(devnode);
pr_debug("%s: Media Devnode Deallocated\n", __func__);
}
static struct bus_type media_bus_type = {
.name = MEDIA_NAME,
};
static ssize_t media_read(struct file *filp, char __user *buf,
size_t sz, loff_t *off)
{
struct media_devnode *devnode = media_devnode_data(filp);
if (!devnode->fops->read)
return -EINVAL;
if (!media_devnode_is_registered(devnode))
return -EIO;
return devnode->fops->read(filp, buf, sz, off);
}
static ssize_t media_write(struct file *filp, const char __user *buf,
size_t sz, loff_t *off)
{
struct media_devnode *devnode = media_devnode_data(filp);
if (!devnode->fops->write)
return -EINVAL;
if (!media_devnode_is_registered(devnode))
return -EIO;
return devnode->fops->write(filp, buf, sz, off);
}
static __poll_t media_poll(struct file *filp,
struct poll_table_struct *poll)
{
struct media_devnode *devnode = media_devnode_data(filp);
if (!media_devnode_is_registered(devnode))
return EPOLLERR | EPOLLHUP;
if (!devnode->fops->poll)
return DEFAULT_POLLMASK;
return devnode->fops->poll(filp, poll);
}
static long
__media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg,
long (*ioctl_func)(struct file *filp, unsigned int cmd,
unsigned long arg))
{
struct media_devnode *devnode = media_devnode_data(filp);
if (!ioctl_func)
return -ENOTTY;
if (!media_devnode_is_registered(devnode))
return -EIO;
return ioctl_func(filp, cmd, arg);
}
static long media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct media_devnode *devnode = media_devnode_data(filp);
return __media_ioctl(filp, cmd, arg, devnode->fops->ioctl);
}
#ifdef CONFIG_COMPAT
static long media_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct media_devnode *devnode = media_devnode_data(filp);
return __media_ioctl(filp, cmd, arg, devnode->fops->compat_ioctl);
}
#endif /* CONFIG_COMPAT */
/* Override for the open function */
static int media_open(struct inode *inode, struct file *filp)
{
struct media_devnode *devnode;
int ret;
/* Check if the media device is available. This needs to be done with
* the media_devnode_lock held to prevent an open/unregister race:
* without the lock, the device could be unregistered and freed between
* the media_devnode_is_registered() and get_device() calls, leading to
* a crash.
*/
mutex_lock(&media_devnode_lock);
devnode = container_of(inode->i_cdev, struct media_devnode, cdev);
/*
 * Return ENXIO if the media device has already been removed
 * or if it is not registered anymore.
 */
if (!media_devnode_is_registered(devnode)) {
mutex_unlock(&media_devnode_lock);
return -ENXIO;
}
/* and increase the device refcount */
get_device(&devnode->dev);
mutex_unlock(&media_devnode_lock);
filp->private_data = devnode;
if (devnode->fops->open) {
ret = devnode->fops->open(filp);
if (ret) {
put_device(&devnode->dev);
filp->private_data = NULL;
return ret;
}
}
return 0;
}
/* Override for the release function */
static int media_release(struct inode *inode, struct file *filp)
{
struct media_devnode *devnode = media_devnode_data(filp);
if (devnode->fops->release)
devnode->fops->release(filp);
filp->private_data = NULL;
/*
 * Decrease the refcount unconditionally since the release()
 * return value is ignored.
 */
put_device(&devnode->dev);
pr_debug("%s: Media Release\n", __func__);
return 0;
}
static const struct file_operations media_devnode_fops = {
.owner = THIS_MODULE,
.read = media_read,
.write = media_write,
.open = media_open,
.unlocked_ioctl = media_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = media_compat_ioctl,
#endif /* CONFIG_COMPAT */
.release = media_release,
.poll = media_poll,
.llseek = no_llseek,
};
int __must_check media_devnode_register(struct media_device *mdev,
struct media_devnode *devnode,
struct module *owner)
{
int minor;
int ret;
/* Part 1: Find a free minor number */
mutex_lock(&media_devnode_lock);
minor = find_first_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES);
if (minor == MEDIA_NUM_DEVICES) {
mutex_unlock(&media_devnode_lock);
pr_err("could not get a free minor\n");
kfree(devnode);
return -ENFILE;
}
set_bit(minor, media_devnode_nums);
mutex_unlock(&media_devnode_lock);
devnode->minor = minor;
devnode->media_dev = mdev;
/* Part 2: Initialize dev now to use dev.kobj for cdev.kobj.parent */
devnode->dev.bus = &media_bus_type;
devnode->dev.devt = MKDEV(MAJOR(media_dev_t), devnode->minor);
devnode->dev.release = media_devnode_release;
if (devnode->parent)
devnode->dev.parent = devnode->parent;
dev_set_name(&devnode->dev, "media%d", devnode->minor);
device_initialize(&devnode->dev);
/* Part 3: Initialize the character device */
cdev_init(&devnode->cdev, &media_devnode_fops);
devnode->cdev.owner = owner;
kobject_set_name(&devnode->cdev.kobj, "media%d", devnode->minor);
/* Part 4: Add the media and char device */
ret = cdev_device_add(&devnode->cdev, &devnode->dev);
if (ret < 0) {
pr_err("%s: cdev_device_add failed\n", __func__);
goto cdev_add_error;
}
/* Part 5: Activate this minor. The char device can now be used. */
set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
return 0;
cdev_add_error:
mutex_lock(&media_devnode_lock);
clear_bit(devnode->minor, media_devnode_nums);
devnode->media_dev = NULL;
mutex_unlock(&media_devnode_lock);
put_device(&devnode->dev);
return ret;
}
void media_devnode_unregister_prepare(struct media_devnode *devnode)
{
/* Check if devnode was ever registered at all */
if (!media_devnode_is_registered(devnode))
return;
mutex_lock(&media_devnode_lock);
clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
mutex_unlock(&media_devnode_lock);
}
void media_devnode_unregister(struct media_devnode *devnode)
{
mutex_lock(&media_devnode_lock);
/* Delete the cdev on this minor as well */
cdev_device_del(&devnode->cdev, &devnode->dev);
devnode->media_dev = NULL;
mutex_unlock(&media_devnode_lock);
put_device(&devnode->dev);
}
/*
* Initialise media for linux
*/
static int __init media_devnode_init(void)
{
int ret;
pr_info("Linux media interface: v0.10\n");
ret = alloc_chrdev_region(&media_dev_t, 0, MEDIA_NUM_DEVICES,
MEDIA_NAME);
if (ret < 0) {
pr_warn("unable to allocate major\n");
return ret;
}
ret = bus_register(&media_bus_type);
if (ret < 0) {
unregister_chrdev_region(media_dev_t, MEDIA_NUM_DEVICES);
pr_warn("bus_register failed\n");
return -EIO;
}
return 0;
}
static void __exit media_devnode_exit(void)
{
bus_unregister(&media_bus_type);
unregister_chrdev_region(media_dev_t, MEDIA_NUM_DEVICES);
}
subsys_initcall(media_devnode_init);
module_exit(media_devnode_exit)
MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
MODULE_DESCRIPTION("Device node registration for media drivers");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/mc/mc-devnode.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Media device
*
* Copyright (C) 2010 Nokia Corporation
*
* Contacts: Laurent Pinchart <[email protected]>
* Sakari Ailus <[email protected]>
*/
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/ioctl.h>
#include <linux/media.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/usb.h>
#include <linux/version.h>
#include <media/media-device.h>
#include <media/media-devnode.h>
#include <media/media-entity.h>
#include <media/media-request.h>
#ifdef CONFIG_MEDIA_CONTROLLER
/*
* Legacy defines from linux/media.h. This is the only place we need this
* so we just define it here. The media.h header doesn't expose it to the
* kernel to prevent it from being used by drivers, but here (and only here!)
* we need it to handle the legacy behavior.
*/
#define MEDIA_ENT_SUBTYPE_MASK 0x0000ffff
#define MEDIA_ENT_T_DEVNODE_UNKNOWN (MEDIA_ENT_F_OLD_BASE | \
MEDIA_ENT_SUBTYPE_MASK)
/* -----------------------------------------------------------------------------
* Userspace API
*/
static inline void __user *media_get_uptr(__u64 arg)
{
return (void __user *)(uintptr_t)arg;
}
static int media_device_open(struct file *filp)
{
return 0;
}
static int media_device_close(struct file *filp)
{
return 0;
}
static long media_device_get_info(struct media_device *dev, void *arg)
{
struct media_device_info *info = arg;
memset(info, 0, sizeof(*info));
if (dev->driver_name[0])
strscpy(info->driver, dev->driver_name, sizeof(info->driver));
else
strscpy(info->driver, dev->dev->driver->name,
sizeof(info->driver));
strscpy(info->model, dev->model, sizeof(info->model));
strscpy(info->serial, dev->serial, sizeof(info->serial));
strscpy(info->bus_info, dev->bus_info, sizeof(info->bus_info));
info->media_version = LINUX_VERSION_CODE;
info->driver_version = info->media_version;
info->hw_revision = dev->hw_revision;
return 0;
}
static struct media_entity *find_entity(struct media_device *mdev, u32 id)
{
struct media_entity *entity;
int next = id & MEDIA_ENT_ID_FLAG_NEXT;
id &= ~MEDIA_ENT_ID_FLAG_NEXT;
media_device_for_each_entity(entity, mdev) {
if (((media_entity_id(entity) == id) && !next) ||
((media_entity_id(entity) > id) && next)) {
return entity;
}
}
return NULL;
}
static long media_device_enum_entities(struct media_device *mdev, void *arg)
{
struct media_entity_desc *entd = arg;
struct media_entity *ent;
ent = find_entity(mdev, entd->id);
if (ent == NULL)
return -EINVAL;
memset(entd, 0, sizeof(*entd));
entd->id = media_entity_id(ent);
if (ent->name)
strscpy(entd->name, ent->name, sizeof(entd->name));
entd->type = ent->function;
entd->revision = 0; /* Unused */
entd->flags = ent->flags;
entd->group_id = 0; /* Unused */
entd->pads = ent->num_pads;
entd->links = ent->num_links - ent->num_backlinks;
/*
* Workaround for a bug in media-ctl <= v1.10 that makes it do the
* wrong thing if the entity function doesn't belong to either the
* MEDIA_ENT_F_OLD_BASE or the MEDIA_ENT_F_OLD_SUBDEV_BASE range.
*
* Non-subdevices are expected to be in the MEDIA_ENT_F_OLD_BASE
* range; otherwise they will be silently ignored by media-ctl when
* printing the graphviz diagram. So, map them into the old devnode
* range.
*/
if (ent->function < MEDIA_ENT_F_OLD_BASE ||
ent->function > MEDIA_ENT_F_TUNER) {
if (is_media_entity_v4l2_subdev(ent))
entd->type = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
else if (ent->function != MEDIA_ENT_F_IO_V4L)
entd->type = MEDIA_ENT_T_DEVNODE_UNKNOWN;
}
memcpy(&entd->raw, &ent->info, sizeof(ent->info));
return 0;
}
static void media_device_kpad_to_upad(const struct media_pad *kpad,
struct media_pad_desc *upad)
{
upad->entity = media_entity_id(kpad->entity);
upad->index = kpad->index;
upad->flags = kpad->flags;
}
static long media_device_enum_links(struct media_device *mdev, void *arg)
{
struct media_links_enum *links = arg;
struct media_entity *entity;
entity = find_entity(mdev, links->entity);
if (entity == NULL)
return -EINVAL;
if (links->pads) {
unsigned int p;
for (p = 0; p < entity->num_pads; p++) {
struct media_pad_desc pad;
memset(&pad, 0, sizeof(pad));
media_device_kpad_to_upad(&entity->pads[p], &pad);
if (copy_to_user(&links->pads[p], &pad, sizeof(pad)))
return -EFAULT;
}
}
if (links->links) {
struct media_link *link;
struct media_link_desc __user *ulink_desc = links->links;
list_for_each_entry(link, &entity->links, list) {
struct media_link_desc klink_desc;
/* Ignore backlinks. */
if (link->source->entity != entity)
continue;
memset(&klink_desc, 0, sizeof(klink_desc));
media_device_kpad_to_upad(link->source,
&klink_desc.source);
media_device_kpad_to_upad(link->sink,
&klink_desc.sink);
klink_desc.flags = link->flags;
if (copy_to_user(ulink_desc, &klink_desc,
sizeof(*ulink_desc)))
return -EFAULT;
ulink_desc++;
}
}
memset(links->reserved, 0, sizeof(links->reserved));
return 0;
}
static long media_device_setup_link(struct media_device *mdev, void *arg)
{
struct media_link_desc *linkd = arg;
struct media_link *link = NULL;
struct media_entity *source;
struct media_entity *sink;
/* Find the source and sink entities and link. */
source = find_entity(mdev, linkd->source.entity);
sink = find_entity(mdev, linkd->sink.entity);
if (source == NULL || sink == NULL)
return -EINVAL;
if (linkd->source.index >= source->num_pads ||
linkd->sink.index >= sink->num_pads)
return -EINVAL;
link = media_entity_find_link(&source->pads[linkd->source.index],
&sink->pads[linkd->sink.index]);
if (link == NULL)
return -EINVAL;
memset(linkd->reserved, 0, sizeof(linkd->reserved));
/* Setup the link on both entities. */
return __media_entity_setup_link(link, linkd->flags);
}
static long media_device_get_topology(struct media_device *mdev, void *arg)
{
struct media_v2_topology *topo = arg;
struct media_entity *entity;
struct media_interface *intf;
struct media_pad *pad;
struct media_link *link;
struct media_v2_entity kentity, __user *uentity;
struct media_v2_interface kintf, __user *uintf;
struct media_v2_pad kpad, __user *upad;
struct media_v2_link klink, __user *ulink;
unsigned int i;
int ret = 0;
topo->topology_version = mdev->topology_version;
/* Get entities and number of entities */
i = 0;
uentity = media_get_uptr(topo->ptr_entities);
media_device_for_each_entity(entity, mdev) {
i++;
if (ret || !uentity)
continue;
if (i > topo->num_entities) {
ret = -ENOSPC;
continue;
}
/* Copy fields to userspace struct if not error */
memset(&kentity, 0, sizeof(kentity));
kentity.id = entity->graph_obj.id;
kentity.function = entity->function;
kentity.flags = entity->flags;
strscpy(kentity.name, entity->name,
sizeof(kentity.name));
if (copy_to_user(uentity, &kentity, sizeof(kentity)))
ret = -EFAULT;
uentity++;
}
topo->num_entities = i;
topo->reserved1 = 0;
/* Get interfaces and number of interfaces */
i = 0;
uintf = media_get_uptr(topo->ptr_interfaces);
media_device_for_each_intf(intf, mdev) {
i++;
if (ret || !uintf)
continue;
if (i > topo->num_interfaces) {
ret = -ENOSPC;
continue;
}
memset(&kintf, 0, sizeof(kintf));
/* Copy intf fields to userspace struct */
kintf.id = intf->graph_obj.id;
kintf.intf_type = intf->type;
kintf.flags = intf->flags;
if (media_type(&intf->graph_obj) == MEDIA_GRAPH_INTF_DEVNODE) {
struct media_intf_devnode *devnode;
devnode = intf_to_devnode(intf);
kintf.devnode.major = devnode->major;
kintf.devnode.minor = devnode->minor;
}
if (copy_to_user(uintf, &kintf, sizeof(kintf)))
ret = -EFAULT;
uintf++;
}
topo->num_interfaces = i;
topo->reserved2 = 0;
/* Get pads and number of pads */
i = 0;
upad = media_get_uptr(topo->ptr_pads);
media_device_for_each_pad(pad, mdev) {
i++;
if (ret || !upad)
continue;
if (i > topo->num_pads) {
ret = -ENOSPC;
continue;
}
memset(&kpad, 0, sizeof(kpad));
/* Copy pad fields to userspace struct */
kpad.id = pad->graph_obj.id;
kpad.entity_id = pad->entity->graph_obj.id;
kpad.flags = pad->flags;
kpad.index = pad->index;
if (copy_to_user(upad, &kpad, sizeof(kpad)))
ret = -EFAULT;
upad++;
}
topo->num_pads = i;
topo->reserved3 = 0;
/* Get links and number of links */
i = 0;
ulink = media_get_uptr(topo->ptr_links);
media_device_for_each_link(link, mdev) {
if (link->is_backlink)
continue;
i++;
if (ret || !ulink)
continue;
if (i > topo->num_links) {
ret = -ENOSPC;
continue;
}
memset(&klink, 0, sizeof(klink));
/* Copy link fields to userspace struct */
klink.id = link->graph_obj.id;
klink.source_id = link->gobj0->id;
klink.sink_id = link->gobj1->id;
klink.flags = link->flags;
if (copy_to_user(ulink, &klink, sizeof(klink)))
ret = -EFAULT;
ulink++;
}
topo->num_links = i;
topo->reserved4 = 0;
return ret;
}
static long media_device_request_alloc(struct media_device *mdev, void *arg)
{
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
int *alloc_fd = arg;
if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue)
return -ENOTTY;
return media_request_alloc(mdev, alloc_fd);
#else
return -ENOTTY;
#endif
}
static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
{
if ((_IOC_DIR(cmd) & _IOC_WRITE) &&
copy_from_user(karg, uarg, _IOC_SIZE(cmd)))
return -EFAULT;
return 0;
}
static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd)
{
if ((_IOC_DIR(cmd) & _IOC_READ) &&
copy_to_user(uarg, karg, _IOC_SIZE(cmd)))
return -EFAULT;
return 0;
}
/* Do acquire the graph mutex */
#define MEDIA_IOC_FL_GRAPH_MUTEX BIT(0)
#define MEDIA_IOC_ARG(__cmd, func, fl, from_user, to_user) \
[_IOC_NR(MEDIA_IOC_##__cmd)] = { \
.cmd = MEDIA_IOC_##__cmd, \
.fn = func, \
.flags = fl, \
.arg_from_user = from_user, \
.arg_to_user = to_user, \
}
#define MEDIA_IOC(__cmd, func, fl) \
MEDIA_IOC_ARG(__cmd, func, fl, copy_arg_from_user, copy_arg_to_user)
/* the table is indexed by _IOC_NR(cmd) */
struct media_ioctl_info {
unsigned int cmd;
unsigned short flags;
long (*fn)(struct media_device *dev, void *arg);
long (*arg_from_user)(void *karg, void __user *uarg, unsigned int cmd);
long (*arg_to_user)(void __user *uarg, void *karg, unsigned int cmd);
};
static const struct media_ioctl_info ioctl_info[] = {
MEDIA_IOC(DEVICE_INFO, media_device_get_info, MEDIA_IOC_FL_GRAPH_MUTEX),
MEDIA_IOC(ENUM_ENTITIES, media_device_enum_entities, MEDIA_IOC_FL_GRAPH_MUTEX),
MEDIA_IOC(ENUM_LINKS, media_device_enum_links, MEDIA_IOC_FL_GRAPH_MUTEX),
MEDIA_IOC(SETUP_LINK, media_device_setup_link, MEDIA_IOC_FL_GRAPH_MUTEX),
MEDIA_IOC(G_TOPOLOGY, media_device_get_topology, MEDIA_IOC_FL_GRAPH_MUTEX),
MEDIA_IOC(REQUEST_ALLOC, media_device_request_alloc, 0),
};
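/*
 * Editor's note: a hedged userspace sketch of how this dispatch table is
 * exercised (illustrative only, not kernel code; the /dev/media0 path is
 * an assumption about the system).
 */
#if 0 /* illustrative userspace code */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/media.h>

int print_media_info(void)
{
	struct media_device_info info;
	int fd, ret;

	fd = open("/dev/media0", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&info, 0, sizeof(info));
	/* Dispatches to media_device_get_info() via the table above. */
	ret = ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info);
	if (!ret)
		printf("driver: %s model: %s\n", info.driver, info.model);

	close(fd);
	return ret;
}
#endif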
static long media_device_ioctl(struct file *filp, unsigned int cmd,
unsigned long __arg)
{
struct media_devnode *devnode = media_devnode_data(filp);
struct media_device *dev = devnode->media_dev;
const struct media_ioctl_info *info;
void __user *arg = (void __user *)__arg;
char __karg[256], *karg = __karg;
long ret;
if (_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_info)
|| ioctl_info[_IOC_NR(cmd)].cmd != cmd)
return -ENOIOCTLCMD;
info = &ioctl_info[_IOC_NR(cmd)];
if (_IOC_SIZE(info->cmd) > sizeof(__karg)) {
karg = kmalloc(_IOC_SIZE(info->cmd), GFP_KERNEL);
if (!karg)
return -ENOMEM;
}
if (info->arg_from_user) {
ret = info->arg_from_user(karg, arg, cmd);
if (ret)
goto out_free;
}
if (info->flags & MEDIA_IOC_FL_GRAPH_MUTEX)
mutex_lock(&dev->graph_mutex);
ret = info->fn(dev, karg);
if (info->flags & MEDIA_IOC_FL_GRAPH_MUTEX)
mutex_unlock(&dev->graph_mutex);
if (!ret && info->arg_to_user)
ret = info->arg_to_user(arg, karg, cmd);
out_free:
if (karg != __karg)
kfree(karg);
return ret;
}
#ifdef CONFIG_COMPAT
struct media_links_enum32 {
__u32 entity;
compat_uptr_t pads; /* struct media_pad_desc * */
compat_uptr_t links; /* struct media_link_desc * */
__u32 reserved[4];
};
static long media_device_enum_links32(struct media_device *mdev,
struct media_links_enum32 __user *ulinks)
{
struct media_links_enum links;
compat_uptr_t pads_ptr, links_ptr;
int ret;
memset(&links, 0, sizeof(links));
if (get_user(links.entity, &ulinks->entity)
|| get_user(pads_ptr, &ulinks->pads)
|| get_user(links_ptr, &ulinks->links))
return -EFAULT;
links.pads = compat_ptr(pads_ptr);
links.links = compat_ptr(links_ptr);
ret = media_device_enum_links(mdev, &links);
if (ret)
return ret;
if (copy_to_user(ulinks->reserved, links.reserved,
sizeof(ulinks->reserved)))
return -EFAULT;
return 0;
}
#define MEDIA_IOC_ENUM_LINKS32 _IOWR('|', 0x02, struct media_links_enum32)
static long media_device_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct media_devnode *devnode = media_devnode_data(filp);
struct media_device *dev = devnode->media_dev;
long ret;
switch (cmd) {
case MEDIA_IOC_ENUM_LINKS32:
mutex_lock(&dev->graph_mutex);
ret = media_device_enum_links32(dev,
(struct media_links_enum32 __user *)arg);
mutex_unlock(&dev->graph_mutex);
break;
default:
return media_device_ioctl(filp, cmd, arg);
}
return ret;
}
#endif /* CONFIG_COMPAT */
static const struct media_file_operations media_device_fops = {
.owner = THIS_MODULE,
.open = media_device_open,
.ioctl = media_device_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = media_device_compat_ioctl,
#endif /* CONFIG_COMPAT */
.release = media_device_close,
};
/* -----------------------------------------------------------------------------
* sysfs
*/
static ssize_t model_show(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct media_devnode *devnode = to_media_devnode(cd);
struct media_device *mdev = devnode->media_dev;
return sprintf(buf, "%.*s\n", (int)sizeof(mdev->model), mdev->model);
}
static DEVICE_ATTR_RO(model);
/* -----------------------------------------------------------------------------
* Registration/unregistration
*/
static void media_device_release(struct media_devnode *devnode)
{
dev_dbg(devnode->parent, "Media device released\n");
}
static void __media_device_unregister_entity(struct media_entity *entity)
{
struct media_device *mdev = entity->graph_obj.mdev;
struct media_link *link, *tmp;
struct media_interface *intf;
struct media_pad *iter;
ida_free(&mdev->entity_internal_idx, entity->internal_idx);
/* Remove all interface links pointing to this entity */
list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
list_for_each_entry_safe(link, tmp, &intf->links, list) {
if (link->entity == entity)
__media_remove_intf_link(link);
}
}
/* Remove all data links that belong to this entity */
__media_entity_remove_links(entity);
/* Remove all pads that belong to this entity */
media_entity_for_each_pad(entity, iter)
media_gobj_destroy(&iter->graph_obj);
/* Remove the entity */
media_gobj_destroy(&entity->graph_obj);
/* TODO: invoke entity_notify callbacks to handle entity removal. */
}
int __must_check media_device_register_entity(struct media_device *mdev,
struct media_entity *entity)
{
struct media_entity_notify *notify, *next;
struct media_pad *iter;
int ret;
if (entity->function == MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN ||
entity->function == MEDIA_ENT_F_UNKNOWN)
dev_warn(mdev->dev,
"Entity type for entity %s was not initialized!\n",
entity->name);
/* Warn if we apparently re-register an entity */
WARN_ON(entity->graph_obj.mdev != NULL);
entity->graph_obj.mdev = mdev;
INIT_LIST_HEAD(&entity->links);
entity->num_links = 0;
entity->num_backlinks = 0;
ret = ida_alloc_min(&mdev->entity_internal_idx, 1, GFP_KERNEL);
if (ret < 0)
return ret;
entity->internal_idx = ret;
mutex_lock(&mdev->graph_mutex);
mdev->entity_internal_idx_max =
max(mdev->entity_internal_idx_max, entity->internal_idx);
/* Initialize media_gobj embedded at the entity */
media_gobj_create(mdev, MEDIA_GRAPH_ENTITY, &entity->graph_obj);
/* Initialize objects at the pads */
media_entity_for_each_pad(entity, iter)
media_gobj_create(mdev, MEDIA_GRAPH_PAD, &iter->graph_obj);
/* invoke entity_notify callbacks */
list_for_each_entry_safe(notify, next, &mdev->entity_notify, list)
notify->notify(entity, notify->notify_data);
if (mdev->entity_internal_idx_max
>= mdev->pm_count_walk.ent_enum.idx_max) {
struct media_graph new = { .top = 0 };
/*
* Initialise the new graph walk before cleaning up
* the old one in order not to spoil the graph walk
* object of the media device if graph walk init fails.
*/
ret = media_graph_walk_init(&new, mdev);
if (ret) {
__media_device_unregister_entity(entity);
mutex_unlock(&mdev->graph_mutex);
return ret;
}
media_graph_walk_cleanup(&mdev->pm_count_walk);
mdev->pm_count_walk = new;
}
mutex_unlock(&mdev->graph_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(media_device_register_entity);
void media_device_unregister_entity(struct media_entity *entity)
{
struct media_device *mdev = entity->graph_obj.mdev;
if (mdev == NULL)
return;
mutex_lock(&mdev->graph_mutex);
__media_device_unregister_entity(entity);
mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_device_unregister_entity);
void media_device_init(struct media_device *mdev)
{
INIT_LIST_HEAD(&mdev->entities);
INIT_LIST_HEAD(&mdev->interfaces);
INIT_LIST_HEAD(&mdev->pads);
INIT_LIST_HEAD(&mdev->links);
INIT_LIST_HEAD(&mdev->entity_notify);
mutex_init(&mdev->req_queue_mutex);
mutex_init(&mdev->graph_mutex);
ida_init(&mdev->entity_internal_idx);
atomic_set(&mdev->request_id, 0);
if (!*mdev->bus_info)
media_set_bus_info(mdev->bus_info, sizeof(mdev->bus_info),
mdev->dev);
dev_dbg(mdev->dev, "Media device initialized\n");
}
EXPORT_SYMBOL_GPL(media_device_init);
void media_device_cleanup(struct media_device *mdev)
{
ida_destroy(&mdev->entity_internal_idx);
mdev->entity_internal_idx_max = 0;
media_graph_walk_cleanup(&mdev->pm_count_walk);
mutex_destroy(&mdev->graph_mutex);
mutex_destroy(&mdev->req_queue_mutex);
}
EXPORT_SYMBOL_GPL(media_device_cleanup);
int __must_check __media_device_register(struct media_device *mdev,
struct module *owner)
{
struct media_devnode *devnode;
int ret;
devnode = kzalloc(sizeof(*devnode), GFP_KERNEL);
if (!devnode)
return -ENOMEM;
/* Register the device node. */
mdev->devnode = devnode;
devnode->fops = &media_device_fops;
devnode->parent = mdev->dev;
devnode->release = media_device_release;
/* Set version 0 to indicate user-space that the graph is static */
mdev->topology_version = 0;
ret = media_devnode_register(mdev, devnode, owner);
if (ret < 0) {
/* devnode free is handled in media_devnode_*() */
mdev->devnode = NULL;
return ret;
}
ret = device_create_file(&devnode->dev, &dev_attr_model);
if (ret < 0) {
/* devnode free is handled in media_devnode_*() */
mdev->devnode = NULL;
media_devnode_unregister_prepare(devnode);
media_devnode_unregister(devnode);
return ret;
}
dev_dbg(mdev->dev, "Media device registered\n");
return 0;
}
EXPORT_SYMBOL_GPL(__media_device_register);
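/*
 * Editor's note: a hedged sketch of the usual init/register/unregister/
 * cleanup lifecycle (hypothetical driver code, not part of this file; the
 * my_driver_data type is an assumption). media_device_register() is the
 * THIS_MODULE wrapper around __media_device_register() declared in
 * media-device.h.
 */
#if 0 /* illustrative only */
static int my_probe(struct my_driver_data *priv)
{
	priv->mdev.dev = priv->dev;
	strscpy(priv->mdev.model, "my-device", sizeof(priv->mdev.model));

	media_device_init(&priv->mdev);

	/* Entities are typically registered between init and register. */
	return media_device_register(&priv->mdev);
}

static void my_remove(struct my_driver_data *priv)
{
	media_device_unregister(&priv->mdev);
	media_device_cleanup(&priv->mdev);
}
#endif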
void media_device_register_entity_notify(struct media_device *mdev,
struct media_entity_notify *nptr)
{
mutex_lock(&mdev->graph_mutex);
list_add_tail(&nptr->list, &mdev->entity_notify);
mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_device_register_entity_notify);
/*
* Note: Should be called with mdev->lock held.
*/
static void __media_device_unregister_entity_notify(struct media_device *mdev,
struct media_entity_notify *nptr)
{
list_del(&nptr->list);
}
void media_device_unregister_entity_notify(struct media_device *mdev,
struct media_entity_notify *nptr)
{
mutex_lock(&mdev->graph_mutex);
__media_device_unregister_entity_notify(mdev, nptr);
mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_device_unregister_entity_notify);
void media_device_unregister(struct media_device *mdev)
{
struct media_entity *entity;
struct media_entity *next;
struct media_interface *intf, *tmp_intf;
struct media_entity_notify *notify, *nextp;
if (mdev == NULL)
return;
mutex_lock(&mdev->graph_mutex);
/* Check if mdev was ever registered at all */
if (!media_devnode_is_registered(mdev->devnode)) {
mutex_unlock(&mdev->graph_mutex);
return;
}
/* Clear the devnode register bit to avoid races with media dev open */
media_devnode_unregister_prepare(mdev->devnode);
/* Remove all entities from the media device */
list_for_each_entry_safe(entity, next, &mdev->entities, graph_obj.list)
__media_device_unregister_entity(entity);
/* Remove all entity_notify callbacks from the media device */
list_for_each_entry_safe(notify, nextp, &mdev->entity_notify, list)
__media_device_unregister_entity_notify(mdev, notify);
/* Remove all interfaces from the media device */
list_for_each_entry_safe(intf, tmp_intf, &mdev->interfaces,
graph_obj.list) {
/*
* Unlink the interface, but don't free it here; the
* module which created it is responsible for freeing
* it
*/
__media_remove_intf_links(intf);
media_gobj_destroy(&intf->graph_obj);
}
mutex_unlock(&mdev->graph_mutex);
dev_dbg(mdev->dev, "Media device unregistered\n");
device_remove_file(&mdev->devnode->dev, &dev_attr_model);
media_devnode_unregister(mdev->devnode);
/* devnode free is handled in media_devnode_*() */
mdev->devnode = NULL;
}
EXPORT_SYMBOL_GPL(media_device_unregister);
#if IS_ENABLED(CONFIG_PCI)
void media_device_pci_init(struct media_device *mdev,
struct pci_dev *pci_dev,
const char *name)
{
mdev->dev = &pci_dev->dev;
if (name)
strscpy(mdev->model, name, sizeof(mdev->model));
else
strscpy(mdev->model, pci_name(pci_dev), sizeof(mdev->model));
sprintf(mdev->bus_info, "PCI:%s", pci_name(pci_dev));
mdev->hw_revision = (pci_dev->subsystem_vendor << 16)
| pci_dev->subsystem_device;
media_device_init(mdev);
}
EXPORT_SYMBOL_GPL(media_device_pci_init);
#endif
#if IS_ENABLED(CONFIG_USB)
void __media_device_usb_init(struct media_device *mdev,
struct usb_device *udev,
const char *board_name,
const char *driver_name)
{
mdev->dev = &udev->dev;
if (driver_name)
strscpy(mdev->driver_name, driver_name,
sizeof(mdev->driver_name));
if (board_name)
strscpy(mdev->model, board_name, sizeof(mdev->model));
else if (udev->product)
strscpy(mdev->model, udev->product, sizeof(mdev->model));
else
strscpy(mdev->model, "unknown model", sizeof(mdev->model));
if (udev->serial)
strscpy(mdev->serial, udev->serial, sizeof(mdev->serial));
usb_make_path(udev, mdev->bus_info, sizeof(mdev->bus_info));
mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
media_device_init(mdev);
}
EXPORT_SYMBOL_GPL(__media_device_usb_init);
#endif
#endif /* CONFIG_MEDIA_CONTROLLER */
| linux-master | drivers/media/mc/mc-device.c |
// SPDX-License-Identifier: GPL-2.0-only
/* ir-xmp-decoder.c - handle XMP IR Pulse/Space protocol
*
* Copyright (C) 2014 by Marcel Mol
*
* - Based on info from http://www.hifi-remote.com
* - Ignore Toggle=9 frames
 * - Ignore the XMP-1/XMP-2 difference, always store 16-bit OBC
*/
#include <linux/bitrev.h>
#include <linux/module.h>
#include "rc-core-priv.h"
#define XMP_UNIT 136 /* us */
#define XMP_LEADER 210 /* us */
#define XMP_NIBBLE_PREFIX 760 /* us */
#define XMP_HALFFRAME_SPACE 13800 /* us */
/* should be 80ms, but not all duration suppliers can go that high */
#define XMP_TRAILER_SPACE 20000
enum xmp_state {
STATE_INACTIVE,
STATE_LEADER_PULSE,
STATE_NIBBLE_SPACE,
};
/**
* ir_xmp_decode() - Decode one XMP pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_xmp_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct xmp_dec *data = &dev->raw->xmp;
if (!is_timing_event(ev)) {
if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
dev_dbg(&dev->dev, "XMP decode started at state %d %d (%uus %s)\n",
data->state, data->count, ev.duration, TO_STR(ev.pulse));
switch (data->state) {
case STATE_INACTIVE:
if (!ev.pulse)
break;
if (eq_margin(ev.duration, XMP_LEADER, XMP_UNIT / 2)) {
data->count = 0;
data->state = STATE_NIBBLE_SPACE;
}
return 0;
case STATE_LEADER_PULSE:
if (!ev.pulse)
break;
if (eq_margin(ev.duration, XMP_LEADER, XMP_UNIT / 2))
data->state = STATE_NIBBLE_SPACE;
return 0;
case STATE_NIBBLE_SPACE:
if (ev.pulse)
break;
if (geq_margin(ev.duration, XMP_TRAILER_SPACE, XMP_NIBBLE_PREFIX)) {
int divider, i;
u8 addr, subaddr, subaddr2, toggle, oem, obc1, obc2, sum1, sum2;
u32 *n;
u32 scancode;
if (data->count != 16) {
dev_dbg(&dev->dev, "received TRAILER period at index %d: %u\n",
data->count, ev.duration);
data->state = STATE_INACTIVE;
return -EINVAL;
}
n = data->durations;
/*
* the 4th nibble should be 15 so base the divider on this
* to transform durations into nibbles. Subtract 2000 from
* the divider to compensate for fluctuations in the signal
*/
divider = (n[3] - XMP_NIBBLE_PREFIX) / 15 - 2000;
if (divider < 50) {
dev_dbg(&dev->dev, "divider to small %d.\n",
divider);
data->state = STATE_INACTIVE;
return -EINVAL;
}
/* convert to nibbles and do some sanity checks */
for (i = 0; i < 16; i++)
n[i] = (n[i] - XMP_NIBBLE_PREFIX) / divider;
sum1 = (15 + n[0] + n[1] + n[2] + n[3] +
n[4] + n[5] + n[6] + n[7]) % 16;
sum2 = (15 + n[8] + n[9] + n[10] + n[11] +
n[12] + n[13] + n[14] + n[15]) % 16;
if (sum1 != 15 || sum2 != 15) {
dev_dbg(&dev->dev, "checksum errors sum1=0x%X sum2=0x%X\n",
sum1, sum2);
data->state = STATE_INACTIVE;
return -EINVAL;
}
subaddr = n[0] << 4 | n[2];
subaddr2 = n[8] << 4 | n[11];
oem = n[4] << 4 | n[5];
addr = n[6] << 4 | n[7];
toggle = n[10];
obc1 = n[12] << 4 | n[13];
obc2 = n[14] << 4 | n[15];
if (subaddr != subaddr2) {
dev_dbg(&dev->dev, "subaddress nibbles mismatch 0x%02X != 0x%02X\n",
subaddr, subaddr2);
data->state = STATE_INACTIVE;
return -EINVAL;
}
if (oem != 0x44)
dev_dbg(&dev->dev, "Warning: OEM nibbles 0x%02X. Expected 0x44\n",
oem);
scancode = addr << 24 | subaddr << 16 |
obc1 << 8 | obc2;
dev_dbg(&dev->dev, "XMP scancode 0x%06x\n", scancode);
if (toggle == 0) {
rc_keydown(dev, RC_PROTO_XMP, scancode, 0);
} else {
rc_repeat(dev);
dev_dbg(&dev->dev, "Repeat last key\n");
}
data->state = STATE_INACTIVE;
return 0;
} else if (geq_margin(ev.duration, XMP_HALFFRAME_SPACE, XMP_NIBBLE_PREFIX)) {
/* Expect 8 or 16 nibble pulses. 16 in case of 'final' frame */
if (data->count == 16) {
dev_dbg(&dev->dev, "received half frame pulse at index %d. Probably a final frame key-up event: %u\n",
data->count, ev.duration);
/*
* TODO: for now go back to half frame position
* so trailer can be found and key press
* can be handled.
*/
data->count = 8;
			} else if (data->count != 8)
dev_dbg(&dev->dev, "received half frame pulse at index %d: %u\n",
data->count, ev.duration);
data->state = STATE_LEADER_PULSE;
return 0;
} else if (geq_margin(ev.duration, XMP_NIBBLE_PREFIX, XMP_UNIT)) {
/* store nibble raw data, decode after trailer */
if (data->count == 16) {
dev_dbg(&dev->dev, "too many pulses (%d) ignoring: %u\n",
data->count, ev.duration);
data->state = STATE_INACTIVE;
return -EINVAL;
}
data->durations[data->count] = ev.duration;
data->count++;
data->state = STATE_LEADER_PULSE;
return 0;
}
break;
}
dev_dbg(&dev->dev, "XMP decode failed at count %d state %d (%uus %s)\n",
data->count, data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
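/*
 * Editorial worked example (helper is illustrative, not used by the
 * decoder above): a half-frame checksum passes when its eight nibbles
 * sum to a multiple of 16, since (15 + sum) % 16 == 15 iff sum % 16 ==
 * 0. E.g. nibbles summing to 32 give (15 + 32) % 16 == 47 % 16 == 15.
 */
static inline bool __maybe_unused xmp_halfframe_csum_ok(const u32 *n)
{
	return (15 + n[0] + n[1] + n[2] + n[3] +
		n[4] + n[5] + n[6] + n[7]) % 16 == 15;
}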
static struct ir_raw_handler xmp_handler = {
.protocols = RC_PROTO_BIT_XMP,
.decode = ir_xmp_decode,
.min_timeout = XMP_TRAILER_SPACE,
};
static int __init ir_xmp_decode_init(void)
{
ir_raw_handler_register(&xmp_handler);
printk(KERN_INFO "IR XMP protocol handler initialized\n");
return 0;
}
static void __exit ir_xmp_decode_exit(void)
{
ir_raw_handler_unregister(&xmp_handler);
}
module_init(ir_xmp_decode_init);
module_exit(ir_xmp_decode_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marcel Mol <[email protected]>");
MODULE_AUTHOR("MESA Consulting (http://www.mesa.nl)");
MODULE_DESCRIPTION("XMP IR protocol decoder");
| linux-master | drivers/media/rc/ir-xmp-decoder.c |
// SPDX-License-Identifier: GPL-2.0+
// Driver for Xbox DVD Movie Playback Kit
// Copyright (c) 2018 by Benjamin Valentin <[email protected]>
/*
* Xbox DVD Movie Playback Kit USB IR dongle support
*
* The driver was derived from the ati_remote driver 2.2.1
* and used information from lirc_xbox.c
*
* Copyright (c) 2011, 2012 Anssi Hannula <[email protected]>
* Copyright (c) 2004 Torrey Hoffman <[email protected]>
* Copyright (c) 2002 Vladimir Dergachev
* Copyright (c) 2003-2004 Paul Miller <[email protected]>
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/input.h>
#include <media/rc-core.h>
/*
* Module and Version Information
*/
#define DRIVER_VERSION "1.0.0"
#define DRIVER_AUTHOR "Benjamin Valentin <[email protected]>"
#define DRIVER_DESC "Xbox DVD USB Remote Control"
#define NAME_BUFSIZE 80 /* size of product name, path buffers */
#define DATA_BUFSIZE 8 /* size of URB data buffers */
/*
* USB vendor ids for XBOX DVD Dongles
*/
#define VENDOR_GAMESTER 0x040b
#define VENDOR_MICROSOFT 0x045e
static const struct usb_device_id xbox_remote_table[] = {
/* Gamester Xbox DVD Movie Playback Kit IR */
{
USB_DEVICE(VENDOR_GAMESTER, 0x6521),
},
/* Microsoft Xbox DVD Movie Playback Kit IR */
{
USB_DEVICE(VENDOR_MICROSOFT, 0x0284),
},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, xbox_remote_table);
struct xbox_remote {
struct rc_dev *rdev;
struct usb_device *udev;
struct usb_interface *interface;
struct urb *irq_urb;
unsigned char inbuf[DATA_BUFSIZE] __aligned(sizeof(u16));
char rc_name[NAME_BUFSIZE];
char rc_phys[NAME_BUFSIZE];
};
static int xbox_remote_rc_open(struct rc_dev *rdev)
{
struct xbox_remote *xbox_remote = rdev->priv;
/* On first open, submit the read urb which was set up previously. */
xbox_remote->irq_urb->dev = xbox_remote->udev;
if (usb_submit_urb(xbox_remote->irq_urb, GFP_KERNEL)) {
dev_err(&xbox_remote->interface->dev,
"%s: usb_submit_urb failed!\n", __func__);
return -EIO;
}
return 0;
}
static void xbox_remote_rc_close(struct rc_dev *rdev)
{
struct xbox_remote *xbox_remote = rdev->priv;
usb_kill_urb(xbox_remote->irq_urb);
}
/*
 * xbox_remote_input_report
*/
static void xbox_remote_input_report(struct urb *urb)
{
struct xbox_remote *xbox_remote = urb->context;
unsigned char *data = xbox_remote->inbuf;
/*
* data[0] = 0x00
* data[1] = length - always 0x06
* data[2] = the key code
* data[3] = high part of key code
* data[4] = last_press_ms (low)
* data[5] = last_press_ms (high)
*/
/* Deal with strange looking inputs */
if (urb->actual_length != 6 || urb->actual_length != data[1]) {
dev_warn(&urb->dev->dev, "Weird data, len=%d: %*ph\n",
urb->actual_length, urb->actual_length, data);
return;
}
rc_keydown(xbox_remote->rdev, RC_PROTO_XBOX_DVD,
le16_to_cpup((__le16 *)(data + 2)), 0);
}
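/*
 * Editorial example (packet bytes hypothetical): per the layout above,
 * a report of { 0x00, 0x06, 0x0b, 0x0a, 0x34, 0x12 } has length 6, a
 * little-endian key code of 0x0a0b in data[2..3], and a last-press time
 * of 0x1234 ms. The helper below mirrors the extraction done by
 * xbox_remote_input_report() and is not used by the driver.
 */
static inline u16 __maybe_unused xbox_remote_keycode(const unsigned char *data)
{
	return le16_to_cpup((const __le16 *)(data + 2));
}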
/*
* xbox_remote_irq_in
*/
static void xbox_remote_irq_in(struct urb *urb)
{
struct xbox_remote *xbox_remote = urb->context;
int retval;
switch (urb->status) {
case 0: /* success */
xbox_remote_input_report(urb);
break;
case -ECONNRESET: /* unlink */
case -ENOENT:
case -ESHUTDOWN:
dev_dbg(&xbox_remote->interface->dev,
"%s: urb error status, unlink?\n",
__func__);
return;
default: /* error */
dev_dbg(&xbox_remote->interface->dev,
"%s: Nonzero urb status %d\n",
__func__, urb->status);
}
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&xbox_remote->interface->dev,
"%s: usb_submit_urb()=%d\n",
__func__, retval);
}
static void xbox_remote_rc_init(struct xbox_remote *xbox_remote)
{
struct rc_dev *rdev = xbox_remote->rdev;
rdev->priv = xbox_remote;
rdev->allowed_protocols = RC_PROTO_BIT_XBOX_DVD;
rdev->driver_name = "xbox_remote";
rdev->open = xbox_remote_rc_open;
rdev->close = xbox_remote_rc_close;
rdev->device_name = xbox_remote->rc_name;
rdev->input_phys = xbox_remote->rc_phys;
rdev->timeout = MS_TO_US(10);
usb_to_input_id(xbox_remote->udev, &rdev->input_id);
rdev->dev.parent = &xbox_remote->interface->dev;
}
static void xbox_remote_initialize(struct xbox_remote *xbox_remote,
struct usb_endpoint_descriptor *endpoint_in)
{
struct usb_device *udev = xbox_remote->udev;
int pipe, maxp;
/* Set up irq_urb */
pipe = usb_rcvintpipe(udev, endpoint_in->bEndpointAddress);
maxp = usb_maxpacket(udev, pipe);
maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp;
usb_fill_int_urb(xbox_remote->irq_urb, udev, pipe, xbox_remote->inbuf,
maxp, xbox_remote_irq_in, xbox_remote,
endpoint_in->bInterval);
}
/*
* xbox_remote_probe
*/
static int xbox_remote_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_host_interface *iface_host = interface->cur_altsetting;
struct usb_endpoint_descriptor *endpoint_in;
struct xbox_remote *xbox_remote;
struct rc_dev *rc_dev;
int err = -ENOMEM;
// why is there also a device with no endpoints?
if (iface_host->desc.bNumEndpoints == 0)
return -ENODEV;
if (iface_host->desc.bNumEndpoints != 1) {
pr_err("%s: Unexpected desc.bNumEndpoints: %d\n",
__func__, iface_host->desc.bNumEndpoints);
return -ENODEV;
}
endpoint_in = &iface_host->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint_in)) {
pr_err("%s: Unexpected endpoint_in\n", __func__);
return -ENODEV;
}
if (le16_to_cpu(endpoint_in->wMaxPacketSize) == 0) {
pr_err("%s: endpoint_in message size==0?\n", __func__);
return -ENODEV;
}
xbox_remote = kzalloc(sizeof(*xbox_remote), GFP_KERNEL);
rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!xbox_remote || !rc_dev)
goto exit_free_dev_rdev;
/* Allocate URB buffer */
xbox_remote->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!xbox_remote->irq_urb)
goto exit_free_buffers;
xbox_remote->udev = udev;
xbox_remote->rdev = rc_dev;
xbox_remote->interface = interface;
usb_make_path(udev, xbox_remote->rc_phys, sizeof(xbox_remote->rc_phys));
strlcat(xbox_remote->rc_phys, "/input0", sizeof(xbox_remote->rc_phys));
snprintf(xbox_remote->rc_name, sizeof(xbox_remote->rc_name), "%s%s%s",
udev->manufacturer ?: "",
udev->manufacturer && udev->product ? " " : "",
udev->product ?: "");
if (!strlen(xbox_remote->rc_name))
snprintf(xbox_remote->rc_name, sizeof(xbox_remote->rc_name),
DRIVER_DESC "(%04x,%04x)",
le16_to_cpu(xbox_remote->udev->descriptor.idVendor),
le16_to_cpu(xbox_remote->udev->descriptor.idProduct));
rc_dev->map_name = RC_MAP_XBOX_DVD; /* default map */
xbox_remote_rc_init(xbox_remote);
/* Device Hardware Initialization */
xbox_remote_initialize(xbox_remote, endpoint_in);
/* Set up and register rc device */
err = rc_register_device(xbox_remote->rdev);
if (err)
goto exit_kill_urbs;
usb_set_intfdata(interface, xbox_remote);
return 0;
exit_kill_urbs:
usb_kill_urb(xbox_remote->irq_urb);
exit_free_buffers:
usb_free_urb(xbox_remote->irq_urb);
exit_free_dev_rdev:
rc_free_device(rc_dev);
kfree(xbox_remote);
return err;
}
/*
* xbox_remote_disconnect
*/
static void xbox_remote_disconnect(struct usb_interface *interface)
{
struct xbox_remote *xbox_remote;
xbox_remote = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
if (!xbox_remote) {
dev_warn(&interface->dev, "%s - null device?\n", __func__);
return;
}
usb_kill_urb(xbox_remote->irq_urb);
rc_unregister_device(xbox_remote->rdev);
usb_free_urb(xbox_remote->irq_urb);
kfree(xbox_remote);
}
/* usb specific object to register with the usb subsystem */
static struct usb_driver xbox_remote_driver = {
.name = "xbox_remote",
.probe = xbox_remote_probe,
.disconnect = xbox_remote_disconnect,
.id_table = xbox_remote_table,
};
module_usb_driver(xbox_remote_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/media/rc/xbox_remote.c |
// SPDX-License-Identifier: GPL-2.0-only
/* ir-rc6-decoder.c - A decoder for the RC6 IR protocol
*
* Copyright (C) 2010 by David Härdeman <[email protected]>
*/
#include "rc-core-priv.h"
#include <linux/module.h>
/*
* This decoder currently supports:
* RC6-0-16 (standard toggle bit in header)
* RC6-6A-20 (no toggle bit)
* RC6-6A-24 (no toggle bit)
* RC6-6A-32 (MCE version with toggle bit in body)
*/
#define RC6_UNIT 444 /* microseconds */
#define RC6_HEADER_NBITS 4 /* not including toggle bit */
#define RC6_0_NBITS 16
#define RC6_6A_32_NBITS 32
#define RC6_6A_NBITS 128 /* Variable 8..128 */
#define RC6_PREFIX_PULSE (6 * RC6_UNIT)
#define RC6_PREFIX_SPACE (2 * RC6_UNIT)
#define RC6_BIT_START (1 * RC6_UNIT)
#define RC6_BIT_END (1 * RC6_UNIT)
#define RC6_TOGGLE_START (2 * RC6_UNIT)
#define RC6_TOGGLE_END (2 * RC6_UNIT)
#define RC6_SUFFIX_SPACE (6 * RC6_UNIT)
#define RC6_MODE_MASK 0x07 /* for the header bits */
#define RC6_STARTBIT_MASK 0x08 /* for the header bits */
#define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */
#define RC6_6A_LCC_MASK 0xffff0000 /* RC6-6A-32 long customer code mask */
#define RC6_6A_MCE_CC 0x800f0000 /* MCE customer code */
#define RC6_6A_ZOTAC_CC 0x80340000 /* Zotac customer code */
#define RC6_6A_KATHREIN_CC 0x80460000 /* Kathrein RCU-676 customer code */
#ifndef CHAR_BIT
#define CHAR_BIT 8 /* Normally in <limits.h> */
#endif
enum rc6_mode {
RC6_MODE_0,
RC6_MODE_6A,
RC6_MODE_UNKNOWN,
};
enum rc6_state {
STATE_INACTIVE,
STATE_PREFIX_SPACE,
STATE_HEADER_BIT_START,
STATE_HEADER_BIT_END,
STATE_TOGGLE_START,
STATE_TOGGLE_END,
STATE_BODY_BIT_START,
STATE_BODY_BIT_END,
STATE_FINISHED,
};
static enum rc6_mode rc6_mode(struct rc6_dec *data)
{
switch (data->header & RC6_MODE_MASK) {
case 0:
return RC6_MODE_0;
case 6:
if (!data->toggle)
return RC6_MODE_6A;
fallthrough;
default:
return RC6_MODE_UNKNOWN;
}
}
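/*
 * Editorial worked example: the four header bits are the start bit
 * followed by a 3-bit mode field, so header == 0x8 (start bit, mode
 * 000) selects RC6_MODE_0, while header == 0xe (start bit, mode 110)
 * selects mode 6, reported as RC6_MODE_6A only when the trailer/toggle
 * slot was a space (data->toggle == 0).
 */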
/**
* ir_rc6_decode() - Decode one RC6 pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct rc6_dec *data = &dev->raw->rc6;
u32 scancode;
u8 toggle;
enum rc_proto protocol;
if (!is_timing_event(ev)) {
if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
goto out;
again:
dev_dbg(&dev->dev, "RC6 decode started at state %i (%uus %s)\n",
data->state, ev.duration, TO_STR(ev.pulse));
if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
return 0;
switch (data->state) {
case STATE_INACTIVE:
if (!ev.pulse)
break;
/* Note: larger margin on first pulse since each RC6_UNIT
is quite short and some hardware takes some time to
adjust to the signal */
if (!eq_margin(ev.duration, RC6_PREFIX_PULSE, RC6_UNIT))
break;
data->state = STATE_PREFIX_SPACE;
data->count = 0;
return 0;
case STATE_PREFIX_SPACE:
if (ev.pulse)
break;
if (!eq_margin(ev.duration, RC6_PREFIX_SPACE, RC6_UNIT / 2))
break;
data->state = STATE_HEADER_BIT_START;
data->header = 0;
return 0;
case STATE_HEADER_BIT_START:
if (!eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2))
break;
data->header <<= 1;
if (ev.pulse)
data->header |= 1;
data->count++;
data->state = STATE_HEADER_BIT_END;
return 0;
case STATE_HEADER_BIT_END:
if (data->count == RC6_HEADER_NBITS)
data->state = STATE_TOGGLE_START;
else
data->state = STATE_HEADER_BIT_START;
decrease_duration(&ev, RC6_BIT_END);
goto again;
case STATE_TOGGLE_START:
if (!eq_margin(ev.duration, RC6_TOGGLE_START, RC6_UNIT / 2))
break;
data->toggle = ev.pulse;
data->state = STATE_TOGGLE_END;
return 0;
case STATE_TOGGLE_END:
if (!(data->header & RC6_STARTBIT_MASK)) {
dev_dbg(&dev->dev, "RC6 invalid start bit\n");
break;
}
data->state = STATE_BODY_BIT_START;
decrease_duration(&ev, RC6_TOGGLE_END);
data->count = 0;
data->body = 0;
switch (rc6_mode(data)) {
case RC6_MODE_0:
data->wanted_bits = RC6_0_NBITS;
break;
case RC6_MODE_6A:
data->wanted_bits = RC6_6A_NBITS;
break;
default:
dev_dbg(&dev->dev, "RC6 unknown mode\n");
goto out;
}
goto again;
case STATE_BODY_BIT_START:
if (eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2)) {
/* Discard LSB's that won't fit in data->body */
if (data->count++ < CHAR_BIT * sizeof data->body) {
data->body <<= 1;
if (ev.pulse)
data->body |= 1;
}
data->state = STATE_BODY_BIT_END;
return 0;
} else if (RC6_MODE_6A == rc6_mode(data) && !ev.pulse &&
geq_margin(ev.duration, RC6_SUFFIX_SPACE, RC6_UNIT / 2)) {
data->state = STATE_FINISHED;
goto again;
}
break;
case STATE_BODY_BIT_END:
if (data->count == data->wanted_bits)
data->state = STATE_FINISHED;
else
data->state = STATE_BODY_BIT_START;
decrease_duration(&ev, RC6_BIT_END);
goto again;
case STATE_FINISHED:
if (ev.pulse)
break;
switch (rc6_mode(data)) {
case RC6_MODE_0:
scancode = data->body;
toggle = data->toggle;
protocol = RC_PROTO_RC6_0;
dev_dbg(&dev->dev, "RC6(0) scancode 0x%04x (toggle: %u)\n",
scancode, toggle);
break;
case RC6_MODE_6A:
if (data->count > CHAR_BIT * sizeof data->body) {
dev_dbg(&dev->dev, "RC6 too many (%u) data bits\n",
data->count);
goto out;
}
scancode = data->body;
switch (data->count) {
case 20:
protocol = RC_PROTO_RC6_6A_20;
toggle = 0;
break;
case 24:
protocol = RC_PROTO_RC6_6A_24;
toggle = 0;
break;
case 32:
switch (scancode & RC6_6A_LCC_MASK) {
case RC6_6A_MCE_CC:
case RC6_6A_KATHREIN_CC:
case RC6_6A_ZOTAC_CC:
protocol = RC_PROTO_RC6_MCE;
toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK);
scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
break;
default:
protocol = RC_PROTO_RC6_6A_32;
toggle = 0;
break;
}
break;
default:
dev_dbg(&dev->dev, "RC6(6A) unsupported length\n");
goto out;
}
dev_dbg(&dev->dev, "RC6(6A) proto 0x%04x, scancode 0x%08x (toggle: %u)\n",
protocol, scancode, toggle);
break;
default:
dev_dbg(&dev->dev, "RC6 unknown mode\n");
goto out;
}
rc_keydown(dev, protocol, scancode, toggle);
data->state = STATE_INACTIVE;
return 0;
}
out:
dev_dbg(&dev->dev, "RC6 decode failed at state %i (%uus %s)\n",
data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
static const struct ir_raw_timings_manchester ir_rc6_timings[4] = {
{
.leader_pulse = RC6_PREFIX_PULSE,
.leader_space = RC6_PREFIX_SPACE,
.clock = RC6_UNIT,
.invert = 1,
},
{
.clock = RC6_UNIT * 2,
.invert = 1,
},
{
.clock = RC6_UNIT,
.invert = 1,
.trailer_space = RC6_SUFFIX_SPACE,
},
};
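/*
 * Editorial sanity check (not part of the driver): the three
 * initialized entries correspond to the Manchester segments of a frame:
 * the 6-unit/2-unit leader plus header bits at the 444us base clock,
 * the double-width trailer/toggle bit at 888us, and the body bits at
 * 444us closed by a 6-unit suffix space. The fourth, zero-filled entry
 * is unused here.
 */
static inline void __maybe_unused ir_rc6_timing_example(void)
{
	BUILD_BUG_ON(RC6_PREFIX_PULSE != 2664);	/* 6 * 444us */
	BUILD_BUG_ON(RC6_TOGGLE_START != 888);	/* 2 * 444us */
	BUILD_BUG_ON(RC6_SUFFIX_SPACE != 2664);	/* 6 * 444us */
}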
/**
* ir_rc6_encode() - Encode a scancode as a stream of raw events
*
* @protocol: protocol to encode
* @scancode: scancode to encode
* @events: array of raw ir events to write into
* @max: maximum size of @events
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
* -EINVAL if the scancode is ambiguous or invalid.
*/
static int ir_rc6_encode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max)
{
int ret;
struct ir_raw_event *e = events;
if (protocol == RC_PROTO_RC6_0) {
/* Modulate the header (Start Bit & Mode-0) */
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc6_timings[0],
RC6_HEADER_NBITS, (1 << 3));
if (ret < 0)
return ret;
/* Modulate Trailer Bit */
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc6_timings[1], 1, 0);
if (ret < 0)
return ret;
/* Modulate rest of the data */
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc6_timings[2], RC6_0_NBITS,
scancode);
if (ret < 0)
return ret;
} else {
int bits;
switch (protocol) {
case RC_PROTO_RC6_MCE:
case RC_PROTO_RC6_6A_32:
bits = 32;
break;
case RC_PROTO_RC6_6A_24:
bits = 24;
break;
case RC_PROTO_RC6_6A_20:
bits = 20;
break;
default:
return -EINVAL;
}
		/* Modulate the header (Start Bit & Header-version 6) */
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc6_timings[0],
RC6_HEADER_NBITS, (1 << 3 | 6));
if (ret < 0)
return ret;
/* Modulate Trailer Bit */
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc6_timings[1], 1, 0);
if (ret < 0)
return ret;
/* Modulate rest of the data */
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc6_timings[2],
bits,
scancode);
if (ret < 0)
return ret;
}
return e - events;
}
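/*
 * Editorial usage sketch (buffer size arbitrary): encoding RC6-0 emits
 * the leader, four header bits, the double-width trailer bit and
 * RC6_0_NBITS body bits as raw events; a negative return means the
 * buffer was too small or the protocol/scancode was invalid.
 */
static int __maybe_unused ir_rc6_encode_example(void)
{
	struct ir_raw_event buf[64];

	return ir_rc6_encode(RC_PROTO_RC6_0, 0x1234, buf, ARRAY_SIZE(buf));
}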
static struct ir_raw_handler rc6_handler = {
.protocols = RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 |
RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 |
RC_PROTO_BIT_RC6_MCE,
.decode = ir_rc6_decode,
.encode = ir_rc6_encode,
.carrier = 36000,
.min_timeout = RC6_SUFFIX_SPACE,
};
static int __init ir_rc6_decode_init(void)
{
ir_raw_handler_register(&rc6_handler);
printk(KERN_INFO "IR RC6 protocol handler initialized\n");
return 0;
}
static void __exit ir_rc6_decode_exit(void)
{
ir_raw_handler_unregister(&rc6_handler);
}
module_init(ir_rc6_decode_init);
module_exit(ir_rc6_decode_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Härdeman <[email protected]>");
MODULE_DESCRIPTION("RC6 IR protocol decoder");
| linux-master | drivers/media/rc/ir-rc6-decoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 Linaro Ltd.
* Copyright (c) 2014 HiSilicon Limited.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <media/rc-core.h>
#define IR_ENABLE 0x00
#define IR_CONFIG 0x04
#define CNT_LEADS 0x08
#define CNT_LEADE 0x0c
#define CNT_SLEADE 0x10
#define CNT0_B 0x14
#define CNT1_B 0x18
#define IR_BUSY 0x1c
#define IR_DATAH 0x20
#define IR_DATAL 0x24
#define IR_INTM 0x28
#define IR_INTS 0x2c
#define IR_INTC 0x30
#define IR_START 0x34
/* interrupt mask */
#define INTMS_SYMBRCV (BIT(24) | BIT(8))
#define INTMS_TIMEOUT (BIT(25) | BIT(9))
#define INTMS_OVERFLOW (BIT(26) | BIT(10))
#define INT_CLR_OVERFLOW BIT(18)
#define INT_CLR_TIMEOUT BIT(17)
#define INT_CLR_RCV BIT(16)
#define INT_CLR_RCVTIMEOUT (BIT(16) | BIT(17))
#define IR_CLK_ENABLE BIT(4)
#define IR_CLK_RESET BIT(5)
/* IR_ENABLE register bits */
#define IR_ENABLE_EN BIT(0)
#define IR_ENABLE_EN_EXTRA BIT(8)
#define IR_CFG_WIDTH_MASK 0xffff
#define IR_CFG_WIDTH_SHIFT 16
#define IR_CFG_FORMAT_MASK 0x3
#define IR_CFG_FORMAT_SHIFT 14
#define IR_CFG_INT_LEVEL_MASK 0x3f
#define IR_CFG_INT_LEVEL_SHIFT 8
/* only support raw mode */
#define IR_CFG_MODE_RAW BIT(7)
#define IR_CFG_FREQ_MASK 0x7f
#define IR_CFG_FREQ_SHIFT 0
#define IR_CFG_INT_THRESHOLD 1
/* symbols start low-to-high; the symbol stream ends high */
#define IR_CFG_SYMBOL_FMT 0
#define IR_CFG_SYMBOL_MAXWIDTH 0x3e80
#define IR_HIX5HD2_NAME "hix5hd2-ir"
/* Need to set extra bit for enabling IR */
#define HIX5HD2_FLAG_EXTRA_ENABLE BIT(0)
struct hix5hd2_soc_data {
u32 clk_reg;
u32 flags;
};
static const struct hix5hd2_soc_data hix5hd2_data = {
.clk_reg = 0x48,
};
static const struct hix5hd2_soc_data hi3796cv300_data = {
.clk_reg = 0x60,
.flags = HIX5HD2_FLAG_EXTRA_ENABLE,
};
struct hix5hd2_ir_priv {
int irq;
void __iomem *base;
struct device *dev;
struct rc_dev *rdev;
struct regmap *regmap;
struct clk *clock;
unsigned long rate;
const struct hix5hd2_soc_data *socdata;
};
static int hix5hd2_ir_clk_enable(struct hix5hd2_ir_priv *dev, bool on)
{
u32 clk_reg = dev->socdata->clk_reg;
u32 val;
int ret = 0;
if (dev->regmap) {
regmap_read(dev->regmap, clk_reg, &val);
if (on) {
val &= ~IR_CLK_RESET;
val |= IR_CLK_ENABLE;
} else {
val &= ~IR_CLK_ENABLE;
val |= IR_CLK_RESET;
}
regmap_write(dev->regmap, clk_reg, val);
} else {
if (on)
ret = clk_prepare_enable(dev->clock);
else
clk_disable_unprepare(dev->clock);
}
return ret;
}
static inline void hix5hd2_ir_enable(struct hix5hd2_ir_priv *priv)
{
u32 val = IR_ENABLE_EN;
if (priv->socdata->flags & HIX5HD2_FLAG_EXTRA_ENABLE)
val |= IR_ENABLE_EN_EXTRA;
writel_relaxed(val, priv->base + IR_ENABLE);
}
static int hix5hd2_ir_config(struct hix5hd2_ir_priv *priv)
{
int timeout = 10000;
u32 val, rate;
hix5hd2_ir_enable(priv);
while (readl_relaxed(priv->base + IR_BUSY)) {
if (timeout--) {
udelay(1);
} else {
dev_err(priv->dev, "IR_BUSY timeout\n");
return -ETIMEDOUT;
}
}
	/* Only raw mode is supported for now; symbols start low-to-high */
	rate = DIV_ROUND_CLOSEST(priv->rate, 1000000);
	/* mask each field before shifting: '<<' binds tighter than '&' */
	val = (IR_CFG_SYMBOL_MAXWIDTH & IR_CFG_WIDTH_MASK) << IR_CFG_WIDTH_SHIFT;
	val |= (IR_CFG_SYMBOL_FMT & IR_CFG_FORMAT_MASK) << IR_CFG_FORMAT_SHIFT;
	val |= ((IR_CFG_INT_THRESHOLD - 1) & IR_CFG_INT_LEVEL_MASK)
	       << IR_CFG_INT_LEVEL_SHIFT;
	val |= IR_CFG_MODE_RAW;
	val |= ((rate - 1) & IR_CFG_FREQ_MASK) << IR_CFG_FREQ_SHIFT;
writel_relaxed(val, priv->base + IR_CONFIG);
writel_relaxed(0x00, priv->base + IR_INTM);
/* write arbitrary value to start */
writel_relaxed(0x01, priv->base + IR_START);
return 0;
}
static int hix5hd2_ir_open(struct rc_dev *rdev)
{
struct hix5hd2_ir_priv *priv = rdev->priv;
int ret;
ret = hix5hd2_ir_clk_enable(priv, true);
if (ret)
return ret;
ret = hix5hd2_ir_config(priv);
if (ret) {
hix5hd2_ir_clk_enable(priv, false);
return ret;
}
return 0;
}
static void hix5hd2_ir_close(struct rc_dev *rdev)
{
struct hix5hd2_ir_priv *priv = rdev->priv;
hix5hd2_ir_clk_enable(priv, false);
}
static irqreturn_t hix5hd2_ir_rx_interrupt(int irq, void *data)
{
u32 symb_num, symb_val, symb_time;
u32 data_l, data_h;
u32 irq_sr, i;
struct hix5hd2_ir_priv *priv = data;
irq_sr = readl_relaxed(priv->base + IR_INTS);
if (irq_sr & INTMS_OVERFLOW) {
		/*
		 * IR_DATAL must be drained before clearing IR_INTS: the
		 * hardware does not flush its FIFO on overflow, so the
		 * driver has to read out the stale samples itself.
		 */
ir_raw_event_overflow(priv->rdev);
symb_num = readl_relaxed(priv->base + IR_DATAH);
for (i = 0; i < symb_num; i++)
readl_relaxed(priv->base + IR_DATAL);
writel_relaxed(INT_CLR_OVERFLOW, priv->base + IR_INTC);
dev_info(priv->dev, "overflow, level=%d\n",
IR_CFG_INT_THRESHOLD);
}
if ((irq_sr & INTMS_SYMBRCV) || (irq_sr & INTMS_TIMEOUT)) {
struct ir_raw_event ev = {};
symb_num = readl_relaxed(priv->base + IR_DATAH);
for (i = 0; i < symb_num; i++) {
symb_val = readl_relaxed(priv->base + IR_DATAL);
data_l = ((symb_val & 0xffff) * 10);
data_h = ((symb_val >> 16) & 0xffff) * 10;
symb_time = (data_l + data_h) / 10;
ev.duration = data_l;
ev.pulse = true;
ir_raw_event_store(priv->rdev, &ev);
if (symb_time < IR_CFG_SYMBOL_MAXWIDTH) {
ev.duration = data_h;
ev.pulse = false;
ir_raw_event_store(priv->rdev, &ev);
} else {
ir_raw_event_set_idle(priv->rdev, true);
}
}
if (irq_sr & INTMS_SYMBRCV)
writel_relaxed(INT_CLR_RCV, priv->base + IR_INTC);
if (irq_sr & INTMS_TIMEOUT)
writel_relaxed(INT_CLR_TIMEOUT, priv->base + IR_INTC);
}
/* Empty software fifo */
ir_raw_event_handle(priv->rdev);
return IRQ_HANDLED;
}
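/*
 * Editorial helper (illustrative only; the register value below is
 * hypothetical): each IR_DATAL word packs a pulse in its low 16 bits
 * and the following space in its high 16 bits, both in 10us steps, so
 * 0x00230046 decodes to a 700us pulse followed by a 350us space. The
 * ISR above stores the space only while the summed symbol width stays
 * under IR_CFG_SYMBOL_MAXWIDTH; otherwise it marks the receiver idle.
 */
static inline void __maybe_unused hix5hd2_unpack_symbol(u32 symb_val,
							u32 *pulse_us,
							u32 *space_us)
{
	*pulse_us = (symb_val & 0xffff) * 10;
	*space_us = ((symb_val >> 16) & 0xffff) * 10;
}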
static const struct of_device_id hix5hd2_ir_table[] = {
{ .compatible = "hisilicon,hix5hd2-ir", &hix5hd2_data, },
{ .compatible = "hisilicon,hi3796cv300-ir", &hi3796cv300_data, },
{},
};
MODULE_DEVICE_TABLE(of, hix5hd2_ir_table);
static int hix5hd2_ir_probe(struct platform_device *pdev)
{
struct rc_dev *rdev;
struct device *dev = &pdev->dev;
struct hix5hd2_ir_priv *priv;
struct device_node *node = pdev->dev.of_node;
const struct of_device_id *of_id;
const char *map_name;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
of_id = of_match_device(hix5hd2_ir_table, dev);
if (!of_id) {
dev_err(dev, "Unable to initialize IR data\n");
return -ENODEV;
}
priv->socdata = of_id->data;
priv->regmap = syscon_regmap_lookup_by_phandle(node,
"hisilicon,power-syscon");
if (IS_ERR(priv->regmap)) {
dev_info(dev, "no power-reg\n");
priv->regmap = NULL;
}
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq < 0)
return priv->irq;
rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev)
return -ENOMEM;
priv->clock = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clock)) {
dev_err(dev, "clock not found\n");
ret = PTR_ERR(priv->clock);
goto err;
}
ret = clk_prepare_enable(priv->clock);
if (ret)
goto err;
priv->rate = clk_get_rate(priv->clock);
rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rdev->priv = priv;
rdev->open = hix5hd2_ir_open;
rdev->close = hix5hd2_ir_close;
rdev->driver_name = IR_HIX5HD2_NAME;
map_name = of_get_property(node, "linux,rc-map-name", NULL);
rdev->map_name = map_name ?: RC_MAP_EMPTY;
rdev->device_name = IR_HIX5HD2_NAME;
rdev->input_phys = IR_HIX5HD2_NAME "/input0";
rdev->input_id.bustype = BUS_HOST;
rdev->input_id.vendor = 0x0001;
rdev->input_id.product = 0x0001;
rdev->input_id.version = 0x0100;
rdev->rx_resolution = 10;
rdev->timeout = IR_CFG_SYMBOL_MAXWIDTH * 10;
ret = rc_register_device(rdev);
if (ret < 0)
goto clkerr;
if (devm_request_irq(dev, priv->irq, hix5hd2_ir_rx_interrupt,
0, pdev->name, priv) < 0) {
dev_err(dev, "IRQ %d register failed\n", priv->irq);
ret = -EINVAL;
goto regerr;
}
priv->rdev = rdev;
priv->dev = dev;
platform_set_drvdata(pdev, priv);
return ret;
regerr:
rc_unregister_device(rdev);
rdev = NULL;
clkerr:
clk_disable_unprepare(priv->clock);
err:
rc_free_device(rdev);
dev_err(dev, "Unable to register device (%d)\n", ret);
return ret;
}
static void hix5hd2_ir_remove(struct platform_device *pdev)
{
struct hix5hd2_ir_priv *priv = platform_get_drvdata(pdev);
clk_disable_unprepare(priv->clock);
rc_unregister_device(priv->rdev);
}
#ifdef CONFIG_PM_SLEEP
static int hix5hd2_ir_suspend(struct device *dev)
{
struct hix5hd2_ir_priv *priv = dev_get_drvdata(dev);
clk_disable_unprepare(priv->clock);
hix5hd2_ir_clk_enable(priv, false);
return 0;
}
static int hix5hd2_ir_resume(struct device *dev)
{
struct hix5hd2_ir_priv *priv = dev_get_drvdata(dev);
int ret;
ret = hix5hd2_ir_clk_enable(priv, true);
if (ret)
return ret;
ret = clk_prepare_enable(priv->clock);
if (ret) {
hix5hd2_ir_clk_enable(priv, false);
return ret;
}
hix5hd2_ir_enable(priv);
writel_relaxed(0x00, priv->base + IR_INTM);
writel_relaxed(0xff, priv->base + IR_INTC);
writel_relaxed(0x01, priv->base + IR_START);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(hix5hd2_ir_pm_ops, hix5hd2_ir_suspend,
hix5hd2_ir_resume);
static struct platform_driver hix5hd2_ir_driver = {
.driver = {
.name = IR_HIX5HD2_NAME,
.of_match_table = hix5hd2_ir_table,
.pm = &hix5hd2_ir_pm_ops,
},
.probe = hix5hd2_ir_probe,
.remove_new = hix5hd2_ir_remove,
};
module_platform_driver(hix5hd2_ir_driver);
MODULE_DESCRIPTION("IR controller driver for hix5hd2 platforms");
MODULE_AUTHOR("Guoxiong Yan <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:hix5hd2-ir");
| linux-master | drivers/media/rc/ir-hix5hd2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for Feature Integration Technology Inc. (aka Fintek) LPC CIR
*
* Copyright (C) 2011 Jarod Wilson <[email protected]>
*
* Special thanks to Fintek for providing hardware and spec sheets.
* This driver is based upon the nuvoton, ite and ene drivers for
* similar hardware.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#include "fintek-cir.h"
/* write val to config reg */
static inline void fintek_cr_write(struct fintek_dev *fintek, u8 val, u8 reg)
{
fit_dbg("%s: reg 0x%02x, val 0x%02x (ip/dp: %02x/%02x)",
__func__, reg, val, fintek->cr_ip, fintek->cr_dp);
outb(reg, fintek->cr_ip);
outb(val, fintek->cr_dp);
}
/* read val from config reg */
static inline u8 fintek_cr_read(struct fintek_dev *fintek, u8 reg)
{
u8 val;
outb(reg, fintek->cr_ip);
val = inb(fintek->cr_dp);
fit_dbg("%s: reg 0x%02x, val 0x%02x (ip/dp: %02x/%02x)",
__func__, reg, val, fintek->cr_ip, fintek->cr_dp);
return val;
}
/* update config register bit without changing other bits */
static inline void fintek_set_reg_bit(struct fintek_dev *fintek, u8 val, u8 reg)
{
u8 tmp = fintek_cr_read(fintek, reg) | val;
fintek_cr_write(fintek, tmp, reg);
}
/* enter config mode */
static inline void fintek_config_mode_enable(struct fintek_dev *fintek)
{
/* Enabling Config Mode explicitly requires writing 2x */
outb(CONFIG_REG_ENABLE, fintek->cr_ip);
outb(CONFIG_REG_ENABLE, fintek->cr_ip);
}
/* exit config mode */
static inline void fintek_config_mode_disable(struct fintek_dev *fintek)
{
outb(CONFIG_REG_DISABLE, fintek->cr_ip);
}
/*
* When you want to address a specific logical device, write its logical
* device number to GCR_LOGICAL_DEV_NO
*/
static inline void fintek_select_logical_dev(struct fintek_dev *fintek, u8 ldev)
{
fintek_cr_write(fintek, ldev, GCR_LOGICAL_DEV_NO);
}
/* write val to cir config register */
static inline void fintek_cir_reg_write(struct fintek_dev *fintek, u8 val, u8 offset)
{
outb(val, fintek->cir_addr + offset);
}
/* read val from cir config register */
static u8 fintek_cir_reg_read(struct fintek_dev *fintek, u8 offset)
{
return inb(fintek->cir_addr + offset);
}
/* dump current cir register contents */
static void cir_dump_regs(struct fintek_dev *fintek)
{
fintek_config_mode_enable(fintek);
fintek_select_logical_dev(fintek, fintek->logical_dev_cir);
pr_info("%s: Dump CIR logical device registers:\n", FINTEK_DRIVER_NAME);
pr_info(" * CR CIR BASE ADDR: 0x%x\n",
(fintek_cr_read(fintek, CIR_CR_BASE_ADDR_HI) << 8) |
fintek_cr_read(fintek, CIR_CR_BASE_ADDR_LO));
pr_info(" * CR CIR IRQ NUM: 0x%x\n",
fintek_cr_read(fintek, CIR_CR_IRQ_SEL));
fintek_config_mode_disable(fintek);
pr_info("%s: Dump CIR registers:\n", FINTEK_DRIVER_NAME);
pr_info(" * STATUS: 0x%x\n",
fintek_cir_reg_read(fintek, CIR_STATUS));
pr_info(" * CONTROL: 0x%x\n",
fintek_cir_reg_read(fintek, CIR_CONTROL));
pr_info(" * RX_DATA: 0x%x\n",
fintek_cir_reg_read(fintek, CIR_RX_DATA));
pr_info(" * TX_CONTROL: 0x%x\n",
fintek_cir_reg_read(fintek, CIR_TX_CONTROL));
pr_info(" * TX_DATA: 0x%x\n",
fintek_cir_reg_read(fintek, CIR_TX_DATA));
}
/* detect hardware features */
static int fintek_hw_detect(struct fintek_dev *fintek)
{
unsigned long flags;
u8 chip_major, chip_minor;
u8 vendor_major, vendor_minor;
u8 portsel, ir_class;
u16 vendor, chip;
fintek_config_mode_enable(fintek);
/* Check if we're using config port 0x4e or 0x2e */
portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL);
if (portsel == 0xff) {
fit_pr(KERN_INFO, "first portsel read was bunk, trying alt");
fintek_config_mode_disable(fintek);
fintek->cr_ip = CR_INDEX_PORT2;
fintek->cr_dp = CR_DATA_PORT2;
fintek_config_mode_enable(fintek);
portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL);
}
fit_dbg("portsel reg: 0x%02x", portsel);
ir_class = fintek_cir_reg_read(fintek, CIR_CR_CLASS);
fit_dbg("ir_class reg: 0x%02x", ir_class);
switch (ir_class) {
case CLASS_RX_2TX:
case CLASS_RX_1TX:
fintek->hw_tx_capable = true;
break;
case CLASS_RX_ONLY:
default:
fintek->hw_tx_capable = false;
break;
}
chip_major = fintek_cr_read(fintek, GCR_CHIP_ID_HI);
chip_minor = fintek_cr_read(fintek, GCR_CHIP_ID_LO);
chip = chip_major << 8 | chip_minor;
vendor_major = fintek_cr_read(fintek, GCR_VENDOR_ID_HI);
vendor_minor = fintek_cr_read(fintek, GCR_VENDOR_ID_LO);
vendor = vendor_major << 8 | vendor_minor;
if (vendor != VENDOR_ID_FINTEK)
fit_pr(KERN_WARNING, "Unknown vendor ID: 0x%04x", vendor);
else
fit_dbg("Read Fintek vendor ID from chip");
fintek_config_mode_disable(fintek);
spin_lock_irqsave(&fintek->fintek_lock, flags);
fintek->chip_major = chip_major;
fintek->chip_minor = chip_minor;
fintek->chip_vendor = vendor;
/*
	 * Newer revisions of this chipset use logical device 8 instead of 5
*/
if ((chip != 0x0408) && (chip != 0x0804))
fintek->logical_dev_cir = LOGICAL_DEV_CIR_REV2;
else
fintek->logical_dev_cir = LOGICAL_DEV_CIR_REV1;
spin_unlock_irqrestore(&fintek->fintek_lock, flags);
return 0;
}
static void fintek_cir_ldev_init(struct fintek_dev *fintek)
{
/* Select CIR logical device and enable */
fintek_select_logical_dev(fintek, fintek->logical_dev_cir);
fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
/* Write allocated CIR address and IRQ information to hardware */
fintek_cr_write(fintek, fintek->cir_addr >> 8, CIR_CR_BASE_ADDR_HI);
fintek_cr_write(fintek, fintek->cir_addr & 0xff, CIR_CR_BASE_ADDR_LO);
fintek_cr_write(fintek, fintek->cir_irq, CIR_CR_IRQ_SEL);
fit_dbg("CIR initialized, base io address: 0x%lx, irq: %d (len: %d)",
fintek->cir_addr, fintek->cir_irq, fintek->cir_port_len);
}
/* enable CIR interrupts */
static void fintek_enable_cir_irq(struct fintek_dev *fintek)
{
fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS);
}
static void fintek_cir_regs_init(struct fintek_dev *fintek)
{
/* clear any and all stray interrupts */
fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
/* and finally, enable interrupts */
fintek_enable_cir_irq(fintek);
}
static void fintek_enable_wake(struct fintek_dev *fintek)
{
fintek_config_mode_enable(fintek);
fintek_select_logical_dev(fintek, LOGICAL_DEV_ACPI);
/* Allow CIR PME's to wake system */
fintek_set_reg_bit(fintek, ACPI_WAKE_EN_CIR_BIT, LDEV_ACPI_WAKE_EN_REG);
/* Enable CIR PME's */
fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_EN_REG);
/* Clear CIR PME status register */
fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_CLR_REG);
/* Save state */
fintek_set_reg_bit(fintek, ACPI_STATE_CIR_BIT, LDEV_ACPI_STATE_REG);
fintek_config_mode_disable(fintek);
}
static int fintek_cmdsize(u8 cmd, u8 subcmd)
{
int datasize = 0;
switch (cmd) {
case BUF_COMMAND_NULL:
if (subcmd == BUF_HW_CMD_HEADER)
datasize = 1;
break;
case BUF_HW_CMD_HEADER:
if (subcmd == BUF_CMD_G_REVISION)
datasize = 2;
break;
case BUF_COMMAND_HEADER:
switch (subcmd) {
case BUF_CMD_S_CARRIER:
case BUF_CMD_S_TIMEOUT:
case BUF_RSP_PULSE_COUNT:
datasize = 2;
break;
case BUF_CMD_SIG_END:
case BUF_CMD_S_TXMASK:
case BUF_CMD_S_RXSENSOR:
datasize = 1;
break;
}
}
return datasize;
}
/* process ir data stored in driver buffer */
static void fintek_process_rx_ir_data(struct fintek_dev *fintek)
{
struct ir_raw_event rawir = {};
u8 sample;
bool event = false;
int i;
for (i = 0; i < fintek->pkts; i++) {
sample = fintek->buf[i];
switch (fintek->parser_state) {
case CMD_HEADER:
fintek->cmd = sample;
if ((fintek->cmd == BUF_COMMAND_HEADER) ||
((fintek->cmd & BUF_COMMAND_MASK) !=
BUF_PULSE_BIT)) {
fintek->parser_state = SUBCMD;
continue;
}
fintek->rem = (fintek->cmd & BUF_LEN_MASK);
fit_dbg("%s: rem: 0x%02x", __func__, fintek->rem);
if (fintek->rem)
fintek->parser_state = PARSE_IRDATA;
else
ir_raw_event_overflow(fintek->rdev);
break;
case SUBCMD:
fintek->rem = fintek_cmdsize(fintek->cmd, sample);
fintek->parser_state = CMD_DATA;
break;
case CMD_DATA:
fintek->rem--;
break;
case PARSE_IRDATA:
fintek->rem--;
rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
rawir.duration = (sample & BUF_SAMPLE_MASK)
* CIR_SAMPLE_PERIOD;
fit_dbg("Storing %s with duration %d",
rawir.pulse ? "pulse" : "space",
rawir.duration);
if (ir_raw_event_store_with_filter(fintek->rdev,
&rawir))
event = true;
break;
}
if ((fintek->parser_state != CMD_HEADER) && !fintek->rem)
fintek->parser_state = CMD_HEADER;
}
fintek->pkts = 0;
if (event) {
fit_dbg("Calling ir_raw_event_handle");
ir_raw_event_handle(fintek->rdev);
}
}
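/*
 * Editorial helper mirroring the PARSE_IRDATA case above (illustrative,
 * not used by the driver; assumes BUF_PULSE_BIT is the top bit and
 * BUF_SAMPLE_MASK the low bits, per fintek-cir.h): a byte of 0x85 would
 * decode to a pulse of 5 * CIR_SAMPLE_PERIOD = 250us at the fixed 50us
 * rx resolution noted in fintek_probe().
 */
static inline struct ir_raw_event __maybe_unused
fintek_sample_to_event(u8 sample)
{
	struct ir_raw_event ev = {
		.pulse = (sample & BUF_PULSE_BIT) != 0,
		.duration = (sample & BUF_SAMPLE_MASK) * CIR_SAMPLE_PERIOD,
	};

	return ev;
}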
/* copy data from hardware rx register into driver buffer */
static void fintek_get_rx_ir_data(struct fintek_dev *fintek, u8 rx_irqs)
{
unsigned long flags;
u8 sample, status;
spin_lock_irqsave(&fintek->fintek_lock, flags);
/*
* We must read data from CIR_RX_DATA until the hardware IR buffer
* is empty and clears the RX_TIMEOUT and/or RX_RECEIVE flags in
* the CIR_STATUS register
*/
do {
sample = fintek_cir_reg_read(fintek, CIR_RX_DATA);
fit_dbg("%s: sample: 0x%02x", __func__, sample);
fintek->buf[fintek->pkts] = sample;
fintek->pkts++;
status = fintek_cir_reg_read(fintek, CIR_STATUS);
if (!(status & CIR_STATUS_IRQ_EN))
break;
} while (status & rx_irqs);
fintek_process_rx_ir_data(fintek);
spin_unlock_irqrestore(&fintek->fintek_lock, flags);
}
static void fintek_cir_log_irqs(u8 status)
{
fit_pr(KERN_INFO, "IRQ 0x%02x:%s%s%s%s%s", status,
status & CIR_STATUS_IRQ_EN ? " IRQEN" : "",
status & CIR_STATUS_TX_FINISH ? " TXF" : "",
status & CIR_STATUS_TX_UNDERRUN ? " TXU" : "",
status & CIR_STATUS_RX_TIMEOUT ? " RXTO" : "",
status & CIR_STATUS_RX_RECEIVE ? " RXOK" : "");
}
/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t fintek_cir_isr(int irq, void *data)
{
struct fintek_dev *fintek = data;
u8 status, rx_irqs;
fit_dbg_verbose("%s firing", __func__);
fintek_config_mode_enable(fintek);
fintek_select_logical_dev(fintek, fintek->logical_dev_cir);
fintek_config_mode_disable(fintek);
/*
* Get IR Status register contents. Write 1 to ack/clear
*
* bit: reg name - description
* 3: TX_FINISH - TX is finished
* 2: TX_UNDERRUN - TX underrun
* 1: RX_TIMEOUT - RX data timeout
* 0: RX_RECEIVE - RX data received
*/
status = fintek_cir_reg_read(fintek, CIR_STATUS);
if (!(status & CIR_STATUS_IRQ_MASK) || status == 0xff) {
fit_dbg_verbose("%s exiting, IRSTS 0x%02x", __func__, status);
fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
return IRQ_RETVAL(IRQ_NONE);
}
if (debug)
fintek_cir_log_irqs(status);
rx_irqs = status & (CIR_STATUS_RX_RECEIVE | CIR_STATUS_RX_TIMEOUT);
if (rx_irqs)
fintek_get_rx_ir_data(fintek, rx_irqs);
/* ack/clear all irq flags we've got */
fintek_cir_reg_write(fintek, status, CIR_STATUS);
fit_dbg_verbose("%s done", __func__);
return IRQ_RETVAL(IRQ_HANDLED);
}
static void fintek_enable_cir(struct fintek_dev *fintek)
{
/* set IRQ enabled */
fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS);
fintek_config_mode_enable(fintek);
/* enable the CIR logical device */
fintek_select_logical_dev(fintek, fintek->logical_dev_cir);
fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
fintek_config_mode_disable(fintek);
/* clear all pending interrupts */
fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
/* enable interrupts */
fintek_enable_cir_irq(fintek);
}
static void fintek_disable_cir(struct fintek_dev *fintek)
{
fintek_config_mode_enable(fintek);
/* disable the CIR logical device */
fintek_select_logical_dev(fintek, fintek->logical_dev_cir);
fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN);
fintek_config_mode_disable(fintek);
}
static int fintek_open(struct rc_dev *dev)
{
struct fintek_dev *fintek = dev->priv;
unsigned long flags;
spin_lock_irqsave(&fintek->fintek_lock, flags);
fintek_enable_cir(fintek);
spin_unlock_irqrestore(&fintek->fintek_lock, flags);
return 0;
}
static void fintek_close(struct rc_dev *dev)
{
struct fintek_dev *fintek = dev->priv;
unsigned long flags;
spin_lock_irqsave(&fintek->fintek_lock, flags);
fintek_disable_cir(fintek);
spin_unlock_irqrestore(&fintek->fintek_lock, flags);
}
/* Allocate memory, probe hardware, and initialize everything */
static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
struct fintek_dev *fintek;
struct rc_dev *rdev;
int ret = -ENOMEM;
fintek = kzalloc(sizeof(struct fintek_dev), GFP_KERNEL);
if (!fintek)
return ret;
/* input device for IR remote (and tx) */
rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev)
goto exit_free_dev_rdev;
ret = -ENODEV;
/* validate pnp resources */
if (!pnp_port_valid(pdev, 0)) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
goto exit_free_dev_rdev;
}
if (!pnp_irq_valid(pdev, 0)) {
dev_err(&pdev->dev, "IR PNP IRQ not valid!\n");
goto exit_free_dev_rdev;
}
fintek->cir_addr = pnp_port_start(pdev, 0);
fintek->cir_irq = pnp_irq(pdev, 0);
fintek->cir_port_len = pnp_port_len(pdev, 0);
fintek->cr_ip = CR_INDEX_PORT;
fintek->cr_dp = CR_DATA_PORT;
spin_lock_init(&fintek->fintek_lock);
pnp_set_drvdata(pdev, fintek);
fintek->pdev = pdev;
ret = fintek_hw_detect(fintek);
if (ret)
goto exit_free_dev_rdev;
/* Initialize CIR & CIR Wake Logical Devices */
fintek_config_mode_enable(fintek);
fintek_cir_ldev_init(fintek);
fintek_config_mode_disable(fintek);
/* Initialize CIR & CIR Wake Config Registers */
fintek_cir_regs_init(fintek);
/* Set up the rc device */
rdev->priv = fintek;
rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rdev->open = fintek_open;
rdev->close = fintek_close;
rdev->device_name = FINTEK_DESCRIPTION;
rdev->input_phys = "fintek/cir0";
rdev->input_id.bustype = BUS_HOST;
rdev->input_id.vendor = VENDOR_ID_FINTEK;
rdev->input_id.product = fintek->chip_major;
rdev->input_id.version = fintek->chip_minor;
rdev->dev.parent = &pdev->dev;
rdev->driver_name = FINTEK_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
rdev->timeout = 1000;
/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
rdev->rx_resolution = CIR_SAMPLE_PERIOD;
fintek->rdev = rdev;
ret = -EBUSY;
/* now claim resources */
if (!request_region(fintek->cir_addr,
fintek->cir_port_len, FINTEK_DRIVER_NAME))
goto exit_free_dev_rdev;
if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
FINTEK_DRIVER_NAME, (void *)fintek))
goto exit_free_cir_addr;
ret = rc_register_device(rdev);
if (ret)
goto exit_free_irq;
device_init_wakeup(&pdev->dev, true);
fit_pr(KERN_NOTICE, "driver has been successfully loaded\n");
if (debug)
cir_dump_regs(fintek);
return 0;
exit_free_irq:
free_irq(fintek->cir_irq, fintek);
exit_free_cir_addr:
release_region(fintek->cir_addr, fintek->cir_port_len);
exit_free_dev_rdev:
rc_free_device(rdev);
kfree(fintek);
return ret;
}
static void fintek_remove(struct pnp_dev *pdev)
{
struct fintek_dev *fintek = pnp_get_drvdata(pdev);
unsigned long flags;
spin_lock_irqsave(&fintek->fintek_lock, flags);
/* disable CIR */
fintek_disable_cir(fintek);
fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
/* enable CIR Wake (for IR power-on) */
fintek_enable_wake(fintek);
spin_unlock_irqrestore(&fintek->fintek_lock, flags);
/* free resources */
free_irq(fintek->cir_irq, fintek);
release_region(fintek->cir_addr, fintek->cir_port_len);
rc_unregister_device(fintek->rdev);
kfree(fintek);
}
static int fintek_suspend(struct pnp_dev *pdev, pm_message_t state)
{
struct fintek_dev *fintek = pnp_get_drvdata(pdev);
unsigned long flags;
fit_dbg("%s called", __func__);
spin_lock_irqsave(&fintek->fintek_lock, flags);
/* disable all CIR interrupts */
fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
spin_unlock_irqrestore(&fintek->fintek_lock, flags);
fintek_config_mode_enable(fintek);
/* disable cir logical dev */
fintek_select_logical_dev(fintek, fintek->logical_dev_cir);
fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN);
fintek_config_mode_disable(fintek);
/* make sure wake is enabled */
fintek_enable_wake(fintek);
return 0;
}
static int fintek_resume(struct pnp_dev *pdev)
{
struct fintek_dev *fintek = pnp_get_drvdata(pdev);
fit_dbg("%s called", __func__);
/* open interrupt */
fintek_enable_cir_irq(fintek);
/* Enable CIR logical device */
fintek_config_mode_enable(fintek);
fintek_select_logical_dev(fintek, fintek->logical_dev_cir);
fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
fintek_config_mode_disable(fintek);
fintek_cir_regs_init(fintek);
return 0;
}
static void fintek_shutdown(struct pnp_dev *pdev)
{
struct fintek_dev *fintek = pnp_get_drvdata(pdev);
fintek_enable_wake(fintek);
}
static const struct pnp_device_id fintek_ids[] = {
{ "FIT0002", 0 }, /* CIR */
{ "", 0 },
};
static struct pnp_driver fintek_driver = {
.name = FINTEK_DRIVER_NAME,
.id_table = fintek_ids,
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.probe = fintek_probe,
.remove = fintek_remove,
.suspend = fintek_suspend,
.resume = fintek_resume,
.shutdown = fintek_shutdown,
};
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");
MODULE_DEVICE_TABLE(pnp, fintek_ids);
MODULE_DESCRIPTION(FINTEK_DESCRIPTION " driver");
MODULE_AUTHOR("Jarod Wilson <[email protected]>");
MODULE_LICENSE("GPL");
module_pnp_driver(fintek_driver);
| linux-master | drivers/media/rc/fintek-cir.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* LIRC base driver
*
* by Artur Lipowski <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/idr.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include "rc-core-priv.h"
#include <uapi/linux/lirc.h>
#define LIRCBUF_SIZE 1024
static dev_t lirc_base_dev;
/* Used to keep track of allocated lirc devices */
static DEFINE_IDA(lirc_ida);
/* Only used for sysfs but defined to void otherwise */
static struct class *lirc_class;
/**
* lirc_raw_event() - Send raw IR data to lirc to be relayed to userspace
*
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*/
void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev)
{
unsigned long flags;
struct lirc_fh *fh;
int sample;
/* Receiver overflow, data missing */
if (ev.overflow) {
/*
* Send lirc overflow message. This message is unknown to
		 * lircd, but it will interpret it as a long space as
		 * long as the value is high. This resets its
* decoder state.
*/
sample = LIRC_OVERFLOW(LIRC_VALUE_MASK);
dev_dbg(&dev->dev, "delivering overflow to lirc_dev\n");
/* Carrier reports */
} else if (ev.carrier_report) {
sample = LIRC_FREQUENCY(ev.carrier);
dev_dbg(&dev->dev, "carrier report (freq: %d)\n", sample);
/* Packet end */
} else if (ev.timeout) {
dev->gap_start = ktime_get();
sample = LIRC_TIMEOUT(ev.duration);
dev_dbg(&dev->dev, "timeout report (duration: %d)\n", sample);
/* Normal sample */
} else {
if (dev->gap_start) {
u64 duration = ktime_us_delta(ktime_get(),
dev->gap_start);
/* Cap by LIRC_VALUE_MASK */
duration = min_t(u64, duration, LIRC_VALUE_MASK);
spin_lock_irqsave(&dev->lirc_fh_lock, flags);
list_for_each_entry(fh, &dev->lirc_fh, list)
kfifo_put(&fh->rawir, LIRC_SPACE(duration));
spin_unlock_irqrestore(&dev->lirc_fh_lock, flags);
dev->gap_start = 0;
}
sample = ev.pulse ? LIRC_PULSE(ev.duration) :
LIRC_SPACE(ev.duration);
dev_dbg(&dev->dev, "delivering %uus %s to lirc_dev\n",
ev.duration, TO_STR(ev.pulse));
}
/*
* bpf does not care about the gap generated above; that exists
* for backwards compatibility
*/
lirc_bpf_run(dev, sample);
spin_lock_irqsave(&dev->lirc_fh_lock, flags);
list_for_each_entry(fh, &dev->lirc_fh, list) {
if (kfifo_put(&fh->rawir, sample))
wake_up_poll(&fh->wait_poll, EPOLLIN | EPOLLRDNORM);
}
spin_unlock_irqrestore(&dev->lirc_fh_lock, flags);
}
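/*
 * Editorial note (values from <uapi/linux/lirc.h>): a mode2 sample is a
 * 24-bit duration in microseconds tagged with a type in the top byte,
 * so LIRC_PULSE(900) == 0x01000384 and LIRC_SPACE(450) == 0x000001c2 as
 * seen by a userspace read().
 */
static inline void __maybe_unused lirc_mode2_packing_example(void)
{
	BUILD_BUG_ON(LIRC_PULSE(900) != 0x01000384);
	BUILD_BUG_ON(LIRC_SPACE(450) != 0x000001c2);
}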
/**
* lirc_scancode_event() - Send scancode data to lirc to be relayed to
* userspace. This can be called in atomic context.
* @dev: the struct rc_dev descriptor of the device
* @lsc: the struct lirc_scancode describing the decoded scancode
*/
void lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc)
{
unsigned long flags;
struct lirc_fh *fh;
lsc->timestamp = ktime_get_ns();
spin_lock_irqsave(&dev->lirc_fh_lock, flags);
list_for_each_entry(fh, &dev->lirc_fh, list) {
if (kfifo_put(&fh->scancodes, *lsc))
wake_up_poll(&fh->wait_poll, EPOLLIN | EPOLLRDNORM);
}
spin_unlock_irqrestore(&dev->lirc_fh_lock, flags);
}
EXPORT_SYMBOL_GPL(lirc_scancode_event);
static int lirc_open(struct inode *inode, struct file *file)
{
struct rc_dev *dev = container_of(inode->i_cdev, struct rc_dev,
lirc_cdev);
struct lirc_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
unsigned long flags;
int retval;
if (!fh)
return -ENOMEM;
get_device(&dev->dev);
if (!dev->registered) {
retval = -ENODEV;
goto out_fh;
}
if (dev->driver_type == RC_DRIVER_IR_RAW) {
if (kfifo_alloc(&fh->rawir, MAX_IR_EVENT_SIZE, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_fh;
}
}
if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
if (kfifo_alloc(&fh->scancodes, 32, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_rawir;
}
}
fh->send_mode = LIRC_MODE_PULSE;
fh->rc = dev;
if (dev->driver_type == RC_DRIVER_SCANCODE)
fh->rec_mode = LIRC_MODE_SCANCODE;
else
fh->rec_mode = LIRC_MODE_MODE2;
retval = rc_open(dev);
if (retval)
goto out_kfifo;
init_waitqueue_head(&fh->wait_poll);
file->private_data = fh;
spin_lock_irqsave(&dev->lirc_fh_lock, flags);
list_add(&fh->list, &dev->lirc_fh);
spin_unlock_irqrestore(&dev->lirc_fh_lock, flags);
stream_open(inode, file);
return 0;
out_kfifo:
if (dev->driver_type != RC_DRIVER_IR_RAW_TX)
kfifo_free(&fh->scancodes);
out_rawir:
if (dev->driver_type == RC_DRIVER_IR_RAW)
kfifo_free(&fh->rawir);
out_fh:
kfree(fh);
put_device(&dev->dev);
return retval;
}
static int lirc_close(struct inode *inode, struct file *file)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *dev = fh->rc;
unsigned long flags;
spin_lock_irqsave(&dev->lirc_fh_lock, flags);
list_del(&fh->list);
spin_unlock_irqrestore(&dev->lirc_fh_lock, flags);
if (dev->driver_type == RC_DRIVER_IR_RAW)
kfifo_free(&fh->rawir);
if (dev->driver_type != RC_DRIVER_IR_RAW_TX)
kfifo_free(&fh->scancodes);
kfree(fh);
rc_close(dev);
put_device(&dev->dev);
return 0;
}
static ssize_t lirc_transmit(struct file *file, const char __user *buf,
size_t n, loff_t *ppos)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *dev = fh->rc;
unsigned int *txbuf;
struct ir_raw_event *raw = NULL;
ssize_t ret;
size_t count;
ktime_t start;
s64 towait;
unsigned int duration = 0; /* signal duration in us */
int i;
ret = mutex_lock_interruptible(&dev->lock);
if (ret)
return ret;
if (!dev->registered) {
ret = -ENODEV;
goto out_unlock;
}
if (!dev->tx_ir) {
ret = -EINVAL;
goto out_unlock;
}
if (fh->send_mode == LIRC_MODE_SCANCODE) {
struct lirc_scancode scan;
if (n != sizeof(scan)) {
ret = -EINVAL;
goto out_unlock;
}
if (copy_from_user(&scan, buf, sizeof(scan))) {
ret = -EFAULT;
goto out_unlock;
}
if (scan.flags || scan.keycode || scan.timestamp ||
scan.rc_proto > RC_PROTO_MAX) {
ret = -EINVAL;
goto out_unlock;
}
/* We only have encoders for 32-bit protocols. */
if (scan.scancode > U32_MAX ||
!rc_validate_scancode(scan.rc_proto, scan.scancode)) {
ret = -EINVAL;
goto out_unlock;
}
raw = kmalloc_array(LIRCBUF_SIZE, sizeof(*raw), GFP_KERNEL);
if (!raw) {
ret = -ENOMEM;
goto out_unlock;
}
ret = ir_raw_encode_scancode(scan.rc_proto, scan.scancode,
raw, LIRCBUF_SIZE);
if (ret < 0)
goto out_kfree_raw;
count = ret;
txbuf = kmalloc_array(count, sizeof(unsigned int), GFP_KERNEL);
if (!txbuf) {
ret = -ENOMEM;
goto out_kfree_raw;
}
for (i = 0; i < count; i++)
txbuf[i] = raw[i].duration;
if (dev->s_tx_carrier) {
int carrier = ir_raw_encode_carrier(scan.rc_proto);
if (carrier > 0)
dev->s_tx_carrier(dev, carrier);
}
} else {
if (n < sizeof(unsigned int) || n % sizeof(unsigned int)) {
ret = -EINVAL;
goto out_unlock;
}
count = n / sizeof(unsigned int);
if (count > LIRCBUF_SIZE || count % 2 == 0) {
ret = -EINVAL;
goto out_unlock;
}
txbuf = memdup_user(buf, n);
if (IS_ERR(txbuf)) {
ret = PTR_ERR(txbuf);
goto out_unlock;
}
}
for (i = 0; i < count; i++) {
if (txbuf[i] > IR_MAX_DURATION - duration || !txbuf[i]) {
ret = -EINVAL;
goto out_kfree;
}
duration += txbuf[i];
}
start = ktime_get();
ret = dev->tx_ir(dev, txbuf, count);
if (ret < 0)
goto out_kfree;
kfree(txbuf);
kfree(raw);
mutex_unlock(&dev->lock);
/*
* The lircd gap calculation expects the write function to
* wait for the actual IR signal to be transmitted before
* returning.
*/
towait = ktime_us_delta(ktime_add_us(start, duration),
ktime_get());
if (towait > 0) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(towait));
}
return n;
out_kfree:
kfree(txbuf);
out_kfree_raw:
kfree(raw);
out_unlock:
mutex_unlock(&dev->lock);
return ret;
}
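/*
 * Illustrative fragment, not part of the driver: driving the write() path
 * above from userspace in LIRC_MODE_PULSE. The durations are made up; note
 * the odd sample count (pulse first and last) that lirc_transmit() enforces,
 * and that the call blocks until the IR has actually gone out.
 */
#if 0
	unsigned int txbuf[] = { 9000, 4500, 560 };	/* pulse, space, pulse in us */
	int fd = open("/dev/lirc0", O_WRONLY);		/* assumed device node */
	if (fd >= 0)
		write(fd, txbuf, sizeof(txbuf));	/* returns after transmission */
#endif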
static long lirc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *dev = fh->rc;
u32 __user *argp = (u32 __user *)(arg);
u32 val = 0;
int ret;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
ret = get_user(val, argp);
if (ret)
return ret;
}
ret = mutex_lock_interruptible(&dev->lock);
if (ret)
return ret;
if (!dev->registered) {
ret = -ENODEV;
goto out;
}
switch (cmd) {
case LIRC_GET_FEATURES:
if (dev->driver_type == RC_DRIVER_SCANCODE)
val |= LIRC_CAN_REC_SCANCODE;
if (dev->driver_type == RC_DRIVER_IR_RAW) {
val |= LIRC_CAN_REC_MODE2;
if (dev->rx_resolution)
val |= LIRC_CAN_GET_REC_RESOLUTION;
}
if (dev->tx_ir) {
val |= LIRC_CAN_SEND_PULSE;
if (dev->s_tx_mask)
val |= LIRC_CAN_SET_TRANSMITTER_MASK;
if (dev->s_tx_carrier)
val |= LIRC_CAN_SET_SEND_CARRIER;
if (dev->s_tx_duty_cycle)
val |= LIRC_CAN_SET_SEND_DUTY_CYCLE;
}
if (dev->s_rx_carrier_range)
val |= LIRC_CAN_SET_REC_CARRIER |
LIRC_CAN_SET_REC_CARRIER_RANGE;
if (dev->s_wideband_receiver)
val |= LIRC_CAN_USE_WIDEBAND_RECEIVER;
if (dev->s_carrier_report)
val |= LIRC_CAN_MEASURE_CARRIER;
if (dev->max_timeout)
val |= LIRC_CAN_SET_REC_TIMEOUT;
break;
/* mode support */
case LIRC_GET_REC_MODE:
if (dev->driver_type == RC_DRIVER_IR_RAW_TX)
ret = -ENOTTY;
else
val = fh->rec_mode;
break;
case LIRC_SET_REC_MODE:
switch (dev->driver_type) {
case RC_DRIVER_IR_RAW_TX:
ret = -ENOTTY;
break;
case RC_DRIVER_SCANCODE:
if (val != LIRC_MODE_SCANCODE)
ret = -EINVAL;
break;
case RC_DRIVER_IR_RAW:
if (!(val == LIRC_MODE_MODE2 ||
val == LIRC_MODE_SCANCODE))
ret = -EINVAL;
break;
}
if (!ret)
fh->rec_mode = val;
break;
case LIRC_GET_SEND_MODE:
if (!dev->tx_ir)
ret = -ENOTTY;
else
val = fh->send_mode;
break;
case LIRC_SET_SEND_MODE:
if (!dev->tx_ir)
ret = -ENOTTY;
else if (!(val == LIRC_MODE_PULSE || val == LIRC_MODE_SCANCODE))
ret = -EINVAL;
else
fh->send_mode = val;
break;
/* TX settings */
case LIRC_SET_TRANSMITTER_MASK:
if (!dev->s_tx_mask)
ret = -ENOTTY;
else
ret = dev->s_tx_mask(dev, val);
break;
case LIRC_SET_SEND_CARRIER:
if (!dev->s_tx_carrier)
ret = -ENOTTY;
else
ret = dev->s_tx_carrier(dev, val);
break;
case LIRC_SET_SEND_DUTY_CYCLE:
if (!dev->s_tx_duty_cycle)
ret = -ENOTTY;
else if (val <= 0 || val >= 100)
ret = -EINVAL;
else
ret = dev->s_tx_duty_cycle(dev, val);
break;
/* RX settings */
case LIRC_SET_REC_CARRIER:
if (!dev->s_rx_carrier_range)
ret = -ENOTTY;
else if (val <= 0)
ret = -EINVAL;
else
ret = dev->s_rx_carrier_range(dev, fh->carrier_low,
val);
break;
case LIRC_SET_REC_CARRIER_RANGE:
if (!dev->s_rx_carrier_range)
ret = -ENOTTY;
else if (val <= 0)
ret = -EINVAL;
else
fh->carrier_low = val;
break;
case LIRC_GET_REC_RESOLUTION:
if (!dev->rx_resolution)
ret = -ENOTTY;
else
val = dev->rx_resolution;
break;
case LIRC_SET_WIDEBAND_RECEIVER:
if (!dev->s_wideband_receiver)
ret = -ENOTTY;
else
ret = dev->s_wideband_receiver(dev, !!val);
break;
case LIRC_SET_MEASURE_CARRIER_MODE:
if (!dev->s_carrier_report)
ret = -ENOTTY;
else
ret = dev->s_carrier_report(dev, !!val);
break;
/* Generic timeout support */
case LIRC_GET_MIN_TIMEOUT:
if (!dev->max_timeout)
ret = -ENOTTY;
else
val = dev->min_timeout;
break;
case LIRC_GET_MAX_TIMEOUT:
if (!dev->max_timeout)
ret = -ENOTTY;
else
val = dev->max_timeout;
break;
case LIRC_SET_REC_TIMEOUT:
if (!dev->max_timeout) {
ret = -ENOTTY;
} else {
if (val < dev->min_timeout || val > dev->max_timeout)
ret = -EINVAL;
else if (dev->s_timeout)
ret = dev->s_timeout(dev, val);
else
dev->timeout = val;
}
break;
case LIRC_GET_REC_TIMEOUT:
if (!dev->timeout)
ret = -ENOTTY;
else
val = dev->timeout;
break;
case LIRC_SET_REC_TIMEOUT_REPORTS:
if (dev->driver_type != RC_DRIVER_IR_RAW)
ret = -ENOTTY;
break;
default:
ret = -ENOTTY;
}
if (!ret && _IOC_DIR(cmd) & _IOC_READ)
ret = put_user(val, argp);
out:
mutex_unlock(&dev->lock);
return ret;
}
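/*
 * Illustrative fragment, not part of the driver: probing the feature bits
 * handled above before configuring TX. Values are hypothetical; "fd" is an
 * already-open lirc file descriptor.
 */
#if 0
	__u32 features, carrier = 38000;	/* Hz, example value */
	if (ioctl(fd, LIRC_GET_FEATURES, &features) == 0 &&
	    (features & LIRC_CAN_SET_SEND_CARRIER))
		ioctl(fd, LIRC_SET_SEND_CARRIER, &carrier);
#endif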
static __poll_t lirc_poll(struct file *file, struct poll_table_struct *wait)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *rcdev = fh->rc;
__poll_t events = 0;
poll_wait(file, &fh->wait_poll, wait);
if (!rcdev->registered) {
events = EPOLLHUP | EPOLLERR;
} else if (rcdev->driver_type != RC_DRIVER_IR_RAW_TX) {
if (fh->rec_mode == LIRC_MODE_SCANCODE &&
!kfifo_is_empty(&fh->scancodes))
events = EPOLLIN | EPOLLRDNORM;
if (fh->rec_mode == LIRC_MODE_MODE2 &&
!kfifo_is_empty(&fh->rawir))
events = EPOLLIN | EPOLLRDNORM;
}
return events;
}
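/*
 * Illustrative fragment, not part of the driver: waiting for data with
 * poll(), matching the EPOLLIN/EPOLLHUP semantics implemented above. "fd"
 * and "sc" are assumed to exist as in the earlier sketches.
 */
#if 0
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	if (poll(&pfd, 1, 1000 /* ms */) > 0) {
		if (pfd.revents & POLLHUP)
			close(fd);			/* device was unregistered */
		else if (pfd.revents & POLLIN)
			read(fd, &sc, sizeof(sc));	/* won't block now */
	}
#endif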
static ssize_t lirc_read_mode2(struct file *file, char __user *buffer,
size_t length)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *rcdev = fh->rc;
unsigned int copied;
int ret;
if (length < sizeof(unsigned int) || length % sizeof(unsigned int))
return -EINVAL;
do {
if (kfifo_is_empty(&fh->rawir)) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(fh->wait_poll,
!kfifo_is_empty(&fh->rawir) ||
!rcdev->registered);
if (ret)
return ret;
}
if (!rcdev->registered)
return -ENODEV;
ret = mutex_lock_interruptible(&rcdev->lock);
if (ret)
return ret;
ret = kfifo_to_user(&fh->rawir, buffer, length, &copied);
mutex_unlock(&rcdev->lock);
if (ret)
return ret;
} while (copied == 0);
return copied;
}
static ssize_t lirc_read_scancode(struct file *file, char __user *buffer,
size_t length)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *rcdev = fh->rc;
unsigned int copied;
int ret;
if (length < sizeof(struct lirc_scancode) ||
length % sizeof(struct lirc_scancode))
return -EINVAL;
do {
if (kfifo_is_empty(&fh->scancodes)) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(fh->wait_poll,
!kfifo_is_empty(&fh->scancodes) ||
!rcdev->registered);
if (ret)
return ret;
}
if (!rcdev->registered)
return -ENODEV;
ret = mutex_lock_interruptible(&rcdev->lock);
if (ret)
return ret;
ret = kfifo_to_user(&fh->scancodes, buffer, length, &copied);
mutex_unlock(&rcdev->lock);
if (ret)
return ret;
} while (copied == 0);
return copied;
}
static ssize_t lirc_read(struct file *file, char __user *buffer, size_t length,
loff_t *ppos)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *rcdev = fh->rc;
if (rcdev->driver_type == RC_DRIVER_IR_RAW_TX)
return -EINVAL;
if (!rcdev->registered)
return -ENODEV;
if (fh->rec_mode == LIRC_MODE_MODE2)
return lirc_read_mode2(file, buffer, length);
else /* LIRC_MODE_SCANCODE */
return lirc_read_scancode(file, buffer, length);
}
static const struct file_operations lirc_fops = {
.owner = THIS_MODULE,
.write = lirc_transmit,
.unlocked_ioctl = lirc_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.read = lirc_read,
.poll = lirc_poll,
.open = lirc_open,
.release = lirc_close,
.llseek = no_llseek,
};
static void lirc_release_device(struct device *ld)
{
struct rc_dev *rcdev = container_of(ld, struct rc_dev, lirc_dev);
put_device(&rcdev->dev);
}
int lirc_register(struct rc_dev *dev)
{
const char *rx_type, *tx_type;
int err, minor;
minor = ida_alloc_max(&lirc_ida, RC_DEV_MAX - 1, GFP_KERNEL);
if (minor < 0)
return minor;
device_initialize(&dev->lirc_dev);
dev->lirc_dev.class = lirc_class;
dev->lirc_dev.parent = &dev->dev;
dev->lirc_dev.release = lirc_release_device;
dev->lirc_dev.devt = MKDEV(MAJOR(lirc_base_dev), minor);
dev_set_name(&dev->lirc_dev, "lirc%d", minor);
INIT_LIST_HEAD(&dev->lirc_fh);
spin_lock_init(&dev->lirc_fh_lock);
cdev_init(&dev->lirc_cdev, &lirc_fops);
err = cdev_device_add(&dev->lirc_cdev, &dev->lirc_dev);
if (err)
goto out_ida;
get_device(&dev->dev);
switch (dev->driver_type) {
case RC_DRIVER_SCANCODE:
rx_type = "scancode";
break;
case RC_DRIVER_IR_RAW:
rx_type = "raw IR";
break;
default:
rx_type = "no";
break;
}
if (dev->tx_ir)
tx_type = "raw IR";
else
tx_type = "no";
dev_info(&dev->dev, "lirc_dev: driver %s registered at minor = %d, %s receiver, %s transmitter",
dev->driver_name, minor, rx_type, tx_type);
return 0;
out_ida:
ida_free(&lirc_ida, minor);
return err;
}
void lirc_unregister(struct rc_dev *dev)
{
unsigned long flags;
struct lirc_fh *fh;
dev_dbg(&dev->dev, "lirc_dev: driver %s unregistered from minor = %d\n",
dev->driver_name, MINOR(dev->lirc_dev.devt));
spin_lock_irqsave(&dev->lirc_fh_lock, flags);
list_for_each_entry(fh, &dev->lirc_fh, list)
wake_up_poll(&fh->wait_poll, EPOLLHUP | EPOLLERR);
spin_unlock_irqrestore(&dev->lirc_fh_lock, flags);
cdev_device_del(&dev->lirc_cdev, &dev->lirc_dev);
ida_free(&lirc_ida, MINOR(dev->lirc_dev.devt));
}
int __init lirc_dev_init(void)
{
int retval;
lirc_class = class_create("lirc");
if (IS_ERR(lirc_class)) {
pr_err("class_create failed\n");
return PTR_ERR(lirc_class);
}
retval = alloc_chrdev_region(&lirc_base_dev, 0, RC_DEV_MAX, "lirc");
if (retval) {
class_destroy(lirc_class);
pr_err("alloc_chrdev_region failed\n");
return retval;
}
pr_debug("IR Remote Control driver registered, major %d\n",
MAJOR(lirc_base_dev));
return 0;
}
void __exit lirc_dev_exit(void)
{
class_destroy(lirc_class);
unregister_chrdev_region(lirc_base_dev, RC_DEV_MAX);
}
struct rc_dev *rc_dev_get_from_fd(int fd)
{
struct fd f = fdget(fd);
struct lirc_fh *fh;
struct rc_dev *dev;
if (!f.file)
return ERR_PTR(-EBADF);
if (f.file->f_op != &lirc_fops) {
fdput(f);
return ERR_PTR(-EINVAL);
}
fh = f.file->private_data;
dev = fh->rc;
get_device(&dev->dev);
fdput(f);
return dev;
}
MODULE_ALIAS("lirc_dev");
| linux-master | drivers/media/rc/lirc_dev.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* driver for ENE KB3926 B/C/D/E/F CIR (pnp id: ENE0XXX)
*
* Copyright (C) 2010 Maxim Levitsky <[email protected]>
*
* Special thanks to:
 * Sami R. <[email protected]> for a lot of help in debugging and
 * therefore bringing to life support for transmission & learning mode.
*
* Charlie Andrews <[email protected]> for lots of help in
 * bringing up support for the new firmware buffer that is popular
 * on recent notebooks
*
* ENE for partial device documentation
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#include "ene_ir.h"
static int sample_period;
static bool learning_mode_force;
static int debug;
static bool txsim;
static void ene_set_reg_addr(struct ene_device *dev, u16 reg)
{
outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
}
/* read a hardware register */
static u8 ene_read_reg(struct ene_device *dev, u16 reg)
{
u8 retval;
ene_set_reg_addr(dev, reg);
retval = inb(dev->hw_io + ENE_IO);
dbg_regs("reg %04x == %02x", reg, retval);
return retval;
}
/* write a hardware register */
static void ene_write_reg(struct ene_device *dev, u16 reg, u8 value)
{
dbg_regs("reg %04x <- %02x", reg, value);
ene_set_reg_addr(dev, reg);
outb(value, dev->hw_io + ENE_IO);
}
/* Set bits in hardware register */
static void ene_set_reg_mask(struct ene_device *dev, u16 reg, u8 mask)
{
dbg_regs("reg %04x |= %02x", reg, mask);
ene_set_reg_addr(dev, reg);
outb(inb(dev->hw_io + ENE_IO) | mask, dev->hw_io + ENE_IO);
}
/* Clear bits in hardware register */
static void ene_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask)
{
dbg_regs("reg %04x &= ~%02x ", reg, mask);
ene_set_reg_addr(dev, reg);
outb(inb(dev->hw_io + ENE_IO) & ~mask, dev->hw_io + ENE_IO);
}
/* A helper to set/clear bits in a register according to a boolean condition */
static void ene_set_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask,
bool set)
{
if (set)
ene_set_reg_mask(dev, reg, mask);
else
ene_clear_reg_mask(dev, reg, mask);
}
/* detect hardware features */
static int ene_hw_detect(struct ene_device *dev)
{
u8 chip_major, chip_minor;
u8 hw_revision, old_ver;
u8 fw_reg2, fw_reg1;
ene_clear_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);
chip_major = ene_read_reg(dev, ENE_ECVER_MAJOR);
chip_minor = ene_read_reg(dev, ENE_ECVER_MINOR);
ene_set_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);
hw_revision = ene_read_reg(dev, ENE_ECHV);
old_ver = ene_read_reg(dev, ENE_HW_VER_OLD);
dev->pll_freq = (ene_read_reg(dev, ENE_PLLFRH) << 4) +
(ene_read_reg(dev, ENE_PLLFRL) >> 4);
if (sample_period != ENE_DEFAULT_SAMPLE_PERIOD)
dev->rx_period_adjust =
dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 2 : 4;
if (hw_revision == 0xFF) {
pr_warn("device seems to be disabled\n");
pr_warn("send a mail to [email protected]\n");
pr_warn("please attach output of acpidump and dmidecode\n");
return -ENODEV;
}
pr_notice("chip is 0x%02x%02x - kbver = 0x%02x, rev = 0x%02x\n",
chip_major, chip_minor, old_ver, hw_revision);
pr_notice("PLL freq = %d\n", dev->pll_freq);
if (chip_major == 0x33) {
pr_warn("chips 0x33xx aren't supported\n");
return -ENODEV;
}
if (chip_major == 0x39 && chip_minor == 0x26 && hw_revision == 0xC0) {
dev->hw_revision = ENE_HW_C;
pr_notice("KB3926C detected\n");
} else if (old_ver == 0x24 && hw_revision == 0xC0) {
dev->hw_revision = ENE_HW_B;
pr_notice("KB3926B detected\n");
} else {
dev->hw_revision = ENE_HW_D;
pr_notice("KB3926D or higher detected\n");
}
/* detect features hardware supports */
if (dev->hw_revision < ENE_HW_C)
return 0;
fw_reg1 = ene_read_reg(dev, ENE_FW1);
fw_reg2 = ene_read_reg(dev, ENE_FW2);
pr_notice("Firmware regs: %02x %02x\n", fw_reg1, fw_reg2);
dev->hw_use_gpio_0a = !!(fw_reg2 & ENE_FW2_GP0A);
dev->hw_learning_and_tx_capable = !!(fw_reg2 & ENE_FW2_LEARNING);
dev->hw_extra_buffer = !!(fw_reg1 & ENE_FW1_HAS_EXTRA_BUF);
if (dev->hw_learning_and_tx_capable)
dev->hw_fan_input = !!(fw_reg2 & ENE_FW2_FAN_INPUT);
pr_notice("Hardware features:\n");
if (dev->hw_learning_and_tx_capable) {
pr_notice("* Supports transmitting & learning mode\n");
pr_notice(" This feature is rare and therefore,\n");
pr_notice(" you are welcome to test it,\n");
pr_notice(" and/or contact the author via:\n");
pr_notice(" [email protected]\n");
pr_notice(" or [email protected]\n");
pr_notice("* Uses GPIO %s for IR raw input\n",
dev->hw_use_gpio_0a ? "40" : "0A");
if (dev->hw_fan_input)
pr_notice("* Uses unused fan feedback input as source of demodulated IR data\n");
}
if (!dev->hw_fan_input)
pr_notice("* Uses GPIO %s for IR demodulated input\n",
dev->hw_use_gpio_0a ? "0A" : "40");
if (dev->hw_extra_buffer)
pr_notice("* Uses new style input buffer\n");
return 0;
}
/* Read properties of hw sample buffer */
static void ene_rx_setup_hw_buffer(struct ene_device *dev)
{
u16 tmp;
ene_rx_read_hw_pointer(dev);
dev->r_pointer = dev->w_pointer;
if (!dev->hw_extra_buffer) {
dev->buffer_len = ENE_FW_PACKET_SIZE * 2;
return;
}
tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER);
tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER+1) << 8;
dev->extra_buf1_address = tmp;
dev->extra_buf1_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 2);
tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 3);
tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 4) << 8;
dev->extra_buf2_address = tmp;
dev->extra_buf2_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 5);
dev->buffer_len = dev->extra_buf1_len + dev->extra_buf2_len + 8;
pr_notice("Hardware uses 2 extended buffers:\n");
pr_notice(" 0x%04x - len : %d\n",
dev->extra_buf1_address, dev->extra_buf1_len);
pr_notice(" 0x%04x - len : %d\n",
dev->extra_buf2_address, dev->extra_buf2_len);
pr_notice("Total buffer len = %d\n", dev->buffer_len);
if (dev->buffer_len > 64 || dev->buffer_len < 16)
goto error;
if (dev->extra_buf1_address > 0xFBFC ||
dev->extra_buf1_address < 0xEC00)
goto error;
if (dev->extra_buf2_address > 0xFBFC ||
dev->extra_buf2_address < 0xEC00)
goto error;
if (dev->r_pointer > dev->buffer_len)
goto error;
ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
return;
error:
pr_warn("Error validating extra buffers, device probably won't work\n");
dev->hw_extra_buffer = false;
ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
}
/* Restore the pointers to the extra buffers - to make module reload work */
static void ene_rx_restore_hw_buffer(struct ene_device *dev)
{
if (!dev->hw_extra_buffer)
return;
ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 0,
dev->extra_buf1_address & 0xFF);
ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 1,
dev->extra_buf1_address >> 8);
ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 2, dev->extra_buf1_len);
ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 3,
dev->extra_buf2_address & 0xFF);
ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 4,
dev->extra_buf2_address >> 8);
ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 5,
dev->extra_buf2_len);
ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
}
/* Read hardware write pointer */
static void ene_rx_read_hw_pointer(struct ene_device *dev)
{
if (dev->hw_extra_buffer)
dev->w_pointer = ene_read_reg(dev, ENE_FW_RX_POINTER);
else
dev->w_pointer = ene_read_reg(dev, ENE_FW2)
& ENE_FW2_BUF_WPTR ? 0 : ENE_FW_PACKET_SIZE;
dbg_verbose("RB: HW write pointer: %02x, driver read pointer: %02x",
dev->w_pointer, dev->r_pointer);
}
/* Gets address of next sample from HW ring buffer */
static int ene_rx_get_sample_reg(struct ene_device *dev)
{
int r_pointer;
if (dev->r_pointer == dev->w_pointer) {
dbg_verbose("RB: hit end, try update w_pointer");
ene_rx_read_hw_pointer(dev);
}
if (dev->r_pointer == dev->w_pointer) {
dbg_verbose("RB: end of data at %d", dev->r_pointer);
return 0;
}
dbg_verbose("RB: reading at offset %d", dev->r_pointer);
r_pointer = dev->r_pointer;
dev->r_pointer++;
if (dev->r_pointer == dev->buffer_len)
dev->r_pointer = 0;
dbg_verbose("RB: next read will be from offset %d", dev->r_pointer);
if (r_pointer < 8) {
dbg_verbose("RB: read at main buffer at %d", r_pointer);
return ENE_FW_SAMPLE_BUFFER + r_pointer;
}
r_pointer -= 8;
if (r_pointer < dev->extra_buf1_len) {
dbg_verbose("RB: read at 1st extra buffer at %d", r_pointer);
return dev->extra_buf1_address + r_pointer;
}
r_pointer -= dev->extra_buf1_len;
if (r_pointer < dev->extra_buf2_len) {
dbg_verbose("RB: read at 2nd extra buffer at %d", r_pointer);
return dev->extra_buf2_address + r_pointer;
}
dbg("attempt to read beyond ring buffer end");
return 0;
}
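/*
 * Worked example for the mapping above (extra_buf1_len = 24 is just an
 * assumed value): a read pointer of 10 falls past the 8-byte main buffer
 * and is fetched from extra_buf1_address + 2, while a pointer of
 * 8 + 24 + 3 = 35 would come from extra_buf2_address + 3.
 */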
/* Sense current received carrier */
static void ene_rx_sense_carrier(struct ene_device *dev)
{
int carrier, duty_cycle;
int period = ene_read_reg(dev, ENE_CIRCAR_PRD);
int hperiod = ene_read_reg(dev, ENE_CIRCAR_HPRD);
if (!(period & ENE_CIRCAR_PRD_VALID))
return;
period &= ~ENE_CIRCAR_PRD_VALID;
if (!period)
return;
dbg("RX: hardware carrier period = %02x", period);
dbg("RX: hardware carrier pulse period = %02x", hperiod);
carrier = 2000000 / period;
duty_cycle = (hperiod * 100) / period;
dbg("RX: sensed carrier = %d Hz, duty cycle %d%%",
carrier, duty_cycle);
if (dev->carrier_detect_enabled) {
struct ir_raw_event ev = {
.carrier_report = true,
.carrier = carrier,
.duty_cycle = duty_cycle
};
ir_raw_event_store(dev->rdev, &ev);
}
}
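/*
 * Worked example for the conversion above (the period register counts in
 * 500 ns units, hence the 2000000 numerator): period = 52 and hperiod = 26
 * give 2000000 / 52 ~= 38461 Hz at a duty cycle of (26 * 100) / 52 = 50%.
 */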
/* this enables/disables the CIR RX engine */
static void ene_rx_enable_cir_engine(struct ene_device *dev, bool enable)
{
ene_set_clear_reg_mask(dev, ENE_CIRCFG,
ENE_CIRCFG_RX_EN | ENE_CIRCFG_RX_IRQ, enable);
}
/* this selects the input for the CIR engine: either GPIO 0A or GPIO 40 */
static void ene_rx_select_input(struct ene_device *dev, bool gpio_0a)
{
ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_GPIO0A, gpio_0a);
}
/*
* this enables alternative input via fan tachometer sensor and bypasses
* the hw CIR engine
*/
static void ene_rx_enable_fan_input(struct ene_device *dev, bool enable)
{
if (!dev->hw_fan_input)
return;
	if (!enable) {
		ene_write_reg(dev, ENE_FAN_AS_IN1, 0);
	} else {
		ene_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN);
		ene_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN);
	}
}
/* set up the receiver for RX */
static void ene_rx_setup(struct ene_device *dev)
{
bool learning_mode = dev->learning_mode_enabled ||
dev->carrier_detect_enabled;
int sample_period_adjust = 0;
dbg("RX: setup receiver, learning mode = %d", learning_mode);
/* This selects RLC input and clears CFG2 settings */
ene_write_reg(dev, ENE_CIRCFG2, 0x00);
/* set sample period*/
if (sample_period == ENE_DEFAULT_SAMPLE_PERIOD)
sample_period_adjust =
dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 1 : 2;
ene_write_reg(dev, ENE_CIRRLC_CFG,
(sample_period + sample_period_adjust) |
ENE_CIRRLC_CFG_OVERFLOW);
/* revB doesn't support inputs */
if (dev->hw_revision < ENE_HW_C)
goto select_timeout;
if (learning_mode) {
WARN_ON(!dev->hw_learning_and_tx_capable);
		/*
		 * Enable the opposite of the normal input: if GPIO 40 is
		 * normally used, use GPIO 0A instead and vice versa. This
		 * input carries the non-demodulated signal, and we tell
		 * the hardware to demodulate it itself.
		 */
ene_rx_select_input(dev, !dev->hw_use_gpio_0a);
dev->rx_fan_input_inuse = false;
/* Enable carrier demodulation */
ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);
/* Enable carrier detection */
ene_write_reg(dev, ENE_CIRCAR_PULS, 0x63);
ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT,
dev->carrier_detect_enabled || debug);
} else {
if (dev->hw_fan_input)
dev->rx_fan_input_inuse = true;
else
ene_rx_select_input(dev, dev->hw_use_gpio_0a);
/* Disable carrier detection & demodulation */
ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);
ene_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT);
}
select_timeout:
if (dev->rx_fan_input_inuse) {
dev->rdev->rx_resolution = ENE_FW_SAMPLE_PERIOD_FAN;
		/*
		 * Fan input doesn't support timeouts; it just ends the
		 * input with a maximum sample.
		 */
dev->rdev->min_timeout = dev->rdev->max_timeout =
ENE_FW_SMPL_BUF_FAN_MSK *
ENE_FW_SAMPLE_PERIOD_FAN;
} else {
dev->rdev->rx_resolution = sample_period;
		/*
		 * Theoretically the timeout is unlimited, but we cap it
		 * because it was seen that on one device it would stop
		 * sending spaces after around 250 msec. Besides, this is
		 * close to 2^32 anyway and timeout is u32.
		 */
dev->rdev->min_timeout = 127 * sample_period;
dev->rdev->max_timeout = 200000;
}
if (dev->hw_learning_and_tx_capable)
dev->rdev->tx_resolution = sample_period;
if (dev->rdev->timeout > dev->rdev->max_timeout)
dev->rdev->timeout = dev->rdev->max_timeout;
if (dev->rdev->timeout < dev->rdev->min_timeout)
dev->rdev->timeout = dev->rdev->min_timeout;
}
/* Enable the device for receive */
static void ene_rx_enable_hw(struct ene_device *dev)
{
u8 reg_value;
/* Enable system interrupt */
if (dev->hw_revision < ENE_HW_C) {
ene_write_reg(dev, ENEB_IRQ, dev->irq << 1);
ene_write_reg(dev, ENEB_IRQ_UNK1, 0x01);
} else {
reg_value = ene_read_reg(dev, ENE_IRQ) & 0xF0;
reg_value |= ENE_IRQ_UNK_EN;
reg_value &= ~ENE_IRQ_STATUS;
reg_value |= (dev->irq & ENE_IRQ_MASK);
ene_write_reg(dev, ENE_IRQ, reg_value);
}
/* Enable inputs */
ene_rx_enable_fan_input(dev, dev->rx_fan_input_inuse);
ene_rx_enable_cir_engine(dev, !dev->rx_fan_input_inuse);
/* ack any pending irqs - just in case */
ene_irq_status(dev);
/* enable firmware bits */
ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);
/* enter idle mode */
ir_raw_event_set_idle(dev->rdev, true);
}
/* Enable the device for receive - wrapper to track the state */
static void ene_rx_enable(struct ene_device *dev)
{
ene_rx_enable_hw(dev);
dev->rx_enabled = true;
}
/* Disable the device receiver */
static void ene_rx_disable_hw(struct ene_device *dev)
{
/* disable inputs */
ene_rx_enable_cir_engine(dev, false);
ene_rx_enable_fan_input(dev, false);
/* disable hardware IRQ and firmware flag */
ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);
ir_raw_event_set_idle(dev->rdev, true);
}
/* Disable the device receiver - wrapper to track the state */
static void ene_rx_disable(struct ene_device *dev)
{
ene_rx_disable_hw(dev);
dev->rx_enabled = false;
}
/*
 * This resets the receiver. Useful to stop a stream of spaces at the
 * end of a transmission.
 */
static void ene_rx_reset(struct ene_device *dev)
{
ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
}
/* Set up the TX carrier frequency and duty cycle */
static void ene_tx_set_carrier(struct ene_device *dev)
{
u8 tx_puls_width;
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
ene_set_clear_reg_mask(dev, ENE_CIRCFG,
ENE_CIRCFG_TX_CARR, dev->tx_period > 0);
if (!dev->tx_period)
goto unlock;
BUG_ON(dev->tx_duty_cycle >= 100 || dev->tx_duty_cycle <= 0);
tx_puls_width = dev->tx_period / (100 / dev->tx_duty_cycle);
if (!tx_puls_width)
tx_puls_width = 1;
dbg("TX: pulse distance = %d * 500 ns", dev->tx_period);
dbg("TX: pulse width = %d * 500 ns", tx_puls_width);
ene_write_reg(dev, ENE_CIRMOD_PRD, dev->tx_period | ENE_CIRMOD_PRD_POL);
ene_write_reg(dev, ENE_CIRMOD_HPRD, tx_puls_width);
unlock:
spin_unlock_irqrestore(&dev->hw_lock, flags);
}
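/*
 * Worked example for the math above (tx_period is in 500 ns units, see
 * ene_set_tx_carrier()): a 38 kHz carrier gives tx_period = 2000000 /
 * 38000 = 52; at the default 50% duty cycle the pulse width becomes
 * 52 / (100 / 50) = 26 units, i.e. 13 us high in every 26 us period.
 */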
/* Enable/disable transmitters */
static void ene_tx_set_transmitters(struct ene_device *dev)
{
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
ene_set_clear_reg_mask(dev, ENE_GPIOFS8, ENE_GPIOFS8_GPIO41,
!!(dev->transmitter_mask & 0x01));
ene_set_clear_reg_mask(dev, ENE_GPIOFS1, ENE_GPIOFS1_GPIO0D,
!!(dev->transmitter_mask & 0x02));
spin_unlock_irqrestore(&dev->hw_lock, flags);
}
/* prepare transmission */
static void ene_tx_enable(struct ene_device *dev)
{
u8 conf1 = ene_read_reg(dev, ENE_CIRCFG);
u8 fwreg2 = ene_read_reg(dev, ENE_FW2);
dev->saved_conf1 = conf1;
/* Show information about currently connected transmitter jacks */
if (fwreg2 & ENE_FW2_EMMITER1_CONN)
dbg("TX: Transmitter #1 is connected");
if (fwreg2 & ENE_FW2_EMMITER2_CONN)
dbg("TX: Transmitter #2 is connected");
if (!(fwreg2 & (ENE_FW2_EMMITER1_CONN | ENE_FW2_EMMITER2_CONN)))
pr_warn("TX: transmitter cable isn't connected!\n");
/* disable receive on revc */
if (dev->hw_revision == ENE_HW_C)
conf1 &= ~ENE_CIRCFG_RX_EN;
/* Enable TX engine */
conf1 |= ENE_CIRCFG_TX_EN | ENE_CIRCFG_TX_IRQ;
ene_write_reg(dev, ENE_CIRCFG, conf1);
}
/* end transmission */
static void ene_tx_disable(struct ene_device *dev)
{
ene_write_reg(dev, ENE_CIRCFG, dev->saved_conf1);
dev->tx_buffer = NULL;
}
/* TX one sample - must be called with dev->hw_lock held */
static void ene_tx_sample(struct ene_device *dev)
{
u8 raw_tx;
u32 sample;
bool pulse = dev->tx_sample_pulse;
if (!dev->tx_buffer) {
pr_warn("TX: BUG: attempt to transmit NULL buffer\n");
return;
}
/* Grab next TX sample */
if (!dev->tx_sample) {
if (dev->tx_pos == dev->tx_len) {
if (!dev->tx_done) {
dbg("TX: no more data to send");
dev->tx_done = true;
goto exit;
} else {
dbg("TX: last sample sent by hardware");
ene_tx_disable(dev);
complete(&dev->tx_complete);
return;
}
}
sample = dev->tx_buffer[dev->tx_pos++];
dev->tx_sample_pulse = !dev->tx_sample_pulse;
dev->tx_sample = DIV_ROUND_CLOSEST(sample, sample_period);
if (!dev->tx_sample)
dev->tx_sample = 1;
}
	raw_tx = min(dev->tx_sample, (unsigned int)ENE_CIRRLC_OUT_MASK);
dev->tx_sample -= raw_tx;
dbg("TX: sample %8d (%s)", raw_tx * sample_period,
pulse ? "pulse" : "space");
if (pulse)
raw_tx |= ENE_CIRRLC_OUT_PULSE;
ene_write_reg(dev,
dev->tx_reg ? ENE_CIRRLC_OUT1 : ENE_CIRRLC_OUT0, raw_tx);
dev->tx_reg = !dev->tx_reg;
exit:
/* simulate TX done interrupt */
if (txsim)
mod_timer(&dev->tx_sim_timer, jiffies + HZ / 500);
}
/* timer to simulate tx done interrupt */
static void ene_tx_irqsim(struct timer_list *t)
{
struct ene_device *dev = from_timer(dev, t, tx_sim_timer);
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
ene_tx_sample(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
}
/* read irq status and ack it */
static int ene_irq_status(struct ene_device *dev)
{
u8 irq_status;
u8 fw_flags1, fw_flags2;
int retval = 0;
fw_flags2 = ene_read_reg(dev, ENE_FW2);
if (dev->hw_revision < ENE_HW_C) {
irq_status = ene_read_reg(dev, ENEB_IRQ_STATUS);
if (!(irq_status & ENEB_IRQ_STATUS_IR))
return 0;
ene_clear_reg_mask(dev, ENEB_IRQ_STATUS, ENEB_IRQ_STATUS_IR);
return ENE_IRQ_RX;
}
irq_status = ene_read_reg(dev, ENE_IRQ);
if (!(irq_status & ENE_IRQ_STATUS))
return 0;
	/* the original driver does this twice - a workaround? */
ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);
ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);
/* check RX interrupt */
if (fw_flags2 & ENE_FW2_RXIRQ) {
retval |= ENE_IRQ_RX;
ene_write_reg(dev, ENE_FW2, fw_flags2 & ~ENE_FW2_RXIRQ);
}
/* check TX interrupt */
fw_flags1 = ene_read_reg(dev, ENE_FW1);
if (fw_flags1 & ENE_FW1_TXIRQ) {
ene_write_reg(dev, ENE_FW1, fw_flags1 & ~ENE_FW1_TXIRQ);
retval |= ENE_IRQ_TX;
}
return retval;
}
/* interrupt handler */
static irqreturn_t ene_isr(int irq, void *data)
{
u16 hw_value, reg;
int hw_sample, irq_status;
bool pulse;
unsigned long flags;
irqreturn_t retval = IRQ_NONE;
struct ene_device *dev = (struct ene_device *)data;
struct ir_raw_event ev = {};
spin_lock_irqsave(&dev->hw_lock, flags);
dbg_verbose("ISR called");
ene_rx_read_hw_pointer(dev);
irq_status = ene_irq_status(dev);
if (!irq_status)
goto unlock;
retval = IRQ_HANDLED;
if (irq_status & ENE_IRQ_TX) {
dbg_verbose("TX interrupt");
if (!dev->hw_learning_and_tx_capable) {
dbg("TX interrupt on unsupported device!");
goto unlock;
}
ene_tx_sample(dev);
}
if (!(irq_status & ENE_IRQ_RX))
goto unlock;
dbg_verbose("RX interrupt");
if (dev->hw_learning_and_tx_capable)
ene_rx_sense_carrier(dev);
	/*
	 * On hardware that doesn't support the extra buffer we need to
	 * trust the interrupt and not track the read pointer.
	 */
if (!dev->hw_extra_buffer)
dev->r_pointer = dev->w_pointer == 0 ? ENE_FW_PACKET_SIZE : 0;
while (1) {
reg = ene_rx_get_sample_reg(dev);
dbg_verbose("next sample to read at: %04x", reg);
if (!reg)
break;
hw_value = ene_read_reg(dev, reg);
if (dev->rx_fan_input_inuse) {
int offset = ENE_FW_SMPL_BUF_FAN - ENE_FW_SAMPLE_BUFFER;
/* read high part of the sample */
hw_value |= ene_read_reg(dev, reg + offset) << 8;
pulse = hw_value & ENE_FW_SMPL_BUF_FAN_PLS;
/* clear space bit, and other unused bits */
hw_value &= ENE_FW_SMPL_BUF_FAN_MSK;
hw_sample = hw_value * ENE_FW_SAMPLE_PERIOD_FAN;
} else {
pulse = !(hw_value & ENE_FW_SAMPLE_SPACE);
hw_value &= ~ENE_FW_SAMPLE_SPACE;
hw_sample = hw_value * sample_period;
if (dev->rx_period_adjust) {
hw_sample *= 100;
hw_sample /= (100 + dev->rx_period_adjust);
}
}
if (!dev->hw_extra_buffer && !hw_sample) {
dev->r_pointer = dev->w_pointer;
continue;
}
dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
ev.duration = hw_sample;
ev.pulse = pulse;
ir_raw_event_store_with_filter(dev->rdev, &ev);
}
ir_raw_event_handle(dev->rdev);
unlock:
spin_unlock_irqrestore(&dev->hw_lock, flags);
return retval;
}
/* Initialize default settings */
static void ene_setup_default_settings(struct ene_device *dev)
{
dev->tx_period = 32;
dev->tx_duty_cycle = 50; /*%*/
dev->transmitter_mask = 0x03;
dev->learning_mode_enabled = learning_mode_force;
/* Set reasonable default timeout */
dev->rdev->timeout = MS_TO_US(150);
}
/* Upload all hardware settings at once. Used at load and resume time */
static void ene_setup_hw_settings(struct ene_device *dev)
{
if (dev->hw_learning_and_tx_capable) {
ene_tx_set_carrier(dev);
ene_tx_set_transmitters(dev);
}
ene_rx_setup(dev);
}
/* outside interface: called on first open */
static int ene_open(struct rc_dev *rdev)
{
struct ene_device *dev = rdev->priv;
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
ene_rx_enable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
return 0;
}
/* outside interface: called on device close */
static void ene_close(struct rc_dev *rdev)
{
struct ene_device *dev = rdev->priv;
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
ene_rx_disable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
}
/* outside interface: set transmitter mask */
static int ene_set_tx_mask(struct rc_dev *rdev, u32 tx_mask)
{
struct ene_device *dev = rdev->priv;
dbg("TX: attempt to set transmitter mask %02x", tx_mask);
/* invalid txmask */
if (!tx_mask || tx_mask & ~0x03) {
dbg("TX: invalid mask");
/* return count of transmitters */
return 2;
}
dev->transmitter_mask = tx_mask;
ene_tx_set_transmitters(dev);
return 0;
}
/* outside interface: set tx carrier */
static int ene_set_tx_carrier(struct rc_dev *rdev, u32 carrier)
{
struct ene_device *dev = rdev->priv;
u32 period;
dbg("TX: attempt to set tx carrier to %d kHz", carrier);
if (carrier == 0)
return -EINVAL;
period = 2000000 / carrier;
if (period && (period > ENE_CIRMOD_PRD_MAX ||
period < ENE_CIRMOD_PRD_MIN)) {
dbg("TX: out of range %d-%d kHz carrier",
2000 / ENE_CIRMOD_PRD_MIN, 2000 / ENE_CIRMOD_PRD_MAX);
return -EINVAL;
}
dev->tx_period = period;
ene_tx_set_carrier(dev);
return 0;
}
/* outside interface: set tx duty cycle */
static int ene_set_tx_duty_cycle(struct rc_dev *rdev, u32 duty_cycle)
{
struct ene_device *dev = rdev->priv;
dbg("TX: setting duty cycle to %d%%", duty_cycle);
dev->tx_duty_cycle = duty_cycle;
ene_tx_set_carrier(dev);
return 0;
}
/* outside interface: enable learning mode */
static int ene_set_learning_mode(struct rc_dev *rdev, int enable)
{
struct ene_device *dev = rdev->priv;
unsigned long flags;
if (enable == dev->learning_mode_enabled)
return 0;
spin_lock_irqsave(&dev->hw_lock, flags);
dev->learning_mode_enabled = enable;
ene_rx_disable(dev);
ene_rx_setup(dev);
ene_rx_enable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
return 0;
}
static int ene_set_carrier_report(struct rc_dev *rdev, int enable)
{
struct ene_device *dev = rdev->priv;
unsigned long flags;
if (enable == dev->carrier_detect_enabled)
return 0;
spin_lock_irqsave(&dev->hw_lock, flags);
dev->carrier_detect_enabled = enable;
ene_rx_disable(dev);
ene_rx_setup(dev);
ene_rx_enable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
return 0;
}
/* outside interface: enable or disable idle mode */
static void ene_set_idle(struct rc_dev *rdev, bool idle)
{
struct ene_device *dev = rdev->priv;
if (idle) {
ene_rx_reset(dev);
dbg("RX: end of data");
}
}
/* outside interface: transmit */
static int ene_transmit(struct rc_dev *rdev, unsigned *buf, unsigned n)
{
struct ene_device *dev = rdev->priv;
unsigned long flags;
dev->tx_buffer = buf;
dev->tx_len = n;
dev->tx_pos = 0;
dev->tx_reg = 0;
dev->tx_done = 0;
dev->tx_sample = 0;
dev->tx_sample_pulse = false;
dbg("TX: %d samples", dev->tx_len);
spin_lock_irqsave(&dev->hw_lock, flags);
ene_tx_enable(dev);
/* Transmit first two samples */
ene_tx_sample(dev);
ene_tx_sample(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
if (wait_for_completion_timeout(&dev->tx_complete, 2 * HZ) == 0) {
dbg("TX: timeout");
spin_lock_irqsave(&dev->hw_lock, flags);
ene_tx_disable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
	} else {
		dbg("TX: done");
	}
return n;
}
/* probe entry */
static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
{
int error = -ENOMEM;
struct rc_dev *rdev;
struct ene_device *dev;
/* allocate memory */
dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!dev || !rdev)
goto exit_free_dev_rdev;
/* validate resources */
error = -ENODEV;
/* init these to -1, as 0 is valid for both */
dev->hw_io = -1;
dev->irq = -1;
if (!pnp_port_valid(pnp_dev, 0) ||
pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
goto exit_free_dev_rdev;
if (!pnp_irq_valid(pnp_dev, 0))
goto exit_free_dev_rdev;
spin_lock_init(&dev->hw_lock);
dev->hw_io = pnp_port_start(pnp_dev, 0);
dev->irq = pnp_irq(pnp_dev, 0);
pnp_set_drvdata(pnp_dev, dev);
dev->pnp_dev = pnp_dev;
/* don't allow too short/long sample periods */
if (sample_period < 5 || sample_period > 0x7F)
sample_period = ENE_DEFAULT_SAMPLE_PERIOD;
/* detect hardware version and features */
error = ene_hw_detect(dev);
if (error)
goto exit_free_dev_rdev;
if (!dev->hw_learning_and_tx_capable && txsim) {
dev->hw_learning_and_tx_capable = true;
timer_setup(&dev->tx_sim_timer, ene_tx_irqsim, 0);
pr_warn("Simulation of TX activated\n");
}
if (!dev->hw_learning_and_tx_capable)
learning_mode_force = false;
rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rdev->priv = dev;
rdev->open = ene_open;
rdev->close = ene_close;
rdev->s_idle = ene_set_idle;
rdev->driver_name = ENE_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
rdev->device_name = "ENE eHome Infrared Remote Receiver";
if (dev->hw_learning_and_tx_capable) {
rdev->s_wideband_receiver = ene_set_learning_mode;
init_completion(&dev->tx_complete);
rdev->tx_ir = ene_transmit;
rdev->s_tx_mask = ene_set_tx_mask;
rdev->s_tx_carrier = ene_set_tx_carrier;
rdev->s_tx_duty_cycle = ene_set_tx_duty_cycle;
rdev->s_carrier_report = ene_set_carrier_report;
rdev->device_name = "ENE eHome Infrared Remote Transceiver";
}
dev->rdev = rdev;
ene_rx_setup_hw_buffer(dev);
ene_setup_default_settings(dev);
ene_setup_hw_settings(dev);
device_set_wakeup_capable(&pnp_dev->dev, true);
device_set_wakeup_enable(&pnp_dev->dev, true);
error = rc_register_device(rdev);
if (error < 0)
goto exit_free_dev_rdev;
/* claim the resources */
error = -EBUSY;
	if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME))
		goto exit_unregister_device;
	if (request_irq(dev->irq, ene_isr,
			IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev))
		goto exit_release_hw_io;
pr_notice("driver has been successfully loaded\n");
return 0;
exit_release_hw_io:
release_region(dev->hw_io, ENE_IO_SIZE);
exit_unregister_device:
rc_unregister_device(rdev);
rdev = NULL;
exit_free_dev_rdev:
rc_free_device(rdev);
kfree(dev);
return error;
}
/* main unload function */
static void ene_remove(struct pnp_dev *pnp_dev)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
unsigned long flags;
rc_unregister_device(dev->rdev);
del_timer_sync(&dev->tx_sim_timer);
spin_lock_irqsave(&dev->hw_lock, flags);
ene_rx_disable(dev);
ene_rx_restore_hw_buffer(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
free_irq(dev->irq, dev);
release_region(dev->hw_io, ENE_IO_SIZE);
kfree(dev);
}
/* enable wake on IR (wakes on specific button on original remote) */
static void ene_enable_wake(struct ene_device *dev, bool enable)
{
dbg("wake on IR %s", enable ? "enabled" : "disabled");
ene_set_clear_reg_mask(dev, ENE_FW1, ENE_FW1_WAKE, enable);
}
#ifdef CONFIG_PM
static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
bool wake = device_may_wakeup(&dev->pnp_dev->dev);
if (!wake && dev->rx_enabled)
ene_rx_disable_hw(dev);
ene_enable_wake(dev, wake);
return 0;
}
static int ene_resume(struct pnp_dev *pnp_dev)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
ene_setup_hw_settings(dev);
if (dev->rx_enabled)
ene_rx_enable(dev);
ene_enable_wake(dev, false);
return 0;
}
#endif
static void ene_shutdown(struct pnp_dev *pnp_dev)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
ene_enable_wake(dev, true);
}
static const struct pnp_device_id ene_ids[] = {
{.id = "ENE0100",},
{.id = "ENE0200",},
{.id = "ENE0201",},
{.id = "ENE0202",},
{},
};
static struct pnp_driver ene_driver = {
.name = ENE_DRIVER_NAME,
.id_table = ene_ids,
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.probe = ene_probe,
.remove = ene_remove,
#ifdef CONFIG_PM
.suspend = ene_suspend,
.resume = ene_resume,
#endif
.shutdown = ene_shutdown,
};
module_param(sample_period, int, S_IRUGO);
MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)");
module_param(learning_mode_force, bool, S_IRUGO);
MODULE_PARM_DESC(learning_mode_force, "Enable learning mode by default");
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level");
module_param(txsim, bool, S_IRUGO);
MODULE_PARM_DESC(txsim,
"Simulate TX features on unsupported hardware (dangerous)");
MODULE_DEVICE_TABLE(pnp, ene_ids);
MODULE_DESCRIPTION("Infrared input driver for KB3926B/C/D/E/F (aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_LICENSE("GPL");
module_pnp_driver(ene_driver);
| linux-master | drivers/media/rc/ene_ir.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2008 Nokia Corporation
*
* Based on lirc_serial.c
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/pwm.h>
#include <linux/of.h>
#include <linux/hrtimer.h>
#include <media/rc-core.h>
#define WBUF_LEN 256
struct ir_rx51 {
struct rc_dev *rcdev;
struct pwm_device *pwm;
struct pwm_state state;
struct hrtimer timer;
struct device *dev;
wait_queue_head_t wqueue;
unsigned int freq; /* carrier frequency */
unsigned int duty_cycle; /* carrier duty cycle */
int wbuf[WBUF_LEN];
int wbuf_index;
unsigned long device_is_open;
};
static inline void ir_rx51_on(struct ir_rx51 *ir_rx51)
{
ir_rx51->state.enabled = true;
pwm_apply_state(ir_rx51->pwm, &ir_rx51->state);
}
static inline void ir_rx51_off(struct ir_rx51 *ir_rx51)
{
ir_rx51->state.enabled = false;
pwm_apply_state(ir_rx51->pwm, &ir_rx51->state);
}
static int init_timing_params(struct ir_rx51 *ir_rx51)
{
ir_rx51->state.period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, ir_rx51->freq);
pwm_set_relative_duty_cycle(&ir_rx51->state, ir_rx51->duty_cycle, 100);
return 0;
}
static enum hrtimer_restart ir_rx51_timer_cb(struct hrtimer *timer)
{
struct ir_rx51 *ir_rx51 = container_of(timer, struct ir_rx51, timer);
ktime_t now;
if (ir_rx51->wbuf_index < 0) {
dev_err_ratelimited(ir_rx51->dev,
"BUG wbuf_index has value of %i\n",
ir_rx51->wbuf_index);
goto end;
}
/*
* If we happen to hit an odd latency spike, loop through the
* pulses until we catch up.
*/
do {
u64 ns;
if (ir_rx51->wbuf_index >= WBUF_LEN)
goto end;
if (ir_rx51->wbuf[ir_rx51->wbuf_index] == -1)
goto end;
if (ir_rx51->wbuf_index % 2)
ir_rx51_off(ir_rx51);
else
ir_rx51_on(ir_rx51);
ns = US_TO_NS(ir_rx51->wbuf[ir_rx51->wbuf_index]);
hrtimer_add_expires_ns(timer, ns);
ir_rx51->wbuf_index++;
now = timer->base->get_time();
} while (hrtimer_get_expires_tv64(timer) < now);
return HRTIMER_RESTART;
end:
/* Stop TX here */
ir_rx51_off(ir_rx51);
ir_rx51->wbuf_index = -1;
wake_up_interruptible(&ir_rx51->wqueue);
return HRTIMER_NORESTART;
}
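/*
 * Worked example for the indexing above: wbuf[0] is a pulse started by
 * ir_rx51_tx() before the timer runs, every further even slot turns the
 * carrier on and every odd slot turns it off, and a -1 entry terminates
 * the frame early when fewer than WBUF_LEN samples were queued.
 */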
static int ir_rx51_tx(struct rc_dev *dev, unsigned int *buffer,
unsigned int count)
{
struct ir_rx51 *ir_rx51 = dev->priv;
if (count > WBUF_LEN)
return -EINVAL;
memcpy(ir_rx51->wbuf, buffer, count * sizeof(unsigned int));
	/* Wait for any pending transfers to finish */
wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);
init_timing_params(ir_rx51);
if (count < WBUF_LEN)
ir_rx51->wbuf[count] = -1; /* Insert termination mark */
/*
* REVISIT: Adjust latency requirements so the device doesn't go in too
* deep sleep states with pm_qos_add_request().
*/
ir_rx51_on(ir_rx51);
ir_rx51->wbuf_index = 1;
hrtimer_start(&ir_rx51->timer,
ns_to_ktime(US_TO_NS(ir_rx51->wbuf[0])),
HRTIMER_MODE_REL);
/*
	 * Don't return to userspace until the transfer has
	 * finished.
*/
wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);
/* REVISIT: Remove pm_qos constraint, we can sleep again */
return count;
}
static int ir_rx51_open(struct rc_dev *dev)
{
struct ir_rx51 *ir_rx51 = dev->priv;
if (test_and_set_bit(1, &ir_rx51->device_is_open))
return -EBUSY;
ir_rx51->pwm = pwm_get(ir_rx51->dev, NULL);
if (IS_ERR(ir_rx51->pwm)) {
int res = PTR_ERR(ir_rx51->pwm);
dev_err(ir_rx51->dev, "pwm_get failed: %d\n", res);
return res;
}
return 0;
}
static void ir_rx51_release(struct rc_dev *dev)
{
struct ir_rx51 *ir_rx51 = dev->priv;
hrtimer_cancel(&ir_rx51->timer);
ir_rx51_off(ir_rx51);
pwm_put(ir_rx51->pwm);
clear_bit(1, &ir_rx51->device_is_open);
}
static struct ir_rx51 ir_rx51 = {
.duty_cycle = 50,
.wbuf_index = -1,
};
static int ir_rx51_set_duty_cycle(struct rc_dev *dev, u32 duty)
{
struct ir_rx51 *ir_rx51 = dev->priv;
ir_rx51->duty_cycle = duty;
return 0;
}
static int ir_rx51_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
struct ir_rx51 *ir_rx51 = dev->priv;
if (carrier > 500000 || carrier < 20000)
return -EINVAL;
ir_rx51->freq = carrier;
return 0;
}
#ifdef CONFIG_PM
static int ir_rx51_suspend(struct platform_device *dev, pm_message_t state)
{
/*
* In case the device is still open, do not suspend. Normally
	 * this should not be a problem as lircd keeps the device open
	 * only for short periods of time. We also don't want to get
	 * involved with race conditions that might happen if we were in
	 * the middle of a transmit. Thus, we defer any suspend actions
	 * until the transmit has completed.
*/
if (test_and_set_bit(1, &ir_rx51.device_is_open))
return -EAGAIN;
clear_bit(1, &ir_rx51.device_is_open);
return 0;
}
static int ir_rx51_resume(struct platform_device *dev)
{
return 0;
}
#else
#define ir_rx51_suspend NULL
#define ir_rx51_resume NULL
#endif /* CONFIG_PM */
static int ir_rx51_probe(struct platform_device *dev)
{
struct pwm_device *pwm;
struct rc_dev *rcdev;
pwm = pwm_get(&dev->dev, NULL);
if (IS_ERR(pwm))
return dev_err_probe(&dev->dev, PTR_ERR(pwm), "pwm_get failed\n");
/* Use default, in case userspace does not set the carrier */
	ir_rx51.freq = DIV_ROUND_CLOSEST_ULL(NSEC_PER_SEC, pwm_get_period(pwm));
pwm_init_state(pwm, &ir_rx51.state);
pwm_put(pwm);
hrtimer_init(&ir_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ir_rx51.timer.function = ir_rx51_timer_cb;
ir_rx51.dev = &dev->dev;
rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW_TX);
if (!rcdev)
return -ENOMEM;
rcdev->priv = &ir_rx51;
rcdev->open = ir_rx51_open;
rcdev->close = ir_rx51_release;
rcdev->tx_ir = ir_rx51_tx;
rcdev->s_tx_duty_cycle = ir_rx51_set_duty_cycle;
rcdev->s_tx_carrier = ir_rx51_set_tx_carrier;
rcdev->driver_name = KBUILD_MODNAME;
ir_rx51.rcdev = rcdev;
return devm_rc_register_device(&dev->dev, ir_rx51.rcdev);
}
static const struct of_device_id ir_rx51_match[] = {
{
.compatible = "nokia,n900-ir",
},
{},
};
MODULE_DEVICE_TABLE(of, ir_rx51_match);
static struct platform_driver ir_rx51_platform_driver = {
.probe = ir_rx51_probe,
.suspend = ir_rx51_suspend,
.resume = ir_rx51_resume,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = ir_rx51_match,
},
};
module_platform_driver(ir_rx51_platform_driver);
MODULE_DESCRIPTION("IR TX driver for Nokia RX51");
MODULE_AUTHOR("Nokia Corporation");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/rc/ir-rx51.c |
// SPDX-License-Identifier: GPL-2.0
// ir-nec-decoder.c - handle NEC IR Pulse/Space protocol
//
// Copyright (C) 2010 by Mauro Carvalho Chehab
#include <linux/bitrev.h>
#include <linux/module.h>
#include "rc-core-priv.h"
#define NEC_NBITS 32
#define NEC_UNIT 563 /* us */
#define NEC_HEADER_PULSE (16 * NEC_UNIT)
#define NECX_HEADER_PULSE (8 * NEC_UNIT) /* Less common NEC variant */
#define NEC_HEADER_SPACE (8 * NEC_UNIT)
#define NEC_REPEAT_SPACE (4 * NEC_UNIT)
#define NEC_BIT_PULSE (1 * NEC_UNIT)
#define NEC_BIT_0_SPACE (1 * NEC_UNIT)
#define NEC_BIT_1_SPACE (3 * NEC_UNIT)
#define NEC_TRAILER_PULSE (1 * NEC_UNIT)
#define NEC_TRAILER_SPACE (10 * NEC_UNIT) /* even longer in reality */
#define NECX_REPEAT_BITS 1
enum nec_state {
STATE_INACTIVE,
STATE_HEADER_SPACE,
STATE_BIT_PULSE,
STATE_BIT_SPACE,
STATE_TRAILER_PULSE,
STATE_TRAILER_SPACE,
};
/**
* ir_nec_decode() - Decode one NEC pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct nec_dec *data = &dev->raw->nec;
u32 scancode;
enum rc_proto rc_proto;
u8 address, not_address, command, not_command;
if (!is_timing_event(ev)) {
if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
dev_dbg(&dev->dev, "NEC decode started at state %d (%uus %s)\n",
data->state, ev.duration, TO_STR(ev.pulse));
switch (data->state) {
case STATE_INACTIVE:
if (!ev.pulse)
break;
if (eq_margin(ev.duration, NEC_HEADER_PULSE, NEC_UNIT * 2)) {
data->is_nec_x = false;
data->necx_repeat = false;
} else if (eq_margin(ev.duration, NECX_HEADER_PULSE, NEC_UNIT / 2))
data->is_nec_x = true;
else
break;
data->count = 0;
data->state = STATE_HEADER_SPACE;
return 0;
case STATE_HEADER_SPACE:
if (ev.pulse)
break;
if (eq_margin(ev.duration, NEC_HEADER_SPACE, NEC_UNIT)) {
data->state = STATE_BIT_PULSE;
return 0;
} else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) {
data->state = STATE_TRAILER_PULSE;
return 0;
}
break;
case STATE_BIT_PULSE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, NEC_BIT_PULSE, NEC_UNIT / 2))
break;
data->state = STATE_BIT_SPACE;
return 0;
case STATE_BIT_SPACE:
if (ev.pulse)
break;
if (data->necx_repeat && data->count == NECX_REPEAT_BITS &&
geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2)) {
dev_dbg(&dev->dev, "Repeat last key\n");
rc_repeat(dev);
data->state = STATE_INACTIVE;
return 0;
} else if (data->count > NECX_REPEAT_BITS)
data->necx_repeat = false;
data->bits <<= 1;
if (eq_margin(ev.duration, NEC_BIT_1_SPACE, NEC_UNIT / 2))
data->bits |= 1;
else if (!eq_margin(ev.duration, NEC_BIT_0_SPACE, NEC_UNIT / 2))
break;
data->count++;
if (data->count == NEC_NBITS)
data->state = STATE_TRAILER_PULSE;
else
data->state = STATE_BIT_PULSE;
return 0;
case STATE_TRAILER_PULSE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, NEC_TRAILER_PULSE, NEC_UNIT / 2))
break;
data->state = STATE_TRAILER_SPACE;
return 0;
case STATE_TRAILER_SPACE:
if (ev.pulse)
break;
if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2))
break;
if (data->count == NEC_NBITS) {
address = bitrev8((data->bits >> 24) & 0xff);
not_address = bitrev8((data->bits >> 16) & 0xff);
command = bitrev8((data->bits >> 8) & 0xff);
not_command = bitrev8((data->bits >> 0) & 0xff);
scancode = ir_nec_bytes_to_scancode(address,
not_address,
command,
not_command,
&rc_proto);
if (data->is_nec_x)
data->necx_repeat = true;
rc_keydown(dev, rc_proto, scancode, 0);
} else {
rc_repeat(dev);
}
data->state = STATE_INACTIVE;
return 0;
}
dev_dbg(&dev->dev, "NEC decode failed at count %d state %d (%uus %s)\n",
data->count, data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
/**
* ir_nec_scancode_to_raw() - encode an NEC scancode ready for modulation.
* @protocol: specific protocol to use
* @scancode: a single NEC scancode.
*/
static u32 ir_nec_scancode_to_raw(enum rc_proto protocol, u32 scancode)
{
unsigned int addr, addr_inv, data, data_inv;
data = scancode & 0xff;
if (protocol == RC_PROTO_NEC32) {
/* 32-bit NEC (used by Apple and TiVo remotes) */
/* scan encoding: aaAAddDD */
addr_inv = (scancode >> 24) & 0xff;
addr = (scancode >> 16) & 0xff;
data_inv = (scancode >> 8) & 0xff;
} else if (protocol == RC_PROTO_NECX) {
/* Extended NEC */
/* scan encoding AAaaDD */
addr = (scancode >> 16) & 0xff;
addr_inv = (scancode >> 8) & 0xff;
data_inv = data ^ 0xff;
} else {
/* Normal NEC */
/* scan encoding: AADD */
addr = (scancode >> 8) & 0xff;
addr_inv = addr ^ 0xff;
data_inv = data ^ 0xff;
}
/* raw encoding: ddDDaaAA */
return data_inv << 24 |
data << 16 |
addr_inv << 8 |
addr;
}
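/*
 * Worked example for the plain NEC branch above: scancode 0x040f yields
 * addr = 0x04, addr_inv = 0xfb, data = 0x0f, data_inv = 0xf0, so the raw
 * word comes out as 0xf00ffb04.
 */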
static const struct ir_raw_timings_pd ir_nec_timings = {
.header_pulse = NEC_HEADER_PULSE,
.header_space = NEC_HEADER_SPACE,
.bit_pulse = NEC_BIT_PULSE,
.bit_space[0] = NEC_BIT_0_SPACE,
.bit_space[1] = NEC_BIT_1_SPACE,
.trailer_pulse = NEC_TRAILER_PULSE,
.trailer_space = NEC_TRAILER_SPACE,
.msb_first = 0,
};
/**
* ir_nec_encode() - Encode a scancode as a stream of raw events
*
* @protocol: protocol to encode
* @scancode: scancode to encode
* @events: array of raw ir events to write into
* @max: maximum size of @events
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
*/
static int ir_nec_encode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max)
{
struct ir_raw_event *e = events;
int ret;
u32 raw;
/* Convert a NEC scancode to raw NEC data */
raw = ir_nec_scancode_to_raw(protocol, scancode);
/* Modulate the raw data using a pulse distance modulation */
ret = ir_raw_gen_pd(&e, max, &ir_nec_timings, NEC_NBITS, raw);
if (ret < 0)
return ret;
return e - events;
}
static struct ir_raw_handler nec_handler = {
.protocols = RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX |
RC_PROTO_BIT_NEC32,
.decode = ir_nec_decode,
.encode = ir_nec_encode,
.carrier = 38000,
.min_timeout = NEC_TRAILER_SPACE,
};
static int __init ir_nec_decode_init(void)
{
ir_raw_handler_register(&nec_handler);
printk(KERN_INFO "IR NEC protocol handler initialized\n");
return 0;
}
static void __exit ir_nec_decode_exit(void)
{
ir_raw_handler_unregister(&nec_handler);
}
module_init(ir_nec_decode_init);
module_exit(ir_nec_decode_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("NEC IR protocol decoder");
| linux-master | drivers/media/rc/ir-nec-decoder.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for Allwinner sunXi IR controller
*
* Copyright (C) 2014 Alexsey Shestacov <[email protected]>
* Copyright (C) 2014 Alexander Bersenev <[email protected]>
*
* Based on sun5i-ir.c:
* Copyright (C) 2007-2012 Daniel Wang
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <media/rc-core.h>
#define SUNXI_IR_DEV "sunxi-ir"
/* Registers */
/* IR Control */
#define SUNXI_IR_CTL_REG 0x00
/* Global Enable */
#define REG_CTL_GEN BIT(0)
/* RX block enable */
#define REG_CTL_RXEN BIT(1)
/* CIR mode */
#define REG_CTL_MD (BIT(4) | BIT(5))
/* Rx Config */
#define SUNXI_IR_RXCTL_REG 0x10
/* Pulse Polarity Invert flag */
#define REG_RXCTL_RPPI BIT(2)
/* Rx Data */
#define SUNXI_IR_RXFIFO_REG 0x20
/* Rx Interrupt Enable */
#define SUNXI_IR_RXINT_REG 0x2C
/* Rx FIFO Overflow Interrupt Enable */
#define REG_RXINT_ROI_EN BIT(0)
/* Rx Packet End Interrupt Enable */
#define REG_RXINT_RPEI_EN BIT(1)
/* Rx FIFO Data Available Interrupt Enable */
#define REG_RXINT_RAI_EN BIT(4)
/* Rx FIFO available byte level */
#define REG_RXINT_RAL(val) ((val) << 8)
/* Rx Interrupt Status */
#define SUNXI_IR_RXSTA_REG 0x30
/* Rx FIFO Overflow */
#define REG_RXSTA_ROI REG_RXINT_ROI_EN
/* Rx Packet End */
#define REG_RXSTA_RPE REG_RXINT_RPEI_EN
/* Rx FIFO Data Available */
#define REG_RXSTA_RA REG_RXINT_RAI_EN
/* RX FIFO Get Available Counter */
#define REG_RXSTA_GET_AC(val) (((val) >> 8) & (ir->fifo_size * 2 - 1))
/* Clear all interrupt status value */
#define REG_RXSTA_CLEARALL 0xff
/* IR Sample Config */
#define SUNXI_IR_CIR_REG 0x34
/* CIR_REG register noise threshold */
#define REG_CIR_NTHR(val) (((val) << 2) & (GENMASK(7, 2)))
/* CIR_REG register idle threshold */
#define REG_CIR_ITHR(val) (((val) << 8) & (GENMASK(15, 8)))
/* Required frequency for IR0 or IR1 clock in CIR mode (default) */
#define SUNXI_IR_BASE_CLK 8000000
/* Noise threshold in samples */
#define SUNXI_IR_RXNOISE 1
/**
* struct sunxi_ir_quirks - Differences between SoC variants.
*
* @has_reset: SoC needs reset deasserted.
* @fifo_size: size of the fifo.
*/
struct sunxi_ir_quirks {
bool has_reset;
int fifo_size;
};
struct sunxi_ir {
struct rc_dev *rc;
void __iomem *base;
int irq;
int fifo_size;
struct clk *clk;
struct clk *apb_clk;
struct reset_control *rst;
const char *map_name;
};
static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id)
{
unsigned long status;
unsigned char dt;
unsigned int cnt, rc;
struct sunxi_ir *ir = dev_id;
struct ir_raw_event rawir = {};
status = readl(ir->base + SUNXI_IR_RXSTA_REG);
/* clean all pending statuses */
writel(status | REG_RXSTA_CLEARALL, ir->base + SUNXI_IR_RXSTA_REG);
if (status & (REG_RXSTA_RA | REG_RXSTA_RPE)) {
/* How many messages in fifo */
rc = REG_RXSTA_GET_AC(status);
/* Sanity check */
rc = rc > ir->fifo_size ? ir->fifo_size : rc;
/* If we have data */
for (cnt = 0; cnt < rc; cnt++) {
			/* for each sample byte in the fifo */
dt = readb(ir->base + SUNXI_IR_RXFIFO_REG);
rawir.pulse = (dt & 0x80) != 0;
rawir.duration = ((dt & 0x7f) + 1) *
ir->rc->rx_resolution;
ir_raw_event_store_with_filter(ir->rc, &rawir);
}
}
if (status & REG_RXSTA_ROI) {
ir_raw_event_overflow(ir->rc);
} else if (status & REG_RXSTA_RPE) {
ir_raw_event_set_idle(ir->rc, true);
ir_raw_event_handle(ir->rc);
} else {
ir_raw_event_handle(ir->rc);
}
return IRQ_HANDLED;
}
/* Convert idle threshold to usec */
static unsigned int sunxi_ithr_to_usec(unsigned int base_clk, unsigned int ithr)
{
return DIV_ROUND_CLOSEST(USEC_PER_SEC * (ithr + 1),
base_clk / (128 * 64));
}
/* Convert usec to idle threshold */
static unsigned int sunxi_usec_to_ithr(unsigned int base_clk, unsigned int usec)
{
/* make sure we don't end up with a timeout less than requested */
return DIV_ROUND_UP((base_clk / (128 * 64)) * usec, USEC_PER_SEC) - 1;
}
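/*
 * One idle-threshold unit is 128 * 64 base clock cycles. Worked example
 * (assuming the default 8 MHz base clock): 8000000 / (128 * 64) is ~976
 * units per second, i.e. roughly 1.024 ms per unit, so a requested
 * 125 ms timeout maps to ithr = 121 and reads back as 125 ms.
 */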
static int sunxi_ir_set_timeout(struct rc_dev *rc_dev, unsigned int timeout)
{
struct sunxi_ir *ir = rc_dev->priv;
unsigned int base_clk = clk_get_rate(ir->clk);
unsigned int ithr = sunxi_usec_to_ithr(base_clk, timeout);
dev_dbg(rc_dev->dev.parent, "setting idle threshold to %u\n", ithr);
/* Set noise threshold and idle threshold */
writel(REG_CIR_NTHR(SUNXI_IR_RXNOISE) | REG_CIR_ITHR(ithr),
ir->base + SUNXI_IR_CIR_REG);
rc_dev->timeout = sunxi_ithr_to_usec(base_clk, ithr);
return 0;
}
static int sunxi_ir_hw_init(struct device *dev)
{
struct sunxi_ir *ir = dev_get_drvdata(dev);
u32 tmp;
int ret;
ret = reset_control_deassert(ir->rst);
if (ret)
return ret;
ret = clk_prepare_enable(ir->apb_clk);
if (ret) {
dev_err(dev, "failed to enable apb clk\n");
goto exit_assert_reset;
}
ret = clk_prepare_enable(ir->clk);
if (ret) {
dev_err(dev, "failed to enable ir clk\n");
goto exit_disable_apb_clk;
}
/* Enable CIR Mode */
writel(REG_CTL_MD, ir->base + SUNXI_IR_CTL_REG);
/* Set noise threshold and idle threshold */
sunxi_ir_set_timeout(ir->rc, ir->rc->timeout);
/* Invert Input Signal */
writel(REG_RXCTL_RPPI, ir->base + SUNXI_IR_RXCTL_REG);
/* Clear All Rx Interrupt Status */
writel(REG_RXSTA_CLEARALL, ir->base + SUNXI_IR_RXSTA_REG);
/*
* Enable IRQ on overflow, packet end, FIFO available with trigger
* level
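	 * (e.g. with an assumed 64-byte FIFO, the data-available trigger
	 * level is set to 64 / 2 - 1 = 31 bytes)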
*/
writel(REG_RXINT_ROI_EN | REG_RXINT_RPEI_EN |
REG_RXINT_RAI_EN | REG_RXINT_RAL(ir->fifo_size / 2 - 1),
ir->base + SUNXI_IR_RXINT_REG);
/* Enable IR Module */
tmp = readl(ir->base + SUNXI_IR_CTL_REG);
writel(tmp | REG_CTL_GEN | REG_CTL_RXEN, ir->base + SUNXI_IR_CTL_REG);
return 0;
exit_disable_apb_clk:
clk_disable_unprepare(ir->apb_clk);
exit_assert_reset:
reset_control_assert(ir->rst);
return ret;
}
static void sunxi_ir_hw_exit(struct device *dev)
{
struct sunxi_ir *ir = dev_get_drvdata(dev);
clk_disable_unprepare(ir->clk);
clk_disable_unprepare(ir->apb_clk);
reset_control_assert(ir->rst);
}
static int __maybe_unused sunxi_ir_suspend(struct device *dev)
{
sunxi_ir_hw_exit(dev);
return 0;
}
static int __maybe_unused sunxi_ir_resume(struct device *dev)
{
return sunxi_ir_hw_init(dev);
}
static SIMPLE_DEV_PM_OPS(sunxi_ir_pm_ops, sunxi_ir_suspend, sunxi_ir_resume);
static int sunxi_ir_probe(struct platform_device *pdev)
{
int ret = 0;
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
const struct sunxi_ir_quirks *quirks;
struct sunxi_ir *ir;
u32 b_clk_freq = SUNXI_IR_BASE_CLK;
ir = devm_kzalloc(dev, sizeof(struct sunxi_ir), GFP_KERNEL);
if (!ir)
return -ENOMEM;
quirks = of_device_get_match_data(&pdev->dev);
if (!quirks) {
dev_err(&pdev->dev, "Failed to determine the quirks to use\n");
return -ENODEV;
}
ir->fifo_size = quirks->fifo_size;
/* Clock */
ir->apb_clk = devm_clk_get(dev, "apb");
if (IS_ERR(ir->apb_clk)) {
dev_err(dev, "failed to get a apb clock.\n");
return PTR_ERR(ir->apb_clk);
}
ir->clk = devm_clk_get(dev, "ir");
if (IS_ERR(ir->clk)) {
dev_err(dev, "failed to get a ir clock.\n");
return PTR_ERR(ir->clk);
}
/* Base clock frequency (optional) */
of_property_read_u32(dn, "clock-frequency", &b_clk_freq);
/* Reset */
if (quirks->has_reset) {
ir->rst = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(ir->rst))
return PTR_ERR(ir->rst);
}
ret = clk_set_rate(ir->clk, b_clk_freq);
if (ret) {
dev_err(dev, "set ir base clock failed!\n");
return ret;
}
dev_dbg(dev, "set base clock frequency to %d Hz.\n", b_clk_freq);
/* IO */
ir->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->base)) {
return PTR_ERR(ir->base);
}
ir->rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!ir->rc) {
dev_err(dev, "failed to allocate device\n");
return -ENOMEM;
}
ir->rc->priv = ir;
ir->rc->device_name = SUNXI_IR_DEV;
ir->rc->input_phys = "sunxi-ir/input0";
ir->rc->input_id.bustype = BUS_HOST;
ir->rc->input_id.vendor = 0x0001;
ir->rc->input_id.product = 0x0001;
ir->rc->input_id.version = 0x0100;
ir->map_name = of_get_property(dn, "linux,rc-map-name", NULL);
ir->rc->map_name = ir->map_name ?: RC_MAP_EMPTY;
ir->rc->dev.parent = dev;
ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
/* Frequency after IR internal divider with sample period in us */
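	/* e.g. an 8 MHz base clock / 64 = 125 kHz sample clock, i.e. 8 us */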
ir->rc->rx_resolution = (USEC_PER_SEC / (b_clk_freq / 64));
ir->rc->timeout = IR_DEFAULT_TIMEOUT;
ir->rc->min_timeout = sunxi_ithr_to_usec(b_clk_freq, 0);
ir->rc->max_timeout = sunxi_ithr_to_usec(b_clk_freq, 255);
ir->rc->s_timeout = sunxi_ir_set_timeout;
ir->rc->driver_name = SUNXI_IR_DEV;
ret = rc_register_device(ir->rc);
if (ret) {
dev_err(dev, "failed to register rc device\n");
goto exit_free_dev;
}
platform_set_drvdata(pdev, ir);
/* IRQ */
ir->irq = platform_get_irq(pdev, 0);
if (ir->irq < 0) {
ret = ir->irq;
goto exit_free_dev;
}
ret = devm_request_irq(dev, ir->irq, sunxi_ir_irq, 0, SUNXI_IR_DEV, ir);
if (ret) {
dev_err(dev, "failed request irq\n");
goto exit_free_dev;
}
ret = sunxi_ir_hw_init(dev);
if (ret)
goto exit_free_dev;
dev_info(dev, "initialized sunXi IR driver\n");
return 0;
exit_free_dev:
rc_free_device(ir->rc);
return ret;
}
static void sunxi_ir_remove(struct platform_device *pdev)
{
struct sunxi_ir *ir = platform_get_drvdata(pdev);
rc_unregister_device(ir->rc);
sunxi_ir_hw_exit(&pdev->dev);
}
static void sunxi_ir_shutdown(struct platform_device *pdev)
{
sunxi_ir_hw_exit(&pdev->dev);
}
static const struct sunxi_ir_quirks sun4i_a10_ir_quirks = {
.has_reset = false,
.fifo_size = 16,
};
static const struct sunxi_ir_quirks sun5i_a13_ir_quirks = {
.has_reset = false,
.fifo_size = 64,
};
static const struct sunxi_ir_quirks sun6i_a31_ir_quirks = {
.has_reset = true,
.fifo_size = 64,
};
static const struct of_device_id sunxi_ir_match[] = {
{
.compatible = "allwinner,sun4i-a10-ir",
.data = &sun4i_a10_ir_quirks,
},
{
.compatible = "allwinner,sun5i-a13-ir",
.data = &sun5i_a13_ir_quirks,
},
{
.compatible = "allwinner,sun6i-a31-ir",
.data = &sun6i_a31_ir_quirks,
},
{}
};
MODULE_DEVICE_TABLE(of, sunxi_ir_match);
static struct platform_driver sunxi_ir_driver = {
.probe = sunxi_ir_probe,
.remove_new = sunxi_ir_remove,
.shutdown = sunxi_ir_shutdown,
.driver = {
.name = SUNXI_IR_DEV,
.of_match_table = sunxi_ir_match,
.pm = &sunxi_ir_pm_ops,
},
};
module_platform_driver(sunxi_ir_driver);
MODULE_DESCRIPTION("Allwinner sunXi IR controller driver");
MODULE_AUTHOR("Alexsey Shestacov <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/rc/sunxi-cir.c |
// SPDX-License-Identifier: GPL-2.0
// ir-sanyo-decoder.c - handle SANYO IR Pulse/Space protocol
//
// Copyright (C) 2011 by Mauro Carvalho Chehab
//
// This protocol uses the NEC protocol timings. However, data is formatted as:
// 13 bits Custom Code
// 13 bits NOT(Custom Code)
// 8 bits Key data
// 8 bits NOT(Key data)
//
// According to LIRC, this protocol is used by Sanyo, Aiwa and Chinon remotes.
// Information on this protocol is available in the Sanyo LC7461 datasheet.
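//
// Illustrative sketch (hypothetical values, not from a real remote): for
// a 13-bit address of 0x0555 and a key code of 0x2a, the 42-bit frame is
// sent MSB first as
//   0x0555, ~0x0555 (low 13 bits), 0x2a, ~0x2a
// and the resulting RC scancode is (address << 8) | command = 0x5552a.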
#include <linux/module.h>
#include <linux/bitrev.h>
#include "rc-core-priv.h"
#define SANYO_NBITS (13+13+8+8)
#define SANYO_UNIT 563 /* us */
#define SANYO_HEADER_PULSE (16 * SANYO_UNIT)
#define SANYO_HEADER_SPACE (8 * SANYO_UNIT)
#define SANYO_BIT_PULSE (1 * SANYO_UNIT)
#define SANYO_BIT_0_SPACE (1 * SANYO_UNIT)
#define SANYO_BIT_1_SPACE (3 * SANYO_UNIT)
#define SANYO_REPEAT_SPACE (150 * SANYO_UNIT)
#define SANYO_TRAILER_PULSE (1 * SANYO_UNIT)
#define SANYO_TRAILER_SPACE (10 * SANYO_UNIT) /* in fact, 42 */
enum sanyo_state {
STATE_INACTIVE,
STATE_HEADER_SPACE,
STATE_BIT_PULSE,
STATE_BIT_SPACE,
STATE_TRAILER_PULSE,
STATE_TRAILER_SPACE,
};
/**
* ir_sanyo_decode() - Decode one SANYO pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct sanyo_dec *data = &dev->raw->sanyo;
u32 scancode;
u16 address;
u8 command, not_command;
if (!is_timing_event(ev)) {
if (ev.overflow) {
dev_dbg(&dev->dev, "SANYO event overflow received. reset to state 0\n");
data->state = STATE_INACTIVE;
}
return 0;
}
dev_dbg(&dev->dev, "SANYO decode started at state %d (%uus %s)\n",
data->state, ev.duration, TO_STR(ev.pulse));
switch (data->state) {
case STATE_INACTIVE:
if (!ev.pulse)
break;
if (eq_margin(ev.duration, SANYO_HEADER_PULSE, SANYO_UNIT / 2)) {
data->count = 0;
data->state = STATE_HEADER_SPACE;
return 0;
}
break;
case STATE_HEADER_SPACE:
if (ev.pulse)
break;
if (eq_margin(ev.duration, SANYO_HEADER_SPACE, SANYO_UNIT / 2)) {
data->state = STATE_BIT_PULSE;
return 0;
}
break;
case STATE_BIT_PULSE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, SANYO_BIT_PULSE, SANYO_UNIT / 2))
break;
data->state = STATE_BIT_SPACE;
return 0;
case STATE_BIT_SPACE:
if (ev.pulse)
break;
if (!data->count && geq_margin(ev.duration, SANYO_REPEAT_SPACE, SANYO_UNIT / 2)) {
rc_repeat(dev);
dev_dbg(&dev->dev, "SANYO repeat last key\n");
data->state = STATE_INACTIVE;
return 0;
}
data->bits <<= 1;
if (eq_margin(ev.duration, SANYO_BIT_1_SPACE, SANYO_UNIT / 2))
data->bits |= 1;
else if (!eq_margin(ev.duration, SANYO_BIT_0_SPACE, SANYO_UNIT / 2))
break;
data->count++;
if (data->count == SANYO_NBITS)
data->state = STATE_TRAILER_PULSE;
else
data->state = STATE_BIT_PULSE;
return 0;
case STATE_TRAILER_PULSE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, SANYO_TRAILER_PULSE, SANYO_UNIT / 2))
break;
data->state = STATE_TRAILER_SPACE;
return 0;
case STATE_TRAILER_SPACE:
if (ev.pulse)
break;
if (!geq_margin(ev.duration, SANYO_TRAILER_SPACE, SANYO_UNIT / 2))
break;
address = bitrev16((data->bits >> 29) & 0x1fff) >> 3;
/* not_address = bitrev16((data->bits >> 16) & 0x1fff) >> 3; */
command = bitrev8((data->bits >> 8) & 0xff);
not_command = bitrev8((data->bits >> 0) & 0xff);
if ((command ^ not_command) != 0xff) {
dev_dbg(&dev->dev, "SANYO checksum error: received 0x%08llx\n",
data->bits);
data->state = STATE_INACTIVE;
return 0;
}
scancode = address << 8 | command;
dev_dbg(&dev->dev, "SANYO scancode: 0x%06x\n", scancode);
rc_keydown(dev, RC_PROTO_SANYO, scancode, 0);
data->state = STATE_INACTIVE;
return 0;
}
dev_dbg(&dev->dev, "SANYO decode failed at count %d state %d (%uus %s)\n",
data->count, data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
static const struct ir_raw_timings_pd ir_sanyo_timings = {
.header_pulse = SANYO_HEADER_PULSE,
.header_space = SANYO_HEADER_SPACE,
.bit_pulse = SANYO_BIT_PULSE,
.bit_space[0] = SANYO_BIT_0_SPACE,
.bit_space[1] = SANYO_BIT_1_SPACE,
.trailer_pulse = SANYO_TRAILER_PULSE,
.trailer_space = SANYO_TRAILER_SPACE,
.msb_first = 1,
};
/**
* ir_sanyo_encode() - Encode a scancode as a stream of raw events
*
* @protocol: protocol to encode
* @scancode: scancode to encode
* @events: array of raw ir events to write into
* @max: maximum size of @events
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
*/
static int ir_sanyo_encode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max)
{
struct ir_raw_event *e = events;
int ret;
u64 raw;
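	/*
	 * Pack the 42-bit frame MSB first: bits 41-29 hold the 13-bit
	 * address, bits 28-16 its complement, bits 15-8 the command and
	 * bits 7-0 the command's complement. The bitrev/shift expressions
	 * below convert from the scancode layout (address << 8 | command).
	 */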
raw = ((u64)(bitrev16(scancode >> 8) & 0xfff8) << (8 + 8 + 13 - 3)) |
((u64)(bitrev16(~scancode >> 8) & 0xfff8) << (8 + 8 + 0 - 3)) |
((bitrev8(scancode) & 0xff) << 8) |
(bitrev8(~scancode) & 0xff);
ret = ir_raw_gen_pd(&e, max, &ir_sanyo_timings, SANYO_NBITS, raw);
if (ret < 0)
return ret;
return e - events;
}
static struct ir_raw_handler sanyo_handler = {
.protocols = RC_PROTO_BIT_SANYO,
.decode = ir_sanyo_decode,
.encode = ir_sanyo_encode,
.carrier = 38000,
.min_timeout = SANYO_TRAILER_SPACE,
};
static int __init ir_sanyo_decode_init(void)
{
ir_raw_handler_register(&sanyo_handler);
printk(KERN_INFO "IR SANYO protocol handler initialized\n");
return 0;
}
static void __exit ir_sanyo_decode_exit(void)
{
ir_raw_handler_unregister(&sanyo_handler);
}
module_init(ir_sanyo_decode_init);
module_exit(ir_sanyo_decode_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("SANYO IR protocol decoder");
| linux-master | drivers/media/rc/ir-sanyo-decoder.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for USB Windows Media Center Ed. eHome Infrared Transceivers
*
* Copyright (c) 2010-2011, Jarod Wilson <[email protected]>
*
* Based on the original lirc_mceusb and lirc_mceusb2 drivers, by Dan
* Conti, Martin Blatter and Daniel Melander, the latter of which was
* in turn also based on the lirc_atiusb driver by Paul Miller. The
* two mce drivers were merged into one by Jarod Wilson, with transmit
* support for the 1st-gen device added primarily by Patrick Calhoun,
 * with a few tweaks by Jarod. Debugging improvements and proper
* support for what appears to be 3rd-gen hardware added by Jarod.
 * Initial port from lirc driver to ir-core driver by Jarod, based
* partially on a port to an earlier proposed IR infrastructure by
* Jon Smirl, which included enhancements and simplifications to the
* incoming IR buffer parsing routines.
*
* Updated in July of 2011 with the aid of Microsoft's official
* remote/transceiver requirements and specification document, found at
* download.microsoft.com, title
* Windows-Media-Center-RC-IR-Collection-Green-Button-Specification-03-08-2011-V2.pdf
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <linux/pm_wakeup.h>
#include <media/rc-core.h>
#define DRIVER_VERSION "1.95"
#define DRIVER_AUTHOR "Jarod Wilson <[email protected]>"
#define DRIVER_DESC "Windows Media Center Ed. eHome Infrared Transceiver " \
"device driver"
#define DRIVER_NAME "mceusb"
#define USB_TX_TIMEOUT 1000 /* in milliseconds */
#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
/* MCE constants */
#define MCE_IRBUF_SIZE 128 /* TX IR buffer length */
#define MCE_TIME_UNIT 50 /* Approx 50us resolution */
#define MCE_PACKET_SIZE 31 /* Max length of packet (with header) */
#define MCE_IRDATA_HEADER (0x80 + MCE_PACKET_SIZE - 1)
/* Actual format is 0x80 + num_bytes */
#define MCE_IRDATA_TRAILER 0x80 /* End of IR data */
#define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */
#define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */
#define MCE_PULSE_BIT 0x80 /* Pulse bit, MSB set == PULSE else SPACE */
#define MCE_PULSE_MASK 0x7f /* Pulse mask */
#define MCE_MAX_PULSE_LENGTH 0x7f /* Longest transmittable pulse symbol */
/*
* The interface between the host and the IR hardware is command-response
* based. All commands and responses have a consistent format, where a lead
* byte always identifies the type of data following it. The lead byte has
* a port value in the 3 highest bits and a length value in the 5 lowest
* bits.
*
* The length field is overloaded, with a value of 11111 indicating that the
* following byte is a command or response code, and the length of the entire
* message is determined by the code. If the length field is not 11111, then
* it specifies the number of bytes of port data that follow.
*/
#define MCE_CMD 0x1f
#define MCE_PORT_IR 0x4 /* (0x4 << 5) | MCE_CMD = 0x9f */
#define MCE_PORT_SYS 0x7 /* (0x7 << 5) | MCE_CMD = 0xff */
#define MCE_PORT_SER 0x6 /* 0xc0 through 0xdf flush & 0x1f bytes */
#define MCE_PORT_MASK 0xe0 /* Mask out command bits */
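/*
 * Worked example: a lead byte of 0x9f is (MCE_PORT_IR << 5) | MCE_CMD,
 * i.e. an IR-port command whose length is implied by the code byte that
 * follows. A lead byte of 0x84 has the length field 00100, so it is
 * followed by 4 bytes of raw IR port data.
 */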
/* Command port headers */
#define MCE_CMD_PORT_IR 0x9f /* IR-related cmd/rsp */
#define MCE_CMD_PORT_SYS 0xff /* System (non-IR) device cmd/rsp */
/* Commands that set device state (2-4 bytes in length) */
#define MCE_CMD_RESET 0xfe /* Reset device, 2 bytes */
#define MCE_CMD_RESUME 0xaa /* Resume device after error, 2 bytes */
#define MCE_CMD_SETIRCFS 0x06 /* Set tx carrier, 4 bytes */
#define MCE_CMD_SETIRTIMEOUT 0x0c /* Set timeout, 4 bytes */
#define MCE_CMD_SETIRTXPORTS 0x08 /* Set tx ports, 3 bytes */
#define MCE_CMD_SETIRRXPORTEN 0x14 /* Set rx ports, 3 bytes */
#define MCE_CMD_FLASHLED 0x23 /* Flash receiver LED, 2 bytes */
/* Commands that query device state (all 2 bytes, unless noted) */
#define MCE_CMD_GETIRCFS 0x07 /* Get carrier */
#define MCE_CMD_GETIRTIMEOUT 0x0d /* Get timeout */
#define MCE_CMD_GETIRTXPORTS 0x13 /* Get tx ports */
#define MCE_CMD_GETIRRXPORTEN 0x15 /* Get rx ports */
#define MCE_CMD_GETPORTSTATUS 0x11 /* Get tx port status, 3 bytes */
#define MCE_CMD_GETIRNUMPORTS 0x16 /* Get number of ports */
#define MCE_CMD_GETWAKESOURCE 0x17 /* Get wake source */
#define MCE_CMD_GETEMVER 0x22 /* Get emulator interface version */
#define MCE_CMD_GETDEVDETAILS 0x21 /* Get device details (em ver2 only) */
#define MCE_CMD_GETWAKESUPPORT 0x20 /* Get wake details (em ver2 only) */
#define MCE_CMD_GETWAKEVERSION 0x18 /* Get wake pattern (em ver2 only) */
/* Misc commands */
#define MCE_CMD_NOP 0xff /* No operation */
/* Responses to commands (non-error cases) */
#define MCE_RSP_EQIRCFS 0x06 /* tx carrier, 4 bytes */
#define MCE_RSP_EQIRTIMEOUT 0x0c /* rx timeout, 4 bytes */
#define MCE_RSP_GETWAKESOURCE 0x17 /* wake source, 3 bytes */
#define MCE_RSP_EQIRTXPORTS 0x08 /* tx port mask, 3 bytes */
#define MCE_RSP_EQIRRXPORTEN 0x14 /* rx port mask, 3 bytes */
#define MCE_RSP_GETPORTSTATUS 0x11 /* tx port status, 7 bytes */
#define MCE_RSP_EQIRRXCFCNT 0x15 /* rx carrier count, 4 bytes */
#define MCE_RSP_EQIRNUMPORTS 0x16 /* number of ports, 4 bytes */
#define MCE_RSP_EQWAKESUPPORT 0x20 /* wake capabilities, 3 bytes */
#define MCE_RSP_EQWAKEVERSION 0x18 /* wake pattern details, 6 bytes */
#define MCE_RSP_EQDEVDETAILS 0x21 /* device capabilities, 3 bytes */
#define MCE_RSP_EQEMVER 0x22 /* emulator interface ver, 3 bytes */
#define MCE_RSP_FLASHLED 0x23 /* success flashing LED, 2 bytes */
/* Responses to error cases, must send MCE_CMD_RESUME to clear them */
#define MCE_RSP_CMD_ILLEGAL 0xfe /* illegal command for port, 2 bytes */
#define MCE_RSP_TX_TIMEOUT 0x81 /* tx timed out, 2 bytes */
/* Misc commands/responses not defined in the MCE remote/transceiver spec */
#define MCE_CMD_SIG_END 0x01 /* End of signal */
#define MCE_CMD_PING 0x03 /* Ping device */
#define MCE_CMD_UNKNOWN 0x04 /* Unknown */
#define MCE_CMD_UNKNOWN2 0x05 /* Unknown */
#define MCE_CMD_UNKNOWN3 0x09 /* Unknown */
#define MCE_CMD_UNKNOWN4 0x0a /* Unknown */
#define MCE_CMD_G_REVISION 0x0b /* Get hw/sw revision */
#define MCE_CMD_UNKNOWN5 0x0e /* Unknown */
#define MCE_CMD_UNKNOWN6 0x0f /* Unknown */
#define MCE_CMD_UNKNOWN8 0x19 /* Unknown */
#define MCE_CMD_UNKNOWN9 0x1b /* Unknown */
#define MCE_CMD_NULL 0x00 /* These show up various places... */
/* if buf[i] & MCE_PORT_MASK == 0x80 and buf[i] != MCE_CMD_PORT_IR,
* then we're looking at a raw IR data sample */
#define MCE_COMMAND_IRDATA 0x80
#define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */
#define VENDOR_PHILIPS 0x0471
#define VENDOR_SMK 0x0609
#define VENDOR_TATUNG 0x1460
#define VENDOR_GATEWAY 0x107b
#define VENDOR_SHUTTLE 0x1308
#define VENDOR_SHUTTLE2 0x051c
#define VENDOR_MITSUMI 0x03ee
#define VENDOR_TOPSEED 0x1784
#define VENDOR_RICAVISION 0x179d
#define VENDOR_ITRON 0x195d
#define VENDOR_FIC 0x1509
#define VENDOR_LG 0x043e
#define VENDOR_MICROSOFT 0x045e
#define VENDOR_FORMOSA 0x147a
#define VENDOR_FINTEK 0x1934
#define VENDOR_PINNACLE 0x2304
#define VENDOR_ECS 0x1019
#define VENDOR_WISTRON 0x0fb8
#define VENDOR_COMPRO 0x185b
#define VENDOR_NORTHSTAR 0x04eb
#define VENDOR_REALTEK 0x0bda
#define VENDOR_TIVO 0x105a
#define VENDOR_CONEXANT 0x0572
#define VENDOR_TWISTEDMELON 0x2596
#define VENDOR_HAUPPAUGE 0x2040
#define VENDOR_PCTV 0x2013
#define VENDOR_ADAPTEC 0x03f3
enum mceusb_model_type {
MCE_GEN2 = 0, /* Most boards */
MCE_GEN1,
MCE_GEN3,
MCE_GEN3_BROKEN_IRTIMEOUT,
MCE_GEN2_TX_INV,
MCE_GEN2_TX_INV_RX_GOOD,
POLARIS_EVK,
CX_HYBRID_TV,
MULTIFUNCTION,
TIVO_KIT,
MCE_GEN2_NO_TX,
HAUPPAUGE_CX_HYBRID_TV,
EVROMEDIA_FULL_HYBRID_FULLHD,
ASTROMETA_T2HYBRID,
};
struct mceusb_model {
u32 mce_gen1:1;
u32 mce_gen2:1;
u32 mce_gen3:1;
u32 tx_mask_normal:1;
u32 no_tx:1;
u32 broken_irtimeout:1;
/*
* 2nd IR receiver (short-range, wideband) for learning mode:
* 0, absent 2nd receiver (rx2)
* 1, rx2 present
* 2, rx2 which under counts IR carrier cycles
*/
u32 rx2;
int ir_intfnum;
const char *rc_map; /* Allow specify a per-board map */
const char *name; /* per-board name */
};
static const struct mceusb_model mceusb_model[] = {
[MCE_GEN1] = {
.mce_gen1 = 1,
.tx_mask_normal = 1,
.rx2 = 2,
},
[MCE_GEN2] = {
.mce_gen2 = 1,
.rx2 = 2,
},
[MCE_GEN2_NO_TX] = {
.mce_gen2 = 1,
.no_tx = 1,
},
[MCE_GEN2_TX_INV] = {
.mce_gen2 = 1,
.tx_mask_normal = 1,
.rx2 = 1,
},
[MCE_GEN2_TX_INV_RX_GOOD] = {
.mce_gen2 = 1,
.tx_mask_normal = 1,
.rx2 = 2,
},
[MCE_GEN3] = {
.mce_gen3 = 1,
.tx_mask_normal = 1,
.rx2 = 2,
},
[MCE_GEN3_BROKEN_IRTIMEOUT] = {
.mce_gen3 = 1,
.tx_mask_normal = 1,
.rx2 = 2,
.broken_irtimeout = 1
},
[POLARIS_EVK] = {
/*
* In fact, the EVK is shipped without
		 * remotes, but we should have something handy
		 * to allow testing it
*/
.name = "Conexant Hybrid TV (cx231xx) MCE IR",
.rx2 = 2,
},
[CX_HYBRID_TV] = {
.no_tx = 1, /* tx isn't wired up at all */
.name = "Conexant Hybrid TV (cx231xx) MCE IR",
},
[HAUPPAUGE_CX_HYBRID_TV] = {
.no_tx = 1, /* eeprom says it has no tx */
.name = "Conexant Hybrid TV (cx231xx) MCE IR no TX",
},
[MULTIFUNCTION] = {
.mce_gen2 = 1,
.ir_intfnum = 2,
.rx2 = 2,
},
[TIVO_KIT] = {
.mce_gen2 = 1,
.rc_map = RC_MAP_TIVO,
.rx2 = 2,
},
[EVROMEDIA_FULL_HYBRID_FULLHD] = {
.name = "Evromedia USB Full Hybrid Full HD",
.no_tx = 1,
.rc_map = RC_MAP_MSI_DIGIVOX_III,
},
[ASTROMETA_T2HYBRID] = {
.name = "Astrometa T2Hybrid",
.no_tx = 1,
.rc_map = RC_MAP_ASTROMETA_T2HYBRID,
}
};
static const struct usb_device_id mceusb_dev_table[] = {
/* Original Microsoft MCE IR Transceiver (often HP-branded) */
{ USB_DEVICE(VENDOR_MICROSOFT, 0x006d),
.driver_info = MCE_GEN1 },
/* Philips Infrared Transceiver - Sahara branded */
{ USB_DEVICE(VENDOR_PHILIPS, 0x0608) },
/* Philips Infrared Transceiver - HP branded */
{ USB_DEVICE(VENDOR_PHILIPS, 0x060c),
.driver_info = MCE_GEN2_TX_INV },
/* Philips SRM5100 */
{ USB_DEVICE(VENDOR_PHILIPS, 0x060d) },
/* Philips Infrared Transceiver - Omaura */
{ USB_DEVICE(VENDOR_PHILIPS, 0x060f) },
/* Philips Infrared Transceiver - Spinel plus */
{ USB_DEVICE(VENDOR_PHILIPS, 0x0613) },
/* Philips eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_PHILIPS, 0x0815) },
/* Philips/Spinel plus IR transceiver for ASUS */
{ USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
/* Philips/Spinel plus IR transceiver for ASUS */
{ USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
/* Philips IR transceiver (Dell branded) */
{ USB_DEVICE(VENDOR_PHILIPS, 0x2093),
.driver_info = MCE_GEN2_TX_INV },
/* Realtek MCE IR Receiver and card reader */
{ USB_DEVICE(VENDOR_REALTEK, 0x0161),
.driver_info = MULTIFUNCTION },
/* SMK/Toshiba G83C0004D410 */
{ USB_DEVICE(VENDOR_SMK, 0x031d),
.driver_info = MCE_GEN2_TX_INV_RX_GOOD },
/* SMK eHome Infrared Transceiver (Sony VAIO) */
{ USB_DEVICE(VENDOR_SMK, 0x0322),
.driver_info = MCE_GEN2_TX_INV },
/* bundled with Hauppauge PVR-150 */
{ USB_DEVICE(VENDOR_SMK, 0x0334),
.driver_info = MCE_GEN2_TX_INV },
/* SMK eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_SMK, 0x0338) },
/* SMK/I-O Data GV-MC7/RCKIT Receiver */
{ USB_DEVICE(VENDOR_SMK, 0x0353),
.driver_info = MCE_GEN2_NO_TX },
/* SMK RXX6000 Infrared Receiver */
{ USB_DEVICE(VENDOR_SMK, 0x0357),
.driver_info = MCE_GEN2_NO_TX },
/* Tatung eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_TATUNG, 0x9150) },
/* Shuttle eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_SHUTTLE, 0xc001) },
/* Shuttle eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_SHUTTLE2, 0xc001) },
/* Gateway eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_GATEWAY, 0x3009) },
/* Mitsumi */
{ USB_DEVICE(VENDOR_MITSUMI, 0x2501) },
/* Topseed eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_TOPSEED, 0x0001),
.driver_info = MCE_GEN2_TX_INV },
/* Topseed HP eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_TOPSEED, 0x0006),
.driver_info = MCE_GEN2_TX_INV },
/* Topseed eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_TOPSEED, 0x0007),
.driver_info = MCE_GEN2_TX_INV },
/* Topseed eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_TOPSEED, 0x0008),
.driver_info = MCE_GEN3 },
/* Topseed eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_TOPSEED, 0x000a),
.driver_info = MCE_GEN2_TX_INV },
/* Topseed eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_TOPSEED, 0x0011),
.driver_info = MCE_GEN3_BROKEN_IRTIMEOUT },
/* Ricavision internal Infrared Transceiver */
{ USB_DEVICE(VENDOR_RICAVISION, 0x0010) },
/* Itron ione Libra Q-11 */
{ USB_DEVICE(VENDOR_ITRON, 0x7002) },
/* FIC eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_FIC, 0x9242) },
/* LG eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_LG, 0x9803) },
/* Microsoft MCE Infrared Transceiver */
{ USB_DEVICE(VENDOR_MICROSOFT, 0x00a0) },
/* Formosa eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_FORMOSA, 0xe015) },
/* Formosa21 / eHome Infrared Receiver */
{ USB_DEVICE(VENDOR_FORMOSA, 0xe016) },
/* Formosa aim / Trust MCE Infrared Receiver */
{ USB_DEVICE(VENDOR_FORMOSA, 0xe017),
.driver_info = MCE_GEN2_NO_TX },
/* Formosa Industrial Computing / Beanbag Emulation Device */
{ USB_DEVICE(VENDOR_FORMOSA, 0xe018) },
/* Formosa21 / eHome Infrared Receiver */
{ USB_DEVICE(VENDOR_FORMOSA, 0xe03a) },
/* Formosa Industrial Computing AIM IR605/A */
{ USB_DEVICE(VENDOR_FORMOSA, 0xe03c) },
/* Formosa Industrial Computing */
{ USB_DEVICE(VENDOR_FORMOSA, 0xe03e) },
/* Formosa Industrial Computing */
{ USB_DEVICE(VENDOR_FORMOSA, 0xe042) },
/* Fintek eHome Infrared Transceiver (HP branded) */
{ USB_DEVICE(VENDOR_FINTEK, 0x5168),
.driver_info = MCE_GEN2_TX_INV },
/* Fintek eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_FINTEK, 0x0602) },
/* Fintek eHome Infrared Transceiver (in the AOpen MP45) */
{ USB_DEVICE(VENDOR_FINTEK, 0x0702) },
/* Pinnacle Remote Kit */
{ USB_DEVICE(VENDOR_PINNACLE, 0x0225),
.driver_info = MCE_GEN3 },
/* Elitegroup Computer Systems IR */
{ USB_DEVICE(VENDOR_ECS, 0x0f38) },
/* Wistron Corp. eHome Infrared Receiver */
{ USB_DEVICE(VENDOR_WISTRON, 0x0002) },
/* Compro K100 */
{ USB_DEVICE(VENDOR_COMPRO, 0x3020) },
/* Compro K100 v2 */
{ USB_DEVICE(VENDOR_COMPRO, 0x3082) },
/* Northstar Systems, Inc. eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_NORTHSTAR, 0xe004) },
/* TiVo PC IR Receiver */
{ USB_DEVICE(VENDOR_TIVO, 0x2000),
.driver_info = TIVO_KIT },
/* Conexant Hybrid TV "Shelby" Polaris SDK */
{ USB_DEVICE(VENDOR_CONEXANT, 0x58a1),
.driver_info = POLARIS_EVK },
/* Conexant Hybrid TV RDU253S Polaris */
{ USB_DEVICE(VENDOR_CONEXANT, 0x58a5),
.driver_info = CX_HYBRID_TV },
/* Twisted Melon Inc. - Manta Mini Receiver */
{ USB_DEVICE(VENDOR_TWISTEDMELON, 0x8008) },
/* Twisted Melon Inc. - Manta Pico Receiver */
{ USB_DEVICE(VENDOR_TWISTEDMELON, 0x8016) },
/* Twisted Melon Inc. - Manta Transceiver */
{ USB_DEVICE(VENDOR_TWISTEDMELON, 0x8042) },
/* Hauppauge WINTV-HVR-HVR 930C-HD - based on cx231xx */
{ USB_DEVICE(VENDOR_HAUPPAUGE, 0xb130),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
{ USB_DEVICE(VENDOR_HAUPPAUGE, 0xb131),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
{ USB_DEVICE(VENDOR_HAUPPAUGE, 0xb138),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
{ USB_DEVICE(VENDOR_HAUPPAUGE, 0xb139),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
/* Hauppauge WinTV-HVR-935C - based on cx231xx */
{ USB_DEVICE(VENDOR_HAUPPAUGE, 0xb151),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
/* Hauppauge WinTV-HVR-955Q - based on cx231xx */
{ USB_DEVICE(VENDOR_HAUPPAUGE, 0xb123),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
/* Hauppauge WinTV-HVR-975 - based on cx231xx */
{ USB_DEVICE(VENDOR_HAUPPAUGE, 0xb150),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
{ USB_DEVICE(VENDOR_PCTV, 0x0259),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
{ USB_DEVICE(VENDOR_PCTV, 0x025e),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
/* Adaptec / HP eHome Receiver */
{ USB_DEVICE(VENDOR_ADAPTEC, 0x0094) },
/* Evromedia USB Full Hybrid Full HD */
{ USB_DEVICE(0x1b80, 0xd3b2),
.driver_info = EVROMEDIA_FULL_HYBRID_FULLHD },
/* Astrometa T2hybrid */
{ USB_DEVICE(0x15f4, 0x0135),
.driver_info = ASTROMETA_T2HYBRID },
/* Terminating entry */
{ }
};
/* data structure for each usb transceiver */
struct mceusb_dev {
/* ir-core bits */
struct rc_dev *rc;
/* optional features we can enable */
bool carrier_report_enabled;
bool wideband_rx_enabled; /* aka learning mode, short-range rx */
/* core device bits */
struct device *dev;
/* usb */
struct usb_device *usbdev;
struct usb_interface *usbintf;
struct urb *urb_in;
unsigned int pipe_in;
struct usb_endpoint_descriptor *usb_ep_out;
unsigned int pipe_out;
/* buffers and dma */
unsigned char *buf_in;
unsigned int len_in;
dma_addr_t dma_in;
enum {
CMD_HEADER = 0,
SUBCMD,
CMD_DATA,
PARSE_IRDATA,
} parser_state;
u8 cmd, rem; /* Remaining IR data bytes in packet */
struct {
u32 connected:1;
u32 tx_mask_normal:1;
u32 microsoft_gen1:1;
u32 no_tx:1;
u32 rx2;
} flags;
/* transmit support */
u32 carrier;
unsigned char tx_mask;
char name[128];
char phys[64];
enum mceusb_model_type model;
bool need_reset; /* flag to issue a device resume cmd */
u8 emver; /* emulator interface version */
u8 num_txports; /* number of transmit ports */
u8 num_rxports; /* number of receive sensors */
u8 txports_cabled; /* bitmask of transmitters with cable */
u8 rxports_active; /* bitmask of active receive sensors */
bool learning_active; /* wideband rx is active */
/* receiver carrier frequency detection support */
u32 pulse_tunit; /* IR pulse "on" cumulative time units */
u32 pulse_count; /* pulse "on" count in measurement interval */
/*
* support for async error handler mceusb_deferred_kevent()
* where usb_clear_halt(), usb_reset_configuration(),
* usb_reset_device(), etc. must be done in process context
*/
struct work_struct kevent;
unsigned long kevent_flags;
# define EVENT_TX_HALT 0
# define EVENT_RX_HALT 1
# define EVENT_RST_PEND 31
};
/* MCE Device Command Strings, generally a port and command pair */
static char DEVICE_RESUME[] = {MCE_CMD_NULL, MCE_CMD_PORT_SYS,
MCE_CMD_RESUME};
static char GET_REVISION[] = {MCE_CMD_PORT_SYS, MCE_CMD_G_REVISION};
static char GET_EMVER[] = {MCE_CMD_PORT_SYS, MCE_CMD_GETEMVER};
static char GET_WAKEVERSION[] = {MCE_CMD_PORT_SYS, MCE_CMD_GETWAKEVERSION};
static char FLASH_LED[] = {MCE_CMD_PORT_SYS, MCE_CMD_FLASHLED};
static char GET_UNKNOWN2[] = {MCE_CMD_PORT_IR, MCE_CMD_UNKNOWN2};
static char GET_CARRIER_FREQ[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRCFS};
static char GET_RX_TIMEOUT[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRTIMEOUT};
static char GET_NUM_PORTS[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRNUMPORTS};
static char GET_TX_BITMASK[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRTXPORTS};
static char GET_RX_SENSOR[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRRXPORTEN};
/* sub in desired values in lower byte or bytes for full command */
/* FIXME: make use of these for transmit.
static char SET_CARRIER_FREQ[] = {MCE_CMD_PORT_IR,
MCE_CMD_SETIRCFS, 0x00, 0x00};
static char SET_TX_BITMASK[] = {MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00};
static char SET_RX_TIMEOUT[] = {MCE_CMD_PORT_IR,
MCE_CMD_SETIRTIMEOUT, 0x00, 0x00};
static char SET_RX_SENSOR[] = {MCE_CMD_PORT_IR,
MCE_RSP_EQIRRXPORTEN, 0x00};
*/
static int mceusb_cmd_datasize(u8 cmd, u8 subcmd)
{
int datasize = 0;
switch (cmd) {
case MCE_CMD_NULL:
if (subcmd == MCE_CMD_PORT_SYS)
datasize = 1;
break;
case MCE_CMD_PORT_SYS:
switch (subcmd) {
case MCE_RSP_GETPORTSTATUS:
datasize = 5;
break;
case MCE_RSP_EQWAKEVERSION:
datasize = 4;
break;
case MCE_CMD_G_REVISION:
datasize = 4;
break;
case MCE_RSP_EQWAKESUPPORT:
case MCE_RSP_GETWAKESOURCE:
case MCE_RSP_EQDEVDETAILS:
case MCE_RSP_EQEMVER:
datasize = 1;
break;
}
break;
case MCE_CMD_PORT_IR:
switch (subcmd) {
case MCE_CMD_UNKNOWN:
case MCE_RSP_EQIRCFS:
case MCE_RSP_EQIRTIMEOUT:
case MCE_RSP_EQIRRXCFCNT:
case MCE_RSP_EQIRNUMPORTS:
datasize = 2;
break;
case MCE_CMD_SIG_END:
case MCE_RSP_EQIRTXPORTS:
case MCE_RSP_EQIRRXPORTEN:
datasize = 1;
break;
}
}
return datasize;
}
static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
int offset, int len, bool out)
{
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
char *inout;
u8 cmd, subcmd, *data;
struct device *dev = ir->dev;
u32 carrier, period;
if (offset < 0 || offset >= buf_len)
return;
dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
(out ? 't' : 'r'), offset,
min(len, buf_len - offset), buf + offset, len, buf_len);
inout = out ? "Request" : "Got";
cmd = buf[offset];
subcmd = (offset + 1 < buf_len) ? buf[offset + 1] : 0;
data = &buf[offset] + 2;
/* Trace meaningless 0xb1 0x60 header bytes on original receiver */
if (ir->flags.microsoft_gen1 && !out && !offset) {
dev_dbg(dev, "MCE gen 1 header");
return;
}
/* Trace IR data header or trailer */
if (cmd != MCE_CMD_PORT_IR &&
(cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA) {
if (cmd == MCE_IRDATA_TRAILER)
dev_dbg(dev, "End of raw IR data");
else
dev_dbg(dev, "Raw IR data, %d pulse/space samples",
cmd & MCE_PACKET_LENGTH_MASK);
return;
}
/* Unexpected end of buffer? */
if (offset + len > buf_len)
return;
/* Decode MCE command/response */
switch (cmd) {
case MCE_CMD_NULL:
if (subcmd == MCE_CMD_NULL)
break;
if ((subcmd == MCE_CMD_PORT_SYS) &&
(data[0] == MCE_CMD_RESUME))
dev_dbg(dev, "Device resume requested");
else
dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
cmd, subcmd);
break;
case MCE_CMD_PORT_SYS:
switch (subcmd) {
case MCE_RSP_EQEMVER:
if (!out)
dev_dbg(dev, "Emulator interface version %x",
data[0]);
break;
case MCE_CMD_G_REVISION:
if (len == 2)
dev_dbg(dev, "Get hw/sw rev?");
else
dev_dbg(dev, "hw/sw rev %*ph",
4, &buf[offset + 2]);
break;
case MCE_CMD_RESUME:
dev_dbg(dev, "Device resume requested");
break;
case MCE_RSP_CMD_ILLEGAL:
dev_dbg(dev, "Illegal PORT_SYS command");
break;
case MCE_RSP_EQWAKEVERSION:
if (!out)
dev_dbg(dev, "Wake version, proto: 0x%02x, payload: 0x%02x, address: 0x%02x, version: 0x%02x",
data[0], data[1], data[2], data[3]);
break;
case MCE_RSP_GETPORTSTATUS:
if (!out)
			/* We use data[0] + 1 here, to match hw labels */
dev_dbg(dev, "TX port %d: blaster is%s connected",
data[0] + 1, data[3] ? " not" : "");
break;
case MCE_CMD_FLASHLED:
dev_dbg(dev, "Attempting to flash LED");
break;
default:
dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
cmd, subcmd);
break;
}
break;
case MCE_CMD_PORT_IR:
switch (subcmd) {
case MCE_CMD_SIG_END:
dev_dbg(dev, "End of signal");
break;
case MCE_CMD_PING:
dev_dbg(dev, "Ping");
break;
case MCE_CMD_UNKNOWN:
dev_dbg(dev, "Resp to 9f 05 of 0x%02x 0x%02x",
data[0], data[1]);
break;
case MCE_RSP_EQIRCFS:
if (!data[0] && !data[1]) {
dev_dbg(dev, "%s: no carrier", inout);
break;
}
// prescaler should make sense
if (data[0] > 8)
break;
period = DIV_ROUND_CLOSEST((1U << data[0] * 2) *
(data[1] + 1), 10);
if (!period)
break;
carrier = USEC_PER_SEC / period;
dev_dbg(dev, "%s carrier of %u Hz (period %uus)",
inout, carrier, period);
break;
case MCE_CMD_GETIRCFS:
dev_dbg(dev, "Get carrier mode and freq");
break;
case MCE_RSP_EQIRTXPORTS:
dev_dbg(dev, "%s transmit blaster mask of 0x%02x",
inout, data[0]);
break;
case MCE_RSP_EQIRTIMEOUT:
/* value is in units of 50us, so x*50/1000 ms */
period = ((data[0] << 8) | data[1]) *
MCE_TIME_UNIT / 1000;
dev_dbg(dev, "%s receive timeout of %d ms",
inout, period);
break;
case MCE_CMD_GETIRTIMEOUT:
dev_dbg(dev, "Get receive timeout");
break;
case MCE_CMD_GETIRTXPORTS:
dev_dbg(dev, "Get transmit blaster mask");
break;
case MCE_RSP_EQIRRXPORTEN:
dev_dbg(dev, "%s %s-range receive sensor in use",
inout, data[0] == 0x02 ? "short" : "long");
break;
case MCE_CMD_GETIRRXPORTEN:
/* aka MCE_RSP_EQIRRXCFCNT */
if (out)
dev_dbg(dev, "Get receive sensor");
else
dev_dbg(dev, "RX carrier cycle count: %d",
((data[0] << 8) | data[1]));
break;
case MCE_RSP_EQIRNUMPORTS:
if (out)
break;
dev_dbg(dev, "Num TX ports: %x, num RX ports: %x",
data[0], data[1]);
break;
case MCE_RSP_CMD_ILLEGAL:
dev_dbg(dev, "Illegal PORT_IR command");
break;
case MCE_RSP_TX_TIMEOUT:
dev_dbg(dev, "IR TX timeout (TX buffer underrun)");
break;
default:
dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
cmd, subcmd);
break;
}
break;
default:
break;
}
#endif
}
/*
* Schedule work that can't be done in interrupt handlers
* (mceusb_dev_recv() and mce_write_callback()) nor tasklets.
* Invokes mceusb_deferred_kevent() for recovering from
* error events specified by the kevent bit field.
*/
static void mceusb_defer_kevent(struct mceusb_dev *ir, int kevent)
{
set_bit(kevent, &ir->kevent_flags);
if (test_bit(EVENT_RST_PEND, &ir->kevent_flags)) {
dev_dbg(ir->dev, "kevent %d dropped pending USB Reset Device",
kevent);
return;
}
if (!schedule_work(&ir->kevent))
dev_dbg(ir->dev, "kevent %d already scheduled", kevent);
else
dev_dbg(ir->dev, "kevent %d scheduled", kevent);
}
static void mce_write_callback(struct urb *urb)
{
if (!urb)
return;
complete(urb->context);
}
/*
* Write (TX/send) data to MCE device USB endpoint out.
* Used for IR blaster TX and MCE device commands.
*
* Return: The number of bytes written (> 0) or errno (< 0).
*/
static int mce_write(struct mceusb_dev *ir, u8 *data, int size)
{
int ret;
struct urb *urb;
struct device *dev = ir->dev;
unsigned char *buf_out;
struct completion tx_done;
unsigned long expire;
unsigned long ret_wait;
mceusb_dev_printdata(ir, data, size, 0, size, true);
urb = usb_alloc_urb(0, GFP_KERNEL);
if (unlikely(!urb)) {
dev_err(dev, "Error: mce write couldn't allocate urb");
return -ENOMEM;
}
buf_out = kmalloc(size, GFP_KERNEL);
if (!buf_out) {
usb_free_urb(urb);
return -ENOMEM;
}
init_completion(&tx_done);
/* outbound data */
if (usb_endpoint_xfer_int(ir->usb_ep_out))
usb_fill_int_urb(urb, ir->usbdev, ir->pipe_out,
buf_out, size, mce_write_callback, &tx_done,
ir->usb_ep_out->bInterval);
else
usb_fill_bulk_urb(urb, ir->usbdev, ir->pipe_out,
buf_out, size, mce_write_callback, &tx_done);
memcpy(buf_out, data, size);
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
dev_err(dev, "Error: mce write submit urb error = %d", ret);
kfree(buf_out);
usb_free_urb(urb);
return ret;
}
expire = msecs_to_jiffies(USB_TX_TIMEOUT);
ret_wait = wait_for_completion_timeout(&tx_done, expire);
if (!ret_wait) {
dev_err(dev, "Error: mce write timed out (expire = %lu (%dms))",
expire, USB_TX_TIMEOUT);
usb_kill_urb(urb);
ret = (urb->status == -ENOENT ? -ETIMEDOUT : urb->status);
} else {
ret = urb->status;
}
if (ret >= 0)
ret = urb->actual_length; /* bytes written */
switch (urb->status) {
/* success */
case 0:
break;
case -ECONNRESET:
case -ENOENT:
case -EILSEQ:
case -ESHUTDOWN:
break;
case -EPIPE:
dev_err(ir->dev, "Error: mce write urb status = %d (TX HALT)",
urb->status);
mceusb_defer_kevent(ir, EVENT_TX_HALT);
break;
default:
dev_err(ir->dev, "Error: mce write urb status = %d",
urb->status);
break;
}
dev_dbg(dev, "tx done status = %d (wait = %lu, expire = %lu (%dms), urb->actual_length = %d, urb->status = %d)",
ret, ret_wait, expire, USB_TX_TIMEOUT,
urb->actual_length, urb->status);
kfree(buf_out);
usb_free_urb(urb);
return ret;
}
static void mce_command_out(struct mceusb_dev *ir, u8 *data, int size)
{
int rsize = sizeof(DEVICE_RESUME);
if (ir->need_reset) {
ir->need_reset = false;
mce_write(ir, DEVICE_RESUME, rsize);
msleep(10);
}
mce_write(ir, data, size);
msleep(10);
}
/*
* Transmit IR out the MCE device IR blaster port(s).
*
* Convert IR pulse/space sequence from LIRC to MCE format.
* Break up a long IR sequence into multiple parts (MCE IR data packets).
*
* u32 txbuf[] consists of IR pulse, space, ..., and pulse times in usec.
* Pulses and spaces are implicit by their position.
* The first IR sample, txbuf[0], is always a pulse.
*
* u8 irbuf[] consists of multiple IR data packets for the MCE device.
* A packet is 1 u8 MCE_IRDATA_HEADER and up to 30 u8 IR samples.
* An IR sample is 1-bit pulse/space flag with 7-bit time
* in MCE time units (50usec).
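 *
 * Worked example (illustrative): a 900us pulse becomes one sample,
 * 0x80 | (900 / 50) = 0x92; a 9000us pulse (180 units) is split into
 * 0xff (127 units) followed by 0xb5 (53 units); a 4500us space is 0x5a.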
*
* Return: The number of IR samples sent (> 0) or errno (< 0).
*/
static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
struct mceusb_dev *ir = dev->priv;
u8 cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00 };
u8 irbuf[MCE_IRBUF_SIZE];
int ircount = 0;
unsigned int irsample;
int i, length, ret;
/* Send the set TX ports command */
cmdbuf[2] = ir->tx_mask;
mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
/* Generate mce IR data packet */
for (i = 0; i < count; i++) {
irsample = txbuf[i] / MCE_TIME_UNIT;
/* loop to support long pulses/spaces > 6350us (127*50us) */
while (irsample > 0) {
/* Insert IR header every 30th entry */
if (ircount % MCE_PACKET_SIZE == 0) {
/* Room for IR header and one IR sample? */
if (ircount >= MCE_IRBUF_SIZE - 1) {
/* Send near full buffer */
ret = mce_write(ir, irbuf, ircount);
if (ret < 0)
return ret;
ircount = 0;
}
irbuf[ircount++] = MCE_IRDATA_HEADER;
}
/* Insert IR sample */
if (irsample <= MCE_MAX_PULSE_LENGTH) {
irbuf[ircount] = irsample;
irsample = 0;
} else {
irbuf[ircount] = MCE_MAX_PULSE_LENGTH;
irsample -= MCE_MAX_PULSE_LENGTH;
}
/*
* Even i = IR pulse
* Odd i = IR space
*/
irbuf[ircount] |= (i & 1 ? 0 : MCE_PULSE_BIT);
ircount++;
/* IR buffer full? */
if (ircount >= MCE_IRBUF_SIZE) {
/* Fix packet length in last header */
length = ircount % MCE_PACKET_SIZE;
if (length > 0)
irbuf[ircount - length] -=
MCE_PACKET_SIZE - length;
/* Send full buffer */
ret = mce_write(ir, irbuf, ircount);
if (ret < 0)
return ret;
ircount = 0;
}
}
} /* after for loop, 0 <= ircount < MCE_IRBUF_SIZE */
/* Fix packet length in last header */
length = ircount % MCE_PACKET_SIZE;
if (length > 0)
irbuf[ircount - length] -= MCE_PACKET_SIZE - length;
/* Append IR trailer (0x80) to final partial (or empty) IR buffer */
irbuf[ircount++] = MCE_IRDATA_TRAILER;
/* Send final buffer */
ret = mce_write(ir, irbuf, ircount);
if (ret < 0)
return ret;
return count;
}
/* Sets active IR outputs -- mce devices typically have two */
static int mceusb_set_tx_mask(struct rc_dev *dev, u32 mask)
{
struct mceusb_dev *ir = dev->priv;
/* return number of transmitters */
int emitters = ir->num_txports ? ir->num_txports : 2;
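	/*
	 * Devices without the tx_mask_normal quirk use an inverted,
	 * shifted port scheme; e.g. requesting port 1 (mask 0x01) on
	 * such hardware is sent as ((0x01 ^ MCE_DEFAULT_TX_MASK) << 1)
	 * = 0x04 (worked example, matching the logic below).
	 */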
if (mask >= (1 << emitters))
return emitters;
if (ir->flags.tx_mask_normal)
ir->tx_mask = mask;
else
ir->tx_mask = (mask != MCE_DEFAULT_TX_MASK ?
mask ^ MCE_DEFAULT_TX_MASK : mask) << 1;
return 0;
}
/* Sets the send carrier frequency and mode */
static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
struct mceusb_dev *ir = dev->priv;
int clk = 10000000;
int prescaler = 0, divisor = 0;
unsigned char cmdbuf[4] = { MCE_CMD_PORT_IR,
MCE_CMD_SETIRCFS, 0x00, 0x00 };
/* Carrier has changed */
if (ir->carrier != carrier) {
if (carrier == 0) {
ir->carrier = carrier;
cmdbuf[2] = MCE_CMD_SIG_END;
cmdbuf[3] = MCE_IRDATA_TRAILER;
dev_dbg(ir->dev, "disabling carrier modulation");
mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
return 0;
}
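		/*
		 * Worked example: for a requested 38 kHz carrier,
		 * prescaler 0 gives 10000000 / 38000 = 263 (> 0xff), so
		 * prescaler 1 is used: 2500000 / 38000 = 65, yielding an
		 * actual carrier of 2500000 / 65 ~= 38.46 kHz.
		 */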
for (prescaler = 0; prescaler < 4; ++prescaler) {
divisor = (clk >> (2 * prescaler)) / carrier;
if (divisor <= 0xff) {
ir->carrier = carrier;
cmdbuf[2] = prescaler;
cmdbuf[3] = divisor;
dev_dbg(ir->dev, "requesting %u HZ carrier",
carrier);
/* Transmit new carrier to mce device */
mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
return 0;
}
}
return -EINVAL;
}
return 0;
}
static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout)
{
u8 cmdbuf[4] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTIMEOUT, 0, 0 };
struct mceusb_dev *ir = dev->priv;
unsigned int units;
units = DIV_ROUND_UP(timeout, MCE_TIME_UNIT);
cmdbuf[2] = units >> 8;
cmdbuf[3] = units;
mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
/* get receiver timeout value */
mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
return 0;
}
/*
* Select or deselect the 2nd receiver port.
* Second receiver is learning mode, wide-band, short-range receiver.
* Only one receiver (long or short range) may be active at a time.
*/
static int mceusb_set_rx_wideband(struct rc_dev *dev, int enable)
{
struct mceusb_dev *ir = dev->priv;
unsigned char cmdbuf[3] = { MCE_CMD_PORT_IR,
MCE_CMD_SETIRRXPORTEN, 0x00 };
dev_dbg(ir->dev, "select %s-range receive sensor",
enable ? "short" : "long");
if (enable) {
ir->wideband_rx_enabled = true;
cmdbuf[2] = 2; /* port 2 is short range receiver */
} else {
ir->wideband_rx_enabled = false;
cmdbuf[2] = 1; /* port 1 is long range receiver */
}
mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
/* response from device sets ir->learning_active */
return 0;
}
/*
* Enable/disable receiver carrier frequency pass through reporting.
* Only the short-range receiver has carrier frequency measuring capability.
* Implicitly select this receiver when enabling carrier frequency reporting.
*/
static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
{
struct mceusb_dev *ir = dev->priv;
unsigned char cmdbuf[3] = { MCE_CMD_PORT_IR,
MCE_CMD_SETIRRXPORTEN, 0x00 };
dev_dbg(ir->dev, "%s short-range receiver carrier reporting",
enable ? "enable" : "disable");
if (enable) {
ir->carrier_report_enabled = true;
if (!ir->learning_active) {
cmdbuf[2] = 2; /* port 2 is short range receiver */
mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
}
} else {
ir->carrier_report_enabled = false;
/*
* Revert to normal (long-range) receiver only if the
* wideband (short-range) receiver wasn't explicitly
* enabled.
*/
if (ir->learning_active && !ir->wideband_rx_enabled) {
cmdbuf[2] = 1; /* port 1 is long range receiver */
mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
}
}
return 0;
}
/*
* Handle PORT_SYS/IR command response received from the MCE device.
*
* Assumes single response with all its data (not truncated)
* in buf_in[]. The response itself determines its total length
* (mceusb_cmd_datasize() + 2) and hence the minimum size of buf_in[].
*
* We don't do anything but print debug spew for many of the command bits
 * we receive from the hardware, but some of them carry useful
 * information that we store for later use.
*/
static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
{
u8 cmd = buf_in[0];
u8 subcmd = buf_in[1];
u8 *hi = &buf_in[2]; /* read only when required */
u8 *lo = &buf_in[3]; /* read only when required */
struct ir_raw_event rawir = {};
u32 carrier_cycles;
u32 cycles_fix;
if (cmd == MCE_CMD_PORT_SYS) {
switch (subcmd) {
/* the one and only 5-byte return value command */
case MCE_RSP_GETPORTSTATUS:
if (buf_in[5] == 0 && *hi < 8)
ir->txports_cabled |= 1 << *hi;
break;
/* 1-byte return value commands */
case MCE_RSP_EQEMVER:
ir->emver = *hi;
break;
/* No return value commands */
case MCE_RSP_CMD_ILLEGAL:
ir->need_reset = true;
break;
default:
break;
}
return;
}
if (cmd != MCE_CMD_PORT_IR)
return;
switch (subcmd) {
/* 2-byte return value commands */
case MCE_RSP_EQIRTIMEOUT:
ir->rc->timeout = (*hi << 8 | *lo) * MCE_TIME_UNIT;
break;
case MCE_RSP_EQIRNUMPORTS:
ir->num_txports = *hi;
ir->num_rxports = *lo;
break;
case MCE_RSP_EQIRRXCFCNT:
/*
* The carrier cycle counter can overflow and wrap around
* without notice from the device. So frequency measurement
* will be inaccurate with long duration IR.
*
* The long-range (non learning) receiver always reports
* zero count so we always ignore its report.
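	 *
	 * Worked example (illustrative numbers, ignoring the cycles_fix
	 * adjustment): 200 pulse time units (10 ms of IR "on") with 380
	 * counted carrier cycles gives (1000000 / 50) * 380 / 200 =
	 * 38000 Hz.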
*/
if (ir->carrier_report_enabled && ir->learning_active &&
ir->pulse_tunit > 0) {
carrier_cycles = (*hi << 8 | *lo);
/*
* Adjust carrier cycle count by adding
* 1 missed count per pulse "on"
*/
cycles_fix = ir->flags.rx2 == 2 ? ir->pulse_count : 0;
rawir.carrier_report = 1;
rawir.carrier = (1000000u / MCE_TIME_UNIT) *
(carrier_cycles + cycles_fix) /
ir->pulse_tunit;
dev_dbg(ir->dev, "RX carrier frequency %u Hz (pulse count = %u, cycles = %u, duration = %u, rx2 = %u)",
rawir.carrier, ir->pulse_count, carrier_cycles,
ir->pulse_tunit, ir->flags.rx2);
ir_raw_event_store(ir->rc, &rawir);
}
break;
/* 1-byte return value commands */
case MCE_RSP_EQIRTXPORTS:
ir->tx_mask = *hi;
break;
case MCE_RSP_EQIRRXPORTEN:
ir->learning_active = ((*hi & 0x02) == 0x02);
if (ir->rxports_active != *hi) {
dev_info(ir->dev, "%s-range (0x%x) receiver active",
ir->learning_active ? "short" : "long", *hi);
ir->rxports_active = *hi;
}
break;
/* No return value commands */
case MCE_RSP_CMD_ILLEGAL:
case MCE_RSP_TX_TIMEOUT:
ir->need_reset = true;
break;
default:
break;
}
}
static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
{
struct ir_raw_event rawir = {};
bool event = false;
int i = 0;
/* skip meaningless 0xb1 0x60 header bytes on orig receiver */
if (ir->flags.microsoft_gen1)
i = 2;
/* if there's no data, just return now */
if (buf_len <= i)
return;
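	/*
	 * Illustrative rx buffer walk (hypothetical bytes): 9f 06 01 41
	 * parses as a carrier response (CMD_HEADER -> SUBCMD -> 2 bytes
	 * of CMD_DATA); 82 92 5a is an IR data packet of two samples
	 * (900us pulse, 4500us space); a lone 80 marks the end of IR data.
	 */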
for (; i < buf_len; i++) {
switch (ir->parser_state) {
case SUBCMD:
ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]);
mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1,
ir->rem + 2, false);
if (i + ir->rem < buf_len)
mceusb_handle_command(ir, &ir->buf_in[i - 1]);
ir->parser_state = CMD_DATA;
break;
case PARSE_IRDATA:
ir->rem--;
rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK);
if (unlikely(!rawir.duration)) {
dev_dbg(ir->dev, "nonsensical irdata %02x with duration 0",
ir->buf_in[i]);
break;
}
if (rawir.pulse) {
ir->pulse_tunit += rawir.duration;
ir->pulse_count++;
}
rawir.duration *= MCE_TIME_UNIT;
dev_dbg(ir->dev, "Storing %s %u us (%02x)",
rawir.pulse ? "pulse" : "space",
rawir.duration, ir->buf_in[i]);
if (ir_raw_event_store_with_filter(ir->rc, &rawir))
event = true;
break;
case CMD_DATA:
ir->rem--;
break;
case CMD_HEADER:
ir->cmd = ir->buf_in[i];
if ((ir->cmd == MCE_CMD_PORT_IR) ||
((ir->cmd & MCE_PORT_MASK) !=
MCE_COMMAND_IRDATA)) {
/*
* got PORT_SYS, PORT_IR, or unknown
* command response prefix
*/
ir->parser_state = SUBCMD;
continue;
}
/*
* got IR data prefix (0x80 + num_bytes)
* decode MCE packets of the form {0x83, AA, BB, CC}
* IR data packets can span USB messages
*/
ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
mceusb_dev_printdata(ir, ir->buf_in, buf_len,
i, ir->rem + 1, false);
if (ir->rem) {
ir->parser_state = PARSE_IRDATA;
} else {
struct ir_raw_event ev = {
.timeout = 1,
.duration = ir->rc->timeout
};
if (ir_raw_event_store_with_filter(ir->rc,
&ev))
event = true;
ir->pulse_tunit = 0;
ir->pulse_count = 0;
}
break;
}
if (ir->parser_state != CMD_HEADER && !ir->rem)
ir->parser_state = CMD_HEADER;
}
/*
* Accept IR data spanning multiple rx buffers.
* Reject MCE command response spanning multiple rx buffers.
*/
if (ir->parser_state != PARSE_IRDATA || !ir->rem)
ir->parser_state = CMD_HEADER;
if (event) {
dev_dbg(ir->dev, "processed IR data");
ir_raw_event_handle(ir->rc);
}
}
static void mceusb_dev_recv(struct urb *urb)
{
struct mceusb_dev *ir;
if (!urb)
return;
ir = urb->context;
if (!ir) {
usb_unlink_urb(urb);
return;
}
switch (urb->status) {
/* success */
case 0:
mceusb_process_ir_data(ir, urb->actual_length);
break;
case -ECONNRESET:
case -ENOENT:
case -EILSEQ:
case -EPROTO:
case -ESHUTDOWN:
usb_unlink_urb(urb);
return;
case -EPIPE:
dev_err(ir->dev, "Error: urb status = %d (RX HALT)",
urb->status);
mceusb_defer_kevent(ir, EVENT_RX_HALT);
return;
default:
dev_err(ir->dev, "Error: urb status = %d", urb->status);
break;
}
usb_submit_urb(urb, GFP_ATOMIC);
}
static void mceusb_get_emulator_version(struct mceusb_dev *ir)
{
	/* If we get no reply or an illegal command reply, it's ver 1, says MS */
ir->emver = 1;
mce_command_out(ir, GET_EMVER, sizeof(GET_EMVER));
}
static void mceusb_gen1_init(struct mceusb_dev *ir)
{
int ret;
struct device *dev = ir->dev;
char data[USB_CTRL_MSG_SZ];
/*
* This is a strange one. Windows issues a set address to the device
	 * on the receive control pipe and expects a certain value pair back
*/
ret = usb_control_msg_recv(ir->usbdev, 0, USB_REQ_SET_ADDRESS,
USB_DIR_IN | USB_TYPE_VENDOR,
0, 0, data, USB_CTRL_MSG_SZ, 3000,
GFP_KERNEL);
dev_dbg(dev, "set address - ret = %d", ret);
dev_dbg(dev, "set address - data[0] = %d, data[1] = %d",
data[0], data[1]);
/* set feature: bit rate 38400 bps */
ret = usb_control_msg_send(ir->usbdev, 0,
USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
0xc04e, 0x0000, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set feature - ret = %d", ret);
/* bRequest 4: set char length to 8 bits */
ret = usb_control_msg_send(ir->usbdev, 0,
4, USB_TYPE_VENDOR,
0x0808, 0x0000, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set char length - retB = %d", ret);
/* bRequest 2: set handshaking to use DTR/DSR */
ret = usb_control_msg_send(ir->usbdev, 0,
2, USB_TYPE_VENDOR,
0x0000, 0x0100, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set handshake - retC = %d", ret);
/* device resume */
mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
/* get hw/sw revision? */
mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
}
static void mceusb_gen2_init(struct mceusb_dev *ir)
{
/* device resume */
mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
/* get wake version (protocol, key, address) */
mce_command_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
/* unknown what this one actually returns... */
mce_command_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
}
static void mceusb_get_parameters(struct mceusb_dev *ir)
{
int i;
unsigned char cmdbuf[3] = { MCE_CMD_PORT_SYS,
MCE_CMD_GETPORTSTATUS, 0x00 };
/* defaults, if the hardware doesn't support querying */
ir->num_txports = 2;
ir->num_rxports = 2;
/* get number of tx and rx ports */
mce_command_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
/* get the carrier and frequency */
mce_command_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
if (ir->num_txports && !ir->flags.no_tx)
/* get the transmitter bitmask */
mce_command_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
/* get receiver timeout value */
mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
/* get receiver sensor setting */
mce_command_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
for (i = 0; i < ir->num_txports; i++) {
cmdbuf[2] = i;
mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
}
}
static void mceusb_flash_led(struct mceusb_dev *ir)
{
if (ir->emver < 2)
return;
mce_command_out(ir, FLASH_LED, sizeof(FLASH_LED));
}
/*
* Workqueue function
* for resetting or recovering device after occurrence of error events
* specified in ir->kevent bit field.
* Function runs (via schedule_work()) in non-interrupt context, for
* calls here (such as usb_clear_halt()) requiring non-interrupt context.
*/
static void mceusb_deferred_kevent(struct work_struct *work)
{
struct mceusb_dev *ir =
container_of(work, struct mceusb_dev, kevent);
int status;
dev_err(ir->dev, "kevent handler called (flags 0x%lx)",
ir->kevent_flags);
if (test_bit(EVENT_RST_PEND, &ir->kevent_flags)) {
dev_err(ir->dev, "kevent handler canceled pending USB Reset Device");
return;
}
if (test_bit(EVENT_RX_HALT, &ir->kevent_flags)) {
usb_unlink_urb(ir->urb_in);
status = usb_clear_halt(ir->usbdev, ir->pipe_in);
dev_err(ir->dev, "rx clear halt status = %d", status);
if (status < 0) {
/*
* Unable to clear RX halt/stall.
* Will need to call usb_reset_device().
*/
dev_err(ir->dev,
"stuck RX HALT state requires USB Reset Device to clear");
usb_queue_reset_device(ir->usbintf);
set_bit(EVENT_RST_PEND, &ir->kevent_flags);
clear_bit(EVENT_RX_HALT, &ir->kevent_flags);
/* Cancel all other error events and handlers */
clear_bit(EVENT_TX_HALT, &ir->kevent_flags);
return;
}
clear_bit(EVENT_RX_HALT, &ir->kevent_flags);
status = usb_submit_urb(ir->urb_in, GFP_KERNEL);
if (status < 0) {
dev_err(ir->dev, "rx unhalt submit urb error = %d",
status);
}
}
if (test_bit(EVENT_TX_HALT, &ir->kevent_flags)) {
status = usb_clear_halt(ir->usbdev, ir->pipe_out);
dev_err(ir->dev, "tx clear halt status = %d", status);
if (status < 0) {
/*
* Unable to clear TX halt/stall.
* Will need to call usb_reset_device().
*/
dev_err(ir->dev,
"stuck TX HALT state requires USB Reset Device to clear");
usb_queue_reset_device(ir->usbintf);
set_bit(EVENT_RST_PEND, &ir->kevent_flags);
clear_bit(EVENT_TX_HALT, &ir->kevent_flags);
/* Cancel all other error events and handlers */
clear_bit(EVENT_RX_HALT, &ir->kevent_flags);
return;
}
clear_bit(EVENT_TX_HALT, &ir->kevent_flags);
}
}
static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
{
struct usb_device *udev = ir->usbdev;
struct device *dev = ir->dev;
struct rc_dev *rc;
int ret;
rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rc) {
dev_err(dev, "remote dev allocation failed");
goto out;
}
snprintf(ir->name, sizeof(ir->name), "%s (%04x:%04x)",
mceusb_model[ir->model].name ?
mceusb_model[ir->model].name :
"Media Center Ed. eHome Infrared Remote Transceiver",
le16_to_cpu(ir->usbdev->descriptor.idVendor),
le16_to_cpu(ir->usbdev->descriptor.idProduct));
usb_make_path(ir->usbdev, ir->phys, sizeof(ir->phys));
rc->device_name = ir->name;
rc->input_phys = ir->phys;
usb_to_input_id(ir->usbdev, &rc->input_id);
rc->dev.parent = dev;
rc->priv = ir;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rc->rx_resolution = MCE_TIME_UNIT;
rc->min_timeout = MCE_TIME_UNIT;
rc->timeout = MS_TO_US(100);
if (!mceusb_model[ir->model].broken_irtimeout) {
rc->s_timeout = mceusb_set_timeout;
rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
} else {
/*
* If we can't set the timeout using CMD_SETIRTIMEOUT, we can
* rely on software timeouts for timeouts < 100ms.
*/
rc->max_timeout = rc->timeout;
}
if (!ir->flags.no_tx) {
rc->s_tx_mask = mceusb_set_tx_mask;
rc->s_tx_carrier = mceusb_set_tx_carrier;
rc->tx_ir = mceusb_tx_ir;
}
if (ir->flags.rx2 > 0) {
rc->s_wideband_receiver = mceusb_set_rx_wideband;
rc->s_carrier_report = mceusb_set_rx_carrier_report;
}
rc->driver_name = DRIVER_NAME;
switch (le16_to_cpu(udev->descriptor.idVendor)) {
case VENDOR_HAUPPAUGE:
rc->map_name = RC_MAP_HAUPPAUGE;
break;
case VENDOR_PCTV:
rc->map_name = RC_MAP_PINNACLE_PCTV_HD;
break;
default:
rc->map_name = RC_MAP_RC6_MCE;
}
if (mceusb_model[ir->model].rc_map)
rc->map_name = mceusb_model[ir->model].rc_map;
ret = rc_register_device(rc);
if (ret < 0) {
dev_err(dev, "remote dev registration failed");
goto out;
}
return rc;
out:
rc_free_device(rc);
return NULL;
}
static int mceusb_dev_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_host_interface *idesc;
struct usb_endpoint_descriptor *ep = NULL;
struct usb_endpoint_descriptor *ep_in = NULL;
struct usb_endpoint_descriptor *ep_out = NULL;
struct mceusb_dev *ir = NULL;
int pipe, maxp, i, res;
char buf[63], name[128] = "";
enum mceusb_model_type model = id->driver_info;
bool is_gen3;
bool is_microsoft_gen1;
bool tx_mask_normal;
int ir_intfnum;
dev_dbg(&intf->dev, "%s called", __func__);
idesc = intf->cur_altsetting;
is_gen3 = mceusb_model[model].mce_gen3;
is_microsoft_gen1 = mceusb_model[model].mce_gen1;
tx_mask_normal = mceusb_model[model].tx_mask_normal;
ir_intfnum = mceusb_model[model].ir_intfnum;
/* There are multi-function devices with non-IR interfaces */
if (idesc->desc.bInterfaceNumber != ir_intfnum)
return -ENODEV;
/* step through the endpoints to find first bulk in and out endpoint */
for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
ep = &idesc->endpoint[i].desc;
if (ep_in == NULL) {
if (usb_endpoint_is_bulk_in(ep)) {
ep_in = ep;
dev_dbg(&intf->dev, "acceptable bulk inbound endpoint found\n");
} else if (usb_endpoint_is_int_in(ep)) {
ep_in = ep;
ep_in->bInterval = 1;
dev_dbg(&intf->dev, "acceptable interrupt inbound endpoint found\n");
}
}
if (ep_out == NULL) {
if (usb_endpoint_is_bulk_out(ep)) {
ep_out = ep;
dev_dbg(&intf->dev, "acceptable bulk outbound endpoint found\n");
} else if (usb_endpoint_is_int_out(ep)) {
ep_out = ep;
ep_out->bInterval = 1;
dev_dbg(&intf->dev, "acceptable interrupt outbound endpoint found\n");
}
}
}
if (!ep_in || !ep_out) {
dev_dbg(&intf->dev, "required endpoints not found\n");
return -ENODEV;
}
if (usb_endpoint_xfer_int(ep_in))
pipe = usb_rcvintpipe(dev, ep_in->bEndpointAddress);
else
pipe = usb_rcvbulkpipe(dev, ep_in->bEndpointAddress);
maxp = usb_maxpacket(dev, pipe);
ir = kzalloc(sizeof(struct mceusb_dev), GFP_KERNEL);
if (!ir)
goto mem_alloc_fail;
ir->pipe_in = pipe;
ir->buf_in = usb_alloc_coherent(dev, maxp, GFP_KERNEL, &ir->dma_in);
if (!ir->buf_in)
goto buf_in_alloc_fail;
ir->urb_in = usb_alloc_urb(0, GFP_KERNEL);
if (!ir->urb_in)
goto urb_in_alloc_fail;
ir->usbintf = intf;
ir->usbdev = usb_get_dev(dev);
ir->dev = &intf->dev;
ir->len_in = maxp;
ir->flags.microsoft_gen1 = is_microsoft_gen1;
ir->flags.tx_mask_normal = tx_mask_normal;
ir->flags.no_tx = mceusb_model[model].no_tx;
ir->flags.rx2 = mceusb_model[model].rx2;
ir->model = model;
/* Saving usb interface data for use by the transmitter routine */
ir->usb_ep_out = ep_out;
if (usb_endpoint_xfer_int(ep_out))
ir->pipe_out = usb_sndintpipe(ir->usbdev,
ep_out->bEndpointAddress);
else
ir->pipe_out = usb_sndbulkpipe(ir->usbdev,
ep_out->bEndpointAddress);
if (dev->descriptor.iManufacturer
&& usb_string(dev, dev->descriptor.iManufacturer,
buf, sizeof(buf)) > 0)
strscpy(name, buf, sizeof(name));
if (dev->descriptor.iProduct
&& usb_string(dev, dev->descriptor.iProduct,
buf, sizeof(buf)) > 0)
snprintf(name + strlen(name), sizeof(name) - strlen(name),
" %s", buf);
/*
* Initialize async USB error handler before registering
* or activating any mceusb RX and TX functions
*/
INIT_WORK(&ir->kevent, mceusb_deferred_kevent);
ir->rc = mceusb_init_rc_dev(ir);
if (!ir->rc)
goto rc_dev_fail;
/* wire up inbound data handler */
if (usb_endpoint_xfer_int(ep_in))
usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
mceusb_dev_recv, ir, ep_in->bInterval);
else
usb_fill_bulk_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
mceusb_dev_recv, ir);
ir->urb_in->transfer_dma = ir->dma_in;
ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* flush buffers on the device */
dev_dbg(&intf->dev, "Flushing receive buffers");
res = usb_submit_urb(ir->urb_in, GFP_KERNEL);
if (res)
dev_err(&intf->dev, "failed to flush buffers: %d", res);
/* figure out which firmware/emulator version this hardware has */
mceusb_get_emulator_version(ir);
/* initialize device */
if (ir->flags.microsoft_gen1)
mceusb_gen1_init(ir);
else if (!is_gen3)
mceusb_gen2_init(ir);
mceusb_get_parameters(ir);
mceusb_flash_led(ir);
if (!ir->flags.no_tx)
mceusb_set_tx_mask(ir->rc, MCE_DEFAULT_TX_MASK);
usb_set_intfdata(intf, ir);
/* enable wake via this device */
device_set_wakeup_capable(ir->dev, true);
device_set_wakeup_enable(ir->dev, true);
dev_info(&intf->dev, "Registered %s with mce emulator interface version %x",
name, ir->emver);
dev_info(&intf->dev, "%x tx ports (0x%x cabled) and %x rx sensors (0x%x active)",
ir->num_txports, ir->txports_cabled,
ir->num_rxports, ir->rxports_active);
return 0;
/* Error-handling path */
rc_dev_fail:
cancel_work_sync(&ir->kevent);
usb_put_dev(ir->usbdev);
usb_kill_urb(ir->urb_in);
usb_free_urb(ir->urb_in);
urb_in_alloc_fail:
usb_free_coherent(dev, maxp, ir->buf_in, ir->dma_in);
buf_in_alloc_fail:
kfree(ir);
mem_alloc_fail:
dev_err(&intf->dev, "%s: device setup failed!", __func__);
return -ENOMEM;
}
static void mceusb_dev_disconnect(struct usb_interface *intf)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct mceusb_dev *ir = usb_get_intfdata(intf);
dev_dbg(&intf->dev, "%s called", __func__);
usb_set_intfdata(intf, NULL);
if (!ir)
return;
ir->usbdev = NULL;
cancel_work_sync(&ir->kevent);
rc_unregister_device(ir->rc);
usb_kill_urb(ir->urb_in);
usb_free_urb(ir->urb_in);
usb_free_coherent(dev, ir->len_in, ir->buf_in, ir->dma_in);
usb_put_dev(dev);
kfree(ir);
}
static int mceusb_dev_suspend(struct usb_interface *intf, pm_message_t message)
{
struct mceusb_dev *ir = usb_get_intfdata(intf);
dev_info(ir->dev, "suspend");
usb_kill_urb(ir->urb_in);
return 0;
}
static int mceusb_dev_resume(struct usb_interface *intf)
{
struct mceusb_dev *ir = usb_get_intfdata(intf);
dev_info(ir->dev, "resume");
if (usb_submit_urb(ir->urb_in, GFP_ATOMIC))
return -EIO;
return 0;
}
static struct usb_driver mceusb_dev_driver = {
.name = DRIVER_NAME,
.probe = mceusb_dev_probe,
.disconnect = mceusb_dev_disconnect,
.suspend = mceusb_dev_suspend,
.resume = mceusb_dev_resume,
.reset_resume = mceusb_dev_resume,
.id_table = mceusb_dev_table
};
module_usb_driver(mceusb_dev_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, mceusb_dev_table);
| linux-master | drivers/media/rc/mceusb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Streamzap Remote Control driver
*
* Copyright (c) 2005 Christoph Bartelmus <[email protected]>
* Copyright (c) 2010 Jarod Wilson <[email protected]>
*
* This driver was based on the work of Greg Wickham and Adrian
* Dewhurst. It was substantially rewritten to support correct signal
* gaps and now maintains a delay buffer, which is used to present
* consistent timing behaviour to user space applications. Without the
* delay buffer an ugly hack would be required in lircd, which can
* cause sluggish signal decoding in certain situations.
*
* Ported to in-kernel ir-core interface by Jarod Wilson
*
* This driver is based on the USB skeleton driver packaged with the
* kernel; copyright (C) 2001-2003 Greg Kroah-Hartman ([email protected])
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <media/rc-core.h>
#define DRIVER_NAME "streamzap"
#define DRIVER_DESC "Streamzap Remote Control driver"
#define USB_STREAMZAP_VENDOR_ID 0x0e9c
#define USB_STREAMZAP_PRODUCT_ID 0x0000
/* table of devices that work with this driver */
static const struct usb_device_id streamzap_table[] = {
/* Streamzap Remote Control */
{ USB_DEVICE(USB_STREAMZAP_VENDOR_ID, USB_STREAMZAP_PRODUCT_ID) },
/* Terminating entry */
{ }
};
MODULE_DEVICE_TABLE(usb, streamzap_table);
#define SZ_PULSE_MASK 0xf0
#define SZ_SPACE_MASK 0x0f
#define SZ_TIMEOUT 0xff
#define SZ_RESOLUTION 256
/* number of samples buffered */
#define SZ_BUF_LEN 128
enum StreamzapDecoderState {
PulseSpace,
FullPulse,
FullSpace,
IgnorePulse
};
/* structure to hold our device specific stuff */
struct streamzap_ir {
/* ir-core */
struct rc_dev *rdev;
/* core device info */
struct device *dev;
/* usb */
struct urb *urb_in;
/* buffer & dma */
unsigned char *buf_in;
dma_addr_t dma_in;
unsigned int buf_in_len;
/* track what state we're in */
enum StreamzapDecoderState decoder_state;
char phys[64];
};
/* local function prototypes */
static int streamzap_probe(struct usb_interface *interface,
const struct usb_device_id *id);
static void streamzap_disconnect(struct usb_interface *interface);
static void streamzap_callback(struct urb *urb);
static int streamzap_suspend(struct usb_interface *intf, pm_message_t message);
static int streamzap_resume(struct usb_interface *intf);
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver streamzap_driver = {
.name = DRIVER_NAME,
.probe = streamzap_probe,
.disconnect = streamzap_disconnect,
.suspend = streamzap_suspend,
.resume = streamzap_resume,
.id_table = streamzap_table,
};
static void sz_push(struct streamzap_ir *sz, struct ir_raw_event rawir)
{
dev_dbg(sz->dev, "Storing %s with duration %u us\n",
(rawir.pulse ? "pulse" : "space"), rawir.duration);
ir_raw_event_store_with_filter(sz->rdev, &rawir);
}
static void sz_push_full_pulse(struct streamzap_ir *sz,
unsigned char value)
{
struct ir_raw_event rawir = {
.pulse = true,
.duration = value * SZ_RESOLUTION + SZ_RESOLUTION / 2,
};
sz_push(sz, rawir);
}
static void sz_push_half_pulse(struct streamzap_ir *sz,
unsigned char value)
{
sz_push_full_pulse(sz, (value & SZ_PULSE_MASK) >> 4);
}
static void sz_push_full_space(struct streamzap_ir *sz,
unsigned char value)
{
struct ir_raw_event rawir = {
.pulse = false,
.duration = value * SZ_RESOLUTION + SZ_RESOLUTION / 2,
};
sz_push(sz, rawir);
}
static void sz_push_half_space(struct streamzap_ir *sz,
unsigned long value)
{
sz_push_full_space(sz, value & SZ_SPACE_MASK);
}
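/*
 * Worked example of the encoding used above: each sample byte packs a
 * pulse length in the high nibble and a space length in the low
 * nibble, in units of SZ_RESOLUTION (256) us. A byte of 0x3c thus
 * decodes to a 3 * 256 + 128 = 896 us pulse followed by a
 * 12 * 256 + 128 = 3200 us space (the +128 centers the value within
 * its quantization step).
 */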
/*
* streamzap_callback - usb IRQ handler callback
*
* This procedure is invoked on reception of data from
* the usb remote.
*/
static void streamzap_callback(struct urb *urb)
{
struct streamzap_ir *sz;
unsigned int i;
int len;
if (!urb)
return;
sz = urb->context;
len = urb->actual_length;
switch (urb->status) {
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/*
* this urb is terminated, clean up.
* sz might already be invalid at this point
*/
dev_err(sz->dev, "urb terminated, status: %d\n", urb->status);
return;
default:
break;
}
dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len);
for (i = 0; i < len; i++) {
dev_dbg(sz->dev, "sz->buf_in[%d]: %x\n",
i, (unsigned char)sz->buf_in[i]);
switch (sz->decoder_state) {
case PulseSpace:
if ((sz->buf_in[i] & SZ_PULSE_MASK) ==
SZ_PULSE_MASK) {
sz->decoder_state = FullPulse;
continue;
} else if ((sz->buf_in[i] & SZ_SPACE_MASK)
== SZ_SPACE_MASK) {
sz_push_half_pulse(sz, sz->buf_in[i]);
sz->decoder_state = FullSpace;
continue;
} else {
sz_push_half_pulse(sz, sz->buf_in[i]);
sz_push_half_space(sz, sz->buf_in[i]);
}
break;
case FullPulse:
sz_push_full_pulse(sz, sz->buf_in[i]);
sz->decoder_state = IgnorePulse;
break;
case FullSpace:
if (sz->buf_in[i] == SZ_TIMEOUT) {
struct ir_raw_event rawir = {
.pulse = false,
.duration = sz->rdev->timeout
};
sz_push(sz, rawir);
} else {
sz_push_full_space(sz, sz->buf_in[i]);
}
sz->decoder_state = PulseSpace;
break;
case IgnorePulse:
if ((sz->buf_in[i] & SZ_SPACE_MASK) ==
SZ_SPACE_MASK) {
sz->decoder_state = FullSpace;
continue;
}
sz_push_half_space(sz, sz->buf_in[i]);
sz->decoder_state = PulseSpace;
break;
}
}
ir_raw_event_handle(sz->rdev);
usb_submit_urb(urb, GFP_ATOMIC);
}
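/*
 * Illustrative trace through the state machine above (made-up bytes,
 * not captured data): starting in PulseSpace, the stream
 * 0xf0 0x18 0x2c 0xff decodes as
 *   0xf0 - high nibble is SZ_PULSE_MASK, switch to FullPulse
 *   0x18 - full pulse of 24 * 256 + 128 = 6272 us, then IgnorePulse
 *   0x2c - low nibble is not SZ_SPACE_MASK, push the half space
 *          (12 * 256 + 128 us), back to PulseSpace
 *   0xff - high nibble is SZ_PULSE_MASK again, switch to FullPulse
 */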
static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz,
struct usb_device *usbdev)
{
struct rc_dev *rdev;
struct device *dev = sz->dev;
int ret;
rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev)
goto out;
usb_make_path(usbdev, sz->phys, sizeof(sz->phys));
strlcat(sz->phys, "/input0", sizeof(sz->phys));
rdev->device_name = "Streamzap PC Remote Infrared Receiver";
rdev->input_phys = sz->phys;
usb_to_input_id(usbdev, &rdev->input_id);
rdev->dev.parent = dev;
rdev->priv = sz;
rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rdev->driver_name = DRIVER_NAME;
rdev->map_name = RC_MAP_STREAMZAP;
rdev->rx_resolution = SZ_RESOLUTION;
ret = rc_register_device(rdev);
if (ret < 0) {
dev_err(dev, "remote input device register failed\n");
goto out;
}
return rdev;
out:
rc_free_device(rdev);
return NULL;
}
/*
* streamzap_probe
*
 * Called by usb-core to associate the driver with a candidate device.
 * Returns 0 on success, or a negative error code on failure.
*/
static int streamzap_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *usbdev = interface_to_usbdev(intf);
struct usb_endpoint_descriptor *endpoint;
struct usb_host_interface *iface_host;
struct streamzap_ir *sz = NULL;
int retval = -ENOMEM;
int pipe, maxp;
/* Allocate space for device driver specific data */
sz = kzalloc(sizeof(struct streamzap_ir), GFP_KERNEL);
if (!sz)
return -ENOMEM;
/* Check to ensure endpoint information matches requirements */
iface_host = intf->cur_altsetting;
if (iface_host->desc.bNumEndpoints != 1) {
dev_err(&intf->dev, "%s: Unexpected desc.bNumEndpoints (%d)\n",
__func__, iface_host->desc.bNumEndpoints);
retval = -ENODEV;
goto free_sz;
}
endpoint = &iface_host->endpoint[0].desc;
if (!usb_endpoint_dir_in(endpoint)) {
dev_err(&intf->dev, "%s: endpoint doesn't match input device 02%02x\n",
__func__, endpoint->bEndpointAddress);
retval = -ENODEV;
goto free_sz;
}
if (!usb_endpoint_xfer_int(endpoint)) {
dev_err(&intf->dev, "%s: endpoint attributes don't match xfer 02%02x\n",
__func__, endpoint->bmAttributes);
retval = -ENODEV;
goto free_sz;
}
pipe = usb_rcvintpipe(usbdev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(usbdev, pipe);
if (maxp == 0) {
dev_err(&intf->dev, "%s: endpoint Max Packet Size is 0!?!\n",
__func__);
retval = -ENODEV;
goto free_sz;
}
/* Allocate the USB buffer and IRQ URB */
sz->buf_in = usb_alloc_coherent(usbdev, maxp, GFP_ATOMIC, &sz->dma_in);
if (!sz->buf_in)
goto free_sz;
sz->urb_in = usb_alloc_urb(0, GFP_KERNEL);
if (!sz->urb_in)
goto free_buf_in;
sz->dev = &intf->dev;
sz->buf_in_len = maxp;
sz->rdev = streamzap_init_rc_dev(sz, usbdev);
if (!sz->rdev)
goto rc_dev_fail;
sz->decoder_state = PulseSpace;
/* FIXME: don't yet have a way to set this */
sz->rdev->timeout = SZ_TIMEOUT * SZ_RESOLUTION;
#if 0
/* not yet supported, depends on patches from maxim */
/* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
	sz->rdev->min_timeout = SZ_TIMEOUT * SZ_RESOLUTION;
	sz->rdev->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION;
#endif
/* Complete final initialisations */
usb_fill_int_urb(sz->urb_in, usbdev, pipe, sz->buf_in,
maxp, streamzap_callback, sz, endpoint->bInterval);
sz->urb_in->transfer_dma = sz->dma_in;
sz->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_set_intfdata(intf, sz);
if (usb_submit_urb(sz->urb_in, GFP_ATOMIC))
dev_err(sz->dev, "urb submit failed\n");
return 0;
rc_dev_fail:
usb_free_urb(sz->urb_in);
free_buf_in:
usb_free_coherent(usbdev, maxp, sz->buf_in, sz->dma_in);
free_sz:
kfree(sz);
return retval;
}
/*
* streamzap_disconnect
*
* Called by the usb core when the device is removed from the system.
*
 * This routine guarantees that the driver will not submit any more
 * urbs by clearing the interface data, and it terminates the
 * in-flight receive urb with usb_kill_urb() before freeing it.
*/
static void streamzap_disconnect(struct usb_interface *interface)
{
struct streamzap_ir *sz = usb_get_intfdata(interface);
struct usb_device *usbdev = interface_to_usbdev(interface);
usb_set_intfdata(interface, NULL);
if (!sz)
return;
rc_unregister_device(sz->rdev);
usb_kill_urb(sz->urb_in);
usb_free_urb(sz->urb_in);
usb_free_coherent(usbdev, sz->buf_in_len, sz->buf_in, sz->dma_in);
kfree(sz);
}
static int streamzap_suspend(struct usb_interface *intf, pm_message_t message)
{
struct streamzap_ir *sz = usb_get_intfdata(intf);
usb_kill_urb(sz->urb_in);
return 0;
}
static int streamzap_resume(struct usb_interface *intf)
{
struct streamzap_ir *sz = usb_get_intfdata(intf);
if (usb_submit_urb(sz->urb_in, GFP_NOIO)) {
dev_err(sz->dev, "Error submitting urb\n");
return -EIO;
}
return 0;
}
module_usb_driver(streamzap_driver);
MODULE_AUTHOR("Jarod Wilson <[email protected]>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/media/rc/streamzap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* imon.c: input and display driver for SoundGraph iMON IR/VFD/LCD
*
* Copyright(C) 2010 Jarod Wilson <[email protected]>
* Portions based on the original lirc_imon driver,
* Copyright(C) 2004 Venky Raju([email protected])
*
* Huge thanks to R. Geoff Newbury for invaluable debugging on the
* 0xffdc iMON devices, and for sending me one to hack on, without
* which the support for them wouldn't be nearly as good. Thanks
* also to the numerous 0xffdc device owners that tested auto-config
* support for me and provided debug dumps from their devices.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>
#include <linux/input.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <media/rc-core.h>
#include <linux/timer.h>
#define MOD_AUTHOR "Jarod Wilson <[email protected]>"
#define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display"
#define MOD_NAME "imon"
#define MOD_VERSION "0.9.4"
#define DISPLAY_MINOR_BASE 144
#define DEVICE_NAME "lcd%d"
#define BUF_CHUNK_SIZE 8
#define BUF_SIZE 128
#define BIT_DURATION 250 /* each bit received is 250us */
#define IMON_CLOCK_ENABLE_PACKETS 2
/*** P R O T O T Y P E S ***/
/* USB Callback prototypes */
static int imon_probe(struct usb_interface *interface,
const struct usb_device_id *id);
static void imon_disconnect(struct usb_interface *interface);
static void usb_rx_callback_intf0(struct urb *urb);
static void usb_rx_callback_intf1(struct urb *urb);
static void usb_tx_callback(struct urb *urb);
/* suspend/resume support */
static int imon_resume(struct usb_interface *intf);
static int imon_suspend(struct usb_interface *intf, pm_message_t message);
/* Display file_operations function prototypes */
static int display_open(struct inode *inode, struct file *file);
static int display_close(struct inode *inode, struct file *file);
/* VFD write operation */
static ssize_t vfd_write(struct file *file, const char __user *buf,
size_t n_bytes, loff_t *pos);
/* LCD file_operations override function prototypes */
static ssize_t lcd_write(struct file *file, const char __user *buf,
size_t n_bytes, loff_t *pos);
/*** G L O B A L S ***/
struct imon_panel_key_table {
u64 hw_code;
u32 keycode;
};
struct imon_usb_dev_descr {
__u16 flags;
#define IMON_NO_FLAGS 0
#define IMON_NEED_20MS_PKT_DELAY 1
#define IMON_SUPPRESS_REPEATED_KEYS 2
struct imon_panel_key_table key_table[];
};
struct imon_context {
struct device *dev;
/* Newer devices have two interfaces */
struct usb_device *usbdev_intf0;
struct usb_device *usbdev_intf1;
bool display_supported; /* not all controllers do */
bool display_isopen; /* display port has been opened */
bool rf_device; /* true if iMON 2.4G LT/DT RF device */
bool rf_isassociating; /* RF remote associating */
bool dev_present_intf0; /* USB device presence, interface 0 */
bool dev_present_intf1; /* USB device presence, interface 1 */
struct mutex lock; /* to lock this object */
wait_queue_head_t remove_ok; /* For unexpected USB disconnects */
struct usb_endpoint_descriptor *rx_endpoint_intf0;
struct usb_endpoint_descriptor *rx_endpoint_intf1;
struct usb_endpoint_descriptor *tx_endpoint;
struct urb *rx_urb_intf0;
struct urb *rx_urb_intf1;
struct urb *tx_urb;
bool tx_control;
unsigned char usb_rx_buf[8];
unsigned char usb_tx_buf[8];
unsigned int send_packet_delay;
struct tx_t {
unsigned char data_buf[35]; /* user data buffer */
struct completion finished; /* wait for write to finish */
bool busy; /* write in progress */
int status; /* status of tx completion */
} tx;
u16 vendor; /* usb vendor ID */
u16 product; /* usb product ID */
struct rc_dev *rdev; /* rc-core device for remote */
struct input_dev *idev; /* input device for panel & IR mouse */
struct input_dev *touch; /* input device for touchscreen */
spinlock_t kc_lock; /* make sure we get keycodes right */
u32 kc; /* current input keycode */
u32 last_keycode; /* last reported input keycode */
u32 rc_scancode; /* the computed remote scancode */
u8 rc_toggle; /* the computed remote toggle bit */
u64 rc_proto; /* iMON or MCE (RC6) IR protocol? */
bool release_code; /* some keys send a release code */
u8 display_type; /* store the display type */
bool pad_mouse; /* toggle kbd(0)/mouse(1) mode */
char name_rdev[128]; /* rc input device name */
char phys_rdev[64]; /* rc input device phys path */
char name_idev[128]; /* input device name */
char phys_idev[64]; /* input device phys path */
char name_touch[128]; /* touch screen name */
char phys_touch[64]; /* touch screen phys path */
struct timer_list ttimer; /* touch screen timer */
int touch_x; /* x coordinate on touchscreen */
int touch_y; /* y coordinate on touchscreen */
const struct imon_usb_dev_descr *dev_descr;
/* device description with key */
/* table for front panels */
/*
* Fields for deferring free_imon_context().
*
* Since reference to "struct imon_context" is stored into
* "struct file"->private_data, we need to remember
* how many file descriptors might access this "struct imon_context".
*/
refcount_t users;
/*
* Use a flag for telling display_open()/vfd_write()/lcd_write() that
* imon_disconnect() was already called.
*/
bool disconnected;
/*
* We need to wait for RCU grace period in order to allow
* display_open() to safely check ->disconnected and increment ->users.
*/
struct rcu_head rcu;
};
#define TOUCH_TIMEOUT (HZ/30)
/* vfd character device file operations */
static const struct file_operations vfd_fops = {
.owner = THIS_MODULE,
.open = display_open,
.write = vfd_write,
.release = display_close,
.llseek = noop_llseek,
};
/* lcd character device file operations */
static const struct file_operations lcd_fops = {
.owner = THIS_MODULE,
.open = display_open,
.write = lcd_write,
.release = display_close,
.llseek = noop_llseek,
};
enum {
IMON_DISPLAY_TYPE_AUTO = 0,
IMON_DISPLAY_TYPE_VFD = 1,
IMON_DISPLAY_TYPE_LCD = 2,
IMON_DISPLAY_TYPE_VGA = 3,
IMON_DISPLAY_TYPE_NONE = 4,
};
enum {
IMON_KEY_IMON = 0,
IMON_KEY_MCE = 1,
IMON_KEY_PANEL = 2,
};
static struct usb_class_driver imon_vfd_class = {
.name = DEVICE_NAME,
.fops = &vfd_fops,
.minor_base = DISPLAY_MINOR_BASE,
};
static struct usb_class_driver imon_lcd_class = {
.name = DEVICE_NAME,
.fops = &lcd_fops,
.minor_base = DISPLAY_MINOR_BASE,
};
/* imon receiver front panel/knob key table */
static const struct imon_usb_dev_descr imon_default_table = {
.flags = IMON_NO_FLAGS,
.key_table = {
{ 0x000000000f00ffeell, KEY_MEDIA }, /* Go */
{ 0x000000001200ffeell, KEY_UP },
{ 0x000000001300ffeell, KEY_DOWN },
{ 0x000000001400ffeell, KEY_LEFT },
{ 0x000000001500ffeell, KEY_RIGHT },
{ 0x000000001600ffeell, KEY_ENTER },
{ 0x000000001700ffeell, KEY_ESC },
{ 0x000000001f00ffeell, KEY_AUDIO },
{ 0x000000002000ffeell, KEY_VIDEO },
{ 0x000000002100ffeell, KEY_CAMERA },
{ 0x000000002700ffeell, KEY_DVD },
{ 0x000000002300ffeell, KEY_TV },
{ 0x000000002b00ffeell, KEY_EXIT },
{ 0x000000002c00ffeell, KEY_SELECT },
{ 0x000000002d00ffeell, KEY_MENU },
{ 0x000000000500ffeell, KEY_PREVIOUS },
{ 0x000000000700ffeell, KEY_REWIND },
{ 0x000000000400ffeell, KEY_STOP },
{ 0x000000003c00ffeell, KEY_PLAYPAUSE },
{ 0x000000000800ffeell, KEY_FASTFORWARD },
{ 0x000000000600ffeell, KEY_NEXT },
{ 0x000000010000ffeell, KEY_RIGHT },
{ 0x000001000000ffeell, KEY_LEFT },
{ 0x000000003d00ffeell, KEY_SELECT },
{ 0x000100000000ffeell, KEY_VOLUMEUP },
{ 0x010000000000ffeell, KEY_VOLUMEDOWN },
{ 0x000000000100ffeell, KEY_MUTE },
/* 0xffdc iMON MCE VFD */
{ 0x00010000ffffffeell, KEY_VOLUMEUP },
{ 0x01000000ffffffeell, KEY_VOLUMEDOWN },
{ 0x00000001ffffffeell, KEY_MUTE },
{ 0x0000000fffffffeell, KEY_MEDIA },
{ 0x00000012ffffffeell, KEY_UP },
{ 0x00000013ffffffeell, KEY_DOWN },
{ 0x00000014ffffffeell, KEY_LEFT },
{ 0x00000015ffffffeell, KEY_RIGHT },
{ 0x00000016ffffffeell, KEY_ENTER },
{ 0x00000017ffffffeell, KEY_ESC },
/* iMON Knob values */
{ 0x000100ffffffffeell, KEY_VOLUMEUP },
{ 0x010000ffffffffeell, KEY_VOLUMEDOWN },
{ 0x000008ffffffffeell, KEY_MUTE },
{ 0, KEY_RESERVED },
}
};
static const struct imon_usb_dev_descr imon_OEM_VFD = {
.flags = IMON_NEED_20MS_PKT_DELAY,
.key_table = {
{ 0x000000000f00ffeell, KEY_MEDIA }, /* Go */
{ 0x000000001200ffeell, KEY_UP },
{ 0x000000001300ffeell, KEY_DOWN },
{ 0x000000001400ffeell, KEY_LEFT },
{ 0x000000001500ffeell, KEY_RIGHT },
{ 0x000000001600ffeell, KEY_ENTER },
{ 0x000000001700ffeell, KEY_ESC },
{ 0x000000001f00ffeell, KEY_AUDIO },
{ 0x000000002b00ffeell, KEY_EXIT },
{ 0x000000002c00ffeell, KEY_SELECT },
{ 0x000000002d00ffeell, KEY_MENU },
{ 0x000000000500ffeell, KEY_PREVIOUS },
{ 0x000000000700ffeell, KEY_REWIND },
{ 0x000000000400ffeell, KEY_STOP },
{ 0x000000003c00ffeell, KEY_PLAYPAUSE },
{ 0x000000000800ffeell, KEY_FASTFORWARD },
{ 0x000000000600ffeell, KEY_NEXT },
{ 0x000000010000ffeell, KEY_RIGHT },
{ 0x000001000000ffeell, KEY_LEFT },
{ 0x000000003d00ffeell, KEY_SELECT },
{ 0x000100000000ffeell, KEY_VOLUMEUP },
{ 0x010000000000ffeell, KEY_VOLUMEDOWN },
{ 0x000000000100ffeell, KEY_MUTE },
/* 0xffdc iMON MCE VFD */
{ 0x00010000ffffffeell, KEY_VOLUMEUP },
{ 0x01000000ffffffeell, KEY_VOLUMEDOWN },
{ 0x00000001ffffffeell, KEY_MUTE },
{ 0x0000000fffffffeell, KEY_MEDIA },
{ 0x00000012ffffffeell, KEY_UP },
{ 0x00000013ffffffeell, KEY_DOWN },
{ 0x00000014ffffffeell, KEY_LEFT },
{ 0x00000015ffffffeell, KEY_RIGHT },
{ 0x00000016ffffffeell, KEY_ENTER },
{ 0x00000017ffffffeell, KEY_ESC },
/* iMON Knob values */
{ 0x000100ffffffffeell, KEY_VOLUMEUP },
{ 0x010000ffffffffeell, KEY_VOLUMEDOWN },
{ 0x000008ffffffffeell, KEY_MUTE },
{ 0, KEY_RESERVED },
}
};
/* imon receiver front panel/knob key table for DH102*/
static const struct imon_usb_dev_descr imon_DH102 = {
.flags = IMON_NO_FLAGS,
.key_table = {
{ 0x000100000000ffeell, KEY_VOLUMEUP },
{ 0x010000000000ffeell, KEY_VOLUMEDOWN },
{ 0x000000010000ffeell, KEY_MUTE },
{ 0x0000000f0000ffeell, KEY_MEDIA },
{ 0x000000120000ffeell, KEY_UP },
{ 0x000000130000ffeell, KEY_DOWN },
{ 0x000000140000ffeell, KEY_LEFT },
{ 0x000000150000ffeell, KEY_RIGHT },
{ 0x000000160000ffeell, KEY_ENTER },
{ 0x000000170000ffeell, KEY_ESC },
{ 0x0000002b0000ffeell, KEY_EXIT },
{ 0x0000002c0000ffeell, KEY_SELECT },
{ 0x0000002d0000ffeell, KEY_MENU },
{ 0, KEY_RESERVED }
}
};
/* imon ultrabay front panel key table */
static const struct imon_usb_dev_descr ultrabay_table = {
.flags = IMON_SUPPRESS_REPEATED_KEYS,
.key_table = {
{ 0x0000000f0000ffeell, KEY_MEDIA }, /* Go */
{ 0x000000000100ffeell, KEY_UP },
{ 0x000000000001ffeell, KEY_DOWN },
{ 0x000000160000ffeell, KEY_ENTER },
{ 0x0000001f0000ffeell, KEY_AUDIO }, /* Music */
{ 0x000000200000ffeell, KEY_VIDEO }, /* Movie */
{ 0x000000210000ffeell, KEY_CAMERA }, /* Photo */
{ 0x000000270000ffeell, KEY_DVD }, /* DVD */
{ 0x000000230000ffeell, KEY_TV }, /* TV */
{ 0x000000050000ffeell, KEY_PREVIOUS }, /* Previous */
{ 0x000000070000ffeell, KEY_REWIND },
{ 0x000000040000ffeell, KEY_STOP },
{ 0x000000020000ffeell, KEY_PLAYPAUSE },
{ 0x000000080000ffeell, KEY_FASTFORWARD },
{ 0x000000060000ffeell, KEY_NEXT }, /* Next */
{ 0x000100000000ffeell, KEY_VOLUMEUP },
{ 0x010000000000ffeell, KEY_VOLUMEDOWN },
{ 0x000000010000ffeell, KEY_MUTE },
{ 0, KEY_RESERVED },
}
};
/*
* USB Device ID for iMON USB Control Boards
*
* The Windows drivers contain 6 different inf files, more or less one for
* each new device until the 0x0034-0x0046 devices, which all use the same
* driver. Some of the devices in the 34-46 range haven't been definitively
* identified yet. Early devices have either a TriGem Computer, Inc. or a
* Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later
* devices use the SoundGraph vendor ID (0x15c2). This driver only supports
* the ffdc and later devices, which do onboard decoding.
*/
static const struct usb_device_id imon_usb_id_table[] = {
/*
* Several devices with this same device ID, all use iMON_PAD.inf
* SoundGraph iMON PAD (IR & VFD)
* SoundGraph iMON PAD (IR & LCD)
* SoundGraph iMON Knob (IR only)
*/
{ USB_DEVICE(0x15c2, 0xffdc),
.driver_info = (unsigned long)&imon_default_table },
/*
* Newer devices, all driven by the latest iMON Windows driver, full
* list of device IDs extracted via 'strings Setup/data1.hdr |grep 15c2'
* Need user input to fill in details on unknown devices.
*/
/* SoundGraph iMON OEM Touch LCD (IR & 7" VGA LCD) */
{ USB_DEVICE(0x15c2, 0x0034),
.driver_info = (unsigned long)&imon_DH102 },
/* SoundGraph iMON OEM Touch LCD (IR & 4.3" VGA LCD) */
{ USB_DEVICE(0x15c2, 0x0035),
.driver_info = (unsigned long)&imon_default_table},
/* SoundGraph iMON OEM VFD (IR & VFD) */
{ USB_DEVICE(0x15c2, 0x0036),
.driver_info = (unsigned long)&imon_OEM_VFD },
/* device specifics unknown */
{ USB_DEVICE(0x15c2, 0x0037),
.driver_info = (unsigned long)&imon_default_table},
/* SoundGraph iMON OEM LCD (IR & LCD) */
{ USB_DEVICE(0x15c2, 0x0038),
.driver_info = (unsigned long)&imon_default_table},
/* SoundGraph iMON UltraBay (IR & LCD) */
{ USB_DEVICE(0x15c2, 0x0039),
.driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
{ USB_DEVICE(0x15c2, 0x003a),
.driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
{ USB_DEVICE(0x15c2, 0x003b),
.driver_info = (unsigned long)&imon_default_table},
/* SoundGraph iMON OEM Inside (IR only) */
{ USB_DEVICE(0x15c2, 0x003c),
.driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
{ USB_DEVICE(0x15c2, 0x003d),
.driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
{ USB_DEVICE(0x15c2, 0x003e),
.driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
{ USB_DEVICE(0x15c2, 0x003f),
.driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
{ USB_DEVICE(0x15c2, 0x0040),
.driver_info = (unsigned long)&imon_default_table},
/* SoundGraph iMON MINI (IR only) */
{ USB_DEVICE(0x15c2, 0x0041),
.driver_info = (unsigned long)&imon_default_table},
/* Antec Veris Multimedia Station EZ External (IR only) */
{ USB_DEVICE(0x15c2, 0x0042),
.driver_info = (unsigned long)&imon_default_table},
/* Antec Veris Multimedia Station Basic Internal (IR only) */
{ USB_DEVICE(0x15c2, 0x0043),
.driver_info = (unsigned long)&imon_default_table},
/* Antec Veris Multimedia Station Elite (IR & VFD) */
{ USB_DEVICE(0x15c2, 0x0044),
.driver_info = (unsigned long)&imon_default_table},
/* Antec Veris Multimedia Station Premiere (IR & LCD) */
{ USB_DEVICE(0x15c2, 0x0045),
.driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
{ USB_DEVICE(0x15c2, 0x0046),
.driver_info = (unsigned long)&imon_default_table},
{}
};
/* USB Device data */
static struct usb_driver imon_driver = {
.name = MOD_NAME,
.probe = imon_probe,
.disconnect = imon_disconnect,
.suspend = imon_suspend,
.resume = imon_resume,
.id_table = imon_usb_id_table,
};
/* Module bookkeeping bits */
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESC);
MODULE_VERSION(MOD_VERSION);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, imon_usb_id_table);
static bool debug;
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)");
/* lcd, vfd, vga or none? should be auto-detected, but can be overridden... */
static int display_type;
module_param(display_type, int, S_IRUGO);
MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, 1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)");
static int pad_stabilize = 1;
module_param(pad_stabilize, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD presses in arrow key mode. 0=disable, 1=enable (default).");
/*
* In certain use cases, mouse mode isn't really helpful, and could actually
* cause confusion, so allow disabling it when the IR device is open.
*/
static bool nomouse;
module_param(nomouse, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is open. 0=don't disable, 1=disable. (default: don't disable)");
/* threshold at which a pad push registers as an arrow key in kbd mode */
static int pad_thresh;
module_param(pad_thresh, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an arrow key in kbd mode (default: 28)");
static void free_imon_context(struct imon_context *ictx)
{
struct device *dev = ictx->dev;
usb_free_urb(ictx->tx_urb);
WARN_ON(ictx->dev_present_intf0);
usb_free_urb(ictx->rx_urb_intf0);
WARN_ON(ictx->dev_present_intf1);
usb_free_urb(ictx->rx_urb_intf1);
kfree_rcu(ictx, rcu);
dev_dbg(dev, "%s: iMON context freed\n", __func__);
}
/*
* Called when the Display device (e.g. /dev/lcd0)
* is opened by the application.
*/
static int display_open(struct inode *inode, struct file *file)
{
struct usb_interface *interface;
struct imon_context *ictx = NULL;
int subminor;
int retval = 0;
subminor = iminor(inode);
interface = usb_find_interface(&imon_driver, subminor);
if (!interface) {
pr_err("could not find interface for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
rcu_read_lock();
ictx = usb_get_intfdata(interface);
if (!ictx || ictx->disconnected || !refcount_inc_not_zero(&ictx->users)) {
rcu_read_unlock();
pr_err("no context found for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
rcu_read_unlock();
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
pr_err("display not supported by device\n");
retval = -ENODEV;
} else if (ictx->display_isopen) {
pr_err("display port is already open\n");
retval = -EBUSY;
} else {
ictx->display_isopen = true;
file->private_data = ictx;
dev_dbg(ictx->dev, "display port opened\n");
}
mutex_unlock(&ictx->lock);
if (retval && refcount_dec_and_test(&ictx->users))
free_imon_context(ictx);
exit:
return retval;
}
/*
* Called when the display device (e.g. /dev/lcd0)
* is closed by the application.
*/
static int display_close(struct inode *inode, struct file *file)
{
struct imon_context *ictx = file->private_data;
int retval = 0;
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
pr_err("display not supported by device\n");
retval = -ENODEV;
} else if (!ictx->display_isopen) {
pr_err("display is not open\n");
retval = -EIO;
} else {
ictx->display_isopen = false;
dev_dbg(ictx->dev, "display port closed\n");
}
mutex_unlock(&ictx->lock);
if (refcount_dec_and_test(&ictx->users))
free_imon_context(ictx);
return retval;
}
/*
 * Sends a packet to the device -- this function must be called with
 * ictx->lock held, otherwise the unlock/lock sequence performed while
 * waiting for tx to complete can lead to a deadlock.
*/
static int send_packet(struct imon_context *ictx)
{
unsigned int pipe;
unsigned long timeout;
int interval = 0;
int retval = 0;
struct usb_ctrlrequest *control_req = NULL;
/* Check if we need to use control or interrupt urb */
if (!ictx->tx_control) {
pipe = usb_sndintpipe(ictx->usbdev_intf0,
ictx->tx_endpoint->bEndpointAddress);
interval = ictx->tx_endpoint->bInterval;
usb_fill_int_urb(ictx->tx_urb, ictx->usbdev_intf0, pipe,
ictx->usb_tx_buf,
sizeof(ictx->usb_tx_buf),
usb_tx_callback, ictx, interval);
ictx->tx_urb->actual_length = 0;
} else {
/* fill request into kmalloc'ed space: */
control_req = kmalloc(sizeof(*control_req), GFP_KERNEL);
if (control_req == NULL)
return -ENOMEM;
/* setup packet is '21 09 0200 0001 0008' */
control_req->bRequestType = 0x21;
control_req->bRequest = 0x09;
control_req->wValue = cpu_to_le16(0x0200);
control_req->wIndex = cpu_to_le16(0x0001);
control_req->wLength = cpu_to_le16(0x0008);
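		/*
		 * Decoded, this is a standard HID SET_REPORT class
		 * request: bmRequestType 0x21 = host-to-device, class,
		 * interface; bRequest 0x09 = SET_REPORT; wValue 0x0200 =
		 * output report, report ID 0; wIndex 0x0001 =
		 * interface 1; wLength 0x0008 = 8 data bytes.
		 */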
/* control pipe is endpoint 0x00 */
pipe = usb_sndctrlpipe(ictx->usbdev_intf0, 0);
/* build the control urb */
usb_fill_control_urb(ictx->tx_urb, ictx->usbdev_intf0,
pipe, (unsigned char *)control_req,
ictx->usb_tx_buf,
sizeof(ictx->usb_tx_buf),
usb_tx_callback, ictx);
ictx->tx_urb->actual_length = 0;
}
reinit_completion(&ictx->tx.finished);
ictx->tx.busy = true;
smp_rmb(); /* ensure later readers know we're busy */
retval = usb_submit_urb(ictx->tx_urb, GFP_KERNEL);
if (retval) {
ictx->tx.busy = false;
smp_rmb(); /* ensure later readers know we're not busy */
pr_err_ratelimited("error submitting urb(%d)\n", retval);
} else {
/* Wait for transmission to complete (or abort) */
retval = wait_for_completion_interruptible(
&ictx->tx.finished);
if (retval) {
usb_kill_urb(ictx->tx_urb);
pr_err_ratelimited("task interrupted\n");
}
ictx->tx.busy = false;
retval = ictx->tx.status;
if (retval)
pr_err_ratelimited("packet tx failed (%d)\n", retval);
}
kfree(control_req);
/*
* Induce a mandatory delay before returning, as otherwise,
* send_packet can get called so rapidly as to overwhelm the device,
* particularly on faster systems and/or those with quirky usb.
*/
timeout = msecs_to_jiffies(ictx->send_packet_delay);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(timeout);
return retval;
}
/*
* Sends an associate packet to the iMON 2.4G.
*
* This might not be such a good idea, since it has an id collision with
* some versions of the "IR & VFD" combo. The only way to determine if it
* is an RF version is to look at the product description string. (Which
* we currently do not fetch).
*/
static int send_associate_24g(struct imon_context *ictx)
{
const unsigned char packet[8] = { 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x20 };
if (!ictx) {
pr_err("no context for device\n");
return -ENODEV;
}
if (!ictx->dev_present_intf0) {
pr_err("no iMON device present\n");
return -ENODEV;
}
memcpy(ictx->usb_tx_buf, packet, sizeof(packet));
return send_packet(ictx);
}
/*
* Sends packets to setup and show clock on iMON display
*
* Arguments: year - last 2 digits of year, month - 1..12,
* day - 1..31, dow - day of the week (0-Sun...6-Sat),
* hour - 0..23, minute - 0..59, second - 0..59
*/
static int send_set_imon_clock(struct imon_context *ictx,
unsigned int year, unsigned int month,
unsigned int day, unsigned int dow,
unsigned int hour, unsigned int minute,
unsigned int second)
{
unsigned char clock_enable_pkt[IMON_CLOCK_ENABLE_PACKETS][8];
int retval = 0;
int i;
if (!ictx) {
pr_err("no context for device\n");
return -ENODEV;
}
switch (ictx->display_type) {
case IMON_DISPLAY_TYPE_LCD:
clock_enable_pkt[0][0] = 0x80;
clock_enable_pkt[0][1] = year;
clock_enable_pkt[0][2] = month-1;
clock_enable_pkt[0][3] = day;
clock_enable_pkt[0][4] = hour;
clock_enable_pkt[0][5] = minute;
clock_enable_pkt[0][6] = second;
clock_enable_pkt[1][0] = 0x80;
clock_enable_pkt[1][1] = 0;
clock_enable_pkt[1][2] = 0;
clock_enable_pkt[1][3] = 0;
clock_enable_pkt[1][4] = 0;
clock_enable_pkt[1][5] = 0;
clock_enable_pkt[1][6] = 0;
if (ictx->product == 0xffdc) {
clock_enable_pkt[0][7] = 0x50;
clock_enable_pkt[1][7] = 0x51;
} else {
clock_enable_pkt[0][7] = 0x88;
clock_enable_pkt[1][7] = 0x8a;
}
break;
case IMON_DISPLAY_TYPE_VFD:
clock_enable_pkt[0][0] = year;
clock_enable_pkt[0][1] = month-1;
clock_enable_pkt[0][2] = day;
clock_enable_pkt[0][3] = dow;
clock_enable_pkt[0][4] = hour;
clock_enable_pkt[0][5] = minute;
clock_enable_pkt[0][6] = second;
clock_enable_pkt[0][7] = 0x40;
clock_enable_pkt[1][0] = 0;
clock_enable_pkt[1][1] = 0;
clock_enable_pkt[1][2] = 1;
clock_enable_pkt[1][3] = 0;
clock_enable_pkt[1][4] = 0;
clock_enable_pkt[1][5] = 0;
clock_enable_pkt[1][6] = 0;
clock_enable_pkt[1][7] = 0x42;
break;
default:
return -ENODEV;
}
for (i = 0; i < IMON_CLOCK_ENABLE_PACKETS; i++) {
memcpy(ictx->usb_tx_buf, clock_enable_pkt[i], 8);
retval = send_packet(ictx);
if (retval) {
pr_err("send_packet failed for packet %d\n", i);
break;
}
}
return retval;
}
/*
* These are the sysfs functions to handle the association on the iMON 2.4G LT.
*/
static ssize_t associate_remote_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct imon_context *ictx = dev_get_drvdata(d);
if (!ictx)
return -ENODEV;
mutex_lock(&ictx->lock);
if (ictx->rf_isassociating)
strscpy(buf, "associating\n", PAGE_SIZE);
else
strscpy(buf, "closed\n", PAGE_SIZE);
dev_info(d, "Visit https://www.lirc.org/html/imon-24g.html for instructions on how to associate your iMON 2.4G DT/LT remote\n");
mutex_unlock(&ictx->lock);
return strlen(buf);
}
static ssize_t associate_remote_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct imon_context *ictx;
ictx = dev_get_drvdata(d);
if (!ictx)
return -ENODEV;
mutex_lock(&ictx->lock);
ictx->rf_isassociating = true;
send_associate_24g(ictx);
mutex_unlock(&ictx->lock);
return count;
}
/*
* sysfs functions to control internal imon clock
*/
static ssize_t imon_clock_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct imon_context *ictx = dev_get_drvdata(d);
size_t len;
if (!ictx)
return -ENODEV;
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
len = snprintf(buf, PAGE_SIZE, "Not supported.");
} else {
len = snprintf(buf, PAGE_SIZE,
"To set the clock on your iMON display:\n"
"# date \"+%%y %%m %%d %%w %%H %%M %%S\" > imon_clock\n"
"%s", ictx->display_isopen ?
"\nNOTE: imon device must be closed\n" : "");
}
mutex_unlock(&ictx->lock);
return len;
}
static ssize_t imon_clock_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct imon_context *ictx = dev_get_drvdata(d);
ssize_t retval;
unsigned int year, month, day, dow, hour, minute, second;
if (!ictx)
return -ENODEV;
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
retval = -ENODEV;
goto exit;
} else if (ictx->display_isopen) {
retval = -EBUSY;
goto exit;
}
if (sscanf(buf, "%u %u %u %u %u %u %u", &year, &month, &day, &dow,
&hour, &minute, &second) != 7) {
retval = -EINVAL;
goto exit;
}
if ((month < 1 || month > 12) ||
(day < 1 || day > 31) || (dow > 6) ||
(hour > 23) || (minute > 59) || (second > 59)) {
retval = -EINVAL;
goto exit;
}
retval = send_set_imon_clock(ictx, year, month, day, dow,
hour, minute, second);
if (retval)
goto exit;
retval = count;
exit:
mutex_unlock(&ictx->lock);
return retval;
}
static DEVICE_ATTR_RW(imon_clock);
static DEVICE_ATTR_RW(associate_remote);
static struct attribute *imon_display_sysfs_entries[] = {
&dev_attr_imon_clock.attr,
NULL
};
static const struct attribute_group imon_display_attr_group = {
.attrs = imon_display_sysfs_entries
};
static struct attribute *imon_rf_sysfs_entries[] = {
&dev_attr_associate_remote.attr,
NULL
};
static const struct attribute_group imon_rf_attr_group = {
.attrs = imon_rf_sysfs_entries
};
/*
* Writes data to the VFD. The iMON VFD is 2x16 characters
* and requires data in 5 consecutive USB interrupt packets,
* each packet but the last carrying 7 bytes.
*
 * I don't know if the VFD board supports features such as
 * scrolling, clearing rows, blanking, etc., so the caller must
 * provide a full screen of data. If fewer than 32 bytes are
 * provided, spaces are appended to generate a full screen.
*/
static ssize_t vfd_write(struct file *file, const char __user *buf,
size_t n_bytes, loff_t *pos)
{
int i;
int offset;
int seq;
int retval = 0;
struct imon_context *ictx = file->private_data;
static const unsigned char vfd_packet6[] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
if (ictx->disconnected)
return -ENODEV;
if (mutex_lock_interruptible(&ictx->lock))
return -ERESTARTSYS;
if (!ictx->dev_present_intf0) {
pr_err_ratelimited("no iMON device present\n");
retval = -ENODEV;
goto exit;
}
if (n_bytes <= 0 || n_bytes > 32) {
pr_err_ratelimited("invalid payload size\n");
retval = -EINVAL;
goto exit;
}
if (copy_from_user(ictx->tx.data_buf, buf, n_bytes)) {
retval = -EFAULT;
goto exit;
}
/* Pad with spaces */
for (i = n_bytes; i < 32; ++i)
ictx->tx.data_buf[i] = ' ';
for (i = 32; i < 35; ++i)
ictx->tx.data_buf[i] = 0xFF;
offset = 0;
seq = 0;
do {
memcpy(ictx->usb_tx_buf, ictx->tx.data_buf + offset, 7);
ictx->usb_tx_buf[7] = (unsigned char) seq;
retval = send_packet(ictx);
if (retval) {
pr_err_ratelimited("send packet #%d failed\n", seq / 2);
goto exit;
} else {
seq += 2;
offset += 7;
}
} while (offset < 35);
/* Send packet #6 */
memcpy(ictx->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6));
ictx->usb_tx_buf[7] = (unsigned char) seq;
retval = send_packet(ictx);
if (retval)
pr_err_ratelimited("send packet #%d failed\n", seq / 2);
exit:
mutex_unlock(&ictx->lock);
return (!retval) ? n_bytes : retval;
}
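/*
 * Packetization example for the loop above: the 32 screen bytes plus
 * three 0xFF pad bytes make 35 bytes, sent as five 7-byte chunks with
 * sequence bytes 0, 2, 4, 6 and 8; vfd_packet6 then goes out with
 * sequence byte 10.
 */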
/*
 * Writes data to the LCD. The iMON OEM LCD screen expects 8-byte
 * packets, and each write must supply exactly one such packet of
 * 8 raw binary bytes; any other length is rejected.
*
* The device itself is not a "traditional" text-mode display. It's
* actually a 16x96 pixel bitmap display. That means if you want to
* display text, you've got to have your own "font" and translate the
* text into bitmaps for display. This is really flexible (you can
* display whatever diacritics you need, and so on), but it's also
* a lot more complicated than most LCDs...
*/
static ssize_t lcd_write(struct file *file, const char __user *buf,
size_t n_bytes, loff_t *pos)
{
int retval = 0;
struct imon_context *ictx = file->private_data;
if (ictx->disconnected)
return -ENODEV;
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
pr_err_ratelimited("no iMON display present\n");
retval = -ENODEV;
goto exit;
}
if (n_bytes != 8) {
pr_err_ratelimited("invalid payload size: %d (expected 8)\n",
(int)n_bytes);
retval = -EINVAL;
goto exit;
}
if (copy_from_user(ictx->usb_tx_buf, buf, 8)) {
retval = -EFAULT;
goto exit;
}
retval = send_packet(ictx);
if (retval) {
pr_err_ratelimited("send packet failed!\n");
goto exit;
} else {
dev_dbg(ictx->dev, "%s: write %d bytes to LCD\n",
__func__, (int) n_bytes);
}
exit:
mutex_unlock(&ictx->lock);
return (!retval) ? n_bytes : retval;
}
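/*
 * Minimal userspace sketch of driving the node above (illustrative;
 * the zeroed payload is a placeholder, not a documented LCD command):
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int example_lcd_write(void)
{
	unsigned char pkt[8] = { 0 };	/* exactly one 8-byte LCD packet */
	int fd = open("/dev/lcd0", O_WRONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = (write(fd, pkt, sizeof(pkt)) == sizeof(pkt)) ? 0 : -1;
	close(fd);
	return ret;
}
#endif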
/*
* Callback function for USB core API: transmit data
*/
static void usb_tx_callback(struct urb *urb)
{
struct imon_context *ictx;
if (!urb)
return;
ictx = (struct imon_context *)urb->context;
if (!ictx)
return;
ictx->tx.status = urb->status;
/* notify waiters that write has finished */
ictx->tx.busy = false;
smp_rmb(); /* ensure later readers know we're not busy */
complete(&ictx->tx.finished);
}
/*
* report touchscreen input
*/
static void imon_touch_display_timeout(struct timer_list *t)
{
struct imon_context *ictx = from_timer(ictx, t, ttimer);
if (ictx->display_type != IMON_DISPLAY_TYPE_VGA)
return;
input_report_abs(ictx->touch, ABS_X, ictx->touch_x);
input_report_abs(ictx->touch, ABS_Y, ictx->touch_y);
input_report_key(ictx->touch, BTN_TOUCH, 0x00);
input_sync(ictx->touch);
}
/*
* iMON IR receivers support two different signal sets -- those used by
* the iMON remotes, and those used by the Windows MCE remotes (which is
* really just RC-6), but only one or the other at a time, as the signals
* are decoded onboard the receiver.
*
* This function gets called two different ways, one way is from
* rc_register_device, for initial protocol selection/setup, and the other is
* via a userspace-initiated protocol change request, either by direct sysfs
* prodding or by something like ir-keytable. In the rc_register_device case,
* the imon context lock is already held, but when initiated from userspace,
* it is not, so we must acquire it prior to calling send_packet, which
* requires that the lock is held.
*/
static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto)
{
int retval;
struct imon_context *ictx = rc->priv;
struct device *dev = ictx->dev;
bool unlock = false;
unsigned char ir_proto_packet[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
if (*rc_proto && !(*rc_proto & rc->allowed_protocols))
dev_warn(dev, "Looks like you're trying to use an IR protocol this device does not support\n");
if (*rc_proto & RC_PROTO_BIT_RC6_MCE) {
dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
ir_proto_packet[0] = 0x01;
*rc_proto = RC_PROTO_BIT_RC6_MCE;
} else if (*rc_proto & RC_PROTO_BIT_IMON) {
dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
/* ir_proto_packet[0] = 0x00; // already the default */
*rc_proto = RC_PROTO_BIT_IMON;
} else {
dev_warn(dev, "Unsupported IR protocol specified, overriding to iMON IR protocol\n");
if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
/* ir_proto_packet[0] = 0x00; // already the default */
*rc_proto = RC_PROTO_BIT_IMON;
}
memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
if (!mutex_is_locked(&ictx->lock)) {
unlock = true;
mutex_lock(&ictx->lock);
}
retval = send_packet(ictx);
if (retval)
goto out;
ictx->rc_proto = *rc_proto;
ictx->pad_mouse = false;
out:
if (unlock)
mutex_unlock(&ictx->lock);
return retval;
}
/*
* The directional pad behaves a bit differently, depending on whether this is
* one of the older ffdc devices or a newer device. Newer devices appear to
* have a higher resolution matrix for more precise mouse movement, but it
* makes things overly sensitive in keyboard mode, so we do some interesting
* contortions to make it less touchy. Older devices run through the same
* routine with shorter timeout and a smaller threshold.
*/
static int stabilize(int a, int b, u16 timeout, u16 threshold)
{
ktime_t ct;
static ktime_t prev_time;
static ktime_t hit_time;
static int x, y, prev_result, hits;
int result = 0;
long msec, msec_hit;
ct = ktime_get();
msec = ktime_ms_delta(ct, prev_time);
msec_hit = ktime_ms_delta(ct, hit_time);
if (msec > 100) {
x = 0;
y = 0;
hits = 0;
}
x += a;
y += b;
prev_time = ct;
if (abs(x) > threshold || abs(y) > threshold) {
if (abs(y) > abs(x))
result = (y > 0) ? 0x7F : 0x80;
else
result = (x > 0) ? 0x7F00 : 0x8000;
x = 0;
y = 0;
if (result == prev_result) {
hits++;
if (hits > 3) {
switch (result) {
case 0x7F:
y = 17 * threshold / 30;
break;
case 0x80:
y -= 17 * threshold / 30;
break;
case 0x7F00:
x = 17 * threshold / 30;
break;
case 0x8000:
x -= 17 * threshold / 30;
break;
}
}
if (hits == 2 && msec_hit < timeout) {
result = 0;
hits = 1;
}
} else {
prev_result = result;
hits = 1;
hit_time = ct;
}
}
return result;
}
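/*
 * Worked example for stabilize() (illustrative numbers): with
 * threshold 28, pad samples of (10,2), (12,1) and (9,0) arriving
 * within 100ms accumulate to x = 31, y = 3; |x| exceeds the threshold
 * and |x| > |y|, so the call returns 0x7F00 (right). A second
 * crossing in the same direction within 'timeout' ms is swallowed
 * once (hits == 2) to damp jitter near the threshold.
 */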
static u32 imon_remote_key_lookup(struct imon_context *ictx, u32 scancode)
{
u32 keycode;
u32 release;
bool is_release_code = false;
/* Look for the initial press of a button */
keycode = rc_g_keycode_from_table(ictx->rdev, scancode);
ictx->rc_toggle = 0x0;
ictx->rc_scancode = scancode;
/* Look for the release of a button */
if (keycode == KEY_RESERVED) {
release = scancode & ~0x4000;
keycode = rc_g_keycode_from_table(ictx->rdev, release);
if (keycode != KEY_RESERVED)
is_release_code = true;
}
ictx->release_code = is_release_code;
return keycode;
}
static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 scancode)
{
u32 keycode;
#define MCE_KEY_MASK 0x7000
#define MCE_TOGGLE_BIT 0x8000
/*
	 * On some receivers, mce keys decode to 0x800f04xx and 0x800f84xx
	 * (the toggle bit flipping between alternating key presses), while
	 * on other receivers, we see 0x800f74xx and 0x800ff4xx. To keep
	 * the table trim, we always or in the bits to look up 0x800ff4xx,
* but we can't or them into all codes, as some keys are decoded in
* a different way w/o the same use of the toggle bit...
*/
if (scancode & 0x80000000)
scancode = scancode | MCE_KEY_MASK | MCE_TOGGLE_BIT;
ictx->rc_scancode = scancode;
keycode = rc_g_keycode_from_table(ictx->rdev, scancode);
	/* not used in mce mode, but make sure we know it's false */
ictx->release_code = false;
return keycode;
}
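/*
 * Example of the normalization above (illustrative scancode): an
 * incoming 0x800f0416 (toggle bit clear) is looked up as
 * 0x800f0416 | 0x7000 | 0x8000 = 0x800ff416, the same table entry its
 * toggled sibling 0x800f8416 resolves to.
 */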
static u32 imon_panel_key_lookup(struct imon_context *ictx, u64 code)
{
const struct imon_panel_key_table *key_table;
u32 keycode = KEY_RESERVED;
int i;
key_table = ictx->dev_descr->key_table;
for (i = 0; key_table[i].hw_code != 0; i++) {
if (key_table[i].hw_code == (code | 0xffee)) {
keycode = key_table[i].keycode;
break;
}
}
ictx->release_code = false;
return keycode;
}
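/*
 * All panel entries in the key tables end in 0xffee, so the raw code
 * is OR'd with 0xffee before comparison; e.g. a hypothetical raw code
 * of 0x0000000012000000 would match the KEY_UP entry
 * 0x000000001200ffee in imon_default_table.
 */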
static bool imon_mouse_event(struct imon_context *ictx,
unsigned char *buf, int len)
{
signed char rel_x = 0x00, rel_y = 0x00;
u8 right_shift = 1;
bool mouse_input = true;
int dir = 0;
unsigned long flags;
spin_lock_irqsave(&ictx->kc_lock, flags);
/* newer iMON device PAD or mouse button */
if (ictx->product != 0xffdc && (buf[0] & 0x01) && len == 5) {
rel_x = buf[2];
rel_y = buf[3];
right_shift = 1;
/* 0xffdc iMON PAD or mouse button input */
} else if (ictx->product == 0xffdc && (buf[0] & 0x40) &&
!((buf[1] & 0x01) || ((buf[1] >> 2) & 0x01))) {
rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 |
(buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6;
if (buf[0] & 0x02)
rel_x |= ~0x0f;
rel_x = rel_x + rel_x / 2;
rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 |
(buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6;
if (buf[0] & 0x01)
rel_y |= ~0x0f;
rel_y = rel_y + rel_y / 2;
right_shift = 2;
/* some ffdc devices decode mouse buttons differently... */
} else if (ictx->product == 0xffdc && (buf[0] == 0x68)) {
right_shift = 2;
/* ch+/- buttons, which we use for an emulated scroll wheel */
} else if (ictx->kc == KEY_CHANNELUP && (buf[2] & 0x40) != 0x40) {
dir = 1;
} else if (ictx->kc == KEY_CHANNELDOWN && (buf[2] & 0x40) != 0x40) {
dir = -1;
} else
mouse_input = false;
spin_unlock_irqrestore(&ictx->kc_lock, flags);
if (mouse_input) {
dev_dbg(ictx->dev, "sending mouse data via input subsystem\n");
if (dir) {
input_report_rel(ictx->idev, REL_WHEEL, dir);
} else if (rel_x || rel_y) {
input_report_rel(ictx->idev, REL_X, rel_x);
input_report_rel(ictx->idev, REL_Y, rel_y);
} else {
input_report_key(ictx->idev, BTN_LEFT, buf[1] & 0x1);
input_report_key(ictx->idev, BTN_RIGHT,
buf[1] >> right_shift & 0x1);
}
input_sync(ictx->idev);
spin_lock_irqsave(&ictx->kc_lock, flags);
ictx->last_keycode = ictx->kc;
spin_unlock_irqrestore(&ictx->kc_lock, flags);
}
return mouse_input;
}
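/*
 * Bit-gathering example for the 0xffdc decode above (illustrative
 * input): buf[1] = 0x48 has bits 6 and 3 set, giving
 * rel_x = 0x08 | 0x01 = 9; with bit 1 of buf[0] set it is
 * sign-extended to -7, then the "+ rel_x / 2" scaling makes it -10.
 */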
static void imon_touch_event(struct imon_context *ictx, unsigned char *buf)
{
mod_timer(&ictx->ttimer, jiffies + TOUCH_TIMEOUT);
ictx->touch_x = (buf[0] << 4) | (buf[1] >> 4);
ictx->touch_y = 0xfff - ((buf[2] << 4) | (buf[1] & 0xf));
input_report_abs(ictx->touch, ABS_X, ictx->touch_x);
input_report_abs(ictx->touch, ABS_Y, ictx->touch_y);
input_report_key(ictx->touch, BTN_TOUCH, 0x01);
input_sync(ictx->touch);
}
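/*
 * Coordinate unpacking example (illustrative bytes): buf[0] = 0x12,
 * buf[1] = 0x34, buf[2] = 0x56 yields touch_x = 0x123 and
 * touch_y = 0xfff - 0x564 = 0xa9b; both axes are 12-bit values, with
 * Y inverted.
 */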
static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
{
int dir = 0;
signed char rel_x = 0x00, rel_y = 0x00;
u16 timeout, threshold;
u32 scancode = KEY_RESERVED;
unsigned long flags;
/*
* The imon directional pad functions more like a touchpad. Bytes 3 & 4
* contain a position coordinate (x,y), with each component ranging
* from -14 to 14. We want to down-sample this to only 4 discrete values
* for up/down/left/right arrow keys. Also, when you get too close to
	 * diagonals, it has a tendency to jump back and forth, so let's try
	 * to ignore movement that stays too close to a diagonal.
*/
if (ictx->product != 0xffdc) {
/* first, pad to 8 bytes so it conforms with everything else */
buf[5] = buf[6] = buf[7] = 0;
timeout = 500; /* in msecs */
/* (2*threshold) x (2*threshold) square */
threshold = pad_thresh ? pad_thresh : 28;
rel_x = buf[2];
rel_y = buf[3];
if (ictx->rc_proto == RC_PROTO_BIT_IMON && pad_stabilize) {
if ((buf[1] == 0) && ((rel_x != 0) || (rel_y != 0))) {
dir = stabilize((int)rel_x, (int)rel_y,
timeout, threshold);
if (!dir) {
spin_lock_irqsave(&ictx->kc_lock,
flags);
ictx->kc = KEY_UNKNOWN;
spin_unlock_irqrestore(&ictx->kc_lock,
flags);
return;
}
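				/*
				 * Pack the stabilized direction back into the
				 * buffer so the rebuilt big-endian scancode
				 * matches the arrow-key entries in the keymap.
				 */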
buf[2] = dir & 0xFF;
buf[3] = (dir >> 8) & 0xFF;
scancode = be32_to_cpu(*((__be32 *)buf));
}
} else {
/*
* Hack alert: instead of using keycodes, we have
* to use hard-coded scancodes here...
*/
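			/*
			 * The hard-coded values are the big-endian scancodes
			 * for pure up/down/left/right motion (0x0100XXXX with
			 * the direction in bytes 2/3), which is what the
			 * stabilize() path above is meant to produce as well.
			 */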
if (abs(rel_y) > abs(rel_x)) {
buf[2] = (rel_y > 0) ? 0x7F : 0x80;
buf[3] = 0;
if (rel_y > 0)
scancode = 0x01007f00; /* KEY_DOWN */
else
scancode = 0x01008000; /* KEY_UP */
} else {
buf[2] = 0;
buf[3] = (rel_x > 0) ? 0x7F : 0x80;
if (rel_x > 0)
scancode = 0x0100007f; /* KEY_RIGHT */
else
scancode = 0x01000080; /* KEY_LEFT */
}
}
/*
* Handle on-board decoded pad events for e.g. older VFD/iMON-Pad
* device (15c2:ffdc). The remote generates various codes from
* 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates
* 0x688301b7 and the right one 0x688481b7. All other keys generate
* 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with
* reversed endianness. Extract direction from buffer, rotate endianness,
* adjust sign and feed the values into stabilize(). The resulting codes
* will be 0x01008000, 0x01007F00, which match the newer devices.
*/
} else {
timeout = 10; /* in msecs */
/* (2*threshold) x (2*threshold) square */
threshold = pad_thresh ? pad_thresh : 15;
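		/*
		 * As in imon_mouse_event(), the 4-bit magnitude sits in bits
		 * 3-6 and is compacted below; note that ~0x10+1 evaluates to
		 * -16, so the OR sign-extends negative values.
		 */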
/* buf[1] is x */
rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 |
(buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6;
if (buf[0] & 0x02)
rel_x |= ~0x10+1;
/* buf[2] is y */
rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 |
(buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6;
if (buf[0] & 0x01)
rel_y |= ~0x10+1;
buf[0] = 0x01;
buf[1] = buf[4] = buf[5] = buf[6] = buf[7] = 0;
if (ictx->rc_proto == RC_PROTO_BIT_IMON && pad_stabilize) {
dir = stabilize((int)rel_x, (int)rel_y,
timeout, threshold);
if (!dir) {
spin_lock_irqsave(&ictx->kc_lock, flags);
ictx->kc = KEY_UNKNOWN;
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
}
buf[2] = dir & 0xFF;
buf[3] = (dir >> 8) & 0xFF;
scancode = be32_to_cpu(*((__be32 *)buf));
} else {
/*
* Hack alert: instead of using keycodes, we have
* to use hard-coded scancodes here...
*/
if (abs(rel_y) > abs(rel_x)) {
buf[2] = (rel_y > 0) ? 0x7F : 0x80;
buf[3] = 0;
if (rel_y > 0)
scancode = 0x01007f00; /* KEY_DOWN */
else
scancode = 0x01008000; /* KEY_UP */
} else {
buf[2] = 0;
buf[3] = (rel_x > 0) ? 0x7F : 0x80;
if (rel_x > 0)
scancode = 0x0100007f; /* KEY_RIGHT */
else
scancode = 0x01000080; /* KEY_LEFT */
}
}
}
if (scancode) {
spin_lock_irqsave(&ictx->kc_lock, flags);
ictx->kc = imon_remote_key_lookup(ictx, scancode);
spin_unlock_irqrestore(&ictx->kc_lock, flags);
}
}
/*
 * figure out if this is a press or a release. We don't actually
* care about repeats, as those will be auto-generated within the IR
* subsystem for repeating scancodes.
*/
static int imon_parse_press_type(struct imon_context *ictx,
unsigned char *buf, u8 ktype)
{
int press_type = 0;
unsigned long flags;
spin_lock_irqsave(&ictx->kc_lock, flags);
/* key release of 0x02XXXXXX key */
if (ictx->kc == KEY_RESERVED && buf[0] == 0x02 && buf[3] == 0x00)
ictx->kc = ictx->last_keycode;
/* mouse button release on (some) 0xffdc devices */
else if (ictx->kc == KEY_RESERVED && buf[0] == 0x68 && buf[1] == 0x82 &&
buf[2] == 0x81 && buf[3] == 0xb7)
ictx->kc = ictx->last_keycode;
/* mouse button release on (some other) 0xffdc devices */
else if (ictx->kc == KEY_RESERVED && buf[0] == 0x01 && buf[1] == 0x00 &&
buf[2] == 0x81 && buf[3] == 0xb7)
ictx->kc = ictx->last_keycode;
/* mce-specific button handling, no keyup events */
else if (ktype == IMON_KEY_MCE) {
ictx->rc_toggle = buf[2];
press_type = 1;
/* incoherent or irrelevant data */
} else if (ictx->kc == KEY_RESERVED)
press_type = -EINVAL;
/* key release of 0xXXXXXXb7 key */
else if (ictx->release_code)
press_type = 0;
/* this is a button press */
else
press_type = 1;
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return press_type;
}
/*
* Process the incoming packet
*/
static void imon_incoming_packet(struct imon_context *ictx,
struct urb *urb, int intf)
{
int len = urb->actual_length;
unsigned char *buf = urb->transfer_buffer;
struct device *dev = ictx->dev;
unsigned long flags;
u32 kc;
u64 scancode;
int press_type = 0;
ktime_t t;
static ktime_t prev_time;
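	/* note: static, so repeat-suppression state is shared by all devices */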
u8 ktype;
/* filter out junk data on the older 0xffdc imon devices */
if ((buf[0] == 0xff) && (buf[1] == 0xff) && (buf[2] == 0xff))
return;
/* Figure out what key was pressed */
if (len == 8 && buf[7] == 0xee) {
scancode = be64_to_cpu(*((__be64 *)buf));
ktype = IMON_KEY_PANEL;
kc = imon_panel_key_lookup(ictx, scancode);
ictx->release_code = false;
} else {
scancode = be32_to_cpu(*((__be32 *)buf));
if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE) {
ktype = IMON_KEY_IMON;
if (buf[0] == 0x80)
ktype = IMON_KEY_MCE;
kc = imon_mce_key_lookup(ictx, scancode);
} else {
ktype = IMON_KEY_IMON;
kc = imon_remote_key_lookup(ictx, scancode);
}
}
spin_lock_irqsave(&ictx->kc_lock, flags);
/* keyboard/mouse mode toggle button */
if (kc == KEY_KEYBOARD && !ictx->release_code) {
ictx->last_keycode = kc;
if (!nomouse) {
ictx->pad_mouse = !ictx->pad_mouse;
dev_dbg(dev, "toggling to %s mode\n",
ictx->pad_mouse ? "mouse" : "keyboard");
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
} else {
ictx->pad_mouse = false;
dev_dbg(dev, "mouse mode disabled, passing key value\n");
}
}
ictx->kc = kc;
spin_unlock_irqrestore(&ictx->kc_lock, flags);
	/* send touchscreen events through input subsystem if touchscreen data */
if (ictx->touch && len == 8 && buf[7] == 0x86) {
imon_touch_event(ictx, buf);
return;
/* look for mouse events with pad in mouse mode */
} else if (ictx->pad_mouse) {
if (imon_mouse_event(ictx, buf, len))
return;
}
/* Now for some special handling to convert pad input to arrow keys */
if (((len == 5) && (buf[0] == 0x01) && (buf[4] == 0x00)) ||
((len == 8) && (buf[0] & 0x40) &&
!(buf[1] & 0x1 || buf[1] >> 2 & 0x1))) {
len = 8;
imon_pad_to_keys(ictx, buf);
}
if (debug) {
printk(KERN_INFO "intf%d decoded packet: %*ph\n",
intf, len, buf);
}
press_type = imon_parse_press_type(ictx, buf, ktype);
if (press_type < 0)
goto not_input_data;
if (ktype != IMON_KEY_PANEL) {
if (press_type == 0)
rc_keyup(ictx->rdev);
else {
enum rc_proto proto;
if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE)
proto = RC_PROTO_RC6_MCE;
else if (ictx->rc_proto == RC_PROTO_BIT_IMON)
proto = RC_PROTO_IMON;
else
return;
rc_keydown(ictx->rdev, proto, ictx->rc_scancode,
ictx->rc_toggle);
spin_lock_irqsave(&ictx->kc_lock, flags);
ictx->last_keycode = ictx->kc;
spin_unlock_irqrestore(&ictx->kc_lock, flags);
}
return;
}
/* Only panel type events left to process now */
spin_lock_irqsave(&ictx->kc_lock, flags);
t = ktime_get();
/* KEY repeats from knob and panel that need to be suppressed */
if (ictx->kc == KEY_MUTE ||
ictx->dev_descr->flags & IMON_SUPPRESS_REPEATED_KEYS) {
if (ictx->kc == ictx->last_keycode &&
ktime_ms_delta(t, prev_time) < ictx->idev->rep[REP_DELAY]) {
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
}
}
prev_time = t;
kc = ictx->kc;
spin_unlock_irqrestore(&ictx->kc_lock, flags);
input_report_key(ictx->idev, kc, press_type);
input_sync(ictx->idev);
/* panel keys don't generate a release */
input_report_key(ictx->idev, kc, 0);
input_sync(ictx->idev);
spin_lock_irqsave(&ictx->kc_lock, flags);
ictx->last_keycode = kc;
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
not_input_data:
if (len != 8) {
dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n",
__func__, len, intf);
return;
}
/* iMON 2.4G associate frame */
if (buf[0] == 0x00 &&
buf[2] == 0xFF && /* REFID */
buf[3] == 0xFF &&
buf[4] == 0xFF &&
buf[5] == 0xFF && /* iMON 2.4G */
((buf[6] == 0x4E && buf[7] == 0xDF) || /* LT */
(buf[6] == 0x5E && buf[7] == 0xDF))) { /* DT */
dev_warn(dev, "%s: remote associated refid=%02X\n",
__func__, buf[1]);
ictx->rf_isassociating = false;
}
}
/*
* Callback function for USB core API: receive data
*/
static void usb_rx_callback_intf0(struct urb *urb)
{
struct imon_context *ictx;
int intfnum = 0;
if (!urb)
return;
ictx = (struct imon_context *)urb->context;
if (!ictx)
return;
/*
* if we get a callback before we're done configuring the hardware, we
* can't yet process the data, as there's nowhere to send it, but we
* still need to submit a new rx URB to avoid wedging the hardware
*/
if (!ictx->dev_present_intf0)
goto out;
switch (urb->status) {
case -ENOENT: /* usbcore unlink successful! */
return;
case -ESHUTDOWN: /* transport endpoint was shut down */
break;
case 0:
imon_incoming_packet(ictx, urb, intfnum);
break;
default:
dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
__func__, urb->status);
break;
}
out:
usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC);
}
static void usb_rx_callback_intf1(struct urb *urb)
{
struct imon_context *ictx;
int intfnum = 1;
if (!urb)
return;
ictx = (struct imon_context *)urb->context;
if (!ictx)
return;
/*
* if we get a callback before we're done configuring the hardware, we
* can't yet process the data, as there's nowhere to send it, but we
* still need to submit a new rx URB to avoid wedging the hardware
*/
if (!ictx->dev_present_intf1)
goto out;
switch (urb->status) {
case -ENOENT: /* usbcore unlink successful! */
return;
case -ESHUTDOWN: /* transport endpoint was shut down */
break;
case 0:
imon_incoming_packet(ictx, urb, intfnum);
break;
default:
dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
__func__, urb->status);
break;
}
out:
usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
}
/*
* The 0x15c2:0xffdc device ID was used for umpteen different imon
* devices, and all of them constantly spew interrupts, even when there
* is no actual data to report. However, byte 6 of this buffer looks like
 * it's unique across device variants, so we're trying to key off that to
 * figure out which display type (if any) and what IR protocol the device
 * actually supports. These devices have their IR protocol hard-coded into
 * their firmware; it can't be changed on the fly like on the newer hardware.
*/
static void imon_get_ffdc_type(struct imon_context *ictx)
{
u8 ffdc_cfg_byte = ictx->usb_rx_buf[6];
u8 detected_display_type = IMON_DISPLAY_TYPE_NONE;
u64 allowed_protos = RC_PROTO_BIT_IMON;
switch (ffdc_cfg_byte) {
/* iMON Knob, no display, iMON IR + vol knob */
case 0x21:
dev_info(ictx->dev, "0xffdc iMON Knob, iMON IR");
ictx->display_supported = false;
break;
/* iMON 2.4G LT (usb stick), no display, iMON RF */
case 0x4e:
dev_info(ictx->dev, "0xffdc iMON 2.4G LT, iMON RF");
ictx->display_supported = false;
ictx->rf_device = true;
break;
/* iMON VFD, no IR (does have vol knob tho) */
case 0x35:
dev_info(ictx->dev, "0xffdc iMON VFD + knob, no IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
break;
/* iMON VFD, iMON IR */
case 0x24:
case 0x30:
case 0x85:
dev_info(ictx->dev, "0xffdc iMON VFD, iMON IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
break;
/* iMON VFD, MCE IR */
case 0x46:
case 0x9e:
dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
allowed_protos = RC_PROTO_BIT_RC6_MCE;
break;
/* iMON VFD, iMON or MCE IR */
case 0x7e:
dev_info(ictx->dev, "0xffdc iMON VFD, iMON or MCE IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
allowed_protos |= RC_PROTO_BIT_RC6_MCE;
break;
/* iMON LCD, MCE IR */
case 0x9f:
dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
detected_display_type = IMON_DISPLAY_TYPE_LCD;
allowed_protos = RC_PROTO_BIT_RC6_MCE;
break;
/* no display, iMON IR */
case 0x26:
dev_info(ictx->dev, "0xffdc iMON Inside, iMON IR");
ictx->display_supported = false;
break;
/* Soundgraph iMON UltraBay */
case 0x98:
dev_info(ictx->dev, "0xffdc iMON UltraBay, LCD + IR");
detected_display_type = IMON_DISPLAY_TYPE_LCD;
allowed_protos = RC_PROTO_BIT_IMON | RC_PROTO_BIT_RC6_MCE;
ictx->dev_descr = &ultrabay_table;
break;
default:
dev_info(ictx->dev, "Unknown 0xffdc device, defaulting to VFD and iMON IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
/*
			 * We don't know which one it is; allow the user to set
			 * the RC6 protocol from userspace if IMON wasn't correct.
*/
allowed_protos |= RC_PROTO_BIT_RC6_MCE;
break;
}
printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);
ictx->display_type = detected_display_type;
ictx->rc_proto = allowed_protos;
}
static void imon_set_display_type(struct imon_context *ictx)
{
u8 configured_display_type = IMON_DISPLAY_TYPE_VFD;
/*
* Try to auto-detect the type of display if the user hasn't set
* it by hand via the display_type modparam. Default is VFD.
*/
if (display_type == IMON_DISPLAY_TYPE_AUTO) {
switch (ictx->product) {
case 0xffdc:
/* set in imon_get_ffdc_type() */
configured_display_type = ictx->display_type;
break;
case 0x0034:
case 0x0035:
configured_display_type = IMON_DISPLAY_TYPE_VGA;
break;
case 0x0038:
case 0x0039:
case 0x0045:
configured_display_type = IMON_DISPLAY_TYPE_LCD;
break;
case 0x003c:
case 0x0041:
case 0x0042:
case 0x0043:
configured_display_type = IMON_DISPLAY_TYPE_NONE;
ictx->display_supported = false;
break;
case 0x0036:
case 0x0044:
default:
configured_display_type = IMON_DISPLAY_TYPE_VFD;
break;
}
} else {
configured_display_type = display_type;
if (display_type == IMON_DISPLAY_TYPE_NONE)
ictx->display_supported = false;
else
ictx->display_supported = true;
dev_info(ictx->dev, "%s: overriding display type to %d via modparam\n",
__func__, display_type);
}
ictx->display_type = configured_display_type;
}
static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
{
struct rc_dev *rdev;
int ret;
static const unsigned char fp_packet[] = {
0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88 };
rdev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!rdev) {
dev_err(ictx->dev, "remote control dev allocation failed\n");
goto out;
}
snprintf(ictx->name_rdev, sizeof(ictx->name_rdev),
"iMON Remote (%04x:%04x)", ictx->vendor, ictx->product);
usb_make_path(ictx->usbdev_intf0, ictx->phys_rdev,
sizeof(ictx->phys_rdev));
strlcat(ictx->phys_rdev, "/input0", sizeof(ictx->phys_rdev));
rdev->device_name = ictx->name_rdev;
rdev->input_phys = ictx->phys_rdev;
usb_to_input_id(ictx->usbdev_intf0, &rdev->input_id);
rdev->dev.parent = ictx->dev;
rdev->priv = ictx;
/* iMON PAD or MCE */
rdev->allowed_protocols = RC_PROTO_BIT_IMON | RC_PROTO_BIT_RC6_MCE;
rdev->change_protocol = imon_ir_change_protocol;
rdev->driver_name = MOD_NAME;
/* Enable front-panel buttons and/or knobs */
memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
ret = send_packet(ictx);
/* Not fatal, but warn about it */
if (ret)
dev_info(ictx->dev, "panel buttons/knobs setup failed\n");
if (ictx->product == 0xffdc) {
imon_get_ffdc_type(ictx);
rdev->allowed_protocols = ictx->rc_proto;
}
imon_set_display_type(ictx);
if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE)
rdev->map_name = RC_MAP_IMON_MCE;
else
rdev->map_name = RC_MAP_IMON_PAD;
ret = rc_register_device(rdev);
if (ret < 0) {
dev_err(ictx->dev, "remote input dev register failed\n");
goto out;
}
return rdev;
out:
rc_free_device(rdev);
return NULL;
}
static struct input_dev *imon_init_idev(struct imon_context *ictx)
{
const struct imon_panel_key_table *key_table;
struct input_dev *idev;
int ret, i;
key_table = ictx->dev_descr->key_table;
idev = input_allocate_device();
if (!idev)
goto out;
snprintf(ictx->name_idev, sizeof(ictx->name_idev),
"iMON Panel, Knob and Mouse(%04x:%04x)",
ictx->vendor, ictx->product);
idev->name = ictx->name_idev;
usb_make_path(ictx->usbdev_intf0, ictx->phys_idev,
sizeof(ictx->phys_idev));
strlcat(ictx->phys_idev, "/input1", sizeof(ictx->phys_idev));
idev->phys = ictx->phys_idev;
idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_REL);
idev->keybit[BIT_WORD(BTN_MOUSE)] =
BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
idev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y) |
BIT_MASK(REL_WHEEL);
/* panel and/or knob code support */
for (i = 0; key_table[i].hw_code != 0; i++) {
u32 kc = key_table[i].keycode;
__set_bit(kc, idev->keybit);
}
usb_to_input_id(ictx->usbdev_intf0, &idev->id);
idev->dev.parent = ictx->dev;
input_set_drvdata(idev, ictx);
ret = input_register_device(idev);
if (ret < 0) {
dev_err(ictx->dev, "input dev register failed\n");
goto out;
}
return idev;
out:
input_free_device(idev);
return NULL;
}
static struct input_dev *imon_init_touch(struct imon_context *ictx)
{
struct input_dev *touch;
int ret;
touch = input_allocate_device();
if (!touch)
goto touch_alloc_failed;
snprintf(ictx->name_touch, sizeof(ictx->name_touch),
"iMON USB Touchscreen (%04x:%04x)",
ictx->vendor, ictx->product);
touch->name = ictx->name_touch;
usb_make_path(ictx->usbdev_intf1, ictx->phys_touch,
sizeof(ictx->phys_touch));
strlcat(ictx->phys_touch, "/input2", sizeof(ictx->phys_touch));
touch->phys = ictx->phys_touch;
touch->evbit[0] =
BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
touch->keybit[BIT_WORD(BTN_TOUCH)] =
BIT_MASK(BTN_TOUCH);
input_set_abs_params(touch, ABS_X,
0x00, 0xfff, 0, 0);
input_set_abs_params(touch, ABS_Y,
0x00, 0xfff, 0, 0);
input_set_drvdata(touch, ictx);
usb_to_input_id(ictx->usbdev_intf1, &touch->id);
touch->dev.parent = ictx->dev;
ret = input_register_device(touch);
if (ret < 0) {
dev_info(ictx->dev, "touchscreen input dev register failed\n");
goto touch_register_failed;
}
return touch;
touch_register_failed:
input_free_device(touch);
touch_alloc_failed:
return NULL;
}
static bool imon_find_endpoints(struct imon_context *ictx,
struct usb_host_interface *iface_desc)
{
struct usb_endpoint_descriptor *ep;
struct usb_endpoint_descriptor *rx_endpoint = NULL;
struct usb_endpoint_descriptor *tx_endpoint = NULL;
int ifnum = iface_desc->desc.bInterfaceNumber;
int num_endpts = iface_desc->desc.bNumEndpoints;
int i, ep_dir, ep_type;
bool ir_ep_found = false;
bool display_ep_found = false;
bool tx_control = false;
/*
* Scan the endpoint list and set:
* first input endpoint = IR endpoint
* first output endpoint = display endpoint
*/
for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) {
ep = &iface_desc->endpoint[i].desc;
ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
ep_type = usb_endpoint_type(ep);
if (!ir_ep_found && ep_dir == USB_DIR_IN &&
ep_type == USB_ENDPOINT_XFER_INT) {
rx_endpoint = ep;
ir_ep_found = true;
dev_dbg(ictx->dev, "%s: found IR endpoint\n", __func__);
} else if (!display_ep_found && ep_dir == USB_DIR_OUT &&
ep_type == USB_ENDPOINT_XFER_INT) {
tx_endpoint = ep;
display_ep_found = true;
dev_dbg(ictx->dev, "%s: found display endpoint\n", __func__);
}
}
if (ifnum == 0) {
ictx->rx_endpoint_intf0 = rx_endpoint;
/*
* tx is used to send characters to lcd/vfd, associate RF
* remotes, set IR protocol, and maybe more...
*/
ictx->tx_endpoint = tx_endpoint;
} else {
ictx->rx_endpoint_intf1 = rx_endpoint;
}
/*
	 * If we didn't find a display endpoint, this is probably one of the
	 * newer iMON devices that use a control URB instead of an interrupt one
*/
if (!display_ep_found) {
tx_control = true;
display_ep_found = true;
dev_dbg(ictx->dev, "%s: device uses control endpoint, not interface OUT endpoint\n",
__func__);
}
/*
* Some iMON receivers have no display. Unfortunately, it seems
* that SoundGraph recycles device IDs between devices both with
* and without... :\
*/
if (ictx->display_type == IMON_DISPLAY_TYPE_NONE) {
display_ep_found = false;
dev_dbg(ictx->dev, "%s: device has no display\n", __func__);
}
/*
* iMON Touch devices have a VGA touchscreen, but no "display", as
* that refers to e.g. /dev/lcd0 (a character device LCD or VFD).
*/
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
display_ep_found = false;
dev_dbg(ictx->dev, "%s: iMON Touch device found\n", __func__);
}
/* Input endpoint is mandatory */
if (!ir_ep_found)
pr_err("no valid input (IR) endpoint found\n");
ictx->tx_control = tx_control;
if (display_ep_found)
ictx->display_supported = true;
return ir_ep_found;
}
static struct imon_context *imon_init_intf0(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct imon_context *ictx;
struct urb *rx_urb;
struct urb *tx_urb;
struct device *dev = &intf->dev;
struct usb_host_interface *iface_desc;
int ret = -ENOMEM;
ictx = kzalloc(sizeof(*ictx), GFP_KERNEL);
if (!ictx)
goto exit;
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rx_urb)
goto rx_urb_alloc_failed;
tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!tx_urb)
goto tx_urb_alloc_failed;
mutex_init(&ictx->lock);
spin_lock_init(&ictx->kc_lock);
mutex_lock(&ictx->lock);
ictx->dev = dev;
ictx->usbdev_intf0 = usb_get_dev(interface_to_usbdev(intf));
ictx->rx_urb_intf0 = rx_urb;
ictx->tx_urb = tx_urb;
ictx->rf_device = false;
init_completion(&ictx->tx.finished);
ictx->vendor = le16_to_cpu(ictx->usbdev_intf0->descriptor.idVendor);
ictx->product = le16_to_cpu(ictx->usbdev_intf0->descriptor.idProduct);
	/* save driver info for later access to the panel/knob key table */
ictx->dev_descr = (struct imon_usb_dev_descr *)id->driver_info;
/* default send_packet delay is 5ms but some devices need more */
ictx->send_packet_delay = ictx->dev_descr->flags &
IMON_NEED_20MS_PKT_DELAY ? 20 : 5;
ret = -ENODEV;
iface_desc = intf->cur_altsetting;
if (!imon_find_endpoints(ictx, iface_desc)) {
goto find_endpoint_failed;
}
usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
usb_rcvintpipe(ictx->usbdev_intf0,
ictx->rx_endpoint_intf0->bEndpointAddress),
ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
usb_rx_callback_intf0, ictx,
ictx->rx_endpoint_intf0->bInterval);
ret = usb_submit_urb(ictx->rx_urb_intf0, GFP_KERNEL);
if (ret) {
pr_err("usb_submit_urb failed for intf0 (%d)\n", ret);
goto urb_submit_failed;
}
ictx->idev = imon_init_idev(ictx);
if (!ictx->idev) {
dev_err(dev, "%s: input device setup failed\n", __func__);
goto idev_setup_failed;
}
ictx->rdev = imon_init_rdev(ictx);
if (!ictx->rdev) {
dev_err(dev, "%s: rc device setup failed\n", __func__);
goto rdev_setup_failed;
}
ictx->dev_present_intf0 = true;
mutex_unlock(&ictx->lock);
return ictx;
rdev_setup_failed:
input_unregister_device(ictx->idev);
idev_setup_failed:
usb_kill_urb(ictx->rx_urb_intf0);
urb_submit_failed:
find_endpoint_failed:
usb_put_dev(ictx->usbdev_intf0);
mutex_unlock(&ictx->lock);
usb_free_urb(tx_urb);
tx_urb_alloc_failed:
usb_free_urb(rx_urb);
rx_urb_alloc_failed:
kfree(ictx);
exit:
dev_err(dev, "unable to initialize intf0, err %d\n", ret);
return NULL;
}
static struct imon_context *imon_init_intf1(struct usb_interface *intf,
struct imon_context *ictx)
{
struct urb *rx_urb;
struct usb_host_interface *iface_desc;
int ret = -ENOMEM;
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rx_urb)
goto rx_urb_alloc_failed;
mutex_lock(&ictx->lock);
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
timer_setup(&ictx->ttimer, imon_touch_display_timeout, 0);
}
ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf));
ictx->rx_urb_intf1 = rx_urb;
ret = -ENODEV;
iface_desc = intf->cur_altsetting;
if (!imon_find_endpoints(ictx, iface_desc))
goto find_endpoint_failed;
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
ictx->touch = imon_init_touch(ictx);
if (!ictx->touch)
goto touch_setup_failed;
} else
ictx->touch = NULL;
usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1,
usb_rcvintpipe(ictx->usbdev_intf1,
ictx->rx_endpoint_intf1->bEndpointAddress),
ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
usb_rx_callback_intf1, ictx,
ictx->rx_endpoint_intf1->bInterval);
ret = usb_submit_urb(ictx->rx_urb_intf1, GFP_KERNEL);
if (ret) {
pr_err("usb_submit_urb failed for intf1 (%d)\n", ret);
goto urb_submit_failed;
}
ictx->dev_present_intf1 = true;
mutex_unlock(&ictx->lock);
return ictx;
urb_submit_failed:
if (ictx->touch)
input_unregister_device(ictx->touch);
touch_setup_failed:
find_endpoint_failed:
usb_put_dev(ictx->usbdev_intf1);
ictx->usbdev_intf1 = NULL;
mutex_unlock(&ictx->lock);
usb_free_urb(rx_urb);
ictx->rx_urb_intf1 = NULL;
rx_urb_alloc_failed:
dev_err(ictx->dev, "unable to initialize intf1, err %d\n", ret);
return NULL;
}
static void imon_init_display(struct imon_context *ictx,
struct usb_interface *intf)
{
int ret;
dev_dbg(ictx->dev, "Registering iMON display with sysfs\n");
/* set up sysfs entry for built-in clock */
ret = sysfs_create_group(&intf->dev.kobj, &imon_display_attr_group);
if (ret)
dev_err(ictx->dev, "Could not create display sysfs entries(%d)",
ret);
if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
ret = usb_register_dev(intf, &imon_lcd_class);
else
ret = usb_register_dev(intf, &imon_vfd_class);
if (ret)
/* Not a fatal error, so ignore */
dev_info(ictx->dev, "could not get a minor number for display\n");
}
/*
* Callback function for USB core API: Probe
*/
static int imon_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *usbdev = NULL;
struct usb_host_interface *iface_desc = NULL;
struct usb_interface *first_if;
struct device *dev = &interface->dev;
int ifnum, sysfs_err;
int ret = 0;
struct imon_context *ictx = NULL;
u16 vendor, product;
usbdev = usb_get_dev(interface_to_usbdev(interface));
iface_desc = interface->cur_altsetting;
ifnum = iface_desc->desc.bInterfaceNumber;
vendor = le16_to_cpu(usbdev->descriptor.idVendor);
product = le16_to_cpu(usbdev->descriptor.idProduct);
dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n",
__func__, vendor, product, ifnum);
first_if = usb_ifnum_to_if(usbdev, 0);
if (!first_if) {
ret = -ENODEV;
goto fail;
}
if (ifnum == 0) {
ictx = imon_init_intf0(interface, id);
if (!ictx) {
pr_err("failed to initialize context!\n");
ret = -ENODEV;
goto fail;
}
refcount_set(&ictx->users, 1);
} else {
/* this is the secondary interface on the device */
struct imon_context *first_if_ctx = usb_get_intfdata(first_if);
/* fail early if first intf failed to register */
if (!first_if_ctx) {
ret = -ENODEV;
goto fail;
}
ictx = imon_init_intf1(interface, first_if_ctx);
if (!ictx) {
pr_err("failed to attach to context!\n");
ret = -ENODEV;
goto fail;
}
refcount_inc(&ictx->users);
}
usb_set_intfdata(interface, ictx);
if (ifnum == 0) {
if (product == 0xffdc && ictx->rf_device) {
sysfs_err = sysfs_create_group(&interface->dev.kobj,
&imon_rf_attr_group);
if (sysfs_err)
pr_err("Could not create RF sysfs entries(%d)\n",
sysfs_err);
}
if (ictx->display_supported)
imon_init_display(ictx, interface);
}
dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n",
vendor, product, ifnum,
usbdev->bus->busnum, usbdev->devnum);
usb_put_dev(usbdev);
return 0;
fail:
usb_put_dev(usbdev);
dev_err(dev, "unable to register, err %d\n", ret);
return ret;
}
/*
* Callback function for USB core API: disconnect
*/
static void imon_disconnect(struct usb_interface *interface)
{
struct imon_context *ictx;
struct device *dev;
int ifnum;
ictx = usb_get_intfdata(interface);
ictx->disconnected = true;
dev = ictx->dev;
ifnum = interface->cur_altsetting->desc.bInterfaceNumber;
/*
* sysfs_remove_group is safe to call even if sysfs_create_group
* hasn't been called
*/
sysfs_remove_group(&interface->dev.kobj, &imon_display_attr_group);
sysfs_remove_group(&interface->dev.kobj, &imon_rf_attr_group);
usb_set_intfdata(interface, NULL);
/* Abort ongoing write */
if (ictx->tx.busy) {
usb_kill_urb(ictx->tx_urb);
complete(&ictx->tx.finished);
}
if (ifnum == 0) {
ictx->dev_present_intf0 = false;
usb_kill_urb(ictx->rx_urb_intf0);
input_unregister_device(ictx->idev);
rc_unregister_device(ictx->rdev);
if (ictx->display_supported) {
if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
usb_deregister_dev(interface, &imon_lcd_class);
else if (ictx->display_type == IMON_DISPLAY_TYPE_VFD)
usb_deregister_dev(interface, &imon_vfd_class);
}
usb_put_dev(ictx->usbdev_intf0);
} else {
ictx->dev_present_intf1 = false;
usb_kill_urb(ictx->rx_urb_intf1);
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
del_timer_sync(&ictx->ttimer);
input_unregister_device(ictx->touch);
}
usb_put_dev(ictx->usbdev_intf1);
}
if (refcount_dec_and_test(&ictx->users))
free_imon_context(ictx);
dev_dbg(dev, "%s: iMON device (intf%d) disconnected\n",
__func__, ifnum);
}
static int imon_suspend(struct usb_interface *intf, pm_message_t message)
{
struct imon_context *ictx = usb_get_intfdata(intf);
int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
if (ifnum == 0)
usb_kill_urb(ictx->rx_urb_intf0);
else
usb_kill_urb(ictx->rx_urb_intf1);
return 0;
}
static int imon_resume(struct usb_interface *intf)
{
int rc = 0;
struct imon_context *ictx = usb_get_intfdata(intf);
int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
if (ifnum == 0) {
usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
usb_rcvintpipe(ictx->usbdev_intf0,
ictx->rx_endpoint_intf0->bEndpointAddress),
ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
usb_rx_callback_intf0, ictx,
ictx->rx_endpoint_intf0->bInterval);
rc = usb_submit_urb(ictx->rx_urb_intf0, GFP_NOIO);
} else {
usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1,
usb_rcvintpipe(ictx->usbdev_intf1,
ictx->rx_endpoint_intf1->bEndpointAddress),
ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
usb_rx_callback_intf1, ictx,
ictx->rx_endpoint_intf1->bInterval);
rc = usb_submit_urb(ictx->rx_urb_intf1, GFP_NOIO);
}
return rc;
}
module_usb_driver(imon_driver);
| linux-master | drivers/media/rc/imon.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IguanaWorks USB IR Transceiver support
*
* Copyright (C) 2012 Sean Young <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <media/rc-core.h>
#define BUF_SIZE 152
struct iguanair {
struct rc_dev *rc;
struct device *dev;
struct usb_device *udev;
uint16_t version;
uint8_t bufsize;
uint8_t cycle_overhead;
/* receiver support */
bool receiver_on;
dma_addr_t dma_in, dma_out;
uint8_t *buf_in;
struct urb *urb_in, *urb_out;
struct completion completion;
/* transmit support */
bool tx_overflow;
uint32_t carrier;
struct send_packet *packet;
char name[64];
char phys[64];
};
#define CMD_NOP 0x00
#define CMD_GET_VERSION 0x01
#define CMD_GET_BUFSIZE 0x11
#define CMD_GET_FEATURES 0x10
#define CMD_SEND 0x15
#define CMD_EXECUTE 0x1f
#define CMD_RX_OVERFLOW 0x31
#define CMD_TX_OVERFLOW 0x32
#define CMD_RECEIVER_ON 0x12
#define CMD_RECEIVER_OFF 0x14
#define DIR_IN 0xdc
#define DIR_OUT 0xcd
#define MAX_IN_PACKET 8u
#define MAX_OUT_PACKET (sizeof(struct send_packet) + BUF_SIZE)
#define TIMEOUT 1000
#define RX_RESOLUTION 21
struct packet {
uint16_t start;
uint8_t direction;
uint8_t cmd;
};
struct send_packet {
struct packet header;
uint8_t length;
uint8_t channels;
uint8_t busy7;
uint8_t busy4;
uint8_t payload[];
};
static void process_ir_data(struct iguanair *ir, unsigned len)
{
if (len >= 4 && ir->buf_in[0] == 0 && ir->buf_in[1] == 0) {
switch (ir->buf_in[3]) {
case CMD_GET_VERSION:
if (len == 6) {
ir->version = (ir->buf_in[5] << 8) |
ir->buf_in[4];
complete(&ir->completion);
}
break;
case CMD_GET_BUFSIZE:
if (len >= 5) {
ir->bufsize = ir->buf_in[4];
complete(&ir->completion);
}
break;
case CMD_GET_FEATURES:
if (len > 5) {
ir->cycle_overhead = ir->buf_in[5];
complete(&ir->completion);
}
break;
case CMD_TX_OVERFLOW:
ir->tx_overflow = true;
fallthrough;
case CMD_RECEIVER_OFF:
case CMD_RECEIVER_ON:
case CMD_SEND:
complete(&ir->completion);
break;
case CMD_RX_OVERFLOW:
dev_warn(ir->dev, "receive overflow\n");
ir_raw_event_overflow(ir->rc);
break;
default:
dev_warn(ir->dev, "control code %02x received\n",
ir->buf_in[3]);
break;
}
} else if (len >= 7) {
struct ir_raw_event rawir = {};
unsigned i;
bool event = false;
for (i = 0; i < 7; i++) {
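			/*
			 * 0x80 seems to mark a gap longer than the device can
			 * measure; report a fixed long space instead of the
			 * usual (n + 1) * 21us sample.
			 */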
if (ir->buf_in[i] == 0x80) {
rawir.pulse = false;
rawir.duration = 21845;
} else {
rawir.pulse = (ir->buf_in[i] & 0x80) == 0;
rawir.duration = ((ir->buf_in[i] & 0x7f) + 1) *
RX_RESOLUTION;
}
if (ir_raw_event_store_with_filter(ir->rc, &rawir))
event = true;
}
if (event)
ir_raw_event_handle(ir->rc);
}
}
static void iguanair_rx(struct urb *urb)
{
struct iguanair *ir;
int rc;
if (!urb)
return;
ir = urb->context;
if (!ir)
return;
switch (urb->status) {
case 0:
process_ir_data(ir, urb->actual_length);
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
return;
case -EPIPE:
default:
dev_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
break;
}
rc = usb_submit_urb(urb, GFP_ATOMIC);
if (rc && rc != -ENODEV)
dev_warn(ir->dev, "failed to resubmit urb: %d\n", rc);
}
static void iguanair_irq_out(struct urb *urb)
{
struct iguanair *ir = urb->context;
if (urb->status)
dev_dbg(ir->dev, "Error: out urb status = %d\n", urb->status);
	/* if we sent a NOP packet, do not expect a response */
if (urb->status == 0 && ir->packet->header.cmd == CMD_NOP)
complete(&ir->completion);
}
static int iguanair_send(struct iguanair *ir, unsigned size)
{
int rc;
reinit_completion(&ir->completion);
ir->urb_out->transfer_buffer_length = size;
rc = usb_submit_urb(ir->urb_out, GFP_KERNEL);
if (rc)
return rc;
if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0)
return -ETIMEDOUT;
return rc;
}
static int iguanair_get_features(struct iguanair *ir)
{
int rc;
/*
* On cold boot, the iguanair initializes on the first packet
* received but does not process that packet. Send an empty
* packet.
*/
ir->packet->header.start = 0;
ir->packet->header.direction = DIR_OUT;
ir->packet->header.cmd = CMD_NOP;
iguanair_send(ir, sizeof(ir->packet->header));
ir->packet->header.cmd = CMD_GET_VERSION;
rc = iguanair_send(ir, sizeof(ir->packet->header));
if (rc) {
dev_info(ir->dev, "failed to get version\n");
goto out;
}
if (ir->version < 0x205) {
dev_err(ir->dev, "firmware 0x%04x is too old\n", ir->version);
rc = -ENODEV;
goto out;
}
ir->bufsize = 150;
ir->cycle_overhead = 65;
ir->packet->header.cmd = CMD_GET_BUFSIZE;
rc = iguanair_send(ir, sizeof(ir->packet->header));
if (rc) {
dev_info(ir->dev, "failed to get buffer size\n");
goto out;
}
if (ir->bufsize > BUF_SIZE) {
dev_info(ir->dev, "buffer size %u larger than expected\n",
ir->bufsize);
ir->bufsize = BUF_SIZE;
}
ir->packet->header.cmd = CMD_GET_FEATURES;
rc = iguanair_send(ir, sizeof(ir->packet->header));
if (rc)
dev_info(ir->dev, "failed to get features\n");
out:
return rc;
}
static int iguanair_receiver(struct iguanair *ir, bool enable)
{
ir->packet->header.start = 0;
ir->packet->header.direction = DIR_OUT;
ir->packet->header.cmd = enable ? CMD_RECEIVER_ON : CMD_RECEIVER_OFF;
return iguanair_send(ir, sizeof(ir->packet->header));
}
/*
* The iguanair creates the carrier by busy spinning after each half period.
* This is counted in CPU cycles, with the CPU running at 24MHz. It is
 * broken down into 7-cycle and 4-cycle delays, with a preference for
* 4-cycle delays, minus the overhead of the loop itself (cycle_overhead).
*/
static int iguanair_set_tx_carrier(struct rc_dev *dev, uint32_t carrier)
{
struct iguanair *ir = dev->priv;
if (carrier < 25000 || carrier > 150000)
return -EINVAL;
if (carrier != ir->carrier) {
uint32_t cycles, fours, sevens;
ir->carrier = carrier;
cycles = DIV_ROUND_CLOSEST(24000000, carrier * 2) -
ir->cycle_overhead;
/*
* Calculate minimum number of 7 cycles needed so
* we are left with a multiple of 4; so we want to have
* (sevens * 7) & 3 == cycles & 3
*/
sevens = (4 - cycles) & 3;
fours = (cycles - sevens * 7) / 4;
/*
* The firmware interprets these values as a relative offset
		 * for a branch. Immediately following the branches, there are
		 * 4 instructions of 7 cycles (2 bytes each) and 110
* instructions of 4 cycles (1 byte each). A relative branch
* of 0 will execute all of them, branch further for less
* cycle burning.
*/
ir->packet->busy7 = (4 - sevens) * 2;
ir->packet->busy4 = 110 - fours;
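		/*
		 * Worked example, assuming the default cycle_overhead of 65:
		 * carrier = 38000 gives cycles = 316 - 65 = 251, sevens = 1,
		 * fours = 61 (1 * 7 + 61 * 4 == 251), busy7 = 6, busy4 = 49.
		 */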
}
return 0;
}
static int iguanair_set_tx_mask(struct rc_dev *dev, uint32_t mask)
{
struct iguanair *ir = dev->priv;
if (mask > 15)
return 4;
ir->packet->channels = mask << 4;
return 0;
}
static int iguanair_tx(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
struct iguanair *ir = dev->priv;
unsigned int i, size, p, periods;
int rc;
/* convert from us to carrier periods */
for (i = size = 0; i < count; i++) {
periods = DIV_ROUND_CLOSEST(txbuf[i] * ir->carrier, 1000000);
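		/*
		 * Each payload byte carries up to 127 carrier periods; bit 7
		 * set marks a space (odd indices), clear marks a pulse.
		 */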
while (periods) {
p = min(periods, 127u);
if (size >= ir->bufsize) {
rc = -EINVAL;
goto out;
}
ir->packet->payload[size++] = p | ((i & 1) ? 0x80 : 0);
periods -= p;
}
}
ir->packet->header.start = 0;
ir->packet->header.direction = DIR_OUT;
ir->packet->header.cmd = CMD_SEND;
ir->packet->length = size;
ir->tx_overflow = false;
rc = iguanair_send(ir, sizeof(*ir->packet) + size);
if (rc == 0 && ir->tx_overflow)
rc = -EOVERFLOW;
out:
return rc ? rc : count;
}
static int iguanair_open(struct rc_dev *rdev)
{
struct iguanair *ir = rdev->priv;
int rc;
rc = iguanair_receiver(ir, true);
if (rc == 0)
ir->receiver_on = true;
return rc;
}
static void iguanair_close(struct rc_dev *rdev)
{
struct iguanair *ir = rdev->priv;
int rc;
rc = iguanair_receiver(ir, false);
ir->receiver_on = false;
if (rc && rc != -ENODEV)
dev_warn(ir->dev, "failed to disable receiver: %d\n", rc);
}
static int iguanair_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct iguanair *ir;
struct rc_dev *rc;
int ret, pipein, pipeout;
struct usb_host_interface *idesc;
idesc = intf->cur_altsetting;
if (idesc->desc.bNumEndpoints < 2)
return -ENODEV;
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!ir || !rc) {
ret = -ENOMEM;
goto out;
}
ir->buf_in = usb_alloc_coherent(udev, MAX_IN_PACKET, GFP_KERNEL,
&ir->dma_in);
ir->packet = usb_alloc_coherent(udev, MAX_OUT_PACKET, GFP_KERNEL,
&ir->dma_out);
ir->urb_in = usb_alloc_urb(0, GFP_KERNEL);
ir->urb_out = usb_alloc_urb(0, GFP_KERNEL);
if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out ||
!usb_endpoint_is_int_in(&idesc->endpoint[0].desc) ||
!usb_endpoint_is_int_out(&idesc->endpoint[1].desc)) {
ret = -ENOMEM;
goto out;
}
ir->rc = rc;
ir->dev = &intf->dev;
ir->udev = udev;
init_completion(&ir->completion);
pipeout = usb_sndintpipe(udev,
idesc->endpoint[1].desc.bEndpointAddress);
usb_fill_int_urb(ir->urb_out, udev, pipeout, ir->packet, MAX_OUT_PACKET,
iguanair_irq_out, ir, 1);
ir->urb_out->transfer_dma = ir->dma_out;
ir->urb_out->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
pipein = usb_rcvintpipe(udev, idesc->endpoint[0].desc.bEndpointAddress);
usb_fill_int_urb(ir->urb_in, udev, pipein, ir->buf_in, MAX_IN_PACKET,
iguanair_rx, ir, 1);
ir->urb_in->transfer_dma = ir->dma_in;
ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
ret = usb_submit_urb(ir->urb_in, GFP_KERNEL);
if (ret) {
dev_warn(&intf->dev, "failed to submit urb: %d\n", ret);
goto out;
}
ret = iguanair_get_features(ir);
if (ret)
goto out2;
snprintf(ir->name, sizeof(ir->name),
"IguanaWorks USB IR Transceiver version 0x%04x", ir->version);
usb_make_path(ir->udev, ir->phys, sizeof(ir->phys));
rc->device_name = ir->name;
rc->input_phys = ir->phys;
usb_to_input_id(ir->udev, &rc->input_id);
rc->dev.parent = &intf->dev;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rc->priv = ir;
rc->open = iguanair_open;
rc->close = iguanair_close;
rc->s_tx_mask = iguanair_set_tx_mask;
rc->s_tx_carrier = iguanair_set_tx_carrier;
rc->tx_ir = iguanair_tx;
rc->driver_name = KBUILD_MODNAME;
rc->map_name = RC_MAP_RC6_MCE;
rc->min_timeout = 1;
rc->timeout = IR_DEFAULT_TIMEOUT;
rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rc->rx_resolution = RX_RESOLUTION;
iguanair_set_tx_carrier(rc, 38000);
iguanair_set_tx_mask(rc, 0);
ret = rc_register_device(rc);
if (ret < 0) {
dev_err(&intf->dev, "failed to register rc device %d", ret);
goto out2;
}
usb_set_intfdata(intf, ir);
return 0;
out2:
usb_kill_urb(ir->urb_in);
usb_kill_urb(ir->urb_out);
out:
if (ir) {
usb_free_urb(ir->urb_in);
usb_free_urb(ir->urb_out);
usb_free_coherent(udev, MAX_IN_PACKET, ir->buf_in, ir->dma_in);
usb_free_coherent(udev, MAX_OUT_PACKET, ir->packet,
ir->dma_out);
}
rc_free_device(rc);
kfree(ir);
return ret;
}
static void iguanair_disconnect(struct usb_interface *intf)
{
struct iguanair *ir = usb_get_intfdata(intf);
rc_unregister_device(ir->rc);
usb_set_intfdata(intf, NULL);
usb_kill_urb(ir->urb_in);
usb_kill_urb(ir->urb_out);
usb_free_urb(ir->urb_in);
usb_free_urb(ir->urb_out);
usb_free_coherent(ir->udev, MAX_IN_PACKET, ir->buf_in, ir->dma_in);
usb_free_coherent(ir->udev, MAX_OUT_PACKET, ir->packet, ir->dma_out);
kfree(ir);
}
static int iguanair_suspend(struct usb_interface *intf, pm_message_t message)
{
struct iguanair *ir = usb_get_intfdata(intf);
int rc = 0;
if (ir->receiver_on) {
rc = iguanair_receiver(ir, false);
if (rc)
dev_warn(ir->dev, "failed to disable receiver for suspend\n");
}
usb_kill_urb(ir->urb_in);
usb_kill_urb(ir->urb_out);
return rc;
}
static int iguanair_resume(struct usb_interface *intf)
{
struct iguanair *ir = usb_get_intfdata(intf);
int rc;
rc = usb_submit_urb(ir->urb_in, GFP_KERNEL);
if (rc)
dev_warn(&intf->dev, "failed to submit urb: %d\n", rc);
if (ir->receiver_on) {
rc = iguanair_receiver(ir, true);
if (rc)
dev_warn(ir->dev, "failed to enable receiver after resume\n");
}
return rc;
}
static const struct usb_device_id iguanair_table[] = {
{ USB_DEVICE(0x1781, 0x0938) },
{ }
};
static struct usb_driver iguanair_driver = {
.name = KBUILD_MODNAME,
.probe = iguanair_probe,
.disconnect = iguanair_disconnect,
.suspend = iguanair_suspend,
.resume = iguanair_resume,
.reset_resume = iguanair_resume,
.id_table = iguanair_table,
.soft_unbind = 1 /* we want to disable receiver on unbind */
};
module_usb_driver(iguanair_driver);
MODULE_DESCRIPTION("IguanaWorks USB IR Transceiver");
MODULE_AUTHOR("Sean Young <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, iguanair_table);
| linux-master | drivers/media/rc/iguanair.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IgorPlug-USB IR Receiver
*
* Copyright (C) 2014 Sean Young <[email protected]>
*
* Supports the standard homebrew IgorPlugUSB receiver with Igor's firmware.
* See http://www.cesko.host.sk/IgorPlugUSB/IgorPlug-USB%20(AVR)_eng.htm
*
* Based on the lirc_igorplugusb.c driver:
* Copyright (C) 2004 Jan M. Hochstein
* <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <media/rc-core.h>
#define DRIVER_DESC "IgorPlug-USB IR Receiver"
#define DRIVER_NAME "igorplugusb"
#define HEADERLEN 3
#define BUFLEN 36
#define MAX_PACKET (HEADERLEN + BUFLEN)
#define SET_INFRABUFFER_EMPTY 1
#define GET_INFRACODE 2
struct igorplugusb {
struct rc_dev *rc;
struct device *dev;
struct urb *urb;
struct usb_ctrlrequest request;
struct timer_list timer;
u8 *buf_in;
char phys[64];
};
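/*
 * The device has no interrupt endpoint; it is driven entirely via control
 * transfers. A timer issues GET_INFRACODE every 50ms, and
 * SET_INFRABUFFER_EMPTY is sent once the returned samples are consumed.
 */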
static void igorplugusb_cmd(struct igorplugusb *ir, int cmd);
static void igorplugusb_irdata(struct igorplugusb *ir, unsigned len)
{
struct ir_raw_event rawir = {};
unsigned i, start, overflow;
dev_dbg(ir->dev, "irdata: %*ph (len=%u)", len, ir->buf_in, len);
/*
* If more than 36 pulses and spaces follow each other, the igorplugusb
* overwrites its buffer from the beginning. The overflow value is the
* last offset which was not overwritten. Everything from this offset
	 * onwards occurred before everything up to this offset, so the
	 * read below starts at that offset and wraps around.
*/
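	/*
	 * E.g. with a full MAX_PACKET (len == 39) and overflow == 4: samples
	 * are consumed from offset 7 through 38, then the index wraps to
	 * HEADERLEN (3) and stops just before offset 7 again.
	 */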
overflow = ir->buf_in[2];
i = start = overflow + HEADERLEN;
if (start >= len) {
dev_err(ir->dev, "receive overflow invalid: %u", overflow);
} else {
if (overflow > 0) {
dev_warn(ir->dev, "receive overflow, at least %u lost",
overflow);
ir_raw_event_overflow(ir->rc);
}
do {
rawir.duration = ir->buf_in[i] * 85;
rawir.pulse = i & 1;
ir_raw_event_store_with_filter(ir->rc, &rawir);
if (++i == len)
i = HEADERLEN;
} while (i != start);
/* add a trailing space */
rawir.duration = ir->rc->timeout;
rawir.pulse = false;
ir_raw_event_store_with_filter(ir->rc, &rawir);
ir_raw_event_handle(ir->rc);
}
igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY);
}
static void igorplugusb_callback(struct urb *urb)
{
struct usb_ctrlrequest *req;
struct igorplugusb *ir = urb->context;
req = (struct usb_ctrlrequest *)urb->setup_packet;
switch (urb->status) {
case 0:
if (req->bRequest == GET_INFRACODE &&
urb->actual_length > HEADERLEN)
igorplugusb_irdata(ir, urb->actual_length);
else /* request IR */
mod_timer(&ir->timer, jiffies + msecs_to_jiffies(50));
break;
case -EPROTO:
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
return;
default:
dev_warn(ir->dev, "Error: urb status = %d\n", urb->status);
igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY);
break;
}
}
static void igorplugusb_cmd(struct igorplugusb *ir, int cmd)
{
int ret;
ir->request.bRequest = cmd;
ir->urb->transfer_flags = 0;
ret = usb_submit_urb(ir->urb, GFP_ATOMIC);
if (ret && ret != -EPERM)
dev_err(ir->dev, "submit urb failed: %d", ret);
}
static void igorplugusb_timer(struct timer_list *t)
{
struct igorplugusb *ir = from_timer(ir, t, timer);
igorplugusb_cmd(ir, GET_INFRACODE);
}
static int igorplugusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev;
struct usb_host_interface *idesc;
struct usb_endpoint_descriptor *ep;
struct igorplugusb *ir;
struct rc_dev *rc;
int ret = -ENOMEM;
udev = interface_to_usbdev(intf);
idesc = intf->cur_altsetting;
if (idesc->desc.bNumEndpoints != 1) {
dev_err(&intf->dev, "incorrect number of endpoints");
return -ENODEV;
}
ep = &idesc->endpoint[0].desc;
if (!usb_endpoint_dir_in(ep) || !usb_endpoint_xfer_control(ep)) {
dev_err(&intf->dev, "endpoint incorrect");
return -ENODEV;
}
ir = devm_kzalloc(&intf->dev, sizeof(*ir), GFP_KERNEL);
if (!ir)
return -ENOMEM;
ir->dev = &intf->dev;
timer_setup(&ir->timer, igorplugusb_timer, 0);
ir->request.bRequest = GET_INFRACODE;
ir->request.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
ir->request.wLength = cpu_to_le16(MAX_PACKET);
ir->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!ir->urb)
goto fail;
ir->buf_in = kmalloc(MAX_PACKET, GFP_KERNEL);
if (!ir->buf_in)
goto fail;
usb_fill_control_urb(ir->urb, udev,
usb_rcvctrlpipe(udev, 0), (uint8_t *)&ir->request,
ir->buf_in, MAX_PACKET, igorplugusb_callback, ir);
usb_make_path(udev, ir->phys, sizeof(ir->phys));
rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rc)
goto fail;
rc->device_name = DRIVER_DESC;
rc->input_phys = ir->phys;
usb_to_input_id(udev, &rc->input_id);
rc->dev.parent = &intf->dev;
/*
* This device can only store 36 pulses + spaces, which is not enough
* for the NEC protocol and many others.
*/
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER &
~(RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32 |
RC_PROTO_BIT_RC6_6A_20 | RC_PROTO_BIT_RC6_6A_24 |
RC_PROTO_BIT_RC6_6A_32 | RC_PROTO_BIT_RC6_MCE |
RC_PROTO_BIT_SONY20 | RC_PROTO_BIT_SANYO);
rc->priv = ir;
rc->driver_name = DRIVER_NAME;
rc->map_name = RC_MAP_HAUPPAUGE;
rc->timeout = MS_TO_US(100);
rc->rx_resolution = 85;
ir->rc = rc;
ret = rc_register_device(rc);
if (ret) {
dev_err(&intf->dev, "failed to register rc device: %d", ret);
goto fail;
}
usb_set_intfdata(intf, ir);
igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY);
return 0;
fail:
usb_poison_urb(ir->urb);
del_timer(&ir->timer);
usb_unpoison_urb(ir->urb);
usb_free_urb(ir->urb);
rc_free_device(ir->rc);
kfree(ir->buf_in);
return ret;
}
static void igorplugusb_disconnect(struct usb_interface *intf)
{
struct igorplugusb *ir = usb_get_intfdata(intf);
rc_unregister_device(ir->rc);
usb_poison_urb(ir->urb);
del_timer_sync(&ir->timer);
usb_set_intfdata(intf, NULL);
usb_unpoison_urb(ir->urb);
usb_free_urb(ir->urb);
kfree(ir->buf_in);
}
static const struct usb_device_id igorplugusb_table[] = {
/* Igor Plug USB (Atmel's Manufact. ID) */
{ USB_DEVICE(0x03eb, 0x0002) },
/* Fit PC2 Infrared Adapter */
{ USB_DEVICE(0x03eb, 0x21fe) },
/* Terminating entry */
{ }
};
static struct usb_driver igorplugusb_driver = {
.name = DRIVER_NAME,
.probe = igorplugusb_probe,
.disconnect = igorplugusb_disconnect,
.id_table = igorplugusb_table
};
module_usb_driver(igorplugusb_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Sean Young <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, igorplugusb_table);
| linux-master | drivers/media/rc/igorplugusb.c |
// SPDX-License-Identifier: GPL-2.0
// rc-main.c - Remote Controller core module
//
// Copyright (C) 2009-2010 by Mauro Carvalho Chehab
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <media/rc-core.h>
#include <linux/bsearch.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/device.h>
#include <linux/module.h>
#include "rc-core-priv.h"
/* Sizes are in bytes; 256 bytes holds 16 struct rc_map_table entries (16 bytes each) */
#define IR_TAB_MIN_SIZE 256
#define IR_TAB_MAX_SIZE 8192
static const struct {
const char *name;
unsigned int repeat_period;
unsigned int scancode_bits;
} protocols[] = {
[RC_PROTO_UNKNOWN] = { .name = "unknown", .repeat_period = 125 },
[RC_PROTO_OTHER] = { .name = "other", .repeat_period = 125 },
[RC_PROTO_RC5] = { .name = "rc-5",
.scancode_bits = 0x1f7f, .repeat_period = 114 },
[RC_PROTO_RC5X_20] = { .name = "rc-5x-20",
.scancode_bits = 0x1f7f3f, .repeat_period = 114 },
[RC_PROTO_RC5_SZ] = { .name = "rc-5-sz",
.scancode_bits = 0x2fff, .repeat_period = 114 },
[RC_PROTO_JVC] = { .name = "jvc",
.scancode_bits = 0xffff, .repeat_period = 125 },
[RC_PROTO_SONY12] = { .name = "sony-12",
.scancode_bits = 0x1f007f, .repeat_period = 100 },
[RC_PROTO_SONY15] = { .name = "sony-15",
.scancode_bits = 0xff007f, .repeat_period = 100 },
[RC_PROTO_SONY20] = { .name = "sony-20",
.scancode_bits = 0x1fff7f, .repeat_period = 100 },
[RC_PROTO_NEC] = { .name = "nec",
.scancode_bits = 0xffff, .repeat_period = 110 },
[RC_PROTO_NECX] = { .name = "nec-x",
.scancode_bits = 0xffffff, .repeat_period = 110 },
[RC_PROTO_NEC32] = { .name = "nec-32",
.scancode_bits = 0xffffffff, .repeat_period = 110 },
[RC_PROTO_SANYO] = { .name = "sanyo",
.scancode_bits = 0x1fffff, .repeat_period = 125 },
[RC_PROTO_MCIR2_KBD] = { .name = "mcir2-kbd",
.scancode_bits = 0xffffff, .repeat_period = 100 },
[RC_PROTO_MCIR2_MSE] = { .name = "mcir2-mse",
.scancode_bits = 0x1fffff, .repeat_period = 100 },
[RC_PROTO_RC6_0] = { .name = "rc-6-0",
.scancode_bits = 0xffff, .repeat_period = 114 },
[RC_PROTO_RC6_6A_20] = { .name = "rc-6-6a-20",
.scancode_bits = 0xfffff, .repeat_period = 114 },
[RC_PROTO_RC6_6A_24] = { .name = "rc-6-6a-24",
.scancode_bits = 0xffffff, .repeat_period = 114 },
[RC_PROTO_RC6_6A_32] = { .name = "rc-6-6a-32",
.scancode_bits = 0xffffffff, .repeat_period = 114 },
[RC_PROTO_RC6_MCE] = { .name = "rc-6-mce",
.scancode_bits = 0xffff7fff, .repeat_period = 114 },
[RC_PROTO_SHARP] = { .name = "sharp",
.scancode_bits = 0x1fff, .repeat_period = 125 },
[RC_PROTO_XMP] = { .name = "xmp", .repeat_period = 125 },
[RC_PROTO_CEC] = { .name = "cec", .repeat_period = 0 },
[RC_PROTO_IMON] = { .name = "imon",
.scancode_bits = 0x7fffffff, .repeat_period = 114 },
[RC_PROTO_RCMM12] = { .name = "rc-mm-12",
.scancode_bits = 0x00000fff, .repeat_period = 114 },
[RC_PROTO_RCMM24] = { .name = "rc-mm-24",
.scancode_bits = 0x00ffffff, .repeat_period = 114 },
[RC_PROTO_RCMM32] = { .name = "rc-mm-32",
.scancode_bits = 0xffffffff, .repeat_period = 114 },
[RC_PROTO_XBOX_DVD] = { .name = "xbox-dvd", .repeat_period = 64 },
};
/* Used to keep track of known keymaps */
static LIST_HEAD(rc_map_list);
static DEFINE_SPINLOCK(rc_map_lock);
static struct led_trigger *led_feedback;
/* Used to keep track of rc devices */
static DEFINE_IDA(rc_ida);
static struct rc_map_list *seek_rc_map(const char *name)
{
struct rc_map_list *map = NULL;
spin_lock(&rc_map_lock);
list_for_each_entry(map, &rc_map_list, list) {
if (!strcmp(name, map->map.name)) {
spin_unlock(&rc_map_lock);
return map;
}
}
spin_unlock(&rc_map_lock);
return NULL;
}
struct rc_map *rc_map_get(const char *name)
{
struct rc_map_list *map;
map = seek_rc_map(name);
#ifdef CONFIG_MODULES
if (!map) {
int rc = request_module("%s", name);
if (rc < 0) {
pr_err("Couldn't load IR keymap %s\n", name);
return NULL;
}
msleep(20); /* Give some time for IR to register */
map = seek_rc_map(name);
}
#endif
if (!map) {
pr_err("IR keymap %s not found\n", name);
return NULL;
}
printk(KERN_INFO "Registered IR keymap %s\n", map->map.name);
return &map->map;
}
EXPORT_SYMBOL_GPL(rc_map_get);
int rc_map_register(struct rc_map_list *map)
{
spin_lock(&rc_map_lock);
list_add_tail(&map->list, &rc_map_list);
spin_unlock(&rc_map_lock);
return 0;
}
EXPORT_SYMBOL_GPL(rc_map_register);
void rc_map_unregister(struct rc_map_list *map)
{
spin_lock(&rc_map_lock);
list_del(&map->list);
spin_unlock(&rc_map_lock);
}
EXPORT_SYMBOL_GPL(rc_map_unregister);
static struct rc_map_table empty[] = {
{ 0x2a, KEY_COFFEE },
};
static struct rc_map_list empty_map = {
.map = {
.scan = empty,
.size = ARRAY_SIZE(empty),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_EMPTY,
}
};
/**
* scancode_to_u64() - converts scancode in &struct input_keymap_entry
* @ke: keymap entry containing scancode to be converted.
* @scancode: pointer to the location where converted scancode should
* be stored.
*
* This function is a version of input_scancode_to_scalar specialized for
* rc-core.
*/
static int scancode_to_u64(const struct input_keymap_entry *ke, u64 *scancode)
{
switch (ke->len) {
case 1:
*scancode = *((u8 *)ke->scancode);
break;
case 2:
*scancode = *((u16 *)ke->scancode);
break;
case 4:
*scancode = *((u32 *)ke->scancode);
break;
case 8:
*scancode = *((u64 *)ke->scancode);
break;
default:
return -EINVAL;
}
return 0;
}
/**
* ir_create_table() - initializes a scancode table
* @dev: the rc_dev device
* @rc_map: the rc_map to initialize
* @name: name to assign to the table
* @rc_proto: ir type to assign to the new table
* @size: initial size of the table
*
* This routine will initialize the rc_map and will allocate
* memory to hold at least the specified number of elements.
*
* return: zero on success or a negative error code
*/
static int ir_create_table(struct rc_dev *dev, struct rc_map *rc_map,
const char *name, u64 rc_proto, size_t size)
{
rc_map->name = kstrdup(name, GFP_KERNEL);
if (!rc_map->name)
return -ENOMEM;
rc_map->rc_proto = rc_proto;
rc_map->alloc = roundup_pow_of_two(size * sizeof(struct rc_map_table));
rc_map->size = rc_map->alloc / sizeof(struct rc_map_table);
rc_map->scan = kmalloc(rc_map->alloc, GFP_KERNEL);
if (!rc_map->scan) {
kfree(rc_map->name);
rc_map->name = NULL;
return -ENOMEM;
}
dev_dbg(&dev->dev, "Allocated space for %u keycode entries (%u bytes)\n",
rc_map->size, rc_map->alloc);
return 0;
}
/**
* ir_free_table() - frees memory allocated by a scancode table
* @rc_map: the table whose mappings need to be freed
*
 * This routine will free memory allocated for key mappings used by a given
* scancode table.
*/
static void ir_free_table(struct rc_map *rc_map)
{
rc_map->size = 0;
kfree(rc_map->name);
rc_map->name = NULL;
kfree(rc_map->scan);
rc_map->scan = NULL;
}
/**
* ir_resize_table() - resizes a scancode table if necessary
* @dev: the rc_dev device
* @rc_map: the rc_map to resize
* @gfp_flags: gfp flags to use when allocating memory
*
* This routine will shrink the rc_map if it has lots of
* unused entries and grow it if it is full.
*
* return: zero on success or a negative error code
*/
static int ir_resize_table(struct rc_dev *dev, struct rc_map *rc_map,
gfp_t gfp_flags)
{
unsigned int oldalloc = rc_map->alloc;
unsigned int newalloc = oldalloc;
struct rc_map_table *oldscan = rc_map->scan;
struct rc_map_table *newscan;
if (rc_map->size == rc_map->len) {
/* All entries in use -> grow keytable */
if (rc_map->alloc >= IR_TAB_MAX_SIZE)
return -ENOMEM;
newalloc *= 2;
dev_dbg(&dev->dev, "Growing table to %u bytes\n", newalloc);
}
if ((rc_map->len * 3 < rc_map->size) && (oldalloc > IR_TAB_MIN_SIZE)) {
/* Less than 1/3 of entries in use -> shrink keytable */
newalloc /= 2;
dev_dbg(&dev->dev, "Shrinking table to %u bytes\n", newalloc);
}
if (newalloc == oldalloc)
return 0;
newscan = kmalloc(newalloc, gfp_flags);
if (!newscan)
return -ENOMEM;
memcpy(newscan, rc_map->scan, rc_map->len * sizeof(struct rc_map_table));
rc_map->scan = newscan;
rc_map->alloc = newalloc;
rc_map->size = rc_map->alloc / sizeof(struct rc_map_table);
kfree(oldscan);
return 0;
}
/**
* ir_update_mapping() - set a keycode in the scancode->keycode table
* @dev: the struct rc_dev device descriptor
* @rc_map: scancode table to be adjusted
* @index: index of the mapping that needs to be updated
* @new_keycode: the desired keycode
*
* This routine is used to update scancode->keycode mapping at given
* position.
*
* return: previous keycode assigned to the mapping
*
*/
static unsigned int ir_update_mapping(struct rc_dev *dev,
struct rc_map *rc_map,
unsigned int index,
unsigned int new_keycode)
{
int old_keycode = rc_map->scan[index].keycode;
int i;
/* Did the user wish to remove the mapping? */
if (new_keycode == KEY_RESERVED || new_keycode == KEY_UNKNOWN) {
dev_dbg(&dev->dev, "#%d: Deleting scan 0x%04llx\n",
index, rc_map->scan[index].scancode);
rc_map->len--;
		memmove(&rc_map->scan[index], &rc_map->scan[index + 1],
(rc_map->len - index) * sizeof(struct rc_map_table));
} else {
dev_dbg(&dev->dev, "#%d: %s scan 0x%04llx with key 0x%04x\n",
index,
old_keycode == KEY_RESERVED ? "New" : "Replacing",
rc_map->scan[index].scancode, new_keycode);
rc_map->scan[index].keycode = new_keycode;
__set_bit(new_keycode, dev->input_dev->keybit);
}
if (old_keycode != KEY_RESERVED) {
/* A previous mapping was updated... */
__clear_bit(old_keycode, dev->input_dev->keybit);
/* ... but another scancode might use the same keycode */
for (i = 0; i < rc_map->len; i++) {
if (rc_map->scan[i].keycode == old_keycode) {
__set_bit(old_keycode, dev->input_dev->keybit);
break;
}
}
/* Possibly shrink the keytable, failure is not a problem */
ir_resize_table(dev, rc_map, GFP_ATOMIC);
}
return old_keycode;
}
/**
* ir_establish_scancode() - set a keycode in the scancode->keycode table
* @dev: the struct rc_dev device descriptor
* @rc_map: scancode table to be searched
* @scancode: the desired scancode
 * @resize: controls whether we are allowed to resize the table to
 *	accommodate scancodes not yet present
 *
 * This routine is used to locate the given scancode in the rc_map.
 * If the scancode is not yet present, the routine will allocate a new slot
 * for it.
*
* return: index of the mapping containing scancode in question
* or -1U in case of failure.
*/
static unsigned int ir_establish_scancode(struct rc_dev *dev,
struct rc_map *rc_map,
u64 scancode, bool resize)
{
unsigned int i;
/*
* Unfortunately, some hardware-based IR decoders don't provide
* all bits for the complete IR code. In general, they provide only
	 * the command part of the IR code. Yet, since the provided remote can
	 * be replaced with a different one, we need to allow loading IR tables
	 * from other remotes. So, we support specifying a mask to
* indicate the valid bits of the scancodes.
*/
if (dev->scancode_mask)
scancode &= dev->scancode_mask;
/* First check if we already have a mapping for this ir command */
for (i = 0; i < rc_map->len; i++) {
if (rc_map->scan[i].scancode == scancode)
return i;
/* Keytable is sorted from lowest to highest scancode */
if (rc_map->scan[i].scancode >= scancode)
break;
}
/* No previous mapping found, we might need to grow the table */
if (rc_map->size == rc_map->len) {
if (!resize || ir_resize_table(dev, rc_map, GFP_ATOMIC))
return -1U;
}
/* i is the proper index to insert our new keycode */
if (i < rc_map->len)
memmove(&rc_map->scan[i + 1], &rc_map->scan[i],
(rc_map->len - i) * sizeof(struct rc_map_table));
rc_map->scan[i].scancode = scancode;
rc_map->scan[i].keycode = KEY_RESERVED;
rc_map->len++;
return i;
}
/**
* ir_setkeycode() - set a keycode in the scancode->keycode table
* @idev: the struct input_dev device descriptor
* @ke: Input keymap entry
 * @old_keycode: used to return the previous keycode
*
 * This routine is used to handle the evdev EVIOCSKEYCODE ioctl.
*
* return: -EINVAL if the keycode could not be inserted, otherwise zero.
*/
static int ir_setkeycode(struct input_dev *idev,
const struct input_keymap_entry *ke,
unsigned int *old_keycode)
{
struct rc_dev *rdev = input_get_drvdata(idev);
struct rc_map *rc_map = &rdev->rc_map;
unsigned int index;
u64 scancode;
int retval = 0;
unsigned long flags;
spin_lock_irqsave(&rc_map->lock, flags);
if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
index = ke->index;
if (index >= rc_map->len) {
retval = -EINVAL;
goto out;
}
} else {
retval = scancode_to_u64(ke, &scancode);
if (retval)
goto out;
index = ir_establish_scancode(rdev, rc_map, scancode, true);
if (index >= rc_map->len) {
retval = -ENOMEM;
goto out;
}
}
*old_keycode = ir_update_mapping(rdev, rc_map, index, ke->keycode);
out:
spin_unlock_irqrestore(&rc_map->lock, flags);
return retval;
}
/**
* ir_setkeytable() - sets several entries in the scancode->keycode table
* @dev: the struct rc_dev device descriptor
* @from: the struct rc_map to copy entries from
*
* This routine is used to handle table initialization.
*
 * return: -ENOMEM if not all keycodes could be inserted, otherwise zero.
*/
static int ir_setkeytable(struct rc_dev *dev, const struct rc_map *from)
{
struct rc_map *rc_map = &dev->rc_map;
unsigned int i, index;
int rc;
rc = ir_create_table(dev, rc_map, from->name, from->rc_proto,
from->size);
if (rc)
return rc;
for (i = 0; i < from->size; i++) {
index = ir_establish_scancode(dev, rc_map,
from->scan[i].scancode, false);
if (index >= rc_map->len) {
rc = -ENOMEM;
break;
}
ir_update_mapping(dev, rc_map, index,
from->scan[i].keycode);
}
if (rc)
ir_free_table(rc_map);
return rc;
}
static int rc_map_cmp(const void *key, const void *elt)
{
const u64 *scancode = key;
const struct rc_map_table *e = elt;
if (*scancode < e->scancode)
return -1;
else if (*scancode > e->scancode)
return 1;
return 0;
}
/**
* ir_lookup_by_scancode() - locate mapping by scancode
* @rc_map: the struct rc_map to search
* @scancode: scancode to look for in the table
*
 * This routine performs binary search in the RC keymap table for the
* given scancode.
*
* return: index in the table, -1U if not found
*/
static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map,
u64 scancode)
{
struct rc_map_table *res;
res = bsearch(&scancode, rc_map->scan, rc_map->len,
sizeof(struct rc_map_table), rc_map_cmp);
if (!res)
return -1U;
else
return res - rc_map->scan;
}
/**
* ir_getkeycode() - get a keycode from the scancode->keycode table
* @idev: the struct input_dev device descriptor
* @ke: Input keymap entry
*
 * This routine is used to handle the evdev EVIOCGKEYCODE ioctl.
*
 * return: zero on success or a negative error code.
*/
static int ir_getkeycode(struct input_dev *idev,
struct input_keymap_entry *ke)
{
struct rc_dev *rdev = input_get_drvdata(idev);
struct rc_map *rc_map = &rdev->rc_map;
struct rc_map_table *entry;
unsigned long flags;
unsigned int index;
u64 scancode;
int retval;
spin_lock_irqsave(&rc_map->lock, flags);
if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
index = ke->index;
} else {
retval = scancode_to_u64(ke, &scancode);
if (retval)
goto out;
index = ir_lookup_by_scancode(rc_map, scancode);
}
if (index < rc_map->len) {
entry = &rc_map->scan[index];
ke->index = index;
ke->keycode = entry->keycode;
ke->len = sizeof(entry->scancode);
memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
} else if (!(ke->flags & INPUT_KEYMAP_BY_INDEX)) {
/*
* We do not really know the valid range of scancodes
* so let's respond with KEY_RESERVED to anything we
* do not have mapping for [yet].
*/
ke->index = index;
ke->keycode = KEY_RESERVED;
} else {
retval = -EINVAL;
goto out;
}
retval = 0;
out:
spin_unlock_irqrestore(&rc_map->lock, flags);
return retval;
}
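/*
 * Userspace sketch (illustrative only; fd is assumed to be an open
 * evdev file descriptor): dumping the first mapping via the ioctl
 * that lands in ir_getkeycode():
 *
 *	struct input_keymap_entry ke = {
 *		.flags = INPUT_KEYMAP_BY_INDEX,
 *		.index = 0,
 *	};
 *	if (ioctl(fd, EVIOCGKEYCODE_V2, &ke) == 0)
 *		printf("keycode 0x%x\n", ke.keycode);
 */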
/**
* rc_g_keycode_from_table() - gets the keycode that corresponds to a scancode
* @dev: the struct rc_dev descriptor of the device
* @scancode: the scancode to look for
*
* This routine is used by drivers which need to convert a scancode to a
* keycode. Normally it should not be used since drivers should have no
* interest in keycodes.
*
* return: the corresponding keycode, or KEY_RESERVED
*/
u32 rc_g_keycode_from_table(struct rc_dev *dev, u64 scancode)
{
struct rc_map *rc_map = &dev->rc_map;
unsigned int keycode;
unsigned int index;
unsigned long flags;
spin_lock_irqsave(&rc_map->lock, flags);
index = ir_lookup_by_scancode(rc_map, scancode);
keycode = index < rc_map->len ?
rc_map->scan[index].keycode : KEY_RESERVED;
spin_unlock_irqrestore(&rc_map->lock, flags);
if (keycode != KEY_RESERVED)
dev_dbg(&dev->dev, "%s: scancode 0x%04llx keycode 0x%02x\n",
dev->device_name, scancode, keycode);
return keycode;
}
EXPORT_SYMBOL_GPL(rc_g_keycode_from_table);
/**
* ir_do_keyup() - internal function to signal the release of a keypress
* @dev: the struct rc_dev descriptor of the device
* @sync: whether or not to call input_sync
*
 * This function is used internally to release a keypress; it must be
 * called with keylock held.
*/
static void ir_do_keyup(struct rc_dev *dev, bool sync)
{
if (!dev->keypressed)
return;
dev_dbg(&dev->dev, "keyup key 0x%04x\n", dev->last_keycode);
del_timer(&dev->timer_repeat);
input_report_key(dev->input_dev, dev->last_keycode, 0);
led_trigger_event(led_feedback, LED_OFF);
if (sync)
input_sync(dev->input_dev);
dev->keypressed = false;
}
/**
* rc_keyup() - signals the release of a keypress
* @dev: the struct rc_dev descriptor of the device
*
* This routine is used to signal that a key has been released on the
* remote control.
*/
void rc_keyup(struct rc_dev *dev)
{
unsigned long flags;
spin_lock_irqsave(&dev->keylock, flags);
ir_do_keyup(dev, true);
spin_unlock_irqrestore(&dev->keylock, flags);
}
EXPORT_SYMBOL_GPL(rc_keyup);
/**
* ir_timer_keyup() - generates a keyup event after a timeout
*
* @t: a pointer to the struct timer_list
*
* This routine will generate a keyup event some time after a keydown event
* is generated when no further activity has been detected.
*/
static void ir_timer_keyup(struct timer_list *t)
{
struct rc_dev *dev = from_timer(dev, t, timer_keyup);
unsigned long flags;
/*
	 * dev->keyup_jiffies is used to prevent a race condition if a
* hardware interrupt occurs at this point and the keyup timer
* event is moved further into the future as a result.
*
* The timer will then be reactivated and this function called
* again in the future. We need to exit gracefully in that case
* to allow the input subsystem to do its auto-repeat magic or
* a keyup event might follow immediately after the keydown.
*/
spin_lock_irqsave(&dev->keylock, flags);
if (time_is_before_eq_jiffies(dev->keyup_jiffies))
ir_do_keyup(dev, true);
spin_unlock_irqrestore(&dev->keylock, flags);
}
/**
* ir_timer_repeat() - generates a repeat event after a timeout
*
* @t: a pointer to the struct timer_list
*
* This routine will generate a soft repeat event every REP_PERIOD
* milliseconds.
*/
static void ir_timer_repeat(struct timer_list *t)
{
struct rc_dev *dev = from_timer(dev, t, timer_repeat);
struct input_dev *input = dev->input_dev;
unsigned long flags;
spin_lock_irqsave(&dev->keylock, flags);
if (dev->keypressed) {
input_event(input, EV_KEY, dev->last_keycode, 2);
input_sync(input);
if (input->rep[REP_PERIOD])
mod_timer(&dev->timer_repeat, jiffies +
msecs_to_jiffies(input->rep[REP_PERIOD]));
}
spin_unlock_irqrestore(&dev->keylock, flags);
}
static unsigned int repeat_period(int protocol)
{
if (protocol >= ARRAY_SIZE(protocols))
return 100;
return protocols[protocol].repeat_period;
}
/**
* rc_repeat() - signals that a key is still pressed
* @dev: the struct rc_dev descriptor of the device
*
* This routine is used by IR decoders when a repeat message which does
* not include the necessary bits to reproduce the scancode has been
* received.
*/
void rc_repeat(struct rc_dev *dev)
{
unsigned long flags;
unsigned int timeout = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(dev->last_protocol));
struct lirc_scancode sc = {
.scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
.keycode = dev->keypressed ? dev->last_keycode : KEY_RESERVED,
.flags = LIRC_SCANCODE_FLAG_REPEAT |
(dev->last_toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0)
};
if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
lirc_scancode_event(dev, &sc);
spin_lock_irqsave(&dev->keylock, flags);
if (dev->last_scancode <= U32_MAX) {
input_event(dev->input_dev, EV_MSC, MSC_SCAN,
dev->last_scancode);
input_sync(dev->input_dev);
}
if (dev->keypressed) {
dev->keyup_jiffies = jiffies + timeout;
mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
}
spin_unlock_irqrestore(&dev->keylock, flags);
}
EXPORT_SYMBOL_GPL(rc_repeat);
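/*
 * Driver-side sketch (illustrative; frame_is_repeat is a placeholder
 * for the decoder's own repeat detection): on an NEC repeat frame,
 * which carries no scancode bits, simply extend the current keypress:
 *
 *	if (frame_is_repeat)
 *		rc_repeat(dev);
 */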
/**
* ir_do_keydown() - internal function to process a keypress
* @dev: the struct rc_dev descriptor of the device
* @protocol: the protocol of the keypress
* @scancode: the scancode of the keypress
* @keycode: the keycode of the keypress
* @toggle: the toggle value of the keypress
*
 * This function is used internally to register a keypress; it must be
 * called with keylock held.
*/
static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol,
u64 scancode, u32 keycode, u8 toggle)
{
bool new_event = (!dev->keypressed ||
dev->last_protocol != protocol ||
dev->last_scancode != scancode ||
dev->last_toggle != toggle);
struct lirc_scancode sc = {
.scancode = scancode, .rc_proto = protocol,
.flags = (toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0) |
(!new_event ? LIRC_SCANCODE_FLAG_REPEAT : 0),
.keycode = keycode
};
if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
lirc_scancode_event(dev, &sc);
if (new_event && dev->keypressed)
ir_do_keyup(dev, false);
if (scancode <= U32_MAX)
input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
dev->last_protocol = protocol;
dev->last_scancode = scancode;
dev->last_toggle = toggle;
dev->last_keycode = keycode;
if (new_event && keycode != KEY_RESERVED) {
/* Register a keypress */
dev->keypressed = true;
dev_dbg(&dev->dev, "%s: key down event, key 0x%04x, protocol 0x%04x, scancode 0x%08llx\n",
dev->device_name, keycode, protocol, scancode);
input_report_key(dev->input_dev, keycode, 1);
led_trigger_event(led_feedback, LED_FULL);
}
/*
* For CEC, start sending repeat messages as soon as the first
* repeated message is sent, as long as REP_DELAY = 0 and REP_PERIOD
* is non-zero. Otherwise, the input layer will generate repeat
* messages.
*/
if (!new_event && keycode != KEY_RESERVED &&
dev->allowed_protocols == RC_PROTO_BIT_CEC &&
!timer_pending(&dev->timer_repeat) &&
dev->input_dev->rep[REP_PERIOD] &&
!dev->input_dev->rep[REP_DELAY]) {
input_event(dev->input_dev, EV_KEY, keycode, 2);
mod_timer(&dev->timer_repeat, jiffies +
msecs_to_jiffies(dev->input_dev->rep[REP_PERIOD]));
}
input_sync(dev->input_dev);
}
/**
* rc_keydown() - generates input event for a key press
* @dev: the struct rc_dev descriptor of the device
* @protocol: the protocol for the keypress
* @scancode: the scancode for the keypress
* @toggle: the toggle value (protocol dependent, if the protocol doesn't
* support toggle values, this should be set to zero)
*
* This routine is used to signal that a key has been pressed on the
* remote control.
*/
void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode,
u8 toggle)
{
unsigned long flags;
u32 keycode = rc_g_keycode_from_table(dev, scancode);
spin_lock_irqsave(&dev->keylock, flags);
ir_do_keydown(dev, protocol, scancode, keycode, toggle);
if (dev->keypressed) {
dev->keyup_jiffies = jiffies + usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(protocol));
mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
}
spin_unlock_irqrestore(&dev->keylock, flags);
}
EXPORT_SYMBOL_GPL(rc_keydown);
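/*
 * Driver-side sketch (illustrative; scancode and toggle are assumed to
 * come from the driver's decoder): report an RC-5 keypress and let the
 * keyup timer expire it automatically:
 *
 *	rc_keydown(dev, RC_PROTO_RC5, scancode, toggle);
 */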
/**
* rc_keydown_notimeout() - generates input event for a key press without
* an automatic keyup event at a later time
* @dev: the struct rc_dev descriptor of the device
* @protocol: the protocol for the keypress
* @scancode: the scancode for the keypress
* @toggle: the toggle value (protocol dependent, if the protocol doesn't
* support toggle values, this should be set to zero)
*
* This routine is used to signal that a key has been pressed on the
* remote control. The driver must manually call rc_keyup() at a later stage.
*/
void rc_keydown_notimeout(struct rc_dev *dev, enum rc_proto protocol,
u64 scancode, u8 toggle)
{
unsigned long flags;
u32 keycode = rc_g_keycode_from_table(dev, scancode);
spin_lock_irqsave(&dev->keylock, flags);
ir_do_keydown(dev, protocol, scancode, keycode, toggle);
spin_unlock_irqrestore(&dev->keylock, flags);
}
EXPORT_SYMBOL_GPL(rc_keydown_notimeout);
/**
* rc_validate_scancode() - checks that a scancode is valid for a protocol.
 * For NEC, it should do the opposite of ir_nec_bytes_to_scancode()
* @proto: protocol
* @scancode: scancode
*/
bool rc_validate_scancode(enum rc_proto proto, u32 scancode)
{
switch (proto) {
/*
* NECX has a 16-bit address; if the lower 8 bits match the upper
* 8 bits inverted, then the address would match regular nec.
*/
case RC_PROTO_NECX:
if ((((scancode >> 16) ^ ~(scancode >> 8)) & 0xff) == 0)
return false;
break;
/*
* NEC32 has a 16 bit address and 16 bit command. If the lower 8 bits
* of the command match the upper 8 bits inverted, then it would
* be either NEC or NECX.
*/
case RC_PROTO_NEC32:
if ((((scancode >> 8) ^ ~scancode) & 0xff) == 0)
return false;
break;
/*
	 * If the customer code (upper 16 bits) is 0x800f, it is MCE, else it
	 * is regular mode-6A 32 bit
*/
case RC_PROTO_RC6_MCE:
if ((scancode & 0xffff0000) != 0x800f0000)
return false;
break;
case RC_PROTO_RC6_6A_32:
if ((scancode & 0xffff0000) == 0x800f0000)
return false;
break;
default:
break;
}
return true;
}
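/*
 * Worked example for the NEC32 check above: a scancode whose low 16
 * bits are 0x12ed fails, since ((0x12 ^ ~0xed) & 0xff) == 0, i.e. the
 * command byte matches its inverse, so the code is really plain
 * NEC/NECX. Low bits of 0x12ee pass and the scancode is accepted.
 */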
/**
* rc_validate_filter() - checks that the scancode and mask are valid and
* provides sensible defaults
* @dev: the struct rc_dev descriptor of the device
* @filter: the scancode and mask
*
* return: 0 or -EINVAL if the filter is not valid
*/
static int rc_validate_filter(struct rc_dev *dev,
struct rc_scancode_filter *filter)
{
u32 mask, s = filter->data;
enum rc_proto protocol = dev->wakeup_protocol;
if (protocol >= ARRAY_SIZE(protocols))
return -EINVAL;
mask = protocols[protocol].scancode_bits;
if (!rc_validate_scancode(protocol, s))
return -EINVAL;
filter->data &= mask;
filter->mask &= mask;
/*
* If we have to raw encode the IR for wakeup, we cannot have a mask
*/
if (dev->encode_wakeup && filter->mask != 0 && filter->mask != mask)
return -EINVAL;
return 0;
}
int rc_open(struct rc_dev *rdev)
{
int rval = 0;
if (!rdev)
return -EINVAL;
mutex_lock(&rdev->lock);
if (!rdev->registered) {
rval = -ENODEV;
} else {
if (!rdev->users++ && rdev->open)
rval = rdev->open(rdev);
if (rval)
rdev->users--;
}
mutex_unlock(&rdev->lock);
return rval;
}
static int ir_open(struct input_dev *idev)
{
struct rc_dev *rdev = input_get_drvdata(idev);
return rc_open(rdev);
}
void rc_close(struct rc_dev *rdev)
{
if (rdev) {
mutex_lock(&rdev->lock);
if (!--rdev->users && rdev->close && rdev->registered)
rdev->close(rdev);
mutex_unlock(&rdev->lock);
}
}
static void ir_close(struct input_dev *idev)
{
struct rc_dev *rdev = input_get_drvdata(idev);
rc_close(rdev);
}
/* class for /sys/class/rc */
static char *rc_devnode(const struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "rc/%s", dev_name(dev));
}
static struct class rc_class = {
.name = "rc",
.devnode = rc_devnode,
};
/*
* These are the protocol textual descriptions that are
* used by the sysfs protocols file. Note that the order
* of the entries is relevant.
*/
static const struct {
u64 type;
const char *name;
const char *module_name;
} proto_names[] = {
{ RC_PROTO_BIT_NONE, "none", NULL },
{ RC_PROTO_BIT_OTHER, "other", NULL },
{ RC_PROTO_BIT_UNKNOWN, "unknown", NULL },
{ RC_PROTO_BIT_RC5 |
RC_PROTO_BIT_RC5X_20, "rc-5", "ir-rc5-decoder" },
{ RC_PROTO_BIT_NEC |
RC_PROTO_BIT_NECX |
RC_PROTO_BIT_NEC32, "nec", "ir-nec-decoder" },
{ RC_PROTO_BIT_RC6_0 |
RC_PROTO_BIT_RC6_6A_20 |
RC_PROTO_BIT_RC6_6A_24 |
RC_PROTO_BIT_RC6_6A_32 |
RC_PROTO_BIT_RC6_MCE, "rc-6", "ir-rc6-decoder" },
{ RC_PROTO_BIT_JVC, "jvc", "ir-jvc-decoder" },
{ RC_PROTO_BIT_SONY12 |
RC_PROTO_BIT_SONY15 |
RC_PROTO_BIT_SONY20, "sony", "ir-sony-decoder" },
{ RC_PROTO_BIT_RC5_SZ, "rc-5-sz", "ir-rc5-decoder" },
{ RC_PROTO_BIT_SANYO, "sanyo", "ir-sanyo-decoder" },
{ RC_PROTO_BIT_SHARP, "sharp", "ir-sharp-decoder" },
{ RC_PROTO_BIT_MCIR2_KBD |
RC_PROTO_BIT_MCIR2_MSE, "mce_kbd", "ir-mce_kbd-decoder" },
{ RC_PROTO_BIT_XMP, "xmp", "ir-xmp-decoder" },
{ RC_PROTO_BIT_CEC, "cec", NULL },
{ RC_PROTO_BIT_IMON, "imon", "ir-imon-decoder" },
{ RC_PROTO_BIT_RCMM12 |
RC_PROTO_BIT_RCMM24 |
RC_PROTO_BIT_RCMM32, "rc-mm", "ir-rcmm-decoder" },
{ RC_PROTO_BIT_XBOX_DVD, "xbox-dvd", NULL },
};
/**
* struct rc_filter_attribute - Device attribute relating to a filter type.
* @attr: Device attribute.
* @type: Filter type.
* @mask: false for filter value, true for filter mask.
*/
struct rc_filter_attribute {
struct device_attribute attr;
enum rc_filter_type type;
bool mask;
};
#define to_rc_filter_attr(a) container_of(a, struct rc_filter_attribute, attr)
#define RC_FILTER_ATTR(_name, _mode, _show, _store, _type, _mask) \
struct rc_filter_attribute dev_attr_##_name = { \
.attr = __ATTR(_name, _mode, _show, _store), \
.type = (_type), \
.mask = (_mask), \
}
/**
* show_protocols() - shows the current IR protocol(s)
* @device: the device descriptor
* @mattr: the device attribute struct
* @buf: a pointer to the output buffer
*
 * This routine is a callback routine used to read the current IR protocol(s).
 * It is triggered by reading /sys/class/rc/rc?/protocols.
* It returns the protocol names of supported protocols.
* Enabled protocols are printed in brackets.
*
* dev->lock is taken to guard against races between
* store_protocols and show_protocols.
*/
static ssize_t show_protocols(struct device *device,
struct device_attribute *mattr, char *buf)
{
struct rc_dev *dev = to_rc_dev(device);
u64 allowed, enabled;
char *tmp = buf;
int i;
mutex_lock(&dev->lock);
enabled = dev->enabled_protocols;
allowed = dev->allowed_protocols;
if (dev->raw && !allowed)
allowed = ir_raw_get_allowed_protocols();
mutex_unlock(&dev->lock);
dev_dbg(&dev->dev, "%s: allowed - 0x%llx, enabled - 0x%llx\n",
__func__, (long long)allowed, (long long)enabled);
for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
if (allowed & enabled & proto_names[i].type)
tmp += sprintf(tmp, "[%s] ", proto_names[i].name);
else if (allowed & proto_names[i].type)
tmp += sprintf(tmp, "%s ", proto_names[i].name);
if (allowed & proto_names[i].type)
allowed &= ~proto_names[i].type;
}
#ifdef CONFIG_LIRC
if (dev->driver_type == RC_DRIVER_IR_RAW)
tmp += sprintf(tmp, "[lirc] ");
#endif
if (tmp != buf)
tmp--;
*tmp = '\n';
return tmp + 1 - buf;
}
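/*
 * Example output (illustrative; the actual protocols depend on the
 * device): with nec enabled and the rest merely supported, reading
 * the file could yield:
 *
 *	rc-5 [nec] rc-6 jvc sony [lirc]
 */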
/**
* parse_protocol_change() - parses a protocol change request
* @dev: rc_dev device
* @protocols: pointer to the bitmask of current protocols
* @buf: pointer to the buffer with a list of changes
*
* Writing "+proto" will add a protocol to the protocol mask.
* Writing "-proto" will remove a protocol from protocol mask.
* Writing "proto" will enable only "proto".
* Writing "none" will disable all protocols.
* Returns the number of changes performed or a negative error code.
*/
static int parse_protocol_change(struct rc_dev *dev, u64 *protocols,
const char *buf)
{
const char *tmp;
unsigned count = 0;
bool enable, disable;
u64 mask;
int i;
while ((tmp = strsep((char **)&buf, " \n")) != NULL) {
if (!*tmp)
break;
if (*tmp == '+') {
enable = true;
disable = false;
tmp++;
} else if (*tmp == '-') {
enable = false;
disable = true;
tmp++;
} else {
enable = false;
disable = false;
}
for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
if (!strcasecmp(tmp, proto_names[i].name)) {
mask = proto_names[i].type;
break;
}
}
if (i == ARRAY_SIZE(proto_names)) {
if (!strcasecmp(tmp, "lirc"))
mask = 0;
else {
dev_dbg(&dev->dev, "Unknown protocol: '%s'\n",
tmp);
return -EINVAL;
}
}
count++;
if (enable)
*protocols |= mask;
else if (disable)
*protocols &= ~mask;
else
*protocols = mask;
}
if (!count) {
dev_dbg(&dev->dev, "Protocol not specified\n");
return -EINVAL;
}
return count;
}
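/*
 * Illustrative sysfs usage (rc0 is an assumed device name):
 *
 *	echo "+nec -rc-5" > /sys/class/rc/rc0/protocols	# adjust the mask
 *	echo "rc-6" > /sys/class/rc/rc0/protocols	# enable only rc-6
 *	echo "none" > /sys/class/rc/rc0/protocols	# disable everything
 */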
void ir_raw_load_modules(u64 *protocols)
{
u64 available;
int i, ret;
for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
if (proto_names[i].type == RC_PROTO_BIT_NONE ||
proto_names[i].type & (RC_PROTO_BIT_OTHER |
RC_PROTO_BIT_UNKNOWN))
continue;
available = ir_raw_get_allowed_protocols();
if (!(*protocols & proto_names[i].type & ~available))
continue;
if (!proto_names[i].module_name) {
pr_err("Can't enable IR protocol %s\n",
proto_names[i].name);
*protocols &= ~proto_names[i].type;
continue;
}
ret = request_module("%s", proto_names[i].module_name);
if (ret < 0) {
pr_err("Couldn't load IR protocol module %s\n",
proto_names[i].module_name);
*protocols &= ~proto_names[i].type;
continue;
}
msleep(20);
available = ir_raw_get_allowed_protocols();
if (!(*protocols & proto_names[i].type & ~available))
continue;
pr_err("Loaded IR protocol module %s, but protocol %s still not available\n",
proto_names[i].module_name,
proto_names[i].name);
*protocols &= ~proto_names[i].type;
}
}
/**
* store_protocols() - changes the current/wakeup IR protocol(s)
* @device: the device descriptor
* @mattr: the device attribute struct
* @buf: a pointer to the input buffer
* @len: length of the input buffer
*
* This routine is for changing the IR protocol type.
* It is triggered by writing to /sys/class/rc/rc?/[wakeup_]protocols.
* See parse_protocol_change() for the valid commands.
* Returns @len on success or a negative error code.
*
* dev->lock is taken to guard against races between
* store_protocols and show_protocols.
*/
static ssize_t store_protocols(struct device *device,
struct device_attribute *mattr,
const char *buf, size_t len)
{
struct rc_dev *dev = to_rc_dev(device);
u64 *current_protocols;
struct rc_scancode_filter *filter;
u64 old_protocols, new_protocols;
ssize_t rc;
dev_dbg(&dev->dev, "Normal protocol change requested\n");
current_protocols = &dev->enabled_protocols;
filter = &dev->scancode_filter;
if (!dev->change_protocol) {
dev_dbg(&dev->dev, "Protocol switching not supported\n");
return -EINVAL;
}
mutex_lock(&dev->lock);
if (!dev->registered) {
mutex_unlock(&dev->lock);
return -ENODEV;
}
old_protocols = *current_protocols;
new_protocols = old_protocols;
rc = parse_protocol_change(dev, &new_protocols, buf);
if (rc < 0)
goto out;
if (dev->driver_type == RC_DRIVER_IR_RAW)
ir_raw_load_modules(&new_protocols);
rc = dev->change_protocol(dev, &new_protocols);
if (rc < 0) {
dev_dbg(&dev->dev, "Error setting protocols to 0x%llx\n",
(long long)new_protocols);
goto out;
}
if (new_protocols != old_protocols) {
*current_protocols = new_protocols;
dev_dbg(&dev->dev, "Protocols changed to 0x%llx\n",
(long long)new_protocols);
}
/*
* If a protocol change was attempted the filter may need updating, even
* if the actual protocol mask hasn't changed (since the driver may have
* cleared the filter).
* Try setting the same filter with the new protocol (if any).
* Fall back to clearing the filter.
*/
if (dev->s_filter && filter->mask) {
if (new_protocols)
rc = dev->s_filter(dev, filter);
else
rc = -1;
if (rc < 0) {
filter->data = 0;
filter->mask = 0;
dev->s_filter(dev, filter);
}
}
rc = len;
out:
mutex_unlock(&dev->lock);
return rc;
}
/**
* show_filter() - shows the current scancode filter value or mask
* @device: the device descriptor
* @attr: the device attribute struct
* @buf: a pointer to the output buffer
*
* This routine is a callback routine to read a scancode filter value or mask.
* It is triggered by reading /sys/class/rc/rc?/[wakeup_]filter[_mask].
* It prints the current scancode filter value or mask of the appropriate filter
* type in hexadecimal into @buf and returns the size of the buffer.
*
* Bits of the filter value corresponding to set bits in the filter mask are
* compared against input scancodes and non-matching scancodes are discarded.
*
* dev->lock is taken to guard against races between
* store_filter and show_filter.
*/
static ssize_t show_filter(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct rc_dev *dev = to_rc_dev(device);
struct rc_filter_attribute *fattr = to_rc_filter_attr(attr);
struct rc_scancode_filter *filter;
u32 val;
mutex_lock(&dev->lock);
if (fattr->type == RC_FILTER_NORMAL)
filter = &dev->scancode_filter;
else
filter = &dev->scancode_wakeup_filter;
if (fattr->mask)
val = filter->mask;
else
val = filter->data;
mutex_unlock(&dev->lock);
return sprintf(buf, "%#x\n", val);
}
/**
* store_filter() - changes the scancode filter value
* @device: the device descriptor
* @attr: the device attribute struct
* @buf: a pointer to the input buffer
* @len: length of the input buffer
*
* This routine is for changing a scancode filter value or mask.
* It is triggered by writing to /sys/class/rc/rc?/[wakeup_]filter[_mask].
* Returns -EINVAL if an invalid filter value for the current protocol was
* specified or if scancode filtering is not supported by the driver, otherwise
* returns @len.
*
* Bits of the filter value corresponding to set bits in the filter mask are
* compared against input scancodes and non-matching scancodes are discarded.
*
* dev->lock is taken to guard against races between
* store_filter and show_filter.
*/
static ssize_t store_filter(struct device *device,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct rc_dev *dev = to_rc_dev(device);
struct rc_filter_attribute *fattr = to_rc_filter_attr(attr);
struct rc_scancode_filter new_filter, *filter;
int ret;
unsigned long val;
int (*set_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter);
ret = kstrtoul(buf, 0, &val);
if (ret < 0)
return ret;
if (fattr->type == RC_FILTER_NORMAL) {
set_filter = dev->s_filter;
filter = &dev->scancode_filter;
} else {
set_filter = dev->s_wakeup_filter;
filter = &dev->scancode_wakeup_filter;
}
if (!set_filter)
return -EINVAL;
mutex_lock(&dev->lock);
if (!dev->registered) {
mutex_unlock(&dev->lock);
return -ENODEV;
}
new_filter = *filter;
if (fattr->mask)
new_filter.mask = val;
else
new_filter.data = val;
if (fattr->type == RC_FILTER_WAKEUP) {
/*
* Refuse to set a filter unless a protocol is enabled
* and the filter is valid for that protocol
*/
if (dev->wakeup_protocol != RC_PROTO_UNKNOWN)
ret = rc_validate_filter(dev, &new_filter);
else
ret = -EINVAL;
if (ret != 0)
goto unlock;
}
if (fattr->type == RC_FILTER_NORMAL && !dev->enabled_protocols &&
val) {
/* refuse to set a filter unless a protocol is enabled */
ret = -EINVAL;
goto unlock;
}
ret = set_filter(dev, &new_filter);
if (ret < 0)
goto unlock;
*filter = new_filter;
unlock:
mutex_unlock(&dev->lock);
return (ret < 0) ? ret : len;
}
/**
* show_wakeup_protocols() - shows the wakeup IR protocol
* @device: the device descriptor
* @mattr: the device attribute struct
* @buf: a pointer to the output buffer
*
 * This routine is a callback routine used to read the wakeup IR protocol(s).
 * It is triggered by reading /sys/class/rc/rc?/wakeup_protocols.
 * It returns the protocol names of supported protocols.
 * The enabled protocol is printed in brackets.
*
* dev->lock is taken to guard against races between
* store_wakeup_protocols and show_wakeup_protocols.
*/
static ssize_t show_wakeup_protocols(struct device *device,
struct device_attribute *mattr,
char *buf)
{
struct rc_dev *dev = to_rc_dev(device);
u64 allowed;
enum rc_proto enabled;
char *tmp = buf;
int i;
mutex_lock(&dev->lock);
allowed = dev->allowed_wakeup_protocols;
enabled = dev->wakeup_protocol;
mutex_unlock(&dev->lock);
dev_dbg(&dev->dev, "%s: allowed - 0x%llx, enabled - %d\n",
__func__, (long long)allowed, enabled);
for (i = 0; i < ARRAY_SIZE(protocols); i++) {
if (allowed & (1ULL << i)) {
if (i == enabled)
tmp += sprintf(tmp, "[%s] ", protocols[i].name);
else
tmp += sprintf(tmp, "%s ", protocols[i].name);
}
}
if (tmp != buf)
tmp--;
*tmp = '\n';
return tmp + 1 - buf;
}
/**
* store_wakeup_protocols() - changes the wakeup IR protocol(s)
* @device: the device descriptor
* @mattr: the device attribute struct
* @buf: a pointer to the input buffer
* @len: length of the input buffer
*
* This routine is for changing the IR protocol type.
* It is triggered by writing to /sys/class/rc/rc?/wakeup_protocols.
* Returns @len on success or a negative error code.
*
* dev->lock is taken to guard against races between
* store_wakeup_protocols and show_wakeup_protocols.
*/
static ssize_t store_wakeup_protocols(struct device *device,
struct device_attribute *mattr,
const char *buf, size_t len)
{
struct rc_dev *dev = to_rc_dev(device);
enum rc_proto protocol = RC_PROTO_UNKNOWN;
ssize_t rc;
u64 allowed;
int i;
mutex_lock(&dev->lock);
if (!dev->registered) {
mutex_unlock(&dev->lock);
return -ENODEV;
}
allowed = dev->allowed_wakeup_protocols;
if (!sysfs_streq(buf, "none")) {
for (i = 0; i < ARRAY_SIZE(protocols); i++) {
if ((allowed & (1ULL << i)) &&
sysfs_streq(buf, protocols[i].name)) {
protocol = i;
break;
}
}
if (i == ARRAY_SIZE(protocols)) {
rc = -EINVAL;
goto out;
}
if (dev->encode_wakeup) {
u64 mask = 1ULL << protocol;
ir_raw_load_modules(&mask);
if (!mask) {
rc = -EINVAL;
goto out;
}
}
}
if (dev->wakeup_protocol != protocol) {
dev->wakeup_protocol = protocol;
dev_dbg(&dev->dev, "Wakeup protocol changed to %d\n", protocol);
if (protocol == RC_PROTO_RC6_MCE)
dev->scancode_wakeup_filter.data = 0x800f0000;
else
dev->scancode_wakeup_filter.data = 0;
dev->scancode_wakeup_filter.mask = 0;
rc = dev->s_wakeup_filter(dev, &dev->scancode_wakeup_filter);
if (rc == 0)
rc = len;
} else {
rc = len;
}
out:
mutex_unlock(&dev->lock);
return rc;
}
static void rc_dev_release(struct device *device)
{
struct rc_dev *dev = to_rc_dev(device);
kfree(dev);
}
static int rc_dev_uevent(const struct device *device, struct kobj_uevent_env *env)
{
struct rc_dev *dev = to_rc_dev(device);
int ret = 0;
mutex_lock(&dev->lock);
if (!dev->registered)
ret = -ENODEV;
if (ret == 0 && dev->rc_map.name)
ret = add_uevent_var(env, "NAME=%s", dev->rc_map.name);
if (ret == 0 && dev->driver_name)
ret = add_uevent_var(env, "DRV_NAME=%s", dev->driver_name);
if (ret == 0 && dev->device_name)
ret = add_uevent_var(env, "DEV_NAME=%s", dev->device_name);
mutex_unlock(&dev->lock);
return ret;
}
/*
 * Static device attribute structs with the sysfs attributes for IR devices
 */
static struct device_attribute dev_attr_ro_protocols =
__ATTR(protocols, 0444, show_protocols, NULL);
static struct device_attribute dev_attr_rw_protocols =
__ATTR(protocols, 0644, show_protocols, store_protocols);
static DEVICE_ATTR(wakeup_protocols, 0644, show_wakeup_protocols,
store_wakeup_protocols);
static RC_FILTER_ATTR(filter, S_IRUGO|S_IWUSR,
show_filter, store_filter, RC_FILTER_NORMAL, false);
static RC_FILTER_ATTR(filter_mask, S_IRUGO|S_IWUSR,
show_filter, store_filter, RC_FILTER_NORMAL, true);
static RC_FILTER_ATTR(wakeup_filter, S_IRUGO|S_IWUSR,
show_filter, store_filter, RC_FILTER_WAKEUP, false);
static RC_FILTER_ATTR(wakeup_filter_mask, S_IRUGO|S_IWUSR,
show_filter, store_filter, RC_FILTER_WAKEUP, true);
static struct attribute *rc_dev_rw_protocol_attrs[] = {
&dev_attr_rw_protocols.attr,
NULL,
};
static const struct attribute_group rc_dev_rw_protocol_attr_grp = {
.attrs = rc_dev_rw_protocol_attrs,
};
static struct attribute *rc_dev_ro_protocol_attrs[] = {
&dev_attr_ro_protocols.attr,
NULL,
};
static const struct attribute_group rc_dev_ro_protocol_attr_grp = {
.attrs = rc_dev_ro_protocol_attrs,
};
static struct attribute *rc_dev_filter_attrs[] = {
&dev_attr_filter.attr.attr,
&dev_attr_filter_mask.attr.attr,
NULL,
};
static const struct attribute_group rc_dev_filter_attr_grp = {
.attrs = rc_dev_filter_attrs,
};
static struct attribute *rc_dev_wakeup_filter_attrs[] = {
&dev_attr_wakeup_filter.attr.attr,
&dev_attr_wakeup_filter_mask.attr.attr,
&dev_attr_wakeup_protocols.attr,
NULL,
};
static const struct attribute_group rc_dev_wakeup_filter_attr_grp = {
.attrs = rc_dev_wakeup_filter_attrs,
};
static const struct device_type rc_dev_type = {
.release = rc_dev_release,
.uevent = rc_dev_uevent,
};
struct rc_dev *rc_allocate_device(enum rc_driver_type type)
{
struct rc_dev *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
if (type != RC_DRIVER_IR_RAW_TX) {
dev->input_dev = input_allocate_device();
if (!dev->input_dev) {
kfree(dev);
return NULL;
}
dev->input_dev->getkeycode = ir_getkeycode;
dev->input_dev->setkeycode = ir_setkeycode;
input_set_drvdata(dev->input_dev, dev);
dev->timeout = IR_DEFAULT_TIMEOUT;
timer_setup(&dev->timer_keyup, ir_timer_keyup, 0);
timer_setup(&dev->timer_repeat, ir_timer_repeat, 0);
spin_lock_init(&dev->rc_map.lock);
spin_lock_init(&dev->keylock);
}
mutex_init(&dev->lock);
dev->dev.type = &rc_dev_type;
dev->dev.class = &rc_class;
device_initialize(&dev->dev);
dev->driver_type = type;
__module_get(THIS_MODULE);
return dev;
}
EXPORT_SYMBOL_GPL(rc_allocate_device);
void rc_free_device(struct rc_dev *dev)
{
if (!dev)
return;
input_free_device(dev->input_dev);
put_device(&dev->dev);
	/*
	 * kfree(dev) will be called by the callback function
	 * rc_dev_release()
	 */
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(rc_free_device);
static void devm_rc_alloc_release(struct device *dev, void *res)
{
rc_free_device(*(struct rc_dev **)res);
}
struct rc_dev *devm_rc_allocate_device(struct device *dev,
enum rc_driver_type type)
{
struct rc_dev **dr, *rc;
dr = devres_alloc(devm_rc_alloc_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return NULL;
rc = rc_allocate_device(type);
if (!rc) {
devres_free(dr);
return NULL;
}
rc->dev.parent = dev;
rc->managed_alloc = true;
*dr = rc;
devres_add(dev, dr);
return rc;
}
EXPORT_SYMBOL_GPL(devm_rc_allocate_device);
static int rc_prepare_rx_device(struct rc_dev *dev)
{
int rc;
struct rc_map *rc_map;
u64 rc_proto;
if (!dev->map_name)
return -EINVAL;
rc_map = rc_map_get(dev->map_name);
if (!rc_map)
rc_map = rc_map_get(RC_MAP_EMPTY);
if (!rc_map || !rc_map->scan || rc_map->size == 0)
return -EINVAL;
rc = ir_setkeytable(dev, rc_map);
if (rc)
return rc;
rc_proto = BIT_ULL(rc_map->rc_proto);
if (dev->driver_type == RC_DRIVER_SCANCODE && !dev->change_protocol)
dev->enabled_protocols = dev->allowed_protocols;
if (dev->driver_type == RC_DRIVER_IR_RAW)
ir_raw_load_modules(&rc_proto);
if (dev->change_protocol) {
rc = dev->change_protocol(dev, &rc_proto);
if (rc < 0)
goto out_table;
dev->enabled_protocols = rc_proto;
}
/* Keyboard events */
set_bit(EV_KEY, dev->input_dev->evbit);
set_bit(EV_REP, dev->input_dev->evbit);
set_bit(EV_MSC, dev->input_dev->evbit);
set_bit(MSC_SCAN, dev->input_dev->mscbit);
/* Pointer/mouse events */
set_bit(INPUT_PROP_POINTING_STICK, dev->input_dev->propbit);
set_bit(EV_REL, dev->input_dev->evbit);
set_bit(REL_X, dev->input_dev->relbit);
set_bit(REL_Y, dev->input_dev->relbit);
if (dev->open)
dev->input_dev->open = ir_open;
if (dev->close)
dev->input_dev->close = ir_close;
dev->input_dev->dev.parent = &dev->dev;
memcpy(&dev->input_dev->id, &dev->input_id, sizeof(dev->input_id));
dev->input_dev->phys = dev->input_phys;
dev->input_dev->name = dev->device_name;
return 0;
out_table:
ir_free_table(&dev->rc_map);
return rc;
}
static int rc_setup_rx_device(struct rc_dev *dev)
{
int rc;
/* rc_open will be called here */
rc = input_register_device(dev->input_dev);
if (rc)
return rc;
/*
* Default delay of 250ms is too short for some protocols, especially
* since the timeout is currently set to 250ms. Increase it to 500ms,
* to avoid wrong repetition of the keycodes. Note that this must be
* set after the call to input_register_device().
*/
if (dev->allowed_protocols == RC_PROTO_BIT_CEC)
dev->input_dev->rep[REP_DELAY] = 0;
else
dev->input_dev->rep[REP_DELAY] = 500;
/*
	 * As a repeat event on protocols like RC-5 and NEC takes as long as
	 * 110/114ms, using 33ms as a repeat period is not the right thing
* to do.
*/
dev->input_dev->rep[REP_PERIOD] = 125;
return 0;
}
static void rc_free_rx_device(struct rc_dev *dev)
{
if (!dev)
return;
if (dev->input_dev) {
input_unregister_device(dev->input_dev);
dev->input_dev = NULL;
}
ir_free_table(&dev->rc_map);
}
int rc_register_device(struct rc_dev *dev)
{
const char *path;
int attr = 0;
int minor;
int rc;
if (!dev)
return -EINVAL;
minor = ida_alloc_max(&rc_ida, RC_DEV_MAX - 1, GFP_KERNEL);
if (minor < 0)
return minor;
dev->minor = minor;
dev_set_name(&dev->dev, "rc%u", dev->minor);
dev_set_drvdata(&dev->dev, dev);
dev->dev.groups = dev->sysfs_groups;
if (dev->driver_type == RC_DRIVER_SCANCODE && !dev->change_protocol)
dev->sysfs_groups[attr++] = &rc_dev_ro_protocol_attr_grp;
else if (dev->driver_type != RC_DRIVER_IR_RAW_TX)
dev->sysfs_groups[attr++] = &rc_dev_rw_protocol_attr_grp;
if (dev->s_filter)
dev->sysfs_groups[attr++] = &rc_dev_filter_attr_grp;
if (dev->s_wakeup_filter)
dev->sysfs_groups[attr++] = &rc_dev_wakeup_filter_attr_grp;
dev->sysfs_groups[attr++] = NULL;
if (dev->driver_type == RC_DRIVER_IR_RAW) {
rc = ir_raw_event_prepare(dev);
if (rc < 0)
goto out_minor;
}
if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
rc = rc_prepare_rx_device(dev);
if (rc)
goto out_raw;
}
dev->registered = true;
rc = device_add(&dev->dev);
if (rc)
goto out_rx_free;
path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
dev_info(&dev->dev, "%s as %s\n",
dev->device_name ?: "Unspecified device", path ?: "N/A");
kfree(path);
/*
* once the input device is registered in rc_setup_rx_device,
* userspace can open the input device and rc_open() will be called
* as a result. This results in driver code being allowed to submit
* keycodes with rc_keydown, so lirc must be registered first.
*/
if (dev->allowed_protocols != RC_PROTO_BIT_CEC) {
rc = lirc_register(dev);
if (rc < 0)
goto out_dev;
}
if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
rc = rc_setup_rx_device(dev);
if (rc)
goto out_lirc;
}
if (dev->driver_type == RC_DRIVER_IR_RAW) {
rc = ir_raw_event_register(dev);
if (rc < 0)
goto out_rx;
}
dev_dbg(&dev->dev, "Registered rc%u (driver: %s)\n", dev->minor,
dev->driver_name ? dev->driver_name : "unknown");
return 0;
out_rx:
rc_free_rx_device(dev);
out_lirc:
if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
lirc_unregister(dev);
out_dev:
device_del(&dev->dev);
out_rx_free:
ir_free_table(&dev->rc_map);
out_raw:
ir_raw_event_free(dev);
out_minor:
ida_free(&rc_ida, minor);
return rc;
}
EXPORT_SYMBOL_GPL(rc_register_device);
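/*
 * Minimal registration sketch (illustrative; the names and the chosen
 * keymap are assumptions, not taken from a real driver):
 *
 *	struct rc_dev *rc = rc_allocate_device(RC_DRIVER_SCANCODE);
 *	if (!rc)
 *		return -ENOMEM;
 *	rc->device_name = "example remote";
 *	rc->driver_name = "example-rc";
 *	rc->map_name = RC_MAP_EMPTY;
 *	rc->allowed_protocols = RC_PROTO_BIT_OTHER;
 *	ret = rc_register_device(rc);
 *	if (ret)
 *		rc_free_device(rc);
 */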
static void devm_rc_release(struct device *dev, void *res)
{
rc_unregister_device(*(struct rc_dev **)res);
}
int devm_rc_register_device(struct device *parent, struct rc_dev *dev)
{
struct rc_dev **dr;
int ret;
dr = devres_alloc(devm_rc_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return -ENOMEM;
ret = rc_register_device(dev);
if (ret) {
devres_free(dr);
return ret;
}
*dr = dev;
devres_add(parent, dr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_rc_register_device);
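/*
 * Managed variant sketch (illustrative; pdev stands in for the
 * caller's platform device): both the allocation and the registration
 * are undone automatically when the parent driver detaches:
 *
 *	rc = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_SCANCODE);
 *	if (!rc)
 *		return -ENOMEM;
 *	return devm_rc_register_device(&pdev->dev, rc);
 */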
void rc_unregister_device(struct rc_dev *dev)
{
if (!dev)
return;
if (dev->driver_type == RC_DRIVER_IR_RAW)
ir_raw_event_unregister(dev);
del_timer_sync(&dev->timer_keyup);
del_timer_sync(&dev->timer_repeat);
mutex_lock(&dev->lock);
if (dev->users && dev->close)
dev->close(dev);
dev->registered = false;
mutex_unlock(&dev->lock);
rc_free_rx_device(dev);
/*
* lirc device should be freed with dev->registered = false, so
* that userspace polling will get notified.
*/
if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
lirc_unregister(dev);
device_del(&dev->dev);
ida_free(&rc_ida, dev->minor);
if (!dev->managed_alloc)
rc_free_device(dev);
}
EXPORT_SYMBOL_GPL(rc_unregister_device);
/*
* Init/exit code for the module. Basically, creates/removes /sys/class/rc
*/
static int __init rc_core_init(void)
{
int rc = class_register(&rc_class);
if (rc) {
pr_err("rc_core: unable to register rc class\n");
return rc;
}
rc = lirc_dev_init();
if (rc) {
pr_err("rc_core: unable to init lirc\n");
class_unregister(&rc_class);
return rc;
}
led_trigger_register_simple("rc-feedback", &led_feedback);
rc_map_register(&empty_map);
#ifdef CONFIG_MEDIA_CEC_RC
rc_map_register(&cec_map);
#endif
return 0;
}
static void __exit rc_core_exit(void)
{
lirc_dev_exit();
class_unregister(&rc_class);
led_trigger_unregister_simple(led_feedback);
#ifdef CONFIG_MEDIA_CEC_RC
rc_map_unregister(&cec_map);
#endif
rc_map_unregister(&empty_map);
}
subsys_initcall(rc_core_init);
module_exit(rc_core_exit);
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/media/rc/rc-main.c |
/*
* Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
*
* Copyright (C) 2010 Jarod Wilson <[email protected]>
* Copyright (C) 2009 Nuvoton PS Team
*
* Special thanks to Nuvoton for providing hardware, spec sheets and
* sample code upon which portions of this driver are based. Indirect
* thanks also to Maxim Levitsky, whose ene_ir driver this driver is
* modeled after.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#include <linux/pci_ids.h>
#include "nuvoton-cir.h"
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt);
static const struct nvt_chip nvt_chips[] = {
{ "w83667hg", NVT_W83667HG },
{ "NCT6775F", NVT_6775F },
{ "NCT6776F", NVT_6776F },
{ "NCT6779D", NVT_6779D },
};
static inline struct device *nvt_get_dev(const struct nvt_dev *nvt)
{
return nvt->rdev->dev.parent;
}
static inline bool is_w83667hg(struct nvt_dev *nvt)
{
return nvt->chip_ver == NVT_W83667HG;
}
/* write val to config reg */
static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
{
outb(reg, nvt->cr_efir);
outb(val, nvt->cr_efdr);
}
/* read val from config reg */
static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
{
outb(reg, nvt->cr_efir);
return inb(nvt->cr_efdr);
}
/* update config register bit without changing other bits */
static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
u8 tmp = nvt_cr_read(nvt, reg) | val;
nvt_cr_write(nvt, tmp, reg);
}
/* enter extended function mode */
static inline int nvt_efm_enable(struct nvt_dev *nvt)
{
if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME))
return -EBUSY;
/* Enabling Extended Function Mode explicitly requires writing 2x */
outb(EFER_EFM_ENABLE, nvt->cr_efir);
outb(EFER_EFM_ENABLE, nvt->cr_efir);
return 0;
}
/* exit extended function mode */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
outb(EFER_EFM_DISABLE, nvt->cr_efir);
release_region(nvt->cr_efir, 2);
}
/*
* When you want to address a specific logical device, write its logical
* device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
* 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
*/
static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL);
}
/* select and enable a logical device, entering and leaving EFM mode */
static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
nvt_efm_enable(nvt);
nvt_select_logical_dev(nvt, ldev);
nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
nvt_efm_disable(nvt);
}
/* select and disable a logical device, entering and leaving EFM mode */
static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
nvt_efm_enable(nvt);
nvt_select_logical_dev(nvt, ldev);
nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
nvt_efm_disable(nvt);
}
/* write val to cir config register */
static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
{
outb(val, nvt->cir_addr + offset);
}
/* read val from cir config register */
static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
{
return inb(nvt->cir_addr + offset);
}
/* write val to cir wake register */
static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
u8 val, u8 offset)
{
outb(val, nvt->cir_wake_addr + offset);
}
/* read val from cir wake config register */
static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
{
return inb(nvt->cir_wake_addr + offset);
}
/* don't override io address if one is set already */
static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
{
unsigned long old_addr;
old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8;
old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO);
if (old_addr)
*ioaddr = old_addr;
else {
nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI);
nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO);
}
}
static void nvt_write_wakeup_codes(struct rc_dev *dev,
const u8 *wbuf, int count)
{
u8 tolerance, config;
struct nvt_dev *nvt = dev->priv;
unsigned long flags;
int i;
/* hardcode the tolerance to 10% */
tolerance = DIV_ROUND_UP(count, 10);
spin_lock_irqsave(&nvt->lock, flags);
nvt_clear_cir_wake_fifo(nvt);
nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL);
config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
/* enable writes to wake fifo */
nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1,
CIR_WAKE_IRCON);
if (count)
pr_info("Wake samples (%d) =", count);
else
pr_info("Wake sample fifo cleared");
for (i = 0; i < count; i++)
nvt_cir_wake_reg_write(nvt, wbuf[i], CIR_WAKE_WR_FIFO_DATA);
nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
spin_unlock_irqrestore(&nvt->lock, flags);
}
static ssize_t wakeup_data_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct rc_dev *rc_dev = to_rc_dev(dev);
struct nvt_dev *nvt = rc_dev->priv;
int fifo_len, duration;
unsigned long flags;
ssize_t buf_len = 0;
int i;
spin_lock_irqsave(&nvt->lock, flags);
fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);
/* go to first element to be read */
while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX))
nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
for (i = 0; i < fifo_len; i++) {
duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD;
buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len,
"%d ", duration);
}
buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");
spin_unlock_irqrestore(&nvt->lock, flags);
return buf_len;
}
static ssize_t wakeup_data_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct rc_dev *rc_dev = to_rc_dev(dev);
u8 wake_buf[WAKEUP_MAX_SIZE];
char **argv;
int i, count;
unsigned int val;
ssize_t ret;
argv = argv_split(GFP_KERNEL, buf, &count);
if (!argv)
return -ENOMEM;
if (!count || count > WAKEUP_MAX_SIZE) {
ret = -EINVAL;
goto out;
}
for (i = 0; i < count; i++) {
ret = kstrtouint(argv[i], 10, &val);
if (ret)
goto out;
val = DIV_ROUND_CLOSEST(val, SAMPLE_PERIOD);
if (!val || val > 0x7f) {
ret = -EINVAL;
goto out;
}
wake_buf[i] = val;
/* sequence must start with a pulse */
if (i % 2 == 0)
wake_buf[i] |= BUF_PULSE_BIT;
}
nvt_write_wakeup_codes(rc_dev, wake_buf, count);
ret = len;
out:
argv_free(argv);
return ret;
}
static DEVICE_ATTR_RW(wakeup_data);
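/*
 * Illustrative sysfs usage (rc0 is an assumed device name): durations
 * are given in microseconds, get rounded to the 50us sample period,
 * may not exceed 127 samples (6350us) each, and the sequence starts
 * with a pulse:
 *
 *	echo "1000 500 1000 500 2000" > /sys/class/rc/rc0/wakeup_data
 */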
/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
nvt_efm_enable(nvt);
nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
pr_info("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
pr_info(" * CR CIR ACTIVE : 0x%x\n",
nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
pr_info(" * CR CIR BASE ADDR: 0x%x\n",
(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
pr_info(" * CR CIR IRQ NUM: 0x%x\n",
nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
nvt_efm_disable(nvt);
pr_info("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
pr_info(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
pr_info(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
pr_info(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
pr_info(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
pr_info(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
pr_info(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
pr_info(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
pr_info(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
pr_info(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
pr_info(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
pr_info(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
pr_info(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
pr_info(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
pr_info(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
pr_info(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
pr_info(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}
/* dump current cir wake register contents */
static void cir_wake_dump_regs(struct nvt_dev *nvt)
{
u8 i, fifo_len;
nvt_efm_enable(nvt);
nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
pr_info("%s: Dump CIR WAKE logical device registers:\n",
NVT_DRIVER_NAME);
pr_info(" * CR CIR WAKE ACTIVE : 0x%x\n",
nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
pr_info(" * CR CIR WAKE BASE ADDR: 0x%x\n",
(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
pr_info(" * CR CIR WAKE IRQ NUM: 0x%x\n",
nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
nvt_efm_disable(nvt);
pr_info("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
pr_info(" * IRCON: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
pr_info(" * IRSTS: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
pr_info(" * IREN: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
pr_info(" * FIFO CMP DEEP: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
pr_info(" * FIFO CMP TOL: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
pr_info(" * FIFO COUNT: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
pr_info(" * SLCH: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
pr_info(" * SLCL: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
pr_info(" * FIFOCON: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
pr_info(" * SRXFSTS: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
pr_info(" * SAMPLE RX FIFO: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
pr_info(" * WR FIFO DATA: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
pr_info(" * RD FIFO ONLY: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
pr_info(" * RD FIFO ONLY IDX: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
pr_info(" * FIFO IGNORE: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
pr_info(" * IRFSM: 0x%x\n",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));
fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
pr_info("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
pr_info("* Contents =");
for (i = 0; i < fifo_len; i++)
pr_cont(" %02x",
nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
pr_cont("\n");
}
static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
{
int i;
for (i = 0; i < ARRAY_SIZE(nvt_chips); i++)
if ((id & SIO_ID_MASK) == nvt_chips[i].chip_ver) {
nvt->chip_ver = nvt_chips[i].chip_ver;
return nvt_chips[i].name;
}
return NULL;
}
/* detect hardware features */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
struct device *dev = nvt_get_dev(nvt);
const char *chip_name;
int chip_id;
nvt_efm_enable(nvt);
/* Check if we're wired for the alternate EFER setup */
nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
if (nvt->chip_major == 0xff) {
nvt_efm_disable(nvt);
nvt->cr_efir = CR_EFIR2;
nvt->cr_efdr = CR_EFDR2;
nvt_efm_enable(nvt);
nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
}
nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);
nvt_efm_disable(nvt);
chip_id = nvt->chip_major << 8 | nvt->chip_minor;
if (chip_id == NVT_INVALID) {
dev_err(dev, "No device found on either EFM port\n");
return -ENODEV;
}
chip_name = nvt_find_chip(nvt, chip_id);
/* warn, but still let the driver load, if we don't know this chip */
if (!chip_name)
dev_warn(dev,
"unknown chip, id: 0x%02x 0x%02x, it may not work...",
nvt->chip_major, nvt->chip_minor);
else
dev_info(dev, "found %s or compatible: chip id: 0x%02x 0x%02x",
chip_name, nvt->chip_major, nvt->chip_minor);
return 0;
}
static void nvt_cir_ldev_init(struct nvt_dev *nvt)
{
u8 val, psreg, psmask, psval;
if (is_w83667hg(nvt)) {
psreg = CR_MULTIFUNC_PIN_SEL;
psmask = MULTIFUNC_PIN_SEL_MASK;
psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB;
} else {
psreg = CR_OUTPUT_PIN_SEL;
psmask = OUTPUT_PIN_SEL_MASK;
psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB;
}
/* output pin selection: enable CIR, with WB sensor enabled */
val = nvt_cr_read(nvt, psreg);
val &= psmask;
val |= psval;
nvt_cr_write(nvt, val, psreg);
/* Select CIR logical device */
nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
nvt_set_ioaddr(nvt, &nvt->cir_addr);
nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);
nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
nvt->cir_addr, nvt->cir_irq);
}
static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
	/* Select ACPI logical device and enable it */
nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
/* Enable CIR Wake via PSOUT# (Pin60) */
nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
/* enable pme interrupt of cir wakeup event */
nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
/* Select CIR Wake logical device */
nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);
nvt_dbg("CIR Wake initialized, base io port address: 0x%lx",
nvt->cir_wake_addr);
}
/* clear out the hardware's cir rx fifo */
static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
{
u8 val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
}
/* clear out the hardware's cir wake rx fifo */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
{
u8 val, config;
config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
/* clearing wake fifo works in learning mode only */
nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0,
CIR_WAKE_IRCON);
val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
CIR_WAKE_FIFOCON);
nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
}
/* clear out the hardware's cir tx fifo */
static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
{
u8 val;
val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
}
/* enable RX Trigger Level Reach and Packet End interrupts */
static void nvt_set_cir_iren(struct nvt_dev *nvt)
{
u8 iren;
iren = CIR_IREN_RTR | CIR_IREN_PE | CIR_IREN_RFO;
nvt_cir_reg_write(nvt, iren, CIR_IREN);
}
static void nvt_cir_regs_init(struct nvt_dev *nvt)
{
nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
/* set sample limit count (PE interrupt raised when reached) */
nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);
/* set fifo irq trigger levels */
nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);
/* clear hardware rx and tx fifos */
nvt_clear_cir_fifo(nvt);
nvt_clear_tx_fifo(nvt);
nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}
static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
/*
* Disable RX, set specific carrier on = low, off = high,
* and sample period (currently 50us)
*/
nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 |
CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
CIR_WAKE_IRCON);
/* clear any and all stray interrupts */
nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
}
static void nvt_enable_wake(struct nvt_dev *nvt)
{
unsigned long flags;
nvt_efm_enable(nvt);
nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
nvt_efm_disable(nvt);
spin_lock_irqsave(&nvt->lock, flags);
nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
CIR_WAKE_IRCON);
nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
spin_unlock_irqrestore(&nvt->lock, flags);
}
#if 0 /* Currently unused */
/* rx carrier detect only works in learning mode, must be called w/lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
u32 count, carrier, duration = 0;
int i;
count = nvt_cir_reg_read(nvt, CIR_FCCL) |
nvt_cir_reg_read(nvt, CIR_FCCH) << 8;
for (i = 0; i < nvt->pkts; i++) {
if (nvt->buf[i] & BUF_PULSE_BIT)
duration += nvt->buf[i] & BUF_LEN_MASK;
}
duration *= SAMPLE_PERIOD;
if (!count || !duration) {
dev_notice(nvt_get_dev(nvt),
"Unable to determine carrier! (c:%u, d:%u)",
count, duration);
return 0;
}
carrier = MS_TO_NS(count) / duration;
if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
nvt_dbg("WTF? Carrier frequency out of range!");
nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
carrier, count, duration);
return carrier;
}
#endif
static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
struct rc_scancode_filter *sc_filter)
{
u8 buf_val;
int i, ret, count;
unsigned int val;
struct ir_raw_event *raw;
u8 wake_buf[WAKEUP_MAX_SIZE];
bool complete;
/* Require mask to be set */
if (!sc_filter->mask)
return 0;
raw = kmalloc_array(WAKEUP_MAX_SIZE, sizeof(*raw), GFP_KERNEL);
if (!raw)
return -ENOMEM;
ret = ir_raw_encode_scancode(dev->wakeup_protocol, sc_filter->data,
raw, WAKEUP_MAX_SIZE);
complete = (ret != -ENOBUFS);
if (!complete)
ret = WAKEUP_MAX_SIZE;
else if (ret < 0)
goto out_raw;
/* Inspect the ir samples */
for (i = 0, count = 0; i < ret && count < WAKEUP_MAX_SIZE; ++i) {
val = raw[i].duration / SAMPLE_PERIOD;
/* Split too large values into several smaller ones */
while (val > 0 && count < WAKEUP_MAX_SIZE) {
/* Skip last value for better comparison tolerance */
if (complete && i == ret - 1 && val < BUF_LEN_MASK)
break;
/* Clamp values to BUF_LEN_MASK at most */
buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
wake_buf[count] = buf_val;
val -= buf_val;
if ((raw[i]).pulse)
wake_buf[count] |= BUF_PULSE_BIT;
count++;
}
}
nvt_write_wakeup_codes(dev, wake_buf, count);
ret = 0;
out_raw:
kfree(raw);
return ret;
}
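/*
 * Worked example for the splitting loop above (illustrative, assuming
 * SAMPLE_PERIOD is 50us and BUF_LEN_MASK is 0x7f): a 9000us pulse is
 * 9000 / 50 = 180 sample units, which exceeds BUF_LEN_MASK (127), so it
 * is emitted as two bytes, 127 and 53, each or'd with BUF_PULSE_BIT.
 */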
/* dump contents of the last rx buffer we got from the hw rx fifo */
static void nvt_dump_rx_buf(struct nvt_dev *nvt)
{
int i;
printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
printk(KERN_CONT "0x%02x ", nvt->buf[i]);
printk(KERN_CONT "\n");
}
/*
* Process raw data in rx driver buffer, store it in raw IR event kfifo,
* trigger decode when appropriate.
*
 * We get IR data samples one byte at a time. If the msb is set, it's a pulse,
 * otherwise it's a space. The lower 7 bits are the count of SAMPLE_PERIOD
 * (default 50us) intervals for that pulse/space. A discrete signal is
 * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
 * to signal more IR coming (repeats) or end of IR, respectively. We store
 * sample data in the raw event kfifo until we see 0x7<something> (except f)
 * or 0x80, at which time we trigger a decode operation.
*/
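/*
 * Worked example (illustrative): a sample byte of 0x90 has the msb set,
 * so it is a pulse of (0x90 & BUF_LEN_MASK) = 16 sample periods, i.e.
 * 16 * 50us = 800us; a byte of 0x23 is a space of 35 * 50us = 1750us.
 */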
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
struct ir_raw_event rawir = {};
u8 sample;
int i;
nvt_dbg_verbose("%s firing", __func__);
if (debug)
nvt_dump_rx_buf(nvt);
nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
for (i = 0; i < nvt->pkts; i++) {
sample = nvt->buf[i];
rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
rawir.duration = (sample & BUF_LEN_MASK) * SAMPLE_PERIOD;
nvt_dbg("Storing %s with duration %d",
rawir.pulse ? "pulse" : "space", rawir.duration);
ir_raw_event_store_with_filter(nvt->rdev, &rawir);
}
nvt->pkts = 0;
nvt_dbg("Calling ir_raw_event_handle\n");
ir_raw_event_handle(nvt->rdev);
nvt_dbg_verbose("%s done", __func__);
}
static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
dev_warn(nvt_get_dev(nvt), "RX FIFO overrun detected, flushing data!");
nvt->pkts = 0;
nvt_clear_cir_fifo(nvt);
ir_raw_event_overflow(nvt->rdev);
}
/* copy data from hardware rx fifo into driver buffer */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
u8 fifocount;
int i;
/* Get count of how many bytes to read from RX FIFO */
fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);
nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);
/* Read fifocount bytes from CIR Sample RX FIFO register */
for (i = 0; i < fifocount; i++)
nvt->buf[i] = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
nvt->pkts = fifocount;
nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);
nvt_process_rx_ir_data(nvt);
}
static void nvt_cir_log_irqs(u8 status, u8 iren)
{
nvt_dbg("IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
status, iren,
status & CIR_IRSTS_RDR ? " RDR" : "",
status & CIR_IRSTS_RTR ? " RTR" : "",
status & CIR_IRSTS_PE ? " PE" : "",
status & CIR_IRSTS_RFO ? " RFO" : "",
status & CIR_IRSTS_TE ? " TE" : "",
status & CIR_IRSTS_TTR ? " TTR" : "",
status & CIR_IRSTS_TFU ? " TFU" : "",
status & CIR_IRSTS_GH ? " GH" : "",
status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
}
/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
struct nvt_dev *nvt = data;
u8 status, iren;
nvt_dbg_verbose("%s firing", __func__);
spin_lock(&nvt->lock);
/*
* Get IR Status register contents. Write 1 to ack/clear
*
* bit: reg name - description
* 7: CIR_IRSTS_RDR - RX Data Ready
* 6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
* 5: CIR_IRSTS_PE - Packet End
* 4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
* 3: CIR_IRSTS_TE - TX FIFO Empty
* 2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
* 1: CIR_IRSTS_TFU - TX FIFO Underrun
* 0: CIR_IRSTS_GH - Min Length Detected
*/
status = nvt_cir_reg_read(nvt, CIR_IRSTS);
iren = nvt_cir_reg_read(nvt, CIR_IREN);
/* At least NCT6779D creates a spurious interrupt when the
* logical device is being disabled.
*/
if (status == 0xff && iren == 0xff) {
spin_unlock(&nvt->lock);
nvt_dbg_verbose("Spurious interrupt detected");
return IRQ_HANDLED;
}
/* IRQ may be shared with CIR WAKE, therefore check for each
* status bit whether the related interrupt source is enabled
*/
if (!(status & iren)) {
spin_unlock(&nvt->lock);
nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
return IRQ_NONE;
}
/* ack/clear all irq flags we've got */
nvt_cir_reg_write(nvt, status, CIR_IRSTS);
nvt_cir_reg_write(nvt, 0, CIR_IRSTS);
nvt_cir_log_irqs(status, iren);
if (status & CIR_IRSTS_RFO)
nvt_handle_rx_fifo_overrun(nvt);
else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE))
nvt_get_rx_ir_data(nvt);
spin_unlock(&nvt->lock);
nvt_dbg_verbose("%s done", __func__);
return IRQ_HANDLED;
}
static void nvt_enable_cir(struct nvt_dev *nvt)
{
unsigned long flags;
/* enable the CIR logical device */
nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
spin_lock_irqsave(&nvt->lock, flags);
/*
* Enable TX and RX, specify carrier on = low, off = high, and set
* sample period (currently 50us)
*/
nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
CIR_IRCON);
/* clear all pending interrupts */
nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
/* enable interrupts */
nvt_set_cir_iren(nvt);
spin_unlock_irqrestore(&nvt->lock, flags);
}
static void nvt_disable_cir(struct nvt_dev *nvt)
{
unsigned long flags;
spin_lock_irqsave(&nvt->lock, flags);
/* disable CIR interrupts */
nvt_cir_reg_write(nvt, 0, CIR_IREN);
/* clear any and all pending interrupts */
nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
/* clear all function enable flags */
nvt_cir_reg_write(nvt, 0, CIR_IRCON);
/* clear hardware rx and tx fifos */
nvt_clear_cir_fifo(nvt);
nvt_clear_tx_fifo(nvt);
spin_unlock_irqrestore(&nvt->lock, flags);
/* disable the CIR logical device */
nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}
static int nvt_open(struct rc_dev *dev)
{
struct nvt_dev *nvt = dev->priv;
nvt_enable_cir(nvt);
return 0;
}
static void nvt_close(struct rc_dev *dev)
{
struct nvt_dev *nvt = dev->priv;
nvt_disable_cir(nvt);
}
/* Allocate memory, probe hardware, and initialize everything */
static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
struct nvt_dev *nvt;
struct rc_dev *rdev;
int ret;
nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL);
if (!nvt)
return -ENOMEM;
/* input device for IR remote */
nvt->rdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
if (!nvt->rdev)
return -ENOMEM;
rdev = nvt->rdev;
/* activate pnp device */
ret = pnp_activate_dev(pdev);
if (ret) {
dev_err(&pdev->dev, "Could not activate PNP device!\n");
return ret;
}
/* validate pnp resources */
if (!pnp_port_valid(pdev, 0) ||
pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
return -EINVAL;
}
if (!pnp_irq_valid(pdev, 0)) {
dev_err(&pdev->dev, "PNP IRQ not valid!\n");
return -EINVAL;
}
if (!pnp_port_valid(pdev, 1) ||
pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
return -EINVAL;
}
nvt->cir_addr = pnp_port_start(pdev, 0);
nvt->cir_irq = pnp_irq(pdev, 0);
nvt->cir_wake_addr = pnp_port_start(pdev, 1);
nvt->cr_efir = CR_EFIR;
nvt->cr_efdr = CR_EFDR;
spin_lock_init(&nvt->lock);
pnp_set_drvdata(pdev, nvt);
ret = nvt_hw_detect(nvt);
if (ret)
return ret;
/* Initialize CIR & CIR Wake Logical Devices */
nvt_efm_enable(nvt);
nvt_cir_ldev_init(nvt);
nvt_cir_wake_ldev_init(nvt);
nvt_efm_disable(nvt);
/*
* Initialize CIR & CIR Wake Config Registers
* and enable logical devices
*/
nvt_cir_regs_init(nvt);
nvt_cir_wake_regs_init(nvt);
/* Set up the rc device */
rdev->priv = nvt;
rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rdev->allowed_wakeup_protocols = RC_PROTO_BIT_ALL_IR_ENCODER;
rdev->encode_wakeup = true;
rdev->open = nvt_open;
rdev->close = nvt_close;
rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
rdev->device_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
rdev->input_phys = "nuvoton/cir0";
rdev->input_id.bustype = BUS_HOST;
rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
rdev->input_id.product = nvt->chip_major;
rdev->input_id.version = nvt->chip_minor;
rdev->driver_name = NVT_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
rdev->timeout = MS_TO_US(100);
/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
rdev->rx_resolution = CIR_SAMPLE_PERIOD;
#if 0
rdev->min_timeout = XYZ;
rdev->max_timeout = XYZ;
#endif
ret = devm_rc_register_device(&pdev->dev, rdev);
if (ret)
return ret;
/* now claim resources */
if (!devm_request_region(&pdev->dev, nvt->cir_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
return -EBUSY;
ret = devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
IRQF_SHARED, NVT_DRIVER_NAME, nvt);
if (ret)
return ret;
if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
return -EBUSY;
ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
if (ret)
return ret;
device_init_wakeup(&pdev->dev, true);
dev_notice(&pdev->dev, "driver has been successfully loaded\n");
if (debug) {
cir_dump_regs(nvt);
cir_wake_dump_regs(nvt);
}
return 0;
}
static void nvt_remove(struct pnp_dev *pdev)
{
struct nvt_dev *nvt = pnp_get_drvdata(pdev);
device_remove_file(&nvt->rdev->dev, &dev_attr_wakeup_data);
nvt_disable_cir(nvt);
/* enable CIR Wake (for IR power-on) */
nvt_enable_wake(nvt);
}
static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
{
struct nvt_dev *nvt = pnp_get_drvdata(pdev);
nvt_dbg("%s called", __func__);
mutex_lock(&nvt->rdev->lock);
if (nvt->rdev->users)
nvt_disable_cir(nvt);
mutex_unlock(&nvt->rdev->lock);
/* make sure wake is enabled */
nvt_enable_wake(nvt);
return 0;
}
static int nvt_resume(struct pnp_dev *pdev)
{
struct nvt_dev *nvt = pnp_get_drvdata(pdev);
nvt_dbg("%s called", __func__);
nvt_cir_regs_init(nvt);
nvt_cir_wake_regs_init(nvt);
mutex_lock(&nvt->rdev->lock);
if (nvt->rdev->users)
nvt_enable_cir(nvt);
mutex_unlock(&nvt->rdev->lock);
return 0;
}
static void nvt_shutdown(struct pnp_dev *pdev)
{
struct nvt_dev *nvt = pnp_get_drvdata(pdev);
nvt_enable_wake(nvt);
}
static const struct pnp_device_id nvt_ids[] = {
{ "WEC0530", 0 }, /* CIR */
{ "NTN0530", 0 }, /* CIR for new chip's pnp id*/
{ "", 0 },
};
static struct pnp_driver nvt_driver = {
.name = NVT_DRIVER_NAME,
.id_table = nvt_ids,
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.probe = nvt_probe,
.remove = nvt_remove,
.suspend = nvt_suspend,
.resume = nvt_resume,
.shutdown = nvt_shutdown,
};
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");
MODULE_DEVICE_TABLE(pnp, nvt_ids);
MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");
MODULE_AUTHOR("Jarod Wilson <[email protected]>");
MODULE_LICENSE("GPL");
module_pnp_driver(nvt_driver);
| linux-master | drivers/media/rc/nuvoton-cir.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for Mediatek IR Receiver Controller
*
* Copyright (C) 2017 Sean Wang <[email protected]>
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <media/rc-core.h>
#define MTK_IR_DEV KBUILD_MODNAME
/* Register to enable PWM and IR */
#define MTK_CONFIG_HIGH_REG 0x0c
/* Bit to enable IR pulse width detection */
#define MTK_PWM_EN BIT(13)
/*
 * Register for setting the ok count, in units of the hardware sampling
 * period, indicating completion of IR reception and then firing the IRQ
*/
#define MTK_OK_COUNT_MASK (GENMASK(22, 16))
#define MTK_OK_COUNT(x) ((x) << 16)
/* Bit to enable IR hardware function */
#define MTK_IR_EN BIT(0)
/* Bit to restart IR receiving */
#define MTK_IRCLR BIT(0)
/* Fields containing pulse width data */
#define MTK_WIDTH_MASK (GENMASK(7, 0))
/* IR threshold */
#define MTK_IRTHD 0x14
#define MTK_DG_CNT_MASK (GENMASK(12, 8))
#define MTK_DG_CNT(x) ((x) << 8)
/* Bit to enable interrupt */
#define MTK_IRINT_EN BIT(0)
/* Bit to clear interrupt status */
#define MTK_IRINT_CLR BIT(0)
/* Maximum count of samples */
#define MTK_MAX_SAMPLES 0xff
/* Indicate the end of IR message */
#define MTK_IR_END(v, p) ((v) == MTK_MAX_SAMPLES && (p) == 0)
/* Number of registers to record the pulse width */
#define MTK_CHKDATA_SZ 17
/* Sample period in us */
#define MTK_IR_SAMPLE 46
enum mtk_fields {
	/* Register for setting the software sampling period */
MTK_CHK_PERIOD,
	/* Register for setting the hardware sampling period */
MTK_HW_PERIOD,
};
enum mtk_regs {
/* Register to clear state of state machine */
MTK_IRCLR_REG,
/* Register containing pulse width data */
MTK_CHKDATA_REG,
/* Register to enable IR interrupt */
MTK_IRINT_EN_REG,
/* Register to ack IR interrupt */
MTK_IRINT_CLR_REG
};
static const u32 mt7623_regs[] = {
[MTK_IRCLR_REG] = 0x20,
[MTK_CHKDATA_REG] = 0x88,
[MTK_IRINT_EN_REG] = 0xcc,
[MTK_IRINT_CLR_REG] = 0xd0,
};
static const u32 mt7622_regs[] = {
[MTK_IRCLR_REG] = 0x18,
[MTK_CHKDATA_REG] = 0x30,
[MTK_IRINT_EN_REG] = 0x1c,
[MTK_IRINT_CLR_REG] = 0x20,
};
struct mtk_field_type {
u32 reg;
u8 offset;
u32 mask;
};
/*
 * struct mtk_ir_data - This is the structure holding all differences among
 * various hardware variants
* @regs: The pointer to the array holding registers offset
* @fields: The pointer to the array holding fields location
* @div: The internal divisor for the based reference clock
* @ok_count: The count indicating the completion of IR data
* receiving when count is reached
* @hw_period: The value indicating the hardware sampling period
*/
struct mtk_ir_data {
const u32 *regs;
const struct mtk_field_type *fields;
u8 div;
u8 ok_count;
u32 hw_period;
};
static const struct mtk_field_type mt7623_fields[] = {
[MTK_CHK_PERIOD] = {0x10, 8, GENMASK(20, 8)},
[MTK_HW_PERIOD] = {0x10, 0, GENMASK(7, 0)},
};
static const struct mtk_field_type mt7622_fields[] = {
[MTK_CHK_PERIOD] = {0x24, 0, GENMASK(24, 0)},
[MTK_HW_PERIOD] = {0x10, 0, GENMASK(24, 0)},
};
/*
 * struct mtk_ir - This is the main data structure for holding the state
 * of the driver
 * @dev: The device pointer
 * @rc: The rc instance
 * @base: The mapped register i/o base
 * @irq: The IRQ that we are using
 * @clk: The clock that the IR internals are using
 * @bus: The clock that the software decoder is using
 * @data: Holding specific data for various platforms
*/
struct mtk_ir {
struct device *dev;
struct rc_dev *rc;
void __iomem *base;
int irq;
struct clk *clk;
struct clk *bus;
const struct mtk_ir_data *data;
};
static inline u32 mtk_chkdata_reg(struct mtk_ir *ir, u32 i)
{
return ir->data->regs[MTK_CHKDATA_REG] + 4 * i;
}
static inline u32 mtk_chk_period(struct mtk_ir *ir)
{
u32 val;
/*
	 * Period for the software decoder, in units of
	 * the raw software sampling period
*/
val = DIV_ROUND_CLOSEST(clk_get_rate(ir->bus),
USEC_PER_SEC * ir->data->div / MTK_IR_SAMPLE);
dev_dbg(ir->dev, "@pwm clk = \t%lu\n",
clk_get_rate(ir->bus) / ir->data->div);
dev_dbg(ir->dev, "@chkperiod = %08x\n", val);
return val;
}
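/*
 * Worked example for the division above (illustrative, assuming a
 * 25 MHz bus clock with div = 4 as on mt7623): the divisor is
 * 1000000 * 4 / 46 = 86956, so val = DIV_ROUND_CLOSEST(25000000, 86956)
 * = 288 raw sampling ticks per software sample period.
 */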
static void mtk_w32_mask(struct mtk_ir *ir, u32 val, u32 mask, unsigned int reg)
{
u32 tmp;
tmp = __raw_readl(ir->base + reg);
tmp = (tmp & ~mask) | val;
__raw_writel(tmp, ir->base + reg);
}
static void mtk_w32(struct mtk_ir *ir, u32 val, unsigned int reg)
{
__raw_writel(val, ir->base + reg);
}
static u32 mtk_r32(struct mtk_ir *ir, unsigned int reg)
{
return __raw_readl(ir->base + reg);
}
static inline void mtk_irq_disable(struct mtk_ir *ir, u32 mask)
{
u32 val;
val = mtk_r32(ir, ir->data->regs[MTK_IRINT_EN_REG]);
mtk_w32(ir, val & ~mask, ir->data->regs[MTK_IRINT_EN_REG]);
}
static inline void mtk_irq_enable(struct mtk_ir *ir, u32 mask)
{
u32 val;
val = mtk_r32(ir, ir->data->regs[MTK_IRINT_EN_REG]);
mtk_w32(ir, val | mask, ir->data->regs[MTK_IRINT_EN_REG]);
}
static irqreturn_t mtk_ir_irq(int irqno, void *dev_id)
{
struct ir_raw_event rawir = {};
struct mtk_ir *ir = dev_id;
u32 i, j, val;
u8 wid;
/*
* Each pulse and space is encoded as a single byte, each byte
* alternating between pulse and space. If a pulse or space is longer
* than can be encoded in a single byte, it is encoded as the maximum
* value 0xff.
*
* If a space is longer than ok_count (about 23ms), the value is
* encoded as zero, and all following bytes are zero. Any IR that
* follows will be presented in the next interrupt.
*
* If there are more than 68 (=MTK_CHKDATA_SZ * 4) pulses and spaces,
	 * then only the first 68 will be presented; the rest is lost.
*/
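	/*
	 * Worked example (illustrative): a CHKDATA value of 0x00ff2350
	 * unpacks LSB first into widths 0x50, 0x23, 0xff, 0x00, i.e. a
	 * pulse of 80 * 47us = 3760us, a space of 35 * 47us = 1645us,
	 * then a saturated 0xff sample and a terminating zero.
	 */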
/* Handle all pulse and space IR controller captures */
for (i = 0 ; i < MTK_CHKDATA_SZ ; i++) {
val = mtk_r32(ir, mtk_chkdata_reg(ir, i));
dev_dbg(ir->dev, "@reg%d=0x%08x\n", i, val);
for (j = 0 ; j < 4 ; j++) {
wid = val & MTK_WIDTH_MASK;
val >>= 8;
rawir.pulse = !rawir.pulse;
rawir.duration = wid * (MTK_IR_SAMPLE + 1);
ir_raw_event_store_with_filter(ir->rc, &rawir);
}
}
/*
	 * The maximum number of edges the IR controller can
	 * hold is MTK_CHKDATA_SZ * 4. So if the received IR message
	 * is over the limit, the last incomplete IR message has a
	 * trailing space appended and is still sent into
	 * ir-rc-raw to decode. That way it is possible that there
	 * is enough information to decode a scancode even if the
	 * trailing end of the message is missing.
*/
if (!MTK_IR_END(wid, rawir.pulse)) {
rawir.pulse = false;
rawir.duration = MTK_MAX_SAMPLES * (MTK_IR_SAMPLE + 1);
ir_raw_event_store_with_filter(ir->rc, &rawir);
}
ir_raw_event_handle(ir->rc);
/*
	 * Restart the controller for the next receive; this
	 * also clears all the CHKDATA registers
*/
mtk_w32_mask(ir, 0x1, MTK_IRCLR, ir->data->regs[MTK_IRCLR_REG]);
/* Clear interrupt status */
mtk_w32_mask(ir, 0x1, MTK_IRINT_CLR,
ir->data->regs[MTK_IRINT_CLR_REG]);
return IRQ_HANDLED;
}
static const struct mtk_ir_data mt7623_data = {
.regs = mt7623_regs,
.fields = mt7623_fields,
.ok_count = 3,
.hw_period = 0xff,
.div = 4,
};
static const struct mtk_ir_data mt7622_data = {
.regs = mt7622_regs,
.fields = mt7622_fields,
.ok_count = 3,
.hw_period = 0xffff,
.div = 32,
};
static const struct of_device_id mtk_ir_match[] = {
{ .compatible = "mediatek,mt7623-cir", .data = &mt7623_data},
{ .compatible = "mediatek,mt7622-cir", .data = &mt7622_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_ir_match);
static int mtk_ir_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
struct mtk_ir *ir;
u32 val;
int ret = 0;
const char *map_name;
ir = devm_kzalloc(dev, sizeof(struct mtk_ir), GFP_KERNEL);
if (!ir)
return -ENOMEM;
ir->dev = dev;
ir->data = of_device_get_match_data(dev);
ir->clk = devm_clk_get(dev, "clk");
if (IS_ERR(ir->clk)) {
dev_err(dev, "failed to get a ir clock.\n");
return PTR_ERR(ir->clk);
}
ir->bus = devm_clk_get(dev, "bus");
if (IS_ERR(ir->bus)) {
/*
		 * For compatibility with older device trees, fall back to
		 * the unnamed clock: ir->bus then uses the same clock as
		 * ir->clk.
*/
ir->bus = ir->clk;
}
ir->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->base))
return PTR_ERR(ir->base);
ir->rc = devm_rc_allocate_device(dev, RC_DRIVER_IR_RAW);
if (!ir->rc) {
dev_err(dev, "failed to allocate device\n");
return -ENOMEM;
}
ir->rc->priv = ir;
ir->rc->device_name = MTK_IR_DEV;
ir->rc->input_phys = MTK_IR_DEV "/input0";
ir->rc->input_id.bustype = BUS_HOST;
ir->rc->input_id.vendor = 0x0001;
ir->rc->input_id.product = 0x0001;
ir->rc->input_id.version = 0x0001;
map_name = of_get_property(dn, "linux,rc-map-name", NULL);
ir->rc->map_name = map_name ?: RC_MAP_EMPTY;
ir->rc->dev.parent = dev;
ir->rc->driver_name = MTK_IR_DEV;
ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
ir->rc->rx_resolution = MTK_IR_SAMPLE;
ir->rc->timeout = MTK_MAX_SAMPLES * (MTK_IR_SAMPLE + 1);
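	/* e.g. 255 * (46 + 1)us = 11985us, i.e. roughly a 12ms timeout */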
ret = devm_rc_register_device(dev, ir->rc);
if (ret) {
dev_err(dev, "failed to register rc device\n");
return ret;
}
platform_set_drvdata(pdev, ir);
ir->irq = platform_get_irq(pdev, 0);
if (ir->irq < 0)
return -ENODEV;
if (clk_prepare_enable(ir->clk)) {
dev_err(dev, "try to enable ir_clk failed\n");
return -EINVAL;
}
if (clk_prepare_enable(ir->bus)) {
dev_err(dev, "try to enable ir_clk failed\n");
ret = -EINVAL;
goto exit_clkdisable_clk;
}
/*
* Enable interrupt after proper hardware
* setup and IRQ handler registration
*/
mtk_irq_disable(ir, MTK_IRINT_EN);
ret = devm_request_irq(dev, ir->irq, mtk_ir_irq, 0, MTK_IR_DEV, ir);
if (ret) {
dev_err(dev, "failed request irq\n");
goto exit_clkdisable_bus;
}
/*
* Setup software sample period as the reference of software decoder
*/
val = (mtk_chk_period(ir) << ir->data->fields[MTK_CHK_PERIOD].offset) &
ir->data->fields[MTK_CHK_PERIOD].mask;
mtk_w32_mask(ir, val, ir->data->fields[MTK_CHK_PERIOD].mask,
ir->data->fields[MTK_CHK_PERIOD].reg);
/*
* Setup hardware sampling period used to setup the proper timeout for
* indicating end of IR receiving completion
*/
val = (ir->data->hw_period << ir->data->fields[MTK_HW_PERIOD].offset) &
ir->data->fields[MTK_HW_PERIOD].mask;
mtk_w32_mask(ir, val, ir->data->fields[MTK_HW_PERIOD].mask,
ir->data->fields[MTK_HW_PERIOD].reg);
/* Set de-glitch counter */
mtk_w32_mask(ir, MTK_DG_CNT(1), MTK_DG_CNT_MASK, MTK_IRTHD);
/* Enable IR and PWM */
val = mtk_r32(ir, MTK_CONFIG_HIGH_REG) & ~MTK_OK_COUNT_MASK;
val |= MTK_OK_COUNT(ir->data->ok_count) | MTK_PWM_EN | MTK_IR_EN;
mtk_w32(ir, val, MTK_CONFIG_HIGH_REG);
mtk_irq_enable(ir, MTK_IRINT_EN);
dev_info(dev, "Initialized MT7623 IR driver, sample period = %dus\n",
MTK_IR_SAMPLE);
return 0;
exit_clkdisable_bus:
clk_disable_unprepare(ir->bus);
exit_clkdisable_clk:
clk_disable_unprepare(ir->clk);
return ret;
}
static void mtk_ir_remove(struct platform_device *pdev)
{
struct mtk_ir *ir = platform_get_drvdata(pdev);
/*
* Avoid contention between remove handler and
* IRQ handler so that disabling IR interrupt and
* waiting for pending IRQ handler to complete
*/
mtk_irq_disable(ir, MTK_IRINT_EN);
synchronize_irq(ir->irq);
clk_disable_unprepare(ir->bus);
clk_disable_unprepare(ir->clk);
}
static struct platform_driver mtk_ir_driver = {
.probe = mtk_ir_probe,
.remove_new = mtk_ir_remove,
.driver = {
.name = MTK_IR_DEV,
.of_match_table = mtk_ir_match,
},
};
module_platform_driver(mtk_ir_driver);
MODULE_DESCRIPTION("Mediatek IR Receiver Controller Driver");
MODULE_AUTHOR("Sean Wang <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/rc/mtk-cir.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Sean Young <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <media/rc-core.h>
#define DRIVER_NAME "gpio-ir-tx"
#define DEVICE_NAME "GPIO IR Bit Banging Transmitter"
struct gpio_ir {
struct gpio_desc *gpio;
unsigned int carrier;
unsigned int duty_cycle;
};
static const struct of_device_id gpio_ir_tx_of_match[] = {
{ .compatible = "gpio-ir-tx", },
{ },
};
MODULE_DEVICE_TABLE(of, gpio_ir_tx_of_match);
static int gpio_ir_tx_set_duty_cycle(struct rc_dev *dev, u32 duty_cycle)
{
struct gpio_ir *gpio_ir = dev->priv;
gpio_ir->duty_cycle = duty_cycle;
return 0;
}
static int gpio_ir_tx_set_carrier(struct rc_dev *dev, u32 carrier)
{
struct gpio_ir *gpio_ir = dev->priv;
if (carrier > 500000)
return -EINVAL;
gpio_ir->carrier = carrier;
return 0;
}
static void delay_until(ktime_t until)
{
/*
* delta should never exceed 0.5 seconds (IR_MAX_DURATION) and on
* m68k ndelay(s64) does not compile; so use s32 rather than s64.
*/
s32 delta;
while (true) {
delta = ktime_us_delta(until, ktime_get());
if (delta <= 0)
return;
/* udelay more than 1ms may not work */
if (delta >= 1000) {
mdelay(delta / 1000);
continue;
}
udelay(delta);
break;
}
}
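/*
 * Usage sketch (illustrative): with 2500us left until the target edge,
 * the loop above issues mdelay(2), re-reads the clock, and then
 * udelay()s the remaining ~500us before returning.
 */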
static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf,
uint count)
{
ktime_t edge;
int i;
local_irq_disable();
edge = ktime_get();
for (i = 0; i < count; i++) {
gpiod_set_value(gpio_ir->gpio, !(i % 2));
edge = ktime_add_us(edge, txbuf[i]);
delay_until(edge);
}
gpiod_set_value(gpio_ir->gpio, 0);
}
static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf,
uint count)
{
ktime_t edge;
/*
* delta should never exceed 0.5 seconds (IR_MAX_DURATION) and on
* m68k ndelay(s64) does not compile; so use s32 rather than s64.
*/
s32 delta;
int i;
unsigned int pulse, space;
/* Ensure the dividend fits into 32 bit */
pulse = DIV_ROUND_CLOSEST(gpio_ir->duty_cycle * (NSEC_PER_SEC / 100),
gpio_ir->carrier);
space = DIV_ROUND_CLOSEST((100 - gpio_ir->duty_cycle) *
(NSEC_PER_SEC / 100), gpio_ir->carrier);
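	/*
	 * Worked example (illustrative): with the default 38000 Hz carrier
	 * and 50% duty cycle, pulse = space =
	 * DIV_ROUND_CLOSEST(50 * 10000000, 38000) = 13158ns, giving a full
	 * period of ~26316ns, i.e. ~38 kHz.
	 */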
local_irq_disable();
edge = ktime_get();
for (i = 0; i < count; i++) {
if (i % 2) {
// space
edge = ktime_add_us(edge, txbuf[i]);
delay_until(edge);
} else {
// pulse
ktime_t last = ktime_add_us(edge, txbuf[i]);
while (ktime_before(ktime_get(), last)) {
gpiod_set_value(gpio_ir->gpio, 1);
edge = ktime_add_ns(edge, pulse);
delta = ktime_to_ns(ktime_sub(edge,
ktime_get()));
if (delta > 0)
ndelay(delta);
gpiod_set_value(gpio_ir->gpio, 0);
edge = ktime_add_ns(edge, space);
delta = ktime_to_ns(ktime_sub(edge,
ktime_get()));
if (delta > 0)
ndelay(delta);
}
edge = last;
}
}
}
static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
unsigned int count)
{
struct gpio_ir *gpio_ir = dev->priv;
unsigned long flags;
local_irq_save(flags);
if (gpio_ir->carrier)
gpio_ir_tx_modulated(gpio_ir, txbuf, count);
else
gpio_ir_tx_unmodulated(gpio_ir, txbuf, count);
local_irq_restore(flags);
return count;
}
static int gpio_ir_tx_probe(struct platform_device *pdev)
{
struct gpio_ir *gpio_ir;
struct rc_dev *rcdev;
int rc;
gpio_ir = devm_kmalloc(&pdev->dev, sizeof(*gpio_ir), GFP_KERNEL);
if (!gpio_ir)
return -ENOMEM;
rcdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW_TX);
if (!rcdev)
return -ENOMEM;
gpio_ir->gpio = devm_gpiod_get(&pdev->dev, NULL, GPIOD_OUT_LOW);
if (IS_ERR(gpio_ir->gpio))
return dev_err_probe(&pdev->dev, PTR_ERR(gpio_ir->gpio),
"Failed to get gpio\n");
rcdev->priv = gpio_ir;
rcdev->driver_name = DRIVER_NAME;
rcdev->device_name = DEVICE_NAME;
rcdev->tx_ir = gpio_ir_tx;
rcdev->s_tx_duty_cycle = gpio_ir_tx_set_duty_cycle;
rcdev->s_tx_carrier = gpio_ir_tx_set_carrier;
gpio_ir->carrier = 38000;
gpio_ir->duty_cycle = 50;
rc = devm_rc_register_device(&pdev->dev, rcdev);
if (rc < 0)
dev_err(&pdev->dev, "failed to register rc device\n");
return rc;
}
static struct platform_driver gpio_ir_tx_driver = {
.probe = gpio_ir_tx_probe,
.driver = {
.name = DRIVER_NAME,
.of_match_table = gpio_ir_tx_of_match,
},
};
module_platform_driver(gpio_ir_tx_driver);
MODULE_DESCRIPTION("GPIO IR Bit Banging Transmitter");
MODULE_AUTHOR("Sean Young <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/rc/gpio-ir-tx.c |
// SPDX-License-Identifier: GPL-2.0-only
/* ir-sharp-decoder.c - handle Sharp IR Pulse/Space protocol
*
* Copyright (C) 2013-2014 Imagination Technologies Ltd.
*
* Based on NEC decoder:
* Copyright (C) 2010 by Mauro Carvalho Chehab
*/
#include <linux/bitrev.h>
#include <linux/module.h>
#include "rc-core-priv.h"
#define SHARP_NBITS 15
#define SHARP_UNIT 40 /* us */
#define SHARP_BIT_PULSE (8 * SHARP_UNIT) /* 320us */
#define SHARP_BIT_0_PERIOD (25 * SHARP_UNIT) /* 1ms (680us space) */
#define SHARP_BIT_1_PERIOD	(50 * SHARP_UNIT) /* 2ms (1680us space) */
#define SHARP_ECHO_SPACE (1000 * SHARP_UNIT) /* 40 ms */
#define SHARP_TRAILER_SPACE (125 * SHARP_UNIT) /* 5 ms (even longer) */
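/*
 * Frame layout, as recovered by the decoder below (illustrative): each
 * 15-bit half carries 5 address bits, 8 command bits and 2 exp/chk bits;
 * the second half repeats the address but inverts the lower 10 bits, so
 * a valid pair satisfies (msg ^ echo) == 0x3ff.
 */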
enum sharp_state {
STATE_INACTIVE,
STATE_BIT_PULSE,
STATE_BIT_SPACE,
STATE_TRAILER_PULSE,
STATE_ECHO_SPACE,
STATE_TRAILER_SPACE,
};
/**
* ir_sharp_decode() - Decode one Sharp pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_sharp_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct sharp_dec *data = &dev->raw->sharp;
u32 msg, echo, address, command, scancode;
if (!is_timing_event(ev)) {
if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
dev_dbg(&dev->dev, "Sharp decode started at state %d (%uus %s)\n",
data->state, ev.duration, TO_STR(ev.pulse));
switch (data->state) {
case STATE_INACTIVE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, SHARP_BIT_PULSE,
SHARP_BIT_PULSE / 2))
break;
data->count = 0;
data->pulse_len = ev.duration;
data->state = STATE_BIT_SPACE;
return 0;
case STATE_BIT_PULSE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, SHARP_BIT_PULSE,
SHARP_BIT_PULSE / 2))
break;
data->pulse_len = ev.duration;
data->state = STATE_BIT_SPACE;
return 0;
case STATE_BIT_SPACE:
if (ev.pulse)
break;
data->bits <<= 1;
if (eq_margin(data->pulse_len + ev.duration, SHARP_BIT_1_PERIOD,
SHARP_BIT_PULSE * 2))
data->bits |= 1;
else if (!eq_margin(data->pulse_len + ev.duration,
SHARP_BIT_0_PERIOD, SHARP_BIT_PULSE * 2))
break;
data->count++;
if (data->count == SHARP_NBITS ||
data->count == SHARP_NBITS * 2)
data->state = STATE_TRAILER_PULSE;
else
data->state = STATE_BIT_PULSE;
return 0;
case STATE_TRAILER_PULSE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, SHARP_BIT_PULSE,
SHARP_BIT_PULSE / 2))
break;
if (data->count == SHARP_NBITS) {
/* exp,chk bits should be 1,0 */
if ((data->bits & 0x3) != 0x2 &&
/* DENON variant, both chk bits 0 */
(data->bits & 0x3) != 0x0)
break;
data->state = STATE_ECHO_SPACE;
} else {
data->state = STATE_TRAILER_SPACE;
}
return 0;
case STATE_ECHO_SPACE:
if (ev.pulse)
break;
if (!eq_margin(ev.duration, SHARP_ECHO_SPACE,
SHARP_ECHO_SPACE / 4))
break;
data->state = STATE_BIT_PULSE;
return 0;
case STATE_TRAILER_SPACE:
if (ev.pulse)
break;
if (!geq_margin(ev.duration, SHARP_TRAILER_SPACE,
SHARP_BIT_PULSE / 2))
break;
/* Validate - command, ext, chk should be inverted in 2nd */
msg = (data->bits >> 15) & 0x7fff;
echo = data->bits & 0x7fff;
if ((msg ^ echo) != 0x3ff) {
dev_dbg(&dev->dev,
"Sharp checksum error: received 0x%04x, 0x%04x\n",
msg, echo);
break;
}
address = bitrev8((msg >> 7) & 0xf8);
command = bitrev8((msg >> 2) & 0xff);
scancode = address << 8 | command;
dev_dbg(&dev->dev, "Sharp scancode 0x%04x\n", scancode);
rc_keydown(dev, RC_PROTO_SHARP, scancode, 0);
data->state = STATE_INACTIVE;
return 0;
}
dev_dbg(&dev->dev, "Sharp decode failed at count %d state %d (%uus %s)\n",
data->count, data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
static const struct ir_raw_timings_pd ir_sharp_timings = {
.header_pulse = 0,
.header_space = 0,
.bit_pulse = SHARP_BIT_PULSE,
.bit_space[0] = SHARP_BIT_0_PERIOD,
.bit_space[1] = SHARP_BIT_1_PERIOD,
.trailer_pulse = SHARP_BIT_PULSE,
.trailer_space = SHARP_ECHO_SPACE,
.msb_first = 1,
};
/**
* ir_sharp_encode() - Encode a scancode as a stream of raw events
*
* @protocol: protocol to encode
* @scancode: scancode to encode
* @events: array of raw ir events to write into
* @max: maximum size of @events
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
*/
static int ir_sharp_encode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max)
{
struct ir_raw_event *e = events;
int ret;
u32 raw;
raw = (((bitrev8(scancode >> 8) >> 3) << 8) & 0x1f00) |
bitrev8(scancode);
ret = ir_raw_gen_pd(&e, max, &ir_sharp_timings, SHARP_NBITS,
(raw << 2) | 2);
if (ret < 0)
return ret;
max -= ret;
raw = (((bitrev8(scancode >> 8) >> 3) << 8) & 0x1f00) |
bitrev8(~scancode);
ret = ir_raw_gen_pd(&e, max, &ir_sharp_timings, SHARP_NBITS,
(raw << 2) | 1);
if (ret < 0)
return ret;
return e - events;
}
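/*
 * Encoding sketch (illustrative): for scancode 0x0172 the first 15-bit
 * word carries address 0x01 and command 0x72 with exp,chk = 1,0
 * ((raw << 2) | 2), and the second word carries the complemented
 * command with exp,chk = 0,1 ((raw << 2) | 1), matching the
 * (msg ^ echo) == 0x3ff check in the decoder.
 */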
static struct ir_raw_handler sharp_handler = {
.protocols = RC_PROTO_BIT_SHARP,
.decode = ir_sharp_decode,
.encode = ir_sharp_encode,
.carrier = 38000,
.min_timeout = SHARP_ECHO_SPACE + SHARP_ECHO_SPACE / 4,
};
static int __init ir_sharp_decode_init(void)
{
ir_raw_handler_register(&sharp_handler);
pr_info("IR Sharp protocol handler initialized\n");
return 0;
}
static void __exit ir_sharp_decode_exit(void)
{
ir_raw_handler_unregister(&sharp_handler);
}
module_init(ir_sharp_decode_init);
module_exit(ir_sharp_decode_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hogan <[email protected]>");
MODULE_DESCRIPTION("Sharp IR protocol decoder");
| linux-master | drivers/media/rc/ir-sharp-decoder.c |
// SPDX-License-Identifier: GPL-2.0+
// ir-rcmm-decoder.c - A decoder for the RCMM IR protocol
//
// Copyright (C) 2018 by Patrick Lerda <[email protected]>
#include "rc-core-priv.h"
#include <linux/module.h>
#define RCMM_UNIT 166 /* microseconds */
#define RCMM_PREFIX_PULSE 417 /* 166.666666666666*2.5 */
#define RCMM_PULSE_0 278 /* 166.666666666666*(1+2/3) */
#define RCMM_PULSE_1 444 /* 166.666666666666*(2+2/3) */
#define RCMM_PULSE_2 611 /* 166.666666666666*(3+2/3) */
#define RCMM_PULSE_3 778 /* 166.666666666666*(4+2/3) */
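/*
 * Worked example (illustrative): RC-MM packs two bits per symbol; a
 * bit-pair of 0b10 (value 2) is sent as a 166us pulse followed by a
 * 611us space (RCMM_PULSE_2), and the frame opens with the 417us
 * prefix pulse.
 */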
enum rcmm_state {
STATE_INACTIVE,
STATE_LOW,
STATE_BUMP,
STATE_VALUE,
STATE_FINISHED,
};
static bool rcmm_mode(const struct rcmm_dec *data)
{
return !((0x000c0000 & data->bits) == 0x000c0000);
}
static int rcmm_miscmode(struct rc_dev *dev, struct rcmm_dec *data)
{
switch (data->count) {
case 24:
if (dev->enabled_protocols & RC_PROTO_BIT_RCMM24) {
rc_keydown(dev, RC_PROTO_RCMM24, data->bits, 0);
data->state = STATE_INACTIVE;
return 0;
}
return -1;
case 12:
if (dev->enabled_protocols & RC_PROTO_BIT_RCMM12) {
rc_keydown(dev, RC_PROTO_RCMM12, data->bits, 0);
data->state = STATE_INACTIVE;
return 0;
}
return -1;
}
return -1;
}
/**
* ir_rcmm_decode() - Decode one RCMM pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct rcmm_dec *data = &dev->raw->rcmm;
u32 scancode;
u8 toggle;
int value;
if (!(dev->enabled_protocols & (RC_PROTO_BIT_RCMM32 |
RC_PROTO_BIT_RCMM24 |
RC_PROTO_BIT_RCMM12)))
return 0;
if (!is_timing_event(ev)) {
if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
switch (data->state) {
case STATE_INACTIVE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, RCMM_PREFIX_PULSE, RCMM_UNIT))
break;
data->state = STATE_LOW;
data->count = 0;
data->bits = 0;
return 0;
case STATE_LOW:
if (ev.pulse)
break;
if (!eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT))
break;
data->state = STATE_BUMP;
return 0;
case STATE_BUMP:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, RCMM_UNIT, RCMM_UNIT / 2))
break;
data->state = STATE_VALUE;
return 0;
case STATE_VALUE:
if (ev.pulse)
break;
if (eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT / 2))
value = 0;
else if (eq_margin(ev.duration, RCMM_PULSE_1, RCMM_UNIT / 2))
value = 1;
else if (eq_margin(ev.duration, RCMM_PULSE_2, RCMM_UNIT / 2))
value = 2;
else if (eq_margin(ev.duration, RCMM_PULSE_3, RCMM_UNIT / 2))
value = 3;
else
value = -1;
if (value == -1) {
if (!rcmm_miscmode(dev, data))
return 0;
break;
}
data->bits <<= 2;
data->bits |= value;
data->count += 2;
if (data->count < 32)
data->state = STATE_BUMP;
else
data->state = STATE_FINISHED;
return 0;
case STATE_FINISHED:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, RCMM_UNIT, RCMM_UNIT / 2))
break;
if (rcmm_mode(data)) {
toggle = !!(0x8000 & data->bits);
scancode = data->bits & ~0x8000;
} else {
toggle = 0;
scancode = data->bits;
}
if (dev->enabled_protocols & RC_PROTO_BIT_RCMM32) {
rc_keydown(dev, RC_PROTO_RCMM32, scancode, toggle);
data->state = STATE_INACTIVE;
return 0;
}
break;
}
dev_dbg(&dev->dev, "RC-MM decode failed at count %d state %d (%uus %s)\n",
data->count, data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
static const int rcmmspace[] = {
RCMM_PULSE_0,
RCMM_PULSE_1,
RCMM_PULSE_2,
RCMM_PULSE_3,
};
static int ir_rcmm_rawencoder(struct ir_raw_event **ev, unsigned int max,
unsigned int n, u32 data)
{
int i;
int ret;
ret = ir_raw_gen_pulse_space(ev, &max, RCMM_PREFIX_PULSE, RCMM_PULSE_0);
if (ret)
return ret;
for (i = n - 2; i >= 0; i -= 2) {
const unsigned int space = rcmmspace[(data >> i) & 3];
ret = ir_raw_gen_pulse_space(ev, &max, RCMM_UNIT, space);
if (ret)
return ret;
}
return ir_raw_gen_pulse_space(ev, &max, RCMM_UNIT, RCMM_PULSE_3 * 2);
}
static int ir_rcmm_encode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max)
{
struct ir_raw_event *e = events;
int ret;
switch (protocol) {
case RC_PROTO_RCMM32:
ret = ir_rcmm_rawencoder(&e, max, 32, scancode);
break;
case RC_PROTO_RCMM24:
ret = ir_rcmm_rawencoder(&e, max, 24, scancode);
break;
case RC_PROTO_RCMM12:
ret = ir_rcmm_rawencoder(&e, max, 12, scancode);
break;
default:
ret = -EINVAL;
}
if (ret < 0)
return ret;
return e - events;
}
static struct ir_raw_handler rcmm_handler = {
.protocols = RC_PROTO_BIT_RCMM32 |
RC_PROTO_BIT_RCMM24 |
RC_PROTO_BIT_RCMM12,
.decode = ir_rcmm_decode,
.encode = ir_rcmm_encode,
.carrier = 36000,
.min_timeout = RCMM_PULSE_3 + RCMM_UNIT,
};
static int __init ir_rcmm_decode_init(void)
{
ir_raw_handler_register(&rcmm_handler);
pr_info("IR RCMM protocol handler initialized\n");
return 0;
}
static void __exit ir_rcmm_decode_exit(void)
{
ir_raw_handler_unregister(&rcmm_handler);
}
module_init(ir_rcmm_decode_init);
module_exit(ir_rcmm_decode_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick Lerda");
MODULE_DESCRIPTION("RCMM IR protocol decoder");
| linux-master | drivers/media/rc/ir-rcmm-decoder.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for ITE Tech Inc. IT8712F/IT8512 CIR
*
* Copyright (C) 2010 Juan Jesús García de Soria <[email protected]>
*
* Inspired by the original lirc_it87 and lirc_ite8709 drivers, on top of the
* skeleton provided by the nuvoton-cir driver.
*
* The lirc_it87 driver was originally written by Hans-Gunter Lutke Uphues
* <[email protected]> in 2001, with enhancements by Christoph Bartelmus
* <[email protected]>, Andrew Calkin <[email protected]> and James Edwards
* <[email protected]>.
*
* The lirc_ite8709 driver was written by Grégory Lardière
* <[email protected]> in 2008.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/bitops.h>
#include <media/rc-core.h>
#include <linux/pci_ids.h>
#include "ite-cir.h"
/* module parameters */
/* default sample period */
static long sample_period = NSEC_PER_SEC / 115200;
module_param(sample_period, long, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sample_period, "sample period");
/* override detected model id */
static int model_number = -1;
module_param(model_number, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(model_number, "Use this model number, don't autodetect");
/* HW-independent code functions */
/* check whether carrier frequency is high frequency */
static inline bool ite_is_high_carrier_freq(unsigned int freq)
{
return freq >= ITE_HCF_MIN_CARRIER_FREQ;
}
/* get the bits required to program the carrier frequency in CFQ bits,
* unshifted */
static u8 ite_get_carrier_freq_bits(unsigned int freq)
{
if (ite_is_high_carrier_freq(freq)) {
if (freq < 425000)
return ITE_CFQ_400;
else if (freq < 465000)
return ITE_CFQ_450;
else if (freq < 490000)
return ITE_CFQ_480;
else
return ITE_CFQ_500;
} else {
/* trim to limits */
if (freq < ITE_LCF_MIN_CARRIER_FREQ)
freq = ITE_LCF_MIN_CARRIER_FREQ;
if (freq > ITE_LCF_MAX_CARRIER_FREQ)
freq = ITE_LCF_MAX_CARRIER_FREQ;
/* convert to kHz and subtract the base freq */
freq = DIV_ROUND_CLOSEST(freq - ITE_LCF_MIN_CARRIER_FREQ, 1000);
return (u8) freq;
}
}
/* get the bits required to program the pulse width in TXMPW */
static u8 ite_get_pulse_width_bits(unsigned int freq, int duty_cycle)
{
unsigned long period_ns, on_ns;
/* sanitize freq into range */
if (freq < ITE_LCF_MIN_CARRIER_FREQ)
freq = ITE_LCF_MIN_CARRIER_FREQ;
if (freq > ITE_HCF_MAX_CARRIER_FREQ)
freq = ITE_HCF_MAX_CARRIER_FREQ;
period_ns = 1000000000UL / freq;
on_ns = period_ns * duty_cycle / 100;
if (ite_is_high_carrier_freq(freq)) {
if (on_ns < 750)
return ITE_TXMPW_A;
else if (on_ns < 850)
return ITE_TXMPW_B;
else if (on_ns < 950)
return ITE_TXMPW_C;
else if (on_ns < 1080)
return ITE_TXMPW_D;
else
return ITE_TXMPW_E;
} else {
if (on_ns < 6500)
return ITE_TXMPW_A;
else if (on_ns < 7850)
return ITE_TXMPW_B;
else if (on_ns < 9650)
return ITE_TXMPW_C;
else if (on_ns < 11950)
return ITE_TXMPW_D;
else
return ITE_TXMPW_E;
}
}
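/*
 * Worked example (illustrative): for a 38 kHz carrier at 33% duty cycle,
 * period_ns = 1000000000 / 38000 = 26315 and on_ns = 26315 * 33 / 100
 * = 8683, which falls in the 7850..9649 low-frequency band above and
 * selects ITE_TXMPW_C.
 */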
/* decode raw bytes as received by the hardware, and push them to the ir-core
* layer */
static void ite_decode_bytes(struct ite_dev *dev, const u8 *data, int length)
{
unsigned long *ldata;
unsigned int next_one, next_zero, size;
struct ir_raw_event ev = {};
if (length == 0)
return;
ldata = (unsigned long *)data;
size = length << 3;
next_one = find_next_bit_le(ldata, size, 0);
if (next_one > 0) {
ev.pulse = true;
ev.duration = ITE_BITS_TO_US(next_one, sample_period);
ir_raw_event_store_with_filter(dev->rdev, &ev);
}
while (next_one < size) {
next_zero = find_next_zero_bit_le(ldata, size, next_one + 1);
ev.pulse = false;
ev.duration = ITE_BITS_TO_US(next_zero - next_one, sample_period);
ir_raw_event_store_with_filter(dev->rdev, &ev);
if (next_zero < size) {
next_one = find_next_bit_le(ldata, size, next_zero + 1);
ev.pulse = true;
ev.duration = ITE_BITS_TO_US(next_one - next_zero,
sample_period);
ir_raw_event_store_with_filter(dev->rdev, &ev);
} else
next_one = size;
}
ir_raw_event_handle(dev->rdev);
dev_dbg(&dev->rdev->dev, "decoded %d bytes\n", length);
}
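/*
 * Worked example (illustrative): the RX bytes form an oversampled
 * bitstream, one bit per sample_period. At the default sample_period of
 * 1000000000 / 115200 ns (~8.68us), a run of 96 consecutive one-bits
 * found by find_next_bit_le() becomes a single pulse of roughly
 * 96 * 8.68us = ~833us.
 */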
/* set all the rx/tx carrier parameters; this must be called with the device
* spinlock held */
static void ite_set_carrier_params(struct ite_dev *dev)
{
unsigned int freq, low_freq, high_freq;
int allowance;
bool use_demodulator;
bool for_tx = dev->transmitting;
if (for_tx) {
/* we don't need no stinking calculations */
freq = dev->tx_carrier_freq;
allowance = ITE_RXDCR_DEFAULT;
use_demodulator = false;
} else {
low_freq = dev->rx_low_carrier_freq;
high_freq = dev->rx_high_carrier_freq;
if (low_freq == 0) {
/* don't demodulate */
freq = ITE_DEFAULT_CARRIER_FREQ;
allowance = ITE_RXDCR_DEFAULT;
use_demodulator = false;
} else {
/* calculate the middle freq */
freq = (low_freq + high_freq) / 2;
/* calculate the allowance */
allowance =
DIV_ROUND_CLOSEST(10000 * (high_freq - low_freq),
ITE_RXDCR_PER_10000_STEP
* (high_freq + low_freq));
if (allowance < 1)
allowance = 1;
if (allowance > ITE_RXDCR_MAX)
allowance = ITE_RXDCR_MAX;
use_demodulator = true;
}
}
/* set the carrier parameters in a device-dependent way */
dev->params->set_carrier_params(dev, ite_is_high_carrier_freq(freq),
use_demodulator, ite_get_carrier_freq_bits(freq), allowance,
ite_get_pulse_width_bits(freq, dev->tx_duty_cycle));
}
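/*
 * Worked example for the allowance computation (illustrative): with a
 * requested RX window of 35..41 kHz, the midpoint is 38 kHz and the
 * relative half-width is (41 - 35) / (41 + 35) ~= 7.9%, which the
 * division above converts into units of ITE_RXDCR_PER_10000_STEP per
 * 10000 before clamping to the 1..ITE_RXDCR_MAX range.
 */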
/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t ite_cir_isr(int irq, void *data)
{
struct ite_dev *dev = data;
irqreturn_t ret = IRQ_RETVAL(IRQ_NONE);
u8 rx_buf[ITE_RX_FIFO_LEN];
int rx_bytes;
int iflags;
/* grab the spinlock */
spin_lock(&dev->lock);
/* read the interrupt flags */
iflags = dev->params->get_irq_causes(dev);
/* Check for RX overflow */
if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) {
dev_warn(&dev->rdev->dev, "receive overflow\n");
ir_raw_event_overflow(dev->rdev);
}
/* check for the receive interrupt */
if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) {
/* read the FIFO bytes */
rx_bytes = dev->params->get_rx_bytes(dev, rx_buf,
ITE_RX_FIFO_LEN);
dev_dbg(&dev->rdev->dev, "interrupt %d RX bytes\n", rx_bytes);
if (rx_bytes > 0) {
/* drop the spinlock, since the ir-core layer
* may call us back again through
* ite_s_idle() */
spin_unlock(&dev->lock);
/* decode the data we've just received */
ite_decode_bytes(dev, rx_buf, rx_bytes);
/* reacquire the spinlock */
spin_lock(&dev->lock);
/* mark the interrupt as serviced */
ret = IRQ_RETVAL(IRQ_HANDLED);
}
} else if (iflags & ITE_IRQ_TX_FIFO) {
/* FIFO space available interrupt */
dev_dbg(&dev->rdev->dev, "interrupt TX FIFO\n");
/* wake any sleeping transmitter */
wake_up_interruptible(&dev->tx_queue);
/* mark the interrupt as serviced */
ret = IRQ_RETVAL(IRQ_HANDLED);
}
/* drop the spinlock */
spin_unlock(&dev->lock);
return ret;
}
/* set the rx carrier freq range, guess it's in Hz... */
static int ite_set_rx_carrier_range(struct rc_dev *rcdev, u32 carrier_low, u32
carrier_high)
{
unsigned long flags;
struct ite_dev *dev = rcdev->priv;
spin_lock_irqsave(&dev->lock, flags);
dev->rx_low_carrier_freq = carrier_low;
dev->rx_high_carrier_freq = carrier_high;
ite_set_carrier_params(dev);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
/* set the tx carrier freq, guess it's in Hz... */
static int ite_set_tx_carrier(struct rc_dev *rcdev, u32 carrier)
{
unsigned long flags;
struct ite_dev *dev = rcdev->priv;
spin_lock_irqsave(&dev->lock, flags);
dev->tx_carrier_freq = carrier;
ite_set_carrier_params(dev);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
/* set the tx duty cycle by controlling the pulse width */
static int ite_set_tx_duty_cycle(struct rc_dev *rcdev, u32 duty_cycle)
{
unsigned long flags;
struct ite_dev *dev = rcdev->priv;
spin_lock_irqsave(&dev->lock, flags);
dev->tx_duty_cycle = duty_cycle;
ite_set_carrier_params(dev);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
/* transmit out IR pulses; what you get here is a batch of alternating
* pulse/space/pulse/space lengths that we should write out completely through
* the FIFO, blocking on a full FIFO */
static int ite_tx_ir(struct rc_dev *rcdev, unsigned *txbuf, unsigned n)
{
unsigned long flags;
struct ite_dev *dev = rcdev->priv;
bool is_pulse = false;
int remaining_us, fifo_avail, fifo_remaining, last_idx = 0;
int max_rle_us, next_rle_us;
int ret = n;
u8 last_sent[ITE_TX_FIFO_LEN];
u8 val;
/* clear the array just in case */
memset(last_sent, 0, sizeof(last_sent));
spin_lock_irqsave(&dev->lock, flags);
/* let everybody know we're now transmitting */
dev->transmitting = true;
/* and set the carrier values for transmission */
ite_set_carrier_params(dev);
/* calculate how much time we can send in one byte */
max_rle_us =
(ITE_BAUDRATE_DIVISOR * sample_period *
ITE_TX_MAX_RLE) / 1000;
/* disable the receiver */
dev->params->disable_rx(dev);
/* this is where we'll begin filling in the FIFO, until it's full.
* then we'll just activate the interrupt, wait for it to wake us up
* again, disable it, continue filling the FIFO... until everything
* has been pushed out */
fifo_avail = ITE_TX_FIFO_LEN - dev->params->get_tx_used_slots(dev);
while (n > 0) {
/* transmit the next sample */
is_pulse = !is_pulse;
remaining_us = *(txbuf++);
n--;
dev_dbg(&dev->rdev->dev, "%s: %d\n",
is_pulse ? "pulse" : "space", remaining_us);
/* repeat while the pulse is non-zero length */
while (remaining_us > 0) {
if (remaining_us > max_rle_us)
next_rle_us = max_rle_us;
else
next_rle_us = remaining_us;
remaining_us -= next_rle_us;
/* check what's the length we have to pump out */
val = (ITE_TX_MAX_RLE * next_rle_us) / max_rle_us;
/* put it into the sent buffer */
last_sent[last_idx++] = val;
			last_idx &= (ITE_TX_FIFO_LEN - 1);
/* encode it for 7 bits */
val = (val - 1) & ITE_TX_RLE_MASK;
/* take into account pulse/space prefix */
if (is_pulse)
val |= ITE_TX_PULSE;
else
val |= ITE_TX_SPACE;
/*
* if we get to 0 available, read again, just in case
* some other slot got freed
*/
if (fifo_avail <= 0)
fifo_avail = ITE_TX_FIFO_LEN - dev->params->get_tx_used_slots(dev);
/* if it's still full */
if (fifo_avail <= 0) {
/* enable the tx interrupt */
dev->params->enable_tx_interrupt(dev);
/* drop the spinlock */
spin_unlock_irqrestore(&dev->lock, flags);
/* wait for the FIFO to empty enough */
wait_event_interruptible(dev->tx_queue,
(fifo_avail = ITE_TX_FIFO_LEN - dev->params->get_tx_used_slots(dev)) >= 8);
/* get the spinlock again */
spin_lock_irqsave(&dev->lock, flags);
/* disable the tx interrupt again. */
dev->params->disable_tx_interrupt(dev);
}
/* now send the byte through the FIFO */
dev->params->put_tx_byte(dev, val);
fifo_avail--;
}
}
/* wait and don't return until the whole FIFO has been sent out;
* otherwise we could configure the RX carrier params instead of the
* TX ones while the transmission is still being performed! */
fifo_remaining = dev->params->get_tx_used_slots(dev);
remaining_us = 0;
while (fifo_remaining > 0) {
fifo_remaining--;
last_idx--;
last_idx &= (ITE_TX_FIFO_LEN - 1);
remaining_us += last_sent[last_idx];
}
remaining_us = (remaining_us * max_rle_us) / (ITE_TX_MAX_RLE);
/* drop the spinlock while we sleep */
spin_unlock_irqrestore(&dev->lock, flags);
/* sleep remaining_us microseconds */
mdelay(DIV_ROUND_UP(remaining_us, 1000));
/* reacquire the spinlock */
spin_lock_irqsave(&dev->lock, flags);
/* now we're not transmitting anymore */
dev->transmitting = false;
/* and set the carrier values for reception */
ite_set_carrier_params(dev);
/* re-enable the receiver */
dev->params->enable_rx(dev);
/* notify transmission end */
wake_up_interruptible(&dev->tx_ended);
spin_unlock_irqrestore(&dev->lock, flags);
return ret;
}
/* idle the receiver if needed */
static void ite_s_idle(struct rc_dev *rcdev, bool enable)
{
unsigned long flags;
struct ite_dev *dev = rcdev->priv;
if (enable) {
spin_lock_irqsave(&dev->lock, flags);
dev->params->idle_rx(dev);
spin_unlock_irqrestore(&dev->lock, flags);
}
}
/* IT8712F HW-specific functions */
/* retrieve a bitmask of the current causes for a pending interrupt; this may
* be composed of ITE_IRQ_TX_FIFO, ITE_IRQ_RX_FIFO and ITE_IRQ_RX_FIFO_OVERRUN
 */
static int it87_get_irq_causes(struct ite_dev *dev)
{
u8 iflags;
int ret = 0;
/* read the interrupt flags */
iflags = inb(dev->cir_addr + IT87_IIR) & IT87_II;
switch (iflags) {
case IT87_II_RXDS:
ret = ITE_IRQ_RX_FIFO;
break;
case IT87_II_RXFO:
ret = ITE_IRQ_RX_FIFO_OVERRUN;
break;
case IT87_II_TXLDL:
ret = ITE_IRQ_TX_FIFO;
break;
}
return ret;
}
/* set the carrier parameters; to be called with the spinlock held */
static void it87_set_carrier_params(struct ite_dev *dev, bool high_freq,
bool use_demodulator,
u8 carrier_freq_bits, u8 allowance_bits,
u8 pulse_width_bits)
{
u8 val;
/* program the RCR register */
val = inb(dev->cir_addr + IT87_RCR)
& ~(IT87_HCFS | IT87_RXEND | IT87_RXDCR);
if (high_freq)
val |= IT87_HCFS;
if (use_demodulator)
val |= IT87_RXEND;
val |= allowance_bits;
outb(val, dev->cir_addr + IT87_RCR);
/* program the TCR2 register */
outb((carrier_freq_bits << IT87_CFQ_SHIFT) | pulse_width_bits,
dev->cir_addr + IT87_TCR2);
}
/* read up to buf_size bytes from the RX FIFO; to be called with the spinlock
* held */
static int it87_get_rx_bytes(struct ite_dev *dev, u8 *buf, int buf_size)
{
int fifo, read = 0;
/* read how many bytes are still in the FIFO */
fifo = inb(dev->cir_addr + IT87_RSR) & IT87_RXFBC;
while (fifo > 0 && buf_size > 0) {
*(buf++) = inb(dev->cir_addr + IT87_DR);
fifo--;
read++;
buf_size--;
}
return read;
}
/* return how many bytes are still in the FIFO; this will be called
* with the device spinlock NOT HELD while waiting for the TX FIFO to get
* empty; let's expect this won't be a problem */
static int it87_get_tx_used_slots(struct ite_dev *dev)
{
return inb(dev->cir_addr + IT87_TSR) & IT87_TXFBC;
}
/* put a byte to the TX fifo; this should be called with the spinlock held */
static void it87_put_tx_byte(struct ite_dev *dev, u8 value)
{
outb(value, dev->cir_addr + IT87_DR);
}
/* idle the receiver so that we won't receive samples until another
pulse is detected; this must be called with the device spinlock held */
static void it87_idle_rx(struct ite_dev *dev)
{
	/* disable streaming by clearing RXACT (the bit is cleared by writing 1) */
outb(inb(dev->cir_addr + IT87_RCR) | IT87_RXACT,
dev->cir_addr + IT87_RCR);
/* clear the FIFO */
outb(inb(dev->cir_addr + IT87_TCR1) | IT87_FIFOCLR,
dev->cir_addr + IT87_TCR1);
}
/* disable the receiver; this must be called with the device spinlock held */
static void it87_disable_rx(struct ite_dev *dev)
{
/* disable the receiver interrupts */
outb(inb(dev->cir_addr + IT87_IER) & ~(IT87_RDAIE | IT87_RFOIE),
dev->cir_addr + IT87_IER);
/* disable the receiver */
outb(inb(dev->cir_addr + IT87_RCR) & ~IT87_RXEN,
dev->cir_addr + IT87_RCR);
/* clear the FIFO and RXACT (actually RXACT should have been cleared
* in the previous outb() call) */
it87_idle_rx(dev);
}
/* enable the receiver; this must be called with the device spinlock held */
static void it87_enable_rx(struct ite_dev *dev)
{
/* enable the receiver by setting RXEN */
outb(inb(dev->cir_addr + IT87_RCR) | IT87_RXEN,
dev->cir_addr + IT87_RCR);
/* just prepare it to idle for the next reception */
it87_idle_rx(dev);
/* enable the receiver interrupts and master enable flag */
outb(inb(dev->cir_addr + IT87_IER) | IT87_RDAIE | IT87_RFOIE | IT87_IEC,
dev->cir_addr + IT87_IER);
}
/* disable the transmitter interrupt; this must be called with the device
* spinlock held */
static void it87_disable_tx_interrupt(struct ite_dev *dev)
{
/* disable the transmitter interrupts */
outb(inb(dev->cir_addr + IT87_IER) & ~IT87_TLDLIE,
dev->cir_addr + IT87_IER);
}
/* enable the transmitter interrupt; this must be called with the device
* spinlock held */
static void it87_enable_tx_interrupt(struct ite_dev *dev)
{
/* enable the transmitter interrupts and master enable flag */
outb(inb(dev->cir_addr + IT87_IER) | IT87_TLDLIE | IT87_IEC,
dev->cir_addr + IT87_IER);
}
/* disable the device; this must be called with the device spinlock held */
static void it87_disable(struct ite_dev *dev)
{
/* clear out all interrupt enable flags */
outb(inb(dev->cir_addr + IT87_IER) &
~(IT87_IEC | IT87_RFOIE | IT87_RDAIE | IT87_TLDLIE),
dev->cir_addr + IT87_IER);
/* disable the receiver */
it87_disable_rx(dev);
/* erase the FIFO */
outb(IT87_FIFOCLR | inb(dev->cir_addr + IT87_TCR1),
dev->cir_addr + IT87_TCR1);
}
/* initialize the hardware */
static void it87_init_hardware(struct ite_dev *dev)
{
/* enable just the baud rate divisor register,
disabling all the interrupts at the same time */
outb((inb(dev->cir_addr + IT87_IER) &
~(IT87_IEC | IT87_RFOIE | IT87_RDAIE | IT87_TLDLIE)) | IT87_BR,
dev->cir_addr + IT87_IER);
/* write out the baud rate divisor */
outb(ITE_BAUDRATE_DIVISOR & 0xff, dev->cir_addr + IT87_BDLR);
outb((ITE_BAUDRATE_DIVISOR >> 8) & 0xff, dev->cir_addr + IT87_BDHR);
/* disable the baud rate divisor register again */
outb(inb(dev->cir_addr + IT87_IER) & ~IT87_BR,
dev->cir_addr + IT87_IER);
/* program the RCR register defaults */
outb(ITE_RXDCR_DEFAULT, dev->cir_addr + IT87_RCR);
/* program the TCR1 register */
outb(IT87_TXMPM_DEFAULT | IT87_TXENDF | IT87_TXRLE
| IT87_FIFOTL_DEFAULT | IT87_FIFOCLR,
dev->cir_addr + IT87_TCR1);
/* program the carrier parameters */
ite_set_carrier_params(dev);
}
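/*
 * Note on the divisor sequence above: while IT87_BR is set in IER, the
 * BDLR/BDHR addresses access the 16-bit baud rate divisor instead of their
 * normal registers, much like the DLAB bit on a 16550 UART, so the two
 * divisor bytes must be written between setting and clearing IT87_BR.
 */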
/* IT8512F on ITE8708 HW-specific functions */
/* retrieve a bitmask of the current causes for a pending interrupt; this may
* be composed of ITE_IRQ_TX_FIFO, ITE_IRQ_RX_FIFO and ITE_IRQ_RX_FIFO_OVERRUN
* */
static int it8708_get_irq_causes(struct ite_dev *dev)
{
u8 iflags;
int ret = 0;
/* read the interrupt flags */
iflags = inb(dev->cir_addr + IT8708_C0IIR);
if (iflags & IT85_TLDLI)
ret |= ITE_IRQ_TX_FIFO;
if (iflags & IT85_RDAI)
ret |= ITE_IRQ_RX_FIFO;
if (iflags & IT85_RFOI)
ret |= ITE_IRQ_RX_FIFO_OVERRUN;
return ret;
}
/* set the carrier parameters; to be called with the spinlock held */
static void it8708_set_carrier_params(struct ite_dev *dev, bool high_freq,
bool use_demodulator,
u8 carrier_freq_bits, u8 allowance_bits,
u8 pulse_width_bits)
{
u8 val;
/* program the C0CFR register, with HRAE=1 */
outb(inb(dev->cir_addr + IT8708_BANKSEL) | IT8708_HRAE,
dev->cir_addr + IT8708_BANKSEL);
val = (inb(dev->cir_addr + IT8708_C0CFR)
& ~(IT85_HCFS | IT85_CFQ)) | carrier_freq_bits;
if (high_freq)
val |= IT85_HCFS;
outb(val, dev->cir_addr + IT8708_C0CFR);
outb(inb(dev->cir_addr + IT8708_BANKSEL) & ~IT8708_HRAE,
dev->cir_addr + IT8708_BANKSEL);
/* program the C0RCR register */
val = inb(dev->cir_addr + IT8708_C0RCR)
& ~(IT85_RXEND | IT85_RXDCR);
if (use_demodulator)
val |= IT85_RXEND;
val |= allowance_bits;
outb(val, dev->cir_addr + IT8708_C0RCR);
/* program the C0TCR register */
val = inb(dev->cir_addr + IT8708_C0TCR) & ~IT85_TXMPW;
val |= pulse_width_bits;
outb(val, dev->cir_addr + IT8708_C0TCR);
}
/* read up to buf_size bytes from the RX FIFO; to be called with the spinlock
* held */
static int it8708_get_rx_bytes(struct ite_dev *dev, u8 *buf, int buf_size)
{
int fifo, read = 0;
/* read how many bytes are still in the FIFO */
fifo = inb(dev->cir_addr + IT8708_C0RFSR) & IT85_RXFBC;
while (fifo > 0 && buf_size > 0) {
*(buf++) = inb(dev->cir_addr + IT8708_C0DR);
fifo--;
read++;
buf_size--;
}
return read;
}
/* return how many bytes are still in the FIFO; this will be called
* with the device spinlock NOT HELD while waiting for the TX FIFO to get
 * empty; we do not expect this to be a problem */
static int it8708_get_tx_used_slots(struct ite_dev *dev)
{
return inb(dev->cir_addr + IT8708_C0TFSR) & IT85_TXFBC;
}
/* put a byte to the TX fifo; this should be called with the spinlock held */
static void it8708_put_tx_byte(struct ite_dev *dev, u8 value)
{
outb(value, dev->cir_addr + IT8708_C0DR);
}
/* idle the receiver so that we won't receive samples until another
pulse is detected; this must be called with the device spinlock held */
static void it8708_idle_rx(struct ite_dev *dev)
{
	/* disable streaming by clearing RXACT (the bit is cleared by
	 * writing it as 1) */
outb(inb(dev->cir_addr + IT8708_C0RCR) | IT85_RXACT,
dev->cir_addr + IT8708_C0RCR);
/* clear the FIFO */
outb(inb(dev->cir_addr + IT8708_C0MSTCR) | IT85_FIFOCLR,
dev->cir_addr + IT8708_C0MSTCR);
}
/* disable the receiver; this must be called with the device spinlock held */
static void it8708_disable_rx(struct ite_dev *dev)
{
/* disable the receiver interrupts */
outb(inb(dev->cir_addr + IT8708_C0IER) &
~(IT85_RDAIE | IT85_RFOIE),
dev->cir_addr + IT8708_C0IER);
/* disable the receiver */
outb(inb(dev->cir_addr + IT8708_C0RCR) & ~IT85_RXEN,
dev->cir_addr + IT8708_C0RCR);
/* clear the FIFO and RXACT (actually RXACT should have been cleared
* in the previous outb() call) */
it8708_idle_rx(dev);
}
/* enable the receiver; this must be called with the device spinlock held */
static void it8708_enable_rx(struct ite_dev *dev)
{
/* enable the receiver by setting RXEN */
outb(inb(dev->cir_addr + IT8708_C0RCR) | IT85_RXEN,
dev->cir_addr + IT8708_C0RCR);
/* just prepare it to idle for the next reception */
it8708_idle_rx(dev);
/* enable the receiver interrupts and master enable flag */
	outb(inb(dev->cir_addr + IT8708_C0IER)
	     | IT85_RDAIE | IT85_RFOIE | IT85_IEC,
	     dev->cir_addr + IT8708_C0IER);
}
/* disable the transmitter interrupt; this must be called with the device
* spinlock held */
static void it8708_disable_tx_interrupt(struct ite_dev *dev)
{
/* disable the transmitter interrupts */
outb(inb(dev->cir_addr + IT8708_C0IER) & ~IT85_TLDLIE,
dev->cir_addr + IT8708_C0IER);
}
/* enable the transmitter interrupt; this must be called with the device
* spinlock held */
static void it8708_enable_tx_interrupt(struct ite_dev *dev)
{
/* enable the transmitter interrupts and master enable flag */
	outb(inb(dev->cir_addr + IT8708_C0IER)
	     | IT85_TLDLIE | IT85_IEC,
	     dev->cir_addr + IT8708_C0IER);
}
/* disable the device; this must be called with the device spinlock held */
static void it8708_disable(struct ite_dev *dev)
{
/* clear out all interrupt enable flags */
outb(inb(dev->cir_addr + IT8708_C0IER) &
~(IT85_IEC | IT85_RFOIE | IT85_RDAIE | IT85_TLDLIE),
dev->cir_addr + IT8708_C0IER);
/* disable the receiver */
it8708_disable_rx(dev);
/* erase the FIFO */
outb(IT85_FIFOCLR | inb(dev->cir_addr + IT8708_C0MSTCR),
dev->cir_addr + IT8708_C0MSTCR);
}
/* initialize the hardware */
static void it8708_init_hardware(struct ite_dev *dev)
{
/* disable all the interrupts */
outb(inb(dev->cir_addr + IT8708_C0IER) &
~(IT85_IEC | IT85_RFOIE | IT85_RDAIE | IT85_TLDLIE),
dev->cir_addr + IT8708_C0IER);
/* program the baud rate divisor */
outb(inb(dev->cir_addr + IT8708_BANKSEL) | IT8708_HRAE,
dev->cir_addr + IT8708_BANKSEL);
outb(ITE_BAUDRATE_DIVISOR & 0xff, dev->cir_addr + IT8708_C0BDLR);
outb((ITE_BAUDRATE_DIVISOR >> 8) & 0xff,
dev->cir_addr + IT8708_C0BDHR);
outb(inb(dev->cir_addr + IT8708_BANKSEL) & ~IT8708_HRAE,
dev->cir_addr + IT8708_BANKSEL);
/* program the C0MSTCR register defaults */
outb((inb(dev->cir_addr + IT8708_C0MSTCR) &
~(IT85_ILSEL | IT85_ILE | IT85_FIFOTL |
IT85_FIFOCLR | IT85_RESET)) |
IT85_FIFOTL_DEFAULT,
dev->cir_addr + IT8708_C0MSTCR);
/* program the C0RCR register defaults */
outb((inb(dev->cir_addr + IT8708_C0RCR) &
~(IT85_RXEN | IT85_RDWOS | IT85_RXEND |
IT85_RXACT | IT85_RXDCR)) |
ITE_RXDCR_DEFAULT,
dev->cir_addr + IT8708_C0RCR);
/* program the C0TCR register defaults */
	outb((inb(dev->cir_addr + IT8708_C0TCR) &
	      ~(IT85_TXMPM | IT85_TXMPW))
	     | IT85_TXRLE | IT85_TXENDF
	     | IT85_TXMPM_DEFAULT | IT85_TXMPW_DEFAULT,
	     dev->cir_addr + IT8708_C0TCR);
/* program the carrier parameters */
ite_set_carrier_params(dev);
}
/* IT8512F on ITE8709 HW-specific functions */
/* read a byte from the SRAM module */
static inline u8 it8709_rm(struct ite_dev *dev, int index)
{
outb(index, dev->cir_addr + IT8709_RAM_IDX);
return inb(dev->cir_addr + IT8709_RAM_VAL);
}
/* write a byte to the SRAM module */
static inline void it8709_wm(struct ite_dev *dev, u8 val, int index)
{
outb(index, dev->cir_addr + IT8709_RAM_IDX);
outb(val, dev->cir_addr + IT8709_RAM_VAL);
}
static void it8709_wait(struct ite_dev *dev)
{
	int i;
/*
* loop until device tells it's ready to continue
	 * the iteration count is usually ~750 but can sometimes reach 13000
*/
for (i = 0; i < 15000; i++) {
udelay(2);
if (it8709_rm(dev, IT8709_MODE) == IT8709_IDLE)
break;
}
}
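/*
 * Worked bound for the loop above: 15000 iterations of udelay(2) give a
 * worst-case busy-wait of about 30 ms before we give up, comfortably above
 * the ~26 ms (13000 iterations) occasionally observed.
 */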
/* read the value of a CIR register */
static u8 it8709_rr(struct ite_dev *dev, int index)
{
/* just wait in case the previous access was a write */
it8709_wait(dev);
it8709_wm(dev, index, IT8709_REG_IDX);
it8709_wm(dev, IT8709_READ, IT8709_MODE);
/* wait for the read data to be available */
it8709_wait(dev);
/* return the read value */
return it8709_rm(dev, IT8709_REG_VAL);
}
/* write the value of a CIR register */
static void it8709_wr(struct ite_dev *dev, u8 val, int index)
{
/* we wait before writing, and not afterwards, since this allows us to
* pipeline the host CPU with the microcontroller */
it8709_wait(dev);
it8709_wm(dev, val, IT8709_REG_VAL);
it8709_wm(dev, index, IT8709_REG_IDX);
it8709_wm(dev, IT8709_WRITE, IT8709_MODE);
}
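/*
 * The helpers above implement a small mailbox protocol on top of the SRAM
 * window: a register read is REG_IDX := index, MODE := READ, wait for IDLE,
 * then fetch REG_VAL; a register write is REG_VAL := val, REG_IDX := index,
 * MODE := WRITE. A read-modify-write therefore expands to (sketch):
 *
 *	val = it8709_rr(dev, IT85_C0RCR);
 *	it8709_wr(dev, val | IT85_RXEN, IT85_C0RCR);
 */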
/* retrieve a bitmask of the current causes for a pending interrupt; this may
* be composed of ITE_IRQ_TX_FIFO, ITE_IRQ_RX_FIFO and ITE_IRQ_RX_FIFO_OVERRUN
* */
static int it8709_get_irq_causes(struct ite_dev *dev)
{
u8 iflags;
int ret = 0;
/* read the interrupt flags */
iflags = it8709_rm(dev, IT8709_IIR);
if (iflags & IT85_TLDLI)
ret |= ITE_IRQ_TX_FIFO;
if (iflags & IT85_RDAI)
ret |= ITE_IRQ_RX_FIFO;
if (iflags & IT85_RFOI)
ret |= ITE_IRQ_RX_FIFO_OVERRUN;
return ret;
}
/* set the carrier parameters; to be called with the spinlock held */
static void it8709_set_carrier_params(struct ite_dev *dev, bool high_freq,
bool use_demodulator,
u8 carrier_freq_bits, u8 allowance_bits,
u8 pulse_width_bits)
{
u8 val;
	val = (it8709_rr(dev, IT85_C0CFR)
	       & ~(IT85_HCFS | IT85_CFQ)) | carrier_freq_bits;
if (high_freq)
val |= IT85_HCFS;
it8709_wr(dev, val, IT85_C0CFR);
/* program the C0RCR register */
val = it8709_rr(dev, IT85_C0RCR)
& ~(IT85_RXEND | IT85_RXDCR);
if (use_demodulator)
val |= IT85_RXEND;
val |= allowance_bits;
it8709_wr(dev, val, IT85_C0RCR);
/* program the C0TCR register */
val = it8709_rr(dev, IT85_C0TCR) & ~IT85_TXMPW;
val |= pulse_width_bits;
it8709_wr(dev, val, IT85_C0TCR);
}
/* read up to buf_size bytes from the RX FIFO; to be called with the spinlock
* held */
static int it8709_get_rx_bytes(struct ite_dev *dev, u8 *buf, int buf_size)
{
int fifo, read = 0;
/* read how many bytes are still in the FIFO */
fifo = it8709_rm(dev, IT8709_RFSR) & IT85_RXFBC;
while (fifo > 0 && buf_size > 0) {
*(buf++) = it8709_rm(dev, IT8709_FIFO + read);
fifo--;
read++;
buf_size--;
}
/* 'clear' the FIFO by setting the writing index to 0; this is
* completely bound to be racy, but we can't help it, since it's a
* limitation of the protocol */
it8709_wm(dev, 0, IT8709_RFSR);
return read;
}
/* return how many bytes are still in the FIFO; this will be called
* with the device spinlock NOT HELD while waiting for the TX FIFO to get
 * empty; we do not expect this to be a problem */
static int it8709_get_tx_used_slots(struct ite_dev *dev)
{
return it8709_rr(dev, IT85_C0TFSR) & IT85_TXFBC;
}
/* put a byte to the TX fifo; this should be called with the spinlock held */
static void it8709_put_tx_byte(struct ite_dev *dev, u8 value)
{
it8709_wr(dev, value, IT85_C0DR);
}
/* idle the receiver so that we won't receive samples until another
pulse is detected; this must be called with the device spinlock held */
static void it8709_idle_rx(struct ite_dev *dev)
{
	/* disable streaming by clearing RXACT (the bit is cleared by
	 * writing it as 1) */
it8709_wr(dev, it8709_rr(dev, IT85_C0RCR) | IT85_RXACT,
IT85_C0RCR);
/* clear the FIFO */
it8709_wr(dev, it8709_rr(dev, IT85_C0MSTCR) | IT85_FIFOCLR,
IT85_C0MSTCR);
}
/* disable the receiver; this must be called with the device spinlock held */
static void it8709_disable_rx(struct ite_dev *dev)
{
/* disable the receiver interrupts */
it8709_wr(dev, it8709_rr(dev, IT85_C0IER) &
~(IT85_RDAIE | IT85_RFOIE),
IT85_C0IER);
/* disable the receiver */
it8709_wr(dev, it8709_rr(dev, IT85_C0RCR) & ~IT85_RXEN,
IT85_C0RCR);
	/* clear the FIFO and RXACT (actually RXACT should have been cleared
	 * in the previous it8709_wr() call) */
it8709_idle_rx(dev);
}
/* enable the receiver; this must be called with the device spinlock held */
static void it8709_enable_rx(struct ite_dev *dev)
{
/* enable the receiver by setting RXEN */
it8709_wr(dev, it8709_rr(dev, IT85_C0RCR) | IT85_RXEN,
IT85_C0RCR);
/* just prepare it to idle for the next reception */
it8709_idle_rx(dev);
/* enable the receiver interrupts and master enable flag */
	it8709_wr(dev, it8709_rr(dev, IT85_C0IER)
		  | IT85_RDAIE | IT85_RFOIE | IT85_IEC,
		  IT85_C0IER);
}
/* disable the transmitter interrupt; this must be called with the device
* spinlock held */
static void it8709_disable_tx_interrupt(struct ite_dev *dev)
{
/* disable the transmitter interrupts */
it8709_wr(dev, it8709_rr(dev, IT85_C0IER) & ~IT85_TLDLIE,
IT85_C0IER);
}
/* enable the transmitter interrupt; this must be called with the device
* spinlock held */
static void it8709_enable_tx_interrupt(struct ite_dev *dev)
{
/* enable the transmitter interrupts and master enable flag */
	it8709_wr(dev, it8709_rr(dev, IT85_C0IER)
		  | IT85_TLDLIE | IT85_IEC,
		  IT85_C0IER);
}
/* disable the device; this must be called with the device spinlock held */
static void it8709_disable(struct ite_dev *dev)
{
/* clear out all interrupt enable flags */
it8709_wr(dev, it8709_rr(dev, IT85_C0IER) &
~(IT85_IEC | IT85_RFOIE | IT85_RDAIE | IT85_TLDLIE),
IT85_C0IER);
/* disable the receiver */
it8709_disable_rx(dev);
/* erase the FIFO */
it8709_wr(dev, IT85_FIFOCLR | it8709_rr(dev, IT85_C0MSTCR),
IT85_C0MSTCR);
}
/* initialize the hardware */
static void it8709_init_hardware(struct ite_dev *dev)
{
/* disable all the interrupts */
it8709_wr(dev, it8709_rr(dev, IT85_C0IER) &
~(IT85_IEC | IT85_RFOIE | IT85_RDAIE | IT85_TLDLIE),
IT85_C0IER);
/* program the baud rate divisor */
it8709_wr(dev, ITE_BAUDRATE_DIVISOR & 0xff, IT85_C0BDLR);
it8709_wr(dev, (ITE_BAUDRATE_DIVISOR >> 8) & 0xff,
IT85_C0BDHR);
/* program the C0MSTCR register defaults */
it8709_wr(dev, (it8709_rr(dev, IT85_C0MSTCR) &
~(IT85_ILSEL | IT85_ILE | IT85_FIFOTL
| IT85_FIFOCLR | IT85_RESET)) | IT85_FIFOTL_DEFAULT,
IT85_C0MSTCR);
/* program the C0RCR register defaults */
it8709_wr(dev, (it8709_rr(dev, IT85_C0RCR) &
~(IT85_RXEN | IT85_RDWOS | IT85_RXEND | IT85_RXACT
| IT85_RXDCR)) | ITE_RXDCR_DEFAULT,
IT85_C0RCR);
/* program the C0TCR register defaults */
it8709_wr(dev, (it8709_rr(dev, IT85_C0TCR) & ~(IT85_TXMPM | IT85_TXMPW))
| IT85_TXRLE | IT85_TXENDF | IT85_TXMPM_DEFAULT
| IT85_TXMPW_DEFAULT,
IT85_C0TCR);
/* program the carrier parameters */
ite_set_carrier_params(dev);
}
/* generic hardware setup/teardown code */
/* activate the device for use */
static int ite_open(struct rc_dev *rcdev)
{
struct ite_dev *dev = rcdev->priv;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
/* enable the receiver */
dev->params->enable_rx(dev);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
/* deactivate the device for use */
static void ite_close(struct rc_dev *rcdev)
{
struct ite_dev *dev = rcdev->priv;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
/* wait for any transmission to end */
spin_unlock_irqrestore(&dev->lock, flags);
wait_event_interruptible(dev->tx_ended, !dev->transmitting);
spin_lock_irqsave(&dev->lock, flags);
dev->params->disable(dev);
spin_unlock_irqrestore(&dev->lock, flags);
}
/* supported models and their parameters */
static const struct ite_dev_params ite_dev_descs[] = {
{ /* 0: ITE8704 */
.model = "ITE8704 CIR transceiver",
.io_region_size = IT87_IOREG_LENGTH,
.io_rsrc_no = 0,
/* operations */
.get_irq_causes = it87_get_irq_causes,
.enable_rx = it87_enable_rx,
.idle_rx = it87_idle_rx,
.disable_rx = it87_idle_rx,
.get_rx_bytes = it87_get_rx_bytes,
.enable_tx_interrupt = it87_enable_tx_interrupt,
.disable_tx_interrupt = it87_disable_tx_interrupt,
.get_tx_used_slots = it87_get_tx_used_slots,
.put_tx_byte = it87_put_tx_byte,
.disable = it87_disable,
.init_hardware = it87_init_hardware,
.set_carrier_params = it87_set_carrier_params,
},
{ /* 1: ITE8713 */
.model = "ITE8713 CIR transceiver",
.io_region_size = IT87_IOREG_LENGTH,
.io_rsrc_no = 0,
/* operations */
.get_irq_causes = it87_get_irq_causes,
.enable_rx = it87_enable_rx,
.idle_rx = it87_idle_rx,
.disable_rx = it87_idle_rx,
.get_rx_bytes = it87_get_rx_bytes,
.enable_tx_interrupt = it87_enable_tx_interrupt,
.disable_tx_interrupt = it87_disable_tx_interrupt,
.get_tx_used_slots = it87_get_tx_used_slots,
.put_tx_byte = it87_put_tx_byte,
.disable = it87_disable,
.init_hardware = it87_init_hardware,
.set_carrier_params = it87_set_carrier_params,
},
{ /* 2: ITE8708 */
.model = "ITE8708 CIR transceiver",
.io_region_size = IT8708_IOREG_LENGTH,
.io_rsrc_no = 0,
/* operations */
.get_irq_causes = it8708_get_irq_causes,
.enable_rx = it8708_enable_rx,
.idle_rx = it8708_idle_rx,
.disable_rx = it8708_idle_rx,
.get_rx_bytes = it8708_get_rx_bytes,
.enable_tx_interrupt = it8708_enable_tx_interrupt,
		.disable_tx_interrupt = it8708_disable_tx_interrupt,
.get_tx_used_slots = it8708_get_tx_used_slots,
.put_tx_byte = it8708_put_tx_byte,
.disable = it8708_disable,
.init_hardware = it8708_init_hardware,
.set_carrier_params = it8708_set_carrier_params,
},
{ /* 3: ITE8709 */
.model = "ITE8709 CIR transceiver",
.io_region_size = IT8709_IOREG_LENGTH,
.io_rsrc_no = 2,
/* operations */
.get_irq_causes = it8709_get_irq_causes,
.enable_rx = it8709_enable_rx,
.idle_rx = it8709_idle_rx,
.disable_rx = it8709_idle_rx,
.get_rx_bytes = it8709_get_rx_bytes,
.enable_tx_interrupt = it8709_enable_tx_interrupt,
		.disable_tx_interrupt = it8709_disable_tx_interrupt,
.get_tx_used_slots = it8709_get_tx_used_slots,
.put_tx_byte = it8709_put_tx_byte,
.disable = it8709_disable,
.init_hardware = it8709_init_hardware,
.set_carrier_params = it8709_set_carrier_params,
},
};
static const struct pnp_device_id ite_ids[] = {
{"ITE8704", 0}, /* Default model */
{"ITE8713", 1}, /* CIR found in EEEBox 1501U */
{"ITE8708", 2}, /* Bridged IT8512 */
{"ITE8709", 3}, /* SRAM-Bridged IT8512 */
{"", 0},
};
/* allocate memory, probe hardware, and initialize everything */
static int ite_probe(struct pnp_dev *pdev,
		     const struct pnp_device_id *dev_id)
{
const struct ite_dev_params *dev_desc = NULL;
struct ite_dev *itdev = NULL;
struct rc_dev *rdev = NULL;
int ret = -ENOMEM;
int model_no;
int io_rsrc_no;
itdev = kzalloc(sizeof(struct ite_dev), GFP_KERNEL);
if (!itdev)
return ret;
/* input device for IR remote (and tx) */
rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev)
goto exit_free_dev_rdev;
itdev->rdev = rdev;
ret = -ENODEV;
/* get the model number */
model_no = (int)dev_id->driver_data;
dev_dbg(&pdev->dev, "Auto-detected model: %s\n",
ite_dev_descs[model_no].model);
if (model_number >= 0 && model_number < ARRAY_SIZE(ite_dev_descs)) {
model_no = model_number;
		dev_info(&pdev->dev, "model has been forced to: %s\n",
ite_dev_descs[model_no].model);
}
/* get the description for the device */
dev_desc = &ite_dev_descs[model_no];
io_rsrc_no = dev_desc->io_rsrc_no;
/* validate pnp resources */
if (!pnp_port_valid(pdev, io_rsrc_no) ||
pnp_port_len(pdev, io_rsrc_no) < dev_desc->io_region_size) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
goto exit_free_dev_rdev;
}
if (!pnp_irq_valid(pdev, 0)) {
dev_err(&pdev->dev, "PNP IRQ not valid!\n");
goto exit_free_dev_rdev;
}
/* store resource values */
itdev->cir_addr = pnp_port_start(pdev, io_rsrc_no);
itdev->cir_irq = pnp_irq(pdev, 0);
/* initialize spinlocks */
spin_lock_init(&itdev->lock);
/* set driver data into the pnp device */
pnp_set_drvdata(pdev, itdev);
itdev->pdev = pdev;
/* initialize waitqueues for transmission */
init_waitqueue_head(&itdev->tx_queue);
init_waitqueue_head(&itdev->tx_ended);
/* Set model-specific parameters */
itdev->params = dev_desc;
/* set up hardware initial state */
itdev->tx_duty_cycle = 33;
itdev->tx_carrier_freq = ITE_DEFAULT_CARRIER_FREQ;
itdev->params->init_hardware(itdev);
/* set up ir-core props */
rdev->priv = itdev;
rdev->dev.parent = &pdev->dev;
rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rdev->open = ite_open;
rdev->close = ite_close;
rdev->s_idle = ite_s_idle;
rdev->s_rx_carrier_range = ite_set_rx_carrier_range;
/* FIFO threshold is 17 bytes, so 17 * 8 samples minimum */
rdev->min_timeout = 17 * 8 * ITE_BAUDRATE_DIVISOR *
sample_period / 1000;
rdev->timeout = IR_DEFAULT_TIMEOUT;
rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rdev->rx_resolution = ITE_BAUDRATE_DIVISOR * sample_period / 1000;
rdev->tx_resolution = ITE_BAUDRATE_DIVISOR * sample_period / 1000;
/* set up transmitter related values */
rdev->tx_ir = ite_tx_ir;
rdev->s_tx_carrier = ite_set_tx_carrier;
rdev->s_tx_duty_cycle = ite_set_tx_duty_cycle;
rdev->device_name = dev_desc->model;
rdev->input_id.bustype = BUS_HOST;
rdev->input_id.vendor = PCI_VENDOR_ID_ITE;
rdev->input_id.product = 0;
rdev->input_id.version = 0;
rdev->driver_name = ITE_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
ret = rc_register_device(rdev);
if (ret)
goto exit_free_dev_rdev;
ret = -EBUSY;
/* now claim resources */
if (!request_region(itdev->cir_addr,
dev_desc->io_region_size, ITE_DRIVER_NAME))
goto exit_unregister_device;
if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
ITE_DRIVER_NAME, (void *)itdev))
goto exit_release_cir_addr;
return 0;
exit_release_cir_addr:
release_region(itdev->cir_addr, itdev->params->io_region_size);
exit_unregister_device:
rc_unregister_device(rdev);
rdev = NULL;
exit_free_dev_rdev:
rc_free_device(rdev);
kfree(itdev);
return ret;
}
static void ite_remove(struct pnp_dev *pdev)
{
struct ite_dev *dev = pnp_get_drvdata(pdev);
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
/* disable hardware */
dev->params->disable(dev);
spin_unlock_irqrestore(&dev->lock, flags);
/* free resources */
free_irq(dev->cir_irq, dev);
release_region(dev->cir_addr, dev->params->io_region_size);
rc_unregister_device(dev->rdev);
kfree(dev);
}
static int ite_suspend(struct pnp_dev *pdev, pm_message_t state)
{
struct ite_dev *dev = pnp_get_drvdata(pdev);
unsigned long flags;
/* wait for any transmission to end */
wait_event_interruptible(dev->tx_ended, !dev->transmitting);
spin_lock_irqsave(&dev->lock, flags);
/* disable all interrupts */
dev->params->disable(dev);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
static int ite_resume(struct pnp_dev *pdev)
{
struct ite_dev *dev = pnp_get_drvdata(pdev);
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
/* reinitialize hardware config registers */
dev->params->init_hardware(dev);
/* enable the receiver */
dev->params->enable_rx(dev);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
static void ite_shutdown(struct pnp_dev *pdev)
{
struct ite_dev *dev = pnp_get_drvdata(pdev);
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
/* disable all interrupts */
dev->params->disable(dev);
spin_unlock_irqrestore(&dev->lock, flags);
}
static struct pnp_driver ite_driver = {
.name = ITE_DRIVER_NAME,
.id_table = ite_ids,
.probe = ite_probe,
.remove = ite_remove,
.suspend = ite_suspend,
.resume = ite_resume,
.shutdown = ite_shutdown,
};
MODULE_DEVICE_TABLE(pnp, ite_ids);
MODULE_DESCRIPTION("ITE Tech Inc. IT8712F/ITE8512F CIR driver");
MODULE_AUTHOR("Juan J. Garcia de Soria <[email protected]>");
MODULE_LICENSE("GPL");
module_pnp_driver(ite_driver);
| linux-master | drivers/media/rc/ite-cir.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* TechnoTrend USB IR Receiver
*
* Copyright (C) 2012 Sean Young <[email protected]>
*/
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <linux/slab.h>
#include <linux/leds.h>
#include <media/rc-core.h>
#define DRIVER_NAME "ttusbir"
#define DRIVER_DESC "TechnoTrend USB IR Receiver"
/*
* The Windows driver uses 8 URBS, the original lirc drivers has a
* configurable amount (2 default, 4 max). This device generates about 125
* messages per second (!), whether IR is idle or not.
*/
#define NUM_URBS 4
#define US_PER_BYTE 62
#define US_PER_BIT (US_PER_BYTE / 8)
struct ttusbir {
struct rc_dev *rc;
struct device *dev;
struct usb_device *udev;
struct urb *urb[NUM_URBS];
struct led_classdev led;
struct urb *bulk_urb;
uint8_t bulk_buffer[5];
int bulk_out_endp, iso_in_endp;
bool led_on, is_led_on;
atomic_t led_complete;
char phys[64];
};
static enum led_brightness ttusbir_brightness_get(struct led_classdev *led_dev)
{
struct ttusbir *tt = container_of(led_dev, struct ttusbir, led);
return tt->led_on ? LED_FULL : LED_OFF;
}
static void ttusbir_set_led(struct ttusbir *tt)
{
int ret;
smp_mb();
if (tt->led_on != tt->is_led_on && tt->udev &&
atomic_add_unless(&tt->led_complete, 1, 1)) {
tt->bulk_buffer[4] = tt->is_led_on = tt->led_on;
ret = usb_submit_urb(tt->bulk_urb, GFP_ATOMIC);
if (ret) {
dev_warn(tt->dev, "failed to submit bulk urb: %d\n",
ret);
atomic_dec(&tt->led_complete);
}
}
}
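/*
 * Note: atomic_add_unless(&tt->led_complete, 1, 1) above acts as a 0 -> 1
 * trylock, so at most one bulk URB is in flight at any time; the completion
 * handler decrements the count and calls this function again to pick up any
 * LED state change that arrived in the meantime.
 */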
static void ttusbir_brightness_set(struct led_classdev *led_dev,
				   enum led_brightness brightness)
{
struct ttusbir *tt = container_of(led_dev, struct ttusbir, led);
tt->led_on = brightness != LED_OFF;
ttusbir_set_led(tt);
}
/*
* The urb cannot be reused until the urb completes
*/
static void ttusbir_bulk_complete(struct urb *urb)
{
struct ttusbir *tt = urb->context;
atomic_dec(&tt->led_complete);
switch (urb->status) {
case 0:
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
return;
case -EPIPE:
default:
dev_dbg(tt->dev, "Error: urb status = %d\n", urb->status);
break;
}
ttusbir_set_led(tt);
}
/*
* The data is one bit per sample, a set bit signifying silence and samples
 * being MSB first. Bit 0 can contain garbage, so take it to be whatever
 * bit 1 is; that way we don't get unexpected edges.
*/
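/*
 * Worked example (hypothetical sample byte): buf[i] = 0xf1 gives
 * v = 0xf1 & 0xfe = 0xf0. Since v & 2 == 0, b = ffs(0xf0) - 1 = 4 and the
 * byte decodes as a space of (8 - 4) * US_PER_BIT followed by a pulse of
 * 4 * US_PER_BIT: the four MSB samples were silence, the four LSB samples
 * carried IR.
 */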
static void ttusbir_process_ir_data(struct ttusbir *tt, uint8_t *buf)
{
struct ir_raw_event rawir = {};
unsigned i, v, b;
bool event = false;
for (i = 0; i < 128; i++) {
v = buf[i] & 0xfe;
switch (v) {
case 0xfe:
rawir.pulse = false;
rawir.duration = US_PER_BYTE;
if (ir_raw_event_store_with_filter(tt->rc, &rawir))
event = true;
break;
case 0:
rawir.pulse = true;
rawir.duration = US_PER_BYTE;
if (ir_raw_event_store_with_filter(tt->rc, &rawir))
event = true;
break;
default:
/* one edge per byte */
if (v & 2) {
b = ffz(v | 1);
rawir.pulse = true;
} else {
b = ffs(v) - 1;
rawir.pulse = false;
}
rawir.duration = US_PER_BIT * (8 - b);
if (ir_raw_event_store_with_filter(tt->rc, &rawir))
event = true;
rawir.pulse = !rawir.pulse;
rawir.duration = US_PER_BIT * b;
if (ir_raw_event_store_with_filter(tt->rc, &rawir))
event = true;
break;
}
}
/* don't wakeup when there's nothing to do */
if (event)
ir_raw_event_handle(tt->rc);
}
static void ttusbir_urb_complete(struct urb *urb)
{
struct ttusbir *tt = urb->context;
int rc;
switch (urb->status) {
case 0:
ttusbir_process_ir_data(tt, urb->transfer_buffer);
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
return;
case -EPIPE:
default:
dev_dbg(tt->dev, "Error: urb status = %d\n", urb->status);
break;
}
rc = usb_submit_urb(urb, GFP_ATOMIC);
if (rc && rc != -ENODEV)
dev_warn(tt->dev, "failed to resubmit urb: %d\n", rc);
}
static int ttusbir_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct ttusbir *tt;
struct usb_interface_descriptor *idesc;
struct usb_endpoint_descriptor *desc;
struct rc_dev *rc;
int i, j, ret;
int altsetting = -1;
tt = kzalloc(sizeof(*tt), GFP_KERNEL);
rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!tt || !rc) {
ret = -ENOMEM;
goto out;
}
/* find the correct alt setting */
for (i = 0; i < intf->num_altsetting && altsetting == -1; i++) {
int max_packet, bulk_out_endp = -1, iso_in_endp = -1;
idesc = &intf->altsetting[i].desc;
for (j = 0; j < idesc->bNumEndpoints; j++) {
desc = &intf->altsetting[i].endpoint[j].desc;
max_packet = le16_to_cpu(desc->wMaxPacketSize);
if (usb_endpoint_dir_in(desc) &&
usb_endpoint_xfer_isoc(desc) &&
max_packet == 0x10)
iso_in_endp = j;
else if (usb_endpoint_dir_out(desc) &&
usb_endpoint_xfer_bulk(desc) &&
max_packet == 0x20)
bulk_out_endp = j;
if (bulk_out_endp != -1 && iso_in_endp != -1) {
tt->bulk_out_endp = bulk_out_endp;
tt->iso_in_endp = iso_in_endp;
altsetting = i;
break;
}
}
}
if (altsetting == -1) {
dev_err(&intf->dev, "cannot find expected altsetting\n");
ret = -ENODEV;
goto out;
}
tt->dev = &intf->dev;
tt->udev = interface_to_usbdev(intf);
tt->rc = rc;
ret = usb_set_interface(tt->udev, 0, altsetting);
if (ret)
goto out;
for (i = 0; i < NUM_URBS; i++) {
struct urb *urb = usb_alloc_urb(8, GFP_KERNEL);
void *buffer;
if (!urb) {
ret = -ENOMEM;
goto out;
}
urb->dev = tt->udev;
urb->context = tt;
urb->pipe = usb_rcvisocpipe(tt->udev, tt->iso_in_endp);
urb->interval = 1;
buffer = usb_alloc_coherent(tt->udev, 128, GFP_KERNEL,
&urb->transfer_dma);
if (!buffer) {
usb_free_urb(urb);
ret = -ENOMEM;
goto out;
}
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP | URB_ISO_ASAP;
urb->transfer_buffer = buffer;
urb->complete = ttusbir_urb_complete;
urb->number_of_packets = 8;
urb->transfer_buffer_length = 128;
for (j = 0; j < 8; j++) {
urb->iso_frame_desc[j].offset = j * 16;
urb->iso_frame_desc[j].length = 16;
}
tt->urb[i] = urb;
}
tt->bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!tt->bulk_urb) {
ret = -ENOMEM;
goto out;
}
tt->bulk_buffer[0] = 0xaa;
tt->bulk_buffer[1] = 0x01;
tt->bulk_buffer[2] = 0x05;
tt->bulk_buffer[3] = 0x01;
usb_fill_bulk_urb(tt->bulk_urb, tt->udev, usb_sndbulkpipe(tt->udev,
tt->bulk_out_endp), tt->bulk_buffer, sizeof(tt->bulk_buffer),
ttusbir_bulk_complete, tt);
tt->led.name = "ttusbir:green:power";
tt->led.default_trigger = "rc-feedback";
tt->led.brightness_set = ttusbir_brightness_set;
tt->led.brightness_get = ttusbir_brightness_get;
tt->is_led_on = tt->led_on = true;
atomic_set(&tt->led_complete, 0);
ret = led_classdev_register(&intf->dev, &tt->led);
if (ret)
goto out;
usb_make_path(tt->udev, tt->phys, sizeof(tt->phys));
rc->device_name = DRIVER_DESC;
rc->input_phys = tt->phys;
usb_to_input_id(tt->udev, &rc->input_id);
rc->dev.parent = &intf->dev;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rc->priv = tt;
rc->driver_name = DRIVER_NAME;
rc->map_name = RC_MAP_TT_1500;
rc->min_timeout = 1;
rc->timeout = IR_DEFAULT_TIMEOUT;
rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
/*
* The precision is US_PER_BIT, but since every 8th bit can be
* overwritten with garbage the accuracy is at best 2 * US_PER_BIT.
*/
rc->rx_resolution = 2 * US_PER_BIT;
ret = rc_register_device(rc);
if (ret) {
dev_err(&intf->dev, "failed to register rc device %d\n", ret);
goto out2;
}
usb_set_intfdata(intf, tt);
for (i = 0; i < NUM_URBS; i++) {
ret = usb_submit_urb(tt->urb[i], GFP_KERNEL);
if (ret) {
dev_err(tt->dev, "failed to submit urb %d\n", ret);
goto out3;
}
}
return 0;
out3:
rc_unregister_device(rc);
rc = NULL;
out2:
led_classdev_unregister(&tt->led);
out:
if (tt) {
for (i = 0; i < NUM_URBS && tt->urb[i]; i++) {
struct urb *urb = tt->urb[i];
usb_kill_urb(urb);
usb_free_coherent(tt->udev, 128, urb->transfer_buffer,
urb->transfer_dma);
usb_free_urb(urb);
}
usb_kill_urb(tt->bulk_urb);
usb_free_urb(tt->bulk_urb);
kfree(tt);
}
rc_free_device(rc);
return ret;
}
static void ttusbir_disconnect(struct usb_interface *intf)
{
struct ttusbir *tt = usb_get_intfdata(intf);
struct usb_device *udev = tt->udev;
int i;
tt->udev = NULL;
rc_unregister_device(tt->rc);
led_classdev_unregister(&tt->led);
for (i = 0; i < NUM_URBS; i++) {
usb_kill_urb(tt->urb[i]);
usb_free_coherent(udev, 128, tt->urb[i]->transfer_buffer,
tt->urb[i]->transfer_dma);
usb_free_urb(tt->urb[i]);
}
usb_kill_urb(tt->bulk_urb);
usb_free_urb(tt->bulk_urb);
usb_set_intfdata(intf, NULL);
kfree(tt);
}
static int ttusbir_suspend(struct usb_interface *intf, pm_message_t message)
{
struct ttusbir *tt = usb_get_intfdata(intf);
int i;
for (i = 0; i < NUM_URBS; i++)
usb_kill_urb(tt->urb[i]);
led_classdev_suspend(&tt->led);
usb_kill_urb(tt->bulk_urb);
return 0;
}
static int ttusbir_resume(struct usb_interface *intf)
{
struct ttusbir *tt = usb_get_intfdata(intf);
int i, rc;
tt->is_led_on = true;
led_classdev_resume(&tt->led);
for (i = 0; i < NUM_URBS; i++) {
rc = usb_submit_urb(tt->urb[i], GFP_NOIO);
if (rc) {
dev_warn(tt->dev, "failed to submit urb: %d\n", rc);
break;
}
}
return rc;
}
static const struct usb_device_id ttusbir_table[] = {
{ USB_DEVICE(0x0b48, 0x2003) },
{ }
};
static struct usb_driver ttusbir_driver = {
.name = DRIVER_NAME,
.id_table = ttusbir_table,
.probe = ttusbir_probe,
.suspend = ttusbir_suspend,
.resume = ttusbir_resume,
.reset_resume = ttusbir_resume,
.disconnect = ttusbir_disconnect,
};
module_usb_driver(ttusbir_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Sean Young <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, ttusbir_table);
| linux-master | drivers/media/rc/ttusbir.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2018 Sean Young <[email protected]>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <media/rc-core.h>
/* Each bit is 250us */
#define BIT_DURATION 250
struct imon {
struct device *dev;
struct urb *ir_urb;
struct rc_dev *rcdev;
__be64 *ir_buf;
char phys[64];
};
/*
* The first 5 bytes of data represent IR pulse or space. Each bit, starting
* from highest bit in the first byte, represents 250µs of data. It is 1
* for space and 0 for pulse.
*
* The station sends 10 packets, and the 7th byte will be number 1 to 10, so
* when we receive 10 we assume all the data has arrived.
*/
static void imon_ir_data(struct imon *imon)
{
struct ir_raw_event rawir = {};
u64 data = be64_to_cpup(imon->ir_buf);
u8 packet_no = data & 0xff;
int offset = 40;
int bit;
if (packet_no == 0xff)
return;
dev_dbg(imon->dev, "data: %*ph", 8, imon->ir_buf);
/*
* Only the first 5 bytes contain IR data. Right shift so we move
* the IR bits to the lower 40 bits.
*/
data >>= 24;
do {
/*
		 * Find the highest set bit which is less than or equal to
		 * offset
*
* offset is the bit above (base 0) where we start looking.
*
* data & (BIT_ULL(offset) - 1) masks off any unwanted bits,
* so we have just bits less than offset.
*
* fls will tell us the highest bit set plus 1 (or 0 if no
* bits are set).
*/
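		/*
		 * Worked example (hypothetical data): if the low 40 bits are
		 * 0x000000ffff with offset = 40, fls64() returns 16 and we
		 * emit a pulse of (40 - 16) * BIT_DURATION = 6000 us; after
		 * data = ~data the next round finds no set bits below 16 and
		 * emits a space of 16 * BIT_DURATION = 4000 us.
		 */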
rawir.pulse = !rawir.pulse;
bit = fls64(data & (BIT_ULL(offset) - 1));
if (bit < offset) {
dev_dbg(imon->dev, "%s: %d bits",
rawir.pulse ? "pulse" : "space", offset - bit);
rawir.duration = (offset - bit) * BIT_DURATION;
ir_raw_event_store_with_filter(imon->rcdev, &rawir);
offset = bit;
}
data = ~data;
} while (offset > 0);
if (packet_no == 0x0a && !imon->rcdev->idle) {
ir_raw_event_set_idle(imon->rcdev, true);
ir_raw_event_handle(imon->rcdev);
}
}
static void imon_ir_rx(struct urb *urb)
{
struct imon *imon = urb->context;
int ret;
switch (urb->status) {
case 0:
imon_ir_data(imon);
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
usb_unlink_urb(urb);
return;
case -EPIPE:
default:
dev_dbg(imon->dev, "error: urb status = %d", urb->status);
break;
}
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret && ret != -ENODEV)
dev_warn(imon->dev, "failed to resubmit urb: %d", ret);
}
static int imon_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_endpoint_descriptor *ir_ep = NULL;
struct usb_host_interface *idesc;
struct usb_device *udev;
struct rc_dev *rcdev;
struct imon *imon;
int i, ret;
udev = interface_to_usbdev(intf);
idesc = intf->cur_altsetting;
for (i = 0; i < idesc->desc.bNumEndpoints; i++) {
struct usb_endpoint_descriptor *ep = &idesc->endpoint[i].desc;
if (usb_endpoint_is_int_in(ep)) {
ir_ep = ep;
break;
}
}
if (!ir_ep) {
dev_err(&intf->dev, "IR endpoint missing");
return -ENODEV;
}
imon = devm_kmalloc(&intf->dev, sizeof(*imon), GFP_KERNEL);
if (!imon)
return -ENOMEM;
imon->ir_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!imon->ir_urb)
return -ENOMEM;
imon->ir_buf = kmalloc(sizeof(__be64), GFP_KERNEL);
if (!imon->ir_buf) {
ret = -ENOMEM;
goto free_urb;
}
imon->dev = &intf->dev;
usb_fill_int_urb(imon->ir_urb, udev,
usb_rcvintpipe(udev, ir_ep->bEndpointAddress),
imon->ir_buf, sizeof(__be64),
imon_ir_rx, imon, ir_ep->bInterval);
rcdev = devm_rc_allocate_device(&intf->dev, RC_DRIVER_IR_RAW);
if (!rcdev) {
ret = -ENOMEM;
goto free_urb;
}
usb_make_path(udev, imon->phys, sizeof(imon->phys));
rcdev->device_name = "iMON Station";
rcdev->driver_name = KBUILD_MODNAME;
rcdev->input_phys = imon->phys;
usb_to_input_id(udev, &rcdev->input_id);
rcdev->dev.parent = &intf->dev;
rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rcdev->map_name = RC_MAP_IMON_RSC;
rcdev->rx_resolution = BIT_DURATION;
rcdev->priv = imon;
ret = devm_rc_register_device(&intf->dev, rcdev);
if (ret)
goto free_urb;
imon->rcdev = rcdev;
ret = usb_submit_urb(imon->ir_urb, GFP_KERNEL);
if (ret)
goto free_urb;
usb_set_intfdata(intf, imon);
return 0;
free_urb:
usb_free_urb(imon->ir_urb);
kfree(imon->ir_buf);
return ret;
}
static void imon_disconnect(struct usb_interface *intf)
{
struct imon *imon = usb_get_intfdata(intf);
usb_kill_urb(imon->ir_urb);
usb_free_urb(imon->ir_urb);
kfree(imon->ir_buf);
}
static const struct usb_device_id imon_table[] = {
/* SoundGraph iMON (IR only) -- sg_imon.inf */
{ USB_DEVICE(0x04e8, 0xff30) },
{}
};
static struct usb_driver imon_driver = {
.name = KBUILD_MODNAME,
.probe = imon_probe,
.disconnect = imon_disconnect,
.id_table = imon_table
};
module_usb_driver(imon_driver);
MODULE_DESCRIPTION("Early raw iMON IR devices");
MODULE_AUTHOR("Sean Young <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, imon_table);
| linux-master | drivers/media/rc/imon_raw.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Sean Young <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pwm.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <media/rc-core.h>
#define DRIVER_NAME "pwm-ir-tx"
#define DEVICE_NAME "PWM IR Transmitter"
struct pwm_ir {
struct pwm_device *pwm;
unsigned int carrier;
unsigned int duty_cycle;
};
static const struct of_device_id pwm_ir_of_match[] = {
{ .compatible = "pwm-ir-tx", },
{ },
};
MODULE_DEVICE_TABLE(of, pwm_ir_of_match);
static int pwm_ir_set_duty_cycle(struct rc_dev *dev, u32 duty_cycle)
{
struct pwm_ir *pwm_ir = dev->priv;
pwm_ir->duty_cycle = duty_cycle;
return 0;
}
static int pwm_ir_set_carrier(struct rc_dev *dev, u32 carrier)
{
struct pwm_ir *pwm_ir = dev->priv;
if (!carrier)
return -EINVAL;
pwm_ir->carrier = carrier;
return 0;
}
static int pwm_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
unsigned int count)
{
struct pwm_ir *pwm_ir = dev->priv;
struct pwm_device *pwm = pwm_ir->pwm;
struct pwm_state state;
int i;
ktime_t edge;
long delta;
pwm_init_state(pwm, &state);
state.period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, pwm_ir->carrier);
pwm_set_relative_duty_cycle(&state, pwm_ir->duty_cycle, 100);
edge = ktime_get();
for (i = 0; i < count; i++) {
state.enabled = !(i % 2);
pwm_apply_state(pwm, &state);
edge = ktime_add_us(edge, txbuf[i]);
delta = ktime_us_delta(edge, ktime_get());
if (delta > 0)
usleep_range(delta, delta + 10);
}
state.enabled = false;
pwm_apply_state(pwm, &state);
return count;
}
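/*
 * Usage sketch (illustrative): for txbuf = { 9000, 4500, 560 } the loop above
 * enables the carrier for 9000 us (even index), disables it for 4500 us, then
 * enables it again for 560 us, re-syncing every edge against the ktime
 * baseline so that usleep_range() overshoot does not accumulate.
 */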
static int pwm_ir_probe(struct platform_device *pdev)
{
struct pwm_ir *pwm_ir;
struct rc_dev *rcdev;
int rc;
pwm_ir = devm_kmalloc(&pdev->dev, sizeof(*pwm_ir), GFP_KERNEL);
if (!pwm_ir)
return -ENOMEM;
pwm_ir->pwm = devm_pwm_get(&pdev->dev, NULL);
if (IS_ERR(pwm_ir->pwm))
return PTR_ERR(pwm_ir->pwm);
pwm_ir->carrier = 38000;
pwm_ir->duty_cycle = 50;
rcdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW_TX);
if (!rcdev)
return -ENOMEM;
rcdev->priv = pwm_ir;
rcdev->driver_name = DRIVER_NAME;
rcdev->device_name = DEVICE_NAME;
rcdev->tx_ir = pwm_ir_tx;
rcdev->s_tx_duty_cycle = pwm_ir_set_duty_cycle;
rcdev->s_tx_carrier = pwm_ir_set_carrier;
rc = devm_rc_register_device(&pdev->dev, rcdev);
if (rc < 0)
dev_err(&pdev->dev, "failed to register rc device\n");
return rc;
}
static struct platform_driver pwm_ir_driver = {
.probe = pwm_ir_probe,
.driver = {
.name = DRIVER_NAME,
.of_match_table = pwm_ir_of_match,
},
};
module_platform_driver(pwm_ir_driver);
MODULE_DESCRIPTION("PWM IR Transmitter");
MODULE_AUTHOR("Sean Young <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/rc/pwm-ir-tx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2013 STMicroelectronics Limited
* Author: Srinivas Kandagatla <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <media/rc-core.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_wakeirq.h>
struct st_rc_device {
struct device *dev;
int irq;
int irq_wake;
struct clk *sys_clock;
void __iomem *base; /* Register base address */
	void __iomem *rx_base; /* RX register base address */
struct rc_dev *rdev;
bool overclocking;
int sample_mult;
int sample_div;
bool rxuhfmode;
struct reset_control *rstc;
};
/* Registers */
#define IRB_SAMPLE_RATE_COMM 0x64 /* sample freq divisor*/
#define IRB_CLOCK_SEL 0x70 /* clock select */
#define IRB_CLOCK_SEL_STATUS 0x74 /* clock status */
/* IRB IR/UHF receiver registers */
#define IRB_RX_ON 0x40 /* pulse time capture */
#define IRB_RX_SYS 0x44 /* sym period capture */
#define IRB_RX_INT_EN 0x48 /* IRQ enable (R/W) */
#define IRB_RX_INT_STATUS 0x4c /* IRQ status (R/W) */
#define IRB_RX_EN 0x50 /* Receive enable */
#define IRB_MAX_SYM_PERIOD 0x54 /* max sym value */
#define IRB_RX_INT_CLEAR 0x58 /* overrun status */
#define IRB_RX_STATUS 0x6c /* receive status */
#define IRB_RX_NOISE_SUPPR 0x5c /* noise suppression */
#define IRB_RX_POLARITY_INV 0x68 /* polarity inverter */
/*
* IRQ set: Enable full FIFO 1 -> bit 3;
* Enable overrun IRQ 1 -> bit 2;
* Enable last symbol IRQ 1 -> bit 1:
* Enable RX interrupt 1 -> bit 0;
*/
#define IRB_RX_INTS 0x0f
#define IRB_RX_OVERRUN_INT 0x04
/* maximum symbol period (microsecs), timeout to detect end of symbol train */
#define MAX_SYMB_TIME 0x5000
#define IRB_SAMPLE_FREQ 10000000
#define IRB_FIFO_NOT_EMPTY 0xff00
#define IRB_OVERFLOW 0x4
#define IRB_TIMEOUT 0xffff
#define IR_ST_NAME "st-rc"
static void st_rc_send_lirc_timeout(struct rc_dev *rdev)
{
struct ir_raw_event ev = { .timeout = true, .duration = rdev->timeout };
ir_raw_event_store(rdev, &ev);
}
/*
* RX graphical example to better understand the difference between ST IR block
* output and standard definition used by LIRC (and most of the world!)
*
* mark mark
* |-IRB_RX_ON-| |-IRB_RX_ON-|
* ___ ___ ___ ___ ___ ___ _
* | | | | | | | | | | | | |
* | | | | | | space 0 | | | | | | space 1 |
* _____| |__| |__| |____________________________| |__| |__| |_____________|
*
* |--------------- IRB_RX_SYS -------------|------ IRB_RX_SYS -------|
*
* |------------- encoding bit 0 -----------|---- encoding bit 1 -----|
*
 * ST hardware returns mark (IRB_RX_ON) and total symbol time (IRB_RX_SYS), so
 * to convert to standard mark/space we have to calculate
 * space = (IRB_RX_SYS - mark).
 * The mark time represents the amount of time the carrier (usually 36-40kHz)
 * is detected. The above example shows Pulse Width Modulation encoding where
 * bit 0 is represented by space > mark.
*/
static irqreturn_t st_rc_rx_interrupt(int irq, void *data)
{
unsigned long timeout;
unsigned int symbol, mark = 0;
struct st_rc_device *dev = data;
int last_symbol = 0;
u32 status, int_status;
struct ir_raw_event ev = {};
if (dev->irq_wake)
pm_wakeup_event(dev->dev, 0);
/* FIXME: is 10ms good enough ? */
timeout = jiffies + msecs_to_jiffies(10);
do {
status = readl(dev->rx_base + IRB_RX_STATUS);
if (!(status & (IRB_FIFO_NOT_EMPTY | IRB_OVERFLOW)))
break;
int_status = readl(dev->rx_base + IRB_RX_INT_STATUS);
if (unlikely(int_status & IRB_RX_OVERRUN_INT)) {
/* discard the entire collection in case of errors! */
ir_raw_event_overflow(dev->rdev);
dev_info(dev->dev, "IR RX overrun\n");
writel(IRB_RX_OVERRUN_INT,
dev->rx_base + IRB_RX_INT_CLEAR);
continue;
}
symbol = readl(dev->rx_base + IRB_RX_SYS);
mark = readl(dev->rx_base + IRB_RX_ON);
if (symbol == IRB_TIMEOUT)
last_symbol = 1;
/* Ignore any noise */
if ((mark > 2) && (symbol > 1)) {
symbol -= mark;
if (dev->overclocking) { /* adjustments to timings */
symbol *= dev->sample_mult;
symbol /= dev->sample_div;
mark *= dev->sample_mult;
mark /= dev->sample_div;
}
ev.duration = mark;
ev.pulse = true;
ir_raw_event_store(dev->rdev, &ev);
if (!last_symbol) {
ev.duration = symbol;
ev.pulse = false;
ir_raw_event_store(dev->rdev, &ev);
} else {
st_rc_send_lirc_timeout(dev->rdev);
}
}
last_symbol = 0;
} while (time_is_after_jiffies(timeout));
writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_CLEAR);
/* Empty software fifo */
ir_raw_event_handle(dev->rdev);
return IRQ_HANDLED;
}
static int st_rc_hardware_init(struct st_rc_device *dev)
{
int ret;
int baseclock, freqdiff;
unsigned int rx_max_symbol_per = MAX_SYMB_TIME;
unsigned int rx_sampling_freq_div;
/* Enable the IP */
reset_control_deassert(dev->rstc);
ret = clk_prepare_enable(dev->sys_clock);
if (ret) {
dev_err(dev->dev, "Failed to prepare/enable system clock\n");
return ret;
}
baseclock = clk_get_rate(dev->sys_clock);
/* IRB input pins are inverted internally from high to low. */
writel(1, dev->rx_base + IRB_RX_POLARITY_INV);
rx_sampling_freq_div = baseclock / IRB_SAMPLE_FREQ;
writel(rx_sampling_freq_div, dev->base + IRB_SAMPLE_RATE_COMM);
freqdiff = baseclock - (rx_sampling_freq_div * IRB_SAMPLE_FREQ);
if (freqdiff) { /* over clocking, workout the adjustment factors */
dev->overclocking = true;
dev->sample_mult = 1000;
dev->sample_div = baseclock / (10000 * rx_sampling_freq_div);
		rx_max_symbol_per = (rx_max_symbol_per * 1000) / dev->sample_div;
}
writel(rx_max_symbol_per, dev->rx_base + IRB_MAX_SYM_PERIOD);
return 0;
}
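/*
 * Worked example for the over-clocking path above (hypothetical clock): with
 * baseclock = 48 MHz, rx_sampling_freq_div = 4, so the block actually samples
 * at 12 MHz instead of the nominal 10 MHz. Then sample_div =
 * 48000000 / (10000 * 4) = 1200 and every captured duration is scaled by
 * sample_mult / sample_div = 1000 / 1200 = 10/12, back to microseconds.
 */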
static void st_rc_remove(struct platform_device *pdev)
{
struct st_rc_device *rc_dev = platform_get_drvdata(pdev);
dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
clk_disable_unprepare(rc_dev->sys_clock);
rc_unregister_device(rc_dev->rdev);
}
static int st_rc_open(struct rc_dev *rdev)
{
struct st_rc_device *dev = rdev->priv;
unsigned long flags;
local_irq_save(flags);
/* enable interrupts and receiver */
writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_EN);
writel(0x01, dev->rx_base + IRB_RX_EN);
local_irq_restore(flags);
return 0;
}
static void st_rc_close(struct rc_dev *rdev)
{
struct st_rc_device *dev = rdev->priv;
/* disable interrupts and receiver */
writel(0x00, dev->rx_base + IRB_RX_EN);
writel(0x00, dev->rx_base + IRB_RX_INT_EN);
}
static int st_rc_probe(struct platform_device *pdev)
{
int ret = -EINVAL;
struct rc_dev *rdev;
struct device *dev = &pdev->dev;
struct st_rc_device *rc_dev;
struct device_node *np = pdev->dev.of_node;
const char *rx_mode;
rc_dev = devm_kzalloc(dev, sizeof(struct st_rc_device), GFP_KERNEL);
if (!rc_dev)
return -ENOMEM;
rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev)
return -ENOMEM;
if (np && !of_property_read_string(np, "rx-mode", &rx_mode)) {
if (!strcmp(rx_mode, "uhf")) {
rc_dev->rxuhfmode = true;
} else if (!strcmp(rx_mode, "infrared")) {
rc_dev->rxuhfmode = false;
} else {
dev_err(dev, "Unsupported rx mode [%s]\n", rx_mode);
goto err;
}
} else {
goto err;
}
rc_dev->sys_clock = devm_clk_get(dev, NULL);
if (IS_ERR(rc_dev->sys_clock)) {
dev_err(dev, "System clock not found\n");
ret = PTR_ERR(rc_dev->sys_clock);
goto err;
}
rc_dev->irq = platform_get_irq(pdev, 0);
if (rc_dev->irq < 0) {
ret = rc_dev->irq;
goto err;
}
rc_dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rc_dev->base)) {
ret = PTR_ERR(rc_dev->base);
goto err;
}
if (rc_dev->rxuhfmode)
rc_dev->rx_base = rc_dev->base + 0x40;
else
rc_dev->rx_base = rc_dev->base;
rc_dev->rstc = reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(rc_dev->rstc)) {
ret = PTR_ERR(rc_dev->rstc);
goto err;
}
rc_dev->dev = dev;
platform_set_drvdata(pdev, rc_dev);
ret = st_rc_hardware_init(rc_dev);
if (ret)
goto err;
rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
/* rx sampling rate is 10Mhz */
rdev->rx_resolution = 100;
rdev->timeout = MAX_SYMB_TIME;
rdev->priv = rc_dev;
rdev->open = st_rc_open;
rdev->close = st_rc_close;
rdev->driver_name = IR_ST_NAME;
rdev->map_name = RC_MAP_EMPTY;
rdev->device_name = "ST Remote Control Receiver";
ret = rc_register_device(rdev);
if (ret < 0)
goto clkerr;
rc_dev->rdev = rdev;
if (devm_request_irq(dev, rc_dev->irq, st_rc_rx_interrupt,
0, IR_ST_NAME, rc_dev) < 0) {
dev_err(dev, "IRQ %d register failed\n", rc_dev->irq);
ret = -EINVAL;
goto rcerr;
}
/* enable wake via this device */
device_init_wakeup(dev, true);
dev_pm_set_wake_irq(dev, rc_dev->irq);
/*
* for LIRC_MODE_MODE2 or LIRC_MODE_PULSE or LIRC_MODE_RAW
* lircd expects a long space first before a signal train to sync.
*/
st_rc_send_lirc_timeout(rdev);
dev_info(dev, "setup in %s mode\n", rc_dev->rxuhfmode ? "UHF" : "IR");
return ret;
rcerr:
rc_unregister_device(rdev);
rdev = NULL;
clkerr:
clk_disable_unprepare(rc_dev->sys_clock);
err:
rc_free_device(rdev);
dev_err(dev, "Unable to register device (%d)\n", ret);
return ret;
}
#ifdef CONFIG_PM_SLEEP
static int st_rc_suspend(struct device *dev)
{
struct st_rc_device *rc_dev = dev_get_drvdata(dev);
if (device_may_wakeup(dev)) {
if (!enable_irq_wake(rc_dev->irq))
rc_dev->irq_wake = 1;
else
return -EINVAL;
} else {
pinctrl_pm_select_sleep_state(dev);
writel(0x00, rc_dev->rx_base + IRB_RX_EN);
writel(0x00, rc_dev->rx_base + IRB_RX_INT_EN);
clk_disable_unprepare(rc_dev->sys_clock);
reset_control_assert(rc_dev->rstc);
}
return 0;
}
static int st_rc_resume(struct device *dev)
{
int ret;
struct st_rc_device *rc_dev = dev_get_drvdata(dev);
struct rc_dev *rdev = rc_dev->rdev;
if (rc_dev->irq_wake) {
disable_irq_wake(rc_dev->irq);
rc_dev->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(dev);
ret = st_rc_hardware_init(rc_dev);
if (ret)
return ret;
if (rdev->users) {
writel(IRB_RX_INTS, rc_dev->rx_base + IRB_RX_INT_EN);
writel(0x01, rc_dev->rx_base + IRB_RX_EN);
}
}
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(st_rc_pm_ops, st_rc_suspend, st_rc_resume);
#ifdef CONFIG_OF
static const struct of_device_id st_rc_match[] = {
{ .compatible = "st,comms-irb", },
{},
};
MODULE_DEVICE_TABLE(of, st_rc_match);
#endif
static struct platform_driver st_rc_driver = {
.driver = {
.name = IR_ST_NAME,
.of_match_table = of_match_ptr(st_rc_match),
.pm = &st_rc_pm_ops,
},
.probe = st_rc_probe,
.remove_new = st_rc_remove,
};
module_platform_driver(st_rc_driver);
MODULE_DESCRIPTION("RC Transceiver driver for STMicroelectronics platforms");
MODULE_AUTHOR("STMicroelectronics (R&D) Ltd");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/rc/st_rc.c |
// SPDX-License-Identifier: GPL-2.0-only
/* ir-jvc-decoder.c - handle JVC IR Pulse/Space protocol
*
* Copyright (C) 2010 by David Härdeman <[email protected]>
*/
#include <linux/bitrev.h>
#include <linux/module.h>
#include "rc-core-priv.h"
#define JVC_NBITS 16 /* dev(8) + func(8) */
#define JVC_UNIT 525 /* us */
#define JVC_HEADER_PULSE (16 * JVC_UNIT) /* lack of header -> repeat */
#define JVC_HEADER_SPACE (8 * JVC_UNIT)
#define JVC_BIT_PULSE (1 * JVC_UNIT)
#define JVC_BIT_0_SPACE (1 * JVC_UNIT)
#define JVC_BIT_1_SPACE (3 * JVC_UNIT)
#define JVC_TRAILER_PULSE (1 * JVC_UNIT)
#define JVC_TRAILER_SPACE (35 * JVC_UNIT)
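/*
 * Worked frame timing (derived from the constants above): the header is an
 * 8400us pulse plus a 4200us space; a 0 bit takes 1050us (525 + 525) and a
 * 1 bit 2100us (525 + 1575), so a 16-bit frame spans roughly 29.9ms to
 * 46.7ms up to the trailer pulse, followed by an 18.4ms minimum trailer
 * space.
 */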
enum jvc_state {
STATE_INACTIVE,
STATE_HEADER_SPACE,
STATE_BIT_PULSE,
STATE_BIT_SPACE,
STATE_TRAILER_PULSE,
STATE_TRAILER_SPACE,
STATE_CHECK_REPEAT,
};
/**
* ir_jvc_decode() - Decode one JVC pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_jvc_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct jvc_dec *data = &dev->raw->jvc;
if (!is_timing_event(ev)) {
if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
if (!geq_margin(ev.duration, JVC_UNIT, JVC_UNIT / 2))
goto out;
dev_dbg(&dev->dev, "JVC decode started at state %d (%uus %s)\n",
data->state, ev.duration, TO_STR(ev.pulse));
again:
switch (data->state) {
case STATE_INACTIVE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, JVC_HEADER_PULSE, JVC_UNIT / 2))
break;
data->count = 0;
data->first = true;
data->toggle = !data->toggle;
data->state = STATE_HEADER_SPACE;
return 0;
case STATE_HEADER_SPACE:
if (ev.pulse)
break;
if (!eq_margin(ev.duration, JVC_HEADER_SPACE, JVC_UNIT / 2))
break;
data->state = STATE_BIT_PULSE;
return 0;
case STATE_BIT_PULSE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, JVC_BIT_PULSE, JVC_UNIT / 2))
break;
data->state = STATE_BIT_SPACE;
return 0;
case STATE_BIT_SPACE:
if (ev.pulse)
break;
data->bits <<= 1;
if (eq_margin(ev.duration, JVC_BIT_1_SPACE, JVC_UNIT / 2)) {
data->bits |= 1;
decrease_duration(&ev, JVC_BIT_1_SPACE);
} else if (eq_margin(ev.duration, JVC_BIT_0_SPACE, JVC_UNIT / 2))
decrease_duration(&ev, JVC_BIT_0_SPACE);
else
break;
data->count++;
if (data->count == JVC_NBITS)
data->state = STATE_TRAILER_PULSE;
else
data->state = STATE_BIT_PULSE;
return 0;
case STATE_TRAILER_PULSE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, JVC_TRAILER_PULSE, JVC_UNIT / 2))
break;
data->state = STATE_TRAILER_SPACE;
return 0;
case STATE_TRAILER_SPACE:
if (ev.pulse)
break;
if (!geq_margin(ev.duration, JVC_TRAILER_SPACE, JVC_UNIT / 2))
break;
if (data->first) {
u32 scancode;
scancode = (bitrev8((data->bits >> 8) & 0xff) << 8) |
(bitrev8((data->bits >> 0) & 0xff) << 0);
dev_dbg(&dev->dev, "JVC scancode 0x%04x\n", scancode);
rc_keydown(dev, RC_PROTO_JVC, scancode, data->toggle);
data->first = false;
data->old_bits = data->bits;
} else if (data->bits == data->old_bits) {
dev_dbg(&dev->dev, "JVC repeat\n");
rc_repeat(dev);
} else {
dev_dbg(&dev->dev, "JVC invalid repeat msg\n");
break;
}
data->count = 0;
data->state = STATE_CHECK_REPEAT;
return 0;
case STATE_CHECK_REPEAT:
if (!ev.pulse)
break;
if (eq_margin(ev.duration, JVC_HEADER_PULSE, JVC_UNIT / 2))
data->state = STATE_INACTIVE;
else
data->state = STATE_BIT_PULSE;
goto again;
}
out:
dev_dbg(&dev->dev, "JVC decode failed at state %d (%uus %s)\n",
data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
static const struct ir_raw_timings_pd ir_jvc_timings = {
.header_pulse = JVC_HEADER_PULSE,
.header_space = JVC_HEADER_SPACE,
.bit_pulse = JVC_BIT_PULSE,
.bit_space[0] = JVC_BIT_0_SPACE,
.bit_space[1] = JVC_BIT_1_SPACE,
.trailer_pulse = JVC_TRAILER_PULSE,
.trailer_space = JVC_TRAILER_SPACE,
.msb_first = 1,
};
/**
* ir_jvc_encode() - Encode a scancode as a stream of raw events
*
* @protocol: protocol to encode
* @scancode: scancode to encode
* @events: array of raw ir events to write into
* @max: maximum size of @events
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
*/
static int ir_jvc_encode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max)
{
struct ir_raw_event *e = events;
int ret;
u32 raw = (bitrev8((scancode >> 8) & 0xff) << 8) |
(bitrev8((scancode >> 0) & 0xff) << 0);
ret = ir_raw_gen_pd(&e, max, &ir_jvc_timings, JVC_NBITS, raw);
if (ret < 0)
return ret;
return e - events;
}
static struct ir_raw_handler jvc_handler = {
.protocols = RC_PROTO_BIT_JVC,
.decode = ir_jvc_decode,
.encode = ir_jvc_encode,
.carrier = 38000,
.min_timeout = JVC_TRAILER_SPACE,
};
static int __init ir_jvc_decode_init(void)
{
ir_raw_handler_register(&jvc_handler);
printk(KERN_INFO "IR JVC protocol handler initialized\n");
return 0;
}
static void __exit ir_jvc_decode_exit(void)
{
ir_raw_handler_unregister(&jvc_handler);
}
module_init(ir_jvc_decode_init);
module_exit(ir_jvc_decode_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Härdeman <[email protected]>");
MODULE_DESCRIPTION("JVC IR protocol decoder");
| linux-master | drivers/media/rc/ir-jvc-decoder.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* serial_ir.c
*
* serial_ir - Device driver that records pulse- and pause-lengths
 * (space-lengths) between DDCD events on a serial port.
*
* Copyright (C) 1996,97 Ralph Metzler <[email protected]>
* Copyright (C) 1998 Trent Piepho <[email protected]>
* Copyright (C) 1998 Ben Pfaff <[email protected]>
* Copyright (C) 1999 Christoph Bartelmus <[email protected]>
* Copyright (C) 2007 Andrei Tanas <[email protected]> (suspend/resume support)
* Copyright (C) 2016 Sean Young <[email protected]> (port to rc-core)
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/serial_reg.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <media/rc-core.h>
struct serial_ir_hw {
int signal_pin;
int signal_pin_change;
u8 on;
u8 off;
unsigned set_send_carrier:1;
unsigned set_duty_cycle:1;
void (*send_pulse)(unsigned int length, ktime_t edge);
void (*send_space)(void);
spinlock_t lock;
};
#define IR_HOMEBREW 0
#define IR_IRDEO 1
#define IR_IRDEO_REMOTE 2
#define IR_ANIMAX 3
#define IR_IGOR 4
/* module parameters */
static int type;
static int io;
static int irq;
static ulong iommap;
static int ioshift;
static bool softcarrier = true;
static bool share_irq;
static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */
static bool txsense; /* 0 = active high, 1 = active low */
/* forward declarations */
static void send_pulse_irdeo(unsigned int length, ktime_t edge);
static void send_space_irdeo(void);
#ifdef CONFIG_IR_SERIAL_TRANSMITTER
static void send_pulse_homebrew(unsigned int length, ktime_t edge);
static void send_space_homebrew(void);
#endif
static struct serial_ir_hw hardware[] = {
[IR_HOMEBREW] = {
.lock = __SPIN_LOCK_UNLOCKED(hardware[IR_HOMEBREW].lock),
.signal_pin = UART_MSR_DCD,
.signal_pin_change = UART_MSR_DDCD,
.on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
.off = (UART_MCR_RTS | UART_MCR_OUT2),
#ifdef CONFIG_IR_SERIAL_TRANSMITTER
.send_pulse = send_pulse_homebrew,
.send_space = send_space_homebrew,
.set_send_carrier = true,
.set_duty_cycle = true,
#endif
},
[IR_IRDEO] = {
.lock = __SPIN_LOCK_UNLOCKED(hardware[IR_IRDEO].lock),
.signal_pin = UART_MSR_DSR,
.signal_pin_change = UART_MSR_DDSR,
.on = UART_MCR_OUT2,
.off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
.send_pulse = send_pulse_irdeo,
.send_space = send_space_irdeo,
.set_duty_cycle = true,
},
[IR_IRDEO_REMOTE] = {
.lock = __SPIN_LOCK_UNLOCKED(hardware[IR_IRDEO_REMOTE].lock),
.signal_pin = UART_MSR_DSR,
.signal_pin_change = UART_MSR_DDSR,
.on = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
.off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
.send_pulse = send_pulse_irdeo,
.send_space = send_space_irdeo,
.set_duty_cycle = true,
},
[IR_ANIMAX] = {
.lock = __SPIN_LOCK_UNLOCKED(hardware[IR_ANIMAX].lock),
.signal_pin = UART_MSR_DCD,
.signal_pin_change = UART_MSR_DDCD,
.on = 0,
.off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
},
[IR_IGOR] = {
.lock = __SPIN_LOCK_UNLOCKED(hardware[IR_IGOR].lock),
.signal_pin = UART_MSR_DSR,
.signal_pin_change = UART_MSR_DDSR,
.on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
.off = (UART_MCR_RTS | UART_MCR_OUT2),
#ifdef CONFIG_IR_SERIAL_TRANSMITTER
.send_pulse = send_pulse_homebrew,
.send_space = send_space_homebrew,
.set_send_carrier = true,
.set_duty_cycle = true,
#endif
},
};
#define RS_ISR_PASS_LIMIT 256
struct serial_ir {
ktime_t lastkt;
struct rc_dev *rcdev;
struct platform_device *pdev;
struct timer_list timeout_timer;
unsigned int carrier;
unsigned int duty_cycle;
};
static struct serial_ir serial_ir;
/* fetch serial input packet (1 byte) from register offset */
static u8 sinp(int offset)
{
if (iommap)
/* the register is memory-mapped */
offset <<= ioshift;
return inb(io + offset);
}
/* write serial output packet (1 byte) of value to register offset */
static void soutp(int offset, u8 value)
{
if (iommap)
/* the register is memory-mapped */
offset <<= ioshift;
outb(value, io + offset);
}
static void on(void)
{
if (txsense)
soutp(UART_MCR, hardware[type].off);
else
soutp(UART_MCR, hardware[type].on);
}
static void off(void)
{
if (txsense)
soutp(UART_MCR, hardware[type].on);
else
soutp(UART_MCR, hardware[type].off);
}
static void send_pulse_irdeo(unsigned int length, ktime_t target)
{
long rawbits;
int i;
unsigned char output;
unsigned char chunk, shifted;
/* how many bits have to be sent ? */
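	/* (at 115200 baud one UART bit lasts 10000/1152 us, so a pulse
	 * of 'length' us spans length * 1152 / 10000 bit times) */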
rawbits = length * 1152 / 10000;
if (serial_ir.duty_cycle > 50)
chunk = 3;
else
chunk = 1;
for (i = 0, output = 0x7f; rawbits > 0; rawbits -= 3) {
shifted = chunk << (i * 3);
shifted >>= 1;
output &= (~shifted);
i++;
if (i == 3) {
soutp(UART_TX, output);
while (!(sinp(UART_LSR) & UART_LSR_THRE))
;
output = 0x7f;
i = 0;
}
}
if (i != 0) {
soutp(UART_TX, output);
while (!(sinp(UART_LSR) & UART_LSR_TEMT))
;
}
}
static void send_space_irdeo(void)
{
}
#ifdef CONFIG_IR_SERIAL_TRANSMITTER
static void send_pulse_homebrew_softcarrier(unsigned int length, ktime_t edge)
{
ktime_t now, target = ktime_add_us(edge, length);
/*
* delta should never exceed 4 seconds and on m68k
* ndelay(s64) does not compile; so use s32 rather than s64.
*/
s32 delta;
unsigned int pulse, space;
/* Ensure the dividend fits into 32 bit */
pulse = DIV_ROUND_CLOSEST(serial_ir.duty_cycle * (NSEC_PER_SEC / 100),
serial_ir.carrier);
space = DIV_ROUND_CLOSEST((100 - serial_ir.duty_cycle) *
(NSEC_PER_SEC / 100), serial_ir.carrier);
for (;;) {
now = ktime_get();
if (ktime_compare(now, target) >= 0)
break;
on();
edge = ktime_add_ns(edge, pulse);
delta = ktime_to_ns(ktime_sub(edge, now));
if (delta > 0)
ndelay(delta);
now = ktime_get();
off();
if (ktime_compare(now, target) >= 0)
break;
edge = ktime_add_ns(edge, space);
delta = ktime_to_ns(ktime_sub(edge, now));
if (delta > 0)
ndelay(delta);
}
}
static void send_pulse_homebrew(unsigned int length, ktime_t edge)
{
if (softcarrier)
send_pulse_homebrew_softcarrier(length, edge);
else
on();
}
static void send_space_homebrew(void)
{
off();
}
#endif
static void frbwrite(unsigned int l, bool is_pulse)
{
/* simple noise filter */
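	/*
	 * Spaces longer than 20 ms are held back; pulses arriving while
	 * one is pending are treated as glitches and folded into the
	 * space until they accumulate past 250 us, at which point the
	 * buffered space and the pulse are both flushed.
	 */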
static unsigned int ptr, pulse, space;
struct ir_raw_event ev = {};
if (ptr > 0 && is_pulse) {
pulse += l;
if (pulse > 250) {
ev.duration = space;
ev.pulse = false;
ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
ev.duration = pulse;
ev.pulse = true;
ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
ptr = 0;
pulse = 0;
}
return;
}
if (!is_pulse) {
if (ptr == 0) {
if (l > 20000) {
space = l;
ptr++;
return;
}
} else {
if (l > 20000) {
space += pulse;
if (space > IR_MAX_DURATION)
space = IR_MAX_DURATION;
space += l;
if (space > IR_MAX_DURATION)
space = IR_MAX_DURATION;
pulse = 0;
return;
}
ev.duration = space;
ev.pulse = false;
ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
ev.duration = pulse;
ev.pulse = true;
ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
ptr = 0;
pulse = 0;
}
}
ev.duration = l;
ev.pulse = is_pulse;
ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
}
static irqreturn_t serial_ir_irq_handler(int i, void *blah)
{
ktime_t kt;
int counter, dcd;
u8 status;
ktime_t delkt;
unsigned int data;
static int last_dcd = -1;
if ((sinp(UART_IIR) & UART_IIR_NO_INT)) {
/* not our interrupt */
return IRQ_NONE;
}
counter = 0;
do {
counter++;
status = sinp(UART_MSR);
if (counter > RS_ISR_PASS_LIMIT) {
dev_err(&serial_ir.pdev->dev, "Trapped in interrupt");
break;
}
if ((status & hardware[type].signal_pin_change) &&
sense != -1) {
/* get current time */
kt = ktime_get();
/*
* The driver needs to know if your receiver is
* active high or active low, or the space/pulse
* sense could be inverted.
*/
/* calc time since last interrupt in nanoseconds */
dcd = (status & hardware[type].signal_pin) ? 1 : 0;
if (dcd == last_dcd) {
dev_dbg(&serial_ir.pdev->dev,
"ignoring spike: %d %d %lldns %lldns\n",
dcd, sense, ktime_to_ns(kt),
ktime_to_ns(serial_ir.lastkt));
continue;
}
delkt = ktime_sub(kt, serial_ir.lastkt);
if (ktime_compare(delkt, ktime_set(15, 0)) > 0) {
data = IR_MAX_DURATION; /* really long time */
if (!(dcd ^ sense)) {
/* sanity check */
dev_err(&serial_ir.pdev->dev,
"dcd unexpected: %d %d %lldns %lldns\n",
dcd, sense, ktime_to_ns(kt),
ktime_to_ns(serial_ir.lastkt));
/*
* detecting pulse while this
* MUST be a space!
*/
sense = sense ? 0 : 1;
}
} else {
data = ktime_to_us(delkt);
}
frbwrite(data, !(dcd ^ sense));
serial_ir.lastkt = kt;
last_dcd = dcd;
}
} while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
mod_timer(&serial_ir.timeout_timer,
jiffies + usecs_to_jiffies(serial_ir.rcdev->timeout));
ir_raw_event_handle(serial_ir.rcdev);
return IRQ_HANDLED;
}
static int hardware_init_port(void)
{
u8 scratch, scratch2, scratch3;
/*
* This is a simple port existence test, borrowed from the autoconfig
* function in drivers/tty/serial/8250/8250_port.c
*/
scratch = sinp(UART_IER);
soutp(UART_IER, 0);
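	/* the port 0x80 write below only serves as a short I/O delay */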
#ifdef __i386__
outb(0xff, 0x080);
#endif
scratch2 = sinp(UART_IER) & 0x0f;
soutp(UART_IER, 0x0f);
#ifdef __i386__
outb(0x00, 0x080);
#endif
scratch3 = sinp(UART_IER) & 0x0f;
soutp(UART_IER, scratch);
if (scratch2 != 0 || scratch3 != 0x0f) {
/* we fail, there's nothing here */
pr_err("port existence test failed, cannot continue\n");
return -ENODEV;
}
/* Set DLAB 0. */
soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
/* First of all, disable all interrupts */
soutp(UART_IER, sinp(UART_IER) &
(~(UART_IER_MSI | UART_IER_RLSI | UART_IER_THRI | UART_IER_RDI)));
/* Clear registers. */
sinp(UART_LSR);
sinp(UART_RX);
sinp(UART_IIR);
sinp(UART_MSR);
/* Set line for power source */
off();
/* Clear registers again to be sure. */
sinp(UART_LSR);
sinp(UART_RX);
sinp(UART_IIR);
sinp(UART_MSR);
switch (type) {
case IR_IRDEO:
case IR_IRDEO_REMOTE:
/* setup port to 7N1 @ 115200 Baud */
/* 7N1+start = 9 bits at 115200 ~ 3 bits at 38kHz */
/* Set DLAB 1. */
soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
/* Set divisor to 1 => 115200 Baud */
soutp(UART_DLM, 0);
soutp(UART_DLL, 1);
/* Set DLAB 0 + 7N1 */
soutp(UART_LCR, UART_LCR_WLEN7);
/* THR interrupt already disabled at this point */
break;
default:
break;
}
return 0;
}
static void serial_ir_timeout(struct timer_list *unused)
{
struct ir_raw_event ev = {
.timeout = true,
.duration = serial_ir.rcdev->timeout
};
ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
ir_raw_event_handle(serial_ir.rcdev);
}
/* Needed by serial_ir_probe() */
static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
unsigned int count);
static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle);
static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier);
static int serial_ir_open(struct rc_dev *rcdev);
static void serial_ir_close(struct rc_dev *rcdev);
static int serial_ir_probe(struct platform_device *dev)
{
struct rc_dev *rcdev;
int i, nlow, nhigh, result;
rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW);
if (!rcdev)
return -ENOMEM;
if (hardware[type].send_pulse && hardware[type].send_space)
rcdev->tx_ir = serial_ir_tx;
if (hardware[type].set_send_carrier)
rcdev->s_tx_carrier = serial_ir_tx_carrier;
if (hardware[type].set_duty_cycle)
rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
switch (type) {
case IR_HOMEBREW:
rcdev->device_name = "Serial IR type home-brew";
break;
case IR_IRDEO:
rcdev->device_name = "Serial IR type IRdeo";
break;
case IR_IRDEO_REMOTE:
rcdev->device_name = "Serial IR type IRdeo remote";
break;
case IR_ANIMAX:
rcdev->device_name = "Serial IR type AnimaX";
break;
case IR_IGOR:
rcdev->device_name = "Serial IR type IgorPlug";
break;
}
rcdev->input_phys = KBUILD_MODNAME "/input0";
rcdev->input_id.bustype = BUS_HOST;
rcdev->input_id.vendor = 0x0001;
rcdev->input_id.product = 0x0001;
rcdev->input_id.version = 0x0100;
rcdev->open = serial_ir_open;
rcdev->close = serial_ir_close;
rcdev->dev.parent = &serial_ir.pdev->dev;
rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rcdev->driver_name = KBUILD_MODNAME;
rcdev->map_name = RC_MAP_RC6_MCE;
rcdev->min_timeout = 1;
rcdev->timeout = IR_DEFAULT_TIMEOUT;
rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rcdev->rx_resolution = 250;
serial_ir.rcdev = rcdev;
timer_setup(&serial_ir.timeout_timer, serial_ir_timeout, 0);
result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler,
share_irq ? IRQF_SHARED : 0,
KBUILD_MODNAME, &hardware);
if (result < 0) {
if (result == -EBUSY)
dev_err(&dev->dev, "IRQ %d busy\n", irq);
else if (result == -EINVAL)
dev_err(&dev->dev, "Bad irq number or handler\n");
return result;
}
/* Reserve io region. */
if ((iommap &&
(devm_request_mem_region(&dev->dev, iommap, 8UL << ioshift,
KBUILD_MODNAME) == NULL)) ||
(!iommap && (devm_request_region(&dev->dev, io, 8,
KBUILD_MODNAME) == NULL))) {
dev_err(&dev->dev, "port %04x already in use\n", io);
dev_warn(&dev->dev, "use 'setserial /dev/ttySX uart none'\n");
dev_warn(&dev->dev,
"or compile the serial port driver as module and\n");
dev_warn(&dev->dev, "make sure this module is loaded first\n");
return -EBUSY;
}
result = hardware_init_port();
if (result < 0)
return result;
/* Initialize pulse/space widths */
serial_ir.duty_cycle = 50;
serial_ir.carrier = 38000;
/* If pin is high, then this must be an active low receiver. */
if (sense == -1) {
/* wait 1/2 sec for the power supply */
msleep(500);
/*
* probe 9 times every 0.04s, collect "votes" for
* active high/low
*/
nlow = 0;
nhigh = 0;
for (i = 0; i < 9; i++) {
if (sinp(UART_MSR) & hardware[type].signal_pin)
nlow++;
else
nhigh++;
msleep(40);
}
sense = nlow >= nhigh ? 1 : 0;
dev_info(&dev->dev, "auto-detected active %s receiver\n",
sense ? "low" : "high");
} else
dev_info(&dev->dev, "Manually using active %s receiver\n",
sense ? "low" : "high");
dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io);
return devm_rc_register_device(&dev->dev, rcdev);
}
static int serial_ir_open(struct rc_dev *rcdev)
{
unsigned long flags;
/* initialize timestamp */
serial_ir.lastkt = ktime_get();
spin_lock_irqsave(&hardware[type].lock, flags);
/* Set DLAB 0. */
soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
soutp(UART_IER, sinp(UART_IER) | UART_IER_MSI);
spin_unlock_irqrestore(&hardware[type].lock, flags);
return 0;
}
static void serial_ir_close(struct rc_dev *rcdev)
{
unsigned long flags;
spin_lock_irqsave(&hardware[type].lock, flags);
/* Set DLAB 0. */
soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
/* First of all, disable all interrupts */
soutp(UART_IER, sinp(UART_IER) &
(~(UART_IER_MSI | UART_IER_RLSI | UART_IER_THRI | UART_IER_RDI)));
spin_unlock_irqrestore(&hardware[type].lock, flags);
}
static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
unsigned int count)
{
unsigned long flags;
ktime_t edge;
s64 delta;
int i;
spin_lock_irqsave(&hardware[type].lock, flags);
if (type == IR_IRDEO) {
/* DTR, RTS down */
on();
}
edge = ktime_get();
for (i = 0; i < count; i++) {
if (i % 2)
hardware[type].send_space();
else
hardware[type].send_pulse(txbuf[i], edge);
edge = ktime_add_us(edge, txbuf[i]);
delta = ktime_us_delta(edge, ktime_get());
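		/*
		 * Sleep (with the lock dropped) while more than 25us remain
		 * until the next edge; busy-wait the final stretch so the
		 * edge lands on time.
		 */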
if (delta > 25) {
spin_unlock_irqrestore(&hardware[type].lock, flags);
usleep_range(delta - 25, delta + 25);
spin_lock_irqsave(&hardware[type].lock, flags);
} else if (delta > 0) {
udelay(delta);
}
}
off();
spin_unlock_irqrestore(&hardware[type].lock, flags);
return count;
}
static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle)
{
serial_ir.duty_cycle = cycle;
return 0;
}
static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier)
{
if (carrier > 500000 || carrier < 20000)
return -EINVAL;
serial_ir.carrier = carrier;
return 0;
}
static int serial_ir_suspend(struct platform_device *dev,
pm_message_t state)
{
/* Set DLAB 0. */
soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
/* Disable all interrupts */
soutp(UART_IER, sinp(UART_IER) &
(~(UART_IER_MSI | UART_IER_RLSI | UART_IER_THRI | UART_IER_RDI)));
/* Clear registers. */
sinp(UART_LSR);
sinp(UART_RX);
sinp(UART_IIR);
sinp(UART_MSR);
return 0;
}
static int serial_ir_resume(struct platform_device *dev)
{
unsigned long flags;
int result;
result = hardware_init_port();
if (result < 0)
return result;
spin_lock_irqsave(&hardware[type].lock, flags);
/* Enable Interrupt */
serial_ir.lastkt = ktime_get();
soutp(UART_IER, sinp(UART_IER) | UART_IER_MSI);
off();
spin_unlock_irqrestore(&hardware[type].lock, flags);
return 0;
}
static struct platform_driver serial_ir_driver = {
.probe = serial_ir_probe,
.suspend = serial_ir_suspend,
.resume = serial_ir_resume,
.driver = {
.name = "serial_ir",
},
};
static int __init serial_ir_init(void)
{
int result;
result = platform_driver_register(&serial_ir_driver);
if (result)
return result;
serial_ir.pdev = platform_device_alloc("serial_ir", 0);
if (!serial_ir.pdev) {
result = -ENOMEM;
goto exit_driver_unregister;
}
result = platform_device_add(serial_ir.pdev);
if (result)
goto exit_device_put;
return 0;
exit_device_put:
platform_device_put(serial_ir.pdev);
exit_driver_unregister:
platform_driver_unregister(&serial_ir_driver);
return result;
}
static void serial_ir_exit(void)
{
platform_device_unregister(serial_ir.pdev);
platform_driver_unregister(&serial_ir_driver);
}
static int __init serial_ir_init_module(void)
{
switch (type) {
case IR_HOMEBREW:
case IR_IRDEO:
case IR_IRDEO_REMOTE:
case IR_ANIMAX:
case IR_IGOR:
/* if nothing specified, use ttyS0/com1 and irq 4 */
io = io ? io : 0x3f8;
irq = irq ? irq : 4;
break;
default:
return -EINVAL;
}
if (!softcarrier) {
switch (type) {
case IR_HOMEBREW:
case IR_IGOR:
hardware[type].set_send_carrier = false;
hardware[type].set_duty_cycle = false;
break;
}
}
/* make sure sense is either -1, 0, or 1 */
if (sense != -1)
sense = !!sense;
return serial_ir_init();
}
static void __exit serial_ir_exit_module(void)
{
del_timer_sync(&serial_ir.timeout_timer);
serial_ir_exit();
}
module_init(serial_ir_init_module);
module_exit(serial_ir_exit_module);
MODULE_DESCRIPTION("Infra-red receiver driver for serial ports.");
MODULE_AUTHOR("Ralph Metzler, Trent Piepho, Ben Pfaff, Christoph Bartelmus, Andrei Tanas");
MODULE_LICENSE("GPL");
module_param(type, int, 0444);
MODULE_PARM_DESC(type, "Hardware type (0 = home-brew, 1 = IRdeo, 2 = IRdeo Remote, 3 = AnimaX, 4 = IgorPlug)");
module_param_hw(io, int, ioport, 0444);
MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
/* some architectures (e.g. intel xscale) have memory mapped registers */
module_param_hw(iommap, ulong, other, 0444);
MODULE_PARM_DESC(iommap, "physical base for memory mapped I/O (0 = no memory mapped io)");
/*
* some architectures (e.g. intel xscale) align the 8bit serial registers
* on 32bit word boundaries.
* See linux-kernel/drivers/tty/serial/8250/8250.c serial_in()/out()
*/
module_param_hw(ioshift, int, other, 0444);
MODULE_PARM_DESC(ioshift, "shift I/O register offset (0 = no shift)");
module_param_hw(irq, int, irq, 0444);
MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
module_param_hw(share_irq, bool, other, 0444);
MODULE_PARM_DESC(share_irq, "Share interrupts (0 = off, 1 = on)");
module_param(sense, int, 0444);
MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit (0 = active high, 1 = active low)");
#ifdef CONFIG_IR_SERIAL_TRANSMITTER
module_param(txsense, bool, 0444);
MODULE_PARM_DESC(txsense, "Sense of transmitter circuit (0 = active high, 1 = active low)");
#endif
module_param(softcarrier, bool, 0444);
MODULE_PARM_DESC(softcarrier, "Software carrier (0 = off, 1 = on, default on)");
| linux-master | drivers/media/rc/serial_ir.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* USB RedRat3 IR Transceiver rc-core driver
*
* Copyright (c) 2011 by Jarod Wilson <[email protected]>
* based heavily on the work of Stephen Cox, with additional
* help from RedRat Ltd.
*
* This driver began life based on an old version of the first-generation
* lirc_mceusb driver from the lirc 0.7.2 distribution. It was then
* significantly rewritten by Stephen Cox with the aid of RedRat Ltd's
* Chris Dodge.
*
* The driver was then ported to rc-core and significantly rewritten again,
* by Jarod, using the in-kernel mceusb driver as a guide, after an initial
* port effort was started by Stephen.
*
* TODO LIST:
* - fix lirc not showing repeats properly
* --
*
* The RedRat3 is a USB transceiver with both send & receive,
* with 2 separate sensors available for receive to enable
* both good long range reception for general use, and good
* short range reception when required for learning a signal.
*
* http://www.redrat.co.uk/
*
* It uses its own little protocol to communicate, the required
* parts of which are embedded within this driver.
* --
*/
#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <media/rc-core.h>
/* Driver Information */
#define DRIVER_AUTHOR "Jarod Wilson <[email protected]>"
#define DRIVER_AUTHOR2 "The Dweller, Stephen Cox"
#define DRIVER_DESC "RedRat3 USB IR Transceiver Driver"
#define DRIVER_NAME "redrat3"
/* bulk data transfer types */
#define RR3_ERROR 0x01
#define RR3_MOD_SIGNAL_IN 0x20
#define RR3_MOD_SIGNAL_OUT 0x21
/* Get the RR firmware version */
#define RR3_FW_VERSION 0xb1
#define RR3_FW_VERSION_LEN 64
/* Send encoded signal bulk-sent earlier */
#define RR3_TX_SEND_SIGNAL 0xb3
#define RR3_SET_IR_PARAM 0xb7
#define RR3_GET_IR_PARAM 0xb8
/* Blink the red LED on the device */
#define RR3_BLINK_LED 0xb9
/* Read serial number of device */
#define RR3_READ_SER_NO 0xba
#define RR3_SER_NO_LEN 4
/* Start capture with the RC receiver */
#define RR3_RC_DET_ENABLE 0xbb
/* Stop capture with the RC receiver */
#define RR3_RC_DET_DISABLE 0xbc
/* Start capture with the wideband receiver */
#define RR3_MODSIG_CAPTURE 0xb2
/* Return the status of RC detector capture */
#define RR3_RC_DET_STATUS 0xbd
/* Reset redrat */
#define RR3_RESET 0xa0
/* Max number of lengths in the signal. */
#define RR3_IR_IO_MAX_LENGTHS 0x01
/* Periods to measure mod. freq. */
#define RR3_IR_IO_PERIODS_MF 0x02
/* Size of memory for main signal data */
#define RR3_IR_IO_SIG_MEM_SIZE 0x03
/* Delta value when measuring lengths */
#define RR3_IR_IO_LENGTH_FUZZ 0x04
/* Timeout for end of signal detection */
#define RR3_IR_IO_SIG_TIMEOUT 0x05
/* Minimum value for pause recognition. */
#define RR3_IR_IO_MIN_PAUSE 0x06
/* Clock freq. of EZ-USB chip */
#define RR3_CLK 24000000
/* Clock periods per timer count */
#define RR3_CLK_PER_COUNT 12
/* (RR3_CLK / RR3_CLK_PER_COUNT) */
#define RR3_CLK_CONV_FACTOR 2000000
/* USB bulk-in wideband IR data endpoint address */
#define RR3_WIDE_IN_EP_ADDR 0x81
/* USB bulk-in narrowband IR data endpoint address */
#define RR3_NARROW_IN_EP_ADDR 0x82
/* Size of the fixed-length portion of the signal */
#define RR3_DRIVER_MAXLENS 255
#define RR3_MAX_SIG_SIZE 512
#define RR3_TIME_UNIT 50
#define RR3_END_OF_SIGNAL 0x7f
#define RR3_TX_TRAILER_LEN 2
#define RR3_RX_MIN_TIMEOUT 5
#define RR3_RX_MAX_TIMEOUT 2000
/* The 8051's CPUCS Register address */
#define RR3_CPUCS_REG_ADDR 0x7f92
#define USB_RR3USB_VENDOR_ID 0x112a
#define USB_RR3USB_PRODUCT_ID 0x0001
#define USB_RR3IIUSB_PRODUCT_ID 0x0005
/*
* The redrat3 encodes an IR signal as set of different lengths and a set
* of indices into those lengths. This sets how much two lengths must
 * differ before they are considered distinct; the value is specified
* in microseconds.
* Default 5, value 0 to 127.
*/
static int length_fuzz = 5;
module_param(length_fuzz, uint, 0644);
MODULE_PARM_DESC(length_fuzz, "Length Fuzz (0-127)");
/*
* When receiving a continuous ir stream (for example when a user is
* holding a button down on a remote), this specifies the minimum size
 * of a space when the redrat3 sends an irdata packet to the host. Specified
* in milliseconds. Default value 18ms.
* The value can be between 2 and 30 inclusive.
*/
static int minimum_pause = 18;
module_param(minimum_pause, uint, 0644);
MODULE_PARM_DESC(minimum_pause, "Minimum Pause in ms (2-30)");
/*
* The carrier frequency is measured during the first pulse of the IR
 * signal. The larger the number of periods used to measure, the more
 * accurate the result is likely to be; however, some signals have short
 * initial pulses, so in some cases it may be necessary to reduce this value.
* Default 8, value 1 to 255.
*/
static int periods_measure_carrier = 8;
module_param(periods_measure_carrier, uint, 0644);
MODULE_PARM_DESC(periods_measure_carrier, "Number of Periods to Measure Carrier (1-255)");
struct redrat3_header {
__be16 length;
__be16 transfer_type;
} __packed;
/* sending and receiving irdata */
struct redrat3_irdata {
struct redrat3_header header;
__be32 pause;
__be16 mod_freq_count;
__be16 num_periods;
__u8 max_lengths;
__u8 no_lengths;
__be16 max_sig_size;
__be16 sig_size;
__u8 no_repeats;
__be16 lens[RR3_DRIVER_MAXLENS]; /* not aligned */
__u8 sigdata[RR3_MAX_SIG_SIZE];
} __packed;
/* firmware errors */
struct redrat3_error {
struct redrat3_header header;
__be16 fw_error;
} __packed;
/* table of devices that work with this driver */
static const struct usb_device_id redrat3_dev_table[] = {
/* Original version of the RedRat3 */
{USB_DEVICE(USB_RR3USB_VENDOR_ID, USB_RR3USB_PRODUCT_ID)},
	/* Second version/release of the RedRat3 - RedRat3-II */
{USB_DEVICE(USB_RR3USB_VENDOR_ID, USB_RR3IIUSB_PRODUCT_ID)},
{} /* Terminating entry */
};
/* Structure to hold all of our device specific stuff */
struct redrat3_dev {
/* core device bits */
struct rc_dev *rc;
struct device *dev;
/* led control */
struct led_classdev led;
atomic_t flash;
struct usb_ctrlrequest flash_control;
struct urb *flash_urb;
u8 flash_in_buf;
/* learning */
bool wideband;
struct usb_ctrlrequest learn_control;
struct urb *learn_urb;
u8 learn_buf;
/* save off the usb device pointer */
struct usb_device *udev;
/* the receive endpoint */
struct usb_endpoint_descriptor *ep_narrow;
/* the buffer to receive data */
void *bulk_in_buf;
/* urb used to read ir data */
struct urb *narrow_urb;
struct urb *wide_urb;
/* the send endpoint */
struct usb_endpoint_descriptor *ep_out;
/* usb dma */
dma_addr_t dma_in;
/* Is the device currently transmitting?*/
bool transmitting;
/* store for current packet */
struct redrat3_irdata irdata;
u16 bytes_read;
u32 carrier;
char name[64];
char phys[64];
};
static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
{
if (!rr3->transmitting && (code != 0x40))
dev_info(rr3->dev, "fw error code 0x%02x: ", code);
switch (code) {
case 0x00:
pr_cont("No Error\n");
break;
/* Codes 0x20 through 0x2f are IR Firmware Errors */
case 0x20:
pr_cont("Initial signal pulse not long enough to measure carrier frequency\n");
break;
case 0x21:
pr_cont("Not enough length values allocated for signal\n");
break;
case 0x22:
pr_cont("Not enough memory allocated for signal data\n");
break;
case 0x23:
pr_cont("Too many signal repeats\n");
break;
case 0x28:
pr_cont("Insufficient memory available for IR signal data memory allocation\n");
break;
case 0x29:
pr_cont("Insufficient memory available for IrDa signal data memory allocation\n");
break;
/* Codes 0x30 through 0x3f are USB Firmware Errors */
case 0x30:
pr_cont("Insufficient memory available for bulk transfer structure\n");
break;
/*
* Other error codes... These are primarily errors that can occur in
* the control messages sent to the redrat
*/
case 0x40:
if (!rr3->transmitting)
pr_cont("Signal capture has been terminated\n");
break;
case 0x41:
pr_cont("Attempt to set/get and unknown signal I/O algorithm parameter\n");
break;
case 0x42:
pr_cont("Signal capture already started\n");
break;
default:
pr_cont("Unknown Error\n");
break;
}
}
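/*
 * The device measured mod_freq_count timer ticks (at RR3_CLK /
 * RR3_CLK_PER_COUNT) over num_periods carrier periods, so the
 * carrier frequency is periods / (ticks * tick_time).
 */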
static u32 redrat3_val_to_mod_freq(struct redrat3_irdata *irdata)
{
u32 mod_freq = 0;
u16 mod_freq_count = be16_to_cpu(irdata->mod_freq_count);
if (mod_freq_count != 0)
mod_freq = (RR3_CLK * be16_to_cpu(irdata->num_periods)) /
(mod_freq_count * RR3_CLK_PER_COUNT);
return mod_freq;
}
/*
 * convert a redrat3 length count to microseconds; dividend and divisor
 * are both scaled down by 1000, which leaves the result unchanged
 */
static u32 redrat3_len_to_us(u32 length)
{
u32 biglen = length * 1000;
u32 divisor = (RR3_CLK_CONV_FACTOR) / 1000;
u32 result = (u32) (biglen / divisor);
/* don't allow zero lengths to go back, breaks lirc */
return result ? result : 1;
}
/*
* convert us back into redrat3 lengths
*
 * length * 1000   length * 1000000
 * ------------- = ---------------- = micro
 * rr3clk / 1000        rr3clk
 *
 * 6 * 2       4 * 3       micro * rr3clk        micro * rr3clk / 1000
 * ----- = 4   ----- = 6   -------------- = len  ---------------------
 *   3           2            1000000                    1000
*/
static u32 redrat3_us_to_len(u32 microsec)
{
u32 result;
u32 divisor;
microsec = (microsec > IR_MAX_DURATION) ? IR_MAX_DURATION : microsec;
divisor = (RR3_CLK_CONV_FACTOR / 1000);
result = (u32)(microsec * divisor) / 1000;
/* don't allow zero lengths to go back, breaks lirc */
return result ? result : 1;
}
static void redrat3_process_ir_data(struct redrat3_dev *rr3)
{
struct ir_raw_event rawir = {};
struct device *dev;
unsigned int i, sig_size, offset, val;
u32 mod_freq;
dev = rr3->dev;
mod_freq = redrat3_val_to_mod_freq(&rr3->irdata);
dev_dbg(dev, "Got mod_freq of %u\n", mod_freq);
if (mod_freq && rr3->wideband) {
struct ir_raw_event ev = {
.carrier_report = 1,
.carrier = mod_freq
};
ir_raw_event_store(rr3->rc, &ev);
}
/* process each rr3 encoded byte into an int */
sig_size = be16_to_cpu(rr3->irdata.sig_size);
for (i = 0; i < sig_size; i++) {
offset = rr3->irdata.sigdata[i];
val = get_unaligned_be16(&rr3->irdata.lens[offset]);
/* we should always get pulse/space/pulse/space samples */
if (i % 2)
rawir.pulse = false;
else
rawir.pulse = true;
rawir.duration = redrat3_len_to_us(val);
/* cap the value to IR_MAX_DURATION */
rawir.duration = (rawir.duration > IR_MAX_DURATION) ?
IR_MAX_DURATION : rawir.duration;
dev_dbg(dev, "storing %s with duration %d (i: %d)\n",
rawir.pulse ? "pulse" : "space", rawir.duration, i);
ir_raw_event_store_with_filter(rr3->rc, &rawir);
}
/* add a trailing space */
rawir.pulse = false;
rawir.timeout = true;
rawir.duration = rr3->rc->timeout;
dev_dbg(dev, "storing trailing timeout with duration %d\n",
rawir.duration);
ir_raw_event_store_with_filter(rr3->rc, &rawir);
dev_dbg(dev, "calling ir_raw_event_handle\n");
ir_raw_event_handle(rr3->rc);
}
/* Util fn to send rr3 cmds */
static int redrat3_send_cmd(int cmd, struct redrat3_dev *rr3)
{
struct usb_device *udev;
u8 *data;
int res;
data = kzalloc(sizeof(u8), GFP_KERNEL);
if (!data)
return -ENOMEM;
udev = rr3->udev;
res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), cmd,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0x0000, 0x0000, data, sizeof(u8), 10000);
if (res < 0) {
dev_err(rr3->dev, "%s: Error sending rr3 cmd res %d, data %d",
__func__, res, *data);
res = -EIO;
} else
res = data[0];
kfree(data);
return res;
}
/* Enables the long range detector and starts async receive */
static int redrat3_enable_detector(struct redrat3_dev *rr3)
{
struct device *dev = rr3->dev;
u8 ret;
ret = redrat3_send_cmd(RR3_RC_DET_ENABLE, rr3);
if (ret != 0)
dev_dbg(dev, "%s: unexpected ret of %d\n",
__func__, ret);
ret = redrat3_send_cmd(RR3_RC_DET_STATUS, rr3);
if (ret != 1) {
dev_err(dev, "%s: detector status: %d, should be 1\n",
__func__, ret);
return -EIO;
}
ret = usb_submit_urb(rr3->narrow_urb, GFP_KERNEL);
if (ret) {
dev_err(rr3->dev, "narrow band urb failed: %d", ret);
return ret;
}
ret = usb_submit_urb(rr3->wide_urb, GFP_KERNEL);
if (ret)
dev_err(rr3->dev, "wide band urb failed: %d", ret);
return ret;
}
static inline void redrat3_delete(struct redrat3_dev *rr3,
struct usb_device *udev)
{
usb_kill_urb(rr3->narrow_urb);
usb_kill_urb(rr3->wide_urb);
usb_kill_urb(rr3->flash_urb);
usb_kill_urb(rr3->learn_urb);
usb_free_urb(rr3->narrow_urb);
usb_free_urb(rr3->wide_urb);
usb_free_urb(rr3->flash_urb);
usb_free_urb(rr3->learn_urb);
usb_free_coherent(udev, le16_to_cpu(rr3->ep_narrow->wMaxPacketSize),
rr3->bulk_in_buf, rr3->dma_in);
kfree(rr3);
}
static u32 redrat3_get_timeout(struct redrat3_dev *rr3)
{
__be32 *tmp;
u32 timeout = MS_TO_US(150); /* a sane default, if things go haywire */
int len, ret, pipe;
len = sizeof(*tmp);
tmp = kzalloc(len, GFP_KERNEL);
if (!tmp)
return timeout;
pipe = usb_rcvctrlpipe(rr3->udev, 0);
ret = usb_control_msg(rr3->udev, pipe, RR3_GET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
RR3_IR_IO_SIG_TIMEOUT, 0, tmp, len, 5000);
if (ret != len)
dev_warn(rr3->dev, "Failed to read timeout from hardware\n");
else {
timeout = redrat3_len_to_us(be32_to_cpup(tmp));
dev_dbg(rr3->dev, "Got timeout of %d ms\n", timeout / 1000);
}
kfree(tmp);
return timeout;
}
static int redrat3_set_timeout(struct rc_dev *rc_dev, unsigned int timeoutus)
{
struct redrat3_dev *rr3 = rc_dev->priv;
struct usb_device *udev = rr3->udev;
struct device *dev = rr3->dev;
__be32 *timeout;
int ret;
timeout = kmalloc(sizeof(*timeout), GFP_KERNEL);
if (!timeout)
return -ENOMEM;
*timeout = cpu_to_be32(redrat3_us_to_len(timeoutus));
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
RR3_IR_IO_SIG_TIMEOUT, 0, timeout, sizeof(*timeout),
25000);
dev_dbg(dev, "set ir parm timeout %d ret 0x%02x\n",
be32_to_cpu(*timeout), ret);
if (ret == sizeof(*timeout))
ret = 0;
else if (ret >= 0)
ret = -EIO;
kfree(timeout);
return ret;
}
static void redrat3_reset(struct redrat3_dev *rr3)
{
struct usb_device *udev = rr3->udev;
struct device *dev = rr3->dev;
int rc, rxpipe, txpipe;
u8 *val;
size_t const len = sizeof(*val);
rxpipe = usb_rcvctrlpipe(udev, 0);
txpipe = usb_sndctrlpipe(udev, 0);
val = kmalloc(len, GFP_KERNEL);
if (!val)
return;
*val = 0x01;
rc = usb_control_msg(udev, rxpipe, RR3_RESET,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
RR3_CPUCS_REG_ADDR, 0, val, len, 25000);
dev_dbg(dev, "reset returned 0x%02x\n", rc);
*val = length_fuzz;
rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
RR3_IR_IO_LENGTH_FUZZ, 0, val, len, 25000);
dev_dbg(dev, "set ir parm len fuzz %d rc 0x%02x\n", *val, rc);
*val = (65536 - (minimum_pause * 2000)) / 256;
rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
RR3_IR_IO_MIN_PAUSE, 0, val, len, 25000);
dev_dbg(dev, "set ir parm min pause %d rc 0x%02x\n", *val, rc);
*val = periods_measure_carrier;
rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
RR3_IR_IO_PERIODS_MF, 0, val, len, 25000);
dev_dbg(dev, "set ir parm periods measure carrier %d rc 0x%02x", *val,
rc);
*val = RR3_DRIVER_MAXLENS;
rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
RR3_IR_IO_MAX_LENGTHS, 0, val, len, 25000);
dev_dbg(dev, "set ir parm max lens %d rc 0x%02x\n", *val, rc);
kfree(val);
}
static void redrat3_get_firmware_rev(struct redrat3_dev *rr3)
{
int rc;
char *buffer;
buffer = kcalloc(RR3_FW_VERSION_LEN + 1, sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return;
rc = usb_control_msg(rr3->udev, usb_rcvctrlpipe(rr3->udev, 0),
RR3_FW_VERSION,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0, 0, buffer, RR3_FW_VERSION_LEN, 5000);
if (rc >= 0)
dev_info(rr3->dev, "Firmware rev: %s", buffer);
else
dev_err(rr3->dev, "Problem fetching firmware ID\n");
kfree(buffer);
}
static void redrat3_read_packet_start(struct redrat3_dev *rr3, unsigned len)
{
struct redrat3_header *header = rr3->bulk_in_buf;
unsigned pktlen, pkttype;
/* grab the Length and type of transfer */
pktlen = be16_to_cpu(header->length);
pkttype = be16_to_cpu(header->transfer_type);
if (pktlen > sizeof(rr3->irdata)) {
dev_warn(rr3->dev, "packet length %u too large\n", pktlen);
return;
}
switch (pkttype) {
case RR3_ERROR:
if (len >= sizeof(struct redrat3_error)) {
struct redrat3_error *error = rr3->bulk_in_buf;
unsigned fw_error = be16_to_cpu(error->fw_error);
redrat3_dump_fw_error(rr3, fw_error);
}
break;
case RR3_MOD_SIGNAL_IN:
memcpy(&rr3->irdata, rr3->bulk_in_buf, len);
rr3->bytes_read = len;
dev_dbg(rr3->dev, "bytes_read %d, pktlen %d\n",
rr3->bytes_read, pktlen);
break;
default:
dev_dbg(rr3->dev, "ignoring packet with type 0x%02x, len of %d, 0x%02x\n",
pkttype, len, pktlen);
break;
}
}
static void redrat3_read_packet_continue(struct redrat3_dev *rr3, unsigned len)
{
void *irdata = &rr3->irdata;
if (len + rr3->bytes_read > sizeof(rr3->irdata)) {
dev_warn(rr3->dev, "too much data for packet\n");
rr3->bytes_read = 0;
return;
}
memcpy(irdata + rr3->bytes_read, rr3->bulk_in_buf, len);
rr3->bytes_read += len;
dev_dbg(rr3->dev, "bytes_read %d, pktlen %d\n", rr3->bytes_read,
be16_to_cpu(rr3->irdata.header.length));
}
/* gather IR data from incoming urb, process it when we have enough */
static int redrat3_get_ir_data(struct redrat3_dev *rr3, unsigned len)
{
struct device *dev = rr3->dev;
unsigned pkttype;
int ret = 0;
if (rr3->bytes_read == 0 && len >= sizeof(struct redrat3_header)) {
redrat3_read_packet_start(rr3, len);
} else if (rr3->bytes_read != 0) {
redrat3_read_packet_continue(rr3, len);
} else if (rr3->bytes_read == 0) {
dev_err(dev, "error: no packet data read\n");
ret = -ENODATA;
goto out;
}
if (rr3->bytes_read < be16_to_cpu(rr3->irdata.header.length) +
sizeof(struct redrat3_header))
/* we're still accumulating data */
return 0;
/* if we get here, we've got IR data to decode */
pkttype = be16_to_cpu(rr3->irdata.header.transfer_type);
if (pkttype == RR3_MOD_SIGNAL_IN)
redrat3_process_ir_data(rr3);
else
dev_dbg(dev, "discarding non-signal data packet (type 0x%02x)\n",
pkttype);
out:
rr3->bytes_read = 0;
return ret;
}
/* callback function from USB when async USB request has completed */
static void redrat3_handle_async(struct urb *urb)
{
struct redrat3_dev *rr3 = urb->context;
int ret;
switch (urb->status) {
case 0:
ret = redrat3_get_ir_data(rr3, urb->actual_length);
if (!ret && rr3->wideband && !rr3->learn_urb->hcpriv) {
ret = usb_submit_urb(rr3->learn_urb, GFP_ATOMIC);
if (ret)
dev_err(rr3->dev, "Failed to submit learning urb: %d",
ret);
}
if (!ret) {
/* no error, prepare to read more */
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret)
dev_err(rr3->dev, "Failed to resubmit urb: %d",
ret);
}
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
usb_unlink_urb(urb);
return;
case -EPIPE:
default:
dev_warn(rr3->dev, "Error: urb status = %d\n", urb->status);
rr3->bytes_read = 0;
break;
}
}
static u16 mod_freq_to_val(unsigned int mod_freq)
{
int mult = 6000000;
/* Clk used in mod. freq. generation is CLK24/4. */
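	/* the timer counts up from the returned preload, presumably
	 * overflowing once per carrier period:
	 * preload = 65536 - 6 MHz / carrier */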
return 65536 - (mult / mod_freq);
}
static int redrat3_set_tx_carrier(struct rc_dev *rcdev, u32 carrier)
{
struct redrat3_dev *rr3 = rcdev->priv;
struct device *dev = rr3->dev;
dev_dbg(dev, "Setting modulation frequency to %u", carrier);
if (carrier == 0)
return -EINVAL;
rr3->carrier = carrier;
return 0;
}
static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
unsigned count)
{
struct redrat3_dev *rr3 = rcdev->priv;
struct device *dev = rr3->dev;
struct redrat3_irdata *irdata = NULL;
int ret, ret_len;
int lencheck, cur_sample_len, pipe;
int *sample_lens = NULL;
u8 curlencheck = 0;
unsigned i, sendbuf_len;
if (rr3->transmitting) {
dev_warn(dev, "%s: transmitter already in use\n", __func__);
return -EAGAIN;
}
if (count > RR3_MAX_SIG_SIZE - RR3_TX_TRAILER_LEN)
return -EINVAL;
/* rr3 will disable rc detector on transmit */
rr3->transmitting = true;
sample_lens = kcalloc(RR3_DRIVER_MAXLENS,
sizeof(*sample_lens),
GFP_KERNEL);
if (!sample_lens)
return -ENOMEM;
irdata = kzalloc(sizeof(*irdata), GFP_KERNEL);
if (!irdata) {
ret = -ENOMEM;
goto out;
}
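	/*
	 * The device wants a table of distinct sample lengths plus a
	 * byte stream of indices into that table; build both while
	 * walking txbuf.
	 */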
for (i = 0; i < count; i++) {
cur_sample_len = redrat3_us_to_len(txbuf[i]);
if (cur_sample_len > 0xffff) {
dev_warn(dev, "transmit period of %uus truncated to %uus\n",
txbuf[i], redrat3_len_to_us(0xffff));
cur_sample_len = 0xffff;
}
for (lencheck = 0; lencheck < curlencheck; lencheck++) {
if (sample_lens[lencheck] == cur_sample_len)
break;
}
if (lencheck == curlencheck) {
dev_dbg(dev, "txbuf[%d]=%u, pos %d, enc %u\n",
i, txbuf[i], curlencheck, cur_sample_len);
if (curlencheck < RR3_DRIVER_MAXLENS) {
/* now convert the value to a proper
* rr3 value.. */
sample_lens[curlencheck] = cur_sample_len;
put_unaligned_be16(cur_sample_len,
&irdata->lens[curlencheck]);
curlencheck++;
} else {
ret = -EINVAL;
goto out;
}
}
irdata->sigdata[i] = lencheck;
}
irdata->sigdata[count] = RR3_END_OF_SIGNAL;
irdata->sigdata[count + 1] = RR3_END_OF_SIGNAL;
sendbuf_len = offsetof(struct redrat3_irdata,
sigdata[count + RR3_TX_TRAILER_LEN]);
/* fill in our packet header */
irdata->header.length = cpu_to_be16(sendbuf_len -
sizeof(struct redrat3_header));
irdata->header.transfer_type = cpu_to_be16(RR3_MOD_SIGNAL_OUT);
irdata->pause = cpu_to_be32(redrat3_len_to_us(100));
irdata->mod_freq_count = cpu_to_be16(mod_freq_to_val(rr3->carrier));
irdata->no_lengths = curlencheck;
irdata->sig_size = cpu_to_be16(count + RR3_TX_TRAILER_LEN);
pipe = usb_sndbulkpipe(rr3->udev, rr3->ep_out->bEndpointAddress);
ret = usb_bulk_msg(rr3->udev, pipe, irdata,
sendbuf_len, &ret_len, 10000);
dev_dbg(dev, "sent %d bytes, (ret %d)\n", ret_len, ret);
/* now tell the hardware to transmit what we sent it */
pipe = usb_rcvctrlpipe(rr3->udev, 0);
ret = usb_control_msg(rr3->udev, pipe, RR3_TX_SEND_SIGNAL,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0, 0, irdata, 2, 10000);
if (ret < 0)
dev_err(dev, "Error: control msg send failed, rc %d\n", ret);
else
ret = count;
out:
kfree(irdata);
kfree(sample_lens);
rr3->transmitting = false;
/* rr3 re-enables rc detector because it was enabled before */
return ret;
}
static void redrat3_brightness_set(struct led_classdev *led_dev, enum
led_brightness brightness)
{
struct redrat3_dev *rr3 = container_of(led_dev, struct redrat3_dev,
led);
if (brightness != LED_OFF && atomic_cmpxchg(&rr3->flash, 0, 1) == 0) {
int ret = usb_submit_urb(rr3->flash_urb, GFP_ATOMIC);
if (ret != 0) {
dev_dbg(rr3->dev, "%s: unexpected ret of %d\n",
__func__, ret);
atomic_set(&rr3->flash, 0);
}
}
}
static int redrat3_wideband_receiver(struct rc_dev *rcdev, int enable)
{
struct redrat3_dev *rr3 = rcdev->priv;
int ret = 0;
rr3->wideband = enable != 0;
if (enable) {
ret = usb_submit_urb(rr3->learn_urb, GFP_KERNEL);
if (ret)
dev_err(rr3->dev, "Failed to submit learning urb: %d",
ret);
}
return ret;
}
static void redrat3_learn_complete(struct urb *urb)
{
struct redrat3_dev *rr3 = urb->context;
switch (urb->status) {
case 0:
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
usb_unlink_urb(urb);
return;
case -EPIPE:
default:
dev_err(rr3->dev, "Error: learn urb status = %d", urb->status);
break;
}
}
static void redrat3_led_complete(struct urb *urb)
{
struct redrat3_dev *rr3 = urb->context;
switch (urb->status) {
case 0:
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
usb_unlink_urb(urb);
return;
case -EPIPE:
default:
dev_dbg(rr3->dev, "Error: urb status = %d\n", urb->status);
break;
}
rr3->led.brightness = LED_OFF;
atomic_dec(&rr3->flash);
}
static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
{
struct device *dev = rr3->dev;
struct rc_dev *rc;
int ret;
u16 prod = le16_to_cpu(rr3->udev->descriptor.idProduct);
rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rc)
return NULL;
snprintf(rr3->name, sizeof(rr3->name),
"RedRat3%s Infrared Remote Transceiver",
prod == USB_RR3IIUSB_PRODUCT_ID ? "-II" : "");
usb_make_path(rr3->udev, rr3->phys, sizeof(rr3->phys));
rc->device_name = rr3->name;
rc->input_phys = rr3->phys;
usb_to_input_id(rr3->udev, &rc->input_id);
rc->dev.parent = dev;
rc->priv = rr3;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rc->min_timeout = MS_TO_US(RR3_RX_MIN_TIMEOUT);
rc->max_timeout = MS_TO_US(RR3_RX_MAX_TIMEOUT);
rc->timeout = redrat3_get_timeout(rr3);
rc->s_timeout = redrat3_set_timeout;
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
rc->s_carrier_report = redrat3_wideband_receiver;
rc->driver_name = DRIVER_NAME;
rc->rx_resolution = 2;
rc->map_name = RC_MAP_HAUPPAUGE;
ret = rc_register_device(rc);
if (ret < 0) {
dev_err(dev, "remote dev registration failed\n");
goto out;
}
return rc;
out:
rc_free_device(rc);
return NULL;
}
static int redrat3_dev_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct device *dev = &intf->dev;
struct usb_host_interface *uhi;
struct redrat3_dev *rr3;
struct usb_endpoint_descriptor *ep;
struct usb_endpoint_descriptor *ep_narrow = NULL;
struct usb_endpoint_descriptor *ep_wide = NULL;
struct usb_endpoint_descriptor *ep_out = NULL;
u8 addr, attrs;
int pipe, i;
int retval = -ENOMEM;
uhi = intf->cur_altsetting;
/* find our bulk-in and bulk-out endpoints */
for (i = 0; i < uhi->desc.bNumEndpoints; ++i) {
ep = &uhi->endpoint[i].desc;
addr = ep->bEndpointAddress;
attrs = ep->bmAttributes;
if (((addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) &&
((attrs & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_BULK)) {
dev_dbg(dev, "found bulk-in endpoint at 0x%02x\n",
ep->bEndpointAddress);
/* data comes in on 0x82, 0x81 is for learning */
if (ep->bEndpointAddress == RR3_NARROW_IN_EP_ADDR)
ep_narrow = ep;
if (ep->bEndpointAddress == RR3_WIDE_IN_EP_ADDR)
ep_wide = ep;
}
if ((ep_out == NULL) &&
((addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) &&
((attrs & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_BULK)) {
dev_dbg(dev, "found bulk-out endpoint at 0x%02x\n",
ep->bEndpointAddress);
ep_out = ep;
}
}
if (!ep_narrow || !ep_out || !ep_wide) {
dev_err(dev, "Couldn't find all endpoints\n");
retval = -ENODEV;
goto no_endpoints;
}
/* allocate memory for our device state and initialize it */
rr3 = kzalloc(sizeof(*rr3), GFP_KERNEL);
if (!rr3)
goto no_endpoints;
rr3->dev = &intf->dev;
rr3->ep_narrow = ep_narrow;
rr3->ep_out = ep_out;
rr3->udev = udev;
/* set up bulk-in endpoint */
rr3->narrow_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rr3->narrow_urb)
goto redrat_free;
rr3->wide_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rr3->wide_urb)
goto redrat_free;
rr3->bulk_in_buf = usb_alloc_coherent(udev,
le16_to_cpu(ep_narrow->wMaxPacketSize),
GFP_KERNEL, &rr3->dma_in);
if (!rr3->bulk_in_buf)
goto redrat_free;
pipe = usb_rcvbulkpipe(udev, ep_narrow->bEndpointAddress);
usb_fill_bulk_urb(rr3->narrow_urb, udev, pipe, rr3->bulk_in_buf,
le16_to_cpu(ep_narrow->wMaxPacketSize),
redrat3_handle_async, rr3);
rr3->narrow_urb->transfer_dma = rr3->dma_in;
rr3->narrow_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
pipe = usb_rcvbulkpipe(udev, ep_wide->bEndpointAddress);
usb_fill_bulk_urb(rr3->wide_urb, udev, pipe, rr3->bulk_in_buf,
le16_to_cpu(ep_narrow->wMaxPacketSize),
redrat3_handle_async, rr3);
rr3->wide_urb->transfer_dma = rr3->dma_in;
rr3->wide_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
redrat3_reset(rr3);
redrat3_get_firmware_rev(rr3);
/* default.. will get overridden by any sends with a freq defined */
rr3->carrier = 38000;
atomic_set(&rr3->flash, 0);
rr3->flash_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rr3->flash_urb)
goto redrat_free;
/* learn urb */
rr3->learn_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rr3->learn_urb)
goto redrat_free;
/* setup packet is 'c0 b2 0000 0000 0001' */
rr3->learn_control.bRequestType = 0xc0;
rr3->learn_control.bRequest = RR3_MODSIG_CAPTURE;
rr3->learn_control.wLength = cpu_to_le16(1);
usb_fill_control_urb(rr3->learn_urb, udev, usb_rcvctrlpipe(udev, 0),
(unsigned char *)&rr3->learn_control,
&rr3->learn_buf, sizeof(rr3->learn_buf),
redrat3_learn_complete, rr3);
/* setup packet is 'c0 b9 0000 0000 0001' */
rr3->flash_control.bRequestType = 0xc0;
rr3->flash_control.bRequest = RR3_BLINK_LED;
rr3->flash_control.wLength = cpu_to_le16(1);
usb_fill_control_urb(rr3->flash_urb, udev, usb_rcvctrlpipe(udev, 0),
(unsigned char *)&rr3->flash_control,
&rr3->flash_in_buf, sizeof(rr3->flash_in_buf),
redrat3_led_complete, rr3);
/* led control */
rr3->led.name = "redrat3:red:feedback";
rr3->led.default_trigger = "rc-feedback";
rr3->led.brightness_set = redrat3_brightness_set;
retval = led_classdev_register(&intf->dev, &rr3->led);
if (retval)
goto redrat_free;
rr3->rc = redrat3_init_rc_dev(rr3);
if (!rr3->rc) {
retval = -ENOMEM;
goto led_free;
}
/* might be all we need to do? */
retval = redrat3_enable_detector(rr3);
if (retval < 0)
goto led_free;
/* we can register the device now, as it is ready */
usb_set_intfdata(intf, rr3);
return 0;
led_free:
led_classdev_unregister(&rr3->led);
redrat_free:
redrat3_delete(rr3, rr3->udev);
no_endpoints:
return retval;
}
static void redrat3_dev_disconnect(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
rc_unregister_device(rr3->rc);
led_classdev_unregister(&rr3->led);
redrat3_delete(rr3, udev);
}
static int redrat3_dev_suspend(struct usb_interface *intf, pm_message_t message)
{
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
led_classdev_suspend(&rr3->led);
usb_kill_urb(rr3->narrow_urb);
usb_kill_urb(rr3->wide_urb);
usb_kill_urb(rr3->flash_urb);
return 0;
}
static int redrat3_dev_resume(struct usb_interface *intf)
{
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
if (usb_submit_urb(rr3->narrow_urb, GFP_NOIO))
return -EIO;
if (usb_submit_urb(rr3->wide_urb, GFP_NOIO))
return -EIO;
led_classdev_resume(&rr3->led);
return 0;
}
static struct usb_driver redrat3_dev_driver = {
.name = DRIVER_NAME,
.probe = redrat3_dev_probe,
.disconnect = redrat3_dev_disconnect,
.suspend = redrat3_dev_suspend,
.resume = redrat3_dev_resume,
.reset_resume = redrat3_dev_resume,
.id_table = redrat3_dev_table
};
module_usb_driver(redrat3_dev_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_AUTHOR(DRIVER_AUTHOR2);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, redrat3_dev_table);
| linux-master | drivers/media/rc/redrat3.c |
// SPDX-License-Identifier: GPL-2.0
// bpf-lirc.c - handles bpf
//
// Copyright (C) 2018 Sean Young <[email protected]>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/bpf_lirc.h>
#include "rc-core-priv.h"
#define lirc_rcu_dereference(p) \
rcu_dereference_protected(p, lockdep_is_held(&ir_raw_handler_lock))
/*
* BPF interface for raw IR
*/
const struct bpf_prog_ops lirc_mode2_prog_ops = {
};
BPF_CALL_1(bpf_rc_repeat, u32*, sample)
{
struct ir_raw_event_ctrl *ctrl;
ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);
rc_repeat(ctrl->dev);
return 0;
}
static const struct bpf_func_proto rc_repeat_proto = {
.func = bpf_rc_repeat,
.gpl_only = true, /* rc_repeat is EXPORT_SYMBOL_GPL */
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
BPF_CALL_4(bpf_rc_keydown, u32*, sample, u32, protocol, u64, scancode,
u32, toggle)
{
struct ir_raw_event_ctrl *ctrl;
ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);
rc_keydown(ctrl->dev, protocol, scancode, toggle != 0);
return 0;
}
static const struct bpf_func_proto rc_keydown_proto = {
.func = bpf_rc_keydown,
.gpl_only = true, /* rc_keydown is EXPORT_SYMBOL_GPL */
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_ANYTHING,
};
BPF_CALL_3(bpf_rc_pointer_rel, u32*, sample, s32, rel_x, s32, rel_y)
{
struct ir_raw_event_ctrl *ctrl;
ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);
input_report_rel(ctrl->dev->input_dev, REL_X, rel_x);
input_report_rel(ctrl->dev->input_dev, REL_Y, rel_y);
input_sync(ctrl->dev->input_dev);
return 0;
}
static const struct bpf_func_proto rc_pointer_rel_proto = {
.func = bpf_rc_pointer_rel,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
};
static const struct bpf_func_proto *
lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_rc_repeat:
return &rc_repeat_proto;
case BPF_FUNC_rc_keydown:
return &rc_keydown_proto;
case BPF_FUNC_rc_pointer_rel:
return &rc_pointer_rel_proto;
case BPF_FUNC_map_lookup_elem:
return &bpf_map_lookup_elem_proto;
case BPF_FUNC_map_update_elem:
return &bpf_map_update_elem_proto;
case BPF_FUNC_map_delete_elem:
return &bpf_map_delete_elem_proto;
case BPF_FUNC_map_push_elem:
return &bpf_map_push_elem_proto;
case BPF_FUNC_map_pop_elem:
return &bpf_map_pop_elem_proto;
case BPF_FUNC_map_peek_elem:
return &bpf_map_peek_elem_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
case BPF_FUNC_ktime_get_boot_ns:
return &bpf_ktime_get_boot_ns_proto;
case BPF_FUNC_tail_call:
return &bpf_tail_call_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_trace_printk:
if (perfmon_capable())
return bpf_get_trace_printk_proto();
fallthrough;
default:
return NULL;
}
}
static bool lirc_mode2_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
/* We have one field of u32 */
return type == BPF_READ && off == 0 && size == sizeof(u32);
}
const struct bpf_verifier_ops lirc_mode2_verifier_ops = {
.get_func_proto = lirc_mode2_func_proto,
.is_valid_access = lirc_mode2_is_valid_access
};
#define BPF_MAX_PROGS 64
static int lirc_bpf_attach(struct rc_dev *rcdev, struct bpf_prog *prog)
{
struct bpf_prog_array *old_array;
struct bpf_prog_array *new_array;
struct ir_raw_event_ctrl *raw;
int ret;
if (rcdev->driver_type != RC_DRIVER_IR_RAW)
return -EINVAL;
ret = mutex_lock_interruptible(&ir_raw_handler_lock);
if (ret)
return ret;
raw = rcdev->raw;
if (!raw) {
ret = -ENODEV;
goto unlock;
}
old_array = lirc_rcu_dereference(raw->progs);
if (old_array && bpf_prog_array_length(old_array) >= BPF_MAX_PROGS) {
ret = -E2BIG;
goto unlock;
}
ret = bpf_prog_array_copy(old_array, NULL, prog, 0, &new_array);
if (ret < 0)
goto unlock;
rcu_assign_pointer(raw->progs, new_array);
bpf_prog_array_free(old_array);
unlock:
mutex_unlock(&ir_raw_handler_lock);
return ret;
}
static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
{
struct bpf_prog_array *old_array;
struct bpf_prog_array *new_array;
struct ir_raw_event_ctrl *raw;
int ret;
if (rcdev->driver_type != RC_DRIVER_IR_RAW)
return -EINVAL;
ret = mutex_lock_interruptible(&ir_raw_handler_lock);
if (ret)
return ret;
raw = rcdev->raw;
if (!raw) {
ret = -ENODEV;
goto unlock;
}
old_array = lirc_rcu_dereference(raw->progs);
ret = bpf_prog_array_copy(old_array, prog, NULL, 0, &new_array);
/*
	 * Do not use bpf_prog_array_delete_safe() as we would end up
	 * with a dummy entry in the array, and then we would free the
	 * dummy in lirc_bpf_free()
*/
if (ret)
goto unlock;
rcu_assign_pointer(raw->progs, new_array);
bpf_prog_array_free(old_array);
bpf_prog_put(prog);
unlock:
mutex_unlock(&ir_raw_handler_lock);
return ret;
}
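/*
 * Run every attached program against one raw IR sample. The sample is
 * a LIRC_MODE2 value: the pulse/space/timeout type is encoded in the
 * high bits and the duration in microseconds in the low 24 bits.
 */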
void lirc_bpf_run(struct rc_dev *rcdev, u32 sample)
{
struct ir_raw_event_ctrl *raw = rcdev->raw;
raw->bpf_sample = sample;
if (raw->progs) {
rcu_read_lock();
bpf_prog_run_array(rcu_dereference(raw->progs),
&raw->bpf_sample, bpf_prog_run);
rcu_read_unlock();
}
}
/*
* This should be called once the rc thread has been stopped, so there can be
* no concurrent bpf execution.
*
* Should be called with the ir_raw_handler_lock held.
*/
void lirc_bpf_free(struct rc_dev *rcdev)
{
struct bpf_prog_array_item *item;
struct bpf_prog_array *array;
array = lirc_rcu_dereference(rcdev->raw->progs);
if (!array)
return;
for (item = array->items; item->prog; item++)
bpf_prog_put(item->prog);
bpf_prog_array_free(array);
}
int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
struct rc_dev *rcdev;
int ret;
if (attr->attach_flags)
return -EINVAL;
rcdev = rc_dev_get_from_fd(attr->target_fd);
if (IS_ERR(rcdev))
return PTR_ERR(rcdev);
ret = lirc_bpf_attach(rcdev, prog);
put_device(&rcdev->dev);
return ret;
}
int lirc_prog_detach(const union bpf_attr *attr)
{
struct bpf_prog *prog;
struct rc_dev *rcdev;
int ret;
if (attr->attach_flags)
return -EINVAL;
prog = bpf_prog_get_type(attr->attach_bpf_fd,
BPF_PROG_TYPE_LIRC_MODE2);
if (IS_ERR(prog))
return PTR_ERR(prog);
rcdev = rc_dev_get_from_fd(attr->target_fd);
if (IS_ERR(rcdev)) {
bpf_prog_put(prog);
return PTR_ERR(rcdev);
}
ret = lirc_bpf_detach(rcdev, prog);
bpf_prog_put(prog);
put_device(&rcdev->dev);
return ret;
}
int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
{
__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
struct bpf_prog_array *progs;
struct rc_dev *rcdev;
u32 cnt, flags = 0;
int ret;
if (attr->query.query_flags)
return -EINVAL;
rcdev = rc_dev_get_from_fd(attr->query.target_fd);
if (IS_ERR(rcdev))
return PTR_ERR(rcdev);
if (rcdev->driver_type != RC_DRIVER_IR_RAW) {
ret = -EINVAL;
goto put;
}
ret = mutex_lock_interruptible(&ir_raw_handler_lock);
if (ret)
goto put;
progs = lirc_rcu_dereference(rcdev->raw->progs);
cnt = progs ? bpf_prog_array_length(progs) : 0;
if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt))) {
ret = -EFAULT;
goto unlock;
}
if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) {
ret = -EFAULT;
goto unlock;
}
if (attr->query.prog_cnt != 0 && prog_ids && cnt)
ret = bpf_prog_array_copy_to_user(progs, prog_ids,
attr->query.prog_cnt);
unlock:
mutex_unlock(&ir_raw_handler_lock);
put:
put_device(&rcdev->dev);
return ret;
}
| linux-master | drivers/media/rc/bpf-lirc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Amlogic Meson IR remote receiver
*
* Copyright (C) 2014 Beniamino Galvani <[email protected]>
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <media/rc-core.h>
#define DRIVER_NAME "meson-ir"
#define IR_DEC_LDR_ACTIVE 0x00
#define IR_DEC_LDR_IDLE 0x04
#define IR_DEC_LDR_REPEAT 0x08
#define IR_DEC_BIT_0 0x0c
#define IR_DEC_REG0 0x10
#define IR_DEC_REG0_BASE_TIME GENMASK(11, 0)
#define IR_DEC_FRAME 0x14
#define IR_DEC_STATUS 0x18
#define IR_DEC_STATUS_PULSE BIT(8)
#define IR_DEC_REG1 0x1c
#define IR_DEC_REG1_TIME_IV GENMASK(28, 16)
#define IR_DEC_REG1_ENABLE BIT(15)
#define IR_DEC_REG1_MODE GENMASK(8, 7)
#define IR_DEC_REG1_IRQSEL GENMASK(3, 2)
#define IR_DEC_REG1_RESET BIT(0)
/* The following regs are only available on Meson 8b and newer */
#define IR_DEC_REG2 0x20
#define IR_DEC_REG2_MODE GENMASK(3, 0)
#define DEC_MODE_NEC 0x0
#define DEC_MODE_RAW 0x2
#define IRQSEL_NEC_MODE 0
#define IRQSEL_RISE_FALL 1
#define IRQSEL_FALL 2
#define IRQSEL_RISE 3
#define MESON_RAW_TRATE 10 /* us */
#define MESON_HW_TRATE 20 /* us */
struct meson_ir {
struct regmap *reg;
struct rc_dev *rc;
spinlock_t lock;
};
static const struct regmap_config meson_ir_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
};
static irqreturn_t meson_ir_irq(int irqno, void *dev_id)
{
struct meson_ir *ir = dev_id;
u32 duration, status;
struct ir_raw_event rawir = {};
spin_lock(&ir->lock);
regmap_read(ir->reg, IR_DEC_REG1, &duration);
duration = FIELD_GET(IR_DEC_REG1_TIME_IV, duration);
rawir.duration = duration * MESON_RAW_TRATE;
regmap_read(ir->reg, IR_DEC_STATUS, &status);
rawir.pulse = !!(status & IR_DEC_STATUS_PULSE);
ir_raw_event_store_with_timeout(ir->rc, &rawir);
spin_unlock(&ir->lock);
return IRQ_HANDLED;
}
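/*
 * Illustrative sketch, not part of the driver: what the FIELD_GET() in
 * meson_ir_irq() above computes. IR_DEC_REG1_TIME_IV is bits 28:16 of
 * REG1 and counts MESON_RAW_TRATE (10us) ticks, so an example readout of
 * 0x00640000 decodes to 100 ticks = 1000us.
 */
static inline unsigned int meson_ir_reg1_to_us(u32 reg1)
{
	u32 ticks = (reg1 >> 16) & 0x1fff;	/* GENMASK(28, 16) by hand */
	return ticks * MESON_RAW_TRATE;		/* duration in microseconds */
}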
static int meson_ir_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
void __iomem *res_start;
const char *map_name;
struct meson_ir *ir;
int irq, ret;
ir = devm_kzalloc(dev, sizeof(struct meson_ir), GFP_KERNEL);
if (!ir)
return -ENOMEM;
res_start = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(res_start))
return PTR_ERR(res_start);
ir->reg = devm_regmap_init_mmio(&pdev->dev, res_start,
&meson_ir_regmap_config);
if (IS_ERR(ir->reg))
return PTR_ERR(ir->reg);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ir->rc = devm_rc_allocate_device(dev, RC_DRIVER_IR_RAW);
if (!ir->rc) {
dev_err(dev, "failed to allocate rc device\n");
return -ENOMEM;
}
ir->rc->priv = ir;
ir->rc->device_name = DRIVER_NAME;
ir->rc->input_phys = DRIVER_NAME "/input0";
ir->rc->input_id.bustype = BUS_HOST;
map_name = of_get_property(node, "linux,rc-map-name", NULL);
ir->rc->map_name = map_name ? map_name : RC_MAP_EMPTY;
ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
ir->rc->rx_resolution = MESON_RAW_TRATE;
ir->rc->min_timeout = 1;
ir->rc->timeout = IR_DEFAULT_TIMEOUT;
ir->rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
ir->rc->driver_name = DRIVER_NAME;
spin_lock_init(&ir->lock);
platform_set_drvdata(pdev, ir);
ret = devm_rc_register_device(dev, ir->rc);
if (ret) {
dev_err(dev, "failed to register rc device\n");
return ret;
}
ret = devm_request_irq(dev, irq, meson_ir_irq, 0, NULL, ir);
if (ret) {
dev_err(dev, "failed to request irq\n");
return ret;
}
/* Reset the decoder */
regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET,
IR_DEC_REG1_RESET);
regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET, 0);
/* Set general operation mode (= raw/software decoding) */
if (of_device_is_compatible(node, "amlogic,meson6-ir"))
regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_MODE,
FIELD_PREP(IR_DEC_REG1_MODE, DEC_MODE_RAW));
else
regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_MODE,
FIELD_PREP(IR_DEC_REG2_MODE, DEC_MODE_RAW));
/* Set rate */
regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_BASE_TIME,
FIELD_PREP(IR_DEC_REG0_BASE_TIME,
MESON_RAW_TRATE - 1));
/* IRQ on rising and falling edges */
regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_IRQSEL,
FIELD_PREP(IR_DEC_REG1_IRQSEL, IRQSEL_RISE_FALL));
/* Enable the decoder */
regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE,
IR_DEC_REG1_ENABLE);
dev_info(dev, "receiver initialized\n");
return 0;
}
static void meson_ir_remove(struct platform_device *pdev)
{
struct meson_ir *ir = platform_get_drvdata(pdev);
unsigned long flags;
/* Disable the decoder */
spin_lock_irqsave(&ir->lock, flags);
regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE, 0);
spin_unlock_irqrestore(&ir->lock, flags);
}
static void meson_ir_shutdown(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct meson_ir *ir = platform_get_drvdata(pdev);
unsigned long flags;
spin_lock_irqsave(&ir->lock, flags);
/*
* Set operation mode to NEC/hardware decoding to give
* bootloader a chance to power the system back on
*/
if (of_device_is_compatible(node, "amlogic,meson6-ir"))
regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_MODE,
FIELD_PREP(IR_DEC_REG1_MODE, DEC_MODE_NEC));
else
regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_MODE,
FIELD_PREP(IR_DEC_REG2_MODE, DEC_MODE_NEC));
/* Set rate to default value */
regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_BASE_TIME,
FIELD_PREP(IR_DEC_REG0_BASE_TIME,
MESON_HW_TRATE - 1));
spin_unlock_irqrestore(&ir->lock, flags);
}
static const struct of_device_id meson_ir_match[] = {
{ .compatible = "amlogic,meson6-ir" },
{ .compatible = "amlogic,meson8b-ir" },
{ .compatible = "amlogic,meson-gxbb-ir" },
{ },
};
MODULE_DEVICE_TABLE(of, meson_ir_match);
static struct platform_driver meson_ir_driver = {
.probe = meson_ir_probe,
.remove_new = meson_ir_remove,
.shutdown = meson_ir_shutdown,
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_ir_match,
},
};
module_platform_driver(meson_ir_driver);
MODULE_DESCRIPTION("Amlogic Meson IR remote receiver driver");
MODULE_AUTHOR("Beniamino Galvani <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/media/rc/meson-ir.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Loopback driver for rc-core,
*
* Copyright (c) 2010 David Härdeman <[email protected]>
*
* This driver receives TX data and passes it back as RX data,
* which is useful for (scripted) debugging of rc-core without
* having to use actual hardware.
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#define DRIVER_NAME "rc-loopback"
#define RXMASK_NARROWBAND 0x1
#define RXMASK_WIDEBAND 0x2
struct loopback_dev {
struct rc_dev *dev;
u32 txmask;
u32 txcarrier;
u32 txduty;
bool idle;
bool wideband;
bool carrierreport;
u32 rxcarriermin;
u32 rxcarriermax;
};
static struct loopback_dev loopdev;
static int loop_set_tx_mask(struct rc_dev *dev, u32 mask)
{
struct loopback_dev *lodev = dev->priv;
if ((mask & (RXMASK_NARROWBAND | RXMASK_WIDEBAND)) != mask) {
dev_dbg(&dev->dev, "invalid tx mask: %u\n", mask);
		return 2;	/* number of available transmitters */
}
dev_dbg(&dev->dev, "setting tx mask: %u\n", mask);
lodev->txmask = mask;
return 0;
}
static int loop_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
struct loopback_dev *lodev = dev->priv;
dev_dbg(&dev->dev, "setting tx carrier: %u\n", carrier);
lodev->txcarrier = carrier;
return 0;
}
static int loop_set_tx_duty_cycle(struct rc_dev *dev, u32 duty_cycle)
{
struct loopback_dev *lodev = dev->priv;
if (duty_cycle < 1 || duty_cycle > 99) {
dev_dbg(&dev->dev, "invalid duty cycle: %u\n", duty_cycle);
return -EINVAL;
}
dev_dbg(&dev->dev, "setting duty cycle: %u\n", duty_cycle);
lodev->txduty = duty_cycle;
return 0;
}
static int loop_set_rx_carrier_range(struct rc_dev *dev, u32 min, u32 max)
{
struct loopback_dev *lodev = dev->priv;
if (min < 1 || min > max) {
dev_dbg(&dev->dev, "invalid rx carrier range %u to %u\n", min, max);
return -EINVAL;
}
dev_dbg(&dev->dev, "setting rx carrier range %u to %u\n", min, max);
lodev->rxcarriermin = min;
lodev->rxcarriermax = max;
return 0;
}
static int loop_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
struct loopback_dev *lodev = dev->priv;
u32 rxmask;
unsigned i;
struct ir_raw_event rawir = {};
if (lodev->txcarrier < lodev->rxcarriermin ||
lodev->txcarrier > lodev->rxcarriermax) {
dev_dbg(&dev->dev, "ignoring tx, carrier out of range\n");
goto out;
}
if (lodev->wideband)
rxmask = RXMASK_WIDEBAND;
else
rxmask = RXMASK_NARROWBAND;
if (!(rxmask & lodev->txmask)) {
dev_dbg(&dev->dev, "ignoring tx, rx mask mismatch\n");
goto out;
}
for (i = 0; i < count; i++) {
rawir.pulse = i % 2 ? false : true;
rawir.duration = txbuf[i];
/* simulate overflow if ridiculously long pulse was sent */
if (rawir.pulse && rawir.duration > MS_TO_US(50))
ir_raw_event_overflow(dev);
else
ir_raw_event_store_with_filter(dev, &rawir);
}
if (lodev->carrierreport) {
rawir.pulse = false;
rawir.carrier_report = true;
rawir.carrier = lodev->txcarrier;
ir_raw_event_store(dev, &rawir);
}
/* Fake a silence long enough to cause us to go idle */
rawir.pulse = false;
rawir.duration = dev->timeout;
ir_raw_event_store_with_filter(dev, &rawir);
ir_raw_event_handle(dev);
out:
return count;
}
static void loop_set_idle(struct rc_dev *dev, bool enable)
{
struct loopback_dev *lodev = dev->priv;
if (lodev->idle != enable) {
dev_dbg(&dev->dev, "%sing idle mode\n", enable ? "enter" : "exit");
lodev->idle = enable;
}
}
static int loop_set_wideband_receiver(struct rc_dev *dev, int enable)
{
struct loopback_dev *lodev = dev->priv;
if (lodev->wideband != enable) {
dev_dbg(&dev->dev, "using %sband receiver\n", enable ? "wide" : "narrow");
lodev->wideband = !!enable;
}
return 0;
}
static int loop_set_carrier_report(struct rc_dev *dev, int enable)
{
struct loopback_dev *lodev = dev->priv;
if (lodev->carrierreport != enable) {
dev_dbg(&dev->dev, "%sabling carrier reports\n", enable ? "en" : "dis");
lodev->carrierreport = !!enable;
}
return 0;
}
static int loop_set_wakeup_filter(struct rc_dev *dev,
struct rc_scancode_filter *sc)
{
static const unsigned int max = 512;
struct ir_raw_event *raw;
int ret;
int i;
/* fine to disable filter */
if (!sc->mask)
return 0;
/* encode the specified filter and loop it back */
raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
if (!raw)
return -ENOMEM;
ret = ir_raw_encode_scancode(dev->wakeup_protocol, sc->data, raw, max);
/* still loop back the partial raw IR even if it's incomplete */
if (ret == -ENOBUFS)
ret = max;
if (ret >= 0) {
/* do the loopback */
for (i = 0; i < ret; ++i)
ir_raw_event_store(dev, &raw[i]);
ir_raw_event_handle(dev);
ret = 0;
}
kfree(raw);
return ret;
}
static int __init loop_init(void)
{
struct rc_dev *rc;
int ret;
rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rc)
return -ENOMEM;
rc->device_name = "rc-core loopback device";
rc->input_phys = "rc-core/virtual";
rc->input_id.bustype = BUS_VIRTUAL;
rc->input_id.version = 1;
rc->driver_name = DRIVER_NAME;
rc->map_name = RC_MAP_EMPTY;
rc->priv = &loopdev;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rc->allowed_wakeup_protocols = RC_PROTO_BIT_ALL_IR_ENCODER;
rc->encode_wakeup = true;
rc->timeout = IR_DEFAULT_TIMEOUT;
rc->min_timeout = 1;
rc->max_timeout = IR_MAX_TIMEOUT;
rc->rx_resolution = 1;
rc->tx_resolution = 1;
rc->s_tx_mask = loop_set_tx_mask;
rc->s_tx_carrier = loop_set_tx_carrier;
rc->s_tx_duty_cycle = loop_set_tx_duty_cycle;
rc->s_rx_carrier_range = loop_set_rx_carrier_range;
rc->tx_ir = loop_tx_ir;
rc->s_idle = loop_set_idle;
rc->s_wideband_receiver = loop_set_wideband_receiver;
rc->s_carrier_report = loop_set_carrier_report;
rc->s_wakeup_filter = loop_set_wakeup_filter;
loopdev.txmask = RXMASK_NARROWBAND;
loopdev.txcarrier = 36000;
loopdev.txduty = 50;
loopdev.rxcarriermin = 1;
loopdev.rxcarriermax = ~0;
loopdev.idle = true;
loopdev.wideband = false;
loopdev.carrierreport = false;
ret = rc_register_device(rc);
if (ret < 0) {
dev_err(&rc->dev, "rc_dev registration failed\n");
rc_free_device(rc);
return ret;
}
loopdev.dev = rc;
return 0;
}
static void __exit loop_exit(void)
{
rc_unregister_device(loopdev.dev);
}
module_init(loop_init);
module_exit(loop_exit);
MODULE_DESCRIPTION("Loopback device for rc-core debugging");
MODULE_AUTHOR("David Härdeman <[email protected]>");
MODULE_LICENSE("GPL");
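/*
 * Illustrative user-space sketch, not part of the driver: a separate
 * program exercising the loopback. An odd-length pulse/space buffer
 * written to the lirc chardev lands in loop_tx_ir() above and comes back
 * as mode2 samples on read(). "/dev/lirc0" is a placeholder path, error
 * handling is trimmed, and read() blocks, so a real tool would poll().
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/lirc.h>
int main(void)
{
	unsigned int tx[] = { 900, 450, 900 };	/* pulse, space, pulse (us) */
	unsigned int sample;
	int fd = open("/dev/lirc0", O_RDWR);
	if (fd < 0)
		return 1;
	if (write(fd, tx, sizeof(tx)) < 0)
		return 1;
	while (read(fd, &sample, sizeof(sample)) == sizeof(sample))
		printf("%s %u us\n",
		       LIRC_IS_PULSE(sample) ? "pulse" : "space",
		       LIRC_VALUE(sample));
	close(fd);
	return 0;
}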
| linux-master | drivers/media/rc/rc-loopback.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* USB ATI Remote support
*
* Copyright (c) 2011, 2012 Anssi Hannula <[email protected]>
* Version 2.2.0 Copyright (c) 2004 Torrey Hoffman <[email protected]>
* Version 2.1.1 Copyright (c) 2002 Vladimir Dergachev
*
* This 2.2.0 version is a rewrite / cleanup of the 2.1.1 driver, including
 * porting to the 2.6 kernel interfaces, along with other modifications
* to better match the style of the existing usb/input drivers. However, the
* protocol and hardware handling is essentially unchanged from 2.1.1.
*
* The 2.1.1 driver was derived from the usbati_remote and usbkbd drivers by
* Vojtech Pavlik.
*
* Changes:
*
* Feb 2004: Torrey Hoffman <[email protected]>
* Version 2.2.0
* Jun 2004: Torrey Hoffman <[email protected]>
* Version 2.2.1
* Added key repeat support contributed by:
* Vincent Vanackere <[email protected]>
* Added support for the "Lola" remote contributed by:
* Seth Cohn <[email protected]>
*
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 *
* Hardware & software notes
*
* These remote controls are distributed by ATI as part of their
* "All-In-Wonder" video card packages. The receiver self-identifies as a
* "USB Receiver" with manufacturer "X10 Wireless Technology Inc".
*
* The "Lola" remote is available from X10. See:
* http://www.x10.com/products/lola_sg1.htm
* The Lola is similar to the ATI remote but has no mouse support, and slightly
* different keys.
*
* It is possible to use multiple receivers and remotes on multiple computers
* simultaneously by configuring them to use specific channels.
*
* The RF protocol used by the remote supports 16 distinct channels, 1 to 16.
* Actually, it may even support more, at least in some revisions of the
* hardware.
*
* Each remote can be configured to transmit on one channel as follows:
* - Press and hold the "hand icon" button.
* - When the red LED starts to blink, let go of the "hand icon" button.
* - When it stops blinking, input the channel code as two digits, from 01
* to 16, and press the hand icon again.
*
* The timing can be a little tricky. Try loading the module with debug=1
* to have the kernel print out messages about the remote control number
* and mask. Note: debugging prints remote numbers as zero-based hexadecimal.
*
* The driver has a "channel_mask" parameter. This bitmask specifies which
* channels will be ignored by the module. To mask out channels, just add
* all the 2^channel_number values together.
*
* For instance, set channel_mask = 2^4 = 16 (binary 10000) to make ati_remote
* ignore signals coming from remote controls transmitting on channel 4, but
* accept all other channels.
*
* Or, set channel_mask = 65533, (0xFFFD), and all channels except 1 will be
* ignored.
*
* The default is 0 (respond to all channels). Bit 0 and bits 17-32 of this
* parameter are unused.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/usb/input.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <media/rc-core.h>
/*
* Module and Version Information, Module Parameters
*/
#define ATI_REMOTE_VENDOR_ID 0x0bc7
#define LOLA_REMOTE_PRODUCT_ID 0x0002
#define LOLA2_REMOTE_PRODUCT_ID 0x0003
#define ATI_REMOTE_PRODUCT_ID 0x0004
#define NVIDIA_REMOTE_PRODUCT_ID 0x0005
#define MEDION_REMOTE_PRODUCT_ID 0x0006
#define FIREFLY_REMOTE_PRODUCT_ID 0x0008
#define DRIVER_VERSION "2.2.1"
#define DRIVER_AUTHOR "Torrey Hoffman <[email protected]>"
#define DRIVER_DESC "ATI/X10 RF USB Remote Control"
#define NAME_BUFSIZE 80 /* size of product name, path buffers */
#define DATA_BUFSIZE 63 /* size of URB data buffers */
/*
* Duplicate event filtering time.
* Sequential, identical KIND_FILTERED inputs with less than
 * FILTER_TIME milliseconds between them are considered repeat
* events. The hardware generates 5 events for the first keypress
* and we have to take this into account for an accurate repeat
* behaviour.
*/
#define FILTER_TIME 60 /* msec */
#define REPEAT_DELAY 500 /* msec */
static unsigned long channel_mask;
module_param(channel_mask, ulong, 0644);
MODULE_PARM_DESC(channel_mask, "Bitmask of remote control channels to ignore");
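/*
 * Illustrative sketch, not part of the driver: how a channel_mask value
 * relates to the per-packet check in ati_remote_input_report() below.
 * remote_num there is 0-based (remote channel N gives remote_num N - 1),
 * so remote channel N is ignored when bit N of the mask is set, e.g.
 * mask = 1UL << 4 (= 16) ignores channel 4.
 */
static inline int channel_is_masked(unsigned long mask, int remote_num)
{
	return (mask & (1UL << (remote_num + 1))) != 0;
}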
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable extra debug messages and information");
static int repeat_filter = FILTER_TIME;
module_param(repeat_filter, int, 0644);
MODULE_PARM_DESC(repeat_filter, "Repeat filter time, default = 60 msec");
static int repeat_delay = REPEAT_DELAY;
module_param(repeat_delay, int, 0644);
MODULE_PARM_DESC(repeat_delay, "Delay before sending repeats, default = 500 msec");
static bool mouse = true;
module_param(mouse, bool, 0444);
MODULE_PARM_DESC(mouse, "Enable mouse device, default = yes");
#define dbginfo(dev, format, arg...) \
do { if (debug) dev_info(dev , format , ## arg); } while (0)
struct ati_receiver_type {
/* either default_keymap or get_default_keymap should be set */
const char *default_keymap;
const char *(*get_default_keymap)(struct usb_interface *interface);
};
static const char *get_medion_keymap(struct usb_interface *interface)
{
struct usb_device *udev = interface_to_usbdev(interface);
/*
* There are many different Medion remotes shipped with a receiver
* with the same usb id, but the receivers have subtle differences
* in the USB descriptors allowing us to detect them.
*/
if (udev->manufacturer && udev->product) {
if (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP) {
if (!strcmp(udev->manufacturer, "X10 Wireless Technology Inc")
&& !strcmp(udev->product, "USB Receiver"))
return RC_MAP_MEDION_X10_DIGITAINER;
if (!strcmp(udev->manufacturer, "X10 WTI")
&& !strcmp(udev->product, "RF receiver"))
return RC_MAP_MEDION_X10_OR2X;
} else {
if (!strcmp(udev->manufacturer, "X10 Wireless Technology Inc")
&& !strcmp(udev->product, "USB Receiver"))
return RC_MAP_MEDION_X10;
}
}
dev_info(&interface->dev,
"Unknown Medion X10 receiver, using default ati_remote Medion keymap\n");
return RC_MAP_MEDION_X10;
}
static const struct ati_receiver_type type_ati = {
.default_keymap = RC_MAP_ATI_X10
};
static const struct ati_receiver_type type_medion = {
.get_default_keymap = get_medion_keymap
};
static const struct ati_receiver_type type_firefly = {
.default_keymap = RC_MAP_SNAPSTREAM_FIREFLY
};
static const struct usb_device_id ati_remote_table[] = {
{
USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA_REMOTE_PRODUCT_ID),
.driver_info = (unsigned long)&type_ati
},
{
USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA2_REMOTE_PRODUCT_ID),
.driver_info = (unsigned long)&type_ati
},
{
USB_DEVICE(ATI_REMOTE_VENDOR_ID, ATI_REMOTE_PRODUCT_ID),
.driver_info = (unsigned long)&type_ati
},
{
USB_DEVICE(ATI_REMOTE_VENDOR_ID, NVIDIA_REMOTE_PRODUCT_ID),
.driver_info = (unsigned long)&type_ati
},
{
USB_DEVICE(ATI_REMOTE_VENDOR_ID, MEDION_REMOTE_PRODUCT_ID),
.driver_info = (unsigned long)&type_medion
},
{
USB_DEVICE(ATI_REMOTE_VENDOR_ID, FIREFLY_REMOTE_PRODUCT_ID),
.driver_info = (unsigned long)&type_firefly
},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ati_remote_table);
/* Get the high and low bytes of a 16-bit int */
#define HI(a) ((unsigned char)((a) >> 8))
#define LO(a) ((unsigned char)((a) & 0xff))
#define SEND_FLAG_IN_PROGRESS 1
#define SEND_FLAG_COMPLETE 2
/* Device initialization strings */
static char init1[] = { 0x01, 0x00, 0x20, 0x14 };
static char init2[] = { 0x01, 0x00, 0x20, 0x14, 0x20, 0x20, 0x20 };
struct ati_remote {
struct input_dev *idev;
struct rc_dev *rdev;
struct usb_device *udev;
struct usb_interface *interface;
struct urb *irq_urb;
struct urb *out_urb;
struct usb_endpoint_descriptor *endpoint_in;
struct usb_endpoint_descriptor *endpoint_out;
unsigned char *inbuf;
unsigned char *outbuf;
dma_addr_t inbuf_dma;
dma_addr_t outbuf_dma;
unsigned char old_data; /* Detect duplicate events */
unsigned long old_jiffies;
unsigned long acc_jiffies; /* handle acceleration */
unsigned long first_jiffies;
unsigned int repeat_count;
char rc_name[NAME_BUFSIZE];
char rc_phys[NAME_BUFSIZE];
char mouse_name[NAME_BUFSIZE];
char mouse_phys[NAME_BUFSIZE];
wait_queue_head_t wait;
int send_flags;
int users; /* 0-2, users are rc and input */
struct mutex open_mutex;
};
/* "Kinds" of messages sent from the hardware to the driver. */
#define KIND_END 0
#define KIND_LITERAL 1 /* Simply pass to input system as EV_KEY */
#define KIND_FILTERED 2 /* Add artificial key-up events, drop keyrepeats */
#define KIND_ACCEL 3 /* Translate to EV_REL mouse-move events */
/* Translation table from hardware messages to input events. */
static const struct {
unsigned char kind;
unsigned char data; /* Raw key code from remote */
unsigned short code; /* Input layer translation */
} ati_remote_tbl[] = {
/* Directional control pad axes. Code is xxyy */
{KIND_ACCEL, 0x70, 0xff00}, /* left */
{KIND_ACCEL, 0x71, 0x0100}, /* right */
{KIND_ACCEL, 0x72, 0x00ff}, /* up */
{KIND_ACCEL, 0x73, 0x0001}, /* down */
/* Directional control pad diagonals */
{KIND_ACCEL, 0x74, 0xffff}, /* left up */
{KIND_ACCEL, 0x75, 0x01ff}, /* right up */
{KIND_ACCEL, 0x77, 0xff01}, /* left down */
{KIND_ACCEL, 0x76, 0x0101}, /* right down */
/* "Mouse button" buttons. The code below uses the fact that the
* lsbit of the raw code is a down/up indicator. */
{KIND_LITERAL, 0x78, BTN_LEFT}, /* left btn down */
{KIND_LITERAL, 0x79, BTN_LEFT}, /* left btn up */
{KIND_LITERAL, 0x7c, BTN_RIGHT},/* right btn down */
{KIND_LITERAL, 0x7d, BTN_RIGHT},/* right btn up */
/* Artificial "double-click" events are generated by the hardware.
* They are mapped to the "side" and "extra" mouse buttons here. */
{KIND_FILTERED, 0x7a, BTN_SIDE}, /* left dblclick */
{KIND_FILTERED, 0x7e, BTN_EXTRA},/* right dblclick */
/* Non-mouse events are handled by rc-core */
{KIND_END, 0x00, 0}
};
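/*
 * Illustrative sketch, not part of the driver: unpacking a KIND_ACCEL
 * code from the table above. The 16-bit code is xxyy with each byte a
 * signed movement, so 0xff00 is dx = -1, dy = 0 (left) and 0x0101 is
 * dx = 1, dy = 1 (right down).
 */
static inline void accel_code_to_rel(unsigned short code, int *dx, int *dy)
{
	*dx = (signed char)(code >> 8);
	*dy = (signed char)(code & 0xff);
}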
/*
* ati_remote_dump_input
*/
static void ati_remote_dump(struct device *dev, unsigned char *data,
unsigned int len)
{
if (len == 1) {
if (data[0] != (unsigned char)0xff && data[0] != 0x00)
dev_warn(dev, "Weird byte 0x%02x\n", data[0]);
} else if (len == 4)
dev_warn(dev, "Weird key %*ph\n", 4, data);
else
dev_warn(dev, "Weird data, len=%d %*ph ...\n", len, 6, data);
}
/*
* ati_remote_open
*/
static int ati_remote_open(struct ati_remote *ati_remote)
{
int err = 0;
mutex_lock(&ati_remote->open_mutex);
if (ati_remote->users++ != 0)
goto out; /* one was already active */
/* On first open, submit the read urb which was set up previously. */
ati_remote->irq_urb->dev = ati_remote->udev;
if (usb_submit_urb(ati_remote->irq_urb, GFP_KERNEL)) {
dev_err(&ati_remote->interface->dev,
"%s: usb_submit_urb failed!\n", __func__);
err = -EIO;
}
out: mutex_unlock(&ati_remote->open_mutex);
return err;
}
/*
* ati_remote_close
*/
static void ati_remote_close(struct ati_remote *ati_remote)
{
mutex_lock(&ati_remote->open_mutex);
if (--ati_remote->users == 0)
usb_kill_urb(ati_remote->irq_urb);
mutex_unlock(&ati_remote->open_mutex);
}
static int ati_remote_input_open(struct input_dev *inputdev)
{
struct ati_remote *ati_remote = input_get_drvdata(inputdev);
return ati_remote_open(ati_remote);
}
static void ati_remote_input_close(struct input_dev *inputdev)
{
struct ati_remote *ati_remote = input_get_drvdata(inputdev);
ati_remote_close(ati_remote);
}
static int ati_remote_rc_open(struct rc_dev *rdev)
{
struct ati_remote *ati_remote = rdev->priv;
return ati_remote_open(ati_remote);
}
static void ati_remote_rc_close(struct rc_dev *rdev)
{
struct ati_remote *ati_remote = rdev->priv;
ati_remote_close(ati_remote);
}
/*
* ati_remote_irq_out
*/
static void ati_remote_irq_out(struct urb *urb)
{
struct ati_remote *ati_remote = urb->context;
if (urb->status) {
dev_dbg(&ati_remote->interface->dev, "%s: status %d\n",
__func__, urb->status);
return;
}
ati_remote->send_flags |= SEND_FLAG_COMPLETE;
wmb();
wake_up(&ati_remote->wait);
}
/*
* ati_remote_sendpacket
*
* Used to send device initialization strings
*/
static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd,
unsigned char *data)
{
int retval = 0;
/* Set up out_urb */
memcpy(ati_remote->out_urb->transfer_buffer + 1, data, LO(cmd));
((char *) ati_remote->out_urb->transfer_buffer)[0] = HI(cmd);
ati_remote->out_urb->transfer_buffer_length = LO(cmd) + 1;
ati_remote->out_urb->dev = ati_remote->udev;
ati_remote->send_flags = SEND_FLAG_IN_PROGRESS;
retval = usb_submit_urb(ati_remote->out_urb, GFP_ATOMIC);
if (retval) {
dev_dbg(&ati_remote->interface->dev,
"sendpacket: usb_submit_urb failed: %d\n", retval);
return retval;
}
wait_event_timeout(ati_remote->wait,
((ati_remote->out_urb->status != -EINPROGRESS) ||
(ati_remote->send_flags & SEND_FLAG_COMPLETE)),
HZ);
usb_kill_urb(ati_remote->out_urb);
return retval;
}
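/*
 * Illustrative sketch, not part of the driver: the wire layout built by
 * ati_remote_sendpacket() above. The low byte of cmd is the payload
 * length and the high byte is the first byte on the wire, so cmd 0x8004
 * with init1 produces the 5-byte packet 80 01 00 20 14.
 */
static int ati_packet_layout(unsigned short cmd, const unsigned char *data,
			     unsigned char *wire)
{
	int i, len = LO(cmd);		/* payload length */
	wire[0] = HI(cmd);		/* command byte */
	for (i = 0; i < len; i++)
		wire[i + 1] = data[i];
	return len + 1;			/* total bytes on the wire */
}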
struct accel_times {
const char value;
unsigned int msecs;
};
static const struct accel_times accel[] = {
{ 1, 125 },
{ 2, 250 },
{ 4, 500 },
{ 6, 1000 },
{ 9, 1500 },
{ 13, 2000 },
{ 20, 0 },
};
/*
* ati_remote_compute_accel
*
* Implements acceleration curve for directional control pad
* If elapsed time since last event is > 1/4 second, user "stopped",
* so reset acceleration. Otherwise, user is probably holding the control
* pad down, so we increase acceleration, ramping up over two seconds to
* a maximum speed.
*/
static int ati_remote_compute_accel(struct ati_remote *ati_remote)
{
unsigned long now = jiffies, reset_time;
int i;
reset_time = msecs_to_jiffies(250);
if (time_after(now, ati_remote->old_jiffies + reset_time)) {
ati_remote->acc_jiffies = now;
return 1;
}
for (i = 0; i < ARRAY_SIZE(accel) - 1; i++) {
unsigned long timeout = msecs_to_jiffies(accel[i].msecs);
if (time_before(now, ati_remote->acc_jiffies + timeout))
return accel[i].value;
}
return accel[i].value;
}
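/*
 * Illustrative sketch, not part of the driver: the curve implemented by
 * ati_remote_compute_accel() above, as a pure function of how long the
 * pad has been held, in milliseconds. A gap of more than 250ms between
 * events resets the hold time, so the speed ramps 1..20 over two seconds.
 */
static int accel_for_hold_ms(unsigned int ms)
{
	if (ms < 125)
		return 1;
	if (ms < 250)
		return 2;
	if (ms < 500)
		return 4;
	if (ms < 1000)
		return 6;
	if (ms < 1500)
		return 9;
	if (ms < 2000)
		return 13;
	return 20;
}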
/*
* ati_remote_report_input
*/
static void ati_remote_input_report(struct urb *urb)
{
struct ati_remote *ati_remote = urb->context;
	unsigned char *data = ati_remote->inbuf;
struct input_dev *dev = ati_remote->idev;
int index = -1;
int remote_num;
unsigned char scancode;
u32 wheel_keycode = KEY_RESERVED;
int i;
/*
* data[0] = 0x14
* data[1] = data[2] + data[3] + 0xd5 (a checksum byte)
* data[2] = the key code (with toggle bit in MSB with some models)
* data[3] = channel << 4 (the low 4 bits must be zero)
*/
/* Deal with strange looking inputs */
	if (urb->actual_length != 4 || data[0] != 0x14 ||
data[1] != (unsigned char)(data[2] + data[3] + 0xD5) ||
(data[3] & 0x0f) != 0x00) {
ati_remote_dump(&urb->dev->dev, data, urb->actual_length);
return;
}
if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff)) {
dbginfo(&ati_remote->interface->dev,
"wrong checksum in input: %*ph\n", 4, data);
return;
}
/* Mask unwanted remote channels. */
/* note: remote_num is 0-based, channel 1 on remote == 0 here */
remote_num = (data[3] >> 4) & 0x0f;
if (channel_mask & (1 << (remote_num + 1))) {
dbginfo(&ati_remote->interface->dev,
"Masked input from channel 0x%02x: data %02x, mask= 0x%02lx\n",
remote_num, data[2], channel_mask);
return;
}
/*
* MSB is a toggle code, though only used by some devices
* (e.g. SnapStream Firefly)
*/
scancode = data[2] & 0x7f;
dbginfo(&ati_remote->interface->dev,
"channel 0x%02x; key data %02x, scancode %02x\n",
remote_num, data[2], scancode);
if (scancode >= 0x70) {
/*
* This is either a mouse or scrollwheel event, depending on
* the remote/keymap.
* Get the keycode assigned to scancode 0x78/0x70. If it is
* set, assume this is a scrollwheel up/down event.
*/
wheel_keycode = rc_g_keycode_from_table(ati_remote->rdev,
scancode & 0x78);
if (wheel_keycode == KEY_RESERVED) {
/* scrollwheel was not mapped, assume mouse */
/* Look up event code index in the mouse translation
* table.
*/
for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
if (scancode == ati_remote_tbl[i].data) {
index = i;
break;
}
}
}
}
if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
/*
* The lsbit of the raw key code is a down/up flag.
* Invert it to match the input layer's conventions.
*/
input_event(dev, EV_KEY, ati_remote_tbl[index].code,
!(data[2] & 1));
ati_remote->old_jiffies = jiffies;
} else if (index < 0 || ati_remote_tbl[index].kind == KIND_FILTERED) {
unsigned long now = jiffies;
/* Filter duplicate events which happen "too close" together. */
if (ati_remote->old_data == data[2] &&
time_before(now, ati_remote->old_jiffies +
msecs_to_jiffies(repeat_filter))) {
ati_remote->repeat_count++;
} else {
ati_remote->repeat_count = 0;
ati_remote->first_jiffies = now;
}
ati_remote->old_jiffies = now;
		/* Ensure we skip at least the first 4 duplicate events
* (generated by a single keypress), and continue skipping
* until repeat_delay msecs have passed.
*/
if (ati_remote->repeat_count > 0 &&
(ati_remote->repeat_count < 5 ||
time_before(now, ati_remote->first_jiffies +
msecs_to_jiffies(repeat_delay))))
return;
if (index >= 0) {
input_event(dev, EV_KEY, ati_remote_tbl[index].code, 1);
input_event(dev, EV_KEY, ati_remote_tbl[index].code, 0);
} else {
/* Not a mouse event, hand it to rc-core. */
int count = 1;
if (wheel_keycode != KEY_RESERVED) {
/*
* This is a scrollwheel event, send the
* scroll up (0x78) / down (0x70) scancode
* repeatedly as many times as indicated by
* rest of the scancode.
*/
count = (scancode & 0x07) + 1;
scancode &= 0x78;
}
while (count--) {
/*
* We don't use the rc-core repeat handling yet as
* it would cause ghost repeats which would be a
* regression for this driver.
*/
rc_keydown_notimeout(ati_remote->rdev,
RC_PROTO_OTHER,
scancode, data[2]);
rc_keyup(ati_remote->rdev);
}
goto nosync;
}
} else if (ati_remote_tbl[index].kind == KIND_ACCEL) {
signed char dx = ati_remote_tbl[index].code >> 8;
signed char dy = ati_remote_tbl[index].code & 255;
/*
* Other event kinds are from the directional control pad, and
* have an acceleration factor applied to them. Without this
* acceleration, the control pad is mostly unusable.
*/
int acc = ati_remote_compute_accel(ati_remote);
if (dx)
input_report_rel(dev, REL_X, dx * acc);
if (dy)
input_report_rel(dev, REL_Y, dy * acc);
ati_remote->old_jiffies = jiffies;
} else {
dev_dbg(&ati_remote->interface->dev, "ati_remote kind=%d\n",
ati_remote_tbl[index].kind);
return;
}
input_sync(dev);
nosync:
ati_remote->old_data = data[2];
}
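/*
 * Illustrative sketch, not part of the driver: the packet sanity check
 * applied at the top of ati_remote_input_report(). A made-up sample
 * packet { 0x14, 0x22, 0x3d, 0x10 } passes, since 0x3d + 0x10 + 0xd5 =
 * 0x122 and its low byte matches data[1] = 0x22.
 */
static inline int ati_packet_is_valid(const unsigned char *data, int len)
{
	return len == 4 && data[0] == 0x14 &&
	       data[1] == (unsigned char)(data[2] + data[3] + 0xd5) &&
	       (data[3] & 0x0f) == 0x00;
}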
/*
* ati_remote_irq_in
*/
static void ati_remote_irq_in(struct urb *urb)
{
struct ati_remote *ati_remote = urb->context;
int retval;
switch (urb->status) {
case 0: /* success */
ati_remote_input_report(urb);
break;
case -ECONNRESET: /* unlink */
case -ENOENT:
case -ESHUTDOWN:
dev_dbg(&ati_remote->interface->dev,
"%s: urb error status, unlink?\n",
__func__);
return;
default: /* error */
dev_dbg(&ati_remote->interface->dev,
"%s: Nonzero urb status %d\n",
__func__, urb->status);
}
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&ati_remote->interface->dev,
"%s: usb_submit_urb()=%d\n",
__func__, retval);
}
/*
* ati_remote_alloc_buffers
*/
static int ati_remote_alloc_buffers(struct usb_device *udev,
struct ati_remote *ati_remote)
{
ati_remote->inbuf = usb_alloc_coherent(udev, DATA_BUFSIZE, GFP_ATOMIC,
&ati_remote->inbuf_dma);
if (!ati_remote->inbuf)
return -1;
ati_remote->outbuf = usb_alloc_coherent(udev, DATA_BUFSIZE, GFP_ATOMIC,
&ati_remote->outbuf_dma);
if (!ati_remote->outbuf)
return -1;
ati_remote->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!ati_remote->irq_urb)
return -1;
ati_remote->out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!ati_remote->out_urb)
return -1;
return 0;
}
/*
* ati_remote_free_buffers
*/
static void ati_remote_free_buffers(struct ati_remote *ati_remote)
{
usb_free_urb(ati_remote->irq_urb);
usb_free_urb(ati_remote->out_urb);
usb_free_coherent(ati_remote->udev, DATA_BUFSIZE,
ati_remote->inbuf, ati_remote->inbuf_dma);
usb_free_coherent(ati_remote->udev, DATA_BUFSIZE,
ati_remote->outbuf, ati_remote->outbuf_dma);
}
static void ati_remote_input_init(struct ati_remote *ati_remote)
{
struct input_dev *idev = ati_remote->idev;
int i;
idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
idev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) |
BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA);
idev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++)
if (ati_remote_tbl[i].kind == KIND_LITERAL ||
ati_remote_tbl[i].kind == KIND_FILTERED)
__set_bit(ati_remote_tbl[i].code, idev->keybit);
input_set_drvdata(idev, ati_remote);
idev->open = ati_remote_input_open;
idev->close = ati_remote_input_close;
idev->name = ati_remote->mouse_name;
idev->phys = ati_remote->mouse_phys;
usb_to_input_id(ati_remote->udev, &idev->id);
idev->dev.parent = &ati_remote->interface->dev;
}
static void ati_remote_rc_init(struct ati_remote *ati_remote)
{
struct rc_dev *rdev = ati_remote->rdev;
rdev->priv = ati_remote;
rdev->allowed_protocols = RC_PROTO_BIT_OTHER;
rdev->driver_name = "ati_remote";
rdev->open = ati_remote_rc_open;
rdev->close = ati_remote_rc_close;
rdev->device_name = ati_remote->rc_name;
rdev->input_phys = ati_remote->rc_phys;
usb_to_input_id(ati_remote->udev, &rdev->input_id);
rdev->dev.parent = &ati_remote->interface->dev;
}
static int ati_remote_initialize(struct ati_remote *ati_remote)
{
struct usb_device *udev = ati_remote->udev;
int pipe, maxp;
init_waitqueue_head(&ati_remote->wait);
/* Set up irq_urb */
pipe = usb_rcvintpipe(udev, ati_remote->endpoint_in->bEndpointAddress);
maxp = usb_maxpacket(udev, pipe);
maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp;
usb_fill_int_urb(ati_remote->irq_urb, udev, pipe, ati_remote->inbuf,
maxp, ati_remote_irq_in, ati_remote,
ati_remote->endpoint_in->bInterval);
ati_remote->irq_urb->transfer_dma = ati_remote->inbuf_dma;
ati_remote->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* Set up out_urb */
pipe = usb_sndintpipe(udev, ati_remote->endpoint_out->bEndpointAddress);
maxp = usb_maxpacket(udev, pipe);
maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp;
usb_fill_int_urb(ati_remote->out_urb, udev, pipe, ati_remote->outbuf,
maxp, ati_remote_irq_out, ati_remote,
ati_remote->endpoint_out->bInterval);
ati_remote->out_urb->transfer_dma = ati_remote->outbuf_dma;
ati_remote->out_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* send initialization strings */
if ((ati_remote_sendpacket(ati_remote, 0x8004, init1)) ||
(ati_remote_sendpacket(ati_remote, 0x8007, init2))) {
dev_err(&ati_remote->interface->dev,
"Initializing ati_remote hardware failed.\n");
return -EIO;
}
return 0;
}
/*
* ati_remote_probe
*/
static int ati_remote_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_host_interface *iface_host = interface->cur_altsetting;
struct usb_endpoint_descriptor *endpoint_in, *endpoint_out;
struct ati_receiver_type *type = (struct ati_receiver_type *)id->driver_info;
struct ati_remote *ati_remote;
struct input_dev *input_dev;
struct device *device = &interface->dev;
struct rc_dev *rc_dev;
int err = -ENOMEM;
if (iface_host->desc.bNumEndpoints != 2) {
dev_err(device, "%s: Unexpected desc.bNumEndpoints\n", __func__);
return -ENODEV;
}
endpoint_in = &iface_host->endpoint[0].desc;
endpoint_out = &iface_host->endpoint[1].desc;
if (!usb_endpoint_is_int_in(endpoint_in)) {
dev_err(device, "%s: Unexpected endpoint_in\n", __func__);
return -ENODEV;
}
if (le16_to_cpu(endpoint_in->wMaxPacketSize) == 0) {
dev_err(device, "%s: endpoint_in message size==0?\n", __func__);
return -ENODEV;
}
if (!usb_endpoint_is_int_out(endpoint_out)) {
dev_err(device, "%s: Unexpected endpoint_out\n", __func__);
return -ENODEV;
}
	ati_remote = kzalloc(sizeof(struct ati_remote), GFP_KERNEL);
rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!ati_remote || !rc_dev)
goto exit_free_dev_rdev;
/* Allocate URB buffers, URBs */
if (ati_remote_alloc_buffers(udev, ati_remote))
goto exit_free_buffers;
ati_remote->endpoint_in = endpoint_in;
ati_remote->endpoint_out = endpoint_out;
ati_remote->udev = udev;
ati_remote->rdev = rc_dev;
ati_remote->interface = interface;
usb_make_path(udev, ati_remote->rc_phys, sizeof(ati_remote->rc_phys));
strscpy(ati_remote->mouse_phys, ati_remote->rc_phys,
sizeof(ati_remote->mouse_phys));
strlcat(ati_remote->rc_phys, "/input0", sizeof(ati_remote->rc_phys));
strlcat(ati_remote->mouse_phys, "/input1", sizeof(ati_remote->mouse_phys));
snprintf(ati_remote->rc_name, sizeof(ati_remote->rc_name), "%s%s%s",
udev->manufacturer ?: "",
udev->manufacturer && udev->product ? " " : "",
udev->product ?: "");
if (!strlen(ati_remote->rc_name))
snprintf(ati_remote->rc_name, sizeof(ati_remote->rc_name),
DRIVER_DESC "(%04x,%04x)",
le16_to_cpu(ati_remote->udev->descriptor.idVendor),
le16_to_cpu(ati_remote->udev->descriptor.idProduct));
snprintf(ati_remote->mouse_name, sizeof(ati_remote->mouse_name),
"%s mouse", ati_remote->rc_name);
rc_dev->map_name = RC_MAP_ATI_X10; /* default map */
/* set default keymap according to receiver model */
if (type) {
if (type->default_keymap)
rc_dev->map_name = type->default_keymap;
else if (type->get_default_keymap)
rc_dev->map_name = type->get_default_keymap(interface);
}
ati_remote_rc_init(ati_remote);
mutex_init(&ati_remote->open_mutex);
/* Device Hardware Initialization - fills in ati_remote->idev from udev. */
err = ati_remote_initialize(ati_remote);
if (err)
goto exit_kill_urbs;
/* Set up and register rc device */
err = rc_register_device(ati_remote->rdev);
if (err)
goto exit_kill_urbs;
/* Set up and register mouse input device */
if (mouse) {
input_dev = input_allocate_device();
if (!input_dev) {
err = -ENOMEM;
goto exit_unregister_device;
}
ati_remote->idev = input_dev;
ati_remote_input_init(ati_remote);
err = input_register_device(input_dev);
if (err)
goto exit_free_input_device;
}
usb_set_intfdata(interface, ati_remote);
return 0;
exit_free_input_device:
input_free_device(input_dev);
exit_unregister_device:
rc_unregister_device(rc_dev);
rc_dev = NULL;
exit_kill_urbs:
usb_kill_urb(ati_remote->irq_urb);
usb_kill_urb(ati_remote->out_urb);
exit_free_buffers:
ati_remote_free_buffers(ati_remote);
exit_free_dev_rdev:
rc_free_device(rc_dev);
kfree(ati_remote);
return err;
}
/*
* ati_remote_disconnect
*/
static void ati_remote_disconnect(struct usb_interface *interface)
{
struct ati_remote *ati_remote;
ati_remote = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
if (!ati_remote) {
dev_warn(&interface->dev, "%s - null device?\n", __func__);
return;
}
usb_kill_urb(ati_remote->irq_urb);
usb_kill_urb(ati_remote->out_urb);
if (ati_remote->idev)
input_unregister_device(ati_remote->idev);
rc_unregister_device(ati_remote->rdev);
ati_remote_free_buffers(ati_remote);
kfree(ati_remote);
}
/* usb specific object to register with the usb subsystem */
static struct usb_driver ati_remote_driver = {
.name = "ati_remote",
.probe = ati_remote_probe,
.disconnect = ati_remote_disconnect,
.id_table = ati_remote_table,
};
module_usb_driver(ati_remote_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/media/rc/ati_remote.c |
// SPDX-License-Identifier: GPL-2.0
// ir-rc5-decoder.c - decoder for RC5(x) and StreamZap protocols
//
// Copyright (C) 2010 by Mauro Carvalho Chehab
// Copyright (C) 2010 by Jarod Wilson <[email protected]>
/*
* This decoder handles the 14 bit RC5 protocol, 15 bit "StreamZap" protocol
* and 20 bit RC5x protocol.
*/
#include "rc-core-priv.h"
#include <linux/module.h>
#define RC5_NBITS 14
#define RC5_SZ_NBITS 15
#define RC5X_NBITS 20
#define CHECK_RC5X_NBITS 8
#define RC5_UNIT 889 /* us */
#define RC5_BIT_START (1 * RC5_UNIT)
#define RC5_BIT_END (1 * RC5_UNIT)
#define RC5X_SPACE (4 * RC5_UNIT)
#define RC5_TRAILER (6 * RC5_UNIT) /* In reality, approx 100 */
enum rc5_state {
STATE_INACTIVE,
STATE_BIT_START,
STATE_BIT_END,
STATE_CHECK_RC5X,
STATE_FINISHED,
};
/**
* ir_rc5_decode() - Decode one RC-5 pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct rc5_dec *data = &dev->raw->rc5;
u8 toggle;
u32 scancode;
enum rc_proto protocol;
if (!is_timing_event(ev)) {
if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
goto out;
again:
dev_dbg(&dev->dev, "RC5(x/sz) decode started at state %i (%uus %s)\n",
data->state, ev.duration, TO_STR(ev.pulse));
if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
return 0;
switch (data->state) {
case STATE_INACTIVE:
if (!ev.pulse)
break;
data->state = STATE_BIT_START;
data->count = 1;
decrease_duration(&ev, RC5_BIT_START);
goto again;
case STATE_BIT_START:
if (!ev.pulse && geq_margin(ev.duration, RC5_TRAILER, RC5_UNIT / 2)) {
data->state = STATE_FINISHED;
goto again;
}
if (!eq_margin(ev.duration, RC5_BIT_START, RC5_UNIT / 2))
break;
data->bits <<= 1;
if (!ev.pulse)
data->bits |= 1;
data->count++;
data->state = STATE_BIT_END;
return 0;
case STATE_BIT_END:
if (data->count == CHECK_RC5X_NBITS)
data->state = STATE_CHECK_RC5X;
else
data->state = STATE_BIT_START;
decrease_duration(&ev, RC5_BIT_END);
goto again;
case STATE_CHECK_RC5X:
if (!ev.pulse && geq_margin(ev.duration, RC5X_SPACE, RC5_UNIT / 2)) {
data->is_rc5x = true;
decrease_duration(&ev, RC5X_SPACE);
} else
data->is_rc5x = false;
data->state = STATE_BIT_START;
goto again;
case STATE_FINISHED:
if (ev.pulse)
break;
if (data->is_rc5x && data->count == RC5X_NBITS) {
/* RC5X */
u8 xdata, command, system;
if (!(dev->enabled_protocols & RC_PROTO_BIT_RC5X_20)) {
data->state = STATE_INACTIVE;
return 0;
}
xdata = (data->bits & 0x0003F) >> 0;
command = (data->bits & 0x00FC0) >> 6;
system = (data->bits & 0x1F000) >> 12;
toggle = (data->bits & 0x20000) ? 1 : 0;
command += (data->bits & 0x40000) ? 0 : 0x40;
scancode = system << 16 | command << 8 | xdata;
protocol = RC_PROTO_RC5X_20;
} else if (!data->is_rc5x && data->count == RC5_NBITS) {
/* RC5 */
u8 command, system;
if (!(dev->enabled_protocols & RC_PROTO_BIT_RC5)) {
data->state = STATE_INACTIVE;
return 0;
}
command = (data->bits & 0x0003F) >> 0;
system = (data->bits & 0x007C0) >> 6;
toggle = (data->bits & 0x00800) ? 1 : 0;
command += (data->bits & 0x01000) ? 0 : 0x40;
scancode = system << 8 | command;
protocol = RC_PROTO_RC5;
} else if (!data->is_rc5x && data->count == RC5_SZ_NBITS) {
/* RC5 StreamZap */
u8 command, system;
if (!(dev->enabled_protocols & RC_PROTO_BIT_RC5_SZ)) {
data->state = STATE_INACTIVE;
return 0;
}
command = (data->bits & 0x0003F) >> 0;
system = (data->bits & 0x02FC0) >> 6;
toggle = (data->bits & 0x01000) ? 1 : 0;
scancode = system << 6 | command;
protocol = RC_PROTO_RC5_SZ;
} else
break;
dev_dbg(&dev->dev, "RC5(x/sz) scancode 0x%06x (p: %u, t: %u)\n",
scancode, protocol, toggle);
rc_keydown(dev, protocol, scancode, toggle);
data->state = STATE_INACTIVE;
return 0;
}
out:
dev_dbg(&dev->dev, "RC5(x/sz) decode failed at state %i count %d (%uus %s)\n",
data->state, data->count, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
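/*
 * Illustrative sketch, not part of the decoder: how the 14 raw RC5 bits
 * collected above become a scancode in the STATE_FINISHED arm. Bit 12
 * (the second start bit) is an inverted command bit 6 and bit 11 is the
 * toggle, so bits = 0x007c1 yields scancode 0x1f41 with toggle 0.
 */
static u32 rc5_bits_to_scancode(u32 bits, u8 *toggle)
{
	u32 command = (bits & 0x0003f) >> 0;
	u32 system = (bits & 0x007c0) >> 6;
	*toggle = (bits & 0x00800) ? 1 : 0;
	command += (bits & 0x01000) ? 0 : 0x40;	/* inverted field bit */
	return system << 8 | command;
}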
static const struct ir_raw_timings_manchester ir_rc5_timings = {
.leader_pulse = RC5_UNIT,
.clock = RC5_UNIT,
.trailer_space = RC5_UNIT * 10,
};
static const struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
{
.leader_pulse = RC5_UNIT,
.clock = RC5_UNIT,
.trailer_space = RC5X_SPACE,
},
{
.clock = RC5_UNIT,
.trailer_space = RC5_UNIT * 10,
},
};
static const struct ir_raw_timings_manchester ir_rc5_sz_timings = {
.leader_pulse = RC5_UNIT,
.clock = RC5_UNIT,
.trailer_space = RC5_UNIT * 10,
};
/**
* ir_rc5_encode() - Encode a scancode as a stream of raw events
*
* @protocol: protocol variant to encode
* @scancode: scancode to encode
* @events: array of raw ir events to write into
* @max: maximum size of @events
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
* -EINVAL if the scancode is ambiguous or invalid.
*/
static int ir_rc5_encode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max)
{
int ret;
struct ir_raw_event *e = events;
unsigned int data, xdata, command, commandx, system, pre_space_data;
/* Detect protocol and convert scancode to raw data */
if (protocol == RC_PROTO_RC5) {
/* decode scancode */
command = (scancode & 0x003f) >> 0;
commandx = (scancode & 0x0040) >> 6;
system = (scancode & 0x1f00) >> 8;
/* encode data */
data = !commandx << 12 | system << 6 | command;
/* First bit is encoded by leader_pulse */
ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings,
RC5_NBITS - 1, data);
if (ret < 0)
return ret;
} else if (protocol == RC_PROTO_RC5X_20) {
/* decode scancode */
xdata = (scancode & 0x00003f) >> 0;
command = (scancode & 0x003f00) >> 8;
commandx = !(scancode & 0x004000);
system = (scancode & 0x1f0000) >> 16;
/* encode data */
data = commandx << 18 | system << 12 | command << 6 | xdata;
/* First bit is encoded by leader_pulse */
pre_space_data = data >> (RC5X_NBITS - CHECK_RC5X_NBITS);
ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
CHECK_RC5X_NBITS - 1,
pre_space_data);
if (ret < 0)
return ret;
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc5x_timings[1],
RC5X_NBITS - CHECK_RC5X_NBITS,
data);
if (ret < 0)
return ret;
} else if (protocol == RC_PROTO_RC5_SZ) {
/* RC5-SZ scancode is raw enough for Manchester as it is */
/* First bit is encoded by leader_pulse */
ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
RC5_SZ_NBITS - 1,
scancode & 0x2fff);
if (ret < 0)
return ret;
} else {
return -EINVAL;
}
return e - events;
}
static struct ir_raw_handler rc5_handler = {
.protocols = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 |
RC_PROTO_BIT_RC5_SZ,
.decode = ir_rc5_decode,
.encode = ir_rc5_encode,
.carrier = 36000,
.min_timeout = RC5_TRAILER,
};
static int __init ir_rc5_decode_init(void)
{
ir_raw_handler_register(&rc5_handler);
printk(KERN_INFO "IR RC5(x/sz) protocol handler initialized\n");
return 0;
}
static void __exit ir_rc5_decode_exit(void)
{
ir_raw_handler_unregister(&rc5_handler);
}
module_init(ir_rc5_decode_init);
module_exit(ir_rc5_decode_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mauro Carvalho Chehab and Jarod Wilson");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("RC5(x/sz) IR protocol decoder");
| linux-master | drivers/media/rc/ir-rc5-decoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/* ir-mce_kbd-decoder.c - A decoder for the RC6-ish keyboard/mouse IR protocol
* used by the Microsoft Remote Keyboard for Windows Media Center Edition,
* referred to by Microsoft's Windows Media Center remote specification docs
* as "an internal protocol called MCIR-2".
*
* Copyright (C) 2011 by Jarod Wilson <[email protected]>
*/
#include <linux/module.h>
#include "rc-core-priv.h"
/*
* This decoder currently supports:
* - MCIR-2 29-bit IR signals used for mouse movement and buttons
* - MCIR-2 32-bit IR signals used for standard keyboard keys
*
* The media keys on the keyboard send RC-6 signals that are indistinguishable
* from the keys of the same name on the stock MCE remote, and will be handled
* by the standard RC-6 decoder, and be made available to the system via the
* input device for the remote, rather than the keyboard/mouse one.
*/
#define MCIR2_UNIT 333 /* us */
#define MCIR2_HEADER_NBITS 5
#define MCIR2_MOUSE_NBITS 29
#define MCIR2_KEYBOARD_NBITS 32
#define MCIR2_PREFIX_PULSE (8 * MCIR2_UNIT)
#define MCIR2_PREFIX_SPACE (1 * MCIR2_UNIT)
#define MCIR2_MAX_LEN (3 * MCIR2_UNIT)
#define MCIR2_BIT_START (1 * MCIR2_UNIT)
#define MCIR2_BIT_END (1 * MCIR2_UNIT)
#define MCIR2_BIT_0 (1 * MCIR2_UNIT)
#define MCIR2_BIT_SET (2 * MCIR2_UNIT)
#define MCIR2_MODE_MASK 0xf /* for the header bits */
#define MCIR2_KEYBOARD_HEADER 0x4
#define MCIR2_MOUSE_HEADER 0x1
#define MCIR2_MASK_KEYS_START 0xe0
enum mce_kbd_mode {
MCIR2_MODE_KEYBOARD,
MCIR2_MODE_MOUSE,
MCIR2_MODE_UNKNOWN,
};
enum mce_kbd_state {
STATE_INACTIVE,
STATE_HEADER_BIT_START,
STATE_HEADER_BIT_END,
STATE_BODY_BIT_START,
STATE_BODY_BIT_END,
STATE_FINISHED,
};
static unsigned char kbd_keycodes[256] = {
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_A,
KEY_B, KEY_C, KEY_D, KEY_E, KEY_F,
KEY_G, KEY_H, KEY_I, KEY_J, KEY_K,
KEY_L, KEY_M, KEY_N, KEY_O, KEY_P,
KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U,
KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z,
KEY_1, KEY_2, KEY_3, KEY_4, KEY_5,
KEY_6, KEY_7, KEY_8, KEY_9, KEY_0,
KEY_ENTER, KEY_ESC, KEY_BACKSPACE, KEY_TAB, KEY_SPACE,
KEY_MINUS, KEY_EQUAL, KEY_LEFTBRACE, KEY_RIGHTBRACE, KEY_BACKSLASH,
KEY_BACKSLASH, KEY_SEMICOLON, KEY_APOSTROPHE, KEY_GRAVE, KEY_COMMA,
KEY_DOT, KEY_SLASH, KEY_CAPSLOCK, KEY_F1, KEY_F2,
KEY_F3, KEY_F4, KEY_F5, KEY_F6, KEY_F7,
KEY_F8, KEY_F9, KEY_F10, KEY_F11, KEY_F12,
KEY_SYSRQ, KEY_SCROLLLOCK, KEY_PAUSE, KEY_INSERT, KEY_HOME,
KEY_PAGEUP, KEY_DELETE, KEY_END, KEY_PAGEDOWN, KEY_RIGHT,
KEY_LEFT, KEY_DOWN, KEY_UP, KEY_NUMLOCK, KEY_KPSLASH,
KEY_KPASTERISK, KEY_KPMINUS, KEY_KPPLUS, KEY_KPENTER, KEY_KP1,
KEY_KP2, KEY_KP3, KEY_KP4, KEY_KP5, KEY_KP6,
KEY_KP7, KEY_KP8, KEY_KP9, KEY_KP0, KEY_KPDOT,
KEY_102ND, KEY_COMPOSE, KEY_POWER, KEY_KPEQUAL, KEY_F13,
KEY_F14, KEY_F15, KEY_F16, KEY_F17, KEY_F18,
KEY_F19, KEY_F20, KEY_F21, KEY_F22, KEY_F23,
KEY_F24, KEY_OPEN, KEY_HELP, KEY_PROPS, KEY_FRONT,
KEY_STOP, KEY_AGAIN, KEY_UNDO, KEY_CUT, KEY_COPY,
KEY_PASTE, KEY_FIND, KEY_MUTE, KEY_VOLUMEUP, KEY_VOLUMEDOWN,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_KPCOMMA, KEY_RESERVED,
KEY_RO, KEY_KATAKANAHIRAGANA, KEY_YEN, KEY_HENKAN, KEY_MUHENKAN,
KEY_KPJPCOMMA, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_HANGUEL,
KEY_HANJA, KEY_KATAKANA, KEY_HIRAGANA, KEY_ZENKAKUHANKAKU, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_LEFTCTRL,
KEY_LEFTSHIFT, KEY_LEFTALT, KEY_LEFTMETA, KEY_RIGHTCTRL, KEY_RIGHTSHIFT,
KEY_RIGHTALT, KEY_RIGHTMETA, KEY_PLAYPAUSE, KEY_STOPCD, KEY_PREVIOUSSONG,
KEY_NEXTSONG, KEY_EJECTCD, KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_MUTE,
KEY_WWW, KEY_BACK, KEY_FORWARD, KEY_STOP, KEY_FIND,
KEY_SCROLLUP, KEY_SCROLLDOWN, KEY_EDIT, KEY_SLEEP, KEY_COFFEE,
KEY_REFRESH, KEY_CALC, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
KEY_RESERVED
};
static void mce_kbd_rx_timeout(struct timer_list *t)
{
struct ir_raw_event_ctrl *raw = from_timer(raw, t, mce_kbd.rx_timeout);
unsigned char maskcode;
unsigned long flags;
int i;
dev_dbg(&raw->dev->dev, "timer callback clearing all keys\n");
spin_lock_irqsave(&raw->mce_kbd.keylock, flags);
if (time_is_before_eq_jiffies(raw->mce_kbd.rx_timeout.expires)) {
for (i = 0; i < 7; i++) {
maskcode = kbd_keycodes[MCIR2_MASK_KEYS_START + i];
input_report_key(raw->dev->input_dev, maskcode, 0);
}
for (i = 0; i < MCIR2_MASK_KEYS_START; i++)
input_report_key(raw->dev->input_dev, kbd_keycodes[i],
0);
input_sync(raw->dev->input_dev);
}
spin_unlock_irqrestore(&raw->mce_kbd.keylock, flags);
}
static enum mce_kbd_mode mce_kbd_mode(struct mce_kbd_dec *data)
{
switch (data->header & MCIR2_MODE_MASK) {
case MCIR2_KEYBOARD_HEADER:
return MCIR2_MODE_KEYBOARD;
case MCIR2_MOUSE_HEADER:
return MCIR2_MODE_MOUSE;
default:
return MCIR2_MODE_UNKNOWN;
}
}
static void ir_mce_kbd_process_keyboard_data(struct rc_dev *dev, u32 scancode)
{
u8 keydata1 = (scancode >> 8) & 0xff;
u8 keydata2 = (scancode >> 16) & 0xff;
u8 shiftmask = scancode & 0xff;
unsigned char maskcode;
int i, keystate;
dev_dbg(&dev->dev, "keyboard: keydata2 = 0x%02x, keydata1 = 0x%02x, shiftmask = 0x%02x\n",
keydata2, keydata1, shiftmask);
for (i = 0; i < 7; i++) {
maskcode = kbd_keycodes[MCIR2_MASK_KEYS_START + i];
if (shiftmask & (1 << i))
keystate = 1;
else
keystate = 0;
input_report_key(dev->input_dev, maskcode, keystate);
}
if (keydata1)
input_report_key(dev->input_dev, kbd_keycodes[keydata1], 1);
if (keydata2)
input_report_key(dev->input_dev, kbd_keycodes[keydata2], 1);
if (!keydata1 && !keydata2) {
for (i = 0; i < MCIR2_MASK_KEYS_START; i++)
input_report_key(dev->input_dev, kbd_keycodes[i], 0);
}
}
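/*
 * Illustrative sketch, not part of the decoder: which modifier each bit
 * of the 8-bit shiftmask above drives. Bit i selects
 * kbd_keycodes[MCIR2_MASK_KEYS_START + i], i.e. indexes 0xe0..0xe6
 * (KEY_LEFTCTRL through KEY_RIGHTALT); only seven bits are used.
 */
static inline unsigned char mcir2_modifier_keycode(int bit)
{
	return kbd_keycodes[MCIR2_MASK_KEYS_START + bit];
}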
static void ir_mce_kbd_process_mouse_data(struct rc_dev *dev, u32 scancode)
{
/* raw mouse coordinates */
u8 xdata = (scancode >> 7) & 0x7f;
u8 ydata = (scancode >> 14) & 0x7f;
int x, y;
/* mouse buttons */
bool right = scancode & 0x40;
bool left = scancode & 0x20;
if (xdata & 0x40)
x = -((~xdata & 0x7f) + 1);
else
x = xdata;
if (ydata & 0x40)
y = -((~ydata & 0x7f) + 1);
else
y = ydata;
dev_dbg(&dev->dev, "mouse: x = %d, y = %d, btns = %s%s\n",
x, y, left ? "L" : "", right ? "R" : "");
input_report_rel(dev->input_dev, REL_X, x);
input_report_rel(dev->input_dev, REL_Y, y);
input_report_key(dev->input_dev, BTN_LEFT, left);
input_report_key(dev->input_dev, BTN_RIGHT, right);
}
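/*
 * Illustrative sketch, not part of the decoder: the 7-bit two's-complement
 * sign extension used for the mouse deltas above, e.g. 0x01 -> 1,
 * 0x7f -> -1 and 0x41 -> -63.
 */
static inline int mcir2_sign_extend7(u8 v)
{
	return (v & 0x40) ? -(int)((~v & 0x7f) + 1) : v;
}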
/**
* ir_mce_kbd_decode() - Decode one mce_kbd pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_mce_kbd_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct mce_kbd_dec *data = &dev->raw->mce_kbd;
u32 scancode;
unsigned long delay;
struct lirc_scancode lsc = {};
if (!is_timing_event(ev)) {
if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
if (!geq_margin(ev.duration, MCIR2_UNIT, MCIR2_UNIT / 2))
goto out;
again:
dev_dbg(&dev->dev, "started at state %i (%uus %s)\n",
data->state, ev.duration, TO_STR(ev.pulse));
if (!geq_margin(ev.duration, MCIR2_UNIT, MCIR2_UNIT / 2))
return 0;
switch (data->state) {
case STATE_INACTIVE:
if (!ev.pulse)
break;
/* Note: larger margin on first pulse since each MCIR2_UNIT
is quite short and some hardware takes some time to
adjust to the signal */
if (!eq_margin(ev.duration, MCIR2_PREFIX_PULSE, MCIR2_UNIT))
break;
data->state = STATE_HEADER_BIT_START;
data->count = 0;
data->header = 0;
return 0;
case STATE_HEADER_BIT_START:
if (geq_margin(ev.duration, MCIR2_MAX_LEN, MCIR2_UNIT / 2))
break;
data->header <<= 1;
if (ev.pulse)
data->header |= 1;
data->count++;
data->state = STATE_HEADER_BIT_END;
return 0;
case STATE_HEADER_BIT_END:
decrease_duration(&ev, MCIR2_BIT_END);
if (data->count != MCIR2_HEADER_NBITS) {
data->state = STATE_HEADER_BIT_START;
goto again;
}
switch (mce_kbd_mode(data)) {
case MCIR2_MODE_KEYBOARD:
data->wanted_bits = MCIR2_KEYBOARD_NBITS;
break;
case MCIR2_MODE_MOUSE:
data->wanted_bits = MCIR2_MOUSE_NBITS;
break;
default:
dev_dbg(&dev->dev, "not keyboard or mouse data\n");
goto out;
}
data->count = 0;
data->body = 0;
data->state = STATE_BODY_BIT_START;
goto again;
case STATE_BODY_BIT_START:
if (geq_margin(ev.duration, MCIR2_MAX_LEN, MCIR2_UNIT / 2))
break;
data->body <<= 1;
if (ev.pulse)
data->body |= 1;
data->count++;
data->state = STATE_BODY_BIT_END;
return 0;
case STATE_BODY_BIT_END:
if (data->count == data->wanted_bits)
data->state = STATE_FINISHED;
else
data->state = STATE_BODY_BIT_START;
decrease_duration(&ev, MCIR2_BIT_END);
goto again;
case STATE_FINISHED:
if (ev.pulse)
break;
switch (data->wanted_bits) {
case MCIR2_KEYBOARD_NBITS:
scancode = data->body & 0xffffff;
dev_dbg(&dev->dev, "keyboard data 0x%08x\n",
data->body);
spin_lock(&data->keylock);
if (scancode) {
delay = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(100);
mod_timer(&data->rx_timeout, jiffies + delay);
} else {
del_timer(&data->rx_timeout);
}
/* Pass data to keyboard buffer parser */
ir_mce_kbd_process_keyboard_data(dev, scancode);
spin_unlock(&data->keylock);
lsc.rc_proto = RC_PROTO_MCIR2_KBD;
break;
case MCIR2_MOUSE_NBITS:
scancode = data->body & 0x1fffff;
dev_dbg(&dev->dev, "mouse data 0x%06x\n", scancode);
/* Pass data to mouse buffer parser */
ir_mce_kbd_process_mouse_data(dev, scancode);
lsc.rc_proto = RC_PROTO_MCIR2_MSE;
break;
default:
dev_dbg(&dev->dev, "not keyboard or mouse data\n");
goto out;
}
lsc.scancode = scancode;
lirc_scancode_event(dev, &lsc);
data->state = STATE_INACTIVE;
input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
input_sync(dev->input_dev);
return 0;
}
out:
dev_dbg(&dev->dev, "failed at state %i (%uus %s)\n",
data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
static int ir_mce_kbd_register(struct rc_dev *dev)
{
struct mce_kbd_dec *mce_kbd = &dev->raw->mce_kbd;
timer_setup(&mce_kbd->rx_timeout, mce_kbd_rx_timeout, 0);
spin_lock_init(&mce_kbd->keylock);
return 0;
}
static int ir_mce_kbd_unregister(struct rc_dev *dev)
{
struct mce_kbd_dec *mce_kbd = &dev->raw->mce_kbd;
del_timer_sync(&mce_kbd->rx_timeout);
return 0;
}
static const struct ir_raw_timings_manchester ir_mce_kbd_timings = {
.leader_pulse = MCIR2_PREFIX_PULSE,
.invert = 1,
.clock = MCIR2_UNIT,
.trailer_space = MCIR2_UNIT * 10,
};
/**
* ir_mce_kbd_encode() - Encode a scancode as a stream of raw events
*
* @protocol: protocol to encode
* @scancode: scancode to encode
* @events: array of raw ir events to write into
* @max: maximum size of @events
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
*/
static int ir_mce_kbd_encode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max)
{
struct ir_raw_event *e = events;
int len, ret;
u64 raw;
if (protocol == RC_PROTO_MCIR2_KBD) {
raw = scancode |
((u64)MCIR2_KEYBOARD_HEADER << MCIR2_KEYBOARD_NBITS);
len = MCIR2_KEYBOARD_NBITS + MCIR2_HEADER_NBITS;
} else {
raw = scancode |
((u64)MCIR2_MOUSE_HEADER << MCIR2_MOUSE_NBITS);
len = MCIR2_MOUSE_NBITS + MCIR2_HEADER_NBITS;
}
ret = ir_raw_gen_manchester(&e, max, &ir_mce_kbd_timings, len, raw);
if (ret < 0)
return ret;
return e - events;
}
static struct ir_raw_handler mce_kbd_handler = {
.protocols = RC_PROTO_BIT_MCIR2_KBD | RC_PROTO_BIT_MCIR2_MSE,
.decode = ir_mce_kbd_decode,
.encode = ir_mce_kbd_encode,
.raw_register = ir_mce_kbd_register,
.raw_unregister = ir_mce_kbd_unregister,
.carrier = 36000,
.min_timeout = MCIR2_MAX_LEN + MCIR2_UNIT / 2,
};
static int __init ir_mce_kbd_decode_init(void)
{
ir_raw_handler_register(&mce_kbd_handler);
printk(KERN_INFO "IR MCE Keyboard/mouse protocol handler initialized\n");
return 0;
}
static void __exit ir_mce_kbd_decode_exit(void)
{
ir_raw_handler_unregister(&mce_kbd_handler);
}
module_init(ir_mce_kbd_decode_init);
module_exit(ir_mce_kbd_decode_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <[email protected]>");
MODULE_DESCRIPTION("MCE Keyboard/mouse IR protocol decoder");
| linux-master | drivers/media/rc/ir-mce_kbd-decoder.c |
// SPDX-License-Identifier: GPL-2.0+
// ir-imon-decoder.c - handle iMon protocol
//
// Copyright (C) 2018 by Sean Young <[email protected]>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include "rc-core-priv.h"
#define IMON_UNIT 416 /* us */
#define IMON_BITS 30
#define IMON_CHKBITS (BIT(30) | BIT(25) | BIT(24) | BIT(22) | \
BIT(21) | BIT(20) | BIT(19) | BIT(18) | \
BIT(17) | BIT(16) | BIT(14) | BIT(13) | \
BIT(12) | BIT(11) | BIT(10) | BIT(9))
/*
* This protocol has 30 bits. The format is one IMON_UNIT header pulse,
* followed by 30 bits. Each bit is one IMON_UNIT check field, and then
* one IMON_UNIT field with the actual bit (1=space, 0=pulse).
* For some bits the check field is always a space; for the others it is
* a pulse if both the preceding and current bits are zero, else a space.
* IMON_CHKBITS defines which bits carry a check field.
*
* There is no way to distinguish an incomplete message from one where
* the lower bits are all set; in other words, one where the last pulse
* belongs to the lowest bit that is 0.
*/
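/*
* Illustrative sketch (not part of the decoder proper): the level a
* check field is expected to take for a checked bit, given the
* previously transmitted (higher) data bit and the current one. The
* check field is a pulse only when both bits are zero.
*/
static inline bool imon_check_field_is_pulse(bool prev_bit, bool cur_bit)
{
return !prev_bit && !cur_bit;
}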
enum imon_state {
STATE_INACTIVE,
STATE_BIT_CHK,
STATE_BIT_START,
STATE_FINISHED,
STATE_ERROR,
};
static void ir_imon_decode_scancode(struct rc_dev *dev)
{
struct imon_dec *imon = &dev->raw->imon;
/* Keyboard/Mouse toggle */
if (imon->bits == 0x299115b7)
imon->stick_keyboard = !imon->stick_keyboard;
if ((imon->bits & 0xfc0000ff) == 0x680000b7) {
int rel_x, rel_y;
u8 buf;
buf = imon->bits >> 16;
rel_x = (buf & 0x08) | (buf & 0x10) >> 2 |
(buf & 0x20) >> 4 | (buf & 0x40) >> 6;
if (imon->bits & 0x02000000)
rel_x |= ~0x0f;
buf = imon->bits >> 8;
rel_y = (buf & 0x08) | (buf & 0x10) >> 2 |
(buf & 0x20) >> 4 | (buf & 0x40) >> 6;
if (imon->bits & 0x01000000)
rel_y |= ~0x0f;
if (rel_x && rel_y && imon->stick_keyboard) {
if (abs(rel_y) > abs(rel_x))
imon->bits = rel_y > 0 ?
0x289515b7 : /* KEY_DOWN */
0x2aa515b7; /* KEY_UP */
else
imon->bits = rel_x > 0 ?
0x2ba515b7 : /* KEY_RIGHT */
0x29a515b7; /* KEY_LEFT */
}
if (!imon->stick_keyboard) {
input_report_rel(dev->input_dev, REL_X, rel_x);
input_report_rel(dev->input_dev, REL_Y, rel_y);
input_report_key(dev->input_dev, BTN_LEFT,
(imon->bits & 0x00010000) != 0);
input_report_key(dev->input_dev, BTN_RIGHT,
(imon->bits & 0x00040000) != 0);
}
}
rc_keydown(dev, RC_PROTO_IMON, imon->bits, 0);
}
/**
* ir_imon_decode() - Decode one iMON pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_imon_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct imon_dec *data = &dev->raw->imon;
if (!is_timing_event(ev)) {
if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
dev_dbg(&dev->dev,
"iMON decode started at state %d bitno %d (%uus %s)\n",
data->state, data->count, ev.duration, TO_STR(ev.pulse));
/*
* Since the iMON protocol is a series of bits, if at any point
* we encounter an error, make sure that any remaining bits
* aren't parsed as a scancode made up of fewer bits.
*
* Note that if the stick is held, then the remote repeats
* the scancode with about 12ms between them. So, make sure
* we have at least 10ms of space after an error. That way,
* we're at a new scancode.
*/
if (data->state == STATE_ERROR) {
if (!ev.pulse && ev.duration > MS_TO_US(10))
data->state = STATE_INACTIVE;
return 0;
}
for (;;) {
if (!geq_margin(ev.duration, IMON_UNIT, IMON_UNIT / 2))
return 0;
decrease_duration(&ev, IMON_UNIT);
switch (data->state) {
case STATE_INACTIVE:
if (ev.pulse) {
data->state = STATE_BIT_CHK;
data->bits = 0;
data->count = IMON_BITS;
}
break;
case STATE_BIT_CHK:
if (IMON_CHKBITS & BIT(data->count))
data->last_chk = ev.pulse;
else if (ev.pulse)
goto err_out;
data->state = STATE_BIT_START;
break;
case STATE_BIT_START:
data->bits <<= 1;
if (!ev.pulse)
data->bits |= 1;
if (IMON_CHKBITS & BIT(data->count)) {
if (data->last_chk != !(data->bits & 3))
goto err_out;
}
if (!data->count--)
data->state = STATE_FINISHED;
else
data->state = STATE_BIT_CHK;
break;
case STATE_FINISHED:
if (ev.pulse)
goto err_out;
ir_imon_decode_scancode(dev);
data->state = STATE_INACTIVE;
break;
}
}
err_out:
dev_dbg(&dev->dev,
"iMON decode failed at state %d bitno %d (%uus %s)\n",
data->state, data->count, ev.duration, TO_STR(ev.pulse));
data->state = STATE_ERROR;
return -EINVAL;
}
/**
* ir_imon_encode() - Encode a scancode as a stream of raw events
*
* @protocol: protocol to encode
* @scancode: scancode to encode
* @events: array of raw ir events to write into
* @max: maximum size of @events
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
*/
static int ir_imon_encode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max)
{
struct ir_raw_event *e = events;
int i, pulse;
if (!max--)
return -ENOBUFS;
init_ir_raw_event_duration(e, 1, IMON_UNIT);
for (i = IMON_BITS; i >= 0; i--) {
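/*
* Each data bit is preceded by its check field; for checked bits the
* check is a pulse only when bit i and the already emitted bit i + 1
* are both zero, otherwise it is a space.
*/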
if (BIT(i) & IMON_CHKBITS)
pulse = !(scancode & (BIT(i) | BIT(i + 1)));
else
pulse = 0;
if (pulse == e->pulse) {
e->duration += IMON_UNIT;
} else {
if (!max--)
return -ENOBUFS;
init_ir_raw_event_duration(++e, pulse, IMON_UNIT);
}
pulse = !(scancode & BIT(i));
if (pulse == e->pulse) {
e->duration += IMON_UNIT;
} else {
if (!max--)
return -ENOBUFS;
init_ir_raw_event_duration(++e, pulse, IMON_UNIT);
}
}
if (e->pulse)
e++;
return e - events;
}
static int ir_imon_register(struct rc_dev *dev)
{
struct imon_dec *imon = &dev->raw->imon;
imon->stick_keyboard = false;
return 0;
}
static struct ir_raw_handler imon_handler = {
.protocols = RC_PROTO_BIT_IMON,
.decode = ir_imon_decode,
.encode = ir_imon_encode,
.carrier = 38000,
.raw_register = ir_imon_register,
.min_timeout = IMON_UNIT * IMON_BITS * 2,
};
static int __init ir_imon_decode_init(void)
{
ir_raw_handler_register(&imon_handler);
pr_info("IR iMON protocol handler initialized\n");
return 0;
}
static void __exit ir_imon_decode_exit(void)
{
ir_raw_handler_unregister(&imon_handler);
}
module_init(ir_imon_decode_init);
module_exit(ir_imon_decode_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <[email protected]>");
MODULE_DESCRIPTION("iMON IR protocol decoder");
| linux-master | drivers/media/rc/ir-imon-decoder.c |
// SPDX-License-Identifier: GPL-2.0
// SPI driven IR LED device driver
//
// Copyright (c) 2016 Samsung Electronics Co., Ltd.
// Copyright (c) Andi Shyti <[email protected]>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <media/rc-core.h>
#define IR_SPI_DRIVER_NAME "ir-spi"
#define IR_SPI_DEFAULT_FREQUENCY 38000
#define IR_SPI_MAX_BUFSIZE 4096
struct ir_spi_data {
u32 freq;
bool negated;
u16 tx_buf[IR_SPI_MAX_BUFSIZE];
u16 pulse;
u16 space;
struct rc_dev *rc;
struct spi_device *spi;
struct regulator *regulator;
};
static int ir_spi_tx(struct rc_dev *dev,
unsigned int *buffer, unsigned int count)
{
int i;
int ret;
unsigned int len = 0;
struct ir_spi_data *idata = dev->priv;
struct spi_transfer xfer;
/* convert the pulse/space signal to raw binary signal */
for (i = 0; i < count; i++) {
unsigned int periods;
int j;
u16 val;
periods = DIV_ROUND_CLOSEST(buffer[i] * idata->freq, 1000000);
if (len + periods >= IR_SPI_MAX_BUFSIZE)
return -EINVAL;
/*
* The first value in the buffer is a pulse, so even indices
* (0, 2, 4, ...) contain pulse durations, while odd indices
* (1, 3, 5, ...) contain space durations.
*/
val = (i % 2) ? idata->space : idata->pulse;
for (j = 0; j < periods; j++)
idata->tx_buf[len++] = val;
}
memset(&xfer, 0, sizeof(xfer));
xfer.speed_hz = idata->freq * 16;
xfer.len = len * sizeof(*idata->tx_buf);
xfer.tx_buf = idata->tx_buf;
ret = regulator_enable(idata->regulator);
if (ret)
return ret;
ret = spi_sync_transfer(idata->spi, &xfer, 1);
if (ret)
dev_err(&idata->spi->dev, "unable to deliver the signal\n");
regulator_disable(idata->regulator);
return ret ? ret : count;
}
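/*
* Worked example of the conversion above (illustrative only): at the
* default 38kHz carrier, a 1000us pulse expands to
* DIV_ROUND_CLOSEST(1000 * 38000, 1000000) = 38 sixteen-bit words, and
* the bus is clocked at freq * 16 = 608kHz so each word spans exactly
* one carrier period.
*/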
static int ir_spi_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
struct ir_spi_data *idata = dev->priv;
if (!carrier)
return -EINVAL;
idata->freq = carrier;
return 0;
}
static int ir_spi_set_duty_cycle(struct rc_dev *dev, u32 duty_cycle)
{
struct ir_spi_data *idata = dev->priv;
int bits = (duty_cycle * 15) / 100;
idata->pulse = GENMASK(bits, 0);
if (idata->negated) {
idata->pulse = ~idata->pulse;
idata->space = 0xffff;
} else {
idata->space = 0;
}
return 0;
}
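/*
* Worked example of the mapping above (illustrative only): for a 50%
* duty cycle, bits = (50 * 15) / 100 = 7, so the pulse pattern is
* GENMASK(7, 0) = 0x00ff, i.e. 8 of the 16 SPI bits per carrier period
* are high. With led-active-low the pattern is inverted to 0xff00 and
* spaces are driven as 0xffff.
*/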
static int ir_spi_probe(struct spi_device *spi)
{
int ret;
u8 dc;
struct ir_spi_data *idata;
idata = devm_kzalloc(&spi->dev, sizeof(*idata), GFP_KERNEL);
if (!idata)
return -ENOMEM;
idata->regulator = devm_regulator_get(&spi->dev, "irda_regulator");
if (IS_ERR(idata->regulator))
return PTR_ERR(idata->regulator);
idata->rc = devm_rc_allocate_device(&spi->dev, RC_DRIVER_IR_RAW_TX);
if (!idata->rc)
return -ENOMEM;
idata->rc->tx_ir = ir_spi_tx;
idata->rc->s_tx_carrier = ir_spi_set_tx_carrier;
idata->rc->s_tx_duty_cycle = ir_spi_set_duty_cycle;
idata->rc->device_name = "IR SPI";
idata->rc->driver_name = IR_SPI_DRIVER_NAME;
idata->rc->priv = idata;
idata->spi = spi;
idata->negated = of_property_read_bool(spi->dev.of_node,
"led-active-low");
ret = of_property_read_u8(spi->dev.of_node, "duty-cycle", &dc);
if (ret)
dc = 50;
/*
* ir_spi_set_duty_cycle() cannot fail; it returns int only to
* match the rc->s_tx_duty_cycle callback signature.
*/
ir_spi_set_duty_cycle(idata->rc, dc);
idata->freq = IR_SPI_DEFAULT_FREQUENCY;
return devm_rc_register_device(&spi->dev, idata->rc);
}
static const struct of_device_id ir_spi_of_match[] = {
{ .compatible = "ir-spi-led" },
{},
};
MODULE_DEVICE_TABLE(of, ir_spi_of_match);
static const struct spi_device_id ir_spi_ids[] = {
{ "ir-spi-led" },
{},
};
MODULE_DEVICE_TABLE(spi, ir_spi_ids);
static struct spi_driver ir_spi_driver = {
.probe = ir_spi_probe,
.id_table = ir_spi_ids,
.driver = {
.name = IR_SPI_DRIVER_NAME,
.of_match_table = ir_spi_of_match,
},
};
module_spi_driver(ir_spi_driver);
MODULE_AUTHOR("Andi Shyti <[email protected]>");
MODULE_DESCRIPTION("SPI IR LED");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/media/rc/ir-spi.c |
// SPDX-License-Identifier: GPL-2.0
// rc-ir-raw.c - handle IR pulse/space events
//
// Copyright (C) 2010 by Mauro Carvalho Chehab
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include "rc-core-priv.h"
/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);
/* Used to handle IR raw handler extensions */
DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);
static int ir_raw_event_thread(void *data)
{
struct ir_raw_event ev;
struct ir_raw_handler *handler;
struct ir_raw_event_ctrl *raw = data;
struct rc_dev *dev = raw->dev;
while (1) {
mutex_lock(&ir_raw_handler_lock);
while (kfifo_out(&raw->kfifo, &ev, 1)) {
if (is_timing_event(ev)) {
if (ev.duration == 0)
dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
if (is_timing_event(raw->prev_ev) &&
!is_transition(&ev, &raw->prev_ev))
dev_warn_once(&dev->dev, "two consecutive events of type %s",
TO_STR(ev.pulse));
}
list_for_each_entry(handler, &ir_raw_handler_list, list)
if (dev->enabled_protocols &
handler->protocols || !handler->protocols)
handler->decode(dev, ev);
lirc_raw_event(dev, ev);
raw->prev_ev = ev;
}
mutex_unlock(&ir_raw_handler_lock);
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop()) {
__set_current_state(TASK_RUNNING);
break;
} else if (!kfifo_is_empty(&raw->kfifo))
set_current_state(TASK_RUNNING);
schedule();
}
return 0;
}
/**
* ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
* @dev: the struct rc_dev device descriptor
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This routine (which may be called from an interrupt context) stores a
* pulse/space duration for the raw ir decoding state machines. Pulses are
* signalled as positive values and spaces as negative values. A zero value
* will reset the decoding state machines.
*/
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
if (!dev->raw)
return -EINVAL;
dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
ev->duration, TO_STR(ev->pulse));
if (!kfifo_put(&dev->raw->kfifo, *ev)) {
dev_err(&dev->dev, "IR event FIFO is full!\n");
return -ENOSPC;
}
return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
/**
* ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
* @dev: the struct rc_dev device descriptor
* @pulse: true for pulse, false for space
*
* This routine (which may be called from an interrupt context) is used to
* store the beginning of an ir pulse or space (or the start/end of ir
* reception) for the raw ir decoding state machines. This is used by
* hardware which does not provide durations directly but only interrupts
* (or similar events) on state change.
*/
int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
{
ktime_t now;
struct ir_raw_event ev = {};
if (!dev->raw)
return -EINVAL;
now = ktime_get();
ev.duration = ktime_to_us(ktime_sub(now, dev->raw->last_event));
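/*
* The edge marks the start of the new pulse/space, so the duration
* just measured belongs to the opposite type.
*/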
ev.pulse = !pulse;
return ir_raw_event_store_with_timeout(dev, &ev);
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
/*
* ir_raw_event_store_with_timeout() - pass a pulse/space duration to the raw
* ir decoders, schedule decoding and
* timeout
* @dev: the struct rc_dev device descriptor
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This routine (which may be called from an interrupt context) stores a
* pulse/space duration for the raw ir decoding state machines, schedules
* decoding and generates a timeout.
*/
int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev)
{
ktime_t now;
int rc = 0;
if (!dev->raw)
return -EINVAL;
now = ktime_get();
spin_lock(&dev->raw->edge_spinlock);
rc = ir_raw_event_store(dev, ev);
dev->raw->last_event = now;
/* timer could be set to timeout (125ms by default) */
if (!timer_pending(&dev->raw->edge_handle) ||
time_after(dev->raw->edge_handle.expires,
jiffies + msecs_to_jiffies(15))) {
mod_timer(&dev->raw->edge_handle,
jiffies + msecs_to_jiffies(15));
}
spin_unlock(&dev->raw->edge_spinlock);
return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout);
/**
* ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
* @dev: the struct rc_dev device descriptor
* @ev: the event that has occurred
*
* This routine (which may be called from an interrupt context) works
* in a similar manner to ir_raw_event_store_edge().
* It is intended for devices with a limited internal buffer.
* It automerges samples of the same type and handles timeouts. Returns
* non-zero if the event was added, and zero if the event was ignored due
* to idle processing.
*/
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
if (!dev->raw)
return -EINVAL;
/* Ignore spaces in idle mode */
if (dev->idle && !ev->pulse)
return 0;
else if (dev->idle)
ir_raw_event_set_idle(dev, false);
if (!dev->raw->this_ev.duration)
dev->raw->this_ev = *ev;
else if (ev->pulse == dev->raw->this_ev.pulse)
dev->raw->this_ev.duration += ev->duration;
else {
ir_raw_event_store(dev, &dev->raw->this_ev);
dev->raw->this_ev = *ev;
}
/* Enter idle mode if necessary */
if (!ev->pulse && dev->timeout &&
dev->raw->this_ev.duration >= dev->timeout)
ir_raw_event_set_idle(dev, true);
return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
/**
* ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
* @dev: the struct rc_dev device descriptor
* @idle: whether the device is idle or not
*/
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
if (!dev->raw)
return;
dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave");
if (idle) {
dev->raw->this_ev.timeout = true;
ir_raw_event_store(dev, &dev->raw->this_ev);
dev->raw->this_ev = (struct ir_raw_event) {};
}
if (dev->s_idle)
dev->s_idle(dev, idle);
dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
/**
* ir_raw_event_handle() - schedules the decoding of stored ir data
* @dev: the struct rc_dev device descriptor
*
* This routine will tell rc-core to start decoding stored ir data.
*/
void ir_raw_event_handle(struct rc_dev *dev)
{
if (!dev->raw || !dev->raw->thread)
return;
wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
/* used internally by the sysfs interface */
u64
ir_raw_get_allowed_protocols(void)
{
return atomic64_read(&available_protocols);
}
static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
{
struct ir_raw_handler *handler;
u32 timeout = 0;
mutex_lock(&ir_raw_handler_lock);
list_for_each_entry(handler, &ir_raw_handler_list, list) {
if (!(dev->enabled_protocols & handler->protocols) &&
(*rc_proto & handler->protocols) && handler->raw_register)
handler->raw_register(dev);
if ((dev->enabled_protocols & handler->protocols) &&
!(*rc_proto & handler->protocols) &&
handler->raw_unregister)
handler->raw_unregister(dev);
}
mutex_unlock(&ir_raw_handler_lock);
if (!dev->max_timeout)
return 0;
mutex_lock(&ir_raw_handler_lock);
list_for_each_entry(handler, &ir_raw_handler_list, list) {
if (handler->protocols & *rc_proto) {
if (timeout < handler->min_timeout)
timeout = handler->min_timeout;
}
}
mutex_unlock(&ir_raw_handler_lock);
if (timeout == 0)
timeout = IR_DEFAULT_TIMEOUT;
else
timeout += MS_TO_US(10);
if (timeout < dev->min_timeout)
timeout = dev->min_timeout;
else if (timeout > dev->max_timeout)
timeout = dev->max_timeout;
if (dev->s_timeout)
dev->s_timeout(dev, timeout);
else
dev->timeout = timeout;
return 0;
}
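/*
* Worked example (illustrative): if the largest min_timeout among the
* enabled handlers is 6000us, the chosen timeout becomes 6000us +
* MS_TO_US(10) = 16000us, which is then clamped to the device's
* [min_timeout, max_timeout] range.
*/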
static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
mutex_lock(&dev->lock);
dev->enabled_protocols &= ~protocols;
mutex_unlock(&dev->lock);
}
/**
* ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
* @ev: Pointer to pointer to next free event. *@ev is incremented for
* each raw event filled.
* @max: Maximum number of raw events to fill.
* @timings: Manchester modulation timings.
* @n: Number of bits of data.
* @data: Data bits to encode.
*
* Encodes the @n least significant bits of @data using Manchester (bi-phase)
* modulation with the timing characteristics described by @timings, writing up
* to @max raw IR events using the *@ev pointer.
*
* Returns: 0 on success.
* -ENOBUFS if there isn't enough space in the array to fit the
* full encoded data. In this case all @max events will have been
* written.
*/
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
const struct ir_raw_timings_manchester *timings,
unsigned int n, u64 data)
{
bool need_pulse;
u64 i;
int ret = -ENOBUFS;
i = BIT_ULL(n - 1);
if (timings->leader_pulse) {
if (!max--)
return ret;
init_ir_raw_event_duration((*ev), 1, timings->leader_pulse);
if (timings->leader_space) {
if (!max--)
return ret;
init_ir_raw_event_duration(++(*ev), 0,
timings->leader_space);
}
} else {
/* continue existing signal */
--(*ev);
}
/* from here on *ev will point to the last event rather than the next */
while (n && i > 0) {
need_pulse = !(data & i);
if (timings->invert)
need_pulse = !need_pulse;
if (need_pulse == !!(*ev)->pulse) {
(*ev)->duration += timings->clock;
} else {
if (!max--)
goto nobufs;
init_ir_raw_event_duration(++(*ev), need_pulse,
timings->clock);
}
if (!max--)
goto nobufs;
init_ir_raw_event_duration(++(*ev), !need_pulse,
timings->clock);
i >>= 1;
}
if (timings->trailer_space) {
if (!(*ev)->pulse)
(*ev)->duration += timings->trailer_space;
else if (!max--)
goto nobufs;
else
init_ir_raw_event_duration(++(*ev), 0,
timings->trailer_space);
}
ret = 0;
nobufs:
/* point to the next event rather than last event before returning */
++(*ev);
return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
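/*
* Minimal usage sketch (hypothetical, not wired up to any handler):
* encode the 12 least significant bits of @data the way protocol
* encoders such as ir_mce_kbd_encode() drive this helper. All timing
* values below are made up for illustration.
*/
static int __maybe_unused example_manchester_encode(struct ir_raw_event *events,
unsigned int max, u64 data)
{
static const struct ir_raw_timings_manchester timings = {
.leader_pulse = 8 * 444, /* hypothetical leader, in us */
.clock = 444, /* hypothetical half-bit period */
.trailer_space = 6 * 444,
};
struct ir_raw_event *e = events;
int ret = ir_raw_gen_manchester(&e, max, &timings, 12, data);
return ret < 0 ? ret : e - events;
}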
/**
* ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
* @ev: Pointer to pointer to next free event. *@ev is incremented for
* each raw event filled.
* @max: Maximum number of raw events to fill.
* @timings: Pulse distance modulation timings.
* @n: Number of bits of data.
* @data: Data bits to encode.
*
* Encodes the @n least significant bits of @data using pulse-distance
* modulation with the timing characteristics described by @timings, writing up
* to @max raw IR events using the *@ev pointer.
*
* Returns: 0 on success.
* -ENOBUFS if there isn't enough space in the array to fit the
* full encoded data. In this case all @max events will have been
* written.
*/
int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
const struct ir_raw_timings_pd *timings,
unsigned int n, u64 data)
{
int i;
int ret;
unsigned int space;
if (timings->header_pulse) {
ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
timings->header_space);
if (ret)
return ret;
}
if (timings->msb_first) {
for (i = n - 1; i >= 0; --i) {
space = timings->bit_space[(data >> i) & 1];
ret = ir_raw_gen_pulse_space(ev, &max,
timings->bit_pulse,
space);
if (ret)
return ret;
}
} else {
for (i = 0; i < n; ++i, data >>= 1) {
space = timings->bit_space[data & 1];
ret = ir_raw_gen_pulse_space(ev, &max,
timings->bit_pulse,
space);
if (ret)
return ret;
}
}
ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
timings->trailer_space);
return ret;
}
EXPORT_SYMBOL(ir_raw_gen_pd);
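/*
* Minimal usage sketch (hypothetical NEC-like timings, illustrative
* only): in pulse-distance modulation the pulse width is fixed and the
* length of the following space encodes the bit value.
*/
static int __maybe_unused example_pd_encode(struct ir_raw_event *events,
unsigned int max, u32 data)
{
static const struct ir_raw_timings_pd timings = {
.header_pulse = 9000, /* all values in us, made up */
.header_space = 4500,
.bit_pulse = 560,
.bit_space[0] = 560,
.bit_space[1] = 1690,
.trailer_pulse = 560,
.trailer_space = 30000,
.msb_first = 1,
};
struct ir_raw_event *e = events;
int ret = ir_raw_gen_pd(&e, max, &timings, 32, data);
return ret < 0 ? ret : e - events;
}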
/**
* ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
* @ev: Pointer to pointer to next free event. *@ev is incremented for
* each raw event filled.
* @max: Maximum number of raw events to fill.
* @timings: Pulse length modulation timings.
* @n: Number of bits of data.
* @data: Data bits to encode.
*
* Encodes the @n least significant bits of @data using pulse-length
* modulation with the timing characteristics described by @timings, writing up
* to @max raw IR events using the *@ev pointer.
*
* Returns: 0 on success.
* -ENOBUFS if there isn't enough space in the array to fit the
* full encoded data. In this case all @max events will have been
* written.
*/
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
const struct ir_raw_timings_pl *timings,
unsigned int n, u64 data)
{
int i;
int ret = -ENOBUFS;
unsigned int pulse;
if (!max--)
return ret;
init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);
if (timings->msb_first) {
for (i = n - 1; i >= 0; --i) {
if (!max--)
return ret;
init_ir_raw_event_duration((*ev)++, 0,
timings->bit_space);
if (!max--)
return ret;
pulse = timings->bit_pulse[(data >> i) & 1];
init_ir_raw_event_duration((*ev)++, 1, pulse);
}
} else {
for (i = 0; i < n; ++i, data >>= 1) {
if (!max--)
return ret;
init_ir_raw_event_duration((*ev)++, 0,
timings->bit_space);
if (!max--)
return ret;
pulse = timings->bit_pulse[data & 1];
init_ir_raw_event_duration((*ev)++, 1, pulse);
}
}
if (!max--)
return ret;
init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);
return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);
/**
* ir_raw_encode_scancode() - Encode a scancode as raw events
*
* @protocol: protocol
* @scancode: scancode filter describing a single scancode
* @events: array of raw events to write into
* @max: max number of raw events
*
* Attempts to encode the scancode as raw events.
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
* -EINVAL if the scancode is ambiguous or invalid, or if no
* compatible encoder was found.
*/
int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max)
{
struct ir_raw_handler *handler;
int ret = -EINVAL;
u64 mask = 1ULL << protocol;
ir_raw_load_modules(&mask);
mutex_lock(&ir_raw_handler_lock);
list_for_each_entry(handler, &ir_raw_handler_list, list) {
if (handler->protocols & mask && handler->encode) {
ret = handler->encode(protocol, scancode, events, max);
if (ret >= 0 || ret == -ENOBUFS)
break;
}
}
mutex_unlock(&ir_raw_handler_lock);
return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
/**
* ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing
*
* @t: timer_list
*
* This callback is armed by ir_raw_event_store_edge(). It does two things:
* first, rather than calling ir_raw_event_handle() for each edge and
* waking up the rc thread every time, ir_raw_event_handle() is called
* 15 ms after the first edge. Secondly, it generates a timeout event if
* no more IR is received after the rc_dev timeout.
*/
static void ir_raw_edge_handle(struct timer_list *t)
{
struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
struct rc_dev *dev = raw->dev;
unsigned long flags;
ktime_t interval;
spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
interval = ktime_sub(ktime_get(), dev->raw->last_event);
if (ktime_to_us(interval) >= dev->timeout) {
struct ir_raw_event ev = {
.timeout = true,
.duration = ktime_to_us(interval)
};
ir_raw_event_store(dev, &ev);
} else {
mod_timer(&dev->raw->edge_handle,
jiffies + usecs_to_jiffies(dev->timeout -
ktime_to_us(interval)));
}
spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);
ir_raw_event_handle(dev);
}
/**
* ir_raw_encode_carrier() - Get carrier used for protocol
*
* @protocol: protocol
*
* Attempts to find the carrier for the specified protocol
*
* Returns: The carrier in Hz
* -EINVAL if the protocol is invalid, or if no
* compatible encoder was found.
*/
int ir_raw_encode_carrier(enum rc_proto protocol)
{
struct ir_raw_handler *handler;
int ret = -EINVAL;
u64 mask = BIT_ULL(protocol);
mutex_lock(&ir_raw_handler_lock);
list_for_each_entry(handler, &ir_raw_handler_list, list) {
if (handler->protocols & mask && handler->encode) {
ret = handler->carrier;
break;
}
}
mutex_unlock(&ir_raw_handler_lock);
return ret;
}
EXPORT_SYMBOL(ir_raw_encode_carrier);
/*
* Used to (un)register raw event clients
*/
int ir_raw_event_prepare(struct rc_dev *dev)
{
if (!dev)
return -EINVAL;
dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
if (!dev->raw)
return -ENOMEM;
dev->raw->dev = dev;
dev->change_protocol = change_protocol;
dev->idle = true;
spin_lock_init(&dev->raw->edge_spinlock);
timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0);
INIT_KFIFO(dev->raw->kfifo);
return 0;
}
int ir_raw_event_register(struct rc_dev *dev)
{
struct task_struct *thread;
thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor);
if (IS_ERR(thread))
return PTR_ERR(thread);
dev->raw->thread = thread;
mutex_lock(&ir_raw_handler_lock);
list_add_tail(&dev->raw->list, &ir_raw_client_list);
mutex_unlock(&ir_raw_handler_lock);
return 0;
}
void ir_raw_event_free(struct rc_dev *dev)
{
if (!dev)
return;
kfree(dev->raw);
dev->raw = NULL;
}
void ir_raw_event_unregister(struct rc_dev *dev)
{
struct ir_raw_handler *handler;
if (!dev || !dev->raw)
return;
kthread_stop(dev->raw->thread);
del_timer_sync(&dev->raw->edge_handle);
mutex_lock(&ir_raw_handler_lock);
list_del(&dev->raw->list);
list_for_each_entry(handler, &ir_raw_handler_list, list)
if (handler->raw_unregister &&
(handler->protocols & dev->enabled_protocols))
handler->raw_unregister(dev);
lirc_bpf_free(dev);
ir_raw_event_free(dev);
/*
* A user can be calling bpf(BPF_PROG_{QUERY|ATTACH|DETACH}), so
* ensure that the raw member is null on unlock; this is how
* "device gone" is checked.
*/
mutex_unlock(&ir_raw_handler_lock);
}
/*
* Extension interface - used to register the IR decoders
*/
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
mutex_lock(&ir_raw_handler_lock);
list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
atomic64_or(ir_raw_handler->protocols, &available_protocols);
mutex_unlock(&ir_raw_handler_lock);
return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
struct ir_raw_event_ctrl *raw;
u64 protocols = ir_raw_handler->protocols;
mutex_lock(&ir_raw_handler_lock);
list_del(&ir_raw_handler->list);
list_for_each_entry(raw, &ir_raw_client_list, list) {
if (ir_raw_handler->raw_unregister &&
(raw->dev->enabled_protocols & protocols))
ir_raw_handler->raw_unregister(raw->dev);
ir_raw_disable_protocols(raw->dev, protocols);
}
atomic64_andnot(protocols, &available_protocols);
mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
| linux-master | drivers/media/rc/rc-ir-raw.c |
// SPDX-License-Identifier: GPL-2.0-only
/* ir-sony-decoder.c - handle Sony IR Pulse/Space protocol
*
* Copyright (C) 2010 by David Härdeman <[email protected]>
*/
#include <linux/bitrev.h>
#include <linux/module.h>
#include "rc-core-priv.h"
#define SONY_UNIT 600 /* us */
#define SONY_HEADER_PULSE (4 * SONY_UNIT)
#define SONY_HEADER_SPACE (1 * SONY_UNIT)
#define SONY_BIT_0_PULSE (1 * SONY_UNIT)
#define SONY_BIT_1_PULSE (2 * SONY_UNIT)
#define SONY_BIT_SPACE (1 * SONY_UNIT)
#define SONY_TRAILER_SPACE (10 * SONY_UNIT) /* minimum */
enum sony_state {
STATE_INACTIVE,
STATE_HEADER_SPACE,
STATE_BIT_PULSE,
STATE_BIT_SPACE,
STATE_FINISHED,
};
/**
* ir_sony_decode() - Decode one Sony pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct sony_dec *data = &dev->raw->sony;
enum rc_proto protocol;
u32 scancode;
u8 device, subdevice, function;
if (!is_timing_event(ev)) {
if (ev.overflow)
data->state = STATE_INACTIVE;
return 0;
}
if (!geq_margin(ev.duration, SONY_UNIT, SONY_UNIT / 2))
goto out;
dev_dbg(&dev->dev, "Sony decode started at state %d (%uus %s)\n",
data->state, ev.duration, TO_STR(ev.pulse));
switch (data->state) {
case STATE_INACTIVE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, SONY_HEADER_PULSE, SONY_UNIT / 2))
break;
data->count = 0;
data->state = STATE_HEADER_SPACE;
return 0;
case STATE_HEADER_SPACE:
if (ev.pulse)
break;
if (!eq_margin(ev.duration, SONY_HEADER_SPACE, SONY_UNIT / 2))
break;
data->state = STATE_BIT_PULSE;
return 0;
case STATE_BIT_PULSE:
if (!ev.pulse)
break;
data->bits <<= 1;
if (eq_margin(ev.duration, SONY_BIT_1_PULSE, SONY_UNIT / 2))
data->bits |= 1;
else if (!eq_margin(ev.duration, SONY_BIT_0_PULSE, SONY_UNIT / 2))
break;
data->count++;
data->state = STATE_BIT_SPACE;
return 0;
case STATE_BIT_SPACE:
if (ev.pulse)
break;
if (!geq_margin(ev.duration, SONY_BIT_SPACE, SONY_UNIT / 2))
break;
decrease_duration(&ev, SONY_BIT_SPACE);
if (!geq_margin(ev.duration, SONY_UNIT, SONY_UNIT / 2)) {
data->state = STATE_BIT_PULSE;
return 0;
}
data->state = STATE_FINISHED;
fallthrough;
case STATE_FINISHED:
if (ev.pulse)
break;
if (!geq_margin(ev.duration, SONY_TRAILER_SPACE, SONY_UNIT / 2))
break;
switch (data->count) {
case 12:
if (!(dev->enabled_protocols & RC_PROTO_BIT_SONY12))
goto finish_state_machine;
device = bitrev8((data->bits << 3) & 0xF8);
subdevice = 0;
function = bitrev8((data->bits >> 4) & 0xFE);
protocol = RC_PROTO_SONY12;
break;
case 15:
if (!(dev->enabled_protocols & RC_PROTO_BIT_SONY15))
goto finish_state_machine;
device = bitrev8((data->bits >> 0) & 0xFF);
subdevice = 0;
function = bitrev8((data->bits >> 7) & 0xFE);
protocol = RC_PROTO_SONY15;
break;
case 20:
if (!(dev->enabled_protocols & RC_PROTO_BIT_SONY20))
goto finish_state_machine;
device = bitrev8((data->bits >> 5) & 0xF8);
subdevice = bitrev8((data->bits >> 0) & 0xFF);
function = bitrev8((data->bits >> 12) & 0xFE);
protocol = RC_PROTO_SONY20;
break;
default:
dev_dbg(&dev->dev, "Sony invalid bitcount %u\n",
data->count);
goto out;
}
scancode = device << 16 | subdevice << 8 | function;
dev_dbg(&dev->dev, "Sony(%u) scancode 0x%05x\n", data->count,
scancode);
rc_keydown(dev, protocol, scancode, 0);
goto finish_state_machine;
}
out:
dev_dbg(&dev->dev, "Sony decode failed at state %d (%uus %s)\n",
data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
finish_state_machine:
data->state = STATE_INACTIVE;
return 0;
}
static const struct ir_raw_timings_pl ir_sony_timings = {
.header_pulse = SONY_HEADER_PULSE,
.bit_space = SONY_BIT_SPACE,
.bit_pulse[0] = SONY_BIT_0_PULSE,
.bit_pulse[1] = SONY_BIT_1_PULSE,
.trailer_space = SONY_TRAILER_SPACE + SONY_BIT_SPACE,
.msb_first = 0,
};
/**
* ir_sony_encode() - Encode a scancode as a stream of raw events
*
* @protocol: protocol to encode
* @scancode: scancode to encode
* @events: array of raw ir events to write into
* @max: maximum size of @events
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
*/
static int ir_sony_encode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max)
{
struct ir_raw_event *e = events;
u32 raw, len;
int ret;
if (protocol == RC_PROTO_SONY12) {
raw = (scancode & 0x7f) | ((scancode & 0x1f0000) >> 9);
len = 12;
} else if (protocol == RC_PROTO_SONY15) {
raw = (scancode & 0x7f) | ((scancode & 0xff0000) >> 9);
len = 15;
} else {
raw = (scancode & 0x7f) | ((scancode & 0x1f0000) >> 9) |
((scancode & 0xff00) << 4);
len = 20;
}
ret = ir_raw_gen_pl(&e, max, &ir_sony_timings, len, raw);
if (ret < 0)
return ret;
return e - events;
}
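/*
* Worked example (illustrative): for RC_PROTO_SONY12 scancode 0x10015
* (device 1, function 0x15), raw = 0x15 | (0x10000 >> 9) = 0x95 and
* len = 12; ir_raw_gen_pl() then renders the bits LSB first, i.e. the
* 7 function bits followed by the 5 device bits.
*/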
static struct ir_raw_handler sony_handler = {
.protocols = RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 |
RC_PROTO_BIT_SONY20,
.decode = ir_sony_decode,
.encode = ir_sony_encode,
.carrier = 40000,
.min_timeout = SONY_TRAILER_SPACE,
};
static int __init ir_sony_decode_init(void)
{
ir_raw_handler_register(&sony_handler);
printk(KERN_INFO "IR Sony protocol handler initialized\n");
return 0;
}
static void __exit ir_sony_decode_exit(void)
{
ir_raw_handler_unregister(&sony_handler);
}
module_init(ir_sony_decode_init);
module_exit(ir_sony_decode_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Härdeman <[email protected]>");
MODULE_DESCRIPTION("Sony IR protocol decoder");
| linux-master | drivers/media/rc/ir-sony-decoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* meson-ir-tx.c - Amlogic Meson IR TX driver
*
* Copyright (c) 2021, SberDevices. All Rights Reserved.
*
* Author: Viktor Prutyanov <[email protected]>
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#define DEVICE_NAME "Meson IR TX"
#define DRIVER_NAME "meson-ir-tx"
#define MIRTX_DEFAULT_CARRIER 38000
#define MIRTX_DEFAULT_DUTY_CYCLE 50
#define MIRTX_FIFO_THD 32
#define IRB_MOD_1US_CLK_RATE 1000000
#define IRB_FIFO_LEN 128
#define IRB_ADDR0 0x0
#define IRB_ADDR1 0x4
#define IRB_ADDR2 0x8
#define IRB_ADDR3 0xc
#define IRB_MAX_DELAY (1 << 10)
#define IRB_DELAY_MASK (IRB_MAX_DELAY - 1)
/* IRCTRL_IR_BLASTER_ADDR0 */
#define IRB_MOD_CLK(x) ((x) << 12)
#define IRB_MOD_SYS_CLK 0
#define IRB_MOD_XTAL3_CLK 1
#define IRB_MOD_1US_CLK 2
#define IRB_MOD_10US_CLK 3
#define IRB_INIT_HIGH BIT(2)
#define IRB_ENABLE BIT(0)
/* IRCTRL_IR_BLASTER_ADDR2 */
#define IRB_MOD_COUNT(lo, hi) ((((lo) - 1) << 16) | ((hi) - 1))
/* IRCTRL_IR_BLASTER_ADDR2 */
#define IRB_WRITE_FIFO BIT(16)
#define IRB_MOD_ENABLE BIT(12)
#define IRB_TB_1US (0x0 << 10)
#define IRB_TB_10US (0x1 << 10)
#define IRB_TB_100US (0x2 << 10)
#define IRB_TB_MOD_CLK (0x3 << 10)
/* IRCTRL_IR_BLASTER_ADDR3 */
#define IRB_FIFO_THD_PENDING BIT(16)
#define IRB_FIFO_IRQ_ENABLE BIT(8)
struct meson_irtx {
struct device *dev;
void __iomem *reg_base;
u32 *buf;
unsigned int buf_len;
unsigned int buf_head;
unsigned int carrier;
unsigned int duty_cycle;
/* Locks buf */
spinlock_t lock;
struct completion completion;
unsigned long clk_rate;
};
static void meson_irtx_set_mod(struct meson_irtx *ir)
{
unsigned int cnt = DIV_ROUND_CLOSEST(ir->clk_rate, ir->carrier);
unsigned int pulse_cnt = DIV_ROUND_CLOSEST(cnt * ir->duty_cycle, 100);
unsigned int space_cnt = cnt - pulse_cnt;
dev_dbg(ir->dev, "F_mod = %uHz, T_mod = %luns, duty_cycle = %u%%\n",
ir->carrier, NSEC_PER_SEC / ir->clk_rate * cnt,
100 * pulse_cnt / cnt);
writel(IRB_MOD_COUNT(pulse_cnt, space_cnt),
ir->reg_base + IRB_ADDR1);
}
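/*
* Worked example (illustrative): with the 1us fallback clock
* (clk_rate = 1000000) and the default 38kHz carrier, cnt =
* DIV_ROUND_CLOSEST(1000000, 38000) = 26 ticks per carrier period; at
* the default 50% duty cycle, pulse_cnt = 13 and space_cnt = 13.
*/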
static void meson_irtx_setup(struct meson_irtx *ir, unsigned int clk_nr)
{
/*
* Disable the transmitter, select the modulator clock tick and force
* the output high initially. Set up the carrier frequency and duty
* cycle, then release the forced-high output. Enable the FIFO
* interrupt and set its threshold. Finally, re-enable the transmitter.
*/
writel(~IRB_ENABLE & (IRB_MOD_CLK(clk_nr) | IRB_INIT_HIGH),
ir->reg_base + IRB_ADDR0);
meson_irtx_set_mod(ir);
writel(readl(ir->reg_base + IRB_ADDR0) & ~IRB_INIT_HIGH,
ir->reg_base + IRB_ADDR0);
writel(IRB_FIFO_IRQ_ENABLE | MIRTX_FIFO_THD,
ir->reg_base + IRB_ADDR3);
writel(readl(ir->reg_base + IRB_ADDR0) | IRB_ENABLE,
ir->reg_base + IRB_ADDR0);
}
static u32 meson_irtx_prepare_pulse(struct meson_irtx *ir, unsigned int time)
{
unsigned int delay;
unsigned int tb = IRB_TB_MOD_CLK;
unsigned int tb_us = DIV_ROUND_CLOSEST(USEC_PER_SEC, ir->carrier);
delay = (DIV_ROUND_CLOSEST(time, tb_us) - 1) & IRB_DELAY_MASK;
return ((IRB_WRITE_FIFO | IRB_MOD_ENABLE) | tb | delay);
}
static u32 meson_irtx_prepare_space(struct meson_irtx *ir, unsigned int time)
{
unsigned int delay;
unsigned int tb = IRB_TB_100US;
unsigned int tb_us = 100;
if (time <= IRB_MAX_DELAY) {
tb = IRB_TB_1US;
tb_us = 1;
} else if (time <= 10 * IRB_MAX_DELAY) {
tb = IRB_TB_10US;
tb_us = 10;
} else if (time <= 100 * IRB_MAX_DELAY) {
tb = IRB_TB_100US;
tb_us = 100;
}
delay = (DIV_ROUND_CLOSEST(time, tb_us) - 1) & IRB_DELAY_MASK;
return ((IRB_WRITE_FIFO & ~IRB_MOD_ENABLE) | tb | delay);
}
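/*
* Worked example (illustrative): IRB_MAX_DELAY is 1024, so a 5000us
* space does not fit the 1us timebase (max 1024us) but does fit the
* 10us one: tb = IRB_TB_10US and
* delay = DIV_ROUND_CLOSEST(5000, 10) - 1 = 499.
*/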
static void meson_irtx_send_buffer(struct meson_irtx *ir)
{
unsigned int nr = 0;
unsigned int max_fifo_level = IRB_FIFO_LEN - MIRTX_FIFO_THD;
while (ir->buf_head < ir->buf_len && nr < max_fifo_level) {
writel(ir->buf[ir->buf_head], ir->reg_base + IRB_ADDR2);
ir->buf_head++;
nr++;
}
}
static bool meson_irtx_check_buf(struct meson_irtx *ir,
unsigned int *buf, unsigned int len)
{
unsigned int i;
for (i = 0; i < len; i++) {
unsigned int max_tb_us;
/*
* The maximum space timebase is 100 us, while the pulse
* timebase equals the carrier period.
*/
if (i % 2 == 0)
max_tb_us = USEC_PER_SEC / ir->carrier;
else
max_tb_us = 100;
if (buf[i] >= max_tb_us * IRB_MAX_DELAY)
return false;
}
return true;
}
static void meson_irtx_fill_buf(struct meson_irtx *ir, u32 *dst_buf,
unsigned int *src_buf, unsigned int len)
{
unsigned int i;
for (i = 0; i < len; i++) {
if (i % 2 == 0)
dst_buf[i] = meson_irtx_prepare_pulse(ir, src_buf[i]);
else
dst_buf[i] = meson_irtx_prepare_space(ir, src_buf[i]);
}
}
static irqreturn_t meson_irtx_irqhandler(int irq, void *data)
{
unsigned long flags;
struct meson_irtx *ir = data;
writel(readl(ir->reg_base + IRB_ADDR3) & ~IRB_FIFO_THD_PENDING,
ir->reg_base + IRB_ADDR3);
if (completion_done(&ir->completion))
return IRQ_HANDLED;
spin_lock_irqsave(&ir->lock, flags);
if (ir->buf_head < ir->buf_len)
meson_irtx_send_buffer(ir);
else
complete(&ir->completion);
spin_unlock_irqrestore(&ir->lock, flags);
return IRQ_HANDLED;
}
static int meson_irtx_set_carrier(struct rc_dev *rc, u32 carrier)
{
struct meson_irtx *ir = rc->priv;
if (carrier == 0)
return -EINVAL;
ir->carrier = carrier;
meson_irtx_set_mod(ir);
return 0;
}
static int meson_irtx_set_duty_cycle(struct rc_dev *rc, u32 duty_cycle)
{
struct meson_irtx *ir = rc->priv;
ir->duty_cycle = duty_cycle;
meson_irtx_set_mod(ir);
return 0;
}
static void meson_irtx_update_buf(struct meson_irtx *ir, u32 *buf,
unsigned int len, unsigned int head)
{
ir->buf = buf;
ir->buf_len = len;
ir->buf_head = head;
}
static int meson_irtx_transmit(struct rc_dev *rc, unsigned int *buf,
unsigned int len)
{
unsigned long flags;
struct meson_irtx *ir = rc->priv;
u32 *tx_buf;
int ret = len;
if (!meson_irtx_check_buf(ir, buf, len))
return -EINVAL;
tx_buf = kmalloc_array(len, sizeof(u32), GFP_KERNEL);
if (!tx_buf)
return -ENOMEM;
meson_irtx_fill_buf(ir, tx_buf, buf, len);
dev_dbg(ir->dev, "TX buffer filled, length = %u\n", len);
spin_lock_irqsave(&ir->lock, flags);
meson_irtx_update_buf(ir, tx_buf, len, 0);
reinit_completion(&ir->completion);
meson_irtx_send_buffer(ir);
spin_unlock_irqrestore(&ir->lock, flags);
if (!wait_for_completion_timeout(&ir->completion,
usecs_to_jiffies(IR_MAX_DURATION)))
ret = -ETIMEDOUT;
spin_lock_irqsave(&ir->lock, flags);
kfree(ir->buf);
meson_irtx_update_buf(ir, NULL, 0, 0);
spin_unlock_irqrestore(&ir->lock, flags);
return ret;
}
static int meson_irtx_mod_clock_probe(struct meson_irtx *ir,
unsigned int *clk_nr)
{
struct device_node *np = ir->dev->of_node;
struct clk *clock;
if (!np)
return -ENODEV;
clock = devm_clk_get(ir->dev, "xtal");
if (IS_ERR(clock) || clk_prepare_enable(clock))
return -ENODEV;
*clk_nr = IRB_MOD_XTAL3_CLK;
ir->clk_rate = clk_get_rate(clock) / 3;
if (ir->clk_rate < IRB_MOD_1US_CLK_RATE) {
*clk_nr = IRB_MOD_1US_CLK;
ir->clk_rate = IRB_MOD_1US_CLK_RATE;
}
dev_info(ir->dev, "F_clk = %luHz\n", ir->clk_rate);
return 0;
}
static int __init meson_irtx_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_irtx *ir;
struct rc_dev *rc;
int irq;
unsigned int clk_nr;
int ret;
ir = devm_kzalloc(dev, sizeof(*ir), GFP_KERNEL);
if (!ir)
return -ENOMEM;
ir->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->reg_base))
return PTR_ERR(ir->reg_base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENODEV;
ir->dev = dev;
ir->carrier = MIRTX_DEFAULT_CARRIER;
ir->duty_cycle = MIRTX_DEFAULT_DUTY_CYCLE;
init_completion(&ir->completion);
spin_lock_init(&ir->lock);
ret = meson_irtx_mod_clock_probe(ir, &clk_nr);
if (ret) {
dev_err(dev, "modulator clock setup failed\n");
return ret;
}
meson_irtx_setup(ir, clk_nr);
ret = devm_request_irq(dev, irq,
meson_irtx_irqhandler,
IRQF_TRIGGER_RISING,
DRIVER_NAME, ir);
if (ret) {
dev_err(dev, "irq request failed\n");
return ret;
}
rc = rc_allocate_device(RC_DRIVER_IR_RAW_TX);
if (!rc)
return -ENOMEM;
rc->driver_name = DRIVER_NAME;
rc->device_name = DEVICE_NAME;
rc->priv = ir;
rc->tx_ir = meson_irtx_transmit;
rc->s_tx_carrier = meson_irtx_set_carrier;
rc->s_tx_duty_cycle = meson_irtx_set_duty_cycle;
ret = rc_register_device(rc);
if (ret < 0) {
dev_err(dev, "rc_dev registration failed\n");
rc_free_device(rc);
return ret;
}
platform_set_drvdata(pdev, rc);
return 0;
}
static void meson_irtx_remove(struct platform_device *pdev)
{
struct rc_dev *rc = platform_get_drvdata(pdev);
rc_unregister_device(rc);
}
static const struct of_device_id meson_irtx_dt_match[] = {
{
.compatible = "amlogic,meson-g12a-ir-tx",
},
{},
};
MODULE_DEVICE_TABLE(of, meson_irtx_dt_match);
static struct platform_driver meson_irtx_pd = {
.remove_new = meson_irtx_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_irtx_dt_match,
},
};
module_platform_driver_probe(meson_irtx_pd, meson_irtx_probe);
MODULE_DESCRIPTION("Meson IR TX driver");
MODULE_AUTHOR("Viktor Prutyanov <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/rc/meson-ir-tx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Infrared Toy and IR Droid RC core driver
*
* Copyright (C) 2020 Sean Young <[email protected]>
*
* http://dangerousprototypes.com/docs/USB_IR_Toy:_Sampling_mode
*
* This driver is based on the lirc driver which can be found here:
* https://sourceforge.net/p/lirc/git/ci/master/tree/plugins/irtoy.c
* Copyright (C) 2011 Peter Kooiman <[email protected]>
*/
#include <asm/unaligned.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/usb/input.h>
#include <media/rc-core.h>
static const u8 COMMAND_VERSION[] = { 'v' };
// End transmit and repeat reset command so we exit sump mode
static const u8 COMMAND_RESET[] = { 0xff, 0xff, 0, 0, 0, 0, 0 };
static const u8 COMMAND_SMODE_ENTER[] = { 's' };
static const u8 COMMAND_SMODE_EXIT[] = { 0 };
static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };
#define REPLY_XMITCOUNT 't'
#define REPLY_XMITSUCCESS 'C'
#define REPLY_VERSION 'V'
#define REPLY_SAMPLEMODEPROTO 'S'
#define TIMEOUT 500
#define LEN_XMITRES 3
#define LEN_VERSION 4
#define LEN_SAMPLEMODEPROTO 3
#define MIN_FW_VERSION 20
#define UNIT_US 21
#define MAX_TIMEOUT_US (UNIT_US * U16_MAX)
#define MAX_PACKET 64
enum state {
STATE_IRDATA,
STATE_COMMAND_NO_RESP,
STATE_COMMAND,
STATE_TX,
};
struct irtoy {
struct device *dev;
struct usb_device *usbdev;
struct rc_dev *rc;
struct urb *urb_in, *urb_out;
u8 *in;
u8 *out;
struct completion command_done;
bool pulse;
enum state state;
void *tx_buf;
uint tx_len;
uint emitted;
uint hw_version;
uint sw_version;
uint proto_version;
char phys[64];
};
static void irtoy_response(struct irtoy *irtoy, u32 len)
{
switch (irtoy->state) {
case STATE_COMMAND:
if (len == LEN_VERSION && irtoy->in[0] == REPLY_VERSION) {
uint version;
irtoy->in[LEN_VERSION] = 0;
if (kstrtouint(irtoy->in + 1, 10, &version)) {
dev_err(irtoy->dev, "invalid version %*phN. Please make sure you are using firmware v20 or higher",
LEN_VERSION, irtoy->in);
break;
}
dev_dbg(irtoy->dev, "version %s\n", irtoy->in);
irtoy->hw_version = version / 100;
irtoy->sw_version = version % 100;
irtoy->state = STATE_IRDATA;
complete(&irtoy->command_done);
} else if (len == LEN_SAMPLEMODEPROTO &&
irtoy->in[0] == REPLY_SAMPLEMODEPROTO) {
uint version;
irtoy->in[LEN_SAMPLEMODEPROTO] = 0;
if (kstrtouint(irtoy->in + 1, 10, &version)) {
dev_err(irtoy->dev, "invalid sample mode response %*phN",
LEN_SAMPLEMODEPROTO, irtoy->in);
return;
}
dev_dbg(irtoy->dev, "protocol %s\n", irtoy->in);
irtoy->proto_version = version;
irtoy->state = STATE_IRDATA;
complete(&irtoy->command_done);
} else {
dev_err(irtoy->dev, "unexpected response to command: %*phN\n",
len, irtoy->in);
}
break;
case STATE_COMMAND_NO_RESP:
case STATE_IRDATA: {
struct ir_raw_event rawir = { .pulse = irtoy->pulse };
__be16 *in = (__be16 *)irtoy->in;
int i;
for (i = 0; i < len / sizeof(__be16); i++) {
u16 v = be16_to_cpu(in[i]);
if (v == 0xffff) {
rawir.pulse = false;
} else {
rawir.duration = v * UNIT_US;
ir_raw_event_store_with_timeout(irtoy->rc,
&rawir);
}
rawir.pulse = !rawir.pulse;
}
irtoy->pulse = rawir.pulse;
ir_raw_event_handle(irtoy->rc);
break;
}
case STATE_TX:
if (irtoy->tx_len == 0) {
if (len == LEN_XMITRES &&
irtoy->in[0] == REPLY_XMITCOUNT) {
u16 emitted = get_unaligned_be16(irtoy->in + 1);
dev_dbg(irtoy->dev, "emitted:%u\n", emitted);
irtoy->emitted = emitted;
} else if (len == 1 &&
irtoy->in[0] == REPLY_XMITSUCCESS) {
irtoy->state = STATE_IRDATA;
complete(&irtoy->command_done);
}
} else {
// send next part of tx buffer
uint space = irtoy->in[0];
uint buf_len;
int err;
if (len != 1 || space > MAX_PACKET || space == 0) {
dev_dbg(irtoy->dev, "packet length expected: %*phN\n",
len, irtoy->in);
break;
}
buf_len = min(space, irtoy->tx_len);
dev_dbg(irtoy->dev, "remaining:%u sending:%u\n",
irtoy->tx_len, buf_len);
memcpy(irtoy->out, irtoy->tx_buf, buf_len);
irtoy->urb_out->transfer_buffer_length = buf_len;
err = usb_submit_urb(irtoy->urb_out, GFP_ATOMIC);
if (err != 0) {
dev_err(irtoy->dev, "fail to submit tx buf urb: %d\n",
err);
irtoy->state = STATE_IRDATA;
complete(&irtoy->command_done);
break;
}
irtoy->tx_buf += buf_len;
irtoy->tx_len -= buf_len;
}
break;
}
}
static void irtoy_out_callback(struct urb *urb)
{
struct irtoy *irtoy = urb->context;
if (urb->status == 0) {
if (irtoy->state == STATE_COMMAND_NO_RESP)
complete(&irtoy->command_done);
} else {
dev_warn(irtoy->dev, "out urb status: %d\n", urb->status);
}
}
static void irtoy_in_callback(struct urb *urb)
{
struct irtoy *irtoy = urb->context;
int ret;
switch (urb->status) {
case 0:
irtoy_response(irtoy, urb->actual_length);
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
case -EPROTO:
case -EPIPE:
usb_unlink_urb(urb);
return;
default:
dev_dbg(irtoy->dev, "in urb status: %d\n", urb->status);
}
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret && ret != -ENODEV)
dev_warn(irtoy->dev, "failed to resubmit urb: %d\n", ret);
}
static int irtoy_command(struct irtoy *irtoy, const u8 *cmd, int cmd_len,
enum state state)
{
int err;
init_completion(&irtoy->command_done);
irtoy->state = state;
memcpy(irtoy->out, cmd, cmd_len);
irtoy->urb_out->transfer_buffer_length = cmd_len;
err = usb_submit_urb(irtoy->urb_out, GFP_KERNEL);
if (err != 0)
return err;
if (!wait_for_completion_timeout(&irtoy->command_done,
msecs_to_jiffies(TIMEOUT))) {
usb_kill_urb(irtoy->urb_out);
return -ETIMEDOUT;
}
return 0;
}
static int irtoy_setup(struct irtoy *irtoy)
{
int err;
err = irtoy_command(irtoy, COMMAND_RESET, sizeof(COMMAND_RESET),
STATE_COMMAND_NO_RESP);
if (err != 0) {
dev_err(irtoy->dev, "could not write reset command: %d\n",
err);
return err;
}
usleep_range(50, 50);
// get version
err = irtoy_command(irtoy, COMMAND_VERSION, sizeof(COMMAND_VERSION),
STATE_COMMAND);
if (err) {
dev_err(irtoy->dev, "could not write version command: %d\n",
err);
return err;
}
// enter sample mode
err = irtoy_command(irtoy, COMMAND_SMODE_ENTER,
sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
if (err)
dev_err(irtoy->dev, "could not write sample command: %d\n",
err);
return err;
}
/*
* When sending IR, it is imperative that we send the IR data as quickly
* as possible to the device, so it does not run out of IR data and
* introduce gaps. Allocate the buffer here, and then feed the data from
* the urb callback handler.
*/
static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
{
struct irtoy *irtoy = rc->priv;
unsigned int i, size;
__be16 *buf;
int err;
size = sizeof(u16) * (count + 1);
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
for (i = 0; i < count; i++) {
u16 v = DIV_ROUND_CLOSEST(txbuf[i], UNIT_US);
if (!v)
v = 1;
buf[i] = cpu_to_be16(v);
}
buf[count] = cpu_to_be16(0xffff);
irtoy->tx_buf = buf;
irtoy->tx_len = size;
irtoy->emitted = 0;
// There is an issue where, if the unit is receiving IR while the
// first TXSTART command is sent, the device might end up hanging
// with its LED on. It does not respond to any command when this
// happens. To work around this, re-enter sample mode.
err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,
sizeof(COMMAND_SMODE_EXIT), STATE_COMMAND_NO_RESP);
if (err) {
dev_err(irtoy->dev, "exit sample mode: %d\n", err);
return err;
}
err = irtoy_command(irtoy, COMMAND_SMODE_ENTER,
sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
if (err) {
dev_err(irtoy->dev, "enter sample mode: %d\n", err);
return err;
}
err = irtoy_command(irtoy, COMMAND_TXSTART, sizeof(COMMAND_TXSTART),
STATE_TX);
kfree(buf);
if (err) {
dev_err(irtoy->dev, "failed to send tx start command: %d\n",
err);
// not sure what state the device is in, reset it
irtoy_setup(irtoy);
return err;
}
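// The device reports the number of bytes emitted, which is assumed to
// include the trailing 0xffff terminator, hence the comparison against
// the byte count in size rather than against count.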
if (size != irtoy->emitted) {
dev_err(irtoy->dev, "expected %u emitted, got %u\n", size,
irtoy->emitted);
// not sure what state the device is in, reset it
irtoy_setup(irtoy);
return -EINVAL;
}
return count;
}
static int irtoy_tx_carrier(struct rc_dev *rc, uint32_t carrier)
{
struct irtoy *irtoy = rc->priv;
u8 buf[3];
int err;
if (carrier < 11800)
return -EINVAL;
buf[0] = 0x06;
buf[1] = DIV_ROUND_CLOSEST(48000000, 16 * carrier) - 1;
buf[2] = 0;
err = irtoy_command(irtoy, buf, sizeof(buf), STATE_COMMAND_NO_RESP);
if (err)
dev_err(irtoy->dev, "could not write carrier command: %d\n",
err);
return err;
}
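// Worked example (illustrative): for a 38kHz carrier the divisor byte is
// DIV_ROUND_CLOSEST(48000000, 16 * 38000) - 1 = 79 - 1 = 78, which the
// firmware presumably maps back to 48MHz / (16 * 79) ~= 37.97kHz.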
static int irtoy_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_host_interface *idesc = intf->cur_altsetting;
struct usb_device *usbdev = interface_to_usbdev(intf);
struct usb_endpoint_descriptor *ep_in = NULL;
struct usb_endpoint_descriptor *ep_out = NULL;
struct usb_endpoint_descriptor *ep = NULL;
struct irtoy *irtoy;
struct rc_dev *rc;
struct urb *urb;
int i, pipe, err = -ENOMEM;
for (i = 0; i < idesc->desc.bNumEndpoints; i++) {
ep = &idesc->endpoint[i].desc;
if (!ep_in && usb_endpoint_is_bulk_in(ep) &&
usb_endpoint_maxp(ep) == MAX_PACKET)
ep_in = ep;
if (!ep_out && usb_endpoint_is_bulk_out(ep) &&
usb_endpoint_maxp(ep) == MAX_PACKET)
ep_out = ep;
}
if (!ep_in || !ep_out) {
dev_err(&intf->dev, "required endpoints not found\n");
return -ENODEV;
}
irtoy = kzalloc(sizeof(*irtoy), GFP_KERNEL);
if (!irtoy)
return -ENOMEM;
irtoy->in = kmalloc(MAX_PACKET, GFP_KERNEL);
if (!irtoy->in)
goto free_irtoy;
irtoy->out = kmalloc(MAX_PACKET, GFP_KERNEL);
if (!irtoy->out)
goto free_irtoy;
rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rc)
goto free_irtoy;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
goto free_rcdev;
pipe = usb_rcvbulkpipe(usbdev, ep_in->bEndpointAddress);
usb_fill_bulk_urb(urb, usbdev, pipe, irtoy->in, MAX_PACKET,
irtoy_in_callback, irtoy);
irtoy->urb_in = urb;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
goto free_rcdev;
pipe = usb_sndbulkpipe(usbdev, ep_out->bEndpointAddress);
usb_fill_bulk_urb(urb, usbdev, pipe, irtoy->out, MAX_PACKET,
irtoy_out_callback, irtoy);
irtoy->dev = &intf->dev;
irtoy->usbdev = usbdev;
irtoy->rc = rc;
irtoy->urb_out = urb;
irtoy->pulse = true;
err = usb_submit_urb(irtoy->urb_in, GFP_KERNEL);
if (err != 0) {
dev_err(irtoy->dev, "fail to submit in urb: %d\n", err);
goto free_rcdev;
}
err = irtoy_setup(irtoy);
if (err)
goto free_rcdev;
dev_info(irtoy->dev, "version: hardware %u, firmware %u.%u, protocol %u",
irtoy->hw_version, irtoy->sw_version / 10,
irtoy->sw_version % 10, irtoy->proto_version);
if (irtoy->sw_version < MIN_FW_VERSION) {
dev_err(irtoy->dev, "need firmware V%02u or higher",
MIN_FW_VERSION);
err = -ENODEV;
goto free_rcdev;
}
usb_make_path(usbdev, irtoy->phys, sizeof(irtoy->phys));
rc->device_name = "Infrared Toy";
rc->driver_name = KBUILD_MODNAME;
rc->input_phys = irtoy->phys;
usb_to_input_id(usbdev, &rc->input_id);
rc->dev.parent = &intf->dev;
rc->priv = irtoy;
rc->tx_ir = irtoy_tx;
rc->s_tx_carrier = irtoy_tx_carrier;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rc->map_name = RC_MAP_RC6_MCE;
rc->rx_resolution = UNIT_US;
rc->timeout = IR_DEFAULT_TIMEOUT;
/*
* End of transmission is detected by the absence of a USB packet
* with more pulse/spaces. However, each USB packet sent can
* contain 32 pulse/spaces, which can be quite lengthy, so there
* can be a delay between USB packets. For example, with NEC there
* is a 17ms gap between packets.
*
* So, use a largish minimum timeout which works with most protocols.
*/
rc->min_timeout = MS_TO_US(40);
rc->max_timeout = MAX_TIMEOUT_US;
err = rc_register_device(rc);
if (err)
goto free_rcdev;
usb_set_intfdata(intf, irtoy);
return 0;
free_rcdev:
usb_kill_urb(irtoy->urb_out);
usb_free_urb(irtoy->urb_out);
usb_kill_urb(irtoy->urb_in);
usb_free_urb(irtoy->urb_in);
rc_free_device(rc);
free_irtoy:
kfree(irtoy->in);
kfree(irtoy->out);
kfree(irtoy);
return err;
}
static void irtoy_disconnect(struct usb_interface *intf)
{
struct irtoy *ir = usb_get_intfdata(intf);
rc_unregister_device(ir->rc);
usb_set_intfdata(intf, NULL);
usb_kill_urb(ir->urb_out);
usb_free_urb(ir->urb_out);
usb_kill_urb(ir->urb_in);
usb_free_urb(ir->urb_in);
kfree(ir->in);
kfree(ir->out);
kfree(ir);
}
static const struct usb_device_id irtoy_table[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x04d8, 0xfd08, USB_CLASS_CDC_DATA) },
{ USB_DEVICE_INTERFACE_CLASS(0x04d8, 0xf58b, USB_CLASS_CDC_DATA) },
{ }
};
static struct usb_driver irtoy_driver = {
.name = KBUILD_MODNAME,
.probe = irtoy_probe,
.disconnect = irtoy_disconnect,
.id_table = irtoy_table,
};
module_usb_driver(irtoy_driver);
MODULE_AUTHOR("Sean Young <[email protected]>");
MODULE_DESCRIPTION("Infrared Toy and IR Droid driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, irtoy_table);
| linux-master | drivers/media/rc/ir_toy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* winbond-cir.c - Driver for the Consumer IR functionality of Winbond
* SuperI/O chips.
*
* Currently supports the Winbond WPCD376i chip (PNP id WEC1022), but
* could probably support others (Winbond WEC102X, NatSemi, etc)
* with minor modifications.
*
* Original Author: David Härdeman <[email protected]>
* Copyright (C) 2012 Sean Young <[email protected]>
* Copyright (C) 2009 - 2011 David Härdeman <[email protected]>
*
* Dedicated to my daughter Matilda, without whose loving attention this
* driver would have been finished in half the time and with a fraction
* of the bugs.
*
* Written using:
* o Winbond WPCD376I datasheet helpfully provided by Jesse Barnes at Intel
* o NatSemi PC87338/PC97338 datasheet (for the serial port stuff)
* o DSDT dumps
*
* Supported features:
* o IR Receive
* o IR Transmit
* o Wake-On-CIR functionality
* o Carrier detection
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/leds.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <linux/io.h>
#include <linux/bitrev.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <media/rc-core.h>
#define DRVNAME "winbond-cir"
/* CEIR Wake-Up Registers, relative to data->wbase */
#define WBCIR_REG_WCEIR_CTL 0x03 /* CEIR Receiver Control */
#define WBCIR_REG_WCEIR_STS 0x04 /* CEIR Receiver Status */
#define WBCIR_REG_WCEIR_EV_EN 0x05 /* CEIR Receiver Event Enable */
#define WBCIR_REG_WCEIR_CNTL 0x06 /* CEIR Receiver Counter Low */
#define WBCIR_REG_WCEIR_CNTH 0x07 /* CEIR Receiver Counter High */
#define WBCIR_REG_WCEIR_INDEX 0x08 /* CEIR Receiver Index */
#define WBCIR_REG_WCEIR_DATA 0x09 /* CEIR Receiver Data */
#define WBCIR_REG_WCEIR_CSL 0x0A /* CEIR Re. Compare Strlen */
#define WBCIR_REG_WCEIR_CFG1 0x0B /* CEIR Re. Configuration 1 */
#define WBCIR_REG_WCEIR_CFG2 0x0C /* CEIR Re. Configuration 2 */
/* CEIR Enhanced Functionality Registers, relative to data->ebase */
#define WBCIR_REG_ECEIR_CTS 0x00 /* Enhanced IR Control Status */
#define WBCIR_REG_ECEIR_CCTL 0x01 /* Infrared Counter Control */
#define WBCIR_REG_ECEIR_CNT_LO 0x02 /* Infrared Counter LSB */
#define WBCIR_REG_ECEIR_CNT_HI 0x03 /* Infrared Counter MSB */
#define WBCIR_REG_ECEIR_IREM 0x04 /* Infrared Emitter Status */
/* SP3 Banked Registers, relative to data->sbase */
#define WBCIR_REG_SP3_BSR 0x03 /* Bank Select, all banks */
/* Bank 0 */
#define WBCIR_REG_SP3_RXDATA 0x00 /* FIFO RX data (r) */
#define WBCIR_REG_SP3_TXDATA 0x00 /* FIFO TX data (w) */
#define WBCIR_REG_SP3_IER 0x01 /* Interrupt Enable */
#define WBCIR_REG_SP3_EIR 0x02 /* Event Identification (r) */
#define WBCIR_REG_SP3_FCR 0x02 /* FIFO Control (w) */
#define WBCIR_REG_SP3_MCR 0x04 /* Mode Control */
#define WBCIR_REG_SP3_LSR 0x05 /* Link Status */
#define WBCIR_REG_SP3_MSR 0x06 /* Modem Status */
#define WBCIR_REG_SP3_ASCR 0x07 /* Aux Status and Control */
/* Bank 2 */
#define WBCIR_REG_SP3_BGDL 0x00 /* Baud Divisor LSB */
#define WBCIR_REG_SP3_BGDH 0x01 /* Baud Divisor MSB */
#define WBCIR_REG_SP3_EXCR1 0x02 /* Extended Control 1 */
#define WBCIR_REG_SP3_EXCR2 0x04 /* Extended Control 2 */
#define WBCIR_REG_SP3_TXFLV 0x06 /* TX FIFO Level */
#define WBCIR_REG_SP3_RXFLV 0x07 /* RX FIFO Level */
/* Bank 3 */
#define WBCIR_REG_SP3_MRID 0x00 /* Module Identification */
#define WBCIR_REG_SP3_SH_LCR 0x01 /* LCR Shadow */
#define WBCIR_REG_SP3_SH_FCR 0x02 /* FCR Shadow */
/* Bank 4 */
#define WBCIR_REG_SP3_IRCR1 0x02 /* Infrared Control 1 */
/* Bank 5 */
#define WBCIR_REG_SP3_IRCR2 0x04 /* Infrared Control 2 */
/* Bank 6 */
#define WBCIR_REG_SP3_IRCR3 0x00 /* Infrared Control 3 */
#define WBCIR_REG_SP3_SIR_PW 0x02 /* SIR Pulse Width */
/* Bank 7 */
#define WBCIR_REG_SP3_IRRXDC 0x00 /* IR RX Demod Control */
#define WBCIR_REG_SP3_IRTXMC 0x01 /* IR TX Mod Control */
#define WBCIR_REG_SP3_RCCFG 0x02 /* CEIR Config */
#define WBCIR_REG_SP3_IRCFG1 0x04 /* Infrared Config 1 */
#define WBCIR_REG_SP3_IRCFG4 0x07 /* Infrared Config 4 */
/*
* Magic values follow
*/
/* No interrupts for WBCIR_REG_SP3_IER and WBCIR_REG_SP3_EIR */
#define WBCIR_IRQ_NONE 0x00
/* RX data bit for WBCIR_REG_SP3_IER and WBCIR_REG_SP3_EIR */
#define WBCIR_IRQ_RX 0x01
/* TX data low bit for WBCIR_REG_SP3_IER and WBCIR_REG_SP3_EIR */
#define WBCIR_IRQ_TX_LOW 0x02
/* Over/Under-flow bit for WBCIR_REG_SP3_IER and WBCIR_REG_SP3_EIR */
#define WBCIR_IRQ_ERR 0x04
/* TX data empty bit for WBCIR_REG_SP3_IER and WBCIR_REG_SP3_EIR */
#define WBCIR_IRQ_TX_EMPTY 0x20
/* Led enable/disable bit for WBCIR_REG_ECEIR_CTS */
#define WBCIR_LED_ENABLE 0x80
/* RX data available bit for WBCIR_REG_SP3_LSR */
#define WBCIR_RX_AVAIL 0x01
/* RX data overrun error bit for WBCIR_REG_SP3_LSR */
#define WBCIR_RX_OVERRUN 0x02
/* TX End-Of-Transmission bit for WBCIR_REG_SP3_ASCR */
#define WBCIR_TX_EOT 0x04
/* RX disable bit for WBCIR_REG_SP3_ASCR */
#define WBCIR_RX_DISABLE 0x20
/* TX data underrun error bit for WBCIR_REG_SP3_ASCR */
#define WBCIR_TX_UNDERRUN 0x40
/* Extended mode enable bit for WBCIR_REG_SP3_EXCR1 */
#define WBCIR_EXT_ENABLE 0x01
/* Select compare register in WBCIR_REG_WCEIR_INDEX (bits 5 & 6) */
#define WBCIR_REGSEL_COMPARE 0x10
/* Select mask register in WBCIR_REG_WCEIR_INDEX (bits 5 & 6) */
#define WBCIR_REGSEL_MASK 0x20
/* Starting address of selected register in WBCIR_REG_WCEIR_INDEX */
#define WBCIR_REG_ADDR0 0x00
/* Enable carrier counter */
#define WBCIR_CNTR_EN 0x01
/* Reset carrier counter */
#define WBCIR_CNTR_R 0x02
/* Invert TX */
#define WBCIR_IRTX_INV 0x04
/* Receiver oversampling */
#define WBCIR_RX_T_OV 0x40
/* Valid banks for the SP3 UART */
enum wbcir_bank {
WBCIR_BANK_0 = 0x00,
WBCIR_BANK_1 = 0x80,
WBCIR_BANK_2 = 0xE0,
WBCIR_BANK_3 = 0xE4,
WBCIR_BANK_4 = 0xE8,
WBCIR_BANK_5 = 0xEC,
WBCIR_BANK_6 = 0xF0,
WBCIR_BANK_7 = 0xF4,
};
/* Supported power-on IR Protocols */
enum wbcir_protocol {
IR_PROTOCOL_RC5 = 0x0,
IR_PROTOCOL_NEC = 0x1,
IR_PROTOCOL_RC6 = 0x2,
};
/* Possible states for IR reception */
enum wbcir_rxstate {
WBCIR_RXSTATE_INACTIVE = 0,
WBCIR_RXSTATE_ACTIVE,
WBCIR_RXSTATE_ERROR
};
/* Possible states for IR transmission */
enum wbcir_txstate {
WBCIR_TXSTATE_INACTIVE = 0,
WBCIR_TXSTATE_ACTIVE,
WBCIR_TXSTATE_ERROR
};
/* Misc */
#define WBCIR_NAME "Winbond CIR"
#define WBCIR_ID_FAMILY 0xF1 /* Family ID for the WPCD376I */
#define WBCIR_ID_CHIP 0x04 /* Chip ID for the WPCD376I */
#define WAKEUP_IOMEM_LEN 0x10 /* Wake-Up I/O Reg Len */
#define EHFUNC_IOMEM_LEN 0x10 /* Enhanced Func I/O Reg Len */
#define SP_IOMEM_LEN 0x08 /* Serial Port 3 (IR) Reg Len */
/* Per-device data */
struct wbcir_data {
spinlock_t spinlock;
struct rc_dev *dev;
struct led_classdev led;
unsigned long wbase; /* Wake-Up Baseaddr */
unsigned long ebase; /* Enhanced Func. Baseaddr */
unsigned long sbase; /* Serial Port Baseaddr */
unsigned int irq; /* Serial Port IRQ */
u8 irqmask;
/* RX state */
enum wbcir_rxstate rxstate;
int carrier_report_enabled;
u32 pulse_duration;
/* TX state */
enum wbcir_txstate txstate;
u32 txlen;
u32 txoff;
u32 *txbuf;
u8 txmask;
u32 txcarrier;
};
static bool invert; /* default = 0 */
module_param(invert, bool, 0444);
MODULE_PARM_DESC(invert, "Invert the signal from the IR receiver");
static bool txandrx; /* default = 0 */
module_param(txandrx, bool, 0444);
MODULE_PARM_DESC(txandrx, "Allow simultaneous TX and RX");
/*****************************************************************************
*
* UTILITY FUNCTIONS
*
*****************************************************************************/
/* Caller needs to hold data->spinlock */
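/*
 * Example: wbcir_set_bits(addr, 0x01, 0x07) performs a read-modify-write
 * that clears bits 2:1 and sets bit 0, leaving all other bits untouched.
 */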
static void
wbcir_set_bits(unsigned long addr, u8 bits, u8 mask)
{
u8 val;
val = inb(addr);
val = ((val & ~mask) | (bits & mask));
outb(val, addr);
}
/* Selects the register bank for the serial port */
static inline void
wbcir_select_bank(struct wbcir_data *data, enum wbcir_bank bank)
{
outb(bank, data->sbase + WBCIR_REG_SP3_BSR);
}
static inline void
wbcir_set_irqmask(struct wbcir_data *data, u8 irqmask)
{
if (data->irqmask == irqmask)
return;
wbcir_select_bank(data, WBCIR_BANK_0);
outb(irqmask, data->sbase + WBCIR_REG_SP3_IER);
data->irqmask = irqmask;
}
static enum led_brightness
wbcir_led_brightness_get(struct led_classdev *led_cdev)
{
struct wbcir_data *data = container_of(led_cdev,
struct wbcir_data,
led);
if (inb(data->ebase + WBCIR_REG_ECEIR_CTS) & WBCIR_LED_ENABLE)
return LED_FULL;
else
return LED_OFF;
}
static void
wbcir_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct wbcir_data *data = container_of(led_cdev,
struct wbcir_data,
led);
wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CTS,
brightness == LED_OFF ? 0x00 : WBCIR_LED_ENABLE,
WBCIR_LED_ENABLE);
}
/* Manchester encodes bits to RC6 message cells (see wbcir_shutdown) */
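/*
 * Example: wbcir_to_rc6cells(0x5) returns 0x66 - the input bits 1,0,1,0
 * (taken LSB first) become the Manchester cell pairs 10,01,10,01.
 */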
static u8
wbcir_to_rc6cells(u8 val)
{
u8 coded = 0x00;
int i;
val &= 0x0F;
for (i = 0; i < 4; i++) {
if (val & 0x01)
coded |= 0x02 << (i * 2);
else
coded |= 0x01 << (i * 2);
val >>= 1;
}
return coded;
}
/*****************************************************************************
*
* INTERRUPT FUNCTIONS
*
*****************************************************************************/
static void
wbcir_carrier_report(struct wbcir_data *data)
{
unsigned counter = inb(data->ebase + WBCIR_REG_ECEIR_CNT_LO) |
inb(data->ebase + WBCIR_REG_ECEIR_CNT_HI) << 8;
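	/*
	 * The counter holds the number of carrier periods seen during the
	 * accumulated pulse time of the frame, so e.g. 360 periods over
	 * 10000 us of pulses yields a 36 kHz carrier estimate.
	 */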
if (counter > 0 && counter < 0xffff) {
struct ir_raw_event ev = {
.carrier_report = 1,
.carrier = DIV_ROUND_CLOSEST(counter * 1000000u,
data->pulse_duration)
};
ir_raw_event_store(data->dev, &ev);
}
/* reset and restart the counter */
data->pulse_duration = 0;
wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_R,
WBCIR_CNTR_EN | WBCIR_CNTR_R);
wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_EN,
WBCIR_CNTR_EN | WBCIR_CNTR_R);
}
static void
wbcir_idle_rx(struct rc_dev *dev, bool idle)
{
struct wbcir_data *data = dev->priv;
if (!idle && data->rxstate == WBCIR_RXSTATE_INACTIVE)
data->rxstate = WBCIR_RXSTATE_ACTIVE;
if (idle && data->rxstate != WBCIR_RXSTATE_INACTIVE) {
data->rxstate = WBCIR_RXSTATE_INACTIVE;
if (data->carrier_report_enabled)
wbcir_carrier_report(data);
/* Tell hardware to go idle by setting RXINACTIVE */
outb(WBCIR_RX_DISABLE, data->sbase + WBCIR_REG_SP3_ASCR);
}
}
static void
wbcir_irq_rx(struct wbcir_data *data, struct pnp_dev *device)
{
u8 irdata;
struct ir_raw_event rawir = {};
/* Since RXHDLEV is set, at least 8 bytes are in the FIFO */
while (inb(data->sbase + WBCIR_REG_SP3_LSR) & WBCIR_RX_AVAIL) {
irdata = inb(data->sbase + WBCIR_REG_SP3_RXDATA);
if (data->rxstate == WBCIR_RXSTATE_ERROR)
continue;
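		/*
		 * Bits 6:0 encode (duration / sample period) - 1; the
		 * sample period is 2 us or 10 us depending on whether
		 * carrier reports are enabled (see the baud divisor setup
		 * in wbcir_set_carrier_report and wbcir_init_hw).
		 */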
rawir.duration = ((irdata & 0x7F) + 1) *
(data->carrier_report_enabled ? 2 : 10);
		rawir.pulse = !(irdata & 0x80);
if (rawir.pulse)
data->pulse_duration += rawir.duration;
ir_raw_event_store_with_filter(data->dev, &rawir);
}
ir_raw_event_handle(data->dev);
}
static void
wbcir_irq_tx(struct wbcir_data *data)
{
unsigned int space;
unsigned int used;
u8 bytes[16];
u8 byte;
if (!data->txbuf)
return;
switch (data->txstate) {
case WBCIR_TXSTATE_INACTIVE:
/* TX FIFO empty */
space = 16;
break;
case WBCIR_TXSTATE_ACTIVE:
/* TX FIFO low (3 bytes or less) */
space = 13;
break;
case WBCIR_TXSTATE_ERROR:
space = 0;
break;
default:
return;
}
/*
* TX data is run-length coded in bytes: YXXXXXXX
* Y = space (1) or pulse (0)
	 * X = duration, encoded as (X + 1) * 10 us (i.e. 10 to 1280 us)
*/
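	/*
	 * Example: a 250 us pulse (txbuf value 25) is emitted as 0x18
	 * (X = 24, Y = 0) and a 1 ms space (txbuf value 100) as 0xe3
	 * (X = 99, Y = 1); durations above 1280 us are split across
	 * several bytes by the min()/subtract loop below.
	 */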
for (used = 0; used < space && data->txoff != data->txlen; used++) {
if (data->txbuf[data->txoff] == 0) {
data->txoff++;
continue;
}
byte = min((u32)0x80, data->txbuf[data->txoff]);
data->txbuf[data->txoff] -= byte;
byte--;
byte |= (data->txoff % 2 ? 0x80 : 0x00); /* pulse/space */
bytes[used] = byte;
}
while (data->txoff != data->txlen && data->txbuf[data->txoff] == 0)
data->txoff++;
if (used == 0) {
/* Finished */
if (data->txstate == WBCIR_TXSTATE_ERROR)
/* Clear TX underrun bit */
outb(WBCIR_TX_UNDERRUN, data->sbase + WBCIR_REG_SP3_ASCR);
wbcir_set_irqmask(data, WBCIR_IRQ_RX | WBCIR_IRQ_ERR);
kfree(data->txbuf);
data->txbuf = NULL;
data->txstate = WBCIR_TXSTATE_INACTIVE;
} else if (data->txoff == data->txlen) {
/* At the end of transmission, tell the hw before last byte */
outsb(data->sbase + WBCIR_REG_SP3_TXDATA, bytes, used - 1);
outb(WBCIR_TX_EOT, data->sbase + WBCIR_REG_SP3_ASCR);
outb(bytes[used - 1], data->sbase + WBCIR_REG_SP3_TXDATA);
wbcir_set_irqmask(data, WBCIR_IRQ_RX | WBCIR_IRQ_ERR |
WBCIR_IRQ_TX_EMPTY);
} else {
/* More data to follow... */
		outsb(data->sbase + WBCIR_REG_SP3_TXDATA, bytes, used);
if (data->txstate == WBCIR_TXSTATE_INACTIVE) {
wbcir_set_irqmask(data, WBCIR_IRQ_RX | WBCIR_IRQ_ERR |
WBCIR_IRQ_TX_LOW);
data->txstate = WBCIR_TXSTATE_ACTIVE;
}
}
}
static irqreturn_t
wbcir_irq_handler(int irqno, void *cookie)
{
struct pnp_dev *device = cookie;
struct wbcir_data *data = pnp_get_drvdata(device);
unsigned long flags;
u8 status;
spin_lock_irqsave(&data->spinlock, flags);
wbcir_select_bank(data, WBCIR_BANK_0);
status = inb(data->sbase + WBCIR_REG_SP3_EIR);
status &= data->irqmask;
if (!status) {
spin_unlock_irqrestore(&data->spinlock, flags);
return IRQ_NONE;
}
if (status & WBCIR_IRQ_ERR) {
/* RX overflow? (read clears bit) */
if (inb(data->sbase + WBCIR_REG_SP3_LSR) & WBCIR_RX_OVERRUN) {
data->rxstate = WBCIR_RXSTATE_ERROR;
ir_raw_event_overflow(data->dev);
}
/* TX underflow? */
if (inb(data->sbase + WBCIR_REG_SP3_ASCR) & WBCIR_TX_UNDERRUN)
data->txstate = WBCIR_TXSTATE_ERROR;
}
if (status & WBCIR_IRQ_RX)
wbcir_irq_rx(data, device);
if (status & (WBCIR_IRQ_TX_LOW | WBCIR_IRQ_TX_EMPTY))
wbcir_irq_tx(data);
spin_unlock_irqrestore(&data->spinlock, flags);
return IRQ_HANDLED;
}
/*****************************************************************************
*
* RC-CORE INTERFACE FUNCTIONS
*
*****************************************************************************/
static int
wbcir_set_carrier_report(struct rc_dev *dev, int enable)
{
struct wbcir_data *data = dev->priv;
unsigned long flags;
spin_lock_irqsave(&data->spinlock, flags);
if (data->carrier_report_enabled == enable) {
spin_unlock_irqrestore(&data->spinlock, flags);
return 0;
}
data->pulse_duration = 0;
wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_R,
WBCIR_CNTR_EN | WBCIR_CNTR_R);
if (enable && data->dev->idle)
wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL,
WBCIR_CNTR_EN, WBCIR_CNTR_EN | WBCIR_CNTR_R);
/* Set a higher sampling resolution if carrier reports are enabled */
wbcir_select_bank(data, WBCIR_BANK_2);
data->dev->rx_resolution = enable ? 2 : 10;
outb(enable ? 0x03 : 0x0f, data->sbase + WBCIR_REG_SP3_BGDL);
outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
/* Enable oversampling if carrier reports are enabled */
wbcir_select_bank(data, WBCIR_BANK_7);
wbcir_set_bits(data->sbase + WBCIR_REG_SP3_RCCFG,
enable ? WBCIR_RX_T_OV : 0, WBCIR_RX_T_OV);
data->carrier_report_enabled = enable;
spin_unlock_irqrestore(&data->spinlock, flags);
return 0;
}
static int
wbcir_txcarrier(struct rc_dev *dev, u32 carrier)
{
struct wbcir_data *data = dev->priv;
unsigned long flags;
u8 val;
u32 freq;
freq = DIV_ROUND_CLOSEST(carrier, 1000);
if (freq < 30 || freq > 60)
return -EINVAL;
switch (freq) {
case 58:
case 59:
case 60:
val = freq - 58;
freq *= 1000;
break;
case 57:
val = freq - 27;
freq = 56900;
break;
default:
val = freq - 27;
freq *= 1000;
break;
}
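	/*
	 * Examples: a 36 kHz request programs val = 9 (36 - 27) into the
	 * low five bits of IRTXMC below; a 57 kHz request programs
	 * val = 30, for which the hardware actually generates 56.9 kHz.
	 */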
spin_lock_irqsave(&data->spinlock, flags);
if (data->txstate != WBCIR_TXSTATE_INACTIVE) {
spin_unlock_irqrestore(&data->spinlock, flags);
return -EBUSY;
}
if (data->txcarrier != freq) {
wbcir_select_bank(data, WBCIR_BANK_7);
wbcir_set_bits(data->sbase + WBCIR_REG_SP3_IRTXMC, val, 0x1F);
data->txcarrier = freq;
}
spin_unlock_irqrestore(&data->spinlock, flags);
return 0;
}
static int
wbcir_txmask(struct rc_dev *dev, u32 mask)
{
struct wbcir_data *data = dev->priv;
unsigned long flags;
u8 val;
/* return the number of transmitters */
if (mask > 15)
return 4;
/* Four outputs, only one output can be enabled at a time */
switch (mask) {
case 0x1:
val = 0x0;
break;
case 0x2:
val = 0x1;
break;
case 0x4:
val = 0x2;
break;
case 0x8:
val = 0x3;
break;
default:
return -EINVAL;
}
spin_lock_irqsave(&data->spinlock, flags);
if (data->txstate != WBCIR_TXSTATE_INACTIVE) {
spin_unlock_irqrestore(&data->spinlock, flags);
return -EBUSY;
}
if (data->txmask != mask) {
wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CTS, val, 0x0c);
data->txmask = mask;
}
spin_unlock_irqrestore(&data->spinlock, flags);
return 0;
}
static int
wbcir_tx(struct rc_dev *dev, unsigned *b, unsigned count)
{
struct wbcir_data *data = dev->priv;
unsigned *buf;
unsigned i;
unsigned long flags;
buf = kmalloc_array(count, sizeof(*b), GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Convert values to multiples of 10us */
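	/* (rounding to nearest: e.g. a 565 us period is stored as 57) */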
for (i = 0; i < count; i++)
buf[i] = DIV_ROUND_CLOSEST(b[i], 10);
/* Not sure if this is possible, but better safe than sorry */
spin_lock_irqsave(&data->spinlock, flags);
if (data->txstate != WBCIR_TXSTATE_INACTIVE) {
spin_unlock_irqrestore(&data->spinlock, flags);
kfree(buf);
return -EBUSY;
}
/* Fill the TX fifo once, the irq handler will do the rest */
data->txbuf = buf;
data->txlen = count;
data->txoff = 0;
wbcir_irq_tx(data);
/* We're done */
spin_unlock_irqrestore(&data->spinlock, flags);
return count;
}
/*****************************************************************************
*
* SETUP/INIT/SUSPEND/RESUME FUNCTIONS
*
*****************************************************************************/
static void
wbcir_shutdown(struct pnp_dev *device)
{
struct device *dev = &device->dev;
struct wbcir_data *data = pnp_get_drvdata(device);
struct rc_dev *rc = data->dev;
bool do_wake = true;
u8 match[11];
u8 mask[11];
u8 rc6_csl = 0;
u8 proto;
u32 wake_sc = rc->scancode_wakeup_filter.data;
u32 mask_sc = rc->scancode_wakeup_filter.mask;
int i;
memset(match, 0, sizeof(match));
memset(mask, 0, sizeof(mask));
if (!mask_sc || !device_may_wakeup(dev)) {
do_wake = false;
goto finish;
}
switch (rc->wakeup_protocol) {
case RC_PROTO_RC5:
		/* Mask = 13 bits, excluding toggle */
mask[0] = (mask_sc & 0x003f);
mask[0] |= (mask_sc & 0x0300) >> 2;
mask[1] = (mask_sc & 0x1c00) >> 10;
if (mask_sc & 0x0040) /* 2nd start bit */
match[1] |= 0x10;
match[0] = (wake_sc & 0x003F); /* 6 command bits */
match[0] |= (wake_sc & 0x0300) >> 2; /* 2 address bits */
match[1] = (wake_sc & 0x1c00) >> 10; /* 3 address bits */
if (!(wake_sc & 0x0040)) /* 2nd start bit */
match[1] |= 0x10;
proto = IR_PROTOCOL_RC5;
break;
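	/*
	 * NEC frames are transmitted LSB first as address, ~address,
	 * command, ~command - hence the bitrev8() calls and, for plain
	 * NEC, the inverted match bytes in the cases below.
	 */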
case RC_PROTO_NEC:
mask[1] = bitrev8(mask_sc);
mask[0] = mask[1];
mask[3] = bitrev8(mask_sc >> 8);
mask[2] = mask[3];
match[1] = bitrev8(wake_sc);
match[0] = ~match[1];
match[3] = bitrev8(wake_sc >> 8);
match[2] = ~match[3];
proto = IR_PROTOCOL_NEC;
break;
case RC_PROTO_NECX:
mask[1] = bitrev8(mask_sc);
mask[0] = mask[1];
mask[2] = bitrev8(mask_sc >> 8);
mask[3] = bitrev8(mask_sc >> 16);
match[1] = bitrev8(wake_sc);
match[0] = ~match[1];
match[2] = bitrev8(wake_sc >> 8);
match[3] = bitrev8(wake_sc >> 16);
proto = IR_PROTOCOL_NEC;
break;
case RC_PROTO_NEC32:
mask[0] = bitrev8(mask_sc);
mask[1] = bitrev8(mask_sc >> 8);
mask[2] = bitrev8(mask_sc >> 16);
mask[3] = bitrev8(mask_sc >> 24);
match[0] = bitrev8(wake_sc);
match[1] = bitrev8(wake_sc >> 8);
match[2] = bitrev8(wake_sc >> 16);
match[3] = bitrev8(wake_sc >> 24);
proto = IR_PROTOCOL_NEC;
break;
case RC_PROTO_RC6_0:
/* Command */
match[0] = wbcir_to_rc6cells(wake_sc >> 0);
mask[0] = wbcir_to_rc6cells(mask_sc >> 0);
match[1] = wbcir_to_rc6cells(wake_sc >> 4);
mask[1] = wbcir_to_rc6cells(mask_sc >> 4);
/* Address */
match[2] = wbcir_to_rc6cells(wake_sc >> 8);
mask[2] = wbcir_to_rc6cells(mask_sc >> 8);
match[3] = wbcir_to_rc6cells(wake_sc >> 12);
mask[3] = wbcir_to_rc6cells(mask_sc >> 12);
/* Header */
match[4] = 0x50; /* mode1 = mode0 = 0, ignore toggle */
mask[4] = 0xF0;
match[5] = 0x09; /* start bit = 1, mode2 = 0 */
mask[5] = 0x0F;
rc6_csl = 44;
proto = IR_PROTOCOL_RC6;
break;
	case RC_PROTO_RC6_6A_20:
	case RC_PROTO_RC6_6A_24:
case RC_PROTO_RC6_6A_32:
case RC_PROTO_RC6_MCE:
i = 0;
/* Command */
match[i] = wbcir_to_rc6cells(wake_sc >> 0);
mask[i++] = wbcir_to_rc6cells(mask_sc >> 0);
match[i] = wbcir_to_rc6cells(wake_sc >> 4);
mask[i++] = wbcir_to_rc6cells(mask_sc >> 4);
/* Address + Toggle */
match[i] = wbcir_to_rc6cells(wake_sc >> 8);
mask[i++] = wbcir_to_rc6cells(mask_sc >> 8);
match[i] = wbcir_to_rc6cells(wake_sc >> 12);
mask[i++] = wbcir_to_rc6cells(mask_sc >> 12);
/* Customer bits 7 - 0 */
match[i] = wbcir_to_rc6cells(wake_sc >> 16);
mask[i++] = wbcir_to_rc6cells(mask_sc >> 16);
if (rc->wakeup_protocol == RC_PROTO_RC6_6A_20) {
rc6_csl = 52;
} else {
match[i] = wbcir_to_rc6cells(wake_sc >> 20);
mask[i++] = wbcir_to_rc6cells(mask_sc >> 20);
if (rc->wakeup_protocol == RC_PROTO_RC6_6A_24) {
rc6_csl = 60;
} else {
/* Customer range bit and bits 15 - 8 */
match[i] = wbcir_to_rc6cells(wake_sc >> 24);
mask[i++] = wbcir_to_rc6cells(mask_sc >> 24);
match[i] = wbcir_to_rc6cells(wake_sc >> 28);
mask[i++] = wbcir_to_rc6cells(mask_sc >> 28);
rc6_csl = 76;
}
}
/* Header */
match[i] = 0x93; /* mode1 = mode0 = 1, submode = 0 */
mask[i++] = 0xFF;
match[i] = 0x0A; /* start bit = 1, mode2 = 1 */
mask[i++] = 0x0F;
proto = IR_PROTOCOL_RC6;
break;
default:
do_wake = false;
break;
}
finish:
if (do_wake) {
/* Set compare and compare mask */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_INDEX,
WBCIR_REGSEL_COMPARE | WBCIR_REG_ADDR0,
0x3F);
outsb(data->wbase + WBCIR_REG_WCEIR_DATA, match, 11);
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_INDEX,
WBCIR_REGSEL_MASK | WBCIR_REG_ADDR0,
0x3F);
outsb(data->wbase + WBCIR_REG_WCEIR_DATA, mask, 11);
/* RC6 Compare String Len */
outb(rc6_csl, data->wbase + WBCIR_REG_WCEIR_CSL);
/* Clear status bits NEC_REP, BUFF, MSG_END, MATCH */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_STS, 0x17, 0x17);
/* Clear BUFF_EN, Clear END_EN, Set MATCH_EN */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x01, 0x07);
/* Set CEIR_EN */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL,
(proto << 4) | 0x01, 0x31);
} else {
/* Clear BUFF_EN, Clear END_EN, Clear MATCH_EN */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x00, 0x07);
/* Clear CEIR_EN */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL, 0x00, 0x01);
}
/*
* ACPI will set the HW disable bit for SP3 which means that the
* output signals are left in an undefined state which may cause
* spurious interrupts which we need to ignore until the hardware
* is reinitialized.
*/
wbcir_set_irqmask(data, WBCIR_IRQ_NONE);
disable_irq(data->irq);
}
/*
* Wakeup handling is done on shutdown.
*/
static int
wbcir_set_wakeup_filter(struct rc_dev *rc, struct rc_scancode_filter *filter)
{
return 0;
}
static int
wbcir_suspend(struct pnp_dev *device, pm_message_t state)
{
struct wbcir_data *data = pnp_get_drvdata(device);
led_classdev_suspend(&data->led);
wbcir_shutdown(device);
return 0;
}
static void
wbcir_init_hw(struct wbcir_data *data)
{
/* Disable interrupts */
wbcir_set_irqmask(data, WBCIR_IRQ_NONE);
/* Set RX_INV, Clear CEIR_EN (needed for the led) */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL, invert ? 8 : 0, 0x09);
/* Clear status bits NEC_REP, BUFF, MSG_END, MATCH */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_STS, 0x17, 0x17);
/* Clear BUFF_EN, Clear END_EN, Clear MATCH_EN */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x00, 0x07);
/* Set RC5 cell time to correspond to 36 kHz */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CFG1, 0x4A, 0x7F);
/* Set IRTX_INV */
if (invert)
outb(WBCIR_IRTX_INV, data->ebase + WBCIR_REG_ECEIR_CCTL);
else
outb(0x00, data->ebase + WBCIR_REG_ECEIR_CCTL);
	/*
	 * Clear IR LED, set SP3 clock to 24 MHz, set TX mask to IRTX1,
	 * set SP3_IRRX_SW to binary 01, helpfully not documented
	 */
outb(0x10, data->ebase + WBCIR_REG_ECEIR_CTS);
data->txmask = 0x1;
/* Enable extended mode */
wbcir_select_bank(data, WBCIR_BANK_2);
outb(WBCIR_EXT_ENABLE, data->sbase + WBCIR_REG_SP3_EXCR1);
	/*
	 * Configure the baud generator. IR data will be sampled at
	 * a bitrate of: (24 MHz * prescaler) / (divisor * 16).
	 *
	 * The ECIR registers include a flag to change the
	 * 24 MHz clock freq to 48 MHz.
	 *
	 * It's not documented in the specs, but FIFO levels
	 * other than 16 seem to be unsupported.
	 */
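	/*
	 * Example: a divisor of 15 (BGDL = 0x0f, BGDH = 0x00) gives
	 * 24 MHz / (15 * 16) = 100 kHz, i.e. one sample every 10 us;
	 * carrier reports switch to a divisor of 3 for 2 us sampling
	 * (see wbcir_set_carrier_report).
	 */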
/* prescaler 1.0, tx/rx fifo lvl 16 */
outb(0x30, data->sbase + WBCIR_REG_SP3_EXCR2);
/* Set baud divisor to sample every 10 us */
outb(0x0f, data->sbase + WBCIR_REG_SP3_BGDL);
outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
/* Set CEIR mode */
wbcir_select_bank(data, WBCIR_BANK_0);
outb(0xC0, data->sbase + WBCIR_REG_SP3_MCR);
inb(data->sbase + WBCIR_REG_SP3_LSR); /* Clear LSR */
inb(data->sbase + WBCIR_REG_SP3_MSR); /* Clear MSR */
/* Disable RX demod, enable run-length enc/dec, set freq span */
wbcir_select_bank(data, WBCIR_BANK_7);
outb(0x90, data->sbase + WBCIR_REG_SP3_RCCFG);
/* Disable timer */
wbcir_select_bank(data, WBCIR_BANK_4);
outb(0x00, data->sbase + WBCIR_REG_SP3_IRCR1);
/* Disable MSR interrupt, clear AUX_IRX, mask RX during TX? */
wbcir_select_bank(data, WBCIR_BANK_5);
outb(txandrx ? 0x03 : 0x02, data->sbase + WBCIR_REG_SP3_IRCR2);
/* Disable CRC */
wbcir_select_bank(data, WBCIR_BANK_6);
outb(0x20, data->sbase + WBCIR_REG_SP3_IRCR3);
/* Set RX demodulation freq, not really used */
wbcir_select_bank(data, WBCIR_BANK_7);
outb(0xF2, data->sbase + WBCIR_REG_SP3_IRRXDC);
/* Set TX modulation, 36kHz, 7us pulse width */
outb(0x69, data->sbase + WBCIR_REG_SP3_IRTXMC);
data->txcarrier = 36000;
/* Set invert and pin direction */
if (invert)
outb(0x10, data->sbase + WBCIR_REG_SP3_IRCFG4);
else
outb(0x00, data->sbase + WBCIR_REG_SP3_IRCFG4);
/* Set FIFO thresholds (RX = 8, TX = 3), reset RX/TX */
wbcir_select_bank(data, WBCIR_BANK_0);
outb(0x97, data->sbase + WBCIR_REG_SP3_FCR);
/* Clear AUX status bits */
outb(0xE0, data->sbase + WBCIR_REG_SP3_ASCR);
/* Clear RX state */
data->rxstate = WBCIR_RXSTATE_INACTIVE;
wbcir_idle_rx(data->dev, true);
/* Clear TX state */
if (data->txstate == WBCIR_TXSTATE_ACTIVE) {
kfree(data->txbuf);
data->txbuf = NULL;
data->txstate = WBCIR_TXSTATE_INACTIVE;
}
/* Enable interrupts */
wbcir_set_irqmask(data, WBCIR_IRQ_RX | WBCIR_IRQ_ERR);
}
static int
wbcir_resume(struct pnp_dev *device)
{
struct wbcir_data *data = pnp_get_drvdata(device);
wbcir_init_hw(data);
enable_irq(data->irq);
led_classdev_resume(&data->led);
return 0;
}
static int
wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
{
struct device *dev = &device->dev;
struct wbcir_data *data;
int err;
if (!(pnp_port_len(device, 0) == EHFUNC_IOMEM_LEN &&
pnp_port_len(device, 1) == WAKEUP_IOMEM_LEN &&
pnp_port_len(device, 2) == SP_IOMEM_LEN)) {
dev_err(dev, "Invalid resources\n");
return -ENODEV;
}
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
pnp_set_drvdata(device, data);
spin_lock_init(&data->spinlock);
data->ebase = pnp_port_start(device, 0);
data->wbase = pnp_port_start(device, 1);
data->sbase = pnp_port_start(device, 2);
data->irq = pnp_irq(device, 0);
if (data->wbase == 0 || data->ebase == 0 ||
data->sbase == 0 || data->irq == -1) {
err = -ENODEV;
dev_err(dev, "Invalid resources\n");
goto exit_free_data;
}
dev_dbg(&device->dev, "Found device (w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
data->wbase, data->ebase, data->sbase, data->irq);
data->led.name = "cir::activity";
data->led.default_trigger = "rc-feedback";
data->led.brightness_set = wbcir_led_brightness_set;
data->led.brightness_get = wbcir_led_brightness_get;
err = led_classdev_register(&device->dev, &data->led);
if (err)
goto exit_free_data;
data->dev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!data->dev) {
err = -ENOMEM;
goto exit_unregister_led;
}
data->dev->driver_name = DRVNAME;
data->dev->device_name = WBCIR_NAME;
data->dev->input_phys = "wbcir/cir0";
data->dev->input_id.bustype = BUS_HOST;
data->dev->input_id.vendor = PCI_VENDOR_ID_WINBOND;
data->dev->input_id.product = WBCIR_ID_FAMILY;
data->dev->input_id.version = WBCIR_ID_CHIP;
data->dev->map_name = RC_MAP_RC6_MCE;
data->dev->s_idle = wbcir_idle_rx;
data->dev->s_carrier_report = wbcir_set_carrier_report;
data->dev->s_tx_mask = wbcir_txmask;
data->dev->s_tx_carrier = wbcir_txcarrier;
data->dev->tx_ir = wbcir_tx;
data->dev->priv = data;
data->dev->dev.parent = &device->dev;
data->dev->min_timeout = 1;
data->dev->timeout = IR_DEFAULT_TIMEOUT;
data->dev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
data->dev->rx_resolution = 2;
data->dev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
data->dev->allowed_wakeup_protocols = RC_PROTO_BIT_NEC |
RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32 | RC_PROTO_BIT_RC5 |
RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 |
RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 |
RC_PROTO_BIT_RC6_MCE;
data->dev->wakeup_protocol = RC_PROTO_RC6_MCE;
data->dev->scancode_wakeup_filter.data = 0x800f040c;
data->dev->scancode_wakeup_filter.mask = 0xffff7fff;
data->dev->s_wakeup_filter = wbcir_set_wakeup_filter;
err = rc_register_device(data->dev);
if (err)
goto exit_free_rc;
if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
err = -EBUSY;
goto exit_unregister_device;
}
if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1);
err = -EBUSY;
goto exit_release_wbase;
}
if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
data->sbase, data->sbase + SP_IOMEM_LEN - 1);
err = -EBUSY;
goto exit_release_ebase;
}
err = request_irq(data->irq, wbcir_irq_handler,
0, DRVNAME, device);
if (err) {
dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
err = -EBUSY;
goto exit_release_sbase;
}
device_init_wakeup(&device->dev, 1);
wbcir_init_hw(data);
return 0;
exit_release_sbase:
release_region(data->sbase, SP_IOMEM_LEN);
exit_release_ebase:
release_region(data->ebase, EHFUNC_IOMEM_LEN);
exit_release_wbase:
release_region(data->wbase, WAKEUP_IOMEM_LEN);
exit_unregister_device:
rc_unregister_device(data->dev);
data->dev = NULL;
exit_free_rc:
rc_free_device(data->dev);
exit_unregister_led:
led_classdev_unregister(&data->led);
exit_free_data:
kfree(data);
pnp_set_drvdata(device, NULL);
exit:
return err;
}
static void
wbcir_remove(struct pnp_dev *device)
{
struct wbcir_data *data = pnp_get_drvdata(device);
/* Disable interrupts */
wbcir_set_irqmask(data, WBCIR_IRQ_NONE);
free_irq(data->irq, device);
/* Clear status bits NEC_REP, BUFF, MSG_END, MATCH */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_STS, 0x17, 0x17);
/* Clear CEIR_EN */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL, 0x00, 0x01);
/* Clear BUFF_EN, END_EN, MATCH_EN */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x00, 0x07);
rc_unregister_device(data->dev);
led_classdev_unregister(&data->led);
/* This is ok since &data->led isn't actually used */
wbcir_led_brightness_set(&data->led, LED_OFF);
release_region(data->wbase, WAKEUP_IOMEM_LEN);
release_region(data->ebase, EHFUNC_IOMEM_LEN);
release_region(data->sbase, SP_IOMEM_LEN);
kfree(data);
pnp_set_drvdata(device, NULL);
}
static const struct pnp_device_id wbcir_ids[] = {
{ "WEC1022", 0 },
{ "", 0 }
};
MODULE_DEVICE_TABLE(pnp, wbcir_ids);
static struct pnp_driver wbcir_driver = {
.name = DRVNAME,
.id_table = wbcir_ids,
.probe = wbcir_probe,
.remove = wbcir_remove,
.suspend = wbcir_suspend,
.resume = wbcir_resume,
.shutdown = wbcir_shutdown
};
static int __init
wbcir_init(void)
{
int ret;
ret = pnp_register_driver(&wbcir_driver);
if (ret)
pr_err("Unable to register driver\n");
return ret;
}
static void __exit
wbcir_exit(void)
{
pnp_unregister_driver(&wbcir_driver);
}
module_init(wbcir_init);
module_exit(wbcir_exit);
MODULE_AUTHOR("David Härdeman <[email protected]>");
MODULE_DESCRIPTION("Winbond SuperI/O Consumer IR Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/rc/winbond-cir.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/irq.h>
#include <media/rc-core.h>
#define GPIO_IR_DEVICE_NAME "gpio_ir_recv"
struct gpio_rc_dev {
struct rc_dev *rcdev;
struct gpio_desc *gpiod;
int irq;
struct device *pmdev;
struct pm_qos_request qos;
};
static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id)
{
int val;
struct gpio_rc_dev *gpio_dev = dev_id;
struct device *pmdev = gpio_dev->pmdev;
	/*
	 * On some (not all) cpuidle systems, interrupt response latency
	 * increases while the CPU is idle. Invoking the asynchronous pm
	 * runtime get from interrupt context may add a millisecond of
	 * delay before the resume callback, which disables cpuidle, is
	 * called.
	 *
	 * Two issues can cause the first frame to fail to decode: the
	 * latency in responding to the interrupt, and the delay
	 * introduced by the async API.
	 */
if (pmdev)
pm_runtime_get(pmdev);
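	/* gpiod_get_value() can return a negative errno; only store valid edges */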
val = gpiod_get_value(gpio_dev->gpiod);
if (val >= 0)
ir_raw_event_store_edge(gpio_dev->rcdev, val == 1);
if (pmdev) {
pm_runtime_mark_last_busy(pmdev);
pm_runtime_put_autosuspend(pmdev);
}
return IRQ_HANDLED;
}
static int gpio_ir_recv_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct gpio_rc_dev *gpio_dev;
struct rc_dev *rcdev;
u32 period = 0;
int rc;
if (!np)
return -ENODEV;
gpio_dev = devm_kzalloc(dev, sizeof(*gpio_dev), GFP_KERNEL);
if (!gpio_dev)
return -ENOMEM;
gpio_dev->gpiod = devm_gpiod_get(dev, NULL, GPIOD_IN);
if (IS_ERR(gpio_dev->gpiod))
return dev_err_probe(dev, PTR_ERR(gpio_dev->gpiod),
"error getting gpio\n");
gpio_dev->irq = gpiod_to_irq(gpio_dev->gpiod);
if (gpio_dev->irq < 0)
return gpio_dev->irq;
rcdev = devm_rc_allocate_device(dev, RC_DRIVER_IR_RAW);
if (!rcdev)
return -ENOMEM;
rcdev->priv = gpio_dev;
rcdev->device_name = GPIO_IR_DEVICE_NAME;
rcdev->input_phys = GPIO_IR_DEVICE_NAME "/input0";
rcdev->input_id.bustype = BUS_HOST;
rcdev->input_id.vendor = 0x0001;
rcdev->input_id.product = 0x0001;
rcdev->input_id.version = 0x0100;
rcdev->dev.parent = dev;
rcdev->driver_name = KBUILD_MODNAME;
rcdev->min_timeout = 1;
rcdev->timeout = IR_DEFAULT_TIMEOUT;
rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rcdev->map_name = of_get_property(np, "linux,rc-map-name", NULL);
if (!rcdev->map_name)
rcdev->map_name = RC_MAP_EMPTY;
gpio_dev->rcdev = rcdev;
if (of_property_read_bool(np, "wakeup-source"))
device_init_wakeup(dev, true);
rc = devm_rc_register_device(dev, rcdev);
if (rc < 0) {
dev_err(dev, "failed to register rc device (%d)\n", rc);
return rc;
}
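	/*
	 * If the optional "linux,autosuspend-period" DT property is set,
	 * runtime PM keeps the CPU latency QoS request (added in runtime
	 * resume) alive for that many milliseconds after each IR edge
	 * before autosuspend drops it again.
	 */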
of_property_read_u32(np, "linux,autosuspend-period", &period);
if (period) {
gpio_dev->pmdev = dev;
pm_runtime_set_autosuspend_delay(dev, period);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_suspended(dev);
pm_runtime_enable(dev);
}
platform_set_drvdata(pdev, gpio_dev);
return devm_request_irq(dev, gpio_dev->irq, gpio_ir_recv_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"gpio-ir-recv-irq", gpio_dev);
}
static void gpio_ir_recv_remove(struct platform_device *pdev)
{
struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
struct device *pmdev = gpio_dev->pmdev;
if (pmdev) {
pm_runtime_get_sync(pmdev);
cpu_latency_qos_remove_request(&gpio_dev->qos);
pm_runtime_disable(pmdev);
pm_runtime_put_noidle(pmdev);
pm_runtime_set_suspended(pmdev);
}
}
#ifdef CONFIG_PM
static int gpio_ir_recv_suspend(struct device *dev)
{
struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(gpio_dev->irq);
else
disable_irq(gpio_dev->irq);
return 0;
}
static int gpio_ir_recv_resume(struct device *dev)
{
struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
disable_irq_wake(gpio_dev->irq);
else
enable_irq(gpio_dev->irq);
return 0;
}
static int gpio_ir_recv_runtime_suspend(struct device *dev)
{
struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
cpu_latency_qos_remove_request(&gpio_dev->qos);
return 0;
}
static int gpio_ir_recv_runtime_resume(struct device *dev)
{
struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
cpu_latency_qos_add_request(&gpio_dev->qos, 0);
return 0;
}
static const struct dev_pm_ops gpio_ir_recv_pm_ops = {
.suspend = gpio_ir_recv_suspend,
.resume = gpio_ir_recv_resume,
.runtime_suspend = gpio_ir_recv_runtime_suspend,
.runtime_resume = gpio_ir_recv_runtime_resume,
};
#endif
static const struct of_device_id gpio_ir_recv_of_match[] = {
{ .compatible = "gpio-ir-receiver", },
{ },
};
MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match);
static struct platform_driver gpio_ir_recv_driver = {
.probe = gpio_ir_recv_probe,
.remove_new = gpio_ir_recv_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = gpio_ir_recv_of_match,
#ifdef CONFIG_PM
.pm = &gpio_ir_recv_pm_ops,
#endif
},
};
module_platform_driver(gpio_ir_recv_driver);
MODULE_DESCRIPTION("GPIO IR Receiver driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/media/rc/gpio-ir-recv.c |
// SPDX-License-Identifier: GPL-2.0+
// gadmei-rm008z.h - Keytable for gadmei_rm008z Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
/* GADMEI UTV330+ RM008Z remote
Shine Liu <[email protected]>
*/
static struct rc_map_table gadmei_rm008z[] = {
{ 0x14, KEY_POWER2}, /* POWER OFF */
{ 0x0c, KEY_MUTE}, /* MUTE */
{ 0x18, KEY_TV}, /* TV */
{ 0x0e, KEY_VIDEO}, /* AV */
{ 0x0b, KEY_AUDIO}, /* SV */
{ 0x0f, KEY_RADIO}, /* FM */
{ 0x00, KEY_NUMERIC_1},
{ 0x01, KEY_NUMERIC_2},
{ 0x02, KEY_NUMERIC_3},
{ 0x03, KEY_NUMERIC_4},
{ 0x04, KEY_NUMERIC_5},
{ 0x05, KEY_NUMERIC_6},
{ 0x06, KEY_NUMERIC_7},
{ 0x07, KEY_NUMERIC_8},
{ 0x08, KEY_NUMERIC_9},
{ 0x09, KEY_NUMERIC_0},
{ 0x0a, KEY_INFO}, /* OSD */
{ 0x1c, KEY_BACKSPACE}, /* LAST */
{ 0x0d, KEY_PLAY}, /* PLAY */
{ 0x1e, KEY_CAMERA}, /* SNAPSHOT */
{ 0x1a, KEY_RECORD}, /* RECORD */
{ 0x17, KEY_STOP}, /* STOP */
{ 0x1f, KEY_UP}, /* UP */
{ 0x44, KEY_DOWN}, /* DOWN */
{ 0x46, KEY_TAB}, /* BACK */
	{ 0x4a, KEY_ZOOM},		/* FULLSCREEN */
{ 0x10, KEY_VOLUMEUP}, /* VOLUMEUP */
{ 0x11, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
{ 0x12, KEY_CHANNELUP}, /* CHANNELUP */
{ 0x13, KEY_CHANNELDOWN}, /* CHANNELDOWN */
{ 0x15, KEY_ENTER}, /* OK */
};
static struct rc_map_list gadmei_rm008z_map = {
.map = {
.scan = gadmei_rm008z,
.size = ARRAY_SIZE(gadmei_rm008z),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_GADMEI_RM008Z,
}
};
static int __init init_rc_map_gadmei_rm008z(void)
{
return rc_map_register(&gadmei_rm008z_map);
}
static void __exit exit_rc_map_gadmei_rm008z(void)
{
rc_map_unregister(&gadmei_rm008z_map);
}
module_init(init_rc_map_gadmei_rm008z)
module_exit(exit_rc_map_gadmei_rm008z)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-gadmei-rm008z.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Anysee remote controller keytable
*
* Copyright (C) 2010 Antti Palosaari <[email protected]>
*/
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table anysee[] = {
{ 0x0800, KEY_NUMERIC_0 },
{ 0x0801, KEY_NUMERIC_1 },
{ 0x0802, KEY_NUMERIC_2 },
{ 0x0803, KEY_NUMERIC_3 },
{ 0x0804, KEY_NUMERIC_4 },
{ 0x0805, KEY_NUMERIC_5 },
{ 0x0806, KEY_NUMERIC_6 },
{ 0x0807, KEY_NUMERIC_7 },
{ 0x0808, KEY_NUMERIC_8 },
{ 0x0809, KEY_NUMERIC_9 },
{ 0x080a, KEY_POWER2 }, /* [red power button] */
{ 0x080b, KEY_VIDEO }, /* [*] MODE */
{ 0x080c, KEY_CHANNEL }, /* [symbol counterclockwise arrow] */
{ 0x080d, KEY_NEXT }, /* [>>|] */
{ 0x080e, KEY_MENU }, /* MENU */
{ 0x080f, KEY_EPG }, /* [EPG] */
{ 0x0810, KEY_CLEAR }, /* EXIT */
{ 0x0811, KEY_CHANNELUP },
{ 0x0812, KEY_VOLUMEDOWN },
{ 0x0813, KEY_VOLUMEUP },
{ 0x0814, KEY_CHANNELDOWN },
{ 0x0815, KEY_OK },
{ 0x0816, KEY_RADIO }, /* [symbol TV/radio] */
{ 0x0817, KEY_INFO }, /* [i] */
{ 0x0818, KEY_PREVIOUS }, /* [|<<] */
{ 0x0819, KEY_FAVORITES }, /* FAV. */
{ 0x081a, KEY_SUBTITLE }, /* Subtitle */
{ 0x081b, KEY_CAMERA }, /* [symbol camera] */
{ 0x081c, KEY_YELLOW },
{ 0x081d, KEY_RED },
{ 0x081e, KEY_LANGUAGE }, /* [symbol Second Audio Program] */
{ 0x081f, KEY_GREEN },
{ 0x0820, KEY_SLEEP }, /* Sleep */
{ 0x0821, KEY_SCREEN }, /* 16:9 / 4:3 */
{ 0x0822, KEY_ZOOM }, /* SIZE */
{ 0x0824, KEY_FN }, /* [F1] */
{ 0x0825, KEY_FN }, /* [F2] */
{ 0x0842, KEY_MUTE }, /* symbol mute */
{ 0x0844, KEY_BLUE },
{ 0x0847, KEY_TEXT }, /* TEXT */
{ 0x0848, KEY_STOP },
{ 0x0849, KEY_RECORD },
{ 0x0850, KEY_PLAY },
{ 0x0851, KEY_PAUSE },
};
static struct rc_map_list anysee_map = {
.map = {
.scan = anysee,
.size = ARRAY_SIZE(anysee),
.rc_proto = RC_PROTO_NEC,
.name = RC_MAP_ANYSEE,
}
};
static int __init init_rc_map_anysee(void)
{
return rc_map_register(&anysee_map);
}
static void __exit exit_rc_map_anysee(void)
{
rc_map_unregister(&anysee_map);
}
module_init(init_rc_map_anysee)
module_exit(exit_rc_map_anysee)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <[email protected]>");
| linux-master | drivers/media/rc/keymaps/rc-anysee.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* avermedia-m733a-rm-k6.h - Keytable for avermedia_m733a_rm_k6 Remote Controller
*
* Copyright (c) 2010 by Herton Ronaldo Krzesinski <[email protected]>
*/
#include <media/rc-map.h>
#include <linux/module.h>
/*
* Avermedia M733A with IR model RM-K6
* This is the stock remote controller used with Positivo machines with M733A
* Herton Ronaldo Krzesinski <[email protected]>
*/
static struct rc_map_table avermedia_m733a_rm_k6[] = {
{ 0x0401, KEY_POWER2 },
{ 0x0406, KEY_MUTE },
{ 0x0408, KEY_MODE }, /* TV/FM */
{ 0x0409, KEY_NUMERIC_1 },
{ 0x040a, KEY_NUMERIC_2 },
{ 0x040b, KEY_NUMERIC_3 },
{ 0x040c, KEY_NUMERIC_4 },
{ 0x040d, KEY_NUMERIC_5 },
{ 0x040e, KEY_NUMERIC_6 },
{ 0x040f, KEY_NUMERIC_7 },
{ 0x0410, KEY_NUMERIC_8 },
{ 0x0411, KEY_NUMERIC_9 },
{ 0x044c, KEY_DOT }, /* '.' */
{ 0x0412, KEY_NUMERIC_0 },
{ 0x0407, KEY_REFRESH }, /* Refresh/Reload */
{ 0x0413, KEY_AUDIO },
{ 0x0440, KEY_SCREEN }, /* Full Screen toggle */
{ 0x0441, KEY_HOME },
{ 0x0442, KEY_BACK },
{ 0x0447, KEY_UP },
{ 0x0448, KEY_DOWN },
{ 0x0449, KEY_LEFT },
{ 0x044a, KEY_RIGHT },
{ 0x044b, KEY_OK },
{ 0x0404, KEY_VOLUMEUP },
{ 0x0405, KEY_VOLUMEDOWN },
{ 0x0402, KEY_CHANNELUP },
{ 0x0403, KEY_CHANNELDOWN },
{ 0x0443, KEY_RED },
{ 0x0444, KEY_GREEN },
{ 0x0445, KEY_YELLOW },
{ 0x0446, KEY_BLUE },
{ 0x0414, KEY_TEXT },
{ 0x0415, KEY_EPG },
{ 0x041a, KEY_TV2 }, /* PIP */
{ 0x041b, KEY_CAMERA }, /* Snapshot */
{ 0x0417, KEY_RECORD },
{ 0x0416, KEY_PLAYPAUSE },
{ 0x0418, KEY_STOP },
{ 0x0419, KEY_PAUSE },
{ 0x041f, KEY_PREVIOUS },
{ 0x041c, KEY_REWIND },
{ 0x041d, KEY_FORWARD },
{ 0x041e, KEY_NEXT },
};
static struct rc_map_list avermedia_m733a_rm_k6_map = {
.map = {
.scan = avermedia_m733a_rm_k6,
.size = ARRAY_SIZE(avermedia_m733a_rm_k6),
.rc_proto = RC_PROTO_NEC,
.name = RC_MAP_AVERMEDIA_M733A_RM_K6,
}
};
static int __init init_rc_map_avermedia_m733a_rm_k6(void)
{
return rc_map_register(&avermedia_m733a_rm_k6_map);
}
static void __exit exit_rc_map_avermedia_m733a_rm_k6(void)
{
rc_map_unregister(&avermedia_m733a_rm_k6_map);
}
module_init(init_rc_map_avermedia_m733a_rm_k6)
module_exit(exit_rc_map_avermedia_m733a_rm_k6)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2019 Christian Hewitt <[email protected]>
#include <media/rc-map.h>
#include <linux/module.h>
//
// Keytable for the HardKernel ODROID remote control
//
static struct rc_map_table odroid[] = {
{ 0xb2dc, KEY_POWER },
{ 0xb288, KEY_MUTE },
{ 0xb282, KEY_HOME },
{ 0xb2ca, KEY_UP },
{ 0xb299, KEY_LEFT },
{ 0xb2ce, KEY_OK },
{ 0xb2c1, KEY_RIGHT },
{ 0xb2d2, KEY_DOWN },
{ 0xb2c5, KEY_MENU },
{ 0xb29a, KEY_BACK },
{ 0xb281, KEY_VOLUMEDOWN },
{ 0xb280, KEY_VOLUMEUP },
};
static struct rc_map_list odroid_map = {
.map = {
.scan = odroid,
.size = ARRAY_SIZE(odroid),
.rc_proto = RC_PROTO_NEC,
.name = RC_MAP_ODROID,
}
};
static int __init init_rc_map_odroid(void)
{
return rc_map_register(&odroid_map);
}
static void __exit exit_rc_map_odroid(void)
{
rc_map_unregister(&odroid_map);
}
module_init(init_rc_map_odroid)
module_exit(exit_rc_map_odroid)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <[email protected]");
| linux-master | drivers/media/rc/keymaps/rc-odroid.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* rc-dvbsky.c - Keytable for DVBSky Remote Controllers
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010-2012 by Nibble Max <[email protected]>
*/
#include <media/rc-map.h>
#include <linux/module.h>
/*
* This table contains the complete RC5 code, instead of just the data part
*/
static struct rc_map_table rc5_dvbsky[] = {
{ 0x0000, KEY_NUMERIC_0 },
{ 0x0001, KEY_NUMERIC_1 },
{ 0x0002, KEY_NUMERIC_2 },
{ 0x0003, KEY_NUMERIC_3 },
{ 0x0004, KEY_NUMERIC_4 },
{ 0x0005, KEY_NUMERIC_5 },
{ 0x0006, KEY_NUMERIC_6 },
{ 0x0007, KEY_NUMERIC_7 },
{ 0x0008, KEY_NUMERIC_8 },
{ 0x0009, KEY_NUMERIC_9 },
{ 0x000a, KEY_MUTE },
{ 0x000d, KEY_OK },
{ 0x000b, KEY_STOP },
{ 0x000c, KEY_EXIT },
{ 0x000e, KEY_CAMERA }, /*Snap shot*/
{ 0x000f, KEY_SUBTITLE }, /*PIP*/
{ 0x0010, KEY_VOLUMEUP },
{ 0x0011, KEY_VOLUMEDOWN },
{ 0x0012, KEY_FAVORITES },
{ 0x0013, KEY_LIST }, /*Info*/
{ 0x0016, KEY_PAUSE },
{ 0x0017, KEY_PLAY },
{ 0x001f, KEY_RECORD },
{ 0x0020, KEY_CHANNELDOWN },
{ 0x0021, KEY_CHANNELUP },
{ 0x0025, KEY_POWER2 },
{ 0x0026, KEY_REWIND },
{ 0x0027, KEY_FASTFORWARD },
{ 0x0029, KEY_LAST },
{ 0x002b, KEY_MENU },
{ 0x002c, KEY_EPG },
{ 0x002d, KEY_ZOOM },
};
static struct rc_map_list rc5_dvbsky_map = {
.map = {
.scan = rc5_dvbsky,
.size = ARRAY_SIZE(rc5_dvbsky),
.rc_proto = RC_PROTO_RC5,
.name = RC_MAP_DVBSKY,
}
};
static int __init init_rc_map_rc5_dvbsky(void)
{
return rc_map_register(&rc5_dvbsky_map);
}
static void __exit exit_rc_map_rc5_dvbsky(void)
{
rc_map_unregister(&rc5_dvbsky_map);
}
module_init(init_rc_map_rc5_dvbsky)
module_exit(exit_rc_map_rc5_dvbsky)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nibble Max <[email protected]>");
| linux-master | drivers/media/rc/keymaps/rc-dvbsky.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Total Media In Hand_02 remote controller keytable for Mygica X8507
*
* Copyright (C) 2012 Alfredo J. Delaiti <[email protected]>
*/
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table total_media_in_hand_02[] = {
{ 0x0000, KEY_NUMERIC_0 },
{ 0x0001, KEY_NUMERIC_1 },
{ 0x0002, KEY_NUMERIC_2 },
{ 0x0003, KEY_NUMERIC_3 },
{ 0x0004, KEY_NUMERIC_4 },
{ 0x0005, KEY_NUMERIC_5 },
{ 0x0006, KEY_NUMERIC_6 },
{ 0x0007, KEY_NUMERIC_7 },
{ 0x0008, KEY_NUMERIC_8 },
{ 0x0009, KEY_NUMERIC_9 },
{ 0x000a, KEY_MUTE },
{ 0x000b, KEY_STOP }, /* Stop */
{ 0x000c, KEY_POWER2 }, /* Turn on/off application */
{ 0x000d, KEY_OK }, /* OK */
{ 0x000e, KEY_CAMERA }, /* Snapshot */
{ 0x000f, KEY_ZOOM }, /* Full Screen/Restore */
{ 0x0010, KEY_RIGHT }, /* Right arrow */
{ 0x0011, KEY_LEFT }, /* Left arrow */
{ 0x0012, KEY_CHANNELUP },
{ 0x0013, KEY_CHANNELDOWN },
{ 0x0014, KEY_SHUFFLE },
{ 0x0016, KEY_PAUSE },
{ 0x0017, KEY_PLAY }, /* Play */
{ 0x001e, KEY_TIME }, /* Time Shift */
{ 0x001f, KEY_RECORD },
{ 0x0020, KEY_UP },
{ 0x0021, KEY_DOWN },
{ 0x0025, KEY_POWER }, /* Turn off computer */
{ 0x0026, KEY_REWIND }, /* FR << */
{ 0x0027, KEY_FASTFORWARD }, /* FF >> */
{ 0x0029, KEY_ESC },
{ 0x002b, KEY_VOLUMEUP },
{ 0x002c, KEY_VOLUMEDOWN },
{ 0x002d, KEY_CHANNEL }, /* CH Surfing */
{ 0x0038, KEY_VIDEO }, /* TV/AV/S-Video/YPbPr */
};
static struct rc_map_list total_media_in_hand_02_map = {
.map = {
.scan = total_media_in_hand_02,
.size = ARRAY_SIZE(total_media_in_hand_02),
.rc_proto = RC_PROTO_RC5,
.name = RC_MAP_TOTAL_MEDIA_IN_HAND_02,
}
};
static int __init init_rc_map_total_media_in_hand_02(void)
{
return rc_map_register(&total_media_in_hand_02_map);
}
static void __exit exit_rc_map_total_media_in_hand_02(void)
{
rc_map_unregister(&total_media_in_hand_02_map);
}
module_init(init_rc_map_total_media_in_hand_02)
module_exit(exit_rc_map_total_media_in_hand_02)
MODULE_LICENSE("GPL");
MODULE_AUTHOR(" Alfredo J. Delaiti <[email protected]>");
| linux-master | drivers/media/rc/keymaps/rc-total-media-in-hand-02.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* ITE Generic remotes Version 1
*
* Copyright (C) 2012 Malcolm Priestley ([email protected])
*/
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table it913x_v1_rc[] = {
/* Type 1 */
{ 0x61d601, KEY_VIDEO }, /* Source */
{ 0x61d602, KEY_NUMERIC_3 },
{ 0x61d603, KEY_POWER }, /* ShutDown */
{ 0x61d604, KEY_NUMERIC_1 },
{ 0x61d605, KEY_NUMERIC_5 },
{ 0x61d606, KEY_NUMERIC_6 },
{ 0x61d607, KEY_CHANNELDOWN }, /* CH- */
{ 0x61d608, KEY_NUMERIC_2 },
{ 0x61d609, KEY_CHANNELUP }, /* CH+ */
{ 0x61d60a, KEY_NUMERIC_9 },
{ 0x61d60b, KEY_ZOOM }, /* Zoom */
{ 0x61d60c, KEY_NUMERIC_7 },
{ 0x61d60d, KEY_NUMERIC_8 },
{ 0x61d60e, KEY_VOLUMEUP }, /* Vol+ */
{ 0x61d60f, KEY_NUMERIC_4 },
{ 0x61d610, KEY_ESC }, /* [back up arrow] */
{ 0x61d611, KEY_NUMERIC_0 },
{ 0x61d612, KEY_OK }, /* [enter arrow] */
{ 0x61d613, KEY_VOLUMEDOWN }, /* Vol- */
{ 0x61d614, KEY_RECORD }, /* Rec */
{ 0x61d615, KEY_STOP }, /* Stop */
{ 0x61d616, KEY_PLAY }, /* Play */
{ 0x61d617, KEY_MUTE }, /* Mute */
{ 0x61d618, KEY_UP },
{ 0x61d619, KEY_DOWN },
{ 0x61d61a, KEY_LEFT },
{ 0x61d61b, KEY_RIGHT },
{ 0x61d61c, KEY_RED },
{ 0x61d61d, KEY_GREEN },
{ 0x61d61e, KEY_YELLOW },
{ 0x61d61f, KEY_BLUE },
{ 0x61d643, KEY_POWER2 }, /* [red power button] */
/* Type 2 - 20 buttons */
{ 0x807f0d, KEY_NUMERIC_0 },
{ 0x807f04, KEY_NUMERIC_1 },
{ 0x807f05, KEY_NUMERIC_2 },
{ 0x807f06, KEY_NUMERIC_3 },
{ 0x807f07, KEY_NUMERIC_4 },
{ 0x807f08, KEY_NUMERIC_5 },
{ 0x807f09, KEY_NUMERIC_6 },
{ 0x807f0a, KEY_NUMERIC_7 },
{ 0x807f1b, KEY_NUMERIC_8 },
{ 0x807f1f, KEY_NUMERIC_9 },
{ 0x807f12, KEY_POWER },
{ 0x807f01, KEY_MEDIA_REPEAT}, /* Recall */
{ 0x807f19, KEY_PAUSE }, /* Timeshift */
{ 0x807f1e, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
{ 0x807f03, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
{ 0x807f1a, KEY_CHANNELUP },
{ 0x807f02, KEY_CHANNELDOWN },
{ 0x807f0c, KEY_ZOOM },
{ 0x807f00, KEY_RECORD },
{ 0x807f0e, KEY_STOP },
};
static struct rc_map_list it913x_v1_map = {
.map = {
.scan = it913x_v1_rc,
.size = ARRAY_SIZE(it913x_v1_rc),
.rc_proto = RC_PROTO_NECX,
.name = RC_MAP_IT913X_V1,
}
};
static int __init init_rc_it913x_v1_map(void)
{
return rc_map_register(&it913x_v1_map);
}
static void __exit exit_rc_it913x_v1_map(void)
{
rc_map_unregister(&it913x_v1_map);
}
module_init(init_rc_it913x_v1_map)
module_exit(exit_rc_it913x_v1_map)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Malcolm Priestley [email protected]");
| linux-master | drivers/media/rc/keymaps/rc-it913x-v1.c |
// SPDX-License-Identifier: GPL-2.0+
// behold.h - Keytable for behold Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
/*
* Igor Kuznetsov <[email protected]>
* Andrey J. Melnikov <[email protected]>
*
* Keytable is used by BeholdTV 60x series, M6 series at
* least, and probably other cards too.
* The "ascii-art picture" below (in comments, first row
* is the keycode in hex, and subsequent row(s) shows
* the button labels (several variants when appropriate)
* helps to decide which keycodes to assign to the buttons.
*/
static struct rc_map_table behold[] = {
/* 0x1c 0x12 *
* TV/FM POWER *
* */
{ 0x866b1c, KEY_TUNER }, /* XXX KEY_TV / KEY_RADIO */
{ 0x866b12, KEY_POWER },
/* 0x01 0x02 0x03 *
* 1 2 3 *
* *
* 0x04 0x05 0x06 *
* 4 5 6 *
* *
* 0x07 0x08 0x09 *
* 7 8 9 *
* */
{ 0x866b01, KEY_NUMERIC_1 },
{ 0x866b02, KEY_NUMERIC_2 },
{ 0x866b03, KEY_NUMERIC_3 },
{ 0x866b04, KEY_NUMERIC_4 },
{ 0x866b05, KEY_NUMERIC_5 },
{ 0x866b06, KEY_NUMERIC_6 },
{ 0x866b07, KEY_NUMERIC_7 },
{ 0x866b08, KEY_NUMERIC_8 },
{ 0x866b09, KEY_NUMERIC_9 },
/* 0x0a 0x00 0x17 *
* RECALL 0 MODE *
* */
{ 0x866b0a, KEY_AGAIN },
{ 0x866b00, KEY_NUMERIC_0 },
{ 0x866b17, KEY_MODE },
/* 0x14 0x10 *
* ASPECT FULLSCREEN *
* */
{ 0x866b14, KEY_SCREEN },
{ 0x866b10, KEY_ZOOM },
/* 0x0b *
* Up *
* *
* 0x18 0x16 0x0c *
* Left Ok Right *
* *
* 0x015 *
* Down *
* */
{ 0x866b0b, KEY_CHANNELUP },
{ 0x866b18, KEY_VOLUMEDOWN },
{ 0x866b16, KEY_OK }, /* XXX KEY_ENTER */
{ 0x866b0c, KEY_VOLUMEUP },
{ 0x866b15, KEY_CHANNELDOWN },
/* 0x11 0x0d *
* MUTE INFO *
* */
{ 0x866b11, KEY_MUTE },
{ 0x866b0d, KEY_INFO },
/* 0x0f 0x1b 0x1a *
* RECORD PLAY/PAUSE STOP *
* *
* 0x0e 0x1f 0x1e *
*TELETEXT AUDIO SOURCE *
* RED YELLOW *
* */
{ 0x866b0f, KEY_RECORD },
{ 0x866b1b, KEY_PLAYPAUSE },
{ 0x866b1a, KEY_STOP },
{ 0x866b0e, KEY_TEXT },
{ 0x866b1f, KEY_RED }, /*XXX KEY_AUDIO */
{ 0x866b1e, KEY_VIDEO },
/* 0x1d 0x13 0x19 *
* SLEEP PREVIEW DVB *
* GREEN BLUE *
* */
{ 0x866b1d, KEY_SLEEP },
{ 0x866b13, KEY_GREEN },
{ 0x866b19, KEY_BLUE }, /* XXX KEY_SAT */
/* 0x58 0x5c *
* FREEZE SNAPSHOT *
* */
{ 0x866b58, KEY_SLOW },
{ 0x866b5c, KEY_CAMERA },
};
static struct rc_map_list behold_map = {
.map = {
.scan = behold,
.size = ARRAY_SIZE(behold),
.rc_proto = RC_PROTO_NECX,
.name = RC_MAP_BEHOLD,
}
};
static int __init init_rc_map_behold(void)
{
return rc_map_register(&behold_map);
}
static void __exit exit_rc_map_behold(void)
{
rc_map_unregister(&behold_map);
}
module_init(init_rc_map_behold)
module_exit(exit_rc_map_behold)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-behold.c |
// SPDX-License-Identifier: GPL-2.0+
// purpletv.h - Keytable for purpletv Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table purpletv[] = {
{ 0x03, KEY_POWER },
{ 0x6f, KEY_MUTE },
{ 0x10, KEY_BACKSPACE }, /* Recall */
{ 0x11, KEY_NUMERIC_0 },
{ 0x04, KEY_NUMERIC_1 },
{ 0x05, KEY_NUMERIC_2 },
{ 0x06, KEY_NUMERIC_3 },
{ 0x08, KEY_NUMERIC_4 },
{ 0x09, KEY_NUMERIC_5 },
{ 0x0a, KEY_NUMERIC_6 },
{ 0x0c, KEY_NUMERIC_7 },
{ 0x0d, KEY_NUMERIC_8 },
{ 0x0e, KEY_NUMERIC_9 },
{ 0x12, KEY_DOT }, /* 100+ */
{ 0x07, KEY_VOLUMEUP },
{ 0x0b, KEY_VOLUMEDOWN },
{ 0x1a, KEY_KPPLUS },
{ 0x18, KEY_KPMINUS },
{ 0x15, KEY_UP },
{ 0x1d, KEY_DOWN },
{ 0x0f, KEY_CHANNELUP },
{ 0x13, KEY_CHANNELDOWN },
{ 0x48, KEY_ZOOM },
{ 0x1b, KEY_VIDEO }, /* Video source */
{ 0x1f, KEY_CAMERA }, /* Snapshot */
{ 0x49, KEY_LANGUAGE }, /* MTS Select */
{ 0x19, KEY_SEARCH }, /* Auto Scan */
{ 0x4b, KEY_RECORD },
{ 0x46, KEY_PLAY },
{ 0x45, KEY_PAUSE }, /* Pause */
{ 0x44, KEY_STOP },
{ 0x43, KEY_TIME }, /* Time Shift */
{ 0x17, KEY_CHANNEL }, /* SURF CH */
{ 0x40, KEY_FORWARD }, /* Forward ? */
{ 0x42, KEY_REWIND }, /* Backward ? */
};
static struct rc_map_list purpletv_map = {
.map = {
.scan = purpletv,
.size = ARRAY_SIZE(purpletv),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_PURPLETV,
}
};
static int __init init_rc_map_purpletv(void)
{
return rc_map_register(&purpletv_map);
}
static void __exit exit_rc_map_purpletv(void)
{
rc_map_unregister(&purpletv_map);
}
module_init(init_rc_map_purpletv)
module_exit(exit_rc_map_purpletv)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-purpletv.c |
// SPDX-License-Identifier: GPL-2.0+
// tbs-nec.h - Keytable for tbs_nec Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table tbs_nec[] = {
{ 0x84, KEY_POWER2}, /* power */
{ 0x94, KEY_MUTE}, /* mute */
{ 0x87, KEY_NUMERIC_1},
{ 0x86, KEY_NUMERIC_2},
{ 0x85, KEY_NUMERIC_3},
{ 0x8b, KEY_NUMERIC_4},
{ 0x8a, KEY_NUMERIC_5},
{ 0x89, KEY_NUMERIC_6},
{ 0x8f, KEY_NUMERIC_7},
{ 0x8e, KEY_NUMERIC_8},
{ 0x8d, KEY_NUMERIC_9},
{ 0x92, KEY_NUMERIC_0},
{ 0xc0, KEY_10CHANNELSUP}, /* 10+ */
{ 0xd0, KEY_10CHANNELSDOWN}, /* 10- */
{ 0x96, KEY_CHANNELUP}, /* ch+ */
{ 0x91, KEY_CHANNELDOWN}, /* ch- */
{ 0x93, KEY_VOLUMEUP}, /* vol+ */
{ 0x8c, KEY_VOLUMEDOWN}, /* vol- */
{ 0x83, KEY_RECORD}, /* rec */
{ 0x98, KEY_PAUSE}, /* pause, yellow */
{ 0x99, KEY_OK}, /* ok */
{ 0x9a, KEY_CAMERA}, /* snapshot */
{ 0x81, KEY_UP},
{ 0x90, KEY_LEFT},
{ 0x82, KEY_RIGHT},
{ 0x88, KEY_DOWN},
{ 0x95, KEY_FAVORITES}, /* blue */
{ 0x97, KEY_SUBTITLE}, /* green */
{ 0x9d, KEY_ZOOM},
{ 0x9f, KEY_EXIT},
{ 0x9e, KEY_MENU},
{ 0x9c, KEY_EPG},
{ 0x80, KEY_PREVIOUS}, /* red */
{ 0x9b, KEY_MODE},
};
static struct rc_map_list tbs_nec_map = {
.map = {
.scan = tbs_nec,
.size = ARRAY_SIZE(tbs_nec),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_TBS_NEC,
}
};
static int __init init_rc_map_tbs_nec(void)
{
return rc_map_register(&tbs_nec_map);
}
static void __exit exit_rc_map_tbs_nec(void)
{
rc_map_unregister(&tbs_nec_map);
}
module_init(init_rc_map_tbs_nec)
module_exit(exit_rc_map_tbs_nec)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-tbs-nec.c |
// SPDX-License-Identifier: GPL-2.0+
// winfast-usbii-deluxe.h - Keytable for winfast_usbii_deluxe Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
/* Leadtek Winfast TV USB II Deluxe remote
Magnus Alm <[email protected]>
*/
static struct rc_map_table winfast_usbii_deluxe[] = {
{ 0x62, KEY_NUMERIC_0},
{ 0x75, KEY_NUMERIC_1},
{ 0x76, KEY_NUMERIC_2},
{ 0x77, KEY_NUMERIC_3},
{ 0x79, KEY_NUMERIC_4},
{ 0x7a, KEY_NUMERIC_5},
{ 0x7b, KEY_NUMERIC_6},
{ 0x7d, KEY_NUMERIC_7},
{ 0x7e, KEY_NUMERIC_8},
{ 0x7f, KEY_NUMERIC_9},
{ 0x38, KEY_CAMERA}, /* SNAPSHOT */
{ 0x37, KEY_RECORD}, /* RECORD */
{ 0x35, KEY_TIME}, /* TIMESHIFT */
{ 0x74, KEY_VOLUMEUP}, /* VOLUMEUP */
{ 0x78, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
{ 0x64, KEY_MUTE}, /* MUTE */
{ 0x21, KEY_CHANNEL}, /* SURF */
{ 0x7c, KEY_CHANNELUP}, /* CHANNELUP */
{ 0x60, KEY_CHANNELDOWN}, /* CHANNELDOWN */
{ 0x61, KEY_LAST}, /* LAST CHANNEL (RECALL) */
{ 0x72, KEY_VIDEO}, /* INPUT MODES (TV/FM) */
{ 0x70, KEY_POWER2}, /* TV ON/OFF */
{ 0x39, KEY_CYCLEWINDOWS}, /* MINIMIZE (BOSS) */
{ 0x3a, KEY_NEW}, /* PIP */
{ 0x73, KEY_ZOOM}, /* FULLSCREEN */
{ 0x66, KEY_INFO}, /* OSD (DISPLAY) */
{ 0x31, KEY_DOT}, /* '.' */
{ 0x63, KEY_ENTER}, /* ENTER */
};
static struct rc_map_list winfast_usbii_deluxe_map = {
.map = {
.scan = winfast_usbii_deluxe,
.size = ARRAY_SIZE(winfast_usbii_deluxe),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_WINFAST_USBII_DELUXE,
}
};
static int __init init_rc_map_winfast_usbii_deluxe(void)
{
return rc_map_register(&winfast_usbii_deluxe_map);
}
static void __exit exit_rc_map_winfast_usbii_deluxe(void)
{
rc_map_unregister(&winfast_usbii_deluxe_map);
}
module_init(init_rc_map_winfast_usbii_deluxe)
module_exit(exit_rc_map_winfast_usbii_deluxe)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Keytable for remote controller of HiSilicon poplar board.
*
* Copyright (c) 2017 HiSilicon Technologies Co., Ltd.
*/
#include <linux/module.h>
#include <media/rc-map.h>
static struct rc_map_table hisi_poplar_keymap[] = {
{ 0x0000b292, KEY_NUMERIC_1},
{ 0x0000b293, KEY_NUMERIC_2},
{ 0x0000b2cc, KEY_NUMERIC_3},
{ 0x0000b28e, KEY_NUMERIC_4},
{ 0x0000b28f, KEY_NUMERIC_5},
{ 0x0000b2c8, KEY_NUMERIC_6},
{ 0x0000b28a, KEY_NUMERIC_7},
{ 0x0000b28b, KEY_NUMERIC_8},
{ 0x0000b2c4, KEY_NUMERIC_9},
{ 0x0000b287, KEY_NUMERIC_0},
{ 0x0000b282, KEY_HOMEPAGE},
{ 0x0000b2ca, KEY_UP},
{ 0x0000b299, KEY_LEFT},
{ 0x0000b2c1, KEY_RIGHT},
{ 0x0000b2d2, KEY_DOWN},
{ 0x0000b2c5, KEY_DELETE},
{ 0x0000b29c, KEY_MUTE},
{ 0x0000b281, KEY_VOLUMEDOWN},
{ 0x0000b280, KEY_VOLUMEUP},
{ 0x0000b2dc, KEY_POWER},
{ 0x0000b29a, KEY_MENU},
{ 0x0000b28d, KEY_SETUP},
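/* note: 0x0000b2c5 below duplicates the KEY_DELETE entry above */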
{ 0x0000b2c5, KEY_BACK},
{ 0x0000b295, KEY_PLAYPAUSE},
{ 0x0000b2ce, KEY_ENTER},
{ 0x0000b285, KEY_CHANNELUP},
{ 0x0000b286, KEY_CHANNELDOWN},
{ 0x0000b2da, KEY_NUMERIC_STAR},
{ 0x0000b2d0, KEY_NUMERIC_POUND},
};
static struct rc_map_list hisi_poplar_map = {
.map = {
.scan = hisi_poplar_keymap,
.size = ARRAY_SIZE(hisi_poplar_keymap),
.rc_proto = RC_PROTO_NEC,
.name = RC_MAP_HISI_POPLAR,
}
};
static int __init init_rc_map_hisi_poplar(void)
{
return rc_map_register(&hisi_poplar_map);
}
static void __exit exit_rc_map_hisi_poplar(void)
{
rc_map_unregister(&hisi_poplar_map);
}
module_init(init_rc_map_hisi_poplar)
module_exit(exit_rc_map_hisi_poplar)
MODULE_LICENSE("GPL v2");
| linux-master | drivers/media/rc/keymaps/rc-hisi-poplar.c |
// SPDX-License-Identifier: GPL-2.0+
// asus-ps3-100.h - Keytable for asus_ps3_100 Remote Controller
//
// Copyright (c) 2012 by Mauro Carvalho Chehab
//
// Based on a previous patch from Remi Schwartz <[email protected]>
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table asus_ps3_100[] = {
{ 0x081c, KEY_HOME }, /* home */
{ 0x081e, KEY_TV }, /* tv */
{ 0x0803, KEY_TEXT }, /* teletext */
{ 0x0829, KEY_POWER }, /* close */
{ 0x080b, KEY_RED }, /* red */
{ 0x080d, KEY_YELLOW }, /* yellow */
{ 0x0806, KEY_BLUE }, /* blue */
{ 0x0807, KEY_GREEN }, /* green */
/* Keys 0 to 9 */
{ 0x082a, KEY_NUMERIC_0 },
{ 0x0816, KEY_NUMERIC_1 },
{ 0x0812, KEY_NUMERIC_2 },
{ 0x0814, KEY_NUMERIC_3 },
{ 0x0836, KEY_NUMERIC_4 },
{ 0x0832, KEY_NUMERIC_5 },
{ 0x0834, KEY_NUMERIC_6 },
{ 0x080e, KEY_NUMERIC_7 },
{ 0x080a, KEY_NUMERIC_8 },
{ 0x080c, KEY_NUMERIC_9 },
{ 0x0815, KEY_VOLUMEUP },
{ 0x0826, KEY_VOLUMEDOWN },
{ 0x0835, KEY_CHANNELUP }, /* channel / program + */
{ 0x0824, KEY_CHANNELDOWN }, /* channel / program - */
{ 0x0808, KEY_UP },
{ 0x0804, KEY_DOWN },
{ 0x0818, KEY_LEFT },
{ 0x0810, KEY_RIGHT },
{ 0x0825, KEY_ENTER }, /* enter */
{ 0x0822, KEY_EXIT }, /* back */
{ 0x082c, KEY_AB }, /* recall */
{ 0x0820, KEY_AUDIO }, /* TV audio */
{ 0x0837, KEY_SCREEN }, /* snapshot */
{ 0x082e, KEY_ZOOM }, /* full screen */
{ 0x0802, KEY_MUTE }, /* mute */
{ 0x0831, KEY_REWIND }, /* backward << */
{ 0x0811, KEY_RECORD }, /* recording */
{ 0x0809, KEY_STOP },
{ 0x0805, KEY_FASTFORWARD }, /* forward >> */
{ 0x0821, KEY_PREVIOUS }, /* rew */
{ 0x081a, KEY_PAUSE }, /* pause */
{ 0x0839, KEY_PLAY }, /* play */
{ 0x0819, KEY_NEXT }, /* forward */
};
static struct rc_map_list asus_ps3_100_map = {
.map = {
.scan = asus_ps3_100,
.size = ARRAY_SIZE(asus_ps3_100),
.rc_proto = RC_PROTO_RC5,
.name = RC_MAP_ASUS_PS3_100,
}
};
static int __init init_rc_map_asus_ps3_100(void)
{
return rc_map_register(&asus_ps3_100_map);
}
static void __exit exit_rc_map_asus_ps3_100(void)
{
rc_map_unregister(&asus_ps3_100_map);
}
module_init(init_rc_map_asus_ps3_100)
module_exit(exit_rc_map_asus_ps3_100)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-asus-ps3-100.c |
// SPDX-License-Identifier: GPL-2.0+
// pixelview-new.h - Keytable for pixelview_new Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
/*
Mauro Carvalho Chehab <[email protected]>
present on PV MPEG 8000GT
*/
static struct rc_map_table pixelview_new[] = {
{ 0x3c, KEY_TIME }, /* Timeshift */
{ 0x12, KEY_POWER },
{ 0x3d, KEY_NUMERIC_1 },
{ 0x38, KEY_NUMERIC_2 },
{ 0x18, KEY_NUMERIC_3 },
{ 0x35, KEY_NUMERIC_4 },
{ 0x39, KEY_NUMERIC_5 },
{ 0x15, KEY_NUMERIC_6 },
{ 0x36, KEY_NUMERIC_7 },
{ 0x3a, KEY_NUMERIC_8 },
{ 0x1e, KEY_NUMERIC_9 },
{ 0x3e, KEY_NUMERIC_0 },
{ 0x1c, KEY_AGAIN }, /* LOOP */
{ 0x3f, KEY_VIDEO }, /* Source */
{ 0x1f, KEY_LAST }, /* +100 */
{ 0x1b, KEY_MUTE },
{ 0x17, KEY_CHANNELDOWN },
{ 0x16, KEY_CHANNELUP },
{ 0x10, KEY_VOLUMEUP },
{ 0x14, KEY_VOLUMEDOWN },
{ 0x13, KEY_ZOOM },
{ 0x19, KEY_CAMERA }, /* SNAPSHOT */
{ 0x1a, KEY_SEARCH }, /* scan */
{ 0x37, KEY_REWIND }, /* << */
{ 0x32, KEY_RECORD }, /* o (red) */
{ 0x33, KEY_FORWARD }, /* >> */
{ 0x11, KEY_STOP }, /* square */
{ 0x3b, KEY_PLAY }, /* > */
{ 0x30, KEY_PLAYPAUSE }, /* || */
{ 0x31, KEY_TV },
{ 0x34, KEY_RADIO },
};
static struct rc_map_list pixelview_new_map = {
.map = {
.scan = pixelview_new,
.size = ARRAY_SIZE(pixelview_new),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_PIXELVIEW_NEW,
}
};
static int __init init_rc_map_pixelview_new(void)
{
return rc_map_register(&pixelview_new_map);
}
static void __exit exit_rc_map_pixelview_new(void)
{
rc_map_unregister(&pixelview_new_map);
}
module_init(init_rc_map_pixelview_new)
module_exit(exit_rc_map_pixelview_new)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-pixelview-new.c |
// SPDX-License-Identifier: GPL-2.0+
// rc-dvb0700-big.c - Keytable for devices in dvb0700
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
//
// TODO: This table is a real mess, as it merges RC codes from several
// devices into a big table. It also has both RC-5 and NEC codes inside.
// It should be broken into small tables, and the protocols should be
// properly identified.
//
// The table was imported from dib0700_devices.c.
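//
// A split along those lines would give each remote its own table and
// protocol; a sketch (names hypothetical) for the Pixelview SBTVD part:
//
//	static struct rc_map_table pixelview_sbtvd[] = { /* 0x866bxx rows */ };
//	static struct rc_map_list pixelview_sbtvd_map = {
//		.map = {
//			.scan     = pixelview_sbtvd,
//			.size     = ARRAY_SIZE(pixelview_sbtvd),
//			.rc_proto = RC_PROTO_NEC,
//			.name     = "rc-pixelview-sbtvd",
//		}
//	};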
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table dib0700_nec_table[] = {
/* Key codes for the Pixelview SBTVD remote */
{ 0x866b13, KEY_MUTE },
{ 0x866b12, KEY_POWER },
{ 0x866b01, KEY_NUMERIC_1 },
{ 0x866b02, KEY_NUMERIC_2 },
{ 0x866b03, KEY_NUMERIC_3 },
{ 0x866b04, KEY_NUMERIC_4 },
{ 0x866b05, KEY_NUMERIC_5 },
{ 0x866b06, KEY_NUMERIC_6 },
{ 0x866b07, KEY_NUMERIC_7 },
{ 0x866b08, KEY_NUMERIC_8 },
{ 0x866b09, KEY_NUMERIC_9 },
{ 0x866b00, KEY_NUMERIC_0 },
{ 0x866b0d, KEY_CHANNELUP },
{ 0x866b19, KEY_CHANNELDOWN },
{ 0x866b10, KEY_VOLUMEUP },
{ 0x866b0c, KEY_VOLUMEDOWN },
{ 0x866b0a, KEY_CAMERA },
{ 0x866b0b, KEY_ZOOM },
{ 0x866b1b, KEY_BACKSPACE },
{ 0x866b15, KEY_ENTER },
{ 0x866b1d, KEY_UP },
{ 0x866b1e, KEY_DOWN },
{ 0x866b0e, KEY_LEFT },
{ 0x866b0f, KEY_RIGHT },
{ 0x866b18, KEY_RECORD },
{ 0x866b1a, KEY_STOP },
/* Key codes for the EvolutePC TVWay+ remote */
{ 0x7a00, KEY_MENU },
{ 0x7a01, KEY_RECORD },
{ 0x7a02, KEY_PLAY },
{ 0x7a03, KEY_STOP },
{ 0x7a10, KEY_CHANNELUP },
{ 0x7a11, KEY_CHANNELDOWN },
{ 0x7a12, KEY_VOLUMEUP },
{ 0x7a13, KEY_VOLUMEDOWN },
{ 0x7a40, KEY_POWER },
{ 0x7a41, KEY_MUTE },
/* Key codes for the Elgato EyeTV Diversity silver remote */
{ 0x4501, KEY_POWER },
{ 0x4502, KEY_MUTE },
{ 0x4503, KEY_NUMERIC_1 },
{ 0x4504, KEY_NUMERIC_2 },
{ 0x4505, KEY_NUMERIC_3 },
{ 0x4506, KEY_NUMERIC_4 },
{ 0x4507, KEY_NUMERIC_5 },
{ 0x4508, KEY_NUMERIC_6 },
{ 0x4509, KEY_NUMERIC_7 },
{ 0x450a, KEY_NUMERIC_8 },
{ 0x450b, KEY_NUMERIC_9 },
{ 0x450c, KEY_LAST },
{ 0x450d, KEY_NUMERIC_0 },
{ 0x450e, KEY_ENTER },
{ 0x450f, KEY_RED },
{ 0x4510, KEY_CHANNELUP },
{ 0x4511, KEY_GREEN },
{ 0x4512, KEY_VOLUMEDOWN },
{ 0x4513, KEY_OK },
{ 0x4514, KEY_VOLUMEUP },
{ 0x4515, KEY_YELLOW },
{ 0x4516, KEY_CHANNELDOWN },
{ 0x4517, KEY_BLUE },
{ 0x4518, KEY_LEFT }, /* Skip backwards */
{ 0x4519, KEY_PLAYPAUSE },
{ 0x451a, KEY_RIGHT }, /* Skip forward */
{ 0x451b, KEY_REWIND },
{ 0x451c, KEY_L }, /* Live */
{ 0x451d, KEY_FASTFORWARD },
{ 0x451e, KEY_STOP }, /* 'Reveal' for Teletext */
{ 0x451f, KEY_MENU }, /* KEY_TEXT for Teletext */
{ 0x4540, KEY_RECORD }, /* Font 'Size' for Teletext */
{ 0x4541, KEY_SCREEN }, /* Full screen toggle, 'Hold' for Teletext */
{ 0x4542, KEY_SELECT }, /* Select video input, 'Select' for Teletext */
};
static struct rc_map_list dib0700_nec_map = {
.map = {
.scan = dib0700_nec_table,
.size = ARRAY_SIZE(dib0700_nec_table),
.rc_proto = RC_PROTO_NEC,
.name = RC_MAP_DIB0700_NEC_TABLE,
}
};
static int __init init_rc_map(void)
{
return rc_map_register(&dib0700_nec_map);
}
static void __exit exit_rc_map(void)
{
rc_map_unregister(&dib0700_nec_map);
}
module_init(init_rc_map)
module_exit(exit_rc_map)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-dib0700-nec.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2019 Mohammad Rasim <[email protected]>
#include <media/rc-map.h>
#include <linux/module.h>
//
// Keytable for the Videostrong KII Pro STB remote control
//
static struct rc_map_table kii_pro[] = {
{ 0x59, KEY_POWER },
{ 0x19, KEY_MUTE },
{ 0x42, KEY_RED },
{ 0x40, KEY_GREEN },
{ 0x00, KEY_YELLOW },
{ 0x03, KEY_BLUE },
{ 0x4a, KEY_BACK },
{ 0x48, KEY_FORWARD },
{ 0x08, KEY_PREVIOUSSONG},
{ 0x0b, KEY_NEXTSONG},
{ 0x46, KEY_PLAYPAUSE },
{ 0x44, KEY_STOP },
{ 0x1f, KEY_FAVORITES}, //KEY_F5?
{ 0x04, KEY_PVR },
{ 0x4d, KEY_EPG },
{ 0x02, KEY_INFO },
{ 0x09, KEY_SUBTITLE },
{ 0x01, KEY_AUDIO },
{ 0x0d, KEY_HOMEPAGE },
{ 0x11, KEY_TV }, // DTV ?
{ 0x06, KEY_UP },
{ 0x5a, KEY_LEFT },
{ 0x1a, KEY_ENTER }, // KEY_OK ?
{ 0x1b, KEY_RIGHT },
{ 0x16, KEY_DOWN },
{ 0x45, KEY_MENU },
{ 0x05, KEY_ESC },
{ 0x13, KEY_VOLUMEUP },
{ 0x17, KEY_VOLUMEDOWN },
{ 0x58, KEY_APPSELECT },
{ 0x12, KEY_VENDOR }, // mouse
{ 0x55, KEY_PAGEUP }, // KEY_CHANNELUP ?
{ 0x15, KEY_PAGEDOWN }, // KEY_CHANNELDOWN ?
{ 0x52, KEY_1 },
{ 0x50, KEY_2 },
{ 0x10, KEY_3 },
{ 0x56, KEY_4 },
{ 0x54, KEY_5 },
{ 0x14, KEY_6 },
{ 0x4e, KEY_7 },
{ 0x4c, KEY_8 },
{ 0x0c, KEY_9 },
{ 0x18, KEY_WWW }, // KEY_F7
{ 0x0f, KEY_0 },
{ 0x51, KEY_BACKSPACE },
};
static struct rc_map_list kii_pro_map = {
.map = {
.scan = kii_pro,
.size = ARRAY_SIZE(kii_pro),
.rc_proto = RC_PROTO_NEC,
.name = RC_MAP_KII_PRO,
}
};
static int __init init_rc_map_kii_pro(void)
{
return rc_map_register(&kii_pro_map);
}
static void __exit exit_rc_map_kii_pro(void)
{
rc_map_unregister(&kii_pro_map);
}
module_init(init_rc_map_kii_pro)
module_exit(exit_rc_map_kii_pro)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mohammad Rasim <[email protected]>");
| linux-master | drivers/media/rc/keymaps/rc-videostrong-kii-pro.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Keytable for the CEC remote control
*
* This keymap is unusual in that it can't be built as a module,
* instead it is registered directly in rc-main.c if CONFIG_MEDIA_CEC_RC
* is set. This is because it can be called from drm_dp_cec_set_edid() via
* cec_register_adapter() in an asynchronous context, and it is not
* allowed to use request_module() to load rc-cec.ko in that case.
*
* Since this keymap is only used if CONFIG_MEDIA_CEC_RC is set, we
* just compile this keymap into the rc-core module and never as a
* separate module.
*
* Copyright (c) 2015 by Kamil Debski
*/
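/*
 * Registration sketch: with CONFIG_MEDIA_CEC_RC set, rc-main.c is
 * expected to pick this table up directly at core init, roughly:
 *
 *	#ifdef CONFIG_MEDIA_CEC_RC
 *		rc_map_register(&cec_map);
 *	#endif
 *
 * (illustrative; the exact call site in rc-main.c may differ)
 */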
#include <media/rc-map.h>
#include <linux/module.h>
/*
* CEC Spec "High-Definition Multimedia Interface Specification" can be obtained
* here: http://xtreamerdev.googlecode.com/files/CEC_Specs.pdf
* The control codes are listed in Table 27: User Control Codes, p. 95
*/
static struct rc_map_table cec[] = {
{ 0x00, KEY_OK },
{ 0x01, KEY_UP },
{ 0x02, KEY_DOWN },
{ 0x03, KEY_LEFT },
{ 0x04, KEY_RIGHT },
{ 0x05, KEY_RIGHT_UP },
{ 0x06, KEY_RIGHT_DOWN },
{ 0x07, KEY_LEFT_UP },
{ 0x08, KEY_LEFT_DOWN },
{ 0x09, KEY_ROOT_MENU }, /* CEC Spec: Device Root Menu - see Note 2 */
/*
* Note 2: This is the initial display that a device shows. It is
* device-dependent and can be, for example, a contents menu, setup
* menu, favorite menu or other menu. The actual menu displayed
* may also depend on the device's current state.
*/
{ 0x0a, KEY_SETUP },
{ 0x0b, KEY_MENU }, /* CEC Spec: Contents Menu */
{ 0x0c, KEY_FAVORITES }, /* CEC Spec: Favorite Menu */
{ 0x0d, KEY_EXIT },
/* 0x0e-0x0f: Reserved */
{ 0x10, KEY_MEDIA_TOP_MENU },
{ 0x11, KEY_CONTEXT_MENU },
/* 0x12-0x1c: Reserved */
{ 0x1d, KEY_DIGITS }, /* CEC Spec: select/toggle a Number Entry Mode */
{ 0x1e, KEY_NUMERIC_11 },
{ 0x1f, KEY_NUMERIC_12 },
/* 0x20-0x29: Keys 0 to 9 */
{ 0x20, KEY_NUMERIC_0 },
{ 0x21, KEY_NUMERIC_1 },
{ 0x22, KEY_NUMERIC_2 },
{ 0x23, KEY_NUMERIC_3 },
{ 0x24, KEY_NUMERIC_4 },
{ 0x25, KEY_NUMERIC_5 },
{ 0x26, KEY_NUMERIC_6 },
{ 0x27, KEY_NUMERIC_7 },
{ 0x28, KEY_NUMERIC_8 },
{ 0x29, KEY_NUMERIC_9 },
{ 0x2a, KEY_DOT },
{ 0x2b, KEY_ENTER },
{ 0x2c, KEY_CLEAR },
/* 0x2d-0x2e: Reserved */
{ 0x2f, KEY_NEXT_FAVORITE }, /* CEC Spec: Next Favorite */
{ 0x30, KEY_CHANNELUP },
{ 0x31, KEY_CHANNELDOWN },
{ 0x32, KEY_PREVIOUS }, /* CEC Spec: Previous Channel */
{ 0x33, KEY_SOUND }, /* CEC Spec: Sound Select */
{ 0x34, KEY_VIDEO }, /* 0x34: CEC Spec: Input Select */
{ 0x35, KEY_INFO }, /* CEC Spec: Display Information */
{ 0x36, KEY_HELP },
{ 0x37, KEY_PAGEUP },
{ 0x38, KEY_PAGEDOWN },
/* 0x39-0x3f: Reserved */
{ 0x40, KEY_POWER },
{ 0x41, KEY_VOLUMEUP },
{ 0x42, KEY_VOLUMEDOWN },
{ 0x43, KEY_MUTE },
{ 0x44, KEY_PLAYCD },
{ 0x45, KEY_STOPCD },
{ 0x46, KEY_PAUSECD },
{ 0x47, KEY_RECORD },
{ 0x48, KEY_REWIND },
{ 0x49, KEY_FASTFORWARD },
{ 0x4a, KEY_EJECTCD }, /* CEC Spec: Eject */
{ 0x4b, KEY_FORWARD },
{ 0x4c, KEY_BACK },
{ 0x4d, KEY_STOP_RECORD }, /* CEC Spec: Stop-Record */
{ 0x4e, KEY_PAUSE_RECORD }, /* CEC Spec: Pause-Record */
/* 0x4f: Reserved */
{ 0x50, KEY_ANGLE },
{ 0x51, KEY_TV2 },
{ 0x52, KEY_VOD }, /* CEC Spec: Video on Demand */
{ 0x53, KEY_EPG },
{ 0x54, KEY_TIME }, /* CEC Spec: Timer */
{ 0x55, KEY_CONFIG },
/*
 * The following codes are hard to implement at this moment, as they
 * carry an additional argument. Most likely, changes to the RC
 * framework are necessary.
 * For now they are interpreted by the CEC framework as non-keycodes
 * and are passed as messages, enabling user applications to parse them.
 */
/* 0x56: CEC Spec: Select Broadcast Type */
/* 0x57: CEC Spec: Select Sound presentation */
{ 0x58, KEY_AUDIO_DESC }, /* CEC 2.0 and up */
{ 0x59, KEY_WWW }, /* CEC 2.0 and up */
{ 0x5a, KEY_3D_MODE }, /* CEC 2.0 and up */
/* 0x5b-0x5f: Reserved */
{ 0x60, KEY_PLAYCD }, /* CEC Spec: Play Function */
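/*
 * The 0x60xx entries below fold the one-byte Play Mode operand into the
 * scancode (0x60 << 8 | operand), so operand-carrying Play Function
 * presses are kept distinct from a plain 0x60.
 */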
{ 0x6005, KEY_FASTFORWARD },
{ 0x6006, KEY_FASTFORWARD },
{ 0x6007, KEY_FASTFORWARD },
{ 0x6015, KEY_SLOW },
{ 0x6016, KEY_SLOW },
{ 0x6017, KEY_SLOW },
{ 0x6009, KEY_FASTREVERSE },
{ 0x600a, KEY_FASTREVERSE },
{ 0x600b, KEY_FASTREVERSE },
{ 0x6019, KEY_SLOWREVERSE },
{ 0x601a, KEY_SLOWREVERSE },
{ 0x601b, KEY_SLOWREVERSE },
{ 0x6020, KEY_REWIND },
{ 0x6024, KEY_PLAYCD },
{ 0x6025, KEY_PAUSECD },
{ 0x61, KEY_PLAYPAUSE }, /* CEC Spec: Pause-Play Function */
{ 0x62, KEY_RECORD }, /* Spec: Record Function */
{ 0x63, KEY_PAUSE_RECORD }, /* CEC Spec: Pause-Record Function */
{ 0x64, KEY_STOPCD }, /* CEC Spec: Stop Function */
{ 0x65, KEY_MUTE }, /* CEC Spec: Mute Function */
{ 0x66, KEY_UNMUTE }, /* CEC Spec: Restore the volume */
/*
 * The following codes are hard to implement at this moment, as they
 * carry an additional argument. Most likely, changes to the RC
 * framework are necessary.
 * For now they are interpreted by the CEC framework as non-keycodes
 * and are passed as messages, enabling user applications to parse them.
 */
/* 0x67: CEC Spec: Tune Function */
/* 0x68: CEC Spec: Select Media Function */
/* 0x69: CEC Spec: Select A/V Input Function */
/* 0x6a: CEC Spec: Select Audio Input Function */
{ 0x6b, KEY_POWER }, /* CEC Spec: Power Toggle Function */
{ 0x6c, KEY_SLEEP }, /* CEC Spec: Power Off Function */
{ 0x6d, KEY_WAKEUP }, /* CEC Spec: Power On Function */
/* 0x6e-0x70: Reserved */
{ 0x71, KEY_BLUE }, /* CEC Spec: F1 (Blue) */
{ 0x72, KEY_RED }, /* CEC Spec: F2 (Red) */
{ 0x73, KEY_GREEN }, /* CEC Spec: F3 (Green) */
{ 0x74, KEY_YELLOW }, /* CEC Spec: F4 (Yellow) */
{ 0x75, KEY_F5 },
{ 0x76, KEY_DATA }, /* CEC Spec: Data - see Note 3 */
/*
* Note 3: This is used, for example, to enter or leave a digital TV
* data broadcast application.
*/
/* 0x77-0xff: Reserved */
};
struct rc_map_list cec_map = {
.map = {
.scan = cec,
.size = ARRAY_SIZE(cec),
.rc_proto = RC_PROTO_CEC,
.name = RC_MAP_CEC,
}
};
| linux-master | drivers/media/rc/keymaps/rc-cec.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2021 Emanuel Strobel <[email protected]>
*/
#include <media/rc-map.h>
#include <linux/module.h>
/*
* Keytable for Dreambox RC10/RC0 and RC20/RC-BT remote controls
*
* Keys that are not IR addressable:
*
* // DREAM switches to STB control mode
* // TV switches to TV control mode
* // MODE toggles STB/TV/BT control modes
*
*/
static struct rc_map_table dreambox[] = {
/* Dreambox RC10/RC0/RCU-BT remote */
{ 0x3200, KEY_POWER },
// DREAM
{ 0x3290, KEY_HELP },
// TV
{ 0x3201, KEY_1 },
{ 0x3202, KEY_2 },
{ 0x3203, KEY_3 },
{ 0x3204, KEY_4 },
{ 0x3205, KEY_5 },
{ 0x3206, KEY_6 },
{ 0x3207, KEY_7 },
{ 0x3208, KEY_8 },
{ 0x3209, KEY_9 },
{ 0x320a, KEY_PREVIOUS },
{ 0x320b, KEY_0 },
{ 0x320c, KEY_NEXT },
{ 0x321f, KEY_RED },
{ 0x3220, KEY_GREEN },
{ 0x3221, KEY_YELLOW },
{ 0x3222, KEY_BLUE },
{ 0x3210, KEY_INFO },
{ 0x3212, KEY_MENU },
{ 0x320e, KEY_AUDIO },
{ 0x3218, KEY_PVR },
{ 0x3213, KEY_LEFT },
{ 0x3211, KEY_UP },
{ 0x3215, KEY_RIGHT },
{ 0x3217, KEY_DOWN },
{ 0x3214, KEY_OK },
{ 0x3219, KEY_VOLUMEUP },
{ 0x321c, KEY_VOLUMEDOWN },
{ 0x321d, KEY_ESC }, // EXIT
{ 0x321a, KEY_MUTE },
{ 0x321b, KEY_PAGEUP },
{ 0x321e, KEY_PAGEDOWN },
{ 0x3223, KEY_PREVIOUSSONG },
{ 0x3224, KEY_PLAYPAUSE },
{ 0x3225, KEY_STOP },
{ 0x3226, KEY_NEXTSONG },
{ 0x3227, KEY_TV },
{ 0x3228, KEY_RADIO },
{ 0x3229, KEY_TEXT },
{ 0x322a, KEY_RECORD },
/* Dreambox RC20/RC-BT */
{ 0x3407, KEY_MUTE },
// MODE
{ 0x3401, KEY_POWER },
{ 0x3432, KEY_PREVIOUSSONG },
{ 0x3433, KEY_PLAYPAUSE },
{ 0x3435, KEY_NEXTSONG },
{ 0x3436, KEY_RECORD },
{ 0x3434, KEY_STOP },
{ 0x3425, KEY_TEXT },
{ 0x341f, KEY_RED },
{ 0x3420, KEY_GREEN },
{ 0x3421, KEY_YELLOW },
{ 0x3422, KEY_BLUE },
{ 0x341b, KEY_INFO },
{ 0x341c, KEY_MENU },
{ 0x3430, KEY_AUDIO },
{ 0x3431, KEY_PVR },
{ 0x3414, KEY_LEFT },
{ 0x3411, KEY_UP },
{ 0x3416, KEY_RIGHT },
{ 0x3419, KEY_DOWN },
{ 0x3415, KEY_OK },
{ 0x3413, KEY_VOLUMEUP },
{ 0x3418, KEY_VOLUMEDOWN },
{ 0x3412, KEY_ESC }, // EXIT
{ 0x3426, KEY_HELP }, // MIC
{ 0x3417, KEY_PAGEUP },
{ 0x341a, KEY_PAGEDOWN },
{ 0x3404, KEY_1 },
{ 0x3405, KEY_2 },
{ 0x3406, KEY_3 },
{ 0x3408, KEY_4 },
{ 0x3409, KEY_5 },
{ 0x340a, KEY_6 },
{ 0x340c, KEY_7 },
{ 0x340d, KEY_8 },
{ 0x340e, KEY_9 },
{ 0x340b, KEY_PREVIOUS },
{ 0x3410, KEY_0 },
{ 0x340f, KEY_NEXT },
};
static struct rc_map_list dreambox_map = {
.map = {
.scan = dreambox,
.size = ARRAY_SIZE(dreambox),
.rc_proto = RC_PROTO_NEC,
.name = RC_MAP_DREAMBOX,
}
};
static int __init init_rc_map_dreambox(void)
{
return rc_map_register(&dreambox_map);
}
static void __exit exit_rc_map_dreambox(void)
{
rc_map_unregister(&dreambox_map);
}
module_init(init_rc_map_dreambox)
module_exit(exit_rc_map_dreambox)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Emanuel Strobel <[email protected]>");
| linux-master | drivers/media/rc/keymaps/rc-dreambox.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* kworld-pc150u.c - Keytable for kworld_pc150u Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Kyle Strickland
* (based on kworld-plus-tv-analog.c by
* Mauro Carvalho Chehab)
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Kworld PC150-U
Kyle Strickland <[email protected]>
*/
static struct rc_map_table kworld_pc150u[] = {
{ 0x0c, KEY_MEDIA }, /* Kworld key */
{ 0x16, KEY_EJECTCLOSECD }, /* -> ) */
{ 0x1d, KEY_POWER2 },
{ 0x00, KEY_NUMERIC_1 },
{ 0x01, KEY_NUMERIC_2 },
{ 0x02, KEY_NUMERIC_3 },
{ 0x03, KEY_NUMERIC_4 },
{ 0x04, KEY_NUMERIC_5 },
{ 0x05, KEY_NUMERIC_6 },
{ 0x06, KEY_NUMERIC_7 },
{ 0x07, KEY_NUMERIC_8 },
{ 0x08, KEY_NUMERIC_9 },
{ 0x0a, KEY_NUMERIC_0 },
{ 0x09, KEY_AGAIN },
{ 0x14, KEY_MUTE },
{ 0x1e, KEY_LAST },
{ 0x17, KEY_ZOOM },
{ 0x1f, KEY_HOMEPAGE },
{ 0x0e, KEY_ESC },
{ 0x20, KEY_UP },
{ 0x21, KEY_DOWN },
{ 0x42, KEY_LEFT },
{ 0x43, KEY_RIGHT },
{ 0x0b, KEY_ENTER },
{ 0x10, KEY_CHANNELUP },
{ 0x11, KEY_CHANNELDOWN },
{ 0x13, KEY_VOLUMEUP },
{ 0x12, KEY_VOLUMEDOWN },
{ 0x19, KEY_TIME}, /* Timeshift */
{ 0x1a, KEY_STOP},
{ 0x1b, KEY_RECORD},
{ 0x4b, KEY_EMAIL},
{ 0x40, KEY_REWIND},
{ 0x44, KEY_PLAYPAUSE},
{ 0x41, KEY_FORWARD},
{ 0x22, KEY_TEXT},
{ 0x15, KEY_AUDIO}, /* ((*)) */
{ 0x0f, KEY_MODE}, /* display ratio */
{ 0x1c, KEY_SYSRQ}, /* snapshot */
{ 0x4a, KEY_SLEEP}, /* sleep timer */
{ 0x48, KEY_SOUND}, /* switch theater mode */
{ 0x49, KEY_BLUE}, /* A */
{ 0x18, KEY_RED}, /* B */
{ 0x23, KEY_GREEN}, /* C */
};
static struct rc_map_list kworld_pc150u_map = {
.map = {
.scan = kworld_pc150u,
.size = ARRAY_SIZE(kworld_pc150u),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_KWORLD_PC150U,
}
};
static int __init init_rc_map_kworld_pc150u(void)
{
return rc_map_register(&kworld_pc150u_map);
}
static void __exit exit_rc_map_kworld_pc150u(void)
{
rc_map_unregister(&kworld_pc150u_map);
}
module_init(init_rc_map_kworld_pc150u)
module_exit(exit_rc_map_kworld_pc150u)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyle Strickland <[email protected]>");
| linux-master | drivers/media/rc/keymaps/rc-kworld-pc150u.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Medion X10 RF remote keytable
*
* Copyright (C) 2011 Anssi Hannula <anssi.hannula@iki.fi>
*
* This file is based on a keytable provided by
* Jan Losinski <[email protected]>
*/
#include <linux/module.h>
#include <media/rc-map.h>
static struct rc_map_table medion_x10[] = {
{ 0x2c, KEY_TV }, /* TV */
{ 0x2d, KEY_VCR }, /* VCR */
{ 0x04, KEY_DVD }, /* DVD */
{ 0x06, KEY_AUDIO }, /* MUSIC */
{ 0x2e, KEY_RADIO }, /* RADIO */
{ 0x05, KEY_DIRECTORY }, /* PHOTO */
{ 0x2f, KEY_INFO }, /* TV-PREVIEW */
{ 0x30, KEY_LIST }, /* CHANNEL-LST */
{ 0x1b, KEY_SETUP }, /* SETUP */
{ 0x31, KEY_VIDEO }, /* VIDEO DESKTOP */
{ 0x08, KEY_VOLUMEDOWN }, /* VOL - */
{ 0x09, KEY_VOLUMEUP }, /* VOL + */
{ 0x0b, KEY_CHANNELUP }, /* CHAN + */
{ 0x0c, KEY_CHANNELDOWN }, /* CHAN - */
{ 0x00, KEY_MUTE }, /* MUTE */
{ 0x32, KEY_RED }, /* red */
{ 0x33, KEY_GREEN }, /* green */
{ 0x34, KEY_YELLOW }, /* yellow */
{ 0x35, KEY_BLUE }, /* blue */
{ 0x16, KEY_TEXT }, /* TXT */
{ 0x0d, KEY_NUMERIC_1 },
{ 0x0e, KEY_NUMERIC_2 },
{ 0x0f, KEY_NUMERIC_3 },
{ 0x10, KEY_NUMERIC_4 },
{ 0x11, KEY_NUMERIC_5 },
{ 0x12, KEY_NUMERIC_6 },
{ 0x13, KEY_NUMERIC_7 },
{ 0x14, KEY_NUMERIC_8 },
{ 0x15, KEY_NUMERIC_9 },
{ 0x17, KEY_NUMERIC_0 },
{ 0x1c, KEY_SEARCH }, /* TV/RAD, CH SRC */
{ 0x20, KEY_DELETE }, /* DELETE */
{ 0x36, KEY_KEYBOARD }, /* RENAME */
{ 0x18, KEY_SCREEN }, /* SNAPSHOT */
{ 0x1a, KEY_UP }, /* up */
{ 0x22, KEY_DOWN }, /* down */
{ 0x1d, KEY_LEFT }, /* left */
{ 0x1f, KEY_RIGHT }, /* right */
{ 0x1e, KEY_OK }, /* OK */
{ 0x37, KEY_SELECT }, /* ACQUIRE IMAGE */
{ 0x38, KEY_EDIT }, /* EDIT IMAGE */
{ 0x24, KEY_REWIND }, /* rewind (<<) */
{ 0x25, KEY_PLAY }, /* play ( >) */
{ 0x26, KEY_FORWARD }, /* forward (>>) */
{ 0x27, KEY_RECORD }, /* record ( o) */
{ 0x28, KEY_STOP }, /* stop ([]) */
{ 0x29, KEY_PAUSE }, /* pause ('') */
{ 0x21, KEY_PREVIOUS }, /* prev */
{ 0x39, KEY_SWITCHVIDEOMODE }, /* F SCR */
{ 0x23, KEY_NEXT }, /* next */
{ 0x19, KEY_MENU }, /* MENU */
{ 0x3a, KEY_LANGUAGE }, /* AUDIO */
{ 0x02, KEY_POWER }, /* POWER */
};
static struct rc_map_list medion_x10_map = {
.map = {
.scan = medion_x10,
.size = ARRAY_SIZE(medion_x10),
.rc_proto = RC_PROTO_OTHER,
.name = RC_MAP_MEDION_X10,
}
};
static int __init init_rc_map_medion_x10(void)
{
return rc_map_register(&medion_x10_map);
}
static void __exit exit_rc_map_medion_x10(void)
{
rc_map_unregister(&medion_x10_map);
}
module_init(init_rc_map_medion_x10)
module_exit(exit_rc_map_medion_x10)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anssi Hannula <[email protected]>");
| linux-master | drivers/media/rc/keymaps/rc-medion-x10.c |
// SPDX-License-Identifier: GPL-2.0+
// dntv-live-dvb-t.h - Keytable for dntv_live_dvb_t Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
/* DigitalNow DNTV Live DVB-T Remote */
static struct rc_map_table dntv_live_dvb_t[] = {
{ 0x00, KEY_ESC }, /* 'go up a level?' */
/* Keys 0 to 9 */
{ 0x0a, KEY_NUMERIC_0 },
{ 0x01, KEY_NUMERIC_1 },
{ 0x02, KEY_NUMERIC_2 },
{ 0x03, KEY_NUMERIC_3 },
{ 0x04, KEY_NUMERIC_4 },
{ 0x05, KEY_NUMERIC_5 },
{ 0x06, KEY_NUMERIC_6 },
{ 0x07, KEY_NUMERIC_7 },
{ 0x08, KEY_NUMERIC_8 },
{ 0x09, KEY_NUMERIC_9 },
{ 0x0b, KEY_TUNER }, /* tv/fm */
{ 0x0c, KEY_SEARCH }, /* scan */
{ 0x0d, KEY_STOP },
{ 0x0e, KEY_PAUSE },
{ 0x0f, KEY_VIDEO }, /* source */
{ 0x10, KEY_MUTE },
{ 0x11, KEY_REWIND }, /* backward << */
{ 0x12, KEY_POWER },
{ 0x13, KEY_CAMERA }, /* snap */
{ 0x14, KEY_AUDIO }, /* stereo */
{ 0x15, KEY_CLEAR }, /* reset */
{ 0x16, KEY_PLAY },
{ 0x17, KEY_ENTER },
{ 0x18, KEY_ZOOM }, /* full screen */
{ 0x19, KEY_FASTFORWARD }, /* forward >> */
{ 0x1a, KEY_CHANNELUP },
{ 0x1b, KEY_VOLUMEUP },
{ 0x1c, KEY_INFO }, /* preview */
{ 0x1d, KEY_RECORD }, /* record */
{ 0x1e, KEY_CHANNELDOWN },
{ 0x1f, KEY_VOLUMEDOWN },
};
static struct rc_map_list dntv_live_dvb_t_map = {
.map = {
.scan = dntv_live_dvb_t,
.size = ARRAY_SIZE(dntv_live_dvb_t),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_DNTV_LIVE_DVB_T,
}
};
static int __init init_rc_map_dntv_live_dvb_t(void)
{
return rc_map_register(&dntv_live_dvb_t_map);
}
static void __exit exit_rc_map_dntv_live_dvb_t(void)
{
rc_map_unregister(&dntv_live_dvb_t_map);
}
module_init(init_rc_map_dntv_live_dvb_t)
module_exit(exit_rc_map_dntv_live_dvb_t)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c |
// SPDX-License-Identifier: GPL-2.0+
// real-audio-220-32-keys.h - Keytable for real_audio_220_32_keys Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
/* Zogis Real Audio 220 - 32 keys IR */
static struct rc_map_table real_audio_220_32_keys[] = {
{ 0x1c, KEY_RADIO},
{ 0x12, KEY_POWER2},
{ 0x01, KEY_NUMERIC_1},
{ 0x02, KEY_NUMERIC_2},
{ 0x03, KEY_NUMERIC_3},
{ 0x04, KEY_NUMERIC_4},
{ 0x05, KEY_NUMERIC_5},
{ 0x06, KEY_NUMERIC_6},
{ 0x07, KEY_NUMERIC_7},
{ 0x08, KEY_NUMERIC_8},
{ 0x09, KEY_NUMERIC_9},
{ 0x00, KEY_NUMERIC_0},
{ 0x0c, KEY_VOLUMEUP},
{ 0x18, KEY_VOLUMEDOWN},
{ 0x0b, KEY_CHANNELUP},
{ 0x15, KEY_CHANNELDOWN},
{ 0x16, KEY_ENTER},
{ 0x11, KEY_VIDEO}, /* Source */
{ 0x0d, KEY_AUDIO}, /* stereo */
{ 0x0f, KEY_PREVIOUS}, /* Prev */
{ 0x1b, KEY_TIME}, /* Timeshift */
{ 0x1a, KEY_NEXT}, /* Next */
{ 0x0e, KEY_STOP},
{ 0x1f, KEY_PLAY},
{ 0x1e, KEY_PLAYPAUSE}, /* Pause */
{ 0x1d, KEY_RECORD},
{ 0x13, KEY_MUTE},
{ 0x19, KEY_CAMERA}, /* Snapshot */
};
static struct rc_map_list real_audio_220_32_keys_map = {
.map = {
.scan = real_audio_220_32_keys,
.size = ARRAY_SIZE(real_audio_220_32_keys),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_REAL_AUDIO_220_32_KEYS,
}
};
static int __init init_rc_map_real_audio_220_32_keys(void)
{
return rc_map_register(&real_audio_220_32_keys_map);
}
static void __exit exit_rc_map_real_audio_220_32_keys(void)
{
rc_map_unregister(&real_audio_220_32_keys_map);
}
module_init(init_rc_map_real_audio_220_32_keys)
module_exit(exit_rc_map_real_audio_220_32_keys)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c |