python_code
stringlengths 0
1.8M
| repo_name
stringclasses 7
values | file_path
stringlengths 5
99
|
---|---|---|
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
#include <linux/vhost_types.h>
#include <linux/vdpa.h>
#include <linux/gcd.h>
#include <linux/string.h>
#include <linux/mlx5/qp.h>
#include "mlx5_vdpa.h"
/* DIV_ROUND_UP where the divider is a power of 2 given by its log base 2 value */
#define MLX5_DIV_ROUND_UP_POW2(_n, _s) \
({ \
	u64 __s = _s; \
	u64 _res; \
	/* 1ULL: an int-typed 1 would overflow (UB) for shifts >= 31 even \
	 * though the rest of the computation is done in u64. \
	 */ \
	_res = (((_n) + (1ULL << (__s)) - 1) >> (__s)); \
	_res; \
})
static int get_octo_len(u64 len, int page_shift)
{
	/* Number of 16-byte octowords needed to hold the MTT entries (8 bytes
	 * each) that translate len bytes at the given page size.
	 */
	u64 page_size = 1ULL << page_shift;
	int npages = ALIGN(len, page_size) >> page_shift;

	return (npages + 1) / 2;
}
/* Split the mkey access mode into the two fields the mkey context uses:
 * bits 1:0 and bits 4:2.
 */
static void mlx5_set_access_mode(void *mkc, int mode)
{
MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
MLX5_SET(mkc, mkc, access_mode_4_2, mode >> 2);
}
/* Fill the MTT array with DMA addresses, one entry per 2^log_size chunk.
 * Each scatterlist entry may cover several MTT-sized chunks, so the inner
 * loop advances in BIT(mr->log_size) steps. Stops after mr->nsg entries.
 */
static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
{
struct scatterlist *sg;
int nsg = mr->nsg;
u64 dma_addr;
u64 dma_len;
int j = 0;
int i;
/* nent is the number of entries actually mapped by dma_map_sg_attrs() */
for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
nsg && dma_len;
nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
mtt[j++] = cpu_to_be64(dma_addr);
}
}
/* Create a direct (MTT-based) memory key covering [mr->start, mr->end).
 * The scatterlist must already be DMA-mapped (see map_direct_mr()).
 * Returns 0 on success or a negative errno.
 */
static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
int inlen;
void *mkc;
void *in;
int err;
/* command input: mkey context plus the trailing MTT array, 16-byte aligned */
inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
/* local write/read permissions derived from the vhost map permissions */
MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_MTT);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
MLX5_SET64(mkc, mkc, start_addr, mr->offset);
MLX5_SET64(mkc, mkc, len, mr->end - mr->start);
MLX5_SET(mkc, mkc, log_page_size, mr->log_size);
MLX5_SET(mkc, mkc, translations_octword_size,
get_octo_len(mr->end - mr->start, mr->log_size));
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
get_octo_len(mr->end - mr->start, mr->log_size));
populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));
err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen);
kvfree(in);
if (err) {
mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n");
return err;
}
return 0;
}
/* Destroy the mkey backing a direct MR; the counterpart of create_direct_mr(). */
static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
mlx5_vdpa_destroy_mkey(mvdev, mr->mr);
}
static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	/* Clamp the map's start address to the beginning of the direct MR. */
	if (map->start > mr->start)
		return map->start;
	return mr->start;
}
static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	/* Clamp the map's (exclusive) end address to the end of the direct MR. */
	u64 end = map->last + 1;

	return end < mr->end ? end : mr->end;
}
static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	/* Length of the overlap between the iotlb map and the direct MR. */
	u64 from = map_start(map, mr);
	u64 to = map_end(map, mr);

	return to - from;
}
/* Sentinels returned when the direct-MR chain is empty. */
#define MLX5_VDPA_INVALID_START_ADDR ((u64)-1)
#define MLX5_VDPA_INVALID_LEN ((u64)-1)
static u64 indir_start_addr(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *first;

	/* Start address of the first direct MR on the chain, if any. */
	first = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	return first ? first->start : MLX5_VDPA_INVALID_START_ADDR;
}
static u64 indir_len(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *first;
	struct mlx5_vdpa_direct_mr *last;

	first = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	if (!first)
		return MLX5_VDPA_INVALID_LEN;

	/* The chain is kept in address order, so the last entry bounds it. */
	last = list_last_entry(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	return last->end - first->start;
}
/* Maximum number of bytes a single KLM entry can reference (1 GB). */
#define LOG_MAX_KLM_SIZE 30
#define MAX_KLM_SIZE BIT(LOG_MAX_KLM_SIZE)
static u32 klm_bcount(u64 size)
{
	/* KLM byte counts are 32 bits; callers cap size at MAX_KLM_SIZE. */
	u32 bcount = size;

	return bcount;
}
/* Fill the KLM array of the indirect mkey. Each direct MR gets one KLM
 * entry; a gap between consecutive direct MRs is covered by an extra KLM
 * that points at the null mkey (reads return zero, writes are dropped).
 * The "goto again" re-processes the same direct MR after emitting the
 * null-key filler for the hole that precedes it.
 */
static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, void *in)
{
struct mlx5_vdpa_direct_mr *dmr;
struct mlx5_klm *klmarr;
struct mlx5_klm *klm;
bool first = true;
u64 preve;
int i;
klmarr = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
i = 0;
list_for_each_entry(dmr, &mkey->head, list) {
again:
klm = &klmarr[i++];
if (first) {
preve = dmr->start;
first = false;
}
/* preve tracks the first address not yet covered by a KLM */
if (preve == dmr->start) {
klm->key = cpu_to_be32(dmr->mr);
klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
preve = dmr->end;
} else {
/* hole before this MR: plug it with the null mkey */
klm->key = cpu_to_be32(mvdev->res.null_mkey);
klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve));
preve = dmr->start;
goto again;
}
}
}
static int klm_byte_size(int nklms)
{
	/* Each KLM entry is 16 bytes; the array length is padded to a
	 * multiple of four entries.
	 */
	return ALIGN(nklms, 4) * 16;
}
/* Create the indirect (KLM-based) mkey that stitches all direct MRs into
 * one contiguous view of the guest address space. Returns 0 or -errno;
 * -EINVAL if there are no direct MRs to reference.
 */
static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
int inlen;
void *mkc;
void *in;
int err;
u64 start;
u64 len;
start = indir_start_addr(mr);
len = indir_len(mr);
if (start == MLX5_VDPA_INVALID_START_ADDR || len == MLX5_VDPA_INVALID_LEN)
return -EINVAL;
inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + klm_byte_size(mr->num_klms);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_KLMS);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
MLX5_SET64(mkc, mkc, start_addr, start);
MLX5_SET64(mkc, mkc, len, len);
/* octword = 16 bytes, i.e. one KLM entry per octword */
MLX5_SET(mkc, mkc, translations_octword_size, klm_byte_size(mr->num_klms) / 16);
MLX5_SET(create_mkey_in, in, translations_octword_actual_size, mr->num_klms);
fill_indir(mvdev, mr, in);
err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
kfree(in);
return err;
}
/* Destroy the indirect mkey; the counterpart of create_indirect_key(). */
static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey)
{
mlx5_vdpa_destroy_mkey(mvdev, mkey->mkey);
}
/* Build the scatterlist for a direct MR from the iotlb maps overlapping
 * [mr->start, mr->end), DMA-map it, and create the direct mkey.
 *
 * The first pass computes the GCD of all overlap lengths; its log2 becomes
 * the MR page size, so every map can be described with equally sized MTT
 * entries. NOTE(review): if no map overlaps the range, lgcd stays 0 and
 * ilog2(0) is undefined — callers appear to guarantee at least one map;
 * confirm against add_direct_chain()/create_user_mr().
 */
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
struct vhost_iotlb *iotlb)
{
struct vhost_iotlb_map *map;
unsigned long lgcd = 0;
int log_entity_size;
unsigned long size;
u64 start = 0;
int err;
struct page *pg;
unsigned int nsg;
int sglen;
u64 pa;
u64 paend;
struct scatterlist *sg;
struct device *dma = mvdev->vdev.dma_dev;
for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
size = maplen(map, mr);
lgcd = gcd(lgcd, size);
start += size;
}
log_entity_size = ilog2(lgcd);
sglen = 1 << log_entity_size;
nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size);
err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL);
if (err)
return err;
/* second pass: one sg entry per sglen-sized chunk of each map */
sg = mr->sg_head.sgl;
for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
paend = map->addr + maplen(map, mr);
for (pa = map->addr; pa < paend; pa += sglen) {
pg = pfn_to_page(__phys_to_pfn(pa));
if (!sg) {
mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
map->start, map->last + 1);
err = -ENOMEM;
goto err_map;
}
sg_set_page(sg, pg, sglen, 0);
sg = sg_next(sg);
if (!sg)
goto done;
}
}
done:
mr->log_size = log_entity_size;
mr->nsg = nsg;
/* nent may be smaller than nsg if the IOMMU merged entries */
mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
if (!mr->nent) {
err = -ENOMEM;
goto err_map;
}
err = create_direct_mr(mvdev, mr);
if (err)
goto err_direct;
return 0;
err_direct:
dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
err_map:
sg_free_table(&mr->sg_head);
return err;
}
/* Tear down a direct MR: destroy the mkey first (so the device stops using
 * the translations), then DMA-unmap and free the scatterlist.
 */
static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
struct device *dma = mvdev->vdev.dma_dev;
destroy_direct_mr(mvdev, mr);
dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
sg_free_table(&mr->sg_head);
}
/* Split [start, start + size) into direct MRs of at most MAX_KLM_SIZE bytes
 * each — so every direct MR can later be referenced by a single KLM entry —
 * map each one, and append the whole chain to mr->head on success.
 * Returns 0 or a negative errno.
 */
static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm,
			    struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;
	LIST_HEAD(tmp);
	u64 st;
	u64 sz;
	int err;

	st = start;
	while (size) {
		sz = (u32)min_t(u64, MAX_KLM_SIZE, size);
		dmr = kzalloc(sizeof(*dmr), GFP_KERNEL);
		if (!dmr) {
			err = -ENOMEM;
			goto err_alloc;
		}

		dmr->start = st;
		dmr->end = st + sz;
		dmr->perm = perm;
		err = map_direct_mr(mvdev, dmr, iotlb);
		if (err) {
			kfree(dmr);
			goto err_alloc;
		}

		list_add_tail(&dmr->list, &tmp);
		size -= sz;
		mr->num_directs++;
		mr->num_klms++;
		st += sz;
	}
	/* Publish the chain only once every piece mapped successfully. */
	list_splice_tail(&tmp, &mr->head);
	return 0;

err_alloc:
	/* Undo only the MRs created by this call (still on the local tmp
	 * list). Entries already on mr->head belong to previously published
	 * chains and are cleaned up by the caller's error path; iterating
	 * mr->head here would leak everything left on tmp.
	 */
	list_for_each_entry_safe(dmr, n, &tmp, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	return err;
}
/* The iotlb pointer contains a list of maps. Go over the maps, possibly
 * merging mergeable maps, and create direct memory keys that provide the
 * device access to memory. The direct mkeys are then referred to by the
 * indirect memory key that provides access to the entire address space given
 * by iotlb.
 */
static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
struct mlx5_vdpa_direct_mr *dmr;
struct mlx5_vdpa_direct_mr *n;
struct vhost_iotlb_map *map;
u32 pperm = U16_MAX;
/* ps/pe/pperm track the current run of contiguous, same-permission maps */
u64 last = U64_MAX;
u64 ps = U64_MAX;
u64 pe = U64_MAX;
u64 start = 0;
int err = 0;
int nnuls;
INIT_LIST_HEAD(&mr->head);
for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
map = vhost_iotlb_itree_next(map, start, last)) {
start = map->start;
if (pe == map->start && pperm == map->perm) {
/* contiguous with previous map and same permissions: merge */
pe = map->last + 1;
} else {
if (ps != U64_MAX) {
if (pe < map->start) {
/* We have a hole in the map. Check how
 * many null keys are required to fill it.
 */
nnuls = MLX5_DIV_ROUND_UP_POW2(map->start - pe,
LOG_MAX_KLM_SIZE);
mr->num_klms += nnuls;
}
err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
if (err)
goto err_chain;
}
ps = map->start;
pe = map->last + 1;
pperm = map->perm;
}
}
/* flush the final accumulated run */
err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
if (err)
goto err_chain;
/* Create the memory key that defines the guest's address space. This
 * memory key refers to the direct keys that contain the MTT
 * translations
 */
err = create_indirect_key(mvdev, mr);
if (err)
goto err_chain;
mr->user_mr = true;
return 0;
err_chain:
list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
list_del_init(&dmr->list);
unmap_direct_mr(mvdev, dmr);
kfree(dmr);
}
return err;
}
/* Create a physical-address mkey covering all of memory (length64), used
 * when the device operates without a vhost iotlb (DMA/passthrough mode).
 */
static int create_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
u32 *in;
int err;
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
/* length64 = 1: the key spans the entire 64-bit address space */
MLX5_SET(mkc, mkc, length64, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
if (!err)
mr->user_mr = false;
kfree(in);
return err;
}
/* Destroy the DMA (physical-address) mkey created by create_dma_mr(). */
static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
mlx5_vdpa_destroy_mkey(mvdev, mr->mkey);
}
/* Duplicate src into the control-VQ iotlb. A NULL src means identity-map
 * the whole address space read/write.
 */
static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src)
{
	struct vhost_iotlb_map *map;
	u64 start = 0, last = ULLONG_MAX;
	int err;

	if (!src)
		return vhost_iotlb_add_range(mvdev->cvq.iotlb, start, last,
					     start, VHOST_ACCESS_RW);

	for (map = vhost_iotlb_itree_first(src, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
					    map->last, map->addr, map->perm);
		if (err)
			return err;
	}

	return 0;
}
/* Drop every mapping from the control-VQ iotlb. */
static void prune_iotlb(struct mlx5_vdpa_dev *mvdev)
{
vhost_iotlb_del_range(mvdev->cvq.iotlb, 0, ULLONG_MAX);
}
/* Tear down a user MR: destroy the indirect key first, then every direct
 * MR on the chain (in reverse order of creation).
 */
static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
struct mlx5_vdpa_direct_mr *dmr;
struct mlx5_vdpa_direct_mr *n;
destroy_indirect_key(mvdev, mr);
list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
list_del_init(&dmr->list);
unmap_direct_mr(mvdev, dmr);
kfree(dmr);
}
}
static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
{
	/* Only the ASID currently backing the control-VQ group owns its iotlb. */
	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
		prune_iotlb(mvdev);
}
static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;

	/* Nothing to do unless this ASID backs the data VQs and the memory
	 * key was actually created.
	 */
	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid || !mr->initialized)
		return;

	if (mr->user_mr)
		destroy_user_mr(mvdev, mr);
	else
		destroy_dma_mr(mvdev, mr);

	mr->initialized = false;
}
/* Destroy the DVQ and CVQ memory mappings owned by the given ASID.
 * Serialized by mkey_mtx against concurrent create/destroy.
 */
void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
mutex_lock(&mr->mkey_mtx);
_mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
_mlx5_vdpa_destroy_cvq_mr(mvdev, asid);
mutex_unlock(&mr->mkey_mtx);
}
/* Destroy all memory mappings, covering both the CVQ and DATAVQ groups'
 * ASIDs (the two calls are no-ops for non-matching groups).
 */
void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
{
mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]);
mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]);
}
static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev,
				    struct vhost_iotlb *iotlb,
				    unsigned int asid)
{
	/* The control VQ only needs a duplicate of the iotlb, and only when
	 * this ASID backs the CVQ group.
	 */
	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
		return dup_iotlb(mvdev, iotlb);

	return 0;
}
static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev,
				    struct vhost_iotlb *iotlb,
				    unsigned int asid)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	int err;

	/* Skip if this ASID doesn't back the data VQs or a key already exists. */
	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid || mr->initialized)
		return 0;

	/* A NULL iotlb means the device works directly on physical addresses. */
	err = iotlb ? create_user_mr(mvdev, iotlb) : create_dma_mr(mvdev, mr);
	if (!err)
		mr->initialized = true;

	return err;
}
/* Create the data-VQ memory key and the control-VQ iotlb copy for the
 * given ASID; on CVQ failure the DVQ side is rolled back.
 */
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
struct vhost_iotlb *iotlb, unsigned int asid)
{
int err;
err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid);
if (err)
return err;
err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid);
if (err)
goto out_err;
return 0;
out_err:
_mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
return err;
}
/* Locked wrapper around _mlx5_vdpa_create_mr(); see that function. */
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
unsigned int asid)
{
int err;
mutex_lock(&mvdev->mr.mkey_mtx);
err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
mutex_unlock(&mvdev->mr.mkey_mtx);
return err;
}
/* Handle a set_map request. If a memory key already exists, only signal
 * *change_map so the caller tears down and rebuilds; otherwise create the
 * mapping right here.
 */
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
			     bool *change_map, unsigned int asid)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	int err = 0;

	*change_map = false;
	mutex_lock(&mr->mkey_mtx);
	if (mr->initialized) {
		mlx5_vdpa_info(mvdev, "memory map update\n");
		*change_map = true;
	} else {
		err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
	}
	mutex_unlock(&mr->mkey_mtx);

	return err;
}
|
linux-master
|
drivers/vdpa/mlx5/core/mr.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* vDPA bridge driver for modern virtio-pci device
*
* Copyright (c) 2020, Red Hat Inc. All rights reserved.
* Author: Jason Wang <[email protected]>
*
* Based on virtio_pci_modern.c.
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>
#include <uapi/linux/vdpa.h>
/* Maximum virtqueue size reported to the vdpa core. */
#define VP_VDPA_QUEUE_MAX 256
#define VP_VDPA_DRIVER_NAME "vp_vdpa"
/* Buffer size for MSI-X interrupt names. */
#define VP_VDPA_NAME_SIZE 256
/* Per-virtqueue state: notify doorbell mapping, MSI-X irq and callback. */
struct vp_vring {
void __iomem *notify; /* mapped notification (kick) register */
char msix_name[VP_VDPA_NAME_SIZE];
struct vdpa_callback cb; /* driver callback fired from the vq irq */
resource_size_t notify_pa; /* physical address of the notify register */
int irq; /* VIRTIO_MSI_NO_VECTOR when unassigned */
};
/* Per-device state bridging a modern virtio-pci device to the vdpa bus. */
struct vp_vdpa {
struct vdpa_device vdpa; /* must be first: container_of in vdpa_to_vp() */
struct virtio_pci_modern_device *mdev;
struct vp_vring *vring; /* array of 'queues' entries */
struct vdpa_callback config_cb; /* config-change callback */
u64 device_features; /* possibly provisioned-down feature set */
char msix_name[VP_VDPA_NAME_SIZE]; /* name of the config irq */
int config_irq; /* VIRTIO_MSI_NO_VECTOR when unassigned */
int queues;
int vectors; /* MSI-X vectors allocated, 0 if none */
};
/* Management-device wrapper: one per PCI function, owns the vp_vdpa it adds. */
struct vp_vdpa_mgmtdev {
struct vdpa_mgmt_dev mgtdev;
struct virtio_pci_modern_device *mdev;
struct vp_vdpa *vp_vdpa; /* set by dev_add, cleared by dev_del */
};
/* Recover the driver structure from the embedded vdpa device. */
static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
return container_of(vdpa, struct vp_vdpa, vdpa);
}
static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
return vp_vdpa->mdev;
}
/* Accessor for the modern virtio-pci device backing this vp_vdpa. */
static struct virtio_pci_modern_device *vp_vdpa_to_mdev(struct vp_vdpa *vp_vdpa)
{
return vp_vdpa->mdev;
}
/* Report the (possibly provisioned-down) device feature bits. */
static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
return vp_vdpa->device_features;
}
/* Write the driver-accepted feature bits to the device. Always succeeds. */
static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
vp_modern_set_features(mdev, features);
return 0;
}
/* Read back the feature bits the driver negotiated with the device. */
static u64 vp_vdpa_get_driver_features(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
return vp_modern_get_driver_features(mdev);
}
/* Read the virtio device status byte. */
static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
return vp_modern_get_status(mdev);
}
static int vp_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	int irq = vp_vdpa->vring[idx].irq;

	/* VIRTIO_MSI_NO_VECTOR means no irq is assigned to this vq. */
	return irq == VIRTIO_MSI_NO_VECTOR ? -EINVAL : irq;
}
/* Release every vq irq, the config irq, and finally the MSI-X vectors.
 * Safe to call on partial setup: each resource is guarded by its
 * "unassigned" sentinel, which is restored after freeing.
 */
static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
struct pci_dev *pdev = mdev->pci_dev;
int i;
for (i = 0; i < vp_vdpa->queues; i++) {
if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
/* detach the vector from the queue before freeing the irq */
vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
&vp_vdpa->vring[i]);
vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
}
}
if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
}
if (vp_vdpa->vectors) {
pci_free_irq_vectors(pdev);
vp_vdpa->vectors = 0;
}
}
/* Virtqueue MSI-X handler: forward to the registered vq callback, if any. */
static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
{
	struct vp_vring *vring = arg;

	if (!vring->cb.callback)
		return IRQ_HANDLED;

	return vring->cb.callback(vring->cb.private);
}
/* Config-change MSI-X handler: forward to the registered config callback. */
static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
{
	struct vp_vdpa *vp_vdpa = arg;

	if (!vp_vdpa->config_cb.callback)
		return IRQ_HANDLED;

	return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);
}
/* Allocate MSI-X vectors and request one irq per virtqueue plus one for
 * config changes, wiring each vector to the device. On any failure every
 * resource acquired so far is released via vp_vdpa_free_irq().
 */
static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	struct pci_dev *pdev = mdev->pci_dev;
	int i, ret, irq;
	int queues = vp_vdpa->queues;
	int vectors = queues + 1;

	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		dev_err(&pdev->dev,
			"vp_vdpa: fail to allocate irq vectors want %d but %d\n",
			vectors, ret);
		return ret;
	}

	vp_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		/* No trailing newline: this name is shown in /proc/interrupts. */
		snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
			"vp-vdpa[%s]-%d", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, i);
		ret = devm_request_irq(&pdev->dev, irq,
				       vp_vdpa_vq_handler,
				       0, vp_vdpa->vring[i].msix_name,
				       &vp_vdpa->vring[i]);
		if (ret) {
			dev_err(&pdev->dev,
				"vp_vdpa: fail to request irq for vq %d\n", i);
			goto err;
		}
		vp_modern_queue_vector(mdev, i, i);
		vp_vdpa->vring[i].irq = irq;
	}

	snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, queues);
	ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
			       vp_vdpa->msix_name, vp_vdpa);
	if (ret) {
		/* Previously misreported as a vq failure using a stale index. */
		dev_err(&pdev->dev,
			"vp_vdpa: fail to request irq for config interrupt\n");
		goto err;
	}
	vp_modern_config_vector(mdev, queues);
	vp_vdpa->config_irq = irq;

	return 0;
err:
	vp_vdpa_free_irq(vp_vdpa);
	return ret;
}
/* Update the device status. Interrupts are requested lazily on the
 * 0 -> DRIVER_OK transition, before the status write makes the device live.
 * NOTE(review): the vp_vdpa_request_irq() return value is ignored here —
 * a failure leaves the device running without irqs; confirm intended.
 */
static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
u8 s = vp_vdpa_get_status(vdpa);
if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
!(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
vp_vdpa_request_irq(vp_vdpa);
}
vp_modern_set_status(mdev, status);
}
/* Reset the device (status 0) and, if it had been driven, free the irqs
 * acquired on the DRIVER_OK transition.
 */
static int vp_vdpa_reset(struct vdpa_device *vdpa)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
u8 s = vp_vdpa_get_status(vdpa);
vp_modern_set_status(mdev, 0);
if (s & VIRTIO_CONFIG_S_DRIVER_OK)
vp_vdpa_free_irq(vp_vdpa);
return 0;
}
/* Fixed maximum queue size advertised to the vdpa core. */
static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
return VP_VDPA_QUEUE_MAX;
}
static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
struct vdpa_vq_state *state)
{
/* Note that this is not supported by virtio specification, so
 * we return -EOPNOTSUPP here. This means we can't support live
 * migration, vhost device start/stop.
 */
return -EOPNOTSUPP;
}
static int vp_vdpa_set_vq_state_split(struct vdpa_device *vdpa,
				     const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_split *split = &state->split;

	/* Only the post-reset state (avail index 0) can be "restored". */
	return split->avail_index == 0 ? 0 : -EOPNOTSUPP;
}
static int vp_vdpa_set_vq_state_packed(struct vdpa_device *vdpa,
const struct vdpa_vq_state *state)
{
const struct vdpa_vq_state_packed *packed = &state->packed;
if (packed->last_avail_counter == 1 &&
packed->last_avail_idx == 0 &&
packed->last_used_counter == 1 &&
packed->last_used_idx == 0)
return 0;
return -EOPNOTSUPP;
}
static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
const struct vdpa_vq_state *state)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
/* Note that this is not supported by virtio specification.
 * But if the state is by chance equal to the device initial
 * state, we can let it go.
 */
if ((vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK) &&
!vp_modern_get_queue_enable(mdev, qid)) {
if (vp_modern_get_driver_features(mdev) &
BIT_ULL(VIRTIO_F_RING_PACKED))
return vp_vdpa_set_vq_state_packed(vdpa, state);
else
return vp_vdpa_set_vq_state_split(vdpa, state);
}
return -EOPNOTSUPP;
}
/* Record the callback fired by the vq irq handler for this queue. */
static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
struct vdpa_callback *cb)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
vp_vdpa->vring[qid].cb = *cb;
}
/* Enable or disable a virtqueue in the device's common config. */
static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
u16 qid, bool ready)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
vp_modern_set_queue_enable(mdev, qid, ready);
}
/* Read back whether a virtqueue is enabled. */
static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
return vp_modern_get_queue_enable(mdev, qid);
}
/* Program the virtqueue size into the device. */
static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
u32 num)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
vp_modern_set_queue_size(mdev, qid, num);
}
/* Program the descriptor/driver/device ring addresses for a virtqueue. */
static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
u64 desc_area, u64 driver_area,
u64 device_area)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
vp_modern_queue_address(mdev, qid, desc_area,
driver_area, device_area);
return 0;
}
/* Notify (kick) a virtqueue by writing its index to the notify register. */
static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
}
/* Read the config generation counter (changes when config space changes). */
static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
return vp_modern_generation(mdev);
}
/* Virtio device type id (net, blk, ...) from the PCI capability. */
static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
return mdev->id.device;
}
/* Virtio vendor id from the PCI capability. */
static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
return mdev->id.vendor;
}
/* Required alignment for virtqueue memory. */
static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
return PAGE_SIZE;
}
/* Size of the device-specific config space. */
static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
return mdev->device_len;
}
/* Read len bytes of device config at offset. The generation counter is
 * sampled before and after the copy and the read retried until it is
 * stable, guaranteeing a consistent snapshot per the virtio spec.
 */
static void vp_vdpa_get_config(struct vdpa_device *vdpa,
unsigned int offset,
void *buf, unsigned int len)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
u8 old, new;
u8 *p;
int i;
do {
old = vp_ioread8(&mdev->common->config_generation);
p = buf;
for (i = 0; i < len; i++)
*p++ = vp_ioread8(mdev->device + offset + i);
new = vp_ioread8(&mdev->common->config_generation);
} while (old != new);
}
/* Write len bytes of device config at offset, one byte at a time. */
static void vp_vdpa_set_config(struct vdpa_device *vdpa,
unsigned int offset, const void *buf,
unsigned int len)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
const u8 *p = buf;
int i;
for (i = 0; i < len; i++)
vp_iowrite8(*p++, mdev->device + offset + i);
}
/* Record the callback fired by the config-change irq handler. */
static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
struct vdpa_callback *cb)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
vp_vdpa->config_cb = *cb;
}
/* Expose the physical notify area of a vq so userspace can map and kick
 * the device directly.
 */
static struct vdpa_notification_area
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
struct vdpa_notification_area notify;
notify.addr = vp_vdpa->vring[qid].notify_pa;
notify.size = mdev->notify_offset_multiplier;
return notify;
}
/* vdpa_config_ops implementation: every callback simply forwards to the
 * corresponding modern virtio-pci helper.
 */
static const struct vdpa_config_ops vp_vdpa_ops = {
.get_device_features = vp_vdpa_get_device_features,
.set_driver_features = vp_vdpa_set_driver_features,
.get_driver_features = vp_vdpa_get_driver_features,
.get_status = vp_vdpa_get_status,
.set_status = vp_vdpa_set_status,
.reset = vp_vdpa_reset,
.get_vq_num_max = vp_vdpa_get_vq_num_max,
.get_vq_state = vp_vdpa_get_vq_state,
.get_vq_notification = vp_vdpa_get_vq_notification,
.set_vq_state = vp_vdpa_set_vq_state,
.set_vq_cb = vp_vdpa_set_vq_cb,
.set_vq_ready = vp_vdpa_set_vq_ready,
.get_vq_ready = vp_vdpa_get_vq_ready,
.set_vq_num = vp_vdpa_set_vq_num,
.set_vq_address = vp_vdpa_set_vq_address,
.kick_vq = vp_vdpa_kick_vq,
.get_generation = vp_vdpa_get_generation,
.get_device_id = vp_vdpa_get_device_id,
.get_vendor_id = vp_vdpa_get_vendor_id,
.get_vq_align = vp_vdpa_get_vq_align,
.get_config_size = vp_vdpa_get_config_size,
.get_config = vp_vdpa_get_config,
.set_config = vp_vdpa_set_config,
.set_config_cb = vp_vdpa_set_config_cb,
.get_vq_irq = vp_vdpa_get_vq_irq,
};
/* devres action: release MSI-X vectors when the PCI device goes away. */
static void vp_vdpa_free_irq_vectors(void *data)
{
pci_free_irq_vectors(data);
}
/* Management-op: allocate and register a vp_vdpa device for this PCI
 * function, optionally provisioning a subset of the device features.
 * On error, put_device() releases the vdpa allocation; devm handles the
 * rest.
 */
static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
const struct vdpa_dev_set_config *add_config)
{
struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);
struct virtio_pci_modern_device *mdev = vp_vdpa_mgtdev->mdev;
struct pci_dev *pdev = mdev->pci_dev;
struct device *dev = &pdev->dev;
struct vp_vdpa *vp_vdpa = NULL;
u64 device_features;
int ret, i;
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
dev, &vp_vdpa_ops, 1, 1, name, false);
if (IS_ERR(vp_vdpa)) {
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
return PTR_ERR(vp_vdpa);
}
vp_vdpa_mgtdev->vp_vdpa = vp_vdpa;
vp_vdpa->vdpa.dma_dev = &pdev->dev;
vp_vdpa->queues = vp_modern_get_num_queues(mdev);
vp_vdpa->mdev = mdev;
device_features = vp_modern_get_features(mdev);
/* provisioned features must be a subset of what the device offers */
if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
if (add_config->device_features & ~device_features) {
ret = -EINVAL;
dev_err(&pdev->dev, "Try to provision features "
"that are not supported by the device: "
"device_features 0x%llx provisioned 0x%llx\n",
device_features, add_config->device_features);
goto err;
}
device_features = add_config->device_features;
}
vp_vdpa->device_features = device_features;
ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
if (ret) {
dev_err(&pdev->dev,
"Failed for adding devres for freeing irq vectors\n");
goto err;
}
vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
sizeof(*vp_vdpa->vring),
GFP_KERNEL);
if (!vp_vdpa->vring) {
ret = -ENOMEM;
dev_err(&pdev->dev, "Fail to allocate virtqueues\n");
goto err;
}
/* map each vq's notify register up front; irqs are requested later */
for (i = 0; i < vp_vdpa->queues; i++) {
vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
vp_vdpa->vring[i].notify =
vp_modern_map_vq_notify(mdev, i,
&vp_vdpa->vring[i].notify_pa);
if (!vp_vdpa->vring[i].notify) {
ret = -EINVAL;
dev_warn(&pdev->dev, "Fail to map vq notify %d\n", i);
goto err;
}
}
vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
vp_vdpa->vdpa.mdev = &vp_vdpa_mgtdev->mgtdev;
ret = _vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
if (ret) {
dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
goto err;
}
return 0;
err:
put_device(&vp_vdpa->vdpa.dev);
return ret;
}
/* Management-op: unregister the vdpa device created by vp_vdpa_dev_add(). */
static void vp_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev,
struct vdpa_device *dev)
{
struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);
struct vp_vdpa *vp_vdpa = vp_vdpa_mgtdev->vp_vdpa;
_vdpa_unregister_device(&vp_vdpa->vdpa);
vp_vdpa_mgtdev->vp_vdpa = NULL;
}
/* Management-device operations exposed to the vdpa core. */
static const struct vdpa_mgmtdev_ops vp_vdpa_mdev_ops = {
.dev_add = vp_vdpa_dev_add,
.dev_del = vp_vdpa_dev_del,
};
/* PCI probe: enable the device, probe its modern virtio capabilities and
 * register a vdpa management device for it. Allocations are unwound in
 * reverse order on failure.
 */
static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = NULL;
struct vdpa_mgmt_dev *mgtdev;
struct device *dev = &pdev->dev;
struct virtio_pci_modern_device *mdev = NULL;
struct virtio_device_id *mdev_id = NULL;
int err;
vp_vdpa_mgtdev = kzalloc(sizeof(*vp_vdpa_mgtdev), GFP_KERNEL);
if (!vp_vdpa_mgtdev)
return -ENOMEM;
mgtdev = &vp_vdpa_mgtdev->mgtdev;
mgtdev->ops = &vp_vdpa_mdev_ops;
mgtdev->device = dev;
mdev = kzalloc(sizeof(struct virtio_pci_modern_device), GFP_KERNEL);
if (!mdev) {
err = -ENOMEM;
goto mdev_err;
}
/* id_table must outlive the mgmtdev, so it is heap-allocated */
mdev_id = kzalloc(sizeof(struct virtio_device_id), GFP_KERNEL);
if (!mdev_id) {
err = -ENOMEM;
goto mdev_id_err;
}
vp_vdpa_mgtdev->mdev = mdev;
mdev->pci_dev = pdev;
err = pcim_enable_device(pdev);
if (err) {
goto probe_err;
}
err = vp_modern_probe(mdev);
if (err) {
dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
goto probe_err;
}
mdev_id->device = mdev->id.device;
mdev_id->vendor = mdev->id.vendor;
mgtdev->id_table = mdev_id;
mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
mgtdev->supported_features = vp_modern_get_features(mdev);
mgtdev->config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);
pci_set_master(pdev);
pci_set_drvdata(pdev, vp_vdpa_mgtdev);
err = vdpa_mgmtdev_register(mgtdev);
if (err) {
dev_err(&pdev->dev, "Failed to register vdpa mgmtdev device\n");
goto register_err;
}
return 0;
register_err:
vp_modern_remove(vp_vdpa_mgtdev->mdev);
probe_err:
kfree(mdev_id);
mdev_id_err:
kfree(mdev);
mdev_err:
kfree(vp_vdpa_mgtdev);
return err;
}
/* PCI remove: unwind everything vp_vdpa_probe() set up, in reverse order. */
static void vp_vdpa_remove(struct pci_dev *pdev)
{
struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = pci_get_drvdata(pdev);
struct virtio_pci_modern_device *mdev = NULL;
mdev = vp_vdpa_mgtdev->mdev;
vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
vp_modern_remove(mdev);
kfree(vp_vdpa_mgtdev->mgtdev.id_table);
kfree(mdev);
kfree(vp_vdpa_mgtdev);
}
/* PCI driver registration; devices are bound only via dynamic ids. */
static struct pci_driver vp_vdpa_driver = {
.name = "vp-vdpa",
.id_table = NULL, /* only dynamic ids */
.probe = vp_vdpa_probe,
.remove = vp_vdpa_remove,
};
module_pci_driver(vp_vdpa_driver);
MODULE_AUTHOR("Jason Wang <[email protected]>");
MODULE_DESCRIPTION("vp-vdpa");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");
|
linux-master
|
drivers/vdpa/virtio_pci/vp_vdpa.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* MMU-based software IOTLB.
*
* Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
*
* Author: Xie Yongji <[email protected]>
*
*/
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/vdpa.h>
#include "iova_domain.h"
/* Add [start, last] -> addr to the domain's iotlb, recording the backing
 * file and offset in the map's opaque context. Takes a reference on the
 * file that is dropped when the range is removed. GFP_ATOMIC because this
 * may run under the iotlb spinlock.
 */
static int vduse_iotlb_add_range(struct vduse_iova_domain *domain,
				 u64 start, u64 last,
				 u64 addr, unsigned int perm,
				 struct file *file, u64 offset)
{
	struct vdpa_map_file *map_file;
	int ret;

	map_file = kmalloc(sizeof(*map_file), GFP_ATOMIC);
	if (!map_file)
		return -ENOMEM;

	map_file->file = get_file(file);
	map_file->offset = offset;

	ret = vhost_iotlb_add_range_ctx(domain->iotlb, start, last,
					addr, perm, map_file);
	if (!ret)
		return 0;

	/* Undo the file reference taken above. */
	fput(map_file->file);
	kfree(map_file);
	return ret;
}
/* Remove every map overlapping [start, last], dropping each map's file
 * reference and freeing its context.
 */
static void vduse_iotlb_del_range(struct vduse_iova_domain *domain,
u64 start, u64 last)
{
struct vdpa_map_file *map_file;
struct vhost_iotlb_map *map;
while ((map = vhost_iotlb_itree_first(domain->iotlb, start, last))) {
map_file = (struct vdpa_map_file *)map->opaque;
fput(map_file->file);
kfree(map_file);
vhost_iotlb_map_free(domain->iotlb, map);
}
}
/* Replace the domain's iotlb with a copy of the given one. Atomic with
 * respect to the domain lock; on failure the domain is left empty rather
 * than partially populated.
 */
int vduse_domain_set_map(struct vduse_iova_domain *domain,
struct vhost_iotlb *iotlb)
{
struct vdpa_map_file *map_file;
struct vhost_iotlb_map *map;
u64 start = 0ULL, last = ULLONG_MAX;
int ret;
spin_lock(&domain->iotlb_lock);
vduse_iotlb_del_range(domain, start, last);
for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
map = vhost_iotlb_itree_next(map, start, last)) {
map_file = (struct vdpa_map_file *)map->opaque;
ret = vduse_iotlb_add_range(domain, map->start, map->last,
map->addr, map->perm,
map_file->file,
map_file->offset);
if (ret)
goto err;
}
spin_unlock(&domain->iotlb_lock);
return 0;
err:
vduse_iotlb_del_range(domain, start, last);
spin_unlock(&domain->iotlb_lock);
return ret;
}
/*
 * Remove from the domain every range that appears in @iotlb.
 * Used to undo a vduse_domain_set_map() when notifying userspace fails.
 */
void vduse_domain_clear_map(struct vduse_iova_domain *domain,
			    struct vhost_iotlb *iotlb)
{
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = ULLONG_MAX;
	spin_lock(&domain->iotlb_lock);
	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		vduse_iotlb_del_range(domain, map->start, map->last);
	}
	spin_unlock(&domain->iotlb_lock);
}
/*
 * Record the original physical pages backing [iova, iova + size) in the
 * per-pfn bounce map, allocating bounce pages lazily as needed.
 * GFP_ATOMIC is used because this runs under domain->bounce_lock (read).
 * Returns 0 on success, -ENOMEM if a bounce page cannot be allocated
 * (already-recorded entries are left for the caller to unwind).
 */
static int vduse_domain_map_bounce_page(struct vduse_iova_domain *domain,
					u64 iova, u64 size, u64 paddr)
{
	struct vduse_bounce_map *map;
	u64 last = iova + size - 1;
	while (iova <= last) {
		map = &domain->bounce_maps[iova >> PAGE_SHIFT];
		if (!map->bounce_page) {
			map->bounce_page = alloc_page(GFP_ATOMIC);
			if (!map->bounce_page)
				return -ENOMEM;
		}
		map->orig_phys = paddr;
		paddr += PAGE_SIZE;
		iova += PAGE_SIZE;
	}
	return 0;
}
/*
 * Invalidate the orig_phys association for every bounce pfn covering
 * [iova, iova + size).  The bounce pages themselves are kept for reuse.
 */
static void vduse_domain_unmap_bounce_page(struct vduse_iova_domain *domain,
					   u64 iova, u64 size)
{
	struct vduse_bounce_map *map;
	u64 last = iova + size - 1;
	while (iova <= last) {
		map = &domain->bounce_maps[iova >> PAGE_SHIFT];
		map->orig_phys = INVALID_PHYS_ADDR;
		iova += PAGE_SIZE;
	}
}
/*
 * Copy @size bytes between the original physical location @orig and the
 * bounce buffer at @addr, page by page.  DMA_TO_DEVICE copies from the
 * original pages into the bounce buffer; otherwise the copy goes back.
 */
static void do_bounce(phys_addr_t orig, void *addr, size_t size,
		      enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(orig);
	unsigned int offset = offset_in_page(orig);
	struct page *page;
	unsigned int sz = 0;
	while (size) {
		/* Clamp each chunk to the end of the current page. */
		sz = min_t(size_t, PAGE_SIZE - offset, size);
		page = pfn_to_page(pfn);
		if (dir == DMA_TO_DEVICE)
			memcpy_from_page(addr, page, offset, sz);
		else
			memcpy_to_page(page, offset, addr, sz);
		size -= sz;
		pfn++;
		addr += sz;
		offset = 0;
	}
}
/*
 * Bounce @size bytes at @iova between the bounce pages and the original
 * physical pages recorded by vduse_domain_map_bounce_page().
 * Silently ignores iovas outside the bounce region; WARNs and aborts if
 * a covered pfn has no bounce page or no recorded original address.
 */
static void vduse_domain_bounce(struct vduse_iova_domain *domain,
				dma_addr_t iova, size_t size,
				enum dma_data_direction dir)
{
	struct vduse_bounce_map *map;
	unsigned int offset;
	void *addr;
	size_t sz;
	if (iova >= domain->bounce_size)
		return;
	while (size) {
		map = &domain->bounce_maps[iova >> PAGE_SHIFT];
		offset = offset_in_page(iova);
		sz = min_t(size_t, PAGE_SIZE - offset, size);
		if (WARN_ON(!map->bounce_page ||
			    map->orig_phys == INVALID_PHYS_ADDR))
			return;
		addr = kmap_local_page(map->bounce_page);
		do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
		kunmap_local(addr);
		size -= sz;
		iova += sz;
	}
}
/*
 * Look up the page backing @iova in the coherent region via the IOTLB.
 * Returns the page with an extra reference, or NULL if no mapping
 * covers the page containing @iova.
 */
static struct page *
vduse_domain_get_coherent_page(struct vduse_iova_domain *domain, u64 iova)
{
	u64 start = iova & PAGE_MASK;
	u64 last = start + PAGE_SIZE - 1;
	struct vhost_iotlb_map *map;
	struct page *page = NULL;
	spin_lock(&domain->iotlb_lock);
	map = vhost_iotlb_itree_first(domain->iotlb, start, last);
	if (!map)
		goto out;
	page = pfn_to_page((map->addr + iova - map->start) >> PAGE_SHIFT);
	get_page(page);
out:
	spin_unlock(&domain->iotlb_lock);
	return page;
}
/*
 * Return the kernel bounce page for @iova with an extra reference, or
 * NULL when userspace has registered its own bounce pages (mmap of the
 * bounce region is then not served from kernel pages) or when no bounce
 * page has been allocated for this pfn yet.
 */
static struct page *
vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
{
	struct vduse_bounce_map *map;
	struct page *page = NULL;
	read_lock(&domain->bounce_lock);
	map = &domain->bounce_maps[iova >> PAGE_SHIFT];
	if (domain->user_bounce_pages || !map->bounce_page)
		goto out;
	page = map->bounce_page;
	get_page(page);
out:
	read_unlock(&domain->bounce_lock);
	return page;
}
/*
 * Free all kernel-allocated bounce pages.  WARNs on (and skips) any pfn
 * that still has a recorded original address, i.e. is still mapped.
 * Called on domain teardown.
 */
static void
vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
{
	struct vduse_bounce_map *map;
	unsigned long pfn, bounce_pfns;
	bounce_pfns = domain->bounce_size >> PAGE_SHIFT;
	for (pfn = 0; pfn < bounce_pfns; pfn++) {
		map = &domain->bounce_maps[pfn];
		if (WARN_ON(map->orig_phys != INVALID_PHYS_ADDR))
			continue;
		if (!map->bounce_page)
			continue;
		__free_page(map->bounce_page);
		map->bounce_page = NULL;
	}
}
/*
 * Replace the kernel bounce pages with @count user-registered pages
 * (one per bounce pfn; partial registration is rejected).  Contents of
 * any in-use kernel page are copied into the corresponding user page
 * before the kernel page is freed.  Takes a reference on each user page.
 * Returns 0 on success, -EINVAL on a partial mapping, -EEXIST if user
 * pages are already registered.
 */
int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
				       struct page **pages, int count)
{
	struct vduse_bounce_map *map;
	int i, ret;
	/* Now we don't support partial mapping */
	if (count != (domain->bounce_size >> PAGE_SHIFT))
		return -EINVAL;
	write_lock(&domain->bounce_lock);
	ret = -EEXIST;
	if (domain->user_bounce_pages)
		goto out;
	for (i = 0; i < count; i++) {
		map = &domain->bounce_maps[i];
		if (map->bounce_page) {
			/* Copy kernel page to user page if it's in use */
			if (map->orig_phys != INVALID_PHYS_ADDR)
				memcpy_to_page(pages[i], 0,
					       page_address(map->bounce_page),
					       PAGE_SIZE);
			__free_page(map->bounce_page);
		}
		map->bounce_page = pages[i];
		get_page(pages[i]);
	}
	domain->user_bounce_pages = true;
	ret = 0;
out:
	write_unlock(&domain->bounce_lock);
	return ret;
}
/*
 * Undo vduse_domain_add_user_bounce_pages(): drop the references on the
 * user pages and, for pfns still in use, allocate fresh kernel pages and
 * copy the user page contents back so outstanding mappings stay valid.
 *
 * NOTE(review): GFP_ATOMIC | __GFP_NOFAIL is a questionable combination —
 * __GFP_NOFAIL is generally not honoured for atomic allocations; confirm
 * whether this path can be made sleepable or the pages preallocated.
 */
void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
{
	struct vduse_bounce_map *map;
	unsigned long i, count;
	write_lock(&domain->bounce_lock);
	if (!domain->user_bounce_pages)
		goto out;
	count = domain->bounce_size >> PAGE_SHIFT;
	for (i = 0; i < count; i++) {
		struct page *page = NULL;
		map = &domain->bounce_maps[i];
		if (WARN_ON(!map->bounce_page))
			continue;
		/* Copy user page to kernel page if it's in use */
		if (map->orig_phys != INVALID_PHYS_ADDR) {
			page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
			memcpy_from_page(page_address(page),
					 map->bounce_page, 0, PAGE_SIZE);
		}
		put_page(map->bounce_page);
		map->bounce_page = page;
	}
	domain->user_bounce_pages = false;
out:
	write_unlock(&domain->bounce_lock);
}
/*
 * Drop the whole-bounce-region IOTLB entry installed by
 * vduse_domain_init_bounce_map().  The unlocked pre-check is an
 * optimization; the flag is re-checked under iotlb_lock before acting.
 */
void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
{
	if (!domain->bounce_map)
		return;
	spin_lock(&domain->iotlb_lock);
	if (!domain->bounce_map)
		goto unlock;
	vduse_iotlb_del_range(domain, 0, domain->bounce_size - 1);
	domain->bounce_map = 0;
unlock:
	spin_unlock(&domain->iotlb_lock);
}
/*
 * Lazily install a single RW IOTLB entry covering the entire bounce
 * region, backed by the domain's own anon file at offset 0.  The flag
 * is double-checked under iotlb_lock so concurrent callers install it
 * only once.  Returns 0 on success or a negative errno.
 */
static int vduse_domain_init_bounce_map(struct vduse_iova_domain *domain)
{
	int ret = 0;
	if (domain->bounce_map)
		return 0;
	spin_lock(&domain->iotlb_lock);
	if (domain->bounce_map)
		goto unlock;
	ret = vduse_iotlb_add_range(domain, 0, domain->bounce_size - 1,
				    0, VHOST_MAP_RW, domain->file, 0);
	if (ret)
		goto unlock;
	domain->bounce_map = 1;
unlock:
	spin_unlock(&domain->iotlb_lock);
	return ret;
}
/*
 * Allocate an IOVA range of at least @size bytes below @limit from
 * @iovad.  Returns the IOVA, or 0 on allocation failure (callers treat
 * a zero return as the error value).
 */
static dma_addr_t
vduse_domain_alloc_iova(struct iova_domain *iovad,
			unsigned long size, unsigned long limit)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long iova_len = iova_align(iovad, size) >> shift;
	unsigned long iova_pfn;
	iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);
	return (dma_addr_t)iova_pfn << shift;
}
/* Release an IOVA range previously obtained via vduse_domain_alloc_iova(). */
static void vduse_domain_free_iova(struct iova_domain *iovad,
				   dma_addr_t iova, size_t size)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long iova_len = iova_align(iovad, size) >> shift;
	free_iova_fast(iovad, iova >> shift, iova_len);
}
/*
 * Streaming-DMA map: allocate an IOVA inside the bounce region, record
 * the original physical pages, and for TO_DEVICE/BIDIRECTIONAL copy the
 * data into the bounce pages.  Returns the IOVA or DMA_MAPPING_ERROR.
 */
dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
				 struct page *page, unsigned long offset,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	struct iova_domain *iovad = &domain->stream_iovad;
	unsigned long limit = domain->bounce_size - 1;
	phys_addr_t pa = page_to_phys(page) + offset;
	dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
	if (!iova)
		return DMA_MAPPING_ERROR;
	/* Make sure the bounce region is visible to userspace via the IOTLB. */
	if (vduse_domain_init_bounce_map(domain))
		goto err;
	read_lock(&domain->bounce_lock);
	if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
		goto err_unlock;
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
	read_unlock(&domain->bounce_lock);
	return iova;
err_unlock:
	read_unlock(&domain->bounce_lock);
err:
	vduse_domain_free_iova(iovad, iova, size);
	return DMA_MAPPING_ERROR;
}
/*
 * Streaming-DMA unmap: for FROM_DEVICE/BIDIRECTIONAL copy the bounce
 * pages back to the original pages, then invalidate the bounce entries
 * and release the IOVA.
 */
void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
			     dma_addr_t dma_addr, size_t size,
			     enum dma_data_direction dir, unsigned long attrs)
{
	struct iova_domain *iovad = &domain->stream_iovad;
	read_lock(&domain->bounce_lock);
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
	vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
	read_unlock(&domain->bounce_lock);
	vduse_domain_free_iova(iovad, dma_addr, size);
}
/*
 * Coherent-DMA alloc: back the buffer with real pages (alloc_pages_exact),
 * allocate an IOVA from the consistent range (above the bounce region),
 * and publish the mapping through the IOTLB so userspace can mmap it via
 * the domain file (the file offset equals the IOVA).
 * Returns the kernel address and sets *dma_addr, or NULL on failure
 * (with *dma_addr set to DMA_MAPPING_ERROR).
 */
void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
				  size_t size, dma_addr_t *dma_addr,
				  gfp_t flag, unsigned long attrs)
{
	struct iova_domain *iovad = &domain->consistent_iovad;
	unsigned long limit = domain->iova_limit;
	dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
	void *orig = alloc_pages_exact(size, flag);
	if (!iova || !orig)
		goto err;
	spin_lock(&domain->iotlb_lock);
	if (vduse_iotlb_add_range(domain, (u64)iova, (u64)iova + size - 1,
				  virt_to_phys(orig), VHOST_MAP_RW,
				  domain->file, (u64)iova)) {
		spin_unlock(&domain->iotlb_lock);
		goto err;
	}
	spin_unlock(&domain->iotlb_lock);
	*dma_addr = iova;
	return orig;
err:
	*dma_addr = DMA_MAPPING_ERROR;
	if (orig)
		free_pages_exact(orig, size);
	if (iova)
		vduse_domain_free_iova(iovad, iova, size);
	return NULL;
}
/*
 * Coherent-DMA free: tear down the IOTLB entry for @dma_addr (dropping
 * the file reference taken at alloc time), release the IOVA, and free
 * the backing pages.  WARNs and bails out if no mapping is found.
 */
void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
				void *vaddr, dma_addr_t dma_addr,
				unsigned long attrs)
{
	struct iova_domain *iovad = &domain->consistent_iovad;
	struct vhost_iotlb_map *map;
	struct vdpa_map_file *map_file;
	phys_addr_t pa;
	spin_lock(&domain->iotlb_lock);
	map = vhost_iotlb_itree_first(domain->iotlb, (u64)dma_addr,
				      (u64)dma_addr + size - 1);
	if (WARN_ON(!map)) {
		spin_unlock(&domain->iotlb_lock);
		return;
	}
	map_file = (struct vdpa_map_file *)map->opaque;
	fput(map_file->file);
	kfree(map_file);
	pa = map->addr;
	vhost_iotlb_map_free(domain->iotlb, map);
	spin_unlock(&domain->iotlb_lock);
	vduse_domain_free_iova(iovad, dma_addr, size);
	free_pages_exact(phys_to_virt(pa), size);
}
/*
 * Page-fault handler for mmaps of the domain file: the file offset is
 * interpreted as an IOVA.  Faults below bounce_size are served from the
 * kernel bounce pages; higher offsets from the coherent mappings.
 */
static vm_fault_t vduse_domain_mmap_fault(struct vm_fault *vmf)
{
	struct vduse_iova_domain *domain = vmf->vma->vm_private_data;
	unsigned long iova = vmf->pgoff << PAGE_SHIFT;
	struct page *page;
	if (!domain)
		return VM_FAULT_SIGBUS;
	if (iova < domain->bounce_size)
		page = vduse_domain_get_bounce_page(domain, iova);
	else
		page = vduse_domain_get_coherent_page(domain, iova);
	if (!page)
		return VM_FAULT_SIGBUS;
	vmf->page = page;
	return 0;
}
static const struct vm_operations_struct vduse_domain_mmap_ops = {
	.fault = vduse_domain_mmap_fault,
};
/*
 * mmap of the domain file: all pages are populated on demand via
 * vduse_domain_mmap_fault(); the VMA must not expand or be dumped.
 */
static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vduse_iova_domain *domain = file->private_data;
	vm_flags_set(vma, VM_DONTDUMP | VM_DONTEXPAND);
	vma->vm_private_data = domain;
	vma->vm_ops = &vduse_domain_mmap_ops;
	return 0;
}
/*
 * Final fput() of the domain file: flush the IOTLB, return any
 * user-registered bounce pages, free the kernel bounce pages, and tear
 * down the IOVA allocators and the domain itself.
 */
static int vduse_domain_release(struct inode *inode, struct file *file)
{
	struct vduse_iova_domain *domain = file->private_data;
	spin_lock(&domain->iotlb_lock);
	vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
	vduse_domain_remove_user_bounce_pages(domain);
	vduse_domain_free_kernel_bounce_pages(domain);
	spin_unlock(&domain->iotlb_lock);
	put_iova_domain(&domain->stream_iovad);
	put_iova_domain(&domain->consistent_iovad);
	vhost_iotlb_free(domain->iotlb);
	vfree(domain->bounce_maps);
	kfree(domain);
	return 0;
}
static const struct file_operations vduse_domain_fops = {
	.owner = THIS_MODULE,
	.mmap = vduse_domain_mmap,
	.release = vduse_domain_release,
};
/*
 * Drop the creator's reference on the domain file; the actual teardown
 * happens in vduse_domain_release() once the last reference is gone.
 */
void vduse_domain_destroy(struct vduse_iova_domain *domain)
{
	fput(domain->file);
}
/*
 * Create an IOVA domain with a bounce region of @bounce_size bytes at
 * the bottom of the space and a coherent region above it, up to
 * @iova_limit.  The domain's lifetime is tied to an anonymous file so
 * userspace mmaps keep it alive after vduse_domain_destroy().
 * Returns the new domain or NULL on failure.
 */
struct vduse_iova_domain *
vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
{
	struct vduse_iova_domain *domain;
	struct file *file;
	struct vduse_bounce_map *map;
	unsigned long pfn, bounce_pfns;
	int ret;
	bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT;
	if (iova_limit <= bounce_size)
		return NULL;
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;
	domain->iotlb = vhost_iotlb_alloc(0, 0);
	if (!domain->iotlb)
		goto err_iotlb;
	domain->iova_limit = iova_limit;
	domain->bounce_size = PAGE_ALIGN(bounce_size);
	domain->bounce_maps = vzalloc(bounce_pfns *
				sizeof(struct vduse_bounce_map));
	if (!domain->bounce_maps)
		goto err_map;
	/* No bounce page is mapped yet. */
	for (pfn = 0; pfn < bounce_pfns; pfn++) {
		map = &domain->bounce_maps[pfn];
		map->orig_phys = INVALID_PHYS_ADDR;
	}
	file = anon_inode_getfile("[vduse-domain]", &vduse_domain_fops,
				domain, O_RDWR);
	if (IS_ERR(file))
		goto err_file;
	domain->file = file;
	rwlock_init(&domain->bounce_lock);
	spin_lock_init(&domain->iotlb_lock);
	/* Streaming IOVAs live below the bounce region boundary... */
	init_iova_domain(&domain->stream_iovad,
			PAGE_SIZE, IOVA_START_PFN);
	ret = iova_domain_init_rcaches(&domain->stream_iovad);
	if (ret)
		goto err_iovad_stream;
	/* ...coherent IOVAs start right above it. */
	init_iova_domain(&domain->consistent_iovad,
			PAGE_SIZE, bounce_pfns);
	ret = iova_domain_init_rcaches(&domain->consistent_iovad);
	if (ret)
		goto err_iovad_consistent;
	return domain;
err_iovad_consistent:
	put_iova_domain(&domain->stream_iovad);
err_iovad_stream:
	fput(file);
err_file:
	vfree(domain->bounce_maps);
err_map:
	vhost_iotlb_free(domain->iotlb);
err_iotlb:
	kfree(domain);
	return NULL;
}
/* Module init/exit hooks: hold a reference on the global IOVA rcache. */
int vduse_domain_init(void)
{
	return iova_cache_get();
}
void vduse_domain_exit(void)
{
	iova_cache_put();
}
|
linux-master
|
drivers/vdpa/vdpa_user/iova_domain.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* VDUSE: vDPA Device in Userspace
*
* Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
*
* Author: Xie Yongji <[email protected]>
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/dma-map-ops.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <uapi/linux/vduse.h>
#include <uapi/linux/vdpa.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_blk.h>
#include <linux/mod_devicetable.h>
#include "iova_domain.h"
#define DRV_AUTHOR "Yongji Xie <[email protected]>"
#define DRV_DESC "vDPA Device in Userspace"
#define DRV_LICENSE "GPL v2"
#define VDUSE_DEV_MAX (1U << MINORBITS)
#define VDUSE_MAX_BOUNCE_SIZE (1024 * 1024 * 1024)
#define VDUSE_MIN_BOUNCE_SIZE (1024 * 1024)
#define VDUSE_BOUNCE_SIZE (64 * 1024 * 1024)
/* 128 MB reserved for virtqueue creation */
#define VDUSE_IOVA_SIZE (VDUSE_MAX_BOUNCE_SIZE + 128 * 1024 * 1024)
#define VDUSE_MSG_DEFAULT_TIMEOUT 30
#define IRQ_UNBOUND -1
/* Per-virtqueue state mirrored between the vDPA core and userspace. */
struct vduse_virtqueue {
	u16 index;
	u16 num_max;
	u32 num;
	u64 desc_addr;
	u64 driver_addr;
	u64 device_addr;
	struct vdpa_vq_state state;
	bool ready;
	bool kicked;	/* kick arrived before a kickfd was assigned */
	spinlock_t kick_lock;
	spinlock_t irq_lock;
	struct eventfd_ctx *kickfd;
	struct vdpa_callback cb;
	struct work_struct inject;	/* deferred interrupt injection */
	struct work_struct kick;	/* deferred eventfd kick */
	int irq_effective_cpu;
	struct cpumask irq_affinity;
	struct kobject kobj;
};
struct vduse_dev;
/* Glue object linking the registered vdpa_device back to its vduse_dev. */
struct vduse_vdpa {
	struct vdpa_device vdpa;
	struct vduse_dev *dev;
};
/* Userspace memory registered as bounce pages (pinned via @pages). */
struct vduse_umem {
	unsigned long iova;
	unsigned long npages;
	struct page **pages;
	struct mm_struct *mm;
};
/* One VDUSE device instance, backed by a userspace server process. */
struct vduse_dev {
	struct vduse_vdpa *vdev;
	struct device *dev;
	struct vduse_virtqueue **vqs;
	struct vduse_iova_domain *domain;
	char *name;
	struct mutex lock;
	spinlock_t msg_lock;	/* protects msg lists and msg_unique */
	u64 msg_unique;		/* next request_id for kernel->user messages */
	u32 msg_timeout;	/* seconds to wait for a userspace reply */
	wait_queue_head_t waitq;
	struct list_head send_list;	/* messages waiting to be read */
	struct list_head recv_list;	/* messages awaiting a reply */
	struct vdpa_callback config_cb;
	struct work_struct inject;
	spinlock_t irq_lock;
	struct rw_semaphore rwsem;
	int minor;
	bool broken;		/* set once a message times out; device dead */
	bool connected;
	u64 api_version;
	u64 device_features;
	u64 driver_features;
	u32 device_id;
	u32 vendor_id;
	u32 generation;
	u32 config_size;
	void *config;
	u8 status;
	u32 vq_num;
	u32 vq_align;
	struct vduse_umem *umem;
	struct mutex mem_lock;
	unsigned int bounce_size;
	struct mutex domain_lock;
};
/* A single kernel->userspace request and its reply. */
struct vduse_dev_msg {
	struct vduse_dev_request req;
	struct vduse_dev_response resp;
	struct list_head list;
	wait_queue_head_t waitq;
	bool completed;
};
struct vduse_control {
	u64 api_version;
};
static DEFINE_MUTEX(vduse_lock);
static DEFINE_IDR(vduse_idr);
static dev_t vduse_major;
static struct class *vduse_class;
static struct cdev vduse_ctrl_cdev;
static struct cdev vduse_cdev;
static struct workqueue_struct *vduse_irq_wq;
static struct workqueue_struct *vduse_irq_bound_wq;
/* Device classes userspace is allowed to emulate. */
static u32 allowed_device_id[] = {
	VIRTIO_ID_BLOCK,
};
/* Container conversions from the generic vdpa/device objects. */
static inline struct vduse_dev *vdpa_to_vduse(struct vdpa_device *vdpa)
{
	struct vduse_vdpa *vdev = container_of(vdpa, struct vduse_vdpa, vdpa);
	return vdev->dev;
}
static inline struct vduse_dev *dev_to_vduse(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);
	return vdpa_to_vduse(vdpa);
}
/*
 * Find and unlink the message with @request_id from @head, or return
 * NULL.  Caller holds dev->msg_lock.
 */
static struct vduse_dev_msg *vduse_find_msg(struct list_head *head,
					    uint32_t request_id)
{
	struct vduse_dev_msg *msg;
	list_for_each_entry(msg, head, list) {
		if (msg->req.request_id == request_id) {
			list_del(&msg->list);
			return msg;
		}
	}
	return NULL;
}
/* Pop the oldest message off @head (or NULL).  Caller holds msg_lock. */
static struct vduse_dev_msg *vduse_dequeue_msg(struct list_head *head)
{
	struct vduse_dev_msg *msg = NULL;
	if (!list_empty(head)) {
		msg = list_first_entry(head, struct vduse_dev_msg, list);
		list_del(&msg->list);
	}
	return msg;
}
/* Append @msg to @head.  Caller holds msg_lock. */
static void vduse_enqueue_msg(struct list_head *head,
			      struct vduse_dev_msg *msg)
{
	list_add_tail(&msg->list, head);
}
/*
 * Mark the device as malfunctioning: fail and complete every pending
 * message on both lists and wake all waiters.  Called with msg_lock
 * held once a request has timed out.
 */
static void vduse_dev_broken(struct vduse_dev *dev)
{
	struct vduse_dev_msg *msg, *tmp;
	if (unlikely(dev->broken))
		return;
	list_splice_init(&dev->recv_list, &dev->send_list);
	list_for_each_entry_safe(msg, tmp, &dev->send_list, list) {
		list_del(&msg->list);
		msg->completed = 1;
		msg->resp.result = VDUSE_REQ_RESULT_FAILED;
		wake_up(&msg->waitq);
	}
	dev->broken = true;
	wake_up(&dev->waitq);
}
/*
 * Send @msg to userspace and wait (killable, optionally bounded by
 * dev->msg_timeout seconds) for the reply.  A timeout marks the whole
 * device broken.  Returns 0 if userspace answered VDUSE_REQ_RESULT_OK,
 * -EIO otherwise.
 */
static int vduse_dev_msg_sync(struct vduse_dev *dev,
			      struct vduse_dev_msg *msg)
{
	int ret;
	if (unlikely(dev->broken))
		return -EIO;
	init_waitqueue_head(&msg->waitq);
	spin_lock(&dev->msg_lock);
	/* Re-check under the lock: the device may have just broken. */
	if (unlikely(dev->broken)) {
		spin_unlock(&dev->msg_lock);
		return -EIO;
	}
	msg->req.request_id = dev->msg_unique++;
	vduse_enqueue_msg(&dev->send_list, msg);
	wake_up(&dev->waitq);
	spin_unlock(&dev->msg_lock);
	if (dev->msg_timeout)
		ret = wait_event_killable_timeout(msg->waitq, msg->completed,
						  (long)dev->msg_timeout * HZ);
	else
		ret = wait_event_killable(msg->waitq, msg->completed);
	spin_lock(&dev->msg_lock);
	if (!msg->completed) {
		/* Interrupted or timed out: withdraw the request. */
		list_del(&msg->list);
		msg->resp.result = VDUSE_REQ_RESULT_FAILED;
		/* Mark the device as malfunction when there is a timeout */
		if (!ret)
			vduse_dev_broken(dev);
	}
	ret = (msg->resp.result == VDUSE_REQ_RESULT_OK) ? 0 : -EIO;
	spin_unlock(&dev->msg_lock);
	return ret;
}
/*
 * Query userspace for the packed-ring state of @vq.  Wrap counters are
 * masked to 1 bit and ring indices to 15 bits as the packed ring format
 * requires.  Returns 0 or a negative errno from the message exchange.
 */
static int vduse_dev_get_vq_state_packed(struct vduse_dev *dev,
					 struct vduse_virtqueue *vq,
					 struct vdpa_vq_state_packed *packed)
{
	struct vduse_dev_msg msg = { 0 };
	int ret;
	msg.req.type = VDUSE_GET_VQ_STATE;
	msg.req.vq_state.index = vq->index;
	ret = vduse_dev_msg_sync(dev, &msg);
	if (ret)
		return ret;
	packed->last_avail_counter =
			msg.resp.vq_state.packed.last_avail_counter & 0x0001;
	packed->last_avail_idx =
			msg.resp.vq_state.packed.last_avail_idx & 0x7FFF;
	packed->last_used_counter =
			msg.resp.vq_state.packed.last_used_counter & 0x0001;
	packed->last_used_idx =
			msg.resp.vq_state.packed.last_used_idx & 0x7FFF;
	return 0;
}
/* Query userspace for the split-ring avail index of @vq. */
static int vduse_dev_get_vq_state_split(struct vduse_dev *dev,
					struct vduse_virtqueue *vq,
					struct vdpa_vq_state_split *split)
{
	struct vduse_dev_msg msg = { 0 };
	int ret;
	msg.req.type = VDUSE_GET_VQ_STATE;
	msg.req.vq_state.index = vq->index;
	ret = vduse_dev_msg_sync(dev, &msg);
	if (ret)
		return ret;
	split->avail_index = msg.resp.vq_state.split.avail_index;
	return 0;
}
/* Synchronously tell userspace about a device status change. */
static int vduse_dev_set_status(struct vduse_dev *dev, u8 status)
{
	struct vduse_dev_msg msg = { 0 };
	msg.req.type = VDUSE_SET_STATUS;
	msg.req.s.status = status;
	return vduse_dev_msg_sync(dev, &msg);
}
/*
 * Synchronously notify userspace that the IOTLB range [start, last]
 * changed so it can re-establish its mappings.
 */
static int vduse_dev_update_iotlb(struct vduse_dev *dev,
				  u64 start, u64 last)
{
	struct vduse_dev_msg msg = { 0 };
	if (last < start)
		return -EINVAL;
	msg.req.type = VDUSE_UPDATE_IOTLB;
	msg.req.iova.start = start;
	msg.req.iova.last = last;
	return vduse_dev_msg_sync(dev, &msg);
}
/*
 * read() on the device fd: hand the oldest pending request to the
 * userspace server.  Blocks (unless O_NONBLOCK) until a request is
 * queued.  A successfully copied message moves to recv_list to await
 * its reply; on copy failure it is re-queued for another read.
 */
static ssize_t vduse_dev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vduse_dev *dev = file->private_data;
	struct vduse_dev_msg *msg;
	int size = sizeof(struct vduse_dev_request);
	ssize_t ret;
	if (iov_iter_count(to) < size)
		return -EINVAL;
	spin_lock(&dev->msg_lock);
	while (1) {
		msg = vduse_dequeue_msg(&dev->send_list);
		if (msg)
			break;
		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			goto unlock;
		/* Drop the lock while sleeping for new requests. */
		spin_unlock(&dev->msg_lock);
		ret = wait_event_interruptible_exclusive(dev->waitq,
					!list_empty(&dev->send_list));
		if (ret)
			return ret;
		spin_lock(&dev->msg_lock);
	}
	spin_unlock(&dev->msg_lock);
	ret = copy_to_iter(&msg->req, size, to);
	spin_lock(&dev->msg_lock);
	if (ret != size) {
		ret = -EFAULT;
		vduse_enqueue_msg(&dev->send_list, msg);
		goto unlock;
	}
	vduse_enqueue_msg(&dev->recv_list, msg);
unlock:
	spin_unlock(&dev->msg_lock);
	return ret;
}
/* Return true when all @size bytes starting at @ptr are zero. */
static bool is_mem_zero(const char *ptr, int size)
{
	const char *end = ptr + size;

	while (ptr < end) {
		if (*ptr++)
			return false;
	}
	return true;
}
/*
 * write() on the device fd: userspace delivers the reply for a request
 * it previously read.  The reply is matched by request_id against
 * recv_list; the waiting vduse_dev_msg_sync() caller is then woken.
 * Replies with non-zero reserved bytes are rejected.
 *
 * NOTE(review): @ret is size_t but is assigned -ENOENT and returned as
 * ssize_t — correct in practice via conversion, but worth confirming
 * the intended type.
 */
static ssize_t vduse_dev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vduse_dev *dev = file->private_data;
	struct vduse_dev_response resp;
	struct vduse_dev_msg *msg;
	size_t ret;
	ret = copy_from_iter(&resp, sizeof(resp), from);
	if (ret != sizeof(resp))
		return -EINVAL;
	if (!is_mem_zero((const char *)resp.reserved, sizeof(resp.reserved)))
		return -EINVAL;
	spin_lock(&dev->msg_lock);
	msg = vduse_find_msg(&dev->recv_list, resp.request_id);
	if (!msg) {
		ret = -ENOENT;
		goto unlock;
	}
	memcpy(&msg->resp, &resp, sizeof(resp));
	msg->completed = 1;
	wake_up(&msg->waitq);
unlock:
	spin_unlock(&dev->msg_lock);
	return ret;
}
/*
 * poll() on the device fd: readable when requests are pending, writable
 * when replies are expected, EPOLLERR once the device is broken.
 */
static __poll_t vduse_dev_poll(struct file *file, poll_table *wait)
{
	struct vduse_dev *dev = file->private_data;
	__poll_t mask = 0;
	poll_wait(file, &dev->waitq, wait);
	spin_lock(&dev->msg_lock);
	if (unlikely(dev->broken))
		mask |= EPOLLERR;
	if (!list_empty(&dev->send_list))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (!list_empty(&dev->recv_list))
		mask |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&dev->msg_lock);
	return mask;
}
/*
 * Reset the device to its pristine state: drop the bounce-region IOTLB
 * entry, clear status/features, invalidate the config callback, and
 * reset every virtqueue (addresses, state, kickfd, callbacks), flushing
 * any in-flight injection/kick work.  Holds rwsem for write to exclude
 * concurrent irq injection paths.
 */
static void vduse_dev_reset(struct vduse_dev *dev)
{
	int i;
	struct vduse_iova_domain *domain = dev->domain;
	/* The coherent mappings are handled in vduse_dev_free_coherent() */
	if (domain && domain->bounce_map)
		vduse_domain_reset_bounce_map(domain);
	down_write(&dev->rwsem);
	dev->status = 0;
	dev->driver_features = 0;
	dev->generation++;
	spin_lock(&dev->irq_lock);
	dev->config_cb.callback = NULL;
	dev->config_cb.private = NULL;
	spin_unlock(&dev->irq_lock);
	flush_work(&dev->inject);
	for (i = 0; i < dev->vq_num; i++) {
		struct vduse_virtqueue *vq = dev->vqs[i];
		vq->ready = false;
		vq->desc_addr = 0;
		vq->driver_addr = 0;
		vq->device_addr = 0;
		vq->num = 0;
		memset(&vq->state, 0, sizeof(vq->state));
		spin_lock(&vq->kick_lock);
		vq->kicked = false;
		if (vq->kickfd)
			eventfd_ctx_put(vq->kickfd);
		vq->kickfd = NULL;
		spin_unlock(&vq->kick_lock);
		spin_lock(&vq->irq_lock);
		vq->cb.callback = NULL;
		vq->cb.private = NULL;
		vq->cb.trigger = NULL;
		spin_unlock(&vq->irq_lock);
		flush_work(&vq->inject);
		flush_work(&vq->kick);
	}
	up_write(&dev->rwsem);
}
/* vdpa op: record the ring area addresses supplied by the driver. */
static int vduse_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				u64 desc_area, u64 driver_area,
				u64 device_area)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];
	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;
	return 0;
}
/*
 * Kick the userspace server for @vq via its eventfd.  If no kickfd is
 * assigned yet, remember the kick so vduse_kickfd_setup() can replay it.
 * Kicks on a not-ready queue are dropped.
 */
static void vduse_vq_kick(struct vduse_virtqueue *vq)
{
	spin_lock(&vq->kick_lock);
	if (!vq->ready)
		goto unlock;
	if (vq->kickfd)
		eventfd_signal(vq->kickfd, 1);
	else
		vq->kicked = true;
unlock:
	spin_unlock(&vq->kick_lock);
}
/* Workqueue wrapper so a kick can be issued from a sleepable context. */
static void vduse_vq_kick_work(struct work_struct *work)
{
	struct vduse_virtqueue *vq = container_of(work,
					struct vduse_virtqueue, kick);
	vduse_vq_kick(vq);
}
/*
 * vdpa op: kick @idx.  If eventfd signalling is not allowed in the
 * current context, defer the kick to the workqueue.
 */
static void vduse_vdpa_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];
	if (!eventfd_signal_allowed()) {
		schedule_work(&vq->kick);
		return;
	}
	vduse_vq_kick(vq);
}
/* vdpa op: install the used-ring interrupt callback for @idx. */
static void vduse_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];
	spin_lock(&vq->irq_lock);
	vq->cb.callback = cb->callback;
	vq->cb.private = cb->private;
	vq->cb.trigger = cb->trigger;
	spin_unlock(&vq->irq_lock);
}
/* vdpa op: set the negotiated ring size for @idx. */
static void vduse_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];
	vq->num = num;
}
/* vdpa op: mark @idx ready (or not) for use. */
static void vduse_vdpa_set_vq_ready(struct vdpa_device *vdpa,
					u16 idx, bool ready)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];
	vq->ready = ready;
}
/* vdpa op: report whether @idx is ready. */
static bool vduse_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];
	return vq->ready;
}
/*
 * vdpa op: cache the ring state for @idx, interpreting @state as packed
 * or split depending on the negotiated VIRTIO_F_RING_PACKED feature.
 */
static int vduse_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];
	if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		vq->state.packed.last_avail_counter =
				state->packed.last_avail_counter;
		vq->state.packed.last_avail_idx = state->packed.last_avail_idx;
		vq->state.packed.last_used_counter =
				state->packed.last_used_counter;
		vq->state.packed.last_used_idx = state->packed.last_used_idx;
	} else
		vq->state.split.avail_index = state->split.avail_index;
	return 0;
}
/* vdpa op: fetch the live ring state for @idx from userspace. */
static int vduse_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];
	if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
		return vduse_dev_get_vq_state_packed(dev, vq, &state->packed);
	return vduse_dev_get_vq_state_split(dev, vq, &state->split);
}
/* vdpa op: report the required virtqueue alignment. */
static u32 vduse_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	return dev->vq_align;
}
/* vdpa op: report the features the userspace device offers. */
static u64 vduse_vdpa_get_device_features(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	return dev->device_features;
}
/* vdpa op: record the features the driver accepted. */
static int vduse_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	dev->driver_features = features;
	return 0;
}
/* vdpa op: report the accepted driver features. */
static u64 vduse_vdpa_get_driver_features(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	return dev->driver_features;
}
/* vdpa op: install the config-change interrupt callback. */
static void vduse_vdpa_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	spin_lock(&dev->irq_lock);
	dev->config_cb.callback = cb->callback;
	dev->config_cb.private = cb->private;
	spin_unlock(&dev->irq_lock);
}
/* vdpa op: report the largest ring size supported by any virtqueue. */
static u16 vduse_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	u16 num_max = 0;
	int i;
	for (i = 0; i < dev->vq_num; i++)
		if (num_max < dev->vqs[i]->num_max)
			num_max = dev->vqs[i]->num_max;
	return num_max;
}
/* vdpa op: report the virtio device ID configured by userspace. */
static u32 vduse_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	return dev->device_id;
}
/* vdpa op: report the vendor ID configured by userspace. */
static u32 vduse_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	return dev->vendor_id;
}
/* vdpa op: report the cached device status. */
static u8 vduse_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	return dev->status;
}
/*
 * vdpa op: forward a status change to userspace; the cached status is
 * updated only if userspace acknowledged it.
 */
static void vduse_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	if (vduse_dev_set_status(dev, status))
		return;
	dev->status = status;
}
/* vdpa op: report the size of the device config space. */
static size_t vduse_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	return dev->config_size;
}
/*
 * vdpa op: copy up to @len bytes of config space starting at @offset
 * into @buf.  Out-of-range reads return zeroes; in-range reads are
 * clamped to the config size.
 */
static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
				  void *buf, unsigned int len)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	/* Initialize the buffer in case of partial copy. */
	memset(buf, 0, len);
	if (offset > dev->config_size)
		return;
	if (len > dev->config_size - offset)
		len = dev->config_size - offset;
	memcpy(buf, dev->config + offset, len);
}
/* vdpa op: config writes are intentionally ignored. */
static void vduse_vdpa_set_config(struct vdpa_device *vdpa, unsigned int offset,
			const void *buf, unsigned int len)
{
	/* Now we only support read-only configuration space */
}
/*
 * vdpa op: reset the device.  Local state is reset even if notifying
 * userspace failed; the message result is propagated to the caller.
 */
static int vduse_vdpa_reset(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	int ret = vduse_dev_set_status(dev, 0);
	vduse_dev_reset(dev);
	return ret;
}
/* vdpa op: report the config generation (bumped on each reset). */
static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	return dev->generation;
}
/* vdpa op: set irq affinity for @idx; NULL mask means "any CPU". */
static int vduse_vdpa_set_vq_affinity(struct vdpa_device *vdpa, u16 idx,
				      const struct cpumask *cpu_mask)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	if (cpu_mask)
		cpumask_copy(&dev->vqs[idx]->irq_affinity, cpu_mask);
	else
		cpumask_setall(&dev->vqs[idx]->irq_affinity);
	return 0;
}
/* vdpa op: report the irq affinity mask of @idx. */
static const struct cpumask *
vduse_vdpa_get_vq_affinity(struct vdpa_device *vdpa, u16 idx)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	return &dev->vqs[idx]->irq_affinity;
}
/*
 * vdpa op: install a new IOTLB and notify userspace of the full-range
 * invalidation.  If userspace cannot be notified, the newly installed
 * entries are removed again so kernel and userspace stay consistent.
 */
static int vduse_vdpa_set_map(struct vdpa_device *vdpa,
				unsigned int asid,
				struct vhost_iotlb *iotlb)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	int ret;
	ret = vduse_domain_set_map(dev->domain, iotlb);
	if (ret)
		return ret;
	ret = vduse_dev_update_iotlb(dev, 0ULL, ULLONG_MAX);
	if (ret) {
		vduse_domain_clear_map(dev->domain, iotlb);
		return ret;
	}
	return 0;
}
/* vdpa op: detach the vdpa glue object when the vdpa device is freed. */
static void vduse_vdpa_free(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	dev->vdev = NULL;
}
/* vdpa_config_ops table wiring the vduse_vdpa_* implementations above. */
static const struct vdpa_config_ops vduse_vdpa_config_ops = {
	.set_vq_address		= vduse_vdpa_set_vq_address,
	.kick_vq		= vduse_vdpa_kick_vq,
	.set_vq_cb		= vduse_vdpa_set_vq_cb,
	.set_vq_num             = vduse_vdpa_set_vq_num,
	.set_vq_ready		= vduse_vdpa_set_vq_ready,
	.get_vq_ready		= vduse_vdpa_get_vq_ready,
	.set_vq_state		= vduse_vdpa_set_vq_state,
	.get_vq_state		= vduse_vdpa_get_vq_state,
	.get_vq_align		= vduse_vdpa_get_vq_align,
	.get_device_features	= vduse_vdpa_get_device_features,
	.set_driver_features	= vduse_vdpa_set_driver_features,
	.get_driver_features	= vduse_vdpa_get_driver_features,
	.set_config_cb		= vduse_vdpa_set_config_cb,
	.get_vq_num_max		= vduse_vdpa_get_vq_num_max,
	.get_device_id		= vduse_vdpa_get_device_id,
	.get_vendor_id		= vduse_vdpa_get_vendor_id,
	.get_status		= vduse_vdpa_get_status,
	.set_status		= vduse_vdpa_set_status,
	.get_config_size	= vduse_vdpa_get_config_size,
	.get_config		= vduse_vdpa_get_config,
	.set_config		= vduse_vdpa_set_config,
	.get_generation		= vduse_vdpa_get_generation,
	.set_vq_affinity	= vduse_vdpa_set_vq_affinity,
	.get_vq_affinity	= vduse_vdpa_get_vq_affinity,
	.reset			= vduse_vdpa_reset,
	.set_map		= vduse_vdpa_set_map,
	.free			= vduse_vdpa_free,
};
static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
}
static void vduse_dev_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
}
static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
unsigned long attrs)
{
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
unsigned long iova;
void *addr;
*dma_addr = DMA_MAPPING_ERROR;
addr = vduse_domain_alloc_coherent(domain, size,
(dma_addr_t *)&iova, flag, attrs);
if (!addr)
return NULL;
*dma_addr = (dma_addr_t)iova;
return addr;
}
static void vduse_dev_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
unsigned long attrs)
{
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
}
static size_t vduse_dev_max_mapping_size(struct device *dev)
{
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
return domain->bounce_size;
}
/* DMA ops installed on the vDPA device (see vduse_dev_init_vdpa()); all
 * DMA is redirected into the per-device software IOVA domain.
 */
static const struct dma_map_ops vduse_dev_dma_ops = {
 .map_page = vduse_dev_map_page,
 .unmap_page = vduse_dev_unmap_page,
 .alloc = vduse_dev_alloc_coherent,
 .free = vduse_dev_free_coherent,
 .max_mapping_size = vduse_dev_max_mapping_size,
};
/* Translate a VDUSE_ACCESS_* IOTLB permission into O_* open flags so the
 * backing file descriptor re-exported to userspace (VDUSE_IOTLB_GET_FD)
 * carries a matching access mode. Unknown permissions warn and yield 0.
 */
static unsigned int perm_to_file_flags(u8 perm)
{
	unsigned int flags = 0;

	switch (perm) {
	case VDUSE_ACCESS_WO:
		flags |= O_WRONLY;
		break;
	case VDUSE_ACCESS_RO:
		flags |= O_RDONLY;
		break;
	case VDUSE_ACCESS_RW:
		flags |= O_RDWR;
		break;
	default:
		/* Fixed message wording ("invalidate" -> "invalid"). */
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags;
}
/* Attach, replace or (with VDUSE_EVENTFD_DEASSIGN) detach the kick eventfd
 * of one virtqueue. If a kick arrived while no eventfd was installed
 * (vq->kicked), the newly installed eventfd is signalled immediately so
 * the event is not lost.
 */
static int vduse_kickfd_setup(struct vduse_dev *dev,
 struct vduse_vq_eventfd *eventfd)
{
 struct eventfd_ctx *ctx = NULL;
 struct vduse_virtqueue *vq;
 u32 index;
 if (eventfd->index >= dev->vq_num)
 return -EINVAL;
 /* Clamp under speculation after the bounds check. */
 index = array_index_nospec(eventfd->index, dev->vq_num);
 vq = dev->vqs[index];
 if (eventfd->fd >= 0) {
 ctx = eventfd_ctx_fdget(eventfd->fd);
 if (IS_ERR(ctx))
 return PTR_ERR(ctx);
 } else if (eventfd->fd != VDUSE_EVENTFD_DEASSIGN)
 return 0;
 spin_lock(&vq->kick_lock);
 /* Drop the reference on any previously installed eventfd. */
 if (vq->kickfd)
 eventfd_ctx_put(vq->kickfd);
 vq->kickfd = ctx;
 if (vq->ready && vq->kicked && vq->kickfd) {
 eventfd_signal(vq->kickfd, 1);
 vq->kicked = false;
 }
 spin_unlock(&vq->kick_lock);
 return 0;
}
/* A device is ready for vDPA registration once userspace has configured
 * a non-zero max queue size for every virtqueue (VDUSE_VQ_SETUP).
 */
static bool vduse_dev_is_ready(struct vduse_dev *dev)
{
	int idx = dev->vq_num;

	while (idx-- > 0) {
		if (!dev->vqs[idx]->num_max)
			return false;
	}

	return true;
}
/* Workqueue handler: deliver a config-change interrupt to the driver.
 * irq_lock serializes against config_cb updates.
 */
static void vduse_dev_irq_inject(struct work_struct *work)
{
 struct vduse_dev *dev = container_of(work, struct vduse_dev, inject);
 spin_lock_bh(&dev->irq_lock);
 if (dev->config_cb.callback)
 dev->config_cb.callback(dev->config_cb.private);
 spin_unlock_bh(&dev->irq_lock);
}
/* Workqueue handler: deliver a used-buffer interrupt for one virtqueue.
 * Only fires while the queue is ready and a callback is registered.
 */
static void vduse_vq_irq_inject(struct work_struct *work)
{
 struct vduse_virtqueue *vq = container_of(work,
 struct vduse_virtqueue, inject);
 spin_lock_bh(&vq->irq_lock);
 if (vq->ready && vq->cb.callback)
 vq->cb.callback(vq->cb.private);
 spin_unlock_bh(&vq->irq_lock);
}
/* Try to signal the vq interrupt through its irqfd trigger. Returns true
 * if the eventfd was signalled; false means the caller must fall back to
 * the workqueue-based injection path.
 */
static bool vduse_vq_signal_irqfd(struct vduse_virtqueue *vq)
{
 bool signal = false;
 /* Unlocked fast-path check; re-checked under irq_lock below. */
 if (!vq->cb.trigger)
 return false;
 spin_lock_irq(&vq->irq_lock);
 if (vq->ready && vq->cb.trigger) {
 eventfd_signal(vq->cb.trigger, 1);
 signal = true;
 }
 spin_unlock_irq(&vq->irq_lock);
 return signal;
}
/* Queue interrupt-injection work, either on the unbound workqueue or
 * pinned to @irq_effective_cpu. The rwsem read side guards against a
 * concurrent reset: injection is refused unless DRIVER_OK is set.
 */
static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
 struct work_struct *irq_work,
 int irq_effective_cpu)
{
 int ret = -EINVAL;
 down_read(&dev->rwsem);
 if (!(dev->status & VIRTIO_CONFIG_S_DRIVER_OK))
 goto unlock;
 ret = 0;
 if (irq_effective_cpu == IRQ_UNBOUND)
 queue_work(vduse_irq_wq, irq_work);
 else
 queue_work_on(irq_effective_cpu,
 vduse_irq_bound_wq, irq_work);
 unlock:
 up_read(&dev->rwsem);
 return ret;
}
/* Deregister the userspace bounce memory previously registered with
 * vduse_dev_reg_umem(): restore kernel bounce pages, unpin and release
 * the user pages, and undo the pinned_vm accounting. Only an exact match
 * of the registered region (iova + full bounce size) is accepted.
 * Caller holds dev->domain_lock.
 */
static int vduse_dev_dereg_umem(struct vduse_dev *dev,
 u64 iova, u64 size)
{
 int ret;
 mutex_lock(&dev->mem_lock);
 ret = -ENOENT;
 if (!dev->umem)
 goto unlock;
 ret = -EINVAL;
 if (!dev->domain)
 goto unlock;
 if (dev->umem->iova != iova || size != dev->domain->bounce_size)
 goto unlock;
 vduse_domain_remove_user_bounce_pages(dev->domain);
 /* Pages may have been written by the device: mark them dirty. */
 unpin_user_pages_dirty_lock(dev->umem->pages,
 dev->umem->npages, true);
 atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);
 /* Paired with mmgrab() in vduse_dev_reg_umem(). */
 mmdrop(dev->umem->mm);
 vfree(dev->umem->pages);
 kfree(dev->umem);
 dev->umem = NULL;
 ret = 0;
 unlock:
 mutex_unlock(&dev->mem_lock);
 return ret;
}
/* Register userspace memory to back the bounce buffer. Only the whole
 * bounce region is accepted: iova must be 0, size must equal the domain's
 * bounce size, and uaddr must be page aligned. Pages are long-term pinned
 * (subject to RLIMIT_MEMLOCK) and handed to the IOVA domain; the owning
 * mm is grabbed so accounting can be reversed in vduse_dev_dereg_umem().
 * Caller holds dev->domain_lock.
 *
 * Fix: the two references to current->mm below had been corrupted to
 * "¤t->mm" (a mangled "&current" HTML entity), which does not compile.
 */
static int vduse_dev_reg_umem(struct vduse_dev *dev,
			      u64 iova, u64 uaddr, u64 size)
{
	struct page **page_list = NULL;
	struct vduse_umem *umem = NULL;
	long pinned = 0;
	unsigned long npages, lock_limit;
	int ret;

	if (!dev->domain || !dev->domain->bounce_map ||
	    size != dev->domain->bounce_size ||
	    iova != 0 || uaddr & ~PAGE_MASK)
		return -EINVAL;

	mutex_lock(&dev->mem_lock);
	ret = -EEXIST;
	if (dev->umem)
		goto unlock;

	ret = -ENOMEM;
	npages = size >> PAGE_SHIFT;
	page_list = __vmalloc(array_size(npages, sizeof(struct page *)),
			      GFP_KERNEL_ACCOUNT);
	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!page_list || !umem)
		goto unlock;

	mmap_read_lock(current->mm);

	/* Enforce RLIMIT_MEMLOCK before long-term pinning. */
	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
	if (npages + atomic64_read(&current->mm->pinned_vm) > lock_limit)
		goto out;

	pinned = pin_user_pages(uaddr, npages, FOLL_LONGTERM | FOLL_WRITE,
				page_list);
	if (pinned != npages) {
		ret = pinned < 0 ? pinned : -ENOMEM;
		goto out;
	}

	ret = vduse_domain_add_user_bounce_pages(dev->domain,
						 page_list, pinned);
	if (ret)
		goto out;

	atomic64_add(npages, &current->mm->pinned_vm);

	umem->pages = page_list;
	umem->npages = pinned;
	umem->iova = iova;
	umem->mm = current->mm;
	mmgrab(current->mm);	/* paired with mmdrop() on deregistration */

	dev->umem = umem;
out:
	if (ret && pinned > 0)
		unpin_user_pages(page_list, pinned);

	mmap_read_unlock(current->mm);
unlock:
	if (ret) {
		vfree(page_list);
		kfree(umem);
	}
	mutex_unlock(&dev->mem_lock);
	return ret;
}
/* Round-robin the vq's interrupt work to the next online CPU in its
 * configured affinity mask. When the mask is exhausted, curr_cpu is reset
 * to IRQ_UNBOUND so cpumask_next() restarts the scan from CPU 0.
 * NOTE(review): this loop relies on at least one CPU in irq_affinity
 * being online (guaranteed by irq_cb_affinity_store's intersects check)
 * — confirm before relaxing that store-side validation.
 */
static void vduse_vq_update_effective_cpu(struct vduse_virtqueue *vq)
{
 int curr_cpu = vq->irq_effective_cpu;
 while (true) {
 curr_cpu = cpumask_next(curr_cpu, &vq->irq_affinity);
 if (cpu_online(curr_cpu))
 break;
 if (curr_cpu >= nr_cpu_ids)
 curr_cpu = IRQ_UNBOUND;
 }
 vq->irq_effective_cpu = curr_cpu;
}
/* ioctl handler for the per-device char dev (/dev/vduse/$NAME), used by
 * the userspace device implementation: IOTLB fd export/umem management,
 * virtqueue setup/introspection, and interrupt injection. A device marked
 * broken (message timeout) refuses all further ioctls.
 */
static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
 unsigned long arg)
{
 struct vduse_dev *dev = file->private_data;
 void __user *argp = (void __user *)arg;
 int ret;
 if (unlikely(dev->broken))
 return -EPERM;
 switch (cmd) {
 case VDUSE_IOTLB_GET_FD: {
 /* Find the first IOTLB mapping overlapping [start, last] and
  * hand its backing file to userspace as a new fd.
  */
 struct vduse_iotlb_entry entry;
 struct vhost_iotlb_map *map;
 struct vdpa_map_file *map_file;
 struct file *f = NULL;
 ret = -EFAULT;
 if (copy_from_user(&entry, argp, sizeof(entry)))
 break;
 ret = -EINVAL;
 if (entry.start > entry.last)
 break;
 mutex_lock(&dev->domain_lock);
 if (!dev->domain) {
 mutex_unlock(&dev->domain_lock);
 break;
 }
 spin_lock(&dev->domain->iotlb_lock);
 map = vhost_iotlb_itree_first(dev->domain->iotlb,
 entry.start, entry.last);
 if (map) {
 map_file = (struct vdpa_map_file *)map->opaque;
 /* Hold a reference while we copy the entry out. */
 f = get_file(map_file->file);
 entry.offset = map_file->offset;
 entry.start = map->start;
 entry.last = map->last;
 entry.perm = map->perm;
 }
 spin_unlock(&dev->domain->iotlb_lock);
 mutex_unlock(&dev->domain_lock);
 ret = -EINVAL;
 if (!f)
 break;
 ret = -EFAULT;
 if (copy_to_user(argp, &entry, sizeof(entry))) {
 fput(f);
 break;
 }
 /* receive_fd() takes its own reference on success. */
 ret = receive_fd(f, perm_to_file_flags(entry.perm));
 fput(f);
 break;
 }
 case VDUSE_DEV_GET_FEATURES:
 /*
 * Just mirror what driver wrote here.
 * The driver is expected to check FEATURE_OK later.
 */
 ret = put_user(dev->driver_features, (u64 __user *)argp);
 break;
 case VDUSE_DEV_SET_CONFIG: {
 /* Update a slice of the device config space visible to the
  * virtio driver; bounds-checked against config_size.
  */
 struct vduse_config_data config;
 unsigned long size = offsetof(struct vduse_config_data,
 buffer);
 ret = -EFAULT;
 if (copy_from_user(&config, argp, size))
 break;
 ret = -EINVAL;
 if (config.offset > dev->config_size ||
 config.length == 0 ||
 config.length > dev->config_size - config.offset)
 break;
 ret = -EFAULT;
 if (copy_from_user(dev->config + config.offset, argp + size,
 config.length))
 break;
 ret = 0;
 break;
 }
 case VDUSE_DEV_INJECT_CONFIG_IRQ:
 ret = vduse_dev_queue_irq_work(dev, &dev->inject, IRQ_UNBOUND);
 break;
 case VDUSE_VQ_SETUP: {
 struct vduse_vq_config config;
 u32 index;
 ret = -EFAULT;
 if (copy_from_user(&config, argp, sizeof(config)))
 break;
 ret = -EINVAL;
 if (config.index >= dev->vq_num)
 break;
 /* Reserved fields must be zero for forward compatibility. */
 if (!is_mem_zero((const char *)config.reserved,
 sizeof(config.reserved)))
 break;
 index = array_index_nospec(config.index, dev->vq_num);
 dev->vqs[index]->num_max = config.max_size;
 ret = 0;
 break;
 }
 case VDUSE_VQ_GET_INFO: {
 /* Report current ring addresses, size, state and readiness of
  * one virtqueue back to userspace.
  */
 struct vduse_vq_info vq_info;
 struct vduse_virtqueue *vq;
 u32 index;
 ret = -EFAULT;
 if (copy_from_user(&vq_info, argp, sizeof(vq_info)))
 break;
 ret = -EINVAL;
 if (vq_info.index >= dev->vq_num)
 break;
 index = array_index_nospec(vq_info.index, dev->vq_num);
 vq = dev->vqs[index];
 vq_info.desc_addr = vq->desc_addr;
 vq_info.driver_addr = vq->driver_addr;
 vq_info.device_addr = vq->device_addr;
 vq_info.num = vq->num;
 if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
 vq_info.packed.last_avail_counter =
 vq->state.packed.last_avail_counter;
 vq_info.packed.last_avail_idx =
 vq->state.packed.last_avail_idx;
 vq_info.packed.last_used_counter =
 vq->state.packed.last_used_counter;
 vq_info.packed.last_used_idx =
 vq->state.packed.last_used_idx;
 } else
 vq_info.split.avail_index =
 vq->state.split.avail_index;
 vq_info.ready = vq->ready;
 ret = -EFAULT;
 if (copy_to_user(argp, &vq_info, sizeof(vq_info)))
 break;
 ret = 0;
 break;
 }
 case VDUSE_VQ_SETUP_KICKFD: {
 struct vduse_vq_eventfd eventfd;
 ret = -EFAULT;
 if (copy_from_user(&eventfd, argp, sizeof(eventfd)))
 break;
 ret = vduse_kickfd_setup(dev, &eventfd);
 break;
 }
 case VDUSE_VQ_INJECT_IRQ: {
 u32 index;
 ret = -EFAULT;
 if (get_user(index, (u32 __user *)argp))
 break;
 ret = -EINVAL;
 if (index >= dev->vq_num)
 break;
 ret = 0;
 index = array_index_nospec(index, dev->vq_num);
 /* Prefer the irqfd fast path; fall back to workqueue. */
 if (!vduse_vq_signal_irqfd(dev->vqs[index])) {
 vduse_vq_update_effective_cpu(dev->vqs[index]);
 ret = vduse_dev_queue_irq_work(dev,
 &dev->vqs[index]->inject,
 dev->vqs[index]->irq_effective_cpu);
 }
 break;
 }
 case VDUSE_IOTLB_REG_UMEM: {
 struct vduse_iova_umem umem;
 ret = -EFAULT;
 if (copy_from_user(&umem, argp, sizeof(umem)))
 break;
 ret = -EINVAL;
 if (!is_mem_zero((const char *)umem.reserved,
 sizeof(umem.reserved)))
 break;
 mutex_lock(&dev->domain_lock);
 ret = vduse_dev_reg_umem(dev, umem.iova,
 umem.uaddr, umem.size);
 mutex_unlock(&dev->domain_lock);
 break;
 }
 case VDUSE_IOTLB_DEREG_UMEM: {
 struct vduse_iova_umem umem;
 ret = -EFAULT;
 if (copy_from_user(&umem, argp, sizeof(umem)))
 break;
 ret = -EINVAL;
 if (!is_mem_zero((const char *)umem.reserved,
 sizeof(umem.reserved)))
 break;
 mutex_lock(&dev->domain_lock);
 ret = vduse_dev_dereg_umem(dev, umem.iova,
 umem.size);
 mutex_unlock(&dev->domain_lock);
 break;
 }
 case VDUSE_IOTLB_GET_INFO: {
 /* Query the first IOVA region overlapping [start, last] and
  * report whether it supports userspace bounce pages (UMEM).
  */
 struct vduse_iova_info info;
 struct vhost_iotlb_map *map;
 ret = -EFAULT;
 if (copy_from_user(&info, argp, sizeof(info)))
 break;
 ret = -EINVAL;
 if (info.start > info.last)
 break;
 if (!is_mem_zero((const char *)info.reserved,
 sizeof(info.reserved)))
 break;
 mutex_lock(&dev->domain_lock);
 if (!dev->domain) {
 mutex_unlock(&dev->domain_lock);
 break;
 }
 spin_lock(&dev->domain->iotlb_lock);
 map = vhost_iotlb_itree_first(dev->domain->iotlb,
 info.start, info.last);
 if (map) {
 info.start = map->start;
 info.last = map->last;
 info.capability = 0;
 if (dev->domain->bounce_map && map->start == 0 &&
 map->last == dev->domain->bounce_size - 1)
 info.capability |= VDUSE_IOVA_CAP_UMEM;
 }
 spin_unlock(&dev->domain->iotlb_lock);
 mutex_unlock(&dev->domain_lock);
 if (!map)
 break;
 ret = -EFAULT;
 if (copy_to_user(argp, &info, sizeof(info)))
 break;
 ret = 0;
 break;
 }
 default:
 ret = -ENOIOCTLCMD;
 break;
 }
 return ret;
}
/* release() for /dev/vduse/$NAME: tear down any registered umem, requeue
 * in-flight messages, and mark the device disconnected so it can be
 * reopened by a new userspace implementation.
 */
static int vduse_dev_release(struct inode *inode, struct file *file)
{
 struct vduse_dev *dev = file->private_data;
 mutex_lock(&dev->domain_lock);
 if (dev->domain)
 vduse_dev_dereg_umem(dev, 0, dev->domain->bounce_size);
 mutex_unlock(&dev->domain_lock);
 spin_lock(&dev->msg_lock);
 /* Make sure the inflight messages can be processed after reconnection */
 list_splice_init(&dev->recv_list, &dev->send_list);
 spin_unlock(&dev->msg_lock);
 dev->connected = false;
 return 0;
}
/* Look up a device by char-dev minor; vduse_lock protects the IDR. */
static struct vduse_dev *vduse_dev_get_from_minor(int minor)
{
	struct vduse_dev *found;

	mutex_lock(&vduse_lock);
	found = idr_find(&vduse_idr, minor);
	mutex_unlock(&vduse_lock);

	return found;
}
/* open() for /dev/vduse/$NAME: only one userspace implementation may be
 * connected to a device at a time.
 */
static int vduse_dev_open(struct inode *inode, struct file *file)
{
	struct vduse_dev *dev = vduse_dev_get_from_minor(iminor(inode));
	int ret = 0;

	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->lock);
	if (dev->connected) {
		ret = -EBUSY;
	} else {
		dev->connected = true;
		file->private_data = dev;
	}
	mutex_unlock(&dev->lock);

	return ret;
}
/* File operations for the per-device char dev (/dev/vduse/$NAME). */
static const struct file_operations vduse_dev_fops = {
 .owner = THIS_MODULE,
 .open = vduse_dev_open,
 .release = vduse_dev_release,
 .read_iter = vduse_dev_read_iter,
 .write_iter = vduse_dev_write_iter,
 .poll = vduse_dev_poll,
 .unlocked_ioctl = vduse_dev_ioctl,
 .compat_ioctl = compat_ptr_ioctl,
 .llseek = noop_llseek,
};
/* sysfs show: print the vq's interrupt-callback affinity as a bitmask.
 * Uses sysfs_emit() like the other show callbacks in this file
 * (msg_timeout_show/bounce_size_show) so the output is bounded to
 * PAGE_SIZE as required for sysfs attributes.
 */
static ssize_t irq_cb_affinity_show(struct vduse_virtqueue *vq, char *buf)
{
	return sysfs_emit(buf, "%*pb\n", cpumask_pr_args(&vq->irq_affinity));
}
/* sysfs store: parse a cpumask and install it as the vq's interrupt
 * affinity. The mask must intersect the online CPUs, which
 * vduse_vq_update_effective_cpu() relies on to terminate.
 */
static ssize_t irq_cb_affinity_store(struct vduse_virtqueue *vq,
 const char *buf, size_t count)
{
 cpumask_var_t new_value;
 int ret;
 if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
 return -ENOMEM;
 ret = cpumask_parse(buf, new_value);
 if (ret)
 goto free_mask;
 ret = -EINVAL;
 if (!cpumask_intersects(new_value, cpu_online_mask))
 goto free_mask;
 cpumask_copy(&vq->irq_affinity, new_value);
 ret = count;
 free_mask:
 free_cpumask_var(new_value);
 return ret;
}
/* Custom sysfs attribute type for per-virtqueue kobjects (vqN dirs). */
struct vq_sysfs_entry {
 struct attribute attr;
 ssize_t (*show)(struct vduse_virtqueue *vq, char *buf);
 ssize_t (*store)(struct vduse_virtqueue *vq, const char *buf,
 size_t count);
};
static struct vq_sysfs_entry irq_cb_affinity_attr = __ATTR_RW(irq_cb_affinity);
/* Attributes exposed in each vq's sysfs directory. */
static struct attribute *vq_attrs[] = {
 &irq_cb_affinity_attr.attr,
 NULL,
};
ATTRIBUTE_GROUPS(vq);
/* Generic sysfs show dispatcher: forward to the vq_sysfs_entry's show(). */
static ssize_t vq_attr_show(struct kobject *kobj, struct attribute *attr,
			    char *buf)
{
	struct vq_sysfs_entry *entry =
		container_of(attr, struct vq_sysfs_entry, attr);
	struct vduse_virtqueue *vq =
		container_of(kobj, struct vduse_virtqueue, kobj);

	return entry->show ? entry->show(vq, buf) : -EIO;
}
/* Generic sysfs store dispatcher: forward to the vq_sysfs_entry's store(). */
static ssize_t vq_attr_store(struct kobject *kobj, struct attribute *attr,
			     const char *buf, size_t count)
{
	struct vq_sysfs_entry *entry =
		container_of(attr, struct vq_sysfs_entry, attr);
	struct vduse_virtqueue *vq =
		container_of(kobj, struct vduse_virtqueue, kobj);

	return entry->store ? entry->store(vq, buf, count) : -EIO;
}
static const struct sysfs_ops vq_sysfs_ops = {
 .show = vq_attr_show,
 .store = vq_attr_store,
};
/* kobject release: the vq structure itself is freed here, which is why
 * teardown paths use kobject_put() rather than kfree() on the vq.
 */
static void vq_release(struct kobject *kobj)
{
 struct vduse_virtqueue *vq = container_of(kobj,
 struct vduse_virtqueue, kobj);
 kfree(vq);
}
static const struct kobj_type vq_type = {
 .release = vq_release,
 .sysfs_ops = &vq_sysfs_ops,
 .default_groups = vq_groups,
};
/* Drop every virtqueue kobject (vq_release() frees the vq) and the
 * pointer array itself. Safe to call when vqs were never allocated.
 */
static void vduse_dev_deinit_vqs(struct vduse_dev *dev)
{
	int idx;

	if (!dev->vqs)
		return;

	for (idx = dev->vq_num - 1; idx >= 0; idx--)
		kobject_put(&dev->vqs[idx]->kobj);

	kfree(dev->vqs);
}
/* Allocate and initialize @vq_num virtqueues, each exposed as a "vqN"
 * kobject under the device's sysfs directory. On failure, all vqs created
 * so far are released via kobject_put() (vq_release() frees them).
 */
static int vduse_dev_init_vqs(struct vduse_dev *dev, u32 vq_align, u32 vq_num)
{
	int ret, i;

	dev->vq_align = vq_align;
	dev->vq_num = vq_num;
	dev->vqs = kcalloc(dev->vq_num, sizeof(*dev->vqs), GFP_KERNEL);
	if (!dev->vqs)
		return -ENOMEM;

	for (i = 0; i < vq_num; i++) {
		dev->vqs[i] = kzalloc(sizeof(*dev->vqs[i]), GFP_KERNEL);
		if (!dev->vqs[i]) {
			ret = -ENOMEM;
			goto err;
		}

		dev->vqs[i]->index = i;
		dev->vqs[i]->irq_effective_cpu = IRQ_UNBOUND;
		INIT_WORK(&dev->vqs[i]->inject, vduse_vq_irq_inject);
		INIT_WORK(&dev->vqs[i]->kick, vduse_vq_kick_work);
		spin_lock_init(&dev->vqs[i]->kick_lock);
		spin_lock_init(&dev->vqs[i]->irq_lock);
		cpumask_setall(&dev->vqs[i]->irq_affinity);

		kobject_init(&dev->vqs[i]->kobj, &vq_type);
		ret = kobject_add(&dev->vqs[i]->kobj,
				  &dev->dev->kobj, "vq%d", i);
		if (ret) {
			/* Once kobject_init() has run, the kobject must be
			 * released with kobject_put() (which also frees the
			 * name kobject_add() may have allocated), not a bare
			 * kfree(); vq_release() performs the kfree().
			 */
			kobject_put(&dev->vqs[i]->kobj);
			goto err;
		}
	}
	return 0;
err:
	while (i--)
		kobject_put(&dev->vqs[i]->kobj);
	kfree(dev->vqs);
	dev->vqs = NULL;
	return ret;
}
/* Allocate a vduse_dev and initialize its locks, message lists and
 * config-irq work. Returns NULL on allocation failure.
 */
static struct vduse_dev *vduse_dev_create(void)
{
 struct vduse_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 if (!dev)
 return NULL;
 mutex_init(&dev->lock);
 mutex_init(&dev->mem_lock);
 mutex_init(&dev->domain_lock);
 spin_lock_init(&dev->msg_lock);
 INIT_LIST_HEAD(&dev->send_list);
 INIT_LIST_HEAD(&dev->recv_list);
 spin_lock_init(&dev->irq_lock);
 init_rwsem(&dev->rwsem);
 INIT_WORK(&dev->inject, vduse_dev_irq_inject);
 init_waitqueue_head(&dev->waitq);
 return dev;
}
/* Counterpart of vduse_dev_create(); callers free name/config/vqs first. */
static void vduse_dev_destroy(struct vduse_dev *dev)
{
 kfree(dev);
}
static struct vduse_dev *vduse_find_dev(const char *name)
{
struct vduse_dev *dev;
int id;
idr_for_each_entry(&vduse_idr, dev, id)
if (!strcmp(dev->name, name))
return dev;
return NULL;
}
/* Destroy a device by name (VDUSE_DESTROY_DEV). Refused while a vdpa
 * device is attached or userspace is connected. Caller holds vduse_lock.
 */
static int vduse_destroy_dev(char *name)
{
 struct vduse_dev *dev = vduse_find_dev(name);
 if (!dev)
 return -EINVAL;
 mutex_lock(&dev->lock);
 if (dev->vdev || dev->connected) {
 mutex_unlock(&dev->lock);
 return -EBUSY;
 }
 /* Pretend to be connected so no one can reopen the char dev
  * while we tear things down.
  */
 dev->connected = true;
 mutex_unlock(&dev->lock);
 vduse_dev_reset(dev);
 device_destroy(vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
 idr_remove(&vduse_idr, dev->minor);
 kvfree(dev->config);
 vduse_dev_deinit_vqs(dev);
 if (dev->domain)
 vduse_domain_destroy(dev->domain);
 kfree(dev->name);
 vduse_dev_destroy(dev);
 /* Balances __module_get() taken in vduse_create_dev(). */
 module_put(THIS_MODULE);
 return 0;
}
/* Check @device_id against the allow-list of virtio device types. */
static bool device_is_allowed(u32 device_id)
{
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(allowed_device_id); idx++) {
		if (allowed_device_id[idx] == device_id)
			return true;
	}

	return false;
}
/* Validate the feature bits a new device advertises: VIRTIO_F_ACCESS_PLATFORM
 * is mandatory (all DMA must go through the IOTLB), and features implying a
 * writable config space are rejected.
 */
static bool features_is_valid(u64 features)
{
 if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
 return false;
 /* Now we only support read-only configuration space */
 if (features & (1ULL << VIRTIO_BLK_F_CONFIG_WCE))
 return false;
 return true;
}
/* Sanity-check a VDUSE_CREATE_DEV request: reserved bytes must be zero,
 * sizes bounded by PAGE_SIZE, name non-empty, and device type/features on
 * the allow-list.
 */
static bool vduse_validate_config(struct vduse_dev_config *config)
{
 if (!is_mem_zero((const char *)config->reserved,
 sizeof(config->reserved)))
 return false;
 if (config->vq_align > PAGE_SIZE)
 return false;
 if (config->config_size > PAGE_SIZE)
 return false;
 if (config->vq_num > 0xffff)
 return false;
 if (!config->name[0])
 return false;
 if (!device_is_allowed(config->device_id))
 return false;
 if (!features_is_valid(config->features))
 return false;
 return true;
}
/* sysfs show: current message timeout (seconds) for this device. */
static ssize_t msg_timeout_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct vduse_dev *vdev = dev_get_drvdata(device);

	return sysfs_emit(buf, "%u\n", vdev->msg_timeout);
}
/* sysfs store: set the timeout (seconds) for waiting on userspace message
 * replies; a timeout marks the device broken.
 */
static ssize_t msg_timeout_store(struct device *device,
 struct device_attribute *attr,
 const char *buf, size_t count)
{
 struct vduse_dev *dev = dev_get_drvdata(device);
 int ret;
 ret = kstrtouint(buf, 10, &dev->msg_timeout);
 if (ret < 0)
 return ret;
 return count;
}
static DEVICE_ATTR_RW(msg_timeout);
/* sysfs show: configured bounce buffer size in bytes. */
static ssize_t bounce_size_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct vduse_dev *vdev = dev_get_drvdata(device);

	return sysfs_emit(buf, "%u\n", vdev->bounce_size);
}
/* sysfs store: set the bounce buffer size. Only allowed before the IOVA
 * domain is created (first vdpa attach); the value is clamped to
 * [VDUSE_MIN_BOUNCE_SIZE, VDUSE_MAX_BOUNCE_SIZE] and rounded down to a
 * page multiple via PAGE_MASK.
 */
static ssize_t bounce_size_store(struct device *device,
 struct device_attribute *attr,
 const char *buf, size_t count)
{
 struct vduse_dev *dev = dev_get_drvdata(device);
 unsigned int bounce_size;
 int ret;
 ret = -EPERM;
 mutex_lock(&dev->domain_lock);
 if (dev->domain)
 goto unlock;
 ret = kstrtouint(buf, 10, &bounce_size);
 if (ret < 0)
 goto unlock;
 ret = -EINVAL;
 if (bounce_size > VDUSE_MAX_BOUNCE_SIZE ||
 bounce_size < VDUSE_MIN_BOUNCE_SIZE)
 goto unlock;
 dev->bounce_size = bounce_size & PAGE_MASK;
 ret = count;
 unlock:
 mutex_unlock(&dev->domain_lock);
 return ret;
}
static DEVICE_ATTR_RW(bounce_size);
/* Device-level sysfs attributes created with the device node. */
static struct attribute *vduse_dev_attrs[] = {
 &dev_attr_msg_timeout.attr,
 &dev_attr_bounce_size.attr,
 NULL
};
ATTRIBUTE_GROUPS(vduse_dev);
/* Create a new VDUSE device from a validated config. Takes ownership of
 * @config_buf only on success (on failure the caller frees it). Error
 * unwinding strictly reverses the setup order. Caller holds vduse_lock.
 */
static int vduse_create_dev(struct vduse_dev_config *config,
 void *config_buf, u64 api_version)
{
 int ret;
 struct vduse_dev *dev;
 ret = -EEXIST;
 if (vduse_find_dev(config->name))
 goto err;
 ret = -ENOMEM;
 dev = vduse_dev_create();
 if (!dev)
 goto err;
 dev->api_version = api_version;
 dev->device_features = config->features;
 dev->device_id = config->device_id;
 dev->vendor_id = config->vendor_id;
 dev->name = kstrdup(config->name, GFP_KERNEL);
 if (!dev->name)
 goto err_str;
 dev->bounce_size = VDUSE_BOUNCE_SIZE;
 dev->config = config_buf;
 dev->config_size = config->config_size;
 ret = idr_alloc(&vduse_idr, dev, 1, VDUSE_DEV_MAX, GFP_KERNEL);
 if (ret < 0)
 goto err_idr;
 dev->minor = ret;
 dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT;
 dev->dev = device_create_with_groups(vduse_class, NULL,
 MKDEV(MAJOR(vduse_major), dev->minor),
 dev, vduse_dev_groups, "%s", config->name);
 if (IS_ERR(dev->dev)) {
 ret = PTR_ERR(dev->dev);
 goto err_dev;
 }
 ret = vduse_dev_init_vqs(dev, config->vq_align, config->vq_num);
 if (ret)
 goto err_vqs;
 /* Pin the module until vduse_destroy_dev() releases it. */
 __module_get(THIS_MODULE);
 return 0;
 err_vqs:
 device_destroy(vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
 err_dev:
 idr_remove(&vduse_idr, dev->minor);
 err_idr:
 kfree(dev->name);
 err_str:
 vduse_dev_destroy(dev);
 err:
 return ret;
}
/* ioctl handler for the control node (/dev/vduse/control): API version
 * negotiation plus device create/destroy. All commands run under
 * vduse_lock.
 */
static long vduse_ioctl(struct file *file, unsigned int cmd,
 unsigned long arg)
{
 int ret;
 void __user *argp = (void __user *)arg;
 struct vduse_control *control = file->private_data;
 mutex_lock(&vduse_lock);
 switch (cmd) {
 case VDUSE_GET_API_VERSION:
 ret = put_user(control->api_version, (u64 __user *)argp);
 break;
 case VDUSE_SET_API_VERSION: {
 u64 api_version;
 ret = -EFAULT;
 if (get_user(api_version, (u64 __user *)argp))
 break;
 ret = -EINVAL;
 if (api_version > VDUSE_API_VERSION)
 break;
 ret = 0;
 control->api_version = api_version;
 break;
 }
 case VDUSE_CREATE_DEV: {
 struct vduse_dev_config config;
 unsigned long size = offsetof(struct vduse_dev_config, config);
 void *buf;
 ret = -EFAULT;
 if (copy_from_user(&config, argp, size))
 break;
 ret = -EINVAL;
 if (vduse_validate_config(&config) == false)
 break;
 /* Copy the trailing config space blob from userspace. */
 buf = vmemdup_user(argp + size, config.config_size);
 if (IS_ERR(buf)) {
 ret = PTR_ERR(buf);
 break;
 }
 config.name[VDUSE_NAME_MAX - 1] = '\0';
 ret = vduse_create_dev(&config, buf, control->api_version);
 /* On failure ownership of buf stays with us. */
 if (ret)
 kvfree(buf);
 break;
 }
 case VDUSE_DESTROY_DEV: {
 char name[VDUSE_NAME_MAX];
 ret = -EFAULT;
 if (copy_from_user(name, argp, VDUSE_NAME_MAX))
 break;
 name[VDUSE_NAME_MAX - 1] = '\0';
 ret = vduse_destroy_dev(name);
 break;
 }
 default:
 ret = -EINVAL;
 break;
 }
 mutex_unlock(&vduse_lock);
 return ret;
}
/* release() for the control node: drop the per-open control state. */
static int vduse_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
/* open() for the control node: allocate per-open state holding the
 * negotiated API version (defaults to the newest we support).
 */
static int vduse_open(struct inode *inode, struct file *file)
{
	struct vduse_control *control;

	control = kmalloc(sizeof(*control), GFP_KERNEL);
	if (!control)
		return -ENOMEM;

	control->api_version = VDUSE_API_VERSION;
	file->private_data = control;

	return 0;
}
/* File operations for the control char dev (/dev/vduse/control). */
static const struct file_operations vduse_ctrl_fops = {
 .owner = THIS_MODULE,
 .open = vduse_open,
 .release = vduse_release,
 .unlocked_ioctl = vduse_ioctl,
 .compat_ioctl = compat_ptr_ioctl,
 .llseek = noop_llseek,
};
/* Place all class device nodes under the /dev/vduse/ directory. */
static char *vduse_devnode(const struct device *dev, umode_t *mode)
{
 return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev));
}
/* Wrapper tying the vdpa management device to its struct device; the
 * single instance lives in vduse_mgmt (created in vduse_mgmtdev_init()).
 */
struct vduse_mgmt_dev {
 struct vdpa_mgmt_dev mgmt_dev;
 struct device dev;
};
static struct vduse_mgmt_dev *vduse_mgmt;
/* Allocate and wire up the vdpa device for a VDUSE device: install our
 * DMA ops so all virtio DMA goes through the IOVA domain. Only one vdpa
 * device per VDUSE device is allowed.
 */
static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
{
 struct vduse_vdpa *vdev;
 int ret;
 if (dev->vdev)
 return -EEXIST;
 vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
 &vduse_vdpa_config_ops, 1, 1, name, true);
 if (IS_ERR(vdev))
 return PTR_ERR(vdev);
 dev->vdev = vdev;
 vdev->dev = dev;
 vdev->vdpa.dev.dma_mask = &vdev->vdpa.dev.coherent_dma_mask;
 ret = dma_set_mask_and_coherent(&vdev->vdpa.dev, DMA_BIT_MASK(64));
 if (ret) {
 /* put_device() triggers the vdpa release path and frees vdev. */
 put_device(&vdev->vdpa.dev);
 return ret;
 }
 set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
 vdev->vdpa.dma_dev = &vdev->vdpa.dev;
 vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev;
 return 0;
}
/* Management-op dev_add: attach a vdpa device to an existing, fully
 * configured VDUSE device. Lazily creates the IOVA domain on first
 * attach (bounce_size is frozen from then on).
 */
static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
 const struct vdpa_dev_set_config *config)
{
 struct vduse_dev *dev;
 int ret;
 mutex_lock(&vduse_lock);
 dev = vduse_find_dev(name);
 if (!dev || !vduse_dev_is_ready(dev)) {
 mutex_unlock(&vduse_lock);
 return -EINVAL;
 }
 ret = vduse_dev_init_vdpa(dev, name);
 mutex_unlock(&vduse_lock);
 if (ret)
 return ret;
 mutex_lock(&dev->domain_lock);
 if (!dev->domain)
 dev->domain = vduse_domain_create(VDUSE_IOVA_SIZE - 1,
 dev->bounce_size);
 mutex_unlock(&dev->domain_lock);
 if (!dev->domain) {
 put_device(&dev->vdev->vdpa.dev);
 return -ENOMEM;
 }
 ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
 if (ret) {
 /* Undo both the vdpa allocation and the domain creation. */
 put_device(&dev->vdev->vdpa.dev);
 mutex_lock(&dev->domain_lock);
 vduse_domain_destroy(dev->domain);
 dev->domain = NULL;
 mutex_unlock(&dev->domain_lock);
 return ret;
 }
 return 0;
}
/* Management-op dev_del: detach the vdpa device (release path cleans up). */
static void vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
 _vdpa_unregister_device(dev);
}
static const struct vdpa_mgmtdev_ops vdpa_dev_mgmtdev_ops = {
 .dev_add = vdpa_dev_add,
 .dev_del = vdpa_dev_del,
};
/* Virtio device types this management device can instantiate. */
static struct virtio_device_id id_table[] = {
 { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
 { 0 },
};
/* Device release callback: free the embedding vduse_mgmt_dev. */
static void vduse_mgmtdev_release(struct device *dev)
{
	kfree(container_of(dev, struct vduse_mgmt_dev, dev));
}
/* Register the single "vduse" vdpa management device. Note the asymmetric
 * error handling: before device_register() succeeds we must put_device()
 * (release() frees vduse_mgmt); after, device_unregister() does both.
 */
static int vduse_mgmtdev_init(void)
{
 int ret;
 vduse_mgmt = kzalloc(sizeof(*vduse_mgmt), GFP_KERNEL);
 if (!vduse_mgmt)
 return -ENOMEM;
 ret = dev_set_name(&vduse_mgmt->dev, "vduse");
 if (ret) {
 kfree(vduse_mgmt);
 return ret;
 }
 vduse_mgmt->dev.release = vduse_mgmtdev_release;
 ret = device_register(&vduse_mgmt->dev);
 if (ret)
 goto dev_reg_err;
 vduse_mgmt->mgmt_dev.id_table = id_table;
 vduse_mgmt->mgmt_dev.ops = &vdpa_dev_mgmtdev_ops;
 vduse_mgmt->mgmt_dev.device = &vduse_mgmt->dev;
 ret = vdpa_mgmtdev_register(&vduse_mgmt->mgmt_dev);
 if (ret)
 device_unregister(&vduse_mgmt->dev);
 return ret;
 dev_reg_err:
 put_device(&vduse_mgmt->dev);
 return ret;
}
/* Tear down the management device registered in vduse_mgmtdev_init(). */
static void vduse_mgmtdev_exit(void)
{
 vdpa_mgmtdev_unregister(&vduse_mgmt->mgmt_dev);
 device_unregister(&vduse_mgmt->dev);
}
/* Module init: create the vduse class, reserve the char-dev region
 * (minor 0 = control node, minors 1.. = devices), set up the interrupt
 * workqueues, the IOVA domain infrastructure and the management device.
 * Error labels unwind in strict reverse order.
 */
static int vduse_init(void)
{
 int ret;
 struct device *dev;
 vduse_class = class_create("vduse");
 if (IS_ERR(vduse_class))
 return PTR_ERR(vduse_class);
 vduse_class->devnode = vduse_devnode;
 ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse");
 if (ret)
 goto err_chardev_region;
 /* /dev/vduse/control */
 cdev_init(&vduse_ctrl_cdev, &vduse_ctrl_fops);
 vduse_ctrl_cdev.owner = THIS_MODULE;
 ret = cdev_add(&vduse_ctrl_cdev, vduse_major, 1);
 if (ret)
 goto err_ctrl_cdev;
 dev = device_create(vduse_class, NULL, vduse_major, NULL, "control");
 if (IS_ERR(dev)) {
 ret = PTR_ERR(dev);
 goto err_device;
 }
 /* /dev/vduse/$DEVICE */
 cdev_init(&vduse_cdev, &vduse_dev_fops);
 vduse_cdev.owner = THIS_MODULE;
 ret = cdev_add(&vduse_cdev, MKDEV(MAJOR(vduse_major), 1),
 VDUSE_DEV_MAX - 1);
 if (ret)
 goto err_cdev;
 ret = -ENOMEM;
 vduse_irq_wq = alloc_workqueue("vduse-irq",
 WQ_HIGHPRI | WQ_SYSFS | WQ_UNBOUND, 0);
 if (!vduse_irq_wq)
 goto err_wq;
 vduse_irq_bound_wq = alloc_workqueue("vduse-irq-bound", WQ_HIGHPRI, 0);
 if (!vduse_irq_bound_wq)
 goto err_bound_wq;
 ret = vduse_domain_init();
 if (ret)
 goto err_domain;
 ret = vduse_mgmtdev_init();
 if (ret)
 goto err_mgmtdev;
 return 0;
 err_mgmtdev:
 vduse_domain_exit();
 err_domain:
 destroy_workqueue(vduse_irq_bound_wq);
 err_bound_wq:
 destroy_workqueue(vduse_irq_wq);
 err_wq:
 cdev_del(&vduse_cdev);
 err_cdev:
 device_destroy(vduse_class, vduse_major);
 err_device:
 cdev_del(&vduse_ctrl_cdev);
 err_ctrl_cdev:
 unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
 err_chardev_region:
 class_destroy(vduse_class);
 return ret;
}
module_init(vduse_init);
/* Module exit: unwind everything vduse_init() set up, in reverse order. */
static void vduse_exit(void)
{
 vduse_mgmtdev_exit();
 vduse_domain_exit();
 destroy_workqueue(vduse_irq_bound_wq);
 destroy_workqueue(vduse_irq_wq);
 cdev_del(&vduse_cdev);
 device_destroy(vduse_class, vduse_major);
 cdev_del(&vduse_ctrl_cdev);
 unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
 class_destroy(vduse_class);
}
module_exit(vduse_exit);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
|
linux-master
|
drivers/vdpa/vdpa_user/vduse_dev.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* VDPA simulator for block device.
*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021, Red Hat Inc. All rights reserved.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <uapi/linux/virtio_blk.h>
#include "vdpa_sim.h"
#define DRV_VERSION "0.1"
#define DRV_AUTHOR "Max Gurtovoy <[email protected]>"
#define DRV_DESC "vDPA Device Simulator for block device"
#define DRV_LICENSE "GPL v2"
#define VDPASIM_BLK_FEATURES (VDPASIM_FEATURES | \
(1ULL << VIRTIO_BLK_F_FLUSH) | \
(1ULL << VIRTIO_BLK_F_SIZE_MAX) | \
(1ULL << VIRTIO_BLK_F_SEG_MAX) | \
(1ULL << VIRTIO_BLK_F_BLK_SIZE) | \
(1ULL << VIRTIO_BLK_F_TOPOLOGY) | \
(1ULL << VIRTIO_BLK_F_MQ) | \
(1ULL << VIRTIO_BLK_F_DISCARD) | \
(1ULL << VIRTIO_BLK_F_WRITE_ZEROES))
#define VDPASIM_BLK_CAPACITY 0x40000
#define VDPASIM_BLK_SIZE_MAX 0x1000
#define VDPASIM_BLK_SEG_MAX 32
#define VDPASIM_BLK_DWZ_MAX_SECTORS UINT_MAX
/* 1 virtqueue, 1 address space, 1 virtqueue group */
#define VDPASIM_BLK_VQ_NUM 1
#define VDPASIM_BLK_AS_NUM 1
#define VDPASIM_BLK_GROUP_NUM 1
/* Per-device state for the block simulator: the backing storage is either
 * a private buffer or the module-wide shared one (shared_backend).
 */
struct vdpasim_blk {
 struct vdpasim vdpasim;
 void *buffer;
 bool shared_backend;
};
/* Downcast the embedded vdpasim to its containing vdpasim_blk. */
static struct vdpasim_blk *sim_to_blk(struct vdpasim *vdpasim)
{
 return container_of(vdpasim, struct vdpasim_blk, vdpasim);
}
/* Device ID string returned for VIRTIO_BLK_T_GET_ID requests. */
static char vdpasim_blk_id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim";
static bool shared_backend;
module_param(shared_backend, bool, 0444);
MODULE_PARM_DESC(shared_backend, "Enable the shared backend between virtio-blk devices");
static void *shared_buffer;
/* mutex to synchronize shared_buffer access */
static DEFINE_MUTEX(shared_buffer_mutex);
/* Serialize backing-buffer access, but only when it is shared. */
static void vdpasim_blk_buffer_lock(struct vdpasim_blk *blk)
{
	if (!blk->shared_backend)
		return;

	mutex_lock(&shared_buffer_mutex);
}
/* Counterpart of vdpasim_blk_buffer_lock(). */
static void vdpasim_blk_buffer_unlock(struct vdpasim_blk *blk)
{
	if (!blk->shared_backend)
		return;

	mutex_unlock(&shared_buffer_mutex);
}
/* Validate a request's sector range against the simulated capacity and
 * the per-request sector limit. Returns false (and logs at debug level)
 * when the range is out of bounds.
 *
 * Fix: the first check logged an out-of-range start_sector but fell
 * through without returning false. Since start_sector > capacity makes
 * "VDPASIM_BLK_CAPACITY - start_sector" underflow (u64), the final check
 * could not catch it either, letting out-of-bounds requests through.
 */
static bool vdpasim_blk_check_range(struct vdpasim *vdpasim, u64 start_sector,
				    u64 num_sectors, u64 max_sectors)
{
	if (start_sector > VDPASIM_BLK_CAPACITY) {
		dev_dbg(&vdpasim->vdpa.dev,
			"starting sector exceeds the capacity - start: 0x%llx capacity: 0x%x\n",
			start_sector, VDPASIM_BLK_CAPACITY);
		return false;
	}

	if (num_sectors > max_sectors) {
		dev_dbg(&vdpasim->vdpa.dev,
			"number of sectors exceeds the max allowed in a request - num: 0x%llx max: 0x%llx\n",
			num_sectors, max_sectors);
		return false;
	}

	if (num_sectors > VDPASIM_BLK_CAPACITY - start_sector) {
		dev_dbg(&vdpasim->vdpa.dev,
			"request exceeds the capacity - start: 0x%llx num: 0x%llx capacity: 0x%x\n",
			start_sector, num_sectors, VDPASIM_BLK_CAPACITY);
		return false;
	}

	return true;
}
/* Returns 'true' if the request is handled (with or without an I/O error)
 * and the status is correctly written in the last byte of the 'in iov',
 * 'false' otherwise.
 */
static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
 struct vdpasim_virtqueue *vq)
{
 struct vdpasim_blk *blk = sim_to_blk(vdpasim);
 size_t pushed = 0, to_pull, to_push;
 struct virtio_blk_outhdr hdr;
 bool handled = false;
 ssize_t bytes;
 loff_t offset;
 u64 sector;
 u8 status;
 u32 type;
 int ret;
 /* Fetch the next available descriptor chain, if any. */
 ret = vringh_getdesc_iotlb(&vq->vring, &vq->out_iov, &vq->in_iov,
 &vq->head, GFP_ATOMIC);
 if (ret != 1)
 return false;
 /* Need at least an out header and an in status byte. */
 if (vq->out_iov.used < 1 || vq->in_iov.used < 1) {
 dev_dbg(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov %u\n",
 vq->out_iov.used, vq->in_iov.used);
 goto err;
 }
 if (vq->in_iov.iov[vq->in_iov.used - 1].iov_len < 1) {
 dev_dbg(&vdpasim->vdpa.dev, "request in header too short\n");
 goto err;
 }
 /* The last byte is the status and we checked if the last iov has
 * enough room for it.
 */
 to_push = vringh_kiov_length(&vq->in_iov) - 1;
 to_pull = vringh_kiov_length(&vq->out_iov);
 bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &hdr,
 sizeof(hdr));
 if (bytes != sizeof(hdr)) {
 dev_dbg(&vdpasim->vdpa.dev, "request out header too short\n");
 goto err;
 }
 to_pull -= bytes;
 type = vdpasim32_to_cpu(vdpasim, hdr.type);
 sector = vdpasim64_to_cpu(vdpasim, hdr.sector);
 offset = sector << SECTOR_SHIFT;
 status = VIRTIO_BLK_S_OK;
 /* Per the virtio-blk spec, sector must be 0 for non-R/W requests. */
 if (type != VIRTIO_BLK_T_IN && type != VIRTIO_BLK_T_OUT &&
 sector != 0) {
 dev_dbg(&vdpasim->vdpa.dev,
 "sector must be 0 for %u request - sector: 0x%llx\n",
 type, sector);
 status = VIRTIO_BLK_S_IOERR;
 goto err_status;
 }
 switch (type) {
 case VIRTIO_BLK_T_IN:
 /* Read: copy from the backing buffer into the guest. */
 if (!vdpasim_blk_check_range(vdpasim, sector,
 to_push >> SECTOR_SHIFT,
 VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)) {
 status = VIRTIO_BLK_S_IOERR;
 break;
 }
 vdpasim_blk_buffer_lock(blk);
 bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,
 blk->buffer + offset, to_push);
 vdpasim_blk_buffer_unlock(blk);
 if (bytes < 0) {
 dev_dbg(&vdpasim->vdpa.dev,
 "vringh_iov_push_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
 bytes, offset, to_push);
 status = VIRTIO_BLK_S_IOERR;
 break;
 }
 pushed += bytes;
 break;
 case VIRTIO_BLK_T_OUT:
 /* Write: copy from the guest into the backing buffer. */
 if (!vdpasim_blk_check_range(vdpasim, sector,
 to_pull >> SECTOR_SHIFT,
 VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)) {
 status = VIRTIO_BLK_S_IOERR;
 break;
 }
 vdpasim_blk_buffer_lock(blk);
 bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov,
 blk->buffer + offset, to_pull);
 vdpasim_blk_buffer_unlock(blk);
 if (bytes < 0) {
 dev_dbg(&vdpasim->vdpa.dev,
 "vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
 bytes, offset, to_pull);
 status = VIRTIO_BLK_S_IOERR;
 break;
 }
 break;
 case VIRTIO_BLK_T_GET_ID:
 bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,
 vdpasim_blk_id,
 VIRTIO_BLK_ID_BYTES);
 if (bytes < 0) {
 dev_dbg(&vdpasim->vdpa.dev,
 "vringh_iov_push_iotlb() error: %zd\n", bytes);
 status = VIRTIO_BLK_S_IOERR;
 break;
 }
 pushed += bytes;
 break;
 case VIRTIO_BLK_T_FLUSH:
 /* nothing to do */
 break;
 case VIRTIO_BLK_T_DISCARD:
 case VIRTIO_BLK_T_WRITE_ZEROES: {
 struct virtio_blk_discard_write_zeroes range;
 u32 num_sectors, flags;
 /* Exactly one discard/write-zeroes segment is supported. */
 if (to_pull != sizeof(range)) {
 dev_dbg(&vdpasim->vdpa.dev,
 "discard/write_zeroes header len: 0x%zx [expected: 0x%zx]\n",
 to_pull, sizeof(range));
 status = VIRTIO_BLK_S_IOERR;
 break;
 }
 bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &range,
 to_pull);
 if (bytes < 0) {
 dev_dbg(&vdpasim->vdpa.dev,
 "vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
 bytes, offset, to_pull);
 status = VIRTIO_BLK_S_IOERR;
 break;
 }
 sector = le64_to_cpu(range.sector);
 offset = sector << SECTOR_SHIFT;
 num_sectors = le32_to_cpu(range.num_sectors);
 flags = le32_to_cpu(range.flags);
 if (type == VIRTIO_BLK_T_DISCARD && flags != 0) {
 dev_dbg(&vdpasim->vdpa.dev,
 "discard unexpected flags set - flags: 0x%x\n",
 flags);
 status = VIRTIO_BLK_S_UNSUPP;
 break;
 }
 if (type == VIRTIO_BLK_T_WRITE_ZEROES &&
 flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
 dev_dbg(&vdpasim->vdpa.dev,
 "write_zeroes unexpected flags set - flags: 0x%x\n",
 flags);
 status = VIRTIO_BLK_S_UNSUPP;
 break;
 }
 if (!vdpasim_blk_check_range(vdpasim, sector, num_sectors,
 VDPASIM_BLK_DWZ_MAX_SECTORS)) {
 status = VIRTIO_BLK_S_IOERR;
 break;
 }
 /* Discard is a no-op on a RAM backend; only zero for WZ. */
 if (type == VIRTIO_BLK_T_WRITE_ZEROES) {
 vdpasim_blk_buffer_lock(blk);
 memset(blk->buffer + offset, 0,
 num_sectors << SECTOR_SHIFT);
 vdpasim_blk_buffer_unlock(blk);
 }
 break;
 }
 default:
 dev_dbg(&vdpasim->vdpa.dev,
 "Unsupported request type %d\n", type);
 status = VIRTIO_BLK_S_IOERR;
 break;
 }
 err_status:
 /* If some operations fail, we need to skip the remaining bytes
 * to put the status in the last byte
 */
 if (to_push - pushed > 0)
 vringh_kiov_advance(&vq->in_iov, to_push - pushed);
 /* Last byte is the status */
 bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, &status, 1);
 if (bytes != 1)
 goto err;
 pushed += bytes;
 /* Make sure data is wrote before advancing index */
 smp_wmb();
 handled = true;
 err:
 vringh_complete_iotlb(&vq->vring, vq->head, pushed);
 return handled;
}
/* Bottom-half worker: serve pending block requests on every ready
 * virtqueue.  At most 5 requests are handled per queue per run; if more
 * work remains the job reschedules itself so a busy device does not
 * monopolize the worker thread.
 */
static void vdpasim_blk_work(struct vdpasim *vdpasim)
{
	bool reschedule = false;
	int i;
	mutex_lock(&vdpasim->mutex);
	/* Ignore kicks until the driver is ready and the device is running */
	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
		goto out;
	if (!vdpasim->running)
		goto out;
	for (i = 0; i < VDPASIM_BLK_VQ_NUM; i++) {
		struct vdpasim_virtqueue *vq = &vdpasim->vqs[i];
		int reqs = 0;
		if (!vq->ready)
			continue;
		while (vdpasim_blk_handle_req(vdpasim, vq)) {
			/* Make sure used is visible before raising the interrupt. */
			smp_wmb();
			local_bh_disable();
			if (vringh_need_notify_iotlb(&vq->vring) > 0)
				vringh_notify(&vq->vring);
			local_bh_enable();
			/* Budget exhausted: defer the rest to a new run */
			if (++reqs > 4) {
				reschedule = true;
				break;
			}
		}
	}
out:
	mutex_unlock(&vdpasim->mutex);
	if (reschedule)
		vdpasim_schedule_work(vdpasim);
}
/* Populate the virtio-blk config space advertised to the guest.
 * All limits are fixed compile-time constants of the simulator.
 */
static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
{
	struct virtio_blk_config *blk_config = config;

	memset(blk_config, 0, sizeof(*blk_config));

	/* Capacity and queue/segment limits */
	blk_config->capacity = cpu_to_vdpasim64(vdpasim, VDPASIM_BLK_CAPACITY);
	blk_config->size_max = cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_SIZE_MAX);
	blk_config->seg_max = cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_SEG_MAX);
	blk_config->num_queues = cpu_to_vdpasim16(vdpasim, VDPASIM_BLK_VQ_NUM);

	/* I/O size hints, in units of blk_size */
	blk_config->min_io_size = cpu_to_vdpasim16(vdpasim, 1);
	blk_config->opt_io_size = cpu_to_vdpasim32(vdpasim, 1);
	blk_config->blk_size = cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);

	/* Limits for VIRTIO_BLK_F_DISCARD */
	blk_config->discard_sector_alignment =
		cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
	blk_config->max_discard_sectors =
		cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_DWZ_MAX_SECTORS);
	blk_config->max_discard_seg = cpu_to_vdpasim32(vdpasim, 1);

	/* Limits for VIRTIO_BLK_F_WRITE_ZEROES */
	blk_config->max_write_zeroes_sectors =
		cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_DWZ_MAX_SECTORS);
	blk_config->max_write_zeroes_seg = cpu_to_vdpasim32(vdpasim, 1);
}
/* Device teardown hook: release the per-device backing store.
 * A shared backend buffer is owned by the module, not the device,
 * so it must not be freed here.
 */
static void vdpasim_blk_free(struct vdpasim *vdpasim)
{
	struct vdpasim_blk *blk = sim_to_blk(vdpasim);

	if (blk->shared_backend)
		return;

	kvfree(blk->buffer);
}
/* Release callback for the statically allocated management parent
 * device: nothing to free, but the driver core requires a non-NULL
 * ->release().
 */
static void vdpasim_blk_mgmtdev_release(struct device *dev)
{
}
/* Parent device for the vdpasim_blk management interface. */
static struct device vdpasim_blk_mgmtdev = {
	.init_name = "vdpasim_blk",
	.release = vdpasim_blk_mgmtdev_release,
};
/* Management op: create one simulated virtio-blk device.
 *
 * On any failure after vdpasim_create() succeeds, put_device() drops
 * the reference so the release path (which invokes vdpasim_blk_free())
 * cleans up; blk->buffer is freed there, hence no explicit kvfree on
 * the error path.
 */
static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			       const struct vdpa_dev_set_config *config)
{
	struct vdpasim_dev_attr dev_attr = {};
	struct vdpasim_blk *blk;
	struct vdpasim *simdev;
	int ret;
	/* Describe this flavour to the common simulator core */
	dev_attr.mgmt_dev = mdev;
	dev_attr.name = name;
	dev_attr.id = VIRTIO_ID_BLOCK;
	dev_attr.supported_features = VDPASIM_BLK_FEATURES;
	dev_attr.nvqs = VDPASIM_BLK_VQ_NUM;
	dev_attr.ngroups = VDPASIM_BLK_GROUP_NUM;
	dev_attr.nas = VDPASIM_BLK_AS_NUM;
	dev_attr.alloc_size = sizeof(struct vdpasim_blk);
	dev_attr.config_size = sizeof(struct virtio_blk_config);
	dev_attr.get_config = vdpasim_blk_get_config;
	dev_attr.work_fn = vdpasim_blk_work;
	dev_attr.free = vdpasim_blk_free;
	simdev = vdpasim_create(&dev_attr, config);
	if (IS_ERR(simdev))
		return PTR_ERR(simdev);
	blk = sim_to_blk(simdev);
	blk->shared_backend = shared_backend;
	/* Either borrow the module-wide buffer or allocate a private one */
	if (blk->shared_backend) {
		blk->buffer = shared_buffer;
	} else {
		blk->buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
				       GFP_KERNEL);
		if (!blk->buffer) {
			ret = -ENOMEM;
			goto put_dev;
		}
	}
	/* After this the device is live on the bus and can receive requests */
	ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_BLK_VQ_NUM);
	if (ret)
		goto put_dev;
	return 0;
put_dev:
	put_device(&simdev->vdpa.dev);
	return ret;
}
/* Management op: remove a previously added simulator instance. */
static void vdpasim_blk_dev_del(struct vdpa_mgmt_dev *mdev,
				struct vdpa_device *dev)
{
	struct vdpasim *simdev;

	simdev = container_of(dev, struct vdpasim, vdpa);
	_vdpa_unregister_device(&simdev->vdpa);
}
/* Management device callbacks (vdpa dev add/del via netlink). */
static const struct vdpa_mgmtdev_ops vdpasim_blk_mgmtdev_ops = {
	.dev_add = vdpasim_blk_dev_add,
	.dev_del = vdpasim_blk_dev_del
};
/* Virtio IDs this management device can instantiate. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
/* The vdpa management device registered at module init. */
static struct vdpa_mgmt_dev mgmt_dev = {
	.device = &vdpasim_blk_mgmtdev,
	.id_table = id_table,
	.ops = &vdpasim_blk_mgmtdev_ops,
};
/* Module init: register the management parent device, then the vdpa
 * management interface, then (optionally) allocate the shared backend
 * buffer.
 *
 * Bug fix: if the shared_buffer allocation failed, the original code
 * jumped to parent_err and only unregistered the parent device, leaking
 * the already-registered management interface. Unwind in strict reverse
 * order of setup instead.
 */
static int __init vdpasim_blk_init(void)
{
	int ret;

	ret = device_register(&vdpasim_blk_mgmtdev);
	if (ret) {
		/* A failed device_register() still needs put_device() */
		put_device(&vdpasim_blk_mgmtdev);
		return ret;
	}

	ret = vdpa_mgmtdev_register(&mgmt_dev);
	if (ret)
		goto parent_err;

	if (shared_backend) {
		shared_buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
					 GFP_KERNEL);
		if (!shared_buffer) {
			ret = -ENOMEM;
			goto mgmt_err;
		}
	}

	return 0;

mgmt_err:
	vdpa_mgmtdev_unregister(&mgmt_dev);
parent_err:
	device_unregister(&vdpasim_blk_mgmtdev);
	return ret;
}
/* Module exit: free the shared buffer (kvfree(NULL) is a no-op when
 * shared_backend was off) and unregister in reverse order of init.
 */
static void __exit vdpasim_blk_exit(void)
{
	kvfree(shared_buffer);
	vdpa_mgmtdev_unregister(&mgmt_dev);
	device_unregister(&vdpasim_blk_mgmtdev);
}
/* Module entry/exit points and identification tags. */
module_init(vdpasim_blk_init)
module_exit(vdpasim_blk_exit)
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
|
linux-master
|
drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* VDPA simulator for networking device.
*
* Copyright (c) 2020, Red Hat Inc. All rights reserved.
* Author: Jason Wang <[email protected]>
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <net/netlink.h>
#include <uapi/linux/virtio_net.h>
#include <uapi/linux/vdpa.h>
#include "vdpa_sim.h"
/* Module identity strings consumed by the MODULE_*() tags. */
#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <[email protected]>"
#define DRV_DESC     "vDPA Device Simulator for networking device"
#define DRV_LICENSE  "GPL v2"
/* Feature bits offered on top of the common simulator feature set. */
#define VDPASIM_NET_FEATURES	(VDPASIM_FEATURES | \
				 (1ULL << VIRTIO_NET_F_MAC) | \
				 (1ULL << VIRTIO_NET_F_STATUS) | \
				 (1ULL << VIRTIO_NET_F_MTU) | \
				 (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				 (1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR))
/* 3 virtqueues, 2 address spaces, 2 virtqueue groups */
#define VDPASIM_NET_VQ_NUM	3
#define VDPASIM_NET_AS_NUM	2
#define VDPASIM_NET_GROUP_NUM	2
/* Per-direction (rx or tx) data-queue counters, guarded by syncp for
 * consistent 64-bit reads on 32-bit hosts.
 */
struct vdpasim_dataq_stats {
	struct u64_stats_sync syncp;
	u64 pkts;
	u64 bytes;
	u64 drops;
	u64 errors;
	u64 overruns;
};
/* Control-virtqueue counters, same syncp discipline as the data queues. */
struct vdpasim_cq_stats {
	struct u64_stats_sync syncp;
	u64 requests;
	u64 successes;
	u64 errors;
};
/* Net-flavour private state embedded after the common vdpasim core.
 * buffer is a single-packet bounce buffer (PAGE_SIZE) used to loop TX
 * frames back onto RX.
 */
struct vdpasim_net{
	struct vdpasim vdpasim;
	struct vdpasim_dataq_stats tx_stats;
	struct vdpasim_dataq_stats rx_stats;
	struct vdpasim_cq_stats cq_stats;
	void *buffer;
};
/* Downcast from the embedded common core to the net flavour. */
static struct vdpasim_net *sim_to_net(struct vdpasim *vdpasim)
{
	return container_of(vdpasim, struct vdpasim_net, vdpasim);
}
/* Complete the descriptor at vq->head with @len written bytes and, if
 * the driver asked for it, raise the used-buffer notification.
 */
static void vdpasim_net_complete(struct vdpasim_virtqueue *vq, size_t len)
{
	/* Make sure data is written before advancing index */
	smp_wmb();
	vringh_complete_iotlb(&vq->vring, vq->head, len);
	/* Make sure used is visible before raising the interrupt. */
	smp_wmb();
	local_bh_disable();
	if (vringh_need_notify_iotlb(&vq->vring) > 0)
		vringh_notify(&vq->vring);
	local_bh_enable();
}
/* Decide whether the frame just pulled into net->buffer should be
 * looped back onto the RX queue: accept broadcast/multicast
 * destinations, and unicast frames whose destination MAC matches the
 * device MAC in config space.
 *
 * Bug fix: the unicast comparison used strncmp(), which stops at the
 * first 0x00 byte. MAC addresses are raw bytes, so two different MACs
 * sharing a leading zero byte compared as equal (and a MAC starting
 * with 0x00 matched everything). Use ether_addr_equal(), a 6-byte
 * binary compare from <linux/etherdevice.h> (already included); both
 * operands are u16-aligned (kvmalloc'd buffer + even-sized virtio-net
 * header, and mac[] at offset 0 of the kzalloc'd config).
 */
static bool receive_filter(struct vdpasim *vdpasim, size_t len)
{
	bool modern = vdpasim->features & (1ULL << VIRTIO_F_VERSION_1);
	size_t hdr_len = modern ? sizeof(struct virtio_net_hdr_v1) :
				  sizeof(struct virtio_net_hdr);
	struct virtio_net_config *vio_config = vdpasim->config;
	struct vdpasim_net *net = sim_to_net(vdpasim);
	/* Frame must at least carry the virtio-net header and a dest MAC */
	if (len < ETH_ALEN + hdr_len)
		return false;
	if (is_broadcast_ether_addr(net->buffer + hdr_len) ||
	    is_multicast_ether_addr(net->buffer + hdr_len))
		return true;
	if (ether_addr_equal(net->buffer + hdr_len, vio_config->mac))
		return true;
	return false;
}
/* Handle a VIRTIO_NET_CTRL_MAC class command from the control queue.
 * Only MAC_ADDR_SET is implemented: it pulls ETH_ALEN bytes from the
 * request into the device MAC in config space.
 * Returns VIRTIO_NET_OK on success, VIRTIO_NET_ERR otherwise.
 */
static virtio_net_ctrl_ack vdpasim_handle_ctrl_mac(struct vdpasim *vdpasim,
						   u8 cmd)
{
	struct virtio_net_config *vio_config = vdpasim->config;
	struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2];
	virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
	size_t read;
	switch (cmd) {
	case VIRTIO_NET_CTRL_MAC_ADDR_SET:
		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov,
					     vio_config->mac, ETH_ALEN);
		if (read == ETH_ALEN)
			status = VIRTIO_NET_OK;
		break;
	default:
		break;
	}
	return status;
}
/* Drain the control virtqueue (vq index 2): for each request, parse the
 * control header, dispatch by class, push the one-byte ack status back,
 * and fire the cvq callback.  Per-run counters are folded into the
 * cq_stats at the end under the u64_stats sync.
 * Called with vdpasim->mutex held (from vdpasim_net_work()).
 */
static void vdpasim_handle_cvq(struct vdpasim *vdpasim)
{
	struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2];
	struct vdpasim_net *net = sim_to_net(vdpasim);
	virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
	struct virtio_net_ctrl_hdr ctrl;
	size_t read, write;
	u64 requests = 0, errors = 0, successes = 0;
	int err;
	/* Nothing to do unless the driver negotiated a control queue */
	if (!(vdpasim->features & (1ULL << VIRTIO_NET_F_CTRL_VQ)))
		return;
	if (!cvq->ready)
		return;
	while (true) {
		err = vringh_getdesc_iotlb(&cvq->vring, &cvq->in_iov,
					   &cvq->out_iov,
					   &cvq->head, GFP_ATOMIC);
		if (err <= 0)
			break;
		++requests;
		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov, &ctrl,
					     sizeof(ctrl));
		if (read != sizeof(ctrl)) {
			++errors;
			break;
		}
		switch (ctrl.class) {
		case VIRTIO_NET_CTRL_MAC:
			status = vdpasim_handle_ctrl_mac(vdpasim, ctrl.cmd);
			break;
		default:
			/* Unknown class: status stays VIRTIO_NET_ERR */
			break;
		}
		if (status == VIRTIO_NET_OK)
			++successes;
		else
			++errors;
		/* Make sure data is written before advancing index */
		smp_wmb();
		write = vringh_iov_push_iotlb(&cvq->vring, &cvq->out_iov,
					      &status, sizeof(status));
		vringh_complete_iotlb(&cvq->vring, cvq->head, write);
		vringh_kiov_cleanup(&cvq->in_iov);
		vringh_kiov_cleanup(&cvq->out_iov);
		/* Make sure used is visible before raising the interrupt. */
		smp_wmb();
		local_bh_disable();
		if (cvq->cb)
			cvq->cb(cvq->private);
		local_bh_enable();
	}
	u64_stats_update_begin(&net->cq_stats.syncp);
	net->cq_stats.requests += requests;
	net->cq_stats.errors += errors;
	net->cq_stats.successes += successes;
	u64_stats_update_end(&net->cq_stats.syncp);
}
/* Net worker: service the control queue, then loop TX frames (vq 1)
 * back onto RX (vq 0) through the single-packet bounce buffer, applying
 * receive_filter().  Processing yields after 5 TX packets by
 * rescheduling itself.  Per-run counters are folded into the stats
 * structs after the mutex is dropped.
 */
static void vdpasim_net_work(struct vdpasim *vdpasim)
{
	struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
	struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
	struct vdpasim_net *net = sim_to_net(vdpasim);
	ssize_t read, write;
	u64 tx_pkts = 0, rx_pkts = 0, tx_bytes = 0, rx_bytes = 0;
	u64 rx_drops = 0, rx_overruns = 0, rx_errors = 0, tx_errors = 0;
	int err;
	mutex_lock(&vdpasim->mutex);
	/* Ignore kicks until the device is resumed and the driver ready */
	if (!vdpasim->running)
		goto out;
	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
		goto out;
	vdpasim_handle_cvq(vdpasim);
	if (!txq->ready || !rxq->ready)
		goto out;
	while (true) {
		err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,
					   &txq->head, GFP_ATOMIC);
		if (err <= 0) {
			if (err)
				++tx_errors;
			break;
		}
		++tx_pkts;
		read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
					     net->buffer, PAGE_SIZE);
		tx_bytes += read;
		/* Frame not addressed to us: consume TX, deliver nothing */
		if (!receive_filter(vdpasim, read)) {
			++rx_drops;
			vdpasim_net_complete(txq, 0);
			continue;
		}
		err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->in_iov,
					   &rxq->head, GFP_ATOMIC);
		if (err <= 0) {
			/* No RX buffer available: drop as an overrun */
			++rx_overruns;
			vdpasim_net_complete(txq, 0);
			break;
		}
		write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
					      net->buffer, read);
		if (write <= 0) {
			++rx_errors;
			break;
		}
		++rx_pkts;
		rx_bytes += write;
		vdpasim_net_complete(txq, 0);
		vdpasim_net_complete(rxq, write);
		/* Budget exhausted: defer the rest to a new run */
		if (tx_pkts > 4) {
			vdpasim_schedule_work(vdpasim);
			goto out;
		}
	}
out:
	mutex_unlock(&vdpasim->mutex);
	u64_stats_update_begin(&net->tx_stats.syncp);
	net->tx_stats.pkts += tx_pkts;
	net->tx_stats.bytes += tx_bytes;
	net->tx_stats.errors += tx_errors;
	u64_stats_update_end(&net->tx_stats.syncp);
	u64_stats_update_begin(&net->rx_stats.syncp);
	net->rx_stats.pkts += rx_pkts;
	net->rx_stats.bytes += rx_bytes;
	net->rx_stats.drops += rx_drops;
	net->rx_stats.errors += rx_errors;
	net->rx_stats.overruns += rx_overruns;
	u64_stats_update_end(&net->rx_stats.syncp);
}
/* Netlink vendor-stats op: emit name/value attribute pairs for one
 * virtqueue (0 = rx, 1 = tx, 2 = cvq).  Counters are snapshotted under
 * the u64_stats seqcount so the set is self-consistent.
 * Returns 0 on success, -EMSGSIZE if the skb filled up mid-dump, or
 * -EINVAL for an unknown queue index.
 */
static int vdpasim_net_get_stats(struct vdpasim *vdpasim, u16 idx,
				 struct sk_buff *msg,
				 struct netlink_ext_ack *extack)
{
	struct vdpasim_net *net = sim_to_net(vdpasim);
	u64 rx_pkts, rx_bytes, rx_errors, rx_overruns, rx_drops;
	u64 tx_pkts, tx_bytes, tx_errors, tx_drops;
	u64 cq_requests, cq_successes, cq_errors;
	unsigned int start;
	/* Any early break below reports a full message */
	int err = -EMSGSIZE;
	switch(idx) {
	case 0:
		/* RX queue: retry the snapshot if a writer raced us */
		do {
			start = u64_stats_fetch_begin(&net->rx_stats.syncp);
			rx_pkts = net->rx_stats.pkts;
			rx_bytes = net->rx_stats.bytes;
			rx_errors = net->rx_stats.errors;
			rx_overruns = net->rx_stats.overruns;
			rx_drops = net->rx_stats.drops;
		} while (u64_stats_fetch_retry(&net->rx_stats.syncp, start));
		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME,
				   "rx packets"))
			break;
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      rx_pkts, VDPA_ATTR_PAD))
			break;
		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME,
				   "rx bytes"))
			break;
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      rx_bytes, VDPA_ATTR_PAD))
			break;
		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME,
				   "rx errors"))
			break;
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      rx_errors, VDPA_ATTR_PAD))
			break;
		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME,
				   "rx overruns"))
			break;
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      rx_overruns, VDPA_ATTR_PAD))
			break;
		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME,
				   "rx drops"))
			break;
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      rx_drops, VDPA_ATTR_PAD))
			break;
		err = 0;
		break;
	case 1:
		/* TX queue */
		do {
			start = u64_stats_fetch_begin(&net->tx_stats.syncp);
			tx_pkts = net->tx_stats.pkts;
			tx_bytes = net->tx_stats.bytes;
			tx_errors = net->tx_stats.errors;
			tx_drops = net->tx_stats.drops;
		} while (u64_stats_fetch_retry(&net->tx_stats.syncp, start));
		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME,
				   "tx packets"))
			break;
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      tx_pkts, VDPA_ATTR_PAD))
			break;
		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME,
				   "tx bytes"))
			break;
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      tx_bytes, VDPA_ATTR_PAD))
			break;
		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME,
				   "tx errors"))
			break;
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      tx_errors, VDPA_ATTR_PAD))
			break;
		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME,
				   "tx drops"))
			break;
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      tx_drops, VDPA_ATTR_PAD))
			break;
		err = 0;
		break;
	case 2:
		/* Control queue */
		do {
			start = u64_stats_fetch_begin(&net->cq_stats.syncp);
			cq_requests = net->cq_stats.requests;
			cq_successes = net->cq_stats.successes;
			cq_errors = net->cq_stats.errors;
		} while (u64_stats_fetch_retry(&net->cq_stats.syncp, start));
		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME,
				   "cvq requests"))
			break;
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      cq_requests, VDPA_ATTR_PAD))
			break;
		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME,
				   "cvq successes"))
			break;
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      cq_successes, VDPA_ATTR_PAD))
			break;
		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME,
				   "cvq errors"))
			break;
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      cq_errors, VDPA_ATTR_PAD))
			break;
		err = 0;
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
/* Refresh the device-controlled part of the virtio-net config space:
 * only the link status here; MAC/MTU are written at device-add time.
 */
static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
{
	struct virtio_net_config *net_config = config;

	net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
}
/* Apply creation-time attributes (MAC, MTU) from the management
 * interface to the config space; MTU defaults to 1500 when not given.
 */
static void vdpasim_net_setup_config(struct vdpasim *vdpasim,
				     const struct vdpa_dev_set_config *config)
{
	struct virtio_net_config *vio_config = vdpasim->config;
	if (config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR))
		memcpy(vio_config->mac, config->net.mac, ETH_ALEN);
	if (config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MTU))
		vio_config->mtu = cpu_to_vdpasim16(vdpasim, config->net.mtu);
	else
		/* Setup default MTU to be 1500 */
		vio_config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
}
static void vdpasim_net_free(struct vdpasim *vdpasim)
{
struct vdpasim_net *net = sim_to_net(vdpasim);
kvfree(net->buffer);
}
/* Release callback for the statically allocated management parent
 * device: nothing to free, but the driver core requires a non-NULL
 * ->release().
 */
static void vdpasim_net_mgmtdev_release(struct device *dev)
{
}
/* Parent device for the vdpasim_net management interface. */
static struct device vdpasim_net_mgmtdev = {
	.init_name = "vdpasim_net",
	.release = vdpasim_net_mgmtdev_release,
};
/* Management op: create one simulated virtio-net device.
 *
 * On failure after vdpasim_create() succeeds, put_device() drops the
 * reference; the release path runs vdpasim_net_free(), so net->buffer
 * (NULL or allocated) is reclaimed there.
 */
static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			       const struct vdpa_dev_set_config *config)
{
	struct vdpasim_dev_attr dev_attr = {};
	struct vdpasim_net *net;
	struct vdpasim *simdev;
	int ret;
	/* Describe this flavour to the common simulator core */
	dev_attr.mgmt_dev = mdev;
	dev_attr.name = name;
	dev_attr.id = VIRTIO_ID_NET;
	dev_attr.supported_features = VDPASIM_NET_FEATURES;
	dev_attr.nvqs = VDPASIM_NET_VQ_NUM;
	dev_attr.ngroups = VDPASIM_NET_GROUP_NUM;
	dev_attr.nas = VDPASIM_NET_AS_NUM;
	dev_attr.alloc_size = sizeof(struct vdpasim_net);
	dev_attr.config_size = sizeof(struct virtio_net_config);
	dev_attr.get_config = vdpasim_net_get_config;
	dev_attr.work_fn = vdpasim_net_work;
	dev_attr.get_stats = vdpasim_net_get_stats;
	dev_attr.free = vdpasim_net_free;
	simdev = vdpasim_create(&dev_attr, config);
	if (IS_ERR(simdev))
		return PTR_ERR(simdev);
	vdpasim_net_setup_config(simdev, config);
	net = sim_to_net(simdev);
	u64_stats_init(&net->tx_stats.syncp);
	u64_stats_init(&net->rx_stats.syncp);
	u64_stats_init(&net->cq_stats.syncp);
	/* Single-packet bounce buffer used by the TX->RX loopback */
	net->buffer = kvmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!net->buffer) {
		ret = -ENOMEM;
		goto reg_err;
	}
	/*
	 * Initialization must be completed before this call, since it can
	 * connect the device to the vDPA bus, so requests can arrive after
	 * this call.
	 */
	ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM);
	if (ret)
		goto reg_err;
	return 0;
reg_err:
	put_device(&simdev->vdpa.dev);
	return ret;
}
/* Management op: remove a previously added simulator instance. */
static void vdpasim_net_dev_del(struct vdpa_mgmt_dev *mdev,
				struct vdpa_device *dev)
{
	struct vdpasim *simdev;

	simdev = container_of(dev, struct vdpasim, vdpa);
	_vdpa_unregister_device(&simdev->vdpa);
}
/* Management device callbacks (vdpa dev add/del via netlink). */
static const struct vdpa_mgmtdev_ops vdpasim_net_mgmtdev_ops = {
	.dev_add = vdpasim_net_dev_add,
	.dev_del = vdpasim_net_dev_del
};
/* Virtio IDs this management device can instantiate. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
/* The vdpa management device registered at module init; advertises
 * which creation-time attributes (MAC, MTU, features) userspace may set.
 */
static struct vdpa_mgmt_dev mgmt_dev = {
	.device = &vdpasim_net_mgmtdev,
	.id_table = id_table,
	.ops = &vdpasim_net_mgmtdev_ops,
	.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR |
			     1 << VDPA_ATTR_DEV_NET_CFG_MTU |
			     1 << VDPA_ATTR_DEV_FEATURES),
	.max_supported_vqs = VDPASIM_NET_VQ_NUM,
	.supported_features = VDPASIM_NET_FEATURES,
};
/* Module init: register the management parent device, then the vdpa
 * management interface; unwind in reverse order on failure.
 */
static int __init vdpasim_net_init(void)
{
	int ret;
	ret = device_register(&vdpasim_net_mgmtdev);
	if (ret) {
		/* A failed device_register() still needs put_device() */
		put_device(&vdpasim_net_mgmtdev);
		return ret;
	}
	ret = vdpa_mgmtdev_register(&mgmt_dev);
	if (ret)
		goto parent_err;
	return 0;
parent_err:
	device_unregister(&vdpasim_net_mgmtdev);
	return ret;
}
/* Module exit: unregister in reverse order of init. */
static void __exit vdpasim_net_exit(void)
{
	vdpa_mgmtdev_unregister(&mgmt_dev);
	device_unregister(&vdpasim_net_mgmtdev);
}
/* Module entry/exit points and identification tags. */
module_init(vdpasim_net_init);
module_exit(vdpasim_net_exit);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
|
linux-master
|
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* VDPA device simulator core.
*
* Copyright (c) 2020, Red Hat Inc. All rights reserved.
* Author: Jason Wang <[email protected]>
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/vdpa.h>
#include <uapi/linux/vhost_types.h>
#include "vdpa_sim.h"
/* Module identity strings consumed by the MODULE_*() tags. */
#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <[email protected]>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"
/* Expose the batched set_map() config ops instead of per-range dma ops */
static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 -Enable; 0 - Disable");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");
static bool use_va = true;
module_param(use_va, bool, 0444);
MODULE_PARM_DESC(use_va, "Enable/disable the device's ability to use VA");
/* Simulator-wide virtqueue limits */
#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0
/* Work item used to (re)bind an mm to the simulator's worker thread;
 * ret carries the outcome back to the submitter.
 */
struct vdpasim_mm_work {
	struct kthread_work work;
	struct vdpasim *vdpasim;
	struct mm_struct *mm_to_bind;
	int ret;
};
/* Runs on the worker thread: record the mm the worker should use for
 * VA translations from now on (picked up by vdpasim_work_fn()).
 */
static void vdpasim_mm_work_fn(struct kthread_work *work)
{
	struct vdpasim_mm_work *mm_work =
		container_of(work, struct vdpasim_mm_work, work);
	struct vdpasim *vdpasim = mm_work->vdpasim;
	mm_work->ret = 0;
	//TODO: should we attach the cgroup of the mm owner?
	vdpasim->mm_bound = mm_work->mm_to_bind;
}
/* Queue an mm-change work item on the simulator's worker and wait for
 * it to finish, so the new binding is visible before we return.
 */
static void vdpasim_worker_change_mm_sync(struct vdpasim *vdpasim,
					  struct vdpasim_mm_work *mm_work)
{
	struct kthread_work *work = &mm_work->work;
	kthread_init_work(work, vdpasim_mm_work_fn);
	kthread_queue_work(vdpasim->worker, work);
	kthread_flush_work(work);
}
/* Downcast from the generic vdpa device to the simulator instance. */
static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}
/* vringh notify hook: forward the used-buffer event to the driver's
 * callback, if one was installed via set_vq_cb().
 */
static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (vq->cb)
		vq->cb(vq->private);
}
/* Bring a virtqueue online: (re)initialize its vringh over the
 * addresses the driver programmed, preserving last_avail_idx across the
 * re-init so enabling a queue does not lose progress.
 */
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	uint16_t last_avail_idx = vq->vring.last_avail_idx;
	/* Ring addresses are guest IOVA/VA values, not kernel pointers */
	struct vring_desc *desc = (struct vring_desc *)
				  (uintptr_t)vq->desc_addr;
	struct vring_avail *avail = (struct vring_avail *)
				    (uintptr_t)vq->driver_addr;
	struct vring_used *used = (struct vring_used *)
				  (uintptr_t)vq->device_addr;
	/* VA mode is only usable once an mm has been bound to the worker */
	if (use_va && vdpasim->mm_bound) {
		vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num,
				     true, desc, avail, used);
	} else {
		vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num,
				  true, desc, avail, used);
	}
	vq->vring.last_avail_idx = last_avail_idx;
	/*
	 * Since vdpa_sim does not support receive inflight descriptors as a
	 * destination of a migration, let's set both avail_idx and used_idx
	 * the same at vq start. This is how vhost-user works in a
	 * VHOST_SET_VRING_BASE call.
	 *
	 * Although the simple fix is to set last_used_idx at
	 * vdpasim_set_vq_state, it would be reset at vdpasim_queue_ready.
	 */
	vq->vring.last_used_idx = last_avail_idx;
	vq->vring.notify = vdpasim_vq_notify;
}
/* Return a virtqueue to its power-on state: disabled, no addresses, no
 * callback, and a fresh (inactive) vringh sized to the maximum.
 */
static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);
	vq->vring.notify = NULL;
}
/* Full device reset: reset every virtqueue back to address space 0,
 * restore all address spaces to 1:1 passthrough mappings, clear
 * negotiated features/status and bump the config generation.
 */
static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
	int i;
	spin_lock(&vdpasim->iommu_lock);
	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);
	}
	for (i = 0; i < vdpasim->dev_attr.nas; i++) {
		vhost_iotlb_reset(&vdpasim->iommu[i]);
		/* Identity-map the whole range: passthrough until programmed */
		vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
				      0, VHOST_MAP_RW);
		vdpasim->iommu_pt[i] = true;
	}
	vdpasim->running = true;
	spin_unlock(&vdpasim->iommu_lock);
	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}
static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;
/* Worker-thread trampoline: when running in VA mode with a bound mm,
 * adopt that mm for the duration of the flavour's work_fn so guest
 * virtual addresses can be dereferenced; bail out if the mm is already
 * gone (mmget_not_zero() failure).
 */
static void vdpasim_work_fn(struct kthread_work *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct mm_struct *mm = vdpasim->mm_bound;
	if (use_va && mm) {
		if (!mmget_not_zero(mm))
			return;
		kthread_use_mm(mm);
	}
	vdpasim->dev_attr.work_fn(vdpasim);
	if (use_va && mm) {
		kthread_unuse_mm(mm);
		mmput(mm);
	}
}
/**
 * vdpasim_create - allocate and initialize a simulated vDPA device
 * @dev_attr: flavour-specific attributes (queue count, config size, hooks)
 * @config: creation-time attributes from the management interface
 *
 * Returns the new instance, or an ERR_PTR() on failure.  On error paths
 * after allocation, put_device() drops the reference taken by
 * __vdpa_alloc_device() and the release callback reclaims partially
 * initialized state.
 *
 * Fix: when kthread_create_worker() fails, propagate its PTR_ERR()
 * instead of silently reporting the default -ENOMEM.
 */
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
			       const struct vdpa_dev_set_config *config)
{
	const struct vdpa_config_ops *ops;
	struct vdpa_device *vdpa;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (!dev_attr->alloc_size)
		return ERR_PTR(-EINVAL);

	/* A user-provided feature set must be a subset of what we support */
	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features &
		    ~dev_attr->supported_features)
			return ERR_PTR(-EINVAL);
		dev_attr->supported_features =
			config->device_features;
	}

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpa = __vdpa_alloc_device(NULL, ops,
				   dev_attr->ngroups, dev_attr->nas,
				   dev_attr->alloc_size,
				   dev_attr->name, use_va);
	if (IS_ERR(vdpa)) {
		ret = PTR_ERR(vdpa);
		goto err_alloc;
	}

	vdpasim = vdpa_to_sim(vdpa);
	vdpasim->dev_attr = *dev_attr;
	dev = &vdpasim->vdpa.dev;

	kthread_init_work(&vdpasim->work, vdpasim_work_fn);
	vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s",
						dev_attr->name);
	if (IS_ERR(vdpasim->worker)) {
		ret = PTR_ERR(vdpasim->worker);
		goto err_iommu;
	}

	mutex_init(&vdpasim->mutex);
	spin_lock_init(&vdpasim->iommu_lock);

	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	/* One iotlb (and one passthrough flag) per address space */
	vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
				       sizeof(*vdpasim->iommu), GFP_KERNEL);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->iommu_pt = kmalloc_array(vdpasim->dev_attr.nas,
					  sizeof(*vdpasim->iommu_pt), GFP_KERNEL);
	if (!vdpasim->iommu_pt)
		goto err_iommu;

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);

	/* All vrings start out in address space 0 */
	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);
/* Queue the device's work item on its dedicated kthread worker. */
void vdpasim_schedule_work(struct vdpasim *vdpasim)
{
	kthread_queue_work(vdpasim->worker, &vdpasim->work);
}
EXPORT_SYMBOL_GPL(vdpasim_schedule_work);
/* vdpa op: record the ring addresses programmed by the driver; they are
 * consumed when the queue is enabled (vdpasim_queue_ready()).
 */
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;
	return 0;
}
/* vdpa op: record the ring size negotiated by the driver. */
static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	vdpa_to_sim(vdpa)->vqs[idx].num = num;
}
/* vdpa op: driver kick.  While suspended (not running but DRIVER_OK),
 * just remember that a kick happened so vdpasim_resume() can replay it;
 * otherwise schedule the worker for ready queues.
 */
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	if (!vdpasim->running &&
	    (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vdpasim->pending_kick = true;
		return;
	}
	if (vq->ready)
		vdpasim_schedule_work(vdpasim);
}
/* vdpa op: install the per-virtqueue interrupt callback and its cookie. */
static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim_virtqueue *vq = &vdpa_to_sim(vdpa)->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}
/* vdpa op: enable/disable a virtqueue.  The vringh is (re)initialized
 * only on the false->true transition so re-enabling keeps progress.
 */
static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	bool old_ready;
	mutex_lock(&vdpasim->mutex);
	old_ready = vq->ready;
	vq->ready = ready;
	if (vq->ready && !old_ready) {
		vdpasim_queue_ready(vdpasim, idx);
	}
	mutex_unlock(&vdpasim->mutex);
}
/* vdpa op: report whether the virtqueue has been enabled. */
static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	return vdpa_to_sim(vdpa)->vqs[idx].ready;
}
/* vdpa op: restore split-ring state (next avail index to process),
 * e.g. on the destination side of a migration.
 */
static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;
	mutex_lock(&vdpasim->mutex);
	vrh->last_avail_idx = state->split.avail_index;
	mutex_unlock(&vdpasim->mutex);
	return 0;
}
/* vdpa op: export the split-ring state (next avail index to process). */
static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	state->split.avail_index = vdpasim->vqs[idx].vring.last_avail_idx;
	return 0;
}
/* vdpa op: delegate per-queue statistics to the flavour's get_stats
 * hook; -EOPNOTSUPP when the flavour does not provide one.
 */
static int vdpasim_get_vq_stats(struct vdpa_device *vdpa, u16 idx,
				struct sk_buff *msg,
				struct netlink_ext_ack *extack)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (!vdpasim->dev_attr.get_stats)
		return -EOPNOTSUPP;

	return vdpasim->dev_attr.get_stats(vdpasim, idx, msg, extack);
}
/* vdpa op: required alignment of the virtqueues (page-sized here). */
static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}
/* vdpa op: map a virtqueue to its group — RX/TX in group 0, the
 * control queue (index 2) in group 1.
 */
static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	return idx == 2 ? 1 : 0;
}
static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
return vdpasim->dev_attr.supported_features;
}
/* vdpa op: backend features — queues may be enabled after DRIVER_OK. */
static u64 vdpasim_get_backend_features(const struct vdpa_device *vdpa)
{
	return BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK);
}
/* vdpa op: accept the driver's feature selection, masked to what we
 * offer.  ACCESS_PLATFORM is mandatory because all DMA goes through the
 * simulated iotlb.
 */
static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;
	vdpasim->features = features & vdpasim->dev_attr.supported_features;
	return 0;
}
static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
return vdpasim->features;
}
/* vdpa op: intentionally a no-op — the simulator never changes config
 * space asynchronously, so no config interrupt is delivered.
 */
static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}
/* vdpa op: maximum ring size supported per virtqueue. */
static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}
static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
return vdpasim->dev_attr.id;
}
/* vdpa op: the simulator has no PCI vendor, so report 0. */
static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}
/* vdpa op: read the device status byte under the device mutex. */
static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;
	mutex_lock(&vdpasim->mutex);
	status = vdpasim->status;
	mutex_unlock(&vdpasim->mutex);
	return status;
}
/* vdpa op: store the device status byte under the device mutex. */
static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	mutex_lock(&vdpasim->mutex);
	vdpasim->status = status;
	mutex_unlock(&vdpasim->mutex);
}
/* vdpa op: clear status and perform a full device reset. */
static int vdpasim_reset(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	mutex_lock(&vdpasim->mutex);
	vdpasim->status = 0;
	vdpasim_do_reset(vdpasim);
	mutex_unlock(&vdpasim->mutex);
	return 0;
}
/* vdpa op: stop processing; kicks received meanwhile are latched via
 * pending_kick and replayed on resume.
 */
static int vdpasim_suspend(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	mutex_lock(&vdpasim->mutex);
	vdpasim->running = false;
	mutex_unlock(&vdpasim->mutex);
	return 0;
}
/* vdpa op: resume processing and replay any kick latched while the
 * device was suspended.
 */
static int vdpasim_resume(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;
	mutex_lock(&vdpasim->mutex);
	vdpasim->running = true;
	if (vdpasim->pending_kick) {
		/* Process pending descriptors */
		for (i = 0; i < vdpasim->dev_attr.nvqs; ++i)
			vdpasim_kick_vq(vdpa, i);
		vdpasim->pending_kick = false;
	}
	mutex_unlock(&vdpasim->mutex);
	return 0;
}
static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
return vdpasim->dev_attr.config_size;
}
/* Copy @len bytes of config space, starting at @offset, into @buf.
 * The device type's get_config hook is invoked first so the cached
 * config is refreshed before the copy.  Out-of-range requests are
 * silently ignored.
 */
static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* Overflow-safe bounds check: "offset + len" could wrap around
	 * for large unsigned values and slip past a naive comparison.
	 */
	if (offset > vdpasim->dev_attr.config_size ||
	    len > vdpasim->dev_attr.config_size - offset)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}
/* Copy @len bytes from @buf into config space at @offset, then let the
 * device type react via its set_config hook.  Out-of-range requests are
 * silently ignored.
 */
static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* Overflow-safe bounds check: "offset + len" could wrap around
	 * for large unsigned values and slip past a naive comparison.
	 */
	if (offset > vdpasim->dev_attr.config_size ||
	    len > vdpasim->dev_attr.config_size - offset)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}
static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
return vdpasim->generation;
}
/* The simulator places no restriction on usable IOVA space. */
static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range;

	range.first = 0;
	range.last = ULLONG_MAX;

	return range;
}
/* Bind virtqueue group @group to address space @asid: every vq that
 * belongs to the group is switched to that address space's iotlb.
 *
 * Return: 0 on success, -EINVAL for an out-of-range group or asid.
 */
static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
				  unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb *iommu;
	int i;

	/* NOTE(review): groups appear to be indexed 0..ngroups-1 (see the
	 * asid check below using ">="), so this looks like it should be
	 * ">=" — confirm whether group == ngroups is ever valid.
	 */
	if (group > vdpasim->dev_attr.ngroups)
		return -EINVAL;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	iommu = &vdpasim->iommu[asid];

	mutex_lock(&vdpasim->mutex);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		if (vdpasim_get_vq_group(vdpa, i) == group)
			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);

	mutex_unlock(&vdpasim->mutex);

	return 0;
}
/* Replace the whole iotlb of address space @asid with the contents of
 * @iotlb (batched mapping interface).  On any copy failure the iotlb is
 * reset to empty rather than left partially populated.
 *
 * Return: 0 on success or a negative errno from vhost_iotlb_add_range().
 */
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iommu;
	u64 start = 0ULL, last = 0ULL - 1; /* full 0..U64_MAX range */
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);

	iommu = &vdpasim->iommu[asid];
	vhost_iotlb_reset(iommu);
	/* Leaving passthrough mode: explicit mappings take over. */
	vdpasim->iommu_pt[asid] = false;

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}
static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
struct vdpasim_mm_work mm_work;
mm_work.vdpasim = vdpasim;
mm_work.mm_to_bind = mm;
vdpasim_worker_change_mm_sync(vdpasim, &mm_work);
return mm_work.ret;
}
static void vdpasim_unbind_mm(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
struct vdpasim_mm_work mm_work;
mm_work.vdpasim = vdpasim;
mm_work.mm_to_bind = NULL;
vdpasim_worker_change_mm_sync(vdpasim, &mm_work);
}
/* Add one IOVA->PA mapping to address space @asid (incremental mapping
 * interface).  If the space was in passthrough mode, it is flushed first
 * so explicit mappings take over.
 *
 * Return: 0 on success, -EINVAL for a bad asid, or the errno from
 * vhost_iotlb_add_range_ctx().
 */
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
			   u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}
	ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
					iova + size - 1, pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}
/* Remove the IOVA range [iova, iova + size - 1] from address space
 * @asid.  A space still in passthrough mode is flushed first.
 *
 * Return: 0 on success, -EINVAL for a bad asid.
 */
static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
			     u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	/* Take iommu_lock before touching iommu_pt/iommu, matching
	 * vdpasim_dma_map(); the original code checked and reset the
	 * passthrough state without the lock, racing with concurrent
	 * map/unmap calls on the same asid.
	 */
	spin_lock(&vdpasim->iommu_lock);

	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}

	vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}
/* Final teardown, called by the vdpa core when the device is released:
 * stop the worker, clean up per-vq iov state, let the device type free
 * its resources, then drop the iotlbs and all allocations.
 */
static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	/* Make sure no work runs concurrently with the teardown below. */
	kthread_cancel_work_sync(&vdpasim->work);
	kthread_destroy_worker(vdpasim->worker);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	/* NOTE(review): assumes every device type sets dev_attr.free —
	 * confirm against the simulator device implementations.
	 */
	vdpasim->dev_attr.free(vdpasim);

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_reset(&vdpasim->iommu[i]);
	kfree(vdpasim->iommu);
	kfree(vdpasim->iommu_pt);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}
/* Op table for parent drivers that map DMA incrementally: exposes
 * dma_map/dma_unmap (and no set_map), unlike vdpasim_batch_config_ops.
 */
static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.get_backend_features   = vdpasim_get_backend_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset			= vdpasim_reset,
	.suspend		= vdpasim_suspend,
	.resume			= vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.dma_map                = vdpasim_dma_map,
	.dma_unmap              = vdpasim_dma_unmap,
	.bind_mm		= vdpasim_bind_mm,
	.unbind_mm		= vdpasim_unbind_mm,
	.free                   = vdpasim_free,
};
/* Op table for parent drivers that replace the whole iotlb at once:
 * exposes set_map instead of dma_map/dma_unmap.
 */
static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.get_backend_features   = vdpasim_get_backend_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset			= vdpasim_reset,
	.suspend		= vdpasim_suspend,
	.resume			= vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.set_map                = vdpasim_set_map,
	.bind_mm		= vdpasim_bind_mm,
	.unbind_mm		= vdpasim_unbind_mm,
	.free                   = vdpasim_free,
};
/* Module metadata; the DRV_* macros are defined outside this chunk. */
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
|
linux-master
|
drivers/vdpa/vdpa_sim/vdpa_sim.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Advanced Micro Devices, Inc */
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <linux/virtio_pci_modern.h>
#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include <linux/pds/pds_auxbus.h>
#include "vdpa_dev.h"
#include "aux_drv.h"
#include "cmds.h"
#include "debugfs.h"
static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev);
/* Map a generic vdpa_device back to its enclosing pds_vdpa_device. */
static struct pds_vdpa_device *vdpa_to_pdsv(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct pds_vdpa_device, vdpa_dev);
}
static int pds_vdpa_notify_handler(struct notifier_block *nb,
unsigned long ecode,
void *data)
{
struct pds_vdpa_device *pdsv = container_of(nb, struct pds_vdpa_device, nb);
struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
dev_dbg(dev, "%s: event code %lu\n", __func__, ecode);
if (ecode == PDS_EVENT_RESET || ecode == PDS_EVENT_LINK_CHANGE) {
if (pdsv->config_cb.callback)
pdsv->config_cb.callback(pdsv->config_cb.private);
}
return 0;
}
/* Register with the PDS core for async event notifications.  Safe to
 * call repeatedly: registration happens only when no handler is set
 * (notifier_call doubles as the "registered" flag).
 *
 * Return: 0 on success or if already registered, -EINVAL on failure.
 */
static int pds_vdpa_register_event_handler(struct pds_vdpa_device *pdsv)
{
	struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
	struct notifier_block *nb = &pdsv->nb;
	int err;

	if (!nb->notifier_call) {
		nb->notifier_call = pds_vdpa_notify_handler;
		err = pdsc_register_notify(nb);
		if (err) {
			nb->notifier_call = NULL;
			/* %pe (not %ps) decodes ERR_PTR() into an errno
			 * name; %ps would print a symbol name.
			 */
			dev_err(dev, "failed to register pds event handler: %pe\n",
				ERR_PTR(err));
			return -EINVAL;
		}
		dev_dbg(dev, "pds event handler registered\n");
	}

	return 0;
}
static void pds_vdpa_unregister_event_handler(struct pds_vdpa_device *pdsv)
{
if (pdsv->nb.notifier_call) {
pdsc_unregister_notify(&pdsv->nb);
pdsv->nb.notifier_call = NULL;
}
}
/* Cache the three ring addresses for vq @qid; they are handed to the
 * firmware later, when the vq is enabled.
 */
static int pds_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				   u64 desc_addr, u64 driver_addr, u64 device_addr)
{
	struct pds_vdpa_vq_info *vq = &vdpa_to_pdsv(vdpa_dev)->vqs[qid];

	vq->desc_addr = desc_addr;
	vq->avail_addr = driver_addr;
	vq->used_addr = device_addr;

	return 0;
}
/* Cache the ring size for vq @qid. */
static void pds_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
{
	vdpa_to_pdsv(vdpa_dev)->vqs[qid].q_len = num;
}
/* Doorbell: write the queue id to the vq's notification register. */
static void pds_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	iowrite16(qid, pdsv->vqs[qid].notify);
}
/* Store the per-vq callback; it is invoked from pds_vdpa_isr(). */
static void pds_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
			       struct vdpa_callback *cb)
{
	struct pds_vdpa_vq_info *vq = &vdpa_to_pdsv(vdpa_dev)->vqs[qid];

	vq->event_cb = *cb;
}
/* Per-vq interrupt handler: relay the interrupt to the callback the
 * driver registered via set_vq_cb, if any.
 */
static irqreturn_t pds_vdpa_isr(int irq, void *data)
{
	struct pds_vdpa_vq_info *vq = data;

	if (vq->event_cb.callback)
		vq->event_cb.callback(vq->event_cb.private);

	return IRQ_HANDLED;
}
static void pds_vdpa_release_irq(struct pds_vdpa_device *pdsv, int qid)
{
if (pdsv->vqs[qid].irq == VIRTIO_MSI_NO_VECTOR)
return;
free_irq(pdsv->vqs[qid].irq, &pdsv->vqs[qid]);
pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
}
/* Enable or disable vq @qid in firmware.
 *
 * On enable, the cached vq setup is pushed to the DSC in a single init
 * command; if that fails the vq stays marked not-ready.  On disable the
 * vq is reset in firmware (failure is logged but the vq is still marked
 * not-ready locally).
 */
static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u64 driver_features;
	u16 invert_idx = 0;
	int err;

	dev_dbg(dev, "%s: qid %d ready %d => %d\n",
		__func__, qid, pdsv->vqs[qid].ready, ready);

	/* No state change, nothing to tell the firmware. */
	if (ready == pdsv->vqs[qid].ready)
		return;

	/* Packed rings store indices with the wrap counter bit inverted;
	 * see the comment in pds_vdpa_set_vq_state().
	 */
	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
		invert_idx = PDS_VDPA_PACKED_INVERT_IDX;

	if (ready) {
		/* Pass vq setup info to DSC using adminq to gather up and
		 * send all info at once so FW can do its full set up in
		 * one easy operation
		 */
		err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "Failed to init vq %d: %pe\n",
				qid, ERR_PTR(err));
			ready = false;
		}
	} else {
		err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err)
			dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
	}

	pdsv->vqs[qid].ready = ready;
}
/* Reflects the last state applied by pds_vdpa_set_vq_ready(). */
static bool pds_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	return vdpa_to_pdsv(vdpa_dev)->vqs[qid].ready;
}
/* Set the cached avail/used positions of vq @qid.  Only allowed while
 * the vq is disabled; the values are applied when the vq is enabled.
 *
 * Return: 0 on success, -EINVAL if the vq is currently ready.
 */
static int pds_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 const struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	u64 driver_features;
	u16 avail;
	u16 used;

	if (pdsv->vqs[qid].ready) {
		dev_err(dev, "Setting device position is denied while vq is enabled\n");
		return -EINVAL;
	}

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		/* Pack the 15-bit index and the wrap counter (bit 15). */
		avail = state->packed.last_avail_idx |
			(state->packed.last_avail_counter << 15);
		used = state->packed.last_used_idx |
		       (state->packed.last_used_counter << 15);

		/* The avail and used index are stored with the packed wrap
		 * counter bit inverted. This way, in case set_vq_state is
		 * not called, the initial value can be set to zero prior to
		 * feature negotiation, and it is good for both packed and
		 * split vq.
		 */
		avail ^= PDS_VDPA_PACKED_INVERT_IDX;
		used ^= PDS_VDPA_PACKED_INVERT_IDX;
	} else {
		avail = state->split.avail_index;
		/* state->split does not provide a used_index:
		 * the vq will be set to "empty" here, and the vq will read
		 * the current used index the next time the vq is kicked.
		 */
		used = avail;
	}

	if (used != avail) {
		dev_dbg(dev, "Setting used equal to avail, for interoperability\n");
		used = avail;
	}

	pdsv->vqs[qid].avail_idx = avail;
	pdsv->vqs[qid].used_idx = used;

	return 0;
}
/* Report the cached avail/used positions of vq @qid.  Only allowed
 * while the vq is disabled (while enabled, firmware owns the indices).
 *
 * Return: 0 on success, -EINVAL if the vq is currently ready.
 */
static int pds_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	u64 driver_features;
	u16 avail;
	u16 used;

	if (pdsv->vqs[qid].ready) {
		dev_err(dev, "Getting device position is denied while vq is enabled\n");
		return -EINVAL;
	}

	avail = pdsv->vqs[qid].avail_idx;
	used = pdsv->vqs[qid].used_idx;

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		/* Undo the wrap-bit inversion applied in set_vq_state(),
		 * then split into the 15-bit index and the counter bit.
		 */
		avail ^= PDS_VDPA_PACKED_INVERT_IDX;
		used ^= PDS_VDPA_PACKED_INVERT_IDX;

		state->packed.last_avail_idx = avail & 0x7fff;
		state->packed.last_avail_counter = avail >> 15;
		state->packed.last_used_idx = used & 0x7fff;
		state->packed.last_used_counter = used >> 15;
	} else {
		state->split.avail_index = avail;
		/* state->split does not provide a used_index. */
	}

	return 0;
}
/* Describe the doorbell region of vq @qid for direct mapping.  When the
 * device reports no notify_offset_multiplier, a full page is assumed.
 */
static struct vdpa_notification_area
pds_vdpa_get_vq_notification(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct virtio_pci_modern_device *vd_mdev = &pdsv->vdpa_aux->vd_mdev;
	struct vdpa_notification_area area;

	area.addr = pdsv->vqs[qid].notify_pa;
	if (vd_mdev->notify_offset_multiplier)
		area.size = vd_mdev->notify_offset_multiplier;
	else
		area.size = PDS_PAGE_SIZE;

	return area;
}
/* Return the irq bound to vq @qid (VIRTIO_MSI_NO_VECTOR when none). */
static int pds_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
{
	return vdpa_to_pdsv(vdpa_dev)->vqs[qid].irq;
}
/* Required alignment of virtqueue memory. */
static u32 pds_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return PDS_PAGE_SIZE;
}
/* All vqs share a single virtqueue group (group 0). */
static u32 pds_vdpa_get_vq_group(struct vdpa_device *vdpa_dev, u16 idx)
{
	return 0;
}
static u64 pds_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
return pdsv->supported_features;
}
/* Negotiate the driver's feature selection and push it to the device.
 *
 * Return: 0 on success, -EOPNOTSUPP if ACCESS_PLATFORM is missing from
 * a non-empty selection or unsupported bits were requested.
 */
static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u64 driver_features;
	u64 nego_features;
	u64 hw_features;
	u64 missing;

	/* features == 0 (reset) is allowed through. */
	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
		dev_err(dev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
		return -EOPNOTSUPP;
	}

	/* Check for valid feature bits */
	nego_features = features & pdsv->supported_features;
	missing = features & ~nego_features;
	if (missing) {
		dev_err(dev, "Can't support all requested features in %#llx, missing %#llx features\n",
			features, missing);
		return -EOPNOTSUPP;
	}

	pdsv->negotiated_features = nego_features;

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	dev_dbg(dev, "%s: %#llx => %#llx\n",
		__func__, driver_features, nego_features);

	/* if we're faking the F_MAC, strip it before writing to device */
	hw_features = le64_to_cpu(pdsv->vdpa_aux->ident.hw_features);
	if (!(hw_features & BIT_ULL(VIRTIO_NET_F_MAC)))
		nego_features &= ~BIT_ULL(VIRTIO_NET_F_MAC);

	/* Skip the device write if nothing changed. */
	if (driver_features == nego_features)
		return 0;

	vp_modern_set_features(&pdsv->vdpa_aux->vd_mdev, nego_features);

	return 0;
}
static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
return pdsv->negotiated_features;
}
/* Save the driver's config-change callback; it is invoked from the PDS
 * event notifier on RESET/LINK_CHANGE events.
 */
static void pds_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				   struct vdpa_callback *cb)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->config_cb.callback = cb->callback;
	pdsv->config_cb.private = cb->private;
}
/* Largest ring size supported, capped for userspace compatibility. */
static u16 pds_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	/* qemu has assert() that vq_num_max <= VIRTQUEUE_MAX_SIZE (1024) */
	return min_t(u16, 1024, BIT(le16_to_cpu(pdsv->vdpa_aux->ident.max_qlen)));
}
/* This driver only implements virtio-net devices. */
static u32 pds_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	return VIRTIO_ID_NET;
}
/* PCI vendor id of the underlying hardware. */
static u32 pds_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	return PCI_VENDOR_ID_PENSANDO;
}
/* Read the virtio status byte from the device's modern config space. */
static u8 pds_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct virtio_pci_modern_device *vd_mdev = &pdsv->vdpa_aux->vd_mdev;

	return vp_modern_get_status(vd_mdev);
}
/* Allocate one MSI-X vector per supported vq and attach pds_vdpa_isr to
 * each vq in use.  On failure, irqs attached so far and the vectors are
 * released.
 *
 * Return: 0 on success or a negative errno.
 */
static int pds_vdpa_request_irqs(struct pds_vdpa_device *pdsv)
{
	struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
	struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
	struct device *dev = &pdsv->vdpa_dev.dev;
	int max_vq, nintrs, qid, err;

	/* Vectors are allocated for the max supported, not just num_vqs. */
	max_vq = vdpa_aux->vdpa_mdev.max_supported_vqs;

	nintrs = pci_alloc_irq_vectors(pdev, max_vq, max_vq, PCI_IRQ_MSIX);
	if (nintrs < 0) {
		dev_err(dev, "Couldn't get %d msix vectors: %pe\n",
			max_vq, ERR_PTR(nintrs));
		return nintrs;
	}

	for (qid = 0; qid < pdsv->num_vqs; ++qid) {
		int irq = pci_irq_vector(pdev, qid);

		snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
			 "vdpa-%s-%d", dev_name(dev), qid);

		err = request_irq(irq, pds_vdpa_isr, 0,
				  pdsv->vqs[qid].irq_name,
				  &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "%s: no irq for qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
			goto err_release;
		}

		pdsv->vqs[qid].irq = irq;
	}

	vdpa_aux->nintrs = nintrs;

	return 0;

err_release:
	/* Unwind only the vqs whose request_irq() succeeded. */
	while (qid--)
		pds_vdpa_release_irq(pdsv, qid);

	pci_free_irq_vectors(pdev);

	vdpa_aux->nintrs = 0;

	return err;
}
/* Undo pds_vdpa_request_irqs(): free every vq irq and the MSI-X
 * vectors.  A zero nintrs means there is nothing to release.
 */
static void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
{
	struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
	int qid;

	if (!vdpa_aux->nintrs)
		return;

	for (qid = 0; qid < pdsv->num_vqs; qid++)
		pds_vdpa_release_irq(pdsv, qid);

	pci_free_irq_vectors(vdpa_aux->padev->vf_pdev);

	vdpa_aux->nintrs = 0;
}
/* Apply a virtio status transition, performing the side work each
 * transition needs: irq setup on DRIVER_OK, device reset on status 0,
 * notify-area mapping on FEATURES_OK, and irq teardown when DRIVER_OK
 * is cleared.
 */
static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u8 old_status;
	int i;

	old_status = pds_vdpa_get_status(vdpa_dev);
	dev_dbg(dev, "%s: old %#x new %#x\n", __func__, old_status, status);

	/* DRIVER_OK being newly set: the vqs need irqs before going live. */
	if (status & ~old_status & VIRTIO_CONFIG_S_DRIVER_OK) {
		if (pds_vdpa_request_irqs(pdsv))
			status = old_status | VIRTIO_CONFIG_S_FAILED;
	}

	pds_vdpa_cmd_set_status(pdsv, status);

	/* Note: still working with FW on the need for this reset cmd */
	if (status == 0) {
		pds_vdpa_cmd_reset(pdsv);

		for (i = 0; i < pdsv->num_vqs; i++) {
			pdsv->vqs[i].avail_idx = 0;
			pdsv->vqs[i].used_idx = 0;
		}

		pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);
	}

	/* FEATURES_OK being newly set: map each vq's notification area. */
	if (status & ~old_status & VIRTIO_CONFIG_S_FEATURES_OK) {
		for (i = 0; i < pdsv->num_vqs; i++) {
			pdsv->vqs[i].notify =
				vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
							i, &pdsv->vqs[i].notify_pa);
		}
	}

	/* DRIVER_OK being cleared: the irqs are no longer needed. */
	if (old_status & ~status & VIRTIO_CONFIG_S_DRIVER_OK)
		pds_vdpa_release_irqs(pdsv);
}
/* Reset the bookkeeping for vq @qid to a clean initial state; all
 * fields not listed are zeroed, matching the original memset().
 */
static void pds_vdpa_init_vqs_entry(struct pds_vdpa_device *pdsv, int qid,
				    void __iomem *notify)
{
	pdsv->vqs[qid] = (struct pds_vdpa_vq_info) {
		.qid = qid,
		.pdsv = pdsv,
		.ready = false,
		.irq = VIRTIO_MSI_NO_VECTOR,
		.notify = notify,
	};
}
/* Full device reset: reset the vqs in firmware if the driver was
 * running, clear the status, and reinitialize local vq bookkeeping.
 *
 * Return: always 0 (vq reset failures are only logged; note that a
 * failure also leaves err set, which skips the reinit loop below).
 */
static int pds_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev;
	int err = 0;
	u8 status;
	int i;

	dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
	status = pds_vdpa_get_status(vdpa_dev);

	/* Already reset, nothing to do. */
	if (status == 0)
		return 0;

	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		/* Reset the vqs */
		for (i = 0; i < pdsv->num_vqs && !err; i++) {
			err = pds_vdpa_cmd_reset_vq(pdsv, i, 0, &pdsv->vqs[i]);
			if (err)
				dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
					__func__, i, ERR_PTR(err));
		}
	}

	pds_vdpa_set_status(vdpa_dev, 0);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		/* Reset the vq info */
		for (i = 0; i < pdsv->num_vqs && !err; i++)
			pds_vdpa_init_vqs_entry(pdsv, i, pdsv->vqs[i].notify);
	}

	return 0;
}
/* Config space is the standard virtio-net layout. */
static size_t pds_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	return sizeof(struct virtio_net_config);
}
/* Read @len bytes of virtio-net config space at @offset into @buf.
 * Out-of-range requests are warned about and ignored.
 */
static void pds_vdpa_get_config(struct vdpa_device *vdpa_dev,
				unsigned int offset,
				void *buf, unsigned int len)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	void __iomem *device;

	/* Overflow-safe bounds check ("offset + len" could wrap), and %u
	 * for the unsigned arguments.
	 */
	if (offset > sizeof(struct virtio_net_config) ||
	    len > sizeof(struct virtio_net_config) - offset) {
		WARN(true, "%s: bad read, offset %u len %u\n", __func__, offset, len);
		return;
	}

	device = pdsv->vdpa_aux->vd_mdev.device;
	memcpy_fromio(buf, device + offset, len);
}
/* Write @len bytes from @buf into virtio-net config space at @offset.
 * Out-of-range requests are warned about and ignored.
 */
static void pds_vdpa_set_config(struct vdpa_device *vdpa_dev,
				unsigned int offset, const void *buf,
				unsigned int len)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	void __iomem *device;

	/* Overflow-safe bounds check; also fix the copy-pasted "bad read"
	 * message (this is the write path) and use %u for unsigned args.
	 */
	if (offset > sizeof(struct virtio_net_config) ||
	    len > sizeof(struct virtio_net_config) - offset) {
		WARN(true, "%s: bad write, offset %u len %u\n", __func__, offset, len);
		return;
	}

	device = pdsv->vdpa_aux->vd_mdev.device;
	memcpy_toio(device + offset, buf, len);
}
/* vdpa op table wired into the device at vdpa_alloc_device() time. */
static const struct vdpa_config_ops pds_vdpa_ops = {
	.set_vq_address		= pds_vdpa_set_vq_address,
	.set_vq_num		= pds_vdpa_set_vq_num,
	.kick_vq		= pds_vdpa_kick_vq,
	.set_vq_cb		= pds_vdpa_set_vq_cb,
	.set_vq_ready		= pds_vdpa_set_vq_ready,
	.get_vq_ready		= pds_vdpa_get_vq_ready,
	.set_vq_state		= pds_vdpa_set_vq_state,
	.get_vq_state		= pds_vdpa_get_vq_state,
	.get_vq_notification	= pds_vdpa_get_vq_notification,
	.get_vq_irq		= pds_vdpa_get_vq_irq,
	.get_vq_align		= pds_vdpa_get_vq_align,
	.get_vq_group		= pds_vdpa_get_vq_group,
	.get_device_features	= pds_vdpa_get_device_features,
	.set_driver_features	= pds_vdpa_set_driver_features,
	.get_driver_features	= pds_vdpa_get_driver_features,
	.set_config_cb		= pds_vdpa_set_config_cb,
	.get_vq_num_max		= pds_vdpa_get_vq_num_max,
	.get_device_id		= pds_vdpa_get_device_id,
	.get_vendor_id		= pds_vdpa_get_vendor_id,
	.get_status		= pds_vdpa_get_status,
	.set_status		= pds_vdpa_set_status,
	.reset			= pds_vdpa_reset,
	.get_config_size	= pds_vdpa_get_config_size,
	.get_config		= pds_vdpa_get_config,
	.set_config		= pds_vdpa_set_config,
};
/* Virtio ids the management device advertises (net only). */
static struct virtio_device_id pds_vdpa_id_table[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};
/* Create and register one vDPA device instance on this VF.
 *
 * Called by the vdpa management framework with the vdpa_dev_lock held
 * (hence the _vdpa_register_device() call below).  Only one vDPA device
 * per VF is supported.
 *
 * Return: 0 on success or a negative errno.
 */
static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			    const struct vdpa_dev_set_config *add_config)
{
	struct pds_vdpa_aux *vdpa_aux;
	struct pds_vdpa_device *pdsv;
	struct vdpa_mgmt_dev *mgmt;
	u16 fw_max_vqs, vq_pairs;
	struct device *dma_dev;
	struct pci_dev *pdev;
	struct device *dev;
	int err;
	int i;

	vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
	dev = &vdpa_aux->padev->aux_dev.dev;
	mgmt = &vdpa_aux->vdpa_mdev;

	if (vdpa_aux->pdsv) {
		dev_warn(dev, "Multiple vDPA devices on a VF is not supported.\n");
		return -EOPNOTSUPP;
	}

	pdsv = vdpa_alloc_device(struct pds_vdpa_device, vdpa_dev,
				 dev, &pds_vdpa_ops, 1, 1, name, false);
	if (IS_ERR(pdsv)) {
		dev_err(dev, "Failed to allocate vDPA structure: %pe\n", pdsv);
		return PTR_ERR(pdsv);
	}

	vdpa_aux->pdsv = pdsv;
	pdsv->vdpa_aux = vdpa_aux;

	pdev = vdpa_aux->padev->vf_pdev;
	dma_dev = &pdev->dev;
	pdsv->vdpa_dev.dma_dev = dma_dev;

	pdsv->supported_features = mgmt->supported_features;

	/* Honor a user-requested feature subset, if one was given. */
	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		u64 unsupp_features =
			add_config->device_features & ~pdsv->supported_features;

		if (unsupp_features) {
			dev_err(dev, "Unsupported features: %#llx\n", unsupp_features);
			err = -EOPNOTSUPP;
			goto err_unmap;
		}
		pdsv->supported_features = add_config->device_features;
	}

	err = pds_vdpa_cmd_reset(pdsv);
	if (err) {
		dev_err(dev, "Failed to reset hw: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	err = pds_vdpa_init_hw(pdsv);
	if (err) {
		dev_err(dev, "Failed to init hw: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	fw_max_vqs = le16_to_cpu(pdsv->vdpa_aux->ident.max_vqs);
	vq_pairs = fw_max_vqs / 2;

	/* Make sure we have the queues being requested.
	 * BIT_ULL for consistency with the other u64 mask tests here
	 * (the original used a plain int "1 <<" against the u64 mask).
	 */
	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
		vq_pairs = add_config->net.max_vq_pairs;

	pdsv->num_vqs = 2 * vq_pairs;
	if (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		pdsv->num_vqs++;

	if (pdsv->num_vqs > fw_max_vqs) {
		dev_err(dev, "%s: queue count requested %u greater than max %u\n",
			__func__, pdsv->num_vqs, fw_max_vqs);
		err = -ENOSPC;
		goto err_unmap;
	}

	if (pdsv->num_vqs != fw_max_vqs) {
		err = pds_vdpa_cmd_set_max_vq_pairs(pdsv, vq_pairs);
		if (err) {
			dev_err(dev, "Failed to set max_vq_pairs: %pe\n",
				ERR_PTR(err));
			goto err_unmap;
		}
	}

	/* Set a mac, either from the user config if provided
	 * or use the device's mac if not 00:..:00
	 * or set a random mac
	 */
	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
		ether_addr_copy(pdsv->mac, add_config->net.mac);
	} else {
		struct virtio_net_config __iomem *vc;

		vc = pdsv->vdpa_aux->vd_mdev.device;
		memcpy_fromio(pdsv->mac, vc->mac, sizeof(pdsv->mac));
		if (is_zero_ether_addr(pdsv->mac) &&
		    (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_MAC))) {
			eth_random_addr(pdsv->mac);
			dev_info(dev, "setting random mac %pM\n", pdsv->mac);
		}
	}
	pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);

	for (i = 0; i < pdsv->num_vqs; i++) {
		void __iomem *notify;

		notify = vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
						 i, &pdsv->vqs[i].notify_pa);
		pds_vdpa_init_vqs_entry(pdsv, i, notify);
	}

	pdsv->vdpa_dev.mdev = &vdpa_aux->vdpa_mdev;

	err = pds_vdpa_register_event_handler(pdsv);
	if (err) {
		dev_err(dev, "Failed to register for PDS events: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	/* We use the _vdpa_register_device() call rather than the
	 * vdpa_register_device() to avoid a deadlock because our
	 * dev_add() is called with the vdpa_dev_lock already set
	 * by vdpa_nl_cmd_dev_add_set_doit()
	 */
	err = _vdpa_register_device(&pdsv->vdpa_dev, pdsv->num_vqs);
	if (err) {
		dev_err(dev, "Failed to register to vDPA bus: %pe\n", ERR_PTR(err));
		goto err_unevent;
	}

	pds_vdpa_debugfs_add_vdpadev(vdpa_aux);

	return 0;

err_unevent:
	pds_vdpa_unregister_event_handler(pdsv);
err_unmap:
	put_device(&pdsv->vdpa_dev.dev);
	vdpa_aux->pdsv = NULL;
	return err;
}
/* Tear down the vDPA device created by pds_vdpa_dev_add(): unhook the
 * event handler, unregister from the vdpa bus, reset the hardware, and
 * clear the per-VF bookkeeping.
 */
static void pds_vdpa_dev_del(struct vdpa_mgmt_dev *mdev,
			     struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_vdpa_aux *vdpa_aux;

	pds_vdpa_unregister_event_handler(pdsv);

	vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
	_vdpa_unregister_device(vdpa_dev);

	pds_vdpa_cmd_reset(vdpa_aux->pdsv);
	pds_vdpa_debugfs_reset_vdpadev(vdpa_aux);

	vdpa_aux->pdsv = NULL;

	dev_info(&vdpa_aux->padev->aux_dev.dev, "Removed vdpa device\n");
}
/* Management-device ops used by the vdpa netlink interface. */
static const struct vdpa_mgmtdev_ops pds_vdpa_mgmt_dev_ops = {
	.dev_add = pds_vdpa_dev_add,
	.dev_del = pds_vdpa_dev_del
};
/* Query device capabilities over the PF adminq and fill in the
 * vdpa_mgmt_dev structure (supported vq count, features, config attrs).
 *
 * Return: 0 on success or a negative errno.
 */
int pds_vdpa_get_mgmt_info(struct pds_vdpa_aux *vdpa_aux)
{
	union pds_core_adminq_cmd cmd = {
		.vdpa_ident.opcode = PDS_VDPA_CMD_IDENT,
		.vdpa_ident.vf_id = cpu_to_le16(vdpa_aux->vf_id),
	};
	union pds_core_adminq_comp comp = {};
	struct vdpa_mgmt_dev *mgmt;
	struct pci_dev *pf_pdev;
	struct device *pf_dev;
	struct pci_dev *pdev;
	dma_addr_t ident_pa;
	struct device *dev;
	u16 dev_intrs;
	u16 max_vqs;
	int err;

	dev = &vdpa_aux->padev->aux_dev.dev;
	pdev = vdpa_aux->padev->vf_pdev;
	mgmt = &vdpa_aux->vdpa_mdev;

	/* Get resource info through the PF's adminq. It is a block of info,
	 * so we need to map some memory for PF to make available to the
	 * firmware for writing the data.
	 */
	pf_pdev = pci_physfn(vdpa_aux->padev->vf_pdev);
	pf_dev = &pf_pdev->dev;
	ident_pa = dma_map_single(pf_dev, &vdpa_aux->ident,
				  sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
	if (dma_mapping_error(pf_dev, ident_pa)) {
		dev_err(dev, "Failed to map ident space\n");
		return -ENOMEM;
	}

	cmd.vdpa_ident.ident_pa = cpu_to_le64(ident_pa);
	cmd.vdpa_ident.len = cpu_to_le32(sizeof(vdpa_aux->ident));
	err = pds_client_adminq_cmd(vdpa_aux->padev, &cmd,
				    sizeof(cmd.vdpa_ident), &comp, 0);
	/* Unmap before inspecting the result so the buffer is released on
	 * both the success and failure paths.
	 */
	dma_unmap_single(pf_dev, ident_pa,
			 sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
	if (err) {
		dev_err(dev, "Failed to ident hw, status %d: %pe\n",
			comp.status, ERR_PTR(err));
		return err;
	}

	/* Usable vq count is limited by both firmware and MSI-X vectors. */
	max_vqs = le16_to_cpu(vdpa_aux->ident.max_vqs);
	dev_intrs = pci_msix_vec_count(pdev);
	dev_dbg(dev, "ident.max_vqs %d dev_intrs %d\n", max_vqs, dev_intrs);

	max_vqs = min_t(u16, dev_intrs, max_vqs);
	mgmt->max_supported_vqs = min_t(u16, PDS_VDPA_MAX_QUEUES, max_vqs);
	vdpa_aux->nintrs = 0;

	mgmt->ops = &pds_vdpa_mgmt_dev_ops;
	mgmt->id_table = pds_vdpa_id_table;
	mgmt->device = dev;
	mgmt->supported_features = le64_to_cpu(vdpa_aux->ident.hw_features);

	/* advertise F_MAC even if the device doesn't */
	mgmt->supported_features |= BIT_ULL(VIRTIO_NET_F_MAC);

	mgmt->config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);

	return 0;
}
|
linux-master
|
drivers/vdpa/pds/vdpa_dev.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Advanced Micro Devices, Inc */
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include <linux/pds/pds_auxbus.h>
#include "aux_drv.h"
#include "vdpa_dev.h"
#include "debugfs.h"
/* Parent debugfs directory shared by all pds_vdpa devices. */
static struct dentry *dbfs_dir;

/* Create the driver's top-level debugfs directory. */
void pds_vdpa_debugfs_create(void)
{
	dbfs_dir = debugfs_create_dir(PDS_VDPA_DRV_NAME, NULL);
}

/* Remove the directory and everything beneath it. */
void pds_vdpa_debugfs_destroy(void)
{
	debugfs_remove_recursive(dbfs_dir);
	dbfs_dir = NULL;
}
/* Print " <name>" when flag __name is set in __f.  The &#__name[16]
 * skips the 16-character "VIRTIO_CONFIG_S_" prefix of the stringized
 * macro name, leaving just the short bit name.
 */
#define PRINT_SBIT_NAME(__seq, __f, __name)                     \
	do {                                                    \
		if ((__f) & (__name))                           \
			seq_printf(__seq, " %s", &#__name[16]); \
	} while (0)

/* Emit a one-line, human-readable decode of a virtio status byte. */
static void print_status_bits(struct seq_file *seq, u8 status)
{
	seq_puts(seq, "status:");
	PRINT_SBIT_NAME(seq, status, VIRTIO_CONFIG_S_ACKNOWLEDGE);
	PRINT_SBIT_NAME(seq, status, VIRTIO_CONFIG_S_DRIVER);
	PRINT_SBIT_NAME(seq, status, VIRTIO_CONFIG_S_DRIVER_OK);
	PRINT_SBIT_NAME(seq, status, VIRTIO_CONFIG_S_FEATURES_OK);
	PRINT_SBIT_NAME(seq, status, VIRTIO_CONFIG_S_NEEDS_RESET);
	PRINT_SBIT_NAME(seq, status, VIRTIO_CONFIG_S_FAILED);
	seq_puts(seq, "\n");
}
/*
 * Decode a 64-bit virtio feature word into a space-separated list of
 * feature names.  Known bits print their macro name; any other set bit
 * prints as "bit_<n>"; clear bits print nothing.
 */
static void print_feature_bits_all(struct seq_file *seq, u64 features)
{
	/* Bit-number -> name table for the feature bits we know about. */
	static const struct {
		unsigned int bit;
		const char *name;
	} known[] = {
		{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
		{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
		{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS" },
		{ VIRTIO_NET_F_MTU, "VIRTIO_NET_F_MTU" },
		{ VIRTIO_NET_F_MAC, "VIRTIO_NET_F_MAC" },
		{ VIRTIO_NET_F_GUEST_TSO4, "VIRTIO_NET_F_GUEST_TSO4" },
		{ VIRTIO_NET_F_GUEST_TSO6, "VIRTIO_NET_F_GUEST_TSO6" },
		{ VIRTIO_NET_F_GUEST_ECN, "VIRTIO_NET_F_GUEST_ECN" },
		{ VIRTIO_NET_F_GUEST_UFO, "VIRTIO_NET_F_GUEST_UFO" },
		{ VIRTIO_NET_F_HOST_TSO4, "VIRTIO_NET_F_HOST_TSO4" },
		{ VIRTIO_NET_F_HOST_TSO6, "VIRTIO_NET_F_HOST_TSO6" },
		{ VIRTIO_NET_F_HOST_ECN, "VIRTIO_NET_F_HOST_ECN" },
		{ VIRTIO_NET_F_HOST_UFO, "VIRTIO_NET_F_HOST_UFO" },
		{ VIRTIO_NET_F_MRG_RXBUF, "VIRTIO_NET_F_MRG_RXBUF" },
		{ VIRTIO_NET_F_STATUS, "VIRTIO_NET_F_STATUS" },
		{ VIRTIO_NET_F_CTRL_VQ, "VIRTIO_NET_F_CTRL_VQ" },
		{ VIRTIO_NET_F_CTRL_RX, "VIRTIO_NET_F_CTRL_RX" },
		{ VIRTIO_NET_F_CTRL_VLAN, "VIRTIO_NET_F_CTRL_VLAN" },
		{ VIRTIO_NET_F_CTRL_RX_EXTRA, "VIRTIO_NET_F_CTRL_RX_EXTRA" },
		{ VIRTIO_NET_F_GUEST_ANNOUNCE, "VIRTIO_NET_F_GUEST_ANNOUNCE" },
		{ VIRTIO_NET_F_MQ, "VIRTIO_NET_F_MQ" },
		{ VIRTIO_NET_F_CTRL_MAC_ADDR, "VIRTIO_NET_F_CTRL_MAC_ADDR" },
		{ VIRTIO_NET_F_HASH_REPORT, "VIRTIO_NET_F_HASH_REPORT" },
		{ VIRTIO_NET_F_RSS, "VIRTIO_NET_F_RSS" },
		{ VIRTIO_NET_F_RSC_EXT, "VIRTIO_NET_F_RSC_EXT" },
		{ VIRTIO_NET_F_STANDBY, "VIRTIO_NET_F_STANDBY" },
		{ VIRTIO_NET_F_SPEED_DUPLEX, "VIRTIO_NET_F_SPEED_DUPLEX" },
		{ VIRTIO_F_NOTIFY_ON_EMPTY, "VIRTIO_F_NOTIFY_ON_EMPTY" },
		{ VIRTIO_F_ANY_LAYOUT, "VIRTIO_F_ANY_LAYOUT" },
		{ VIRTIO_F_VERSION_1, "VIRTIO_F_VERSION_1" },
		{ VIRTIO_F_ACCESS_PLATFORM, "VIRTIO_F_ACCESS_PLATFORM" },
		{ VIRTIO_F_RING_PACKED, "VIRTIO_F_RING_PACKED" },
		{ VIRTIO_F_ORDER_PLATFORM, "VIRTIO_F_ORDER_PLATFORM" },
		{ VIRTIO_F_SR_IOV, "VIRTIO_F_SR_IOV" },
	};
	unsigned int k;
	int i;

	seq_puts(seq, "features:");

	for (i = 0; i < (sizeof(u64) * 8); i++) {
		if (!(features & BIT_ULL(i)))
			continue;

		for (k = 0; k < ARRAY_SIZE(known); k++) {
			if (known[k].bit == (unsigned int)i)
				break;
		}

		if (k < ARRAY_SIZE(known))
			seq_printf(seq, " %s", known[k].name);
		else
			seq_printf(seq, " bit_%d", i);
	}

	seq_puts(seq, "\n");
}
/* Create a per-VF debugfs directory named after the VF's PCI address. */
void pds_vdpa_debugfs_add_pcidev(struct pds_vdpa_aux *vdpa_aux)
{
	vdpa_aux->dentry = debugfs_create_dir(pci_name(vdpa_aux->padev->vf_pdev), dbfs_dir);
}

/* seq_file show handler: dump the device identity/capability info. */
static int identity_show(struct seq_file *seq, void *v)
{
	struct pds_vdpa_aux *vdpa_aux = seq->private;
	struct vdpa_mgmt_dev *mgmt;
	u64 hw_features;

	seq_printf(seq, "aux_dev: %s\n",
		   dev_name(&vdpa_aux->padev->aux_dev.dev));

	mgmt = &vdpa_aux->vdpa_mdev;
	seq_printf(seq, "max_vqs: %d\n", mgmt->max_supported_vqs);
	seq_printf(seq, "config_attr_mask: %#llx\n", mgmt->config_attr_mask);
	hw_features = le64_to_cpu(vdpa_aux->ident.hw_features);
	seq_printf(seq, "hw_features: %#llx\n", hw_features);
	print_feature_bits_all(seq, hw_features);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(identity);

/* Expose the identity info as a read-only "identity" file in the VF dir. */
void pds_vdpa_debugfs_add_ident(struct pds_vdpa_aux *vdpa_aux)
{
	debugfs_create_file("identity", 0400, vdpa_aux->dentry,
			    vdpa_aux, &identity_fops);
}
/* seq_file show handler: dump the live virtio-net config space plus the
 * device status and negotiated feature state for one vdpa instance.
 */
static int config_show(struct seq_file *seq, void *v)
{
	struct pds_vdpa_device *pdsv = seq->private;
	struct virtio_net_config vc;
	u8 status;

	/* Snapshot the config from the device BAR before formatting it */
	memcpy_fromio(&vc, pdsv->vdpa_aux->vd_mdev.device,
		      sizeof(struct virtio_net_config));

	seq_printf(seq, "mac: %pM\n", vc.mac);
	/* "true" => interpret fields as little-endian (modern virtio) */
	seq_printf(seq, "max_virtqueue_pairs: %d\n",
		   __virtio16_to_cpu(true, vc.max_virtqueue_pairs));
	seq_printf(seq, "mtu: %d\n", __virtio16_to_cpu(true, vc.mtu));
	seq_printf(seq, "speed: %d\n", le32_to_cpu(vc.speed));
	seq_printf(seq, "duplex: %d\n", vc.duplex);
	seq_printf(seq, "rss_max_key_size: %d\n", vc.rss_max_key_size);
	seq_printf(seq, "rss_max_indirection_table_length: %d\n",
		   le16_to_cpu(vc.rss_max_indirection_table_length));
	seq_printf(seq, "supported_hash_types: %#x\n",
		   le32_to_cpu(vc.supported_hash_types));
	seq_printf(seq, "vn_status: %#x\n",
		   __virtio16_to_cpu(true, vc.status));

	status = vp_modern_get_status(&pdsv->vdpa_aux->vd_mdev);
	seq_printf(seq, "dev_status: %#x\n", status);
	print_status_bits(seq, status);

	seq_printf(seq, "negotiated_features: %#llx\n", pdsv->negotiated_features);
	print_feature_bits_all(seq, pdsv->negotiated_features);
	seq_printf(seq, "vdpa_index: %d\n", pdsv->vdpa_index);
	seq_printf(seq, "num_vqs: %d\n", pdsv->num_vqs);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(config);

/* seq_file show handler: dump the driver's cached state for one virtqueue. */
static int vq_show(struct seq_file *seq, void *v)
{
	struct pds_vdpa_vq_info *vq = seq->private;

	seq_printf(seq, "ready: %d\n", vq->ready);
	seq_printf(seq, "desc_addr: %#llx\n", vq->desc_addr);
	seq_printf(seq, "avail_addr: %#llx\n", vq->avail_addr);
	seq_printf(seq, "used_addr: %#llx\n", vq->used_addr);
	seq_printf(seq, "q_len: %d\n", vq->q_len);
	seq_printf(seq, "qid: %d\n", vq->qid);
	seq_printf(seq, "doorbell: %#llx\n", vq->doorbell);
	seq_printf(seq, "avail_idx: %d\n", vq->avail_idx);
	seq_printf(seq, "used_idx: %d\n", vq->used_idx);
	seq_printf(seq, "irq: %d\n", vq->irq);
	seq_printf(seq, "irq-name: %s\n", vq->irq_name);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(vq);
/* Populate the VF dir with a "config" file and one "vqNN" file per vq. */
void pds_vdpa_debugfs_add_vdpadev(struct pds_vdpa_aux *vdpa_aux)
{
	int i;

	debugfs_create_file("config", 0400, vdpa_aux->dentry, vdpa_aux->pdsv, &config_fops);

	for (i = 0; i < vdpa_aux->pdsv->num_vqs; i++) {
		char name[8];

		snprintf(name, sizeof(name), "vq%02d", i);
		debugfs_create_file(name, 0400, vdpa_aux->dentry,
				    &vdpa_aux->pdsv->vqs[i], &vq_fops);
	}
}

/* Drop the VF's entire debugfs subtree. */
void pds_vdpa_debugfs_del_vdpadev(struct pds_vdpa_aux *vdpa_aux)
{
	debugfs_remove_recursive(vdpa_aux->dentry);
	vdpa_aux->dentry = NULL;
}

/* Rebuild the VF debugfs dir after a device reset. */
void pds_vdpa_debugfs_reset_vdpadev(struct pds_vdpa_aux *vdpa_aux)
{
	/* we don't keep track of the entries, so remove it all
	 * then rebuild the basics
	 */
	pds_vdpa_debugfs_del_vdpadev(vdpa_aux);
	pds_vdpa_debugfs_add_pcidev(vdpa_aux);
	pds_vdpa_debugfs_add_ident(vdpa_aux);
}
|
linux-master
|
drivers/vdpa/pds/debugfs.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Advanced Micro Devices, Inc */
#include <linux/auxiliary_bus.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio_pci_modern.h>
#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include <linux/pds/pds_auxbus.h>
#include "aux_drv.h"
#include "debugfs.h"
#include "vdpa_dev.h"
/* Auxiliary bus device names this driver binds to. */
static const struct auxiliary_device_id pds_vdpa_id_table[] = {
	{ .name = PDS_VDPA_DEV_NAME, },
	{},
};

/* vp_modern_probe() device_id_check callback: accept only the Pensando
 * vDPA VF.  Returns the PCI device id on match, -ENODEV otherwise.
 */
static int pds_vdpa_device_id_check(struct pci_dev *pdev)
{
	if (pdev->device != PCI_DEVICE_ID_PENSANDO_VDPA_VF ||
	    pdev->vendor != PCI_VENDOR_ID_PENSANDO)
		return -ENODEV;

	return PCI_DEVICE_ID_PENSANDO_VDPA_VF;
}
/* Auxiliary-bus probe: set up the management device, map the VF's modern
 * virtio config, register with the vdpa core, and create debugfs entries.
 * Unwinds in reverse order on failure.
 */
static int pds_vdpa_probe(struct auxiliary_device *aux_dev,
			  const struct auxiliary_device_id *id)
{
	struct pds_auxiliary_dev *padev =
		container_of(aux_dev, struct pds_auxiliary_dev, aux_dev);
	struct device *dev = &aux_dev->dev;
	struct pds_vdpa_aux *vdpa_aux;
	int err;

	vdpa_aux = kzalloc(sizeof(*vdpa_aux), GFP_KERNEL);
	if (!vdpa_aux)
		return -ENOMEM;

	vdpa_aux->padev = padev;
	/* NOTE(review): pci_iov_vf_id() can return a negative errno for a
	 * non-VF device; the result is stored unchecked here — confirm the
	 * aux device is only ever created for VFs.
	 */
	vdpa_aux->vf_id = pci_iov_vf_id(padev->vf_pdev);
	auxiliary_set_drvdata(aux_dev, vdpa_aux);

	/* Get device ident info and set up the vdpa_mgmt_dev */
	err = pds_vdpa_get_mgmt_info(vdpa_aux);
	if (err)
		goto err_free_mem;

	/* Find the virtio configuration */
	vdpa_aux->vd_mdev.pci_dev = padev->vf_pdev;
	vdpa_aux->vd_mdev.device_id_check = pds_vdpa_device_id_check;
	vdpa_aux->vd_mdev.dma_mask = DMA_BIT_MASK(PDS_CORE_ADDR_LEN);
	err = vp_modern_probe(&vdpa_aux->vd_mdev);
	if (err) {
		dev_err(dev, "Unable to probe for virtio configuration: %pe\n",
			ERR_PTR(err));
		goto err_free_mgmt_info;
	}

	/* Let vdpa know that we can provide devices */
	err = vdpa_mgmtdev_register(&vdpa_aux->vdpa_mdev);
	if (err) {
		dev_err(dev, "%s: Failed to initialize vdpa_mgmt interface: %pe\n",
			__func__, ERR_PTR(err));
		goto err_free_virtio;
	}

	pds_vdpa_debugfs_add_pcidev(vdpa_aux);
	pds_vdpa_debugfs_add_ident(vdpa_aux);

	return 0;

err_free_virtio:
	vp_modern_remove(&vdpa_aux->vd_mdev);
err_free_mgmt_info:
	/* irq vectors were allocated by pds_vdpa_get_mgmt_info() */
	pci_free_irq_vectors(padev->vf_pdev);
err_free_mem:
	kfree(vdpa_aux);
	auxiliary_set_drvdata(aux_dev, NULL);

	return err;
}
/* Auxiliary-bus remove: undo everything probe set up, in reverse order. */
static void pds_vdpa_remove(struct auxiliary_device *aux_dev)
{
	struct pds_vdpa_aux *vdpa_aux = auxiliary_get_drvdata(aux_dev);
	struct device *dev = &aux_dev->dev;

	vdpa_mgmtdev_unregister(&vdpa_aux->vdpa_mdev);
	vp_modern_remove(&vdpa_aux->vd_mdev);
	pci_free_irq_vectors(vdpa_aux->padev->vf_pdev);
	pds_vdpa_debugfs_del_vdpadev(vdpa_aux);
	kfree(vdpa_aux);
	auxiliary_set_drvdata(aux_dev, NULL);

	dev_info(dev, "Removed\n");
}

static struct auxiliary_driver pds_vdpa_driver = {
	.name = PDS_DEV_TYPE_VDPA_STR,
	.probe = pds_vdpa_probe,
	.remove = pds_vdpa_remove,
	.id_table = pds_vdpa_id_table,
};
/* Module teardown: unregister the aux driver, then remove debugfs. */
static void __exit pds_vdpa_cleanup(void)
{
	auxiliary_driver_unregister(&pds_vdpa_driver);
	pds_vdpa_debugfs_destroy();
}
module_exit(pds_vdpa_cleanup);

/* Module setup: create the debugfs root first so probe() can use it,
 * then register the auxiliary driver; clean up debugfs on failure.
 */
static int __init pds_vdpa_init(void)
{
	int err;

	pds_vdpa_debugfs_create();

	err = auxiliary_driver_register(&pds_vdpa_driver);
	if (err) {
		pr_err("%s: aux driver register failed: %pe\n",
		       PDS_VDPA_DRV_NAME, ERR_PTR(err));
		pds_vdpa_debugfs_destroy();
	}

	return err;
}
module_init(pds_vdpa_init);
MODULE_DESCRIPTION(PDS_VDPA_DRV_DESCRIPTION);
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/vdpa/pds/aux_drv.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Advanced Micro Devices, Inc */
#include <linux/vdpa.h>
#include <linux/virtio_pci_modern.h>
#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include <linux/pds/pds_auxbus.h>
#include "vdpa_dev.h"
#include "aux_drv.h"
#include "cmds.h"
/*
 * Ask the DPU, via the adminq, to initialize the vdpa/virtio device
 * instance backing @pdsv.  Returns 0 or a negative adminq error; the
 * firmware completion status is logged on failure.
 */
int pds_vdpa_init_hw(struct pds_vdpa_device *pdsv)
{
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	union pds_core_adminq_comp comp = {};
	union pds_core_adminq_cmd cmd = {};
	int err;

	cmd.vdpa_init.opcode = PDS_VDPA_CMD_INIT;
	cmd.vdpa_init.vdpa_index = pdsv->vdpa_index;
	cmd.vdpa_init.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id);

	/* Initialize the vdpa/virtio device */
	err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_init),
				    &comp, 0);
	if (err)
		dev_dbg(dev, "Failed to init hw, status %d: %pe\n",
			comp.status, ERR_PTR(err));

	return err;
}
/*
 * Send an adminq RESET for this vdpa instance.  Returns 0 or a negative
 * adminq error; firmware completion status is logged on failure.
 */
int pds_vdpa_cmd_reset(struct pds_vdpa_device *pdsv)
{
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	union pds_core_adminq_comp comp = {};
	union pds_core_adminq_cmd cmd = {};
	int err;

	cmd.vdpa.opcode = PDS_VDPA_CMD_RESET;
	cmd.vdpa.vdpa_index = pdsv->vdpa_index;
	cmd.vdpa.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id);

	err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa), &comp, 0);
	if (err)
		dev_dbg(dev, "Failed to reset hw, status %d: %pe\n",
			comp.status, ERR_PTR(err));

	return err;
}
/*
 * Push a new virtio status byte to the DPU via the adminq.
 * Returns 0 or a negative adminq error.
 */
int pds_vdpa_cmd_set_status(struct pds_vdpa_device *pdsv, u8 status)
{
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	union pds_core_adminq_comp comp = {};
	union pds_core_adminq_cmd cmd = {};
	int err;

	cmd.vdpa_status.opcode = PDS_VDPA_CMD_STATUS_UPDATE;
	cmd.vdpa_status.vdpa_index = pdsv->vdpa_index;
	cmd.vdpa_status.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id);
	cmd.vdpa_status.status = status;

	err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_status), &comp, 0);
	if (err)
		dev_dbg(dev, "Failed to set status to %#x, error status %d: %pe\n",
			status, comp.status, ERR_PTR(err));

	return err;
}
/* Set the device MAC address via an adminq SET_ATTR command.
 * @mac: 6-byte Ethernet address.  Returns 0 or a negative adminq error.
 */
int pds_vdpa_cmd_set_mac(struct pds_vdpa_device *pdsv, u8 *mac)
{
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	union pds_core_adminq_cmd cmd = {
		.vdpa_setattr.opcode = PDS_VDPA_CMD_SET_ATTR,
		.vdpa_setattr.vdpa_index = pdsv->vdpa_index,
		.vdpa_setattr.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),
		.vdpa_setattr.attr = PDS_VDPA_ATTR_MAC,
	};
	union pds_core_adminq_comp comp = {};
	int err;

	ether_addr_copy(cmd.vdpa_setattr.mac, mac);
	err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_setattr),
				    &comp, 0);
	if (err)
		dev_dbg(dev, "Failed to set mac address %pM, status %d: %pe\n",
			mac, comp.status, ERR_PTR(err));

	return err;
}

/* Set the device's max virtqueue pair count via an adminq SET_ATTR
 * command.  Returns 0 or a negative adminq error.
 */
int pds_vdpa_cmd_set_max_vq_pairs(struct pds_vdpa_device *pdsv, u16 max_vqp)
{
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	union pds_core_adminq_cmd cmd = {
		.vdpa_setattr.opcode = PDS_VDPA_CMD_SET_ATTR,
		.vdpa_setattr.vdpa_index = pdsv->vdpa_index,
		.vdpa_setattr.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),
		.vdpa_setattr.attr = PDS_VDPA_ATTR_MAX_VQ_PAIRS,
		.vdpa_setattr.max_vq_pairs = cpu_to_le16(max_vqp),
	};
	union pds_core_adminq_comp comp = {};
	int err;

	err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_setattr),
				    &comp, 0);
	if (err)
		dev_dbg(dev, "Failed to set max vq pairs %u, status %d: %pe\n",
			max_vqp, comp.status, ERR_PTR(err));

	return err;
}
/* Initialize one virtqueue on the DPU: ring addresses, size (as log2),
 * interrupt index, and starting avail/used indices.
 * @invert_idx: XOR mask applied to the indices before handing them to the
 * device — presumably flips ring wrap/state bits; confirm against the
 * PDS adminq spec.  Returns 0 or a negative adminq error.
 */
int pds_vdpa_cmd_init_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
			 struct pds_vdpa_vq_info *vq_info)
{
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	union pds_core_adminq_cmd cmd = {
		.vdpa_vq_init.opcode = PDS_VDPA_CMD_VQ_INIT,
		.vdpa_vq_init.vdpa_index = pdsv->vdpa_index,
		.vdpa_vq_init.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),
		.vdpa_vq_init.qid = cpu_to_le16(qid),
		/* queue length is passed as log2; q_len is assumed pow2 */
		.vdpa_vq_init.len = cpu_to_le16(ilog2(vq_info->q_len)),
		.vdpa_vq_init.desc_addr = cpu_to_le64(vq_info->desc_addr),
		.vdpa_vq_init.avail_addr = cpu_to_le64(vq_info->avail_addr),
		.vdpa_vq_init.used_addr = cpu_to_le64(vq_info->used_addr),
		.vdpa_vq_init.intr_index = cpu_to_le16(qid),
		.vdpa_vq_init.avail_index = cpu_to_le16(vq_info->avail_idx ^ invert_idx),
		.vdpa_vq_init.used_index = cpu_to_le16(vq_info->used_idx ^ invert_idx),
	};
	union pds_core_adminq_comp comp = {};
	int err;

	dev_dbg(dev, "%s: qid %d len %d desc_addr %#llx avail_addr %#llx used_addr %#llx\n",
		__func__, qid, ilog2(vq_info->q_len),
		vq_info->desc_addr, vq_info->avail_addr, vq_info->used_addr);

	err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_vq_init),
				    &comp, 0);
	if (err)
		dev_dbg(dev, "Failed to init vq %d, status %d: %pe\n",
			qid, comp.status, ERR_PTR(err));

	return err;
}

/* Reset one virtqueue on the DPU and read back its final avail/used
 * indices (un-XORed with @invert_idx) into @vq_info.
 * Returns 0 or a negative adminq error.
 */
int pds_vdpa_cmd_reset_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
			  struct pds_vdpa_vq_info *vq_info)
{
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	union pds_core_adminq_cmd cmd = {
		.vdpa_vq_reset.opcode = PDS_VDPA_CMD_VQ_RESET,
		.vdpa_vq_reset.vdpa_index = pdsv->vdpa_index,
		.vdpa_vq_reset.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),
		.vdpa_vq_reset.qid = cpu_to_le16(qid),
	};
	union pds_core_adminq_comp comp = {};
	int err;

	err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_vq_reset),
				    &comp, 0);
	if (err) {
		dev_dbg(dev, "Failed to reset vq %d, status %d: %pe\n",
			qid, comp.status, ERR_PTR(err));
		return err;
	}

	vq_info->avail_idx = le16_to_cpu(comp.vdpa_vq_reset.avail_index) ^ invert_idx;
	vq_info->used_idx = le16_to_cpu(comp.vdpa_vq_reset.used_index) ^ invert_idx;

	return 0;
}
|
linux-master
|
drivers/vdpa/pds/cmds.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* vDPA bridge driver for Alibaba ENI(Elastic Network Interface)
*
* Copyright (c) 2021, Alibaba Inc. All rights reserved.
* Author: Wu Zongyong <[email protected]>
*
*/
#include "linux/bits.h"
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_legacy.h>
#include <uapi/linux/virtio_net.h>
/* Buffer size for the MSI-X interrupt name strings below */
#define ENI_MSIX_NAME_SIZE 256

/* Logging helpers that prefix every message with "eni_vdpa: " */
#define ENI_ERR(pdev, fmt, ...)				\
	dev_err(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
#define ENI_DBG(pdev, fmt, ...)				\
	dev_dbg(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
#define ENI_INFO(pdev, fmt, ...)			\
	dev_info(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)

/* Per-virtqueue state */
struct eni_vring {
	void __iomem *notify;			/* doorbell/notify register */
	char msix_name[ENI_MSIX_NAME_SIZE];	/* irq name for this vq */
	struct vdpa_callback cb;		/* vq interrupt callback */
	int irq;				/* assigned irq, or VIRTIO_MSI_NO_VECTOR */
};

/* Per-device state */
struct eni_vdpa {
	struct vdpa_device vdpa;		/* must be first for container_of */
	struct virtio_pci_legacy_device ldev;	/* underlying legacy virtio-pci dev */
	struct eni_vring *vring;		/* array of 'queues' entries */
	struct vdpa_callback config_cb;		/* config-change callback */
	char msix_name[ENI_MSIX_NAME_SIZE];	/* irq name for config interrupt */
	int config_irq;				/* config irq, or VIRTIO_MSI_NO_VECTOR */
	int queues;				/* number of virtqueues */
	int vectors;				/* MSI-X vectors currently allocated */
};
/* Map a generic vdpa_device back to its containing eni_vdpa. */
static struct eni_vdpa *vdpa_to_eni(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct eni_vdpa, vdpa);
}

/* Shortcut to the underlying legacy virtio-pci device. */
static struct virtio_pci_legacy_device *vdpa_to_ldev(struct vdpa_device *vdpa)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	return &eni_vdpa->ldev;
}

/* Report device features: what the legacy device offers, plus
 * ACCESS_PLATFORM and ORDER_PLATFORM force-added by this bridge.
 */
static u64 eni_vdpa_get_device_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	u64 features = vp_legacy_get_features(ldev);

	features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
	features |= BIT_ULL(VIRTIO_F_ORDER_PLATFORM);

	return features;
}

/* Accept the driver's negotiated features.  MRG_RXBUF is mandatory for
 * this device (unless the feature set is empty); only the low 32 bits
 * can reach a legacy device.
 */
static int eni_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	if (!(features & BIT_ULL(VIRTIO_NET_F_MRG_RXBUF)) && features) {
		ENI_ERR(ldev->pci_dev,
			"VIRTIO_NET_F_MRG_RXBUF is not negotiated\n");
		return -EINVAL;
	}

	vp_legacy_set_features(ldev, (u32)features);

	return 0;
}

/* Read back the currently negotiated driver features. */
static u64 eni_vdpa_get_driver_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_driver_features(ldev);
}

/* Read the virtio device status byte. */
static u8 eni_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_status(ldev);
}
/* Return the irq assigned to vq @idx, or -EINVAL if none is assigned. */
static int eni_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	int irq = eni_vdpa->vring[idx].irq;

	if (irq == VIRTIO_MSI_NO_VECTOR)
		return -EINVAL;

	return irq;
}

/* Release every vq irq and the config irq, detaching the device-side
 * vector mapping before each free, then release the MSI-X vectors.
 * Safe to call when nothing is allocated.
 */
static void eni_vdpa_free_irq(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	struct pci_dev *pdev = ldev->pci_dev;
	int i;

	for (i = 0; i < eni_vdpa->queues; i++) {
		if (eni_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			/* un-map the vector first so the device stops firing */
			vp_legacy_queue_vector(ldev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, eni_vdpa->vring[i].irq,
				      &eni_vdpa->vring[i]);
			eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (eni_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_legacy_config_vector(ldev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, eni_vdpa->config_irq, eni_vdpa);
		eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (eni_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		eni_vdpa->vectors = 0;
	}
}

/* Per-vq interrupt handler: forward to the registered vdpa callback. */
static irqreturn_t eni_vdpa_vq_handler(int irq, void *arg)
{
	struct eni_vring *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

/* Config-change interrupt handler: forward to the config callback. */
static irqreturn_t eni_vdpa_config_handler(int irq, void *arg)
{
	struct eni_vdpa *eni_vdpa = arg;

	if (eni_vdpa->config_cb.callback)
		return eni_vdpa->config_cb.callback(eni_vdpa->config_cb.private);

	return IRQ_HANDLED;
}
/* Allocate one MSI-X vector per vq plus one for config changes, request
 * the irqs, and map each vector on the device side.  On any failure the
 * partial setup is torn down via eni_vdpa_free_irq().
 *
 * Fix: the irq name strings previously ended in "\n"; a newline embedded
 * in an interrupt name garbles /proc/interrupts output, so it is dropped.
 */
static int eni_vdpa_request_irq(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	struct pci_dev *pdev = ldev->pci_dev;
	int i, ret, irq;
	int queues = eni_vdpa->queues;
	int vectors = queues + 1;	/* one per vq + one for config */

	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		ENI_ERR(pdev,
			"failed to allocate irq vectors want %d but %d\n",
			vectors, ret);
		return ret;
	}

	eni_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		snprintf(eni_vdpa->vring[i].msix_name, ENI_MSIX_NAME_SIZE,
			 "eni-vdpa[%s]-%d", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, i);
		ret = devm_request_irq(&pdev->dev, irq,
				       eni_vdpa_vq_handler,
				       0, eni_vdpa->vring[i].msix_name,
				       &eni_vdpa->vring[i]);
		if (ret) {
			ENI_ERR(pdev, "failed to request irq for vq %d\n", i);
			goto err;
		}
		/* tell the device which vector services this vq */
		vp_legacy_queue_vector(ldev, i, i);
		eni_vdpa->vring[i].irq = irq;
	}

	snprintf(eni_vdpa->msix_name, ENI_MSIX_NAME_SIZE, "eni-vdpa[%s]-config",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, queues);
	ret = devm_request_irq(&pdev->dev, irq, eni_vdpa_config_handler, 0,
			       eni_vdpa->msix_name, eni_vdpa);
	if (ret) {
		ENI_ERR(pdev, "failed to request irq for config vq %d\n", i);
		goto err;
	}
	vp_legacy_config_vector(ldev, queues);
	eni_vdpa->config_irq = irq;

	return 0;
err:
	eni_vdpa_free_irq(eni_vdpa);
	return ret;
}
/* Write the status byte; set up irqs on the 0->1 transition of
 * DRIVER_OK and tear them down on the 1->0 transition.
 * NOTE(review): the return value of eni_vdpa_request_irq() is ignored
 * here — a failed irq setup still sets DRIVER_OK; confirm intentional.
 */
static void eni_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u8 s = eni_vdpa_get_status(vdpa);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		eni_vdpa_request_irq(eni_vdpa);
	}

	vp_legacy_set_status(ldev, status);

	if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    (s & VIRTIO_CONFIG_S_DRIVER_OK))
		eni_vdpa_free_irq(eni_vdpa);
}

/* Reset the device (status = 0) and release irqs if they were active. */
static int eni_vdpa_reset(struct vdpa_device *vdpa)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u8 s = eni_vdpa_get_status(vdpa);

	vp_legacy_set_status(ldev, 0);

	if (s & VIRTIO_CONFIG_S_DRIVER_OK)
		eni_vdpa_free_irq(eni_vdpa);

	return 0;
}
/* Legacy devices have one fixed queue size; report queue 0's size as
 * both the max and the min.
 */
static u16 eni_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_size(ldev, 0);
}

static u16 eni_vdpa_get_vq_num_min(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_size(ldev, 0);
}

/* Legacy virtio-pci cannot report vq state. */
static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				 struct vdpa_vq_state *state)
{
	return -EOPNOTSUPP;
}

static int eni_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				 const struct vdpa_vq_state *state)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	const struct vdpa_vq_state_split *split = &state->split;

	/* ENI is built upon the virtio-pci specification, which does not
	 * support setting the state of a virtqueue.  But if the state is
	 * equal to the device's initial state by chance, we can let it go.
	 */
	if (!vp_legacy_get_queue_enable(ldev, qid)
	    && split->avail_index == 0)
		return 0;

	return -EOPNOTSUPP;
}
/* Store the interrupt callback for vq @qid. */
static void eni_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
			       struct vdpa_callback *cb)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	eni_vdpa->vring[qid].cb = *cb;
}

static void eni_vdpa_set_vq_ready(struct vdpa_device *vdpa, u16 qid,
				  bool ready)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	/* ENI is a legacy virtio-pci device; disabling a vq is not in the
	 * specification, but writing address 0 disables it in practice.
	 * Enabling (@ready == true) is a no-op here.
	 */
	if (!ready)
		vp_legacy_set_queue_address(ldev, qid, 0);
}

/* A vq counts as "ready" when it has a non-zero queue address. */
static bool eni_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_enable(ldev, qid);
}

static void eni_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
				u32 num)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	struct pci_dev *pdev = ldev->pci_dev;
	u16 n = vp_legacy_get_queue_size(ldev, qid);

	/* A legacy virtio-pci device does not allow changing the virtqueue
	 * size; just report an error if someone tries to change it.
	 */
	if (num != n)
		ENI_ERR(pdev,
			"not support to set vq %u fixed num %u to %u\n",
			qid, n, num);
}

/* Program the vq ring address.  Legacy devices take a single PFN that
 * covers desc/avail/used, so only @desc_area is used.
 */
static int eni_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
				   u64 desc_area, u64 driver_area,
				   u64 device_area)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	u32 pfn = desc_area >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

	vp_legacy_set_queue_address(ldev, qid, pfn);

	return 0;
}

/* Ring the doorbell for vq @qid. */
static void eni_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	iowrite16(qid, eni_vdpa->vring[qid].notify);
}
/* Virtio device/vendor IDs as reported by the legacy transport. */
static u32 eni_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return ldev->id.device;
}

static u32 eni_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return ldev->id.vendor;
}

/* Legacy virtio-pci uses a fixed ring alignment. */
static u32 eni_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return VIRTIO_PCI_VRING_ALIGN;
}

/* This bridge only supports virtio-net, so the config space size is
 * that of virtio_net_config.
 */
static size_t eni_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	return sizeof(struct virtio_net_config);
}

/* Byte-wise read of the device config space; the config area begins
 * after the MSI-X fields, hence VIRTIO_PCI_CONFIG_OFF(vectors).
 */
static void eni_vdpa_get_config(struct vdpa_device *vdpa,
				unsigned int offset,
				void *buf, unsigned int len)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	void __iomem *ioaddr = ldev->ioaddr +
		VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
		offset;
	u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		*p++ = ioread8(ioaddr + i);
}

/* Byte-wise write of the device config space (same layout as above). */
static void eni_vdpa_set_config(struct vdpa_device *vdpa,
				unsigned int offset, const void *buf,
				unsigned int len)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	void __iomem *ioaddr = ldev->ioaddr +
		VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
		offset;
	const u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		iowrite8(*p++, ioaddr + i);
}

/* Store the config-change callback. */
static void eni_vdpa_set_config_cb(struct vdpa_device *vdpa,
				   struct vdpa_callback *cb)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	eni_vdpa->config_cb = *cb;
}
/* vDPA config ops implemented by this legacy virtio-pci bridge. */
static const struct vdpa_config_ops eni_vdpa_ops = {
	.get_device_features = eni_vdpa_get_device_features,
	.set_driver_features = eni_vdpa_set_driver_features,
	.get_driver_features = eni_vdpa_get_driver_features,
	.get_status	= eni_vdpa_get_status,
	.set_status	= eni_vdpa_set_status,
	.reset		= eni_vdpa_reset,
	.get_vq_num_max	= eni_vdpa_get_vq_num_max,
	.get_vq_num_min	= eni_vdpa_get_vq_num_min,
	.get_vq_state	= eni_vdpa_get_vq_state,
	.set_vq_state	= eni_vdpa_set_vq_state,
	.set_vq_cb	= eni_vdpa_set_vq_cb,
	.set_vq_ready	= eni_vdpa_set_vq_ready,
	.get_vq_ready	= eni_vdpa_get_vq_ready,
	.set_vq_num	= eni_vdpa_set_vq_num,
	.set_vq_address	= eni_vdpa_set_vq_address,
	.kick_vq	= eni_vdpa_kick_vq,
	.get_device_id	= eni_vdpa_get_device_id,
	.get_vendor_id	= eni_vdpa_get_vendor_id,
	.get_vq_align	= eni_vdpa_get_vq_align,
	.get_config_size = eni_vdpa_get_config_size,
	.get_config	= eni_vdpa_get_config,
	.set_config	= eni_vdpa_set_config,
	.set_config_cb  = eni_vdpa_set_config_cb,
	.get_vq_irq	= eni_vdpa_get_vq_irq,
};
/* Work out how many virtqueues the device exposes: 2 per queue pair
 * (2 * max_virtqueue_pairs if MQ is offered, else one pair), plus one
 * control vq if CTRL_VQ is offered.
 */
static u16 eni_vdpa_get_num_queues(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u32 features = vp_legacy_get_features(ldev);
	u16 num = 2;

	if (features & BIT_ULL(VIRTIO_NET_F_MQ)) {
		__virtio16 max_virtqueue_pairs;

		eni_vdpa_get_config(&eni_vdpa->vdpa,
				    offsetof(struct virtio_net_config, max_virtqueue_pairs),
				    &max_virtqueue_pairs,
				    sizeof(max_virtqueue_pairs));
		num = 2 * __virtio16_to_cpu(virtio_legacy_is_little_endian(),
					    max_virtqueue_pairs);
	}

	if (features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		num += 1;

	return num;
}
/* PCI probe: wrap the legacy virtio-pci device in a vdpa_device and
 * register it with the vdpa bus.  Uses managed PCI enable, so cleanup
 * on failure only needs to drop the vdpa device reference.
 */
static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct eni_vdpa *eni_vdpa;
	struct virtio_pci_legacy_device *ldev;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* one vq group, one address space, no name, no user va */
	eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
				     dev, &eni_vdpa_ops, 1, 1, NULL, false);
	if (IS_ERR(eni_vdpa)) {
		ENI_ERR(pdev, "failed to allocate vDPA structure\n");
		return PTR_ERR(eni_vdpa);
	}

	ldev = &eni_vdpa->ldev;
	ldev->pci_dev = pdev;

	ret = vp_legacy_probe(ldev);
	if (ret) {
		ENI_ERR(pdev, "failed to probe legacy PCI device\n");
		goto err;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, eni_vdpa);

	eni_vdpa->vdpa.dma_dev = &pdev->dev;
	eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);

	eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
				       sizeof(*eni_vdpa->vring),
				       GFP_KERNEL);
	if (!eni_vdpa->vring) {
		ret = -ENOMEM;
		ENI_ERR(pdev, "failed to allocate virtqueues\n");
		goto err;
	}

	for (i = 0; i < eni_vdpa->queues; i++) {
		eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		/* legacy devices share one notify register for all vqs */
		eni_vdpa->vring[i].notify = ldev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
	}
	eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	ret = vdpa_register_device(&eni_vdpa->vdpa, eni_vdpa->queues);
	if (ret) {
		ENI_ERR(pdev, "failed to register to vdpa bus\n");
		goto err;
	}

	return 0;

err:
	put_device(&eni_vdpa->vdpa.dev);
	return ret;
}
/* PCI remove: unregister from the vdpa bus and release the legacy
 * transport; managed resources are freed by the driver core.
 */
static void eni_vdpa_remove(struct pci_dev *pdev)
{
	struct eni_vdpa *eni_vdpa = pci_get_drvdata(pdev);

	vdpa_unregister_device(&eni_vdpa->vdpa);
	vp_legacy_remove(&eni_vdpa->ldev);
}
/* PCI IDs this driver binds to: a transitional virtio-net device with
 * the Red Hat/Qumranet virtio subsystem IDs.
 * Fix: the table is now const — it is never written, and
 * pci_driver.id_table is a pointer-to-const (kernel convention).
 */
static const struct pci_device_id eni_pci_ids[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_NET,
			 PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_ID_NET) },
	{ 0 },
};
/* PCI driver glue; module_pci_driver() generates init/exit. */
static struct pci_driver eni_vdpa_driver = {
	.name		= "alibaba-eni-vdpa",
	.id_table	= eni_pci_ids,
	.probe		= eni_vdpa_probe,
	.remove		= eni_vdpa_remove,
};

module_pci_driver(eni_vdpa_driver);
MODULE_AUTHOR("Wu Zongyong <[email protected]>");
MODULE_DESCRIPTION("Alibaba ENI vDPA driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/vdpa/alibaba/eni_vdpa.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* SolidRun DPU driver for control plane
*
* Copyright (C) 2022-2023 SolidRun
*
* Author: Alvaro Karsz <[email protected]>
*
*/
#include <linux/iopoll.h>
#include "snet_vdpa.h"
/* Opcodes written to the DPU's control opcode register. */
enum snet_ctrl_opcodes {
	SNET_CTRL_OP_DESTROY = 1,
	SNET_CTRL_OP_READ_VQ_STATE,
	SNET_CTRL_OP_SUSPEND,
	SNET_CTRL_OP_RESUME,
};

/* Poll timeout for control-register handshakes, in microseconds. */
#define SNET_CTRL_TIMEOUT			2000000

/* Field masks for the 32-bit view of snet_ctrl_reg_ctrl. */
#define SNET_CTRL_DATA_SIZE_MASK		0x0000FFFF
#define SNET_CTRL_IN_PROCESS_MASK		0x00010000
#define SNET_CTRL_CHUNK_RDY_MASK		0x00020000
#define SNET_CTRL_ERROR_MASK			0x0FFC0000

/* Extract the (negated) error code from a raw ctrl-register value. */
#define SNET_VAL_TO_ERR(val)			(-(((val) & SNET_CTRL_ERROR_MASK) >> 18))
/* Idle: an error is latched, or no command is in process. */
#define SNET_EMPTY_CTRL(val)			(((val) & SNET_CTRL_ERROR_MASK) || \
							!((val) & SNET_CTRL_IN_PROCESS_MASK))
/* A data chunk is ready, or an error is latched. */
#define SNET_DATA_READY(val)			((val) & (SNET_CTRL_ERROR_MASK | SNET_CTRL_CHUNK_RDY_MASK))
/* Control register used to read data from the DPU */
struct snet_ctrl_reg_ctrl {
	/* Chunk size in 4B words */
	u16 data_size;
	/* We are in the middle of a command */
	u16 in_process:1;
	/* A data chunk is ready and can be consumed */
	u16 chunk_ready:1;
	/* Error code */
	u16 error:10;
	/* Saved for future usage */
	u16 rsvd:4;
};

/* Opcode register */
struct snet_ctrl_reg_op {
	u16 opcode;
	/* Only if VQ index is relevant for the command */
	u16 vq_idx;
};

/* Memory-mapped layout of the DPU control area: opcode word, control
 * word, a reserved word, then the variable-length data window.
 */
struct snet_ctrl_regs {
	struct snet_ctrl_reg_op op;
	struct snet_ctrl_reg_ctrl ctrl;
	u32 rsvd;
	u32 data[];
};
static struct snet_ctrl_regs __iomem *snet_get_ctrl(struct snet *snet)
{
return snet->bar + snet->psnet->cfg.ctrl_off;
}
static int snet_wait_for_empty_ctrl(struct snet_ctrl_regs __iomem *regs)
{
u32 val;
return readx_poll_timeout(ioread32, ®s->ctrl, val, SNET_EMPTY_CTRL(val), 10,
SNET_CTRL_TIMEOUT);
}
static int snet_wait_for_empty_op(struct snet_ctrl_regs __iomem *regs)
{
u32 val;
return readx_poll_timeout(ioread32, ®s->op, val, !val, 10, SNET_CTRL_TIMEOUT);
}
static int snet_wait_for_data(struct snet_ctrl_regs __iomem *regs)
{
u32 val;
return readx_poll_timeout(ioread32, ®s->ctrl, val, SNET_DATA_READY(val), 10,
SNET_CTRL_TIMEOUT);
}
static u32 snet_read32_word(struct snet_ctrl_regs __iomem *ctrl_regs, u16 word_idx)
{
return ioread32(&ctrl_regs->data[word_idx]);
}
static u32 snet_read_ctrl(struct snet_ctrl_regs __iomem *ctrl_regs)
{
return ioread32(&ctrl_regs->ctrl);
}
static void snet_write_ctrl(struct snet_ctrl_regs __iomem *ctrl_regs, u32 val)
{
iowrite32(val, &ctrl_regs->ctrl);
}
static void snet_write_op(struct snet_ctrl_regs __iomem *ctrl_regs, u32 val)
{
iowrite32(val, &ctrl_regs->op);
}
/* Returns 0 once the DPU signalled completion, negative errno on timeout */
static int snet_wait_for_dpu_completion(struct snet_ctrl_regs __iomem *ctrl_regs)
{
	/* Wait until the DPU finishes completely.
	 * It will clear the opcode register.
	 */
	return snet_wait_for_empty_op(ctrl_regs);
}
/* Reading ctrl from the DPU:
* buf_size must be 4B aligned
*
* Steps:
*
* (1) Verify that the DPU is not in the middle of another operation by
* reading the in_process and error bits in the control register.
* (2) Write the request opcode and the VQ idx in the opcode register
* and write the buffer size in the control register.
 * (3) Start reading chunks of data, chunk_ready bit indicates that a
* data chunk is available, we signal that we read the data by clearing the bit.
* (4) Detect that the transfer is completed when the in_process bit
 * in the control register is cleared or when an error appears.
*/
/* Read a buffer of buf_size bytes from the DPU in 4B-word chunks.
 * Returns 0 on success, a negative errno on timeout or DPU error.
 */
static int snet_ctrl_read_from_dpu(struct snet *snet, u16 opcode, u16 vq_idx, void *buffer,
				   u32 buf_size)
{
	struct pci_dev *pdev = snet->pdev;
	struct snet_ctrl_regs __iomem *regs = snet_get_ctrl(snet);
	u32 *bfr_ptr = (u32 *)buffer;
	u32 val;
	u16 buf_words;
	int ret;
	u16 words, i, tot_words = 0;
	/* Supported for config 2+ */
	if (!SNET_CFG_VER(snet, 2))
		return -EOPNOTSUPP;
	if (!IS_ALIGNED(buf_size, 4))
		return -EINVAL;
	/* ctrl_lock serializes whole request/response transactions */
	mutex_lock(&snet->ctrl_lock);
	buf_words = buf_size / 4;
	/* Make sure control register is empty */
	ret = snet_wait_for_empty_ctrl(regs);
	if (ret) {
		SNET_WARN(pdev, "Timeout waiting for previous control data to be consumed\n");
		goto exit;
	}
	/* We need to write the buffer size in the control register, and the opcode + vq index in
	 * the opcode register.
	 * We use a spinlock to serialize the writes.
	 */
	spin_lock(&snet->ctrl_spinlock);
	snet_write_ctrl(regs, buf_words);
	snet_write_op(regs, opcode | (vq_idx << 16));
	spin_unlock(&snet->ctrl_spinlock);
	/* Consume chunks until the requested number of words was copied */
	while (buf_words != tot_words) {
		ret = snet_wait_for_data(regs);
		if (ret) {
			SNET_WARN(pdev, "Timeout waiting for control data\n");
			goto exit;
		}
		val = snet_read_ctrl(regs);
		/* Error? */
		if (val & SNET_CTRL_ERROR_MASK) {
			ret = SNET_VAL_TO_ERR(val);
			SNET_WARN(pdev, "Error while reading control data from DPU, err %d\n", ret);
			goto exit;
		}
		/* Never copy more than the caller's buffer can hold */
		words = min_t(u16, val & SNET_CTRL_DATA_SIZE_MASK, buf_words - tot_words);
		for (i = 0; i < words; i++) {
			*bfr_ptr = snet_read32_word(regs, i);
			bfr_ptr++;
		}
		tot_words += words;
		/* Is the job completed? */
		if (!(val & SNET_CTRL_IN_PROCESS_MASK))
			break;
		/* Clear the chunk ready bit and continue */
		val &= ~SNET_CTRL_CHUNK_RDY_MASK;
		snet_write_ctrl(regs, val);
	}
	ret = snet_wait_for_dpu_completion(regs);
	if (ret)
		SNET_WARN(pdev, "Timeout waiting for the DPU to complete a control command\n");
exit:
	mutex_unlock(&snet->ctrl_lock);
	return ret;
}
/* Send a control message to the DPU using the old mechanism
 * used with config version 1.
 * Returns 0 on success, negative errno on timeout.
 */
static int snet_send_ctrl_msg_old(struct snet *snet, u32 opcode)
{
	struct pci_dev *pdev = snet->pdev;
	struct snet_ctrl_regs __iomem *regs = snet_get_ctrl(snet);
	int ret;
	/* Serialize control transactions */
	mutex_lock(&snet->ctrl_lock);
	/* Old mechanism uses just 1 register, the opcode register.
	 * Make sure that the opcode register is empty, and that the DPU isn't
	 * processing an old message.
	 */
	ret = snet_wait_for_empty_op(regs);
	if (ret) {
		SNET_WARN(pdev, "Timeout waiting for previous control message to be ACKed\n");
		goto exit;
	}
	/* Write the message */
	snet_write_op(regs, opcode);
	/* DPU ACKs the message by clearing the opcode register */
	ret = snet_wait_for_empty_op(regs);
	if (ret)
		SNET_WARN(pdev, "Timeout waiting for a control message to be ACKed\n");
exit:
	mutex_unlock(&snet->ctrl_lock);
	return ret;
}
/* Send a control message to the DPU.
 * A control message is a message without payload.
 * Returns 0 on success, the DPU's error code or a negative errno on failure.
 */
static int snet_send_ctrl_msg(struct snet *snet, u16 opcode, u16 vq_idx)
{
	struct pci_dev *pdev = snet->pdev;
	struct snet_ctrl_regs __iomem *regs = snet_get_ctrl(snet);
	u32 val;
	int ret, err;

	/* If config version is not 2+, use the old mechanism */
	if (!SNET_CFG_VER(snet, 2))
		return snet_send_ctrl_msg_old(snet, opcode);

	mutex_lock(&snet->ctrl_lock);

	/* Make sure control register is empty */
	ret = snet_wait_for_empty_ctrl(regs);
	if (ret) {
		SNET_WARN(pdev, "Timeout waiting for previous control data to be consumed\n");
		goto exit;
	}

	/* We need to clear the control register and write the opcode + vq index in the opcode
	 * register.
	 * We use a spinlock to serialize the writes.
	 */
	spin_lock(&snet->ctrl_spinlock);
	snet_write_ctrl(regs, 0);
	snet_write_op(regs, opcode | (vq_idx << 16));
	spin_unlock(&snet->ctrl_spinlock);

	/* The DPU ACKs control messages by setting the chunk ready bit
	 * without data.
	 */
	ret = snet_wait_for_data(regs);
	if (ret) {
		SNET_WARN(pdev, "Timeout waiting for control message to be ACKed\n");
		goto exit;
	}

	/* Check for errors */
	val = snet_read_ctrl(regs);
	ret = SNET_VAL_TO_ERR(val);

	/* Clear the chunk ready bit */
	val &= ~SNET_CTRL_CHUNK_RDY_MASK;
	snet_write_ctrl(regs, val);

	/* Fix: previously the DPU error code computed above was always
	 * overwritten here; only report the completion timeout if the DPU
	 * did not already report an error.
	 */
	err = snet_wait_for_dpu_completion(regs);
	if (err) {
		SNET_WARN(pdev, "Timeout waiting for DPU to complete a control command, err %d\n",
			  err);
		if (!ret)
			ret = err;
	}

exit:
	mutex_unlock(&snet->ctrl_lock);
	return ret;
}
/* Clear the opcode register so no stale command is left pending */
void snet_ctrl_clear(struct snet *snet)
{
	snet_write_op(snet_get_ctrl(snet), 0);
}
/* Ask the DPU to destroy this device */
int snet_destroy_dev(struct snet *snet)
{
	return snet_send_ctrl_msg(snet, SNET_CTRL_OP_DESTROY, 0);
}
/* Read the state of VQ idx from the DPU into *state */
int snet_read_vq_state(struct snet *snet, u16 idx, struct vdpa_vq_state *state)
{
	return snet_ctrl_read_from_dpu(snet, SNET_CTRL_OP_READ_VQ_STATE, idx, state,
				       sizeof(*state));
}
/* Ask the DPU to suspend this device */
int snet_suspend_dev(struct snet *snet)
{
	return snet_send_ctrl_msg(snet, SNET_CTRL_OP_SUSPEND, 0);
}
/* Ask the DPU to resume this device */
int snet_resume_dev(struct snet *snet)
{
	return snet_send_ctrl_msg(snet, SNET_CTRL_OP_RESUME, 0);
}
|
linux-master
|
drivers/vdpa/solidrun/snet_ctrl.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* SolidRun DPU driver for control plane
*
* Copyright (C) 2022-2023 SolidRun
*
* Author: Alvaro Karsz <[email protected]>
*
*/
#include <linux/hwmon.h>
#include "snet_vdpa.h"
/* Monitor offsets — byte offsets of each 64-bit sensor value relative to
 * the hwmon area (psnet->cfg.hwmon_off) in the PF BAR.
 * NOTE(review): values are presumably in hwmon core units (milli-degrees,
 * milli-volts, etc.) since they are passed through unscaled — confirm
 * against the DPU firmware spec.
 */
#define SNET_MON_TMP0_IN_OFF 0x00
#define SNET_MON_TMP0_MAX_OFF 0x08
#define SNET_MON_TMP0_CRIT_OFF 0x10
#define SNET_MON_TMP1_IN_OFF 0x18
#define SNET_MON_TMP1_CRIT_OFF 0x20
#define SNET_MON_CURR_IN_OFF 0x28
#define SNET_MON_CURR_MAX_OFF 0x30
#define SNET_MON_CURR_CRIT_OFF 0x38
#define SNET_MON_PWR_IN_OFF 0x40
#define SNET_MON_VOLT_IN_OFF 0x48
#define SNET_MON_VOLT_CRIT_OFF 0x50
#define SNET_MON_VOLT_LCRIT_OFF 0x58
/* Read one 64-bit sensor value from the hwmon register window */
static void snet_hwmon_read_reg(struct psnet *psnet, u32 reg, long *out)
{
	u32 off = psnet->cfg.hwmon_off + reg;

	*out = psnet_read64(psnet, off);
}
/* All exposed hwmon attributes are read-only for everyone.
 * NOTE(review): "howmon" looks like a typo for "hwmon"; renaming would
 * also touch snet_hwmon_ops below, so it is left as-is here.
 */
static umode_t snet_howmon_is_visible(const void *data,
				      enum hwmon_sensor_types type,
				      u32 attr, int channel)
{
	return 0444;
}
/* hwmon .read callback: fetch the requested sensor value from the DPU.
 * Returns 0 on success, -EOPNOTSUPP for unsupported type/attr combinations.
 */
static int snet_howmon_read(struct device *dev, enum hwmon_sensor_types type,
			    u32 attr, int channel, long *val)
{
	struct psnet *psnet = dev_get_drvdata(dev);
	int ret = 0;
	switch (type) {
	case hwmon_in:
		switch (attr) {
		case hwmon_in_lcrit:
			snet_hwmon_read_reg(psnet, SNET_MON_VOLT_LCRIT_OFF, val);
			break;
		case hwmon_in_crit:
			snet_hwmon_read_reg(psnet, SNET_MON_VOLT_CRIT_OFF, val);
			break;
		case hwmon_in_input:
			snet_hwmon_read_reg(psnet, SNET_MON_VOLT_IN_OFF, val);
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
		}
		break;
	case hwmon_power:
		switch (attr) {
		case hwmon_power_input:
			snet_hwmon_read_reg(psnet, SNET_MON_PWR_IN_OFF, val);
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
		}
		break;
	case hwmon_curr:
		switch (attr) {
		case hwmon_curr_input:
			snet_hwmon_read_reg(psnet, SNET_MON_CURR_IN_OFF, val);
			break;
		case hwmon_curr_max:
			snet_hwmon_read_reg(psnet, SNET_MON_CURR_MAX_OFF, val);
			break;
		case hwmon_curr_crit:
			snet_hwmon_read_reg(psnet, SNET_MON_CURR_CRIT_OFF, val);
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
		}
		break;
	/* Two temperature channels: 0 = power stage, 1 = IC junction
	 * (labels are assigned in snet_hwmon_read_string)
	 */
	case hwmon_temp:
		switch (attr) {
		case hwmon_temp_input:
			if (channel == 0)
				snet_hwmon_read_reg(psnet, SNET_MON_TMP0_IN_OFF, val);
			else
				snet_hwmon_read_reg(psnet, SNET_MON_TMP1_IN_OFF, val);
			break;
		case hwmon_temp_max:
			/* Only channel 0 exposes a max threshold */
			if (channel == 0)
				snet_hwmon_read_reg(psnet, SNET_MON_TMP0_MAX_OFF, val);
			else
				ret = -EOPNOTSUPP;
			break;
		case hwmon_temp_crit:
			if (channel == 0)
				snet_hwmon_read_reg(psnet, SNET_MON_TMP0_CRIT_OFF, val);
			else
				snet_hwmon_read_reg(psnet, SNET_MON_TMP1_CRIT_OFF, val);
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
		}
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	return ret;
}
/* hwmon .read_string callback: return the label for a sensor channel.
 * Returns 0 on success, -EOPNOTSUPP for unknown sensor types.
 */
static int snet_hwmon_read_string(struct device *dev,
				  enum hwmon_sensor_types type, u32 attr,
				  int channel, const char **str)
{
	const char *label = NULL;

	switch (type) {
	case hwmon_in:
		label = "main_vin";
		break;
	case hwmon_power:
		label = "soc_pin";
		break;
	case hwmon_curr:
		label = "soc_iin";
		break;
	case hwmon_temp:
		label = (channel == 0) ? "power_stage_temp" : "ic_junction_temp";
		break;
	default:
		break;
	}

	if (!label)
		return -EOPNOTSUPP;

	*str = label;
	return 0;
}
/* hwmon callbacks (all attributes are read-only, see is_visible) */
static const struct hwmon_ops snet_hwmon_ops = {
	.is_visible = snet_howmon_is_visible,
	.read = snet_howmon_read,
	.read_string = snet_hwmon_read_string
};
/* Exposed channels: 2 temps, 1 power, 1 current, 1 voltage */
static const struct hwmon_channel_info * const snet_hwmon_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_LABEL),
	HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_LABEL),
	HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_MAX | HWMON_C_CRIT | HWMON_C_LABEL),
	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_CRIT | HWMON_I_LCRIT | HWMON_I_LABEL),
	NULL
};
/* NOTE(review): "hwmono" looks like a typo for "hwmon"; renaming would also
 * touch psnet_create_hwmon below, so it is left as-is here.
 */
static const struct hwmon_chip_info snet_hwmono_info = {
	.ops = &snet_hwmon_ops,
	.info = snet_hwmon_info,
};
/* Create an HW monitor device for the PF; failure is non-fatal */
void psnet_create_hwmon(struct pci_dev *pdev)
{
	struct device *hwmon;
	struct psnet *psnet = pci_get_drvdata(pdev);
	/* Device name, e.g. "snet_0000:01:00.0" */
	snprintf(psnet->hwmon_name, SNET_NAME_SIZE, "snet_%s", pci_name(pdev));
	/* devm-managed: unregistered automatically when the PF detaches */
	hwmon = devm_hwmon_device_register_with_info(&pdev->dev, psnet->hwmon_name, psnet,
						     &snet_hwmono_info, NULL);
	/* The monitor is not mandatory, Just alert user in case of an error */
	if (IS_ERR(hwmon))
		SNET_WARN(pdev, "Failed to create SNET hwmon, error %ld\n", PTR_ERR(hwmon));
}
|
linux-master
|
drivers/vdpa/solidrun/snet_hwmon.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* SolidRun DPU driver for control plane
*
* Copyright (C) 2022-2023 SolidRun
*
* Author: Alvaro Karsz <[email protected]>
*
*/
#include <linux/iopoll.h>
#include "snet_vdpa.h"
/* SNET DPU device ID */
#define SNET_DEVICE_ID 0x1000
/* SNET signature */
#define SNET_SIGNATURE 0xD0D06363
/* Max. config version that we can work with */
#define SNET_CFG_VERSION 0x2
/* Queue align */
#define SNET_QUEUE_ALIGNMENT PAGE_SIZE
/* Kick value to notify that new data is available */
#define SNET_KICK_VAL 0x1
/* Byte offset of the SNET config inside the communication BAR */
#define SNET_CONFIG_OFF 0x0
/* How long we are willing to wait for a SNET device (microseconds) */
#define SNET_DETECT_TIMEOUT 5000000
/* How long should we wait for the DPU to read our config (microseconds) */
#define SNET_READ_CFG_TIMEOUT 3000000
/* Size of configs written to the DPU (bytes, see snet_write_conf layout) */
#define SNET_GENERAL_CFG_LEN 36
#define SNET_GENERAL_CFG_VQ_LEN 40
/* Convert a generic vdpa device pointer to the snet that embeds it */
static struct snet *vdpa_to_snet(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct snet, vdpa);
}

/* IRQ handler for config-change interrupts: forward to the vdpa callback */
static irqreturn_t snet_cfg_irq_hndlr(int irq, void *data)
{
	struct snet *snet = data;

	if (unlikely(!snet->cb.callback))
		return IRQ_HANDLED;

	return snet->cb.callback(snet->cb.private);
}

/* IRQ handler for VQ interrupts: forward to the vdpa callback */
static irqreturn_t snet_vq_irq_hndlr(int irq, void *data)
{
	struct snet_vq *vq = data;

	if (unlikely(!vq->cb.callback))
		return IRQ_HANDLED;

	return vq->cb.callback(vq->cb.private);
}
/* Release the config and per-VQ IRQs; -1 marks an IRQ as not requested */
static void snet_free_irqs(struct snet *snet)
{
	struct psnet *psnet = snet->psnet;
	struct pci_dev *pdev;
	u32 i;
	/* Which device allocated the IRQs? */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pdev = snet->pdev->physfn;
	else
		pdev = snet->pdev;
	/* Free config's IRQ */
	if (snet->cfg_irq != -1) {
		devm_free_irq(&pdev->dev, snet->cfg_irq, snet);
		snet->cfg_irq = -1;
	}
	/* Free VQ IRQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		if (snet->vqs[i] && snet->vqs[i]->irq != -1) {
			devm_free_irq(&pdev->dev, snet->vqs[i]->irq, snet->vqs[i]);
			snet->vqs[i]->irq = -1;
		}
	}
	/* IRQ vectors are freed when the pci remove callback is called */
}
/* vdpa op: record the VQ ring areas; written to the DPU in snet_write_conf */
static int snet_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
			       u64 driver_area, u64 device_area)
{
	struct snet *snet = vdpa_to_snet(vdev);
	/* save received parameters in vqueue struct */
	snet->vqs[idx]->desc_area = desc_area;
	snet->vqs[idx]->driver_area = driver_area;
	snet->vqs[idx]->device_area = device_area;
	return 0;
}
/* vdpa op: remember the ring size for this virtqueue */
static void snet_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
{
	vdpa_to_snet(vdev)->vqs[idx]->num = num;
}

/* vdpa op: notify the DPU that new descriptors are available */
static void snet_kick_vq(struct vdpa_device *vdev, u16 idx)
{
	struct snet_vq *vq = vdpa_to_snet(vdev)->vqs[idx];

	/* Ignore kicks until the queue is marked ready */
	if (likely(vq->ready))
		iowrite32(SNET_KICK_VAL, vq->kick_ptr);
}

/* vdpa op: kick with extra data in the upper 16 bits, VQ index in the lower */
static void snet_kick_vq_with_data(struct vdpa_device *vdev, u32 data)
{
	struct snet *snet = vdpa_to_snet(vdev);
	u16 idx = data & 0xFFFF;
	struct snet_vq *vq = snet->vqs[idx];

	/* Ignore kicks until the queue is marked ready */
	if (likely(vq->ready))
		iowrite32((data & 0xFFFF0000) | SNET_KICK_VAL, vq->kick_ptr);
}

/* vdpa op: register the interrupt callback for a VQ */
static void snet_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
{
	struct snet_vq *vq = vdpa_to_snet(vdev)->vqs[idx];

	vq->cb.callback = cb->callback;
	vq->cb.private = cb->private;
}

/* vdpa op: mark a VQ as ready (or not) for use */
static void snet_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
{
	vdpa_to_snet(vdev)->vqs[idx]->ready = ready;
}

/* vdpa op: query the ready state of a VQ */
static bool snet_get_vq_ready(struct vdpa_device *vdev, u16 idx)
{
	return vdpa_to_snet(vdev)->vqs[idx]->ready;
}
/* Return true when *state matches the initial VQ state the DPU starts from
 * (wrap counters 1 and indexes 0 for packed rings, avail index 0 for split).
 */
static bool snet_vq_state_is_initial(struct snet *snet, const struct vdpa_vq_state *state)
{
	if (!SNET_HAS_FEATURE(snet, VIRTIO_F_RING_PACKED))
		return state->split.avail_index == 0;

	return state->packed.last_avail_counter == 1 &&
	       state->packed.last_used_counter == 1 &&
	       state->packed.last_avail_idx == 0 &&
	       state->packed.last_used_idx == 0;
}
/* vdpa op: save the requested VQ state (written to the DPU later).
 * Returns 0 on success, -EOPNOTSUPP if an old config can't honor the state.
 */
static int snet_set_vq_state(struct vdpa_device *vdev, u16 idx, const struct vdpa_vq_state *state)
{
	struct snet *snet = vdpa_to_snet(vdev);
	/* We can set any state for config version 2+ */
	if (SNET_CFG_VER(snet, 2)) {
		memcpy(&snet->vqs[idx]->vq_state, state, sizeof(*state));
		return 0;
	}
	/* Older config - we can't set the VQ state.
	 * Return 0 only if this is the initial state we use in the DPU.
	 */
	if (snet_vq_state_is_initial(snet, state))
		return 0;
	return -EOPNOTSUPP;
}
/* vdpa op: read the current VQ state back from the DPU */
static int snet_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
{
	return snet_read_vq_state(vdpa_to_snet(vdev), idx, state);
}

/* vdpa op: report the Linux IRQ number backing this VQ (-1 if none) */
static int snet_get_vq_irq(struct vdpa_device *vdev, u16 idx)
{
	return vdpa_to_snet(vdev)->vqs[idx]->irq;
}

/* vdpa op: required alignment for VQ memory */
static u32 snet_get_vq_align(struct vdpa_device *vdev)
{
	return (u32)SNET_QUEUE_ALIGNMENT;
}
/* Bring the device back to its pristine state: destroy it on the DPU if it
 * was running, clear all VQ bookkeeping and callbacks, free IRQs.
 * Always returns 0; an incomplete DPU-side destroy is only warned about.
 */
static int snet_reset_dev(struct snet *snet)
{
	struct pci_dev *pdev = snet->pdev;
	int ret = 0;
	u32 i;
	/* If status is 0, nothing to do */
	if (!snet->status)
		return 0;
	/* If DPU started, destroy it */
	if (snet->status & VIRTIO_CONFIG_S_DRIVER_OK)
		ret = snet_destroy_dev(snet);
	/* Clear VQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		if (!snet->vqs[i])
			continue;
		snet->vqs[i]->cb.callback = NULL;
		snet->vqs[i]->cb.private = NULL;
		snet->vqs[i]->desc_area = 0;
		snet->vqs[i]->device_area = 0;
		snet->vqs[i]->driver_area = 0;
		snet->vqs[i]->ready = false;
	}
	/* Clear config callback */
	snet->cb.callback = NULL;
	snet->cb.private = NULL;
	/* Free IRQs */
	snet_free_irqs(snet);
	/* Reset status */
	snet->status = 0;
	/* Force snet_write_conf() to re-send the config on next DRIVER_OK */
	snet->dpu_ready = false;
	if (ret)
		SNET_WARN(pdev, "Incomplete reset to SNET[%u] device, err: %d\n", snet->sid, ret);
	else
		SNET_DBG(pdev, "Reset SNET[%u] device\n", snet->sid);
	return 0;
}
static int snet_reset(struct vdpa_device *vdev)
{
struct snet *snet = vdpa_to_snet(vdev);
return snet_reset_dev(snet);
}
static size_t snet_get_config_size(struct vdpa_device *vdev)
{
struct snet *snet = vdpa_to_snet(vdev);
return (size_t)snet->cfg->cfg_size;
}
static u64 snet_get_features(struct vdpa_device *vdev)
{
struct snet *snet = vdpa_to_snet(vdev);
return snet->cfg->features;
}
static int snet_set_drv_features(struct vdpa_device *vdev, u64 features)
{
struct snet *snet = vdpa_to_snet(vdev);
snet->negotiated_features = snet->cfg->features & features;
return 0;
}
static u64 snet_get_drv_features(struct vdpa_device *vdev)
{
struct snet *snet = vdpa_to_snet(vdev);
return snet->negotiated_features;
}
static u16 snet_get_vq_num_max(struct vdpa_device *vdev)
{
struct snet *snet = vdpa_to_snet(vdev);
return (u16)snet->cfg->vq_size;
}
static void snet_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
{
struct snet *snet = vdpa_to_snet(vdev);
snet->cb.callback = cb->callback;
snet->cb.private = cb->private;
}
static u32 snet_get_device_id(struct vdpa_device *vdev)
{
struct snet *snet = vdpa_to_snet(vdev);
return snet->cfg->virtio_id;
}
static u32 snet_get_vendor_id(struct vdpa_device *vdev)
{
return (u32)PCI_VENDOR_ID_SOLIDRUN;
}
static u8 snet_get_status(struct vdpa_device *vdev)
{
struct snet *snet = vdpa_to_snet(vdev);
return snet->status;
}
static int snet_write_conf(struct snet *snet)
{
u32 off, i, tmp;
int ret;
/* No need to write the config twice */
if (snet->dpu_ready)
return true;
/* Snet data :
*
* General data: SNET_GENERAL_CFG_LEN bytes long
* 0 0x4 0x8 0xC 0x10 0x14 0x1C 0x24
* | MAGIC NUMBER | CFG VER | SNET SID | NUMBER OF QUEUES | IRQ IDX | FEATURES | RSVD |
*
* For every VQ: SNET_GENERAL_CFG_VQ_LEN bytes long
* 0 0x4 0x8
* | VQ SID AND QUEUE SIZE | IRQ Index |
* | DESC AREA |
* | DEVICE AREA |
* | DRIVER AREA |
* | VQ STATE (CFG 2+) | RSVD |
*
* Magic number should be written last, this is the DPU indication that the data is ready
*/
/* Init offset */
off = snet->psnet->cfg.host_cfg_off;
/* Ignore magic number for now */
off += 4;
snet_write32(snet, off, snet->psnet->negotiated_cfg_ver);
off += 4;
snet_write32(snet, off, snet->sid);
off += 4;
snet_write32(snet, off, snet->cfg->vq_num);
off += 4;
snet_write32(snet, off, snet->cfg_irq_idx);
off += 4;
snet_write64(snet, off, snet->negotiated_features);
off += 8;
/* Ignore reserved */
off += 8;
/* Write VQs */
for (i = 0 ; i < snet->cfg->vq_num ; i++) {
tmp = (i << 16) | (snet->vqs[i]->num & 0xFFFF);
snet_write32(snet, off, tmp);
off += 4;
snet_write32(snet, off, snet->vqs[i]->irq_idx);
off += 4;
snet_write64(snet, off, snet->vqs[i]->desc_area);
off += 8;
snet_write64(snet, off, snet->vqs[i]->device_area);
off += 8;
snet_write64(snet, off, snet->vqs[i]->driver_area);
off += 8;
/* Write VQ state if config version is 2+ */
if (SNET_CFG_VER(snet, 2))
snet_write32(snet, off, *(u32 *)&snet->vqs[i]->vq_state);
off += 4;
/* Ignore reserved */
off += 4;
}
/* Write magic number - data is ready */
snet_write32(snet, snet->psnet->cfg.host_cfg_off, SNET_SIGNATURE);
/* The DPU will ACK the config by clearing the signature */
ret = readx_poll_timeout(ioread32, snet->bar + snet->psnet->cfg.host_cfg_off,
tmp, !tmp, 10, SNET_READ_CFG_TIMEOUT);
if (ret) {
SNET_ERR(snet->pdev, "Timeout waiting for the DPU to read the config\n");
return false;
}
/* set DPU flag */
snet->dpu_ready = true;
return true;
}
/* Request the config IRQ and one IRQ per VQ from the given PCI device
 * (PF or VF, depending on SNET_CFG_FLAG_IRQ_PF).
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): on a mid-loop failure, already-requested IRQs are not freed
 * here; they are devm-managed and the caller may also call snet_free_irqs —
 * confirm the intended cleanup path.
 */
static int snet_request_irqs(struct pci_dev *pdev, struct snet *snet)
{
	int ret, i, irq;
	/* Request config IRQ */
	irq = pci_irq_vector(pdev, snet->cfg_irq_idx);
	ret = devm_request_irq(&pdev->dev, irq, snet_cfg_irq_hndlr, 0,
			       snet->cfg_irq_name, snet);
	if (ret) {
		SNET_ERR(pdev, "Failed to request IRQ\n");
		return ret;
	}
	snet->cfg_irq = irq;
	/* Request IRQ for every VQ */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		irq = pci_irq_vector(pdev, snet->vqs[i]->irq_idx);
		ret = devm_request_irq(&pdev->dev, irq, snet_vq_irq_hndlr, 0,
				       snet->vqs[i]->irq_name, snet->vqs[i]);
		if (ret) {
			SNET_ERR(pdev, "Failed to request IRQ\n");
			return ret;
		}
		snet->vqs[i]->irq = irq;
	}
	return 0;
}
/* vdpa op: apply a new VirtIO status byte.
 * On the 0 -> DRIVER_OK transition, request IRQs and push the device
 * config to the DPU; on failure, set VIRTIO_CONFIG_S_FAILED instead.
 */
static void snet_set_status(struct vdpa_device *vdev, u8 status)
{
	struct snet *snet = vdpa_to_snet(vdev);
	struct psnet *psnet = snet->psnet;
	struct pci_dev *pdev = snet->pdev;
	int ret;
	bool pf_irqs;
	if (status == snet->status)
		return;
	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(snet->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		/* Request IRQs */
		pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);
		ret = snet_request_irqs(pf_irqs ? pdev->physfn : pdev, snet);
		if (ret)
			goto set_err;
		/* Write config to the DPU; truthy return means success */
		if (snet_write_conf(snet)) {
			SNET_INFO(pdev, "Create SNET[%u] device\n", snet->sid);
		} else {
			snet_free_irqs(snet);
			goto set_err;
		}
	}
	/* Save the new status */
	snet->status = status;
	return;
set_err:
	snet->status |= VIRTIO_CONFIG_S_FAILED;
}
/* vdpa op: copy len bytes of the device config space into buf.
 * Out-of-range requests are silently ignored.
 */
static void snet_get_config(struct vdpa_device *vdev, unsigned int offset,
			    void *buf, unsigned int len)
{
	struct snet *snet = vdpa_to_snet(vdev);
	void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset;
	u8 *dst = buf;
	u32 done;

	/* Reject reads that would run past the config area */
	if (offset + len > snet->cfg->cfg_size)
		return;

	/* Copy byte by byte out of the PCI BAR */
	for (done = 0; done < len; done++)
		dst[done] = ioread8(cfg_ptr + done);
}

/* vdpa op: copy len bytes from buf into the device config space.
 * Out-of-range requests are silently ignored.
 */
static void snet_set_config(struct vdpa_device *vdev, unsigned int offset,
			    const void *buf, unsigned int len)
{
	struct snet *snet = vdpa_to_snet(vdev);
	void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset;
	const u8 *src = buf;
	u32 done;

	/* Reject writes that would run past the config area */
	if (offset + len > snet->cfg->cfg_size)
		return;

	/* Copy byte by byte into the PCI BAR */
	for (done = 0; done < len; done++)
		iowrite8(src[done], cfg_ptr + done);
}
/* vdpa op: ask the DPU to suspend the device */
static int snet_suspend(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);
	int err = snet_suspend_dev(snet);

	if (err)
		SNET_ERR(snet->pdev, "SNET[%u] suspend failed, err: %d\n", snet->sid, err);
	else
		SNET_DBG(snet->pdev, "Suspend SNET[%u] device\n", snet->sid);

	return err;
}

/* vdpa op: ask the DPU to resume the device */
static int snet_resume(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);
	int err = snet_resume_dev(snet);

	if (err)
		SNET_ERR(snet->pdev, "SNET[%u] resume failed, err: %d\n", snet->sid, err);
	else
		SNET_DBG(snet->pdev, "Resume SNET[%u] device\n", snet->sid);

	return err;
}
/* vdpa callbacks for SNET devices */
static const struct vdpa_config_ops snet_config_ops = {
	.set_vq_address         = snet_set_vq_address,
	.set_vq_num             = snet_set_vq_num,
	.kick_vq                = snet_kick_vq,
	.kick_vq_with_data      = snet_kick_vq_with_data,
	.set_vq_cb              = snet_set_vq_cb,
	.set_vq_ready           = snet_set_vq_ready,
	.get_vq_ready           = snet_get_vq_ready,
	.set_vq_state           = snet_set_vq_state,
	.get_vq_state           = snet_get_vq_state,
	.get_vq_irq             = snet_get_vq_irq,
	.get_vq_align           = snet_get_vq_align,
	.reset                  = snet_reset,
	.get_config_size        = snet_get_config_size,
	.get_device_features    = snet_get_features,
	.set_driver_features    = snet_set_drv_features,
	.get_driver_features    = snet_get_drv_features,
	/* min == max: the device supports exactly one VQ size */
	.get_vq_num_min         = snet_get_vq_num_max,
	.get_vq_num_max         = snet_get_vq_num_max,
	.set_config_cb          = snet_set_config_cb,
	.get_device_id          = snet_get_device_id,
	.get_vendor_id          = snet_get_vendor_id,
	.get_status             = snet_get_status,
	.set_status             = snet_set_status,
	.get_config             = snet_get_config,
	.set_config             = snet_set_config,
	.suspend                = snet_suspend,
	.resume                 = snet_resume,
};
/* Map every non-empty BAR of the PF; the one actually used for
 * communication is discovered later (psnet_detect_bar) and the rest
 * are unmapped. Returns 0 on success, negative errno on failure.
 */
static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
{
	char name[50];
	int ret, i, mask = 0;
	/* We don't know which BAR will be used to communicate..
	 * We will map every bar with len > 0.
	 *
	 * Later, we will discover the BAR and unmap all other BARs.
	 */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (pci_resource_len(pdev, i))
			mask |= (1 << i);
	}
	/* No BAR can be used.. */
	if (!mask) {
		SNET_ERR(pdev, "Failed to find a PCI BAR\n");
		return -ENODEV;
	}
	snprintf(name, sizeof(name), "psnet[%s]-bars", pci_name(pdev));
	ret = pcim_iomap_regions(pdev, mask, name);
	if (ret) {
		SNET_ERR(pdev, "Failed to request and map PCI BARs\n");
		return ret;
	}
	/* Record the virtual address of each mapped BAR */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (mask & (1 << i))
			psnet->bars[i] = pcim_iomap_table(pdev)[i];
	}
	return 0;
}
/* Map the single VF BAR indicated by the PF config (cfg.vf_bar).
 * Returns 0 on success, negative errno on failure.
 */
static int snet_open_vf_bar(struct pci_dev *pdev, struct snet *snet)
{
	char name[50];
	int ret;
	snprintf(name, sizeof(name), "snet[%s]-bar", pci_name(pdev));
	/* Request and map BAR */
	ret = pcim_iomap_regions(pdev, BIT(snet->psnet->cfg.vf_bar), name);
	if (ret) {
		SNET_ERR(pdev, "Failed to request and map PCI BAR for a VF\n");
		return ret;
	}
	snet->bar = pcim_iomap_table(pdev)[snet->psnet->cfg.vf_bar];
	return 0;
}
/* Free the per-device configs and the array of pointers to them */
static void snet_free_cfg(struct snet_cfg *cfg)
{
	u32 i;

	if (!cfg->devs)
		return;

	/* The array is filled front-to-back; stop at the first hole */
	for (i = 0; i < cfg->devices_num && cfg->devs[i]; i++)
		kfree(cfg->devs[i]);

	kfree(cfg->devs);
}
/* Detect which BAR is used for communication with the device.
 * Returns the BAR number on success, -ENODEV if no BAR shows the
 * signature within SNET_DETECT_TIMEOUT microseconds.
 */
static int psnet_detect_bar(struct psnet *psnet, u32 off)
{
	unsigned long exit_time;
	int i;
	exit_time = jiffies + usecs_to_jiffies(SNET_DETECT_TIMEOUT);
	/* SNET DPU will write SNET's signature when the config is ready. */
	while (time_before(jiffies, exit_time)) {
		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			/* Is this BAR mapped? */
			if (!psnet->bars[i])
				continue;
			if (ioread32(psnet->bars[i] + off) == SNET_SIGNATURE)
				return i;
		}
		usleep_range(1000, 10000);
	}
	return -ENODEV;
}
/* Unmap every mapped BAR except the one used for communication */
static void psnet_unmap_unused_bars(struct pci_dev *pdev, struct psnet *psnet)
{
	int bar, unmap_mask = 0;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (psnet->bars[bar] && bar != psnet->barno)
			unmap_mask |= (1 << bar);
	}

	if (unmap_mask)
		pcim_iounmap_regions(pdev, unmap_mask);
}
/* Read SNET config from PCI BAR.
 * Detects the communication BAR, parses the global config header and then
 * one per-device config per VF. Returns 0 on success, negative errno on
 * failure (config not ready, OOM, or oversized per-device config).
 */
static int psnet_read_cfg(struct pci_dev *pdev, struct psnet *psnet)
{
	struct snet_cfg *cfg = &psnet->cfg;
	u32 i, off;
	int barno;
	/* Move to where the config starts */
	off = SNET_CONFIG_OFF;
	/* Find BAR used for communication */
	barno = psnet_detect_bar(psnet, off);
	if (barno < 0) {
		SNET_ERR(pdev, "SNET config is not ready.\n");
		return barno;
	}
	/* Save used BAR number and unmap all other BARs */
	psnet->barno = barno;
	SNET_DBG(pdev, "Using BAR number %d\n", barno);
	psnet_unmap_unused_bars(pdev, psnet);
	/* load config from BAR */
	cfg->key = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_size = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_ver = psnet_read32(psnet, off);
	off += 4;
	/* The negotiated config version is the lower one between this driver's config
	 * and the DPU's.
	 */
	psnet->negotiated_cfg_ver = min_t(u32, cfg->cfg_ver, SNET_CFG_VERSION);
	SNET_DBG(pdev, "SNET config version %u\n", psnet->negotiated_cfg_ver);
	cfg->vf_num = psnet_read32(psnet, off);
	off += 4;
	cfg->vf_bar = psnet_read32(psnet, off);
	off += 4;
	cfg->host_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->max_size_host_cfg = psnet_read32(psnet, off);
	off += 4;
	cfg->virtio_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->kick_off = psnet_read32(psnet, off);
	off += 4;
	cfg->hwmon_off = psnet_read32(psnet, off);
	off += 4;
	cfg->ctrl_off = psnet_read32(psnet, off);
	off += 4;
	cfg->flags = psnet_read32(psnet, off);
	off += 4;
	/* Ignore Reserved */
	off += sizeof(cfg->rsvd);
	cfg->devices_num = psnet_read32(psnet, off);
	off += 4;
	/* Allocate memory to hold pointer to the devices */
	cfg->devs = kcalloc(cfg->devices_num, sizeof(void *), GFP_KERNEL);
	if (!cfg->devs)
		return -ENOMEM;
	/* Load device configuration from BAR */
	for (i = 0; i < cfg->devices_num; i++) {
		cfg->devs[i] = kzalloc(sizeof(*cfg->devs[i]), GFP_KERNEL);
		if (!cfg->devs[i]) {
			snet_free_cfg(cfg);
			return -ENOMEM;
		}
		/* Read device config */
		cfg->devs[i]->virtio_id = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_num = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_size = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vfid = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->features = psnet_read64(psnet, off);
		off += 8;
		/* Ignore Reserved */
		off += sizeof(cfg->devs[i]->rsvd);
		cfg->devs[i]->cfg_size = psnet_read32(psnet, off);
		off += 4;
		/* Is the config written to the DPU going to be too big? */
		if (SNET_GENERAL_CFG_LEN + SNET_GENERAL_CFG_VQ_LEN * cfg->devs[i]->vq_num >
		    cfg->max_size_host_cfg) {
			SNET_ERR(pdev, "Failed to read SNET config, the config is too big..\n");
			snet_free_cfg(cfg);
			return -EINVAL;
		}
	}
	return 0;
}
/* Allocate MSI-X vectors on the PF for all devices at once.
 * Returns 0 on success, negative errno (or a short count) on failure.
 */
static int psnet_alloc_irq_vector(struct pci_dev *pdev, struct psnet *psnet)
{
	int ret = 0;
	u32 i, irq_num = 0;
	/* Let's count how many IRQs we need, 1 for every VQ + 1 for config change */
	for (i = 0; i < psnet->cfg.devices_num; i++)
		irq_num += psnet->cfg.devs[i]->vq_num + 1;
	/* min == max: we need exactly irq_num vectors or nothing */
	ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
	if (ret != irq_num) {
		SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
		return ret;
	}
	SNET_DBG(pdev, "Allocated %u IRQ vectors from physical function\n", irq_num);
	return 0;
}
/* Allocate MSI-X vectors on a VF for one device.
 * Returns 0 on success, negative errno on failure.
 */
static int snet_alloc_irq_vector(struct pci_dev *pdev, struct snet_dev_cfg *snet_cfg)
{
	int ret = 0;
	u32 irq_num;
	/* We want 1 IRQ for every VQ + 1 for config change events */
	irq_num = snet_cfg->vq_num + 1;
	/* min == max: we need exactly irq_num vectors or nothing */
	ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
	if (ret <= 0) {
		SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
		return ret;
	}
	return 0;
}
/* Free the per-VQ structures and the VQ pointer array */
static void snet_free_vqs(struct snet *snet)
{
	u32 i;

	if (!snet->vqs)
		return;

	/* The array is filled front-to-back; stop at the first hole */
	for (i = 0; i < snet->cfg->vq_num && snet->vqs[i]; i++)
		kfree(snet->vqs[i]);

	kfree(snet->vqs);
}
/* Allocate and initialize all VQ structures for this device.
 * Returns 0 on success, -ENOMEM on failure (partial allocations freed).
 */
static int snet_build_vqs(struct snet *snet)
{
	u32 i;
	/* Allocate the VQ pointers array */
	snet->vqs = kcalloc(snet->cfg->vq_num, sizeof(void *), GFP_KERNEL);
	if (!snet->vqs)
		return -ENOMEM;
	/* Allocate the VQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		snet->vqs[i] = kzalloc(sizeof(*snet->vqs[i]), GFP_KERNEL);
		if (!snet->vqs[i]) {
			snet_free_vqs(snet);
			return -ENOMEM;
		}
		/* Reset IRQ num */
		snet->vqs[i]->irq = -1;
		/* VQ serial ID */
		snet->vqs[i]->sid = i;
		/* Kick address - every VQ gets 4B */
		snet->vqs[i]->kick_ptr = snet->bar + snet->psnet->cfg.kick_off +
					 snet->vqs[i]->sid * 4;
		/* Clear kick address for this VQ */
		iowrite32(0, snet->vqs[i]->kick_ptr);
	}
	return 0;
}
/* Hand out the next free IRQ index; the counter is shared across VFs,
 * so protect it with the psnet lock.
 */
static int psnet_get_next_irq_num(struct psnet *psnet)
{
	int idx;

	spin_lock(&psnet->lock);
	idx = psnet->next_irq;
	psnet->next_irq++;
	spin_unlock(&psnet->lock);

	return idx;
}
/* Reserve stable IRQ indexes (config + one per VQ) and build their names */
static void snet_reserve_irq_idx(struct pci_dev *pdev, struct snet *snet)
{
	struct psnet *psnet = snet->psnet;
	int i;
	/* one IRQ for every VQ, and one for config changes */
	snet->cfg_irq_idx = psnet_get_next_irq_num(psnet);
	snprintf(snet->cfg_irq_name, SNET_NAME_SIZE, "snet[%s]-cfg[%d]",
		 pci_name(pdev), snet->cfg_irq_idx);
	for (i = 0; i < snet->cfg->vq_num; i++) {
		/* Get next free IRQ ID */
		snet->vqs[i]->irq_idx = psnet_get_next_irq_num(psnet);
		/* Write IRQ name */
		snprintf(snet->vqs[i]->irq_name, SNET_NAME_SIZE, "snet[%s]-vq[%d]",
			 pci_name(pdev), snet->vqs[i]->irq_idx);
	}
}
/* Find a device config based on virtual function id.
 * Returns NULL when no entry matches.
 */
static struct snet_dev_cfg *snet_find_dev_cfg(struct snet_cfg *cfg, u32 vfid)
{
	struct snet_dev_cfg *dev_cfg;
	u32 i;

	for (i = 0; i < cfg->devices_num; i++) {
		dev_cfg = cfg->devs[i];
		if (dev_cfg->vfid == vfid)
			return dev_cfg;
	}

	/* No matching config was found */
	return NULL;
}
/* Probe function for a physical PCI function.
 * Maps the BARs, reads the DPU config, optionally allocates PF MSI-X
 * vectors, enables SR-IOV and (optionally) registers the HW monitor.
 * Returns 0 on success, negative errno on failure.
 */
static int snet_vdpa_probe_pf(struct pci_dev *pdev)
{
	struct psnet *psnet;
	int ret = 0;
	bool pf_irqs = false;
	ret = pcim_enable_device(pdev);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable PCI device\n");
		return ret;
	}
	/* Allocate a PCI physical function device */
	psnet = kzalloc(sizeof(*psnet), GFP_KERNEL);
	if (!psnet)
		return -ENOMEM;
	/* Init PSNET spinlock */
	spin_lock_init(&psnet->lock);
	pci_set_master(pdev);
	pci_set_drvdata(pdev, psnet);
	/* Open SNET MAIN BAR */
	ret = psnet_open_pf_bar(pdev, psnet);
	if (ret)
		goto free_psnet;
	/* Try to read SNET's config from PCI BAR */
	ret = psnet_read_cfg(pdev, psnet);
	if (ret)
		goto free_psnet;
	/* If SNET_CFG_FLAG_IRQ_PF flag is set, we should use
	 * PF MSI-X vectors
	 */
	pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);
	if (pf_irqs) {
		ret = psnet_alloc_irq_vector(pdev, psnet);
		if (ret)
			goto free_cfg;
	}
	SNET_DBG(pdev, "Enable %u virtual functions\n", psnet->cfg.vf_num);
	ret = pci_enable_sriov(pdev, psnet->cfg.vf_num);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable SR-IOV\n");
		goto free_irq;
	}
	/* Create HW monitor device */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_HWMON)) {
#if IS_ENABLED(CONFIG_HWMON)
		psnet_create_hwmon(pdev);
#else
		SNET_WARN(pdev, "Can't start HWMON, CONFIG_HWMON is not enabled\n");
#endif
	}
	return 0;
/* goto-based unwind: free resources in reverse acquisition order */
free_irq:
	if (pf_irqs)
		pci_free_irq_vectors(pdev);
free_cfg:
	snet_free_cfg(&psnet->cfg);
free_psnet:
	kfree(psnet);
	return ret;
}
/* Probe function for a virtual PCI function.
 * Finds the per-VF config prepared by the PF probe, allocates a vdpa
 * device, maps the VF BAR, builds the virtqueues and registers the
 * device with the vdpa core. Returns 0 on success or a negative errno.
 */
static int snet_vdpa_probe_vf(struct pci_dev *pdev)
{
	struct pci_dev *pdev_pf = pdev->physfn;
	/* PF drvdata was set by snet_vdpa_probe_pf() */
	struct psnet *psnet = pci_get_drvdata(pdev_pf);
	struct snet_dev_cfg *dev_cfg;
	struct snet *snet;
	u32 vfid;
	int ret;
	bool pf_irqs = false;
	/* Get virtual function id.
	 * (the DPU counts the VFs from 1)
	 */
	ret = pci_iov_vf_id(pdev);
	if (ret < 0) {
		SNET_ERR(pdev, "Failed to find a VF id\n");
		return ret;
	}
	vfid = ret + 1;
	/* Find the snet_dev_cfg based on vfid */
	dev_cfg = snet_find_dev_cfg(&psnet->cfg, vfid);
	if (!dev_cfg) {
		SNET_WARN(pdev, "Failed to find a VF config..\n");
		return -ENODEV;
	}
	/* Which PCI device should allocate the IRQs?
	 * If the SNET_CFG_FLAG_IRQ_PF flag set, the PF device allocates the IRQs
	 */
	pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);
	ret = pcim_enable_device(pdev);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable PCI VF device\n");
		return ret;
	}
	/* Request for MSI-X IRQs (only when the VF owns its vectors) */
	if (!pf_irqs) {
		ret = snet_alloc_irq_vector(pdev, dev_cfg);
		if (ret)
			return ret;
	}
	/* Allocate vdpa device */
	snet = vdpa_alloc_device(struct snet, vdpa, &pdev->dev, &snet_config_ops, 1, 1, NULL,
				 false);
	if (!snet) {
		SNET_ERR(pdev, "Failed to allocate a vdpa device\n");
		ret = -ENOMEM;
		goto free_irqs;
	}
	/* Init control mutex and spinlock */
	mutex_init(&snet->ctrl_lock);
	spin_lock_init(&snet->ctrl_spinlock);
	/* Save pci device pointer */
	snet->pdev = pdev;
	snet->psnet = psnet;
	snet->cfg = dev_cfg;
	snet->dpu_ready = false;
	snet->sid = vfid;
	/* Reset IRQ value */
	snet->cfg_irq = -1;
	ret = snet_open_vf_bar(pdev, snet);
	if (ret)
		goto put_device;
	/* Create a VirtIO config pointer */
	snet->cfg->virtio_cfg = snet->bar + snet->psnet->cfg.virtio_cfg_off;
	/* Clear control registers */
	snet_ctrl_clear(snet);
	pci_set_master(pdev);
	pci_set_drvdata(pdev, snet);
	ret = snet_build_vqs(snet);
	if (ret)
		goto put_device;
	/* Reserve IRQ indexes,
	 * The IRQs may be requested and freed multiple times,
	 * but the indexes won't change.
	 */
	snet_reserve_irq_idx(pf_irqs ? pdev_pf : pdev, snet);
	/*set DMA device*/
	snet->vdpa.dma_dev = &pdev->dev;
	/* Register VDPA device */
	ret = vdpa_register_device(&snet->vdpa, snet->cfg->vq_num);
	if (ret) {
		SNET_ERR(pdev, "Failed to register vdpa device\n");
		goto free_vqs;
	}
	return 0;
free_vqs:
	snet_free_vqs(snet);
put_device:
	/* Drops the reference taken by vdpa_alloc_device(); frees snet */
	put_device(&snet->vdpa.dev);
free_irqs:
	if (!pf_irqs)
		pci_free_irq_vectors(pdev);
	return ret;
}
/* Common probe entry: dispatch to the VF or PF flow by function type */
static int snet_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pdev->is_virtfn ? snet_vdpa_probe_vf(pdev)
			       : snet_vdpa_probe_pf(pdev);
}
/* Tear down the physical function: SR-IOV must be disabled (removing all
 * VFs) before the IRQ vectors and the config/psnet memory are released.
 */
static void snet_vdpa_remove_pf(struct pci_dev *pdev)
{
	struct psnet *psnet = pci_get_drvdata(pdev);
	pci_disable_sriov(pdev);
	/* If IRQs are allocated from the PF, we should free the IRQs */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pci_free_irq_vectors(pdev);
	snet_free_cfg(&psnet->cfg);
	kfree(psnet);
}
static void snet_vdpa_remove_vf(struct pci_dev *pdev)
{
struct snet *snet = pci_get_drvdata(pdev);
struct psnet *psnet = snet->psnet;
vdpa_unregister_device(&snet->vdpa);
snet_free_vqs(snet);
/* If IRQs are allocated from the VF, we should free the IRQs */
if (!PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
pci_free_irq_vectors(pdev);
}
static void snet_vdpa_remove(struct pci_dev *pdev)
{
if (pdev->is_virtfn)
snet_vdpa_remove_vf(pdev);
else
snet_vdpa_remove_pf(pdev);
}
/* Match only devices whose subsystem vendor/device also identify SNET */
static struct pci_device_id snet_driver_pci_ids[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID,
			 PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID) },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, snet_driver_pci_ids);
/* PCI driver registration; probe/remove dispatch on PF vs. VF internally */
static struct pci_driver snet_vdpa_driver = {
	.name = "snet-vdpa-driver",
	.id_table = snet_driver_pci_ids,
	.probe = snet_vdpa_probe,
	.remove = snet_vdpa_remove,
};
module_pci_driver(snet_vdpa_driver);
MODULE_AUTHOR("Alvaro Karsz <[email protected]>");
MODULE_DESCRIPTION("SolidRun vDPA driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/vdpa/solidrun/snet_main.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Force feedback support for memoryless devices
*
* Copyright (c) 2006 Anssi Hannula <[email protected]>
* Copyright (c) 2006 Dmitry Torokhov <[email protected]>
*/
/* #define DEBUG */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/fixp-arith.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anssi Hannula <[email protected]>");
MODULE_DESCRIPTION("Force feedback support for memoryless devices");
/* Number of effects handled with memoryless devices */
#define FF_MEMLESS_EFFECTS 16
/* Envelope update interval in ms */
#define FF_ENVELOPE_INTERVAL 50
#define FF_EFFECT_STARTED 0
#define FF_EFFECT_PLAYING 1
#define FF_EFFECT_ABORTING 2
/* Per-slot runtime state for one uploaded effect */
struct ml_effect_state {
	struct ff_effect *effect;	/* points into ff->effects[] */
	unsigned long flags;	/* effect state (STARTED, PLAYING, etc) */
	int count;		/* loop count of the effect */
	unsigned long play_at;	/* start time (jiffies) */
	unsigned long stop_at;	/* stop time (jiffies) */
	unsigned long adj_at;	/* last time the effect was sent */
};
/* Per-input-device state of the memoryless FF emulation layer */
struct ml_device {
	void *private;		/* opaque data handed back to play_effect */
	struct ml_effect_state states[FF_MEMLESS_EFFECTS];
	int gain;		/* global gain, 0..0xffff */
	struct timer_list timer;	/* drives periodic effect updates */
	struct input_dev *dev;
	/* driver callback that actually programs the hardware */
	int (*play_effect)(struct input_dev *dev, void *data,
			   struct ff_effect *effect);
};
/* Return the envelope of an effect; effect types without an envelope get
 * a shared all-zero one so callers never need a NULL check.
 */
static const struct ff_envelope *get_envelope(const struct ff_effect *effect)
{
	static const struct ff_envelope empty_envelope;

	/* Only periodic and constant effects carry an envelope */
	if (effect->type == FF_PERIODIC)
		return &effect->u.periodic.envelope;
	if (effect->type == FF_CONSTANT)
		return &effect->u.constant.envelope;
	return &empty_envelope;
}
/*
 * Check for the next time envelope requires an update on memoryless devices
 */
static unsigned long calculate_next_time(struct ml_effect_state *state)
{
	const struct ff_envelope *envelope = get_envelope(state->effect);
	unsigned long attack_stop, fade_start, next_fade;
	/* While attacking, resend every FF_ENVELOPE_INTERVAL ms */
	if (envelope->attack_length) {
		attack_stop = state->play_at +
			msecs_to_jiffies(envelope->attack_length);
		if (time_before(state->adj_at, attack_stop))
			return state->adj_at +
					msecs_to_jiffies(FF_ENVELOPE_INTERVAL);
	}
	if (state->effect->replay.length) {
		if (envelope->fade_length) {
			/* check when fading should start */
			fade_start = state->stop_at -
					msecs_to_jiffies(envelope->fade_length);
			if (time_before(state->adj_at, fade_start))
				return fade_start;
			/* already fading, advance to next checkpoint */
			next_fade = state->adj_at +
					msecs_to_jiffies(FF_ENVELOPE_INTERVAL);
			if (time_before(next_fade, state->stop_at))
				return next_fade;
		}
		/* no (more) fading: next event is the effect's stop time */
		return state->stop_at;
	}
	/* infinite effect (replay.length == 0): nothing to do until restart */
	return state->play_at;
}
/* (Re)arm the update timer for the earliest pending event across all
 * started effects, or cancel it when nothing is scheduled.
 */
static void ml_schedule_timer(struct ml_device *ml)
{
	struct ml_effect_state *state;
	unsigned long now = jiffies;
	unsigned long earliest = 0;
	unsigned long next_at;
	int events = 0;
	int i;
	pr_debug("calculating next timer\n");
	for (i = 0; i < FF_MEMLESS_EFFECTS; i++) {
		state = &ml->states[i];
		if (!test_bit(FF_EFFECT_STARTED, &state->flags))
			continue;
		/* playing effects need envelope updates; pending ones start later */
		if (test_bit(FF_EFFECT_PLAYING, &state->flags))
			next_at = calculate_next_time(state);
		else
			next_at = state->play_at;
		/* keep the minimum of all future event times */
		if (time_before_eq(now, next_at) &&
		    (++events == 1 || time_before(next_at, earliest)))
			earliest = next_at;
	}
	if (!events) {
		pr_debug("no actions\n");
		del_timer(&ml->timer);
	} else {
		pr_debug("timer set\n");
		mod_timer(&ml->timer, earliest);
	}
}
/*
 * Apply an envelope to a value
 */
static int apply_envelope(struct ml_effect_state *state, int value,
			  struct ff_envelope *envelope)
{
	struct ff_effect *effect = state->effect;
	unsigned long now = jiffies;
	int time_from_level;
	int time_of_envelope;
	int envelope_level;
	int difference;
	/* Attack phase: ramp from attack_level towards |value| */
	if (envelope->attack_length &&
	    time_before(now,
			state->play_at + msecs_to_jiffies(envelope->attack_length))) {
		pr_debug("value = 0x%x, attack_level = 0x%x\n",
			 value, envelope->attack_level);
		time_from_level = jiffies_to_msecs(now - state->play_at);
		time_of_envelope = envelope->attack_length;
		/* clamp to the valid level range */
		envelope_level = min_t(u16, envelope->attack_level, 0x7fff);
	} else if (envelope->fade_length && effect->replay.length &&
		   time_after(now,
			      state->stop_at - msecs_to_jiffies(envelope->fade_length)) &&
		   time_before(now, state->stop_at)) {
		/* Fade phase: ramp from |value| down towards fade_level */
		time_from_level = jiffies_to_msecs(state->stop_at - now);
		time_of_envelope = envelope->fade_length;
		envelope_level = min_t(u16, envelope->fade_level, 0x7fff);
	} else
		/* Sustain: no scaling applied */
		return value;
	difference = abs(value) - envelope_level;
	pr_debug("difference = %d\n", difference);
	pr_debug("time_from_level = 0x%x\n", time_from_level);
	pr_debug("time_of_envelope = 0x%x\n", time_of_envelope);
	/* linear interpolation between envelope_level and |value| */
	difference = difference * time_from_level / time_of_envelope;
	pr_debug("difference = %d\n", difference);
	/* re-apply the original sign */
	return value < 0 ?
		-(difference + envelope_level) : (difference + envelope_level);
}
/*
 * Return the type the effect has to be converted into (memless devices).
 * A natively supported type maps to itself; FF_PERIODIC can be emulated
 * with FF_RUMBLE. Anything else is a programming error and yields 0.
 */
static int get_compatible_type(struct ff_device *ff, int effect_type)
{
	int compatible = 0;

	if (test_bit(effect_type, ff->ffbit))
		compatible = effect_type;
	else if (effect_type == FF_PERIODIC && test_bit(FF_RUMBLE, ff->ffbit))
		compatible = FF_RUMBLE;
	else
		pr_err("invalid type in get_compatible_type()\n");

	return compatible;
}
/*
 * Only left/right direction should be used (under/over 0x8000) for
 * forward/reverse motor direction (to keep calculation fast & simple).
 * Computes the force-weighted average of two 16-bit directions; the
 * halving (>> 1 ... << 1) keeps the intermediate sum within 32 bits.
 */
static u16 ml_calculate_direction(u16 direction, u16 force,
				  u16 new_direction, u16 new_force)
{
	u32 blended;

	/* A zero-strength component contributes nothing */
	if (!force)
		return new_direction;
	if (!new_force)
		return direction;

	blended = ((u32)(direction >> 1) * force +
		   (new_direction >> 1) * new_force) / (force + new_force);
	return blended << 1;
}
/* Number of fractional bits used by the fixed-point helpers below */
#define FRAC_N 8
/* Convert a s16 magnitude into s(16-FRAC_N).FRAC_N fixed-point form */
static inline s16 fixp_new16(s16 a)
{
	return ((s32)a) >> (16 - FRAC_N);
}
/* Fixed-point multiply; @a is first rescaled from 0..0x7fff to a
 * FRAC_N-bit fraction so the product stays in range.
 */
static inline s16 fixp_mult(s16 a, s16 b)
{
	a = ((s32)a * 0x100) / 0x7fff;
	return ((s32)(a * b)) >> FRAC_N;
}
/*
 * Combine two effects and apply gain.
 * Accumulates @state's effect (with its envelope and @gain applied) into
 * @effect, which is the combined effect being built by ml_get_combo_effect().
 */
static void ml_combine_effects(struct ff_effect *effect,
			       struct ml_effect_state *state,
			       int gain)
{
	struct ff_effect *new = state->effect;
	unsigned int strong, weak, i;
	int x, y;
	s16 level;
	switch (new->type) {
	case FF_CONSTANT:
		/* convert 0..0xffff direction into degrees for sin/cos tables */
		i = new->direction * 360 / 0xffff;
		level = fixp_new16(apply_envelope(state,
						  new->u.constant.level,
						  &new->u.constant.envelope));
		x = fixp_mult(fixp_sin16(i), level) * gain / 0xffff;
		y = fixp_mult(-fixp_cos16(i), level) * gain / 0xffff;
		/*
		 * here we abuse ff_ramp to hold x and y of constant force
		 * If in future any driver wants something else than x and y
		 * in s8, this should be changed to something more generic
		 */
		effect->u.ramp.start_level =
			clamp_val(effect->u.ramp.start_level + x, -0x80, 0x7f);
		effect->u.ramp.end_level =
			clamp_val(effect->u.ramp.end_level + y, -0x80, 0x7f);
		break;
	case FF_RUMBLE:
		strong = (u32)new->u.rumble.strong_magnitude * gain / 0xffff;
		weak = (u32)new->u.rumble.weak_magnitude * gain / 0xffff;
		/* direction is the force-weighted blend; strong motor wins */
		if (effect->u.rumble.strong_magnitude + strong)
			effect->direction = ml_calculate_direction(
				effect->direction,
				effect->u.rumble.strong_magnitude,
				new->direction, strong);
		else if (effect->u.rumble.weak_magnitude + weak)
			effect->direction = ml_calculate_direction(
				effect->direction,
				effect->u.rumble.weak_magnitude,
				new->direction, weak);
		else
			effect->direction = 0;
		/* saturating add of magnitudes */
		effect->u.rumble.strong_magnitude =
			min(strong + effect->u.rumble.strong_magnitude,
			    0xffffU);
		effect->u.rumble.weak_magnitude =
			min(weak + effect->u.rumble.weak_magnitude, 0xffffU);
		break;
	case FF_PERIODIC:
		/* periodic effects are emulated by driving both rumble motors */
		i = apply_envelope(state, abs(new->u.periodic.magnitude),
				   &new->u.periodic.envelope);
		/* here we also scale it 0x7fff => 0xffff */
		i = i * gain / 0x7fff;
		if (effect->u.rumble.strong_magnitude + i)
			effect->direction = ml_calculate_direction(
				effect->direction,
				effect->u.rumble.strong_magnitude,
				new->direction, i);
		else
			effect->direction = 0;
		effect->u.rumble.strong_magnitude =
			min(i + effect->u.rumble.strong_magnitude, 0xffffU);
		effect->u.rumble.weak_magnitude =
			min(i + effect->u.rumble.weak_magnitude, 0xffffU);
		break;
	default:
		pr_err("invalid type in ml_combine_effects()\n");
		break;
	}
}
/*
 * Because memoryless devices have only one effect per effect type active
 * at one time we have to combine multiple effects into one.
 * Builds one combined effect of a single compatible type out of all
 * started, unhandled effects; marks consumed slots in @effect_handled.
 * Returns non-zero while there is a combined effect to play.
 */
static int ml_get_combo_effect(struct ml_device *ml,
			       unsigned long *effect_handled,
			       struct ff_effect *combo_effect)
{
	struct ff_effect *effect;
	struct ml_effect_state *state;
	int effect_type;
	int i;
	memset(combo_effect, 0, sizeof(struct ff_effect));
	for (i = 0; i < FF_MEMLESS_EFFECTS; i++) {
		/* skip effects already merged on a previous pass */
		if (__test_and_set_bit(i, effect_handled))
			continue;
		state = &ml->states[i];
		effect = state->effect;
		if (!test_bit(FF_EFFECT_STARTED, &state->flags))
			continue;
		if (time_before(jiffies, state->play_at))
			continue;
		/*
		 * here we have started effects that are either
		 * currently playing (and may need be aborted)
		 * or need to start playing.
		 */
		effect_type = get_compatible_type(ml->dev->ff, effect->type);
		if (combo_effect->type != effect_type) {
			if (combo_effect->type != 0) {
				/* different type: leave for a later pass */
				__clear_bit(i, effect_handled);
				continue;
			}
			combo_effect->type = effect_type;
		}
		if (__test_and_clear_bit(FF_EFFECT_ABORTING, &state->flags)) {
			__clear_bit(FF_EFFECT_PLAYING, &state->flags);
			__clear_bit(FF_EFFECT_STARTED, &state->flags);
		} else if (effect->replay.length &&
			   time_after_eq(jiffies, state->stop_at)) {
			/* this iteration finished; loop again or retire */
			__clear_bit(FF_EFFECT_PLAYING, &state->flags);
			if (--state->count <= 0) {
				__clear_bit(FF_EFFECT_STARTED, &state->flags);
			} else {
				state->play_at = jiffies +
						 msecs_to_jiffies(effect->replay.delay);
				state->stop_at = state->play_at +
						 msecs_to_jiffies(effect->replay.length);
			}
		} else {
			__set_bit(FF_EFFECT_PLAYING, &state->flags);
			state->adj_at = jiffies;
			ml_combine_effects(combo_effect, state, ml->gain);
		}
	}
	return combo_effect->type != 0;
}
static void ml_play_effects(struct ml_device *ml)
{
struct ff_effect effect;
DECLARE_BITMAP(handled_bm, FF_MEMLESS_EFFECTS);
memset(handled_bm, 0, sizeof(handled_bm));
while (ml_get_combo_effect(ml, handled_bm, &effect))
ml->play_effect(ml->dev, ml->private, &effect);
ml_schedule_timer(ml);
}
/* Timer callback: refresh all effects. Takes dev->event_lock because
 * ml_play_effects() expects to run under it (same as the input paths).
 */
static void ml_effect_timer(struct timer_list *t)
{
	struct ml_device *ml = from_timer(ml, t, timer);
	struct input_dev *dev = ml->dev;
	unsigned long flags;
	pr_debug("timer: updating effects\n");
	spin_lock_irqsave(&dev->event_lock, flags);
	ml_play_effects(ml);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
/*
 * Sets requested gain for FF effects. Called with dev->event_lock held.
 * Clearing the PLAYING bits forces every started effect to be re-sent
 * to the hardware with the new gain applied.
 */
static void ml_ff_set_gain(struct input_dev *dev, u16 gain)
{
	struct ml_device *ml = dev->ff->private;
	int idx;

	ml->gain = gain;

	for (idx = 0; idx < FF_MEMLESS_EFFECTS; idx++)
		__clear_bit(FF_EFFECT_PLAYING, &ml->states[idx].flags);

	ml_play_effects(ml);
}
/*
 * Start/stop specified FF effect. Called with dev->event_lock held.
 * @value > 0 starts the effect with that repeat count; 0 stops it.
 * Always returns 0.
 */
static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
{
	struct ml_device *ml = dev->ff->private;
	struct ml_effect_state *state = &ml->states[effect_id];
	if (value > 0) {
		pr_debug("initiated play\n");
		__set_bit(FF_EFFECT_STARTED, &state->flags);
		state->count = value;
		/* schedule start/stop from the effect's replay parameters */
		state->play_at = jiffies +
				 msecs_to_jiffies(state->effect->replay.delay);
		state->stop_at = state->play_at +
				 msecs_to_jiffies(state->effect->replay.length);
		state->adj_at = state->play_at;
	} else {
		pr_debug("initiated stop\n");
		/* a playing effect must be aborted via the driver first */
		if (test_bit(FF_EFFECT_PLAYING, &state->flags))
			__set_bit(FF_EFFECT_ABORTING, &state->flags);
		else
			__clear_bit(FF_EFFECT_STARTED, &state->flags);
	}
	ml_play_effects(ml);
	return 0;
}
/* Upload (replace) an effect. If the slot is currently started, restart
 * its timing so the updated parameters take effect; otherwise nothing to
 * do until playback is requested. Always returns 0.
 */
static int ml_ff_upload(struct input_dev *dev,
			struct ff_effect *effect, struct ff_effect *old)
{
	struct ml_device *ml = dev->ff->private;
	struct ml_effect_state *state = &ml->states[effect->id];
	spin_lock_irq(&dev->event_lock);
	if (test_bit(FF_EFFECT_STARTED, &state->flags)) {
		/* drop PLAYING so the effect is re-sent with new parameters */
		__clear_bit(FF_EFFECT_PLAYING, &state->flags);
		state->play_at = jiffies +
				 msecs_to_jiffies(state->effect->replay.delay);
		state->stop_at = state->play_at +
				 msecs_to_jiffies(state->effect->replay.length);
		state->adj_at = state->play_at;
		ml_schedule_timer(ml);
	}
	spin_unlock_irq(&dev->event_lock);
	return 0;
}
/* ff_device destructor: stop the update timer and free the ml_device
 * (which was stored in ff->private by input_ff_create_memless()).
 */
static void ml_ff_destroy(struct ff_device *ff)
{
	struct ml_device *ml = ff->private;
	/*
	 * Even though we stop all playing effects when tearing down
	 * an input device (via input_device_flush() that calls into
	 * input_ff_flush() that stops and erases all effects), we
	 * do not actually stop the timer, and therefore we should
	 * do it here.
	 */
	del_timer_sync(&ml->timer);
	/* ml->private holds the ml_device allocation itself, not driver data */
	kfree(ml->private);
}
/**
 * input_ff_create_memless() - create memoryless force-feedback device
 * @dev: input device supporting force-feedback
 * @data: driver-specific data to be passed into @play_effect
 * @play_effect: driver-specific method for playing FF effect
 *
 * Sets up the memless emulation layer on top of input_ff_create() and
 * wires its upload/playback/gain/destroy callbacks. Returns 0 on success
 * or a negative errno.
 */
int input_ff_create_memless(struct input_dev *dev, void *data,
		int (*play_effect)(struct input_dev *, void *, struct ff_effect *))
{
	struct ml_device *ml;
	struct ff_device *ff;
	int error;
	int i;
	ml = kzalloc(sizeof(struct ml_device), GFP_KERNEL);
	if (!ml)
		return -ENOMEM;
	ml->dev = dev;
	ml->private = data;
	ml->play_effect = play_effect;
	/* default gain: full strength */
	ml->gain = 0xffff;
	timer_setup(&ml->timer, ml_effect_timer, 0);
	set_bit(FF_GAIN, dev->ffbit);
	error = input_ff_create(dev, FF_MEMLESS_EFFECTS);
	if (error) {
		kfree(ml);
		return error;
	}
	ff = dev->ff;
	ff->private = ml;
	ff->upload = ml_ff_upload;
	ff->playback = ml_ff_playback;
	ff->set_gain = ml_ff_set_gain;
	ff->destroy = ml_ff_destroy;
	/* we can emulate periodic effects with RUMBLE */
	if (test_bit(FF_RUMBLE, ff->ffbit)) {
		set_bit(FF_PERIODIC, dev->ffbit);
		set_bit(FF_SINE, dev->ffbit);
		set_bit(FF_TRIANGLE, dev->ffbit);
		set_bit(FF_SQUARE, dev->ffbit);
	}
	/* each state slot tracks the corresponding uploaded effect */
	for (i = 0; i < FF_MEMLESS_EFFECTS; i++)
		ml->states[i].effect = &ff->effects[i];
	return 0;
}
EXPORT_SYMBOL_GPL(input_ff_create_memless);
|
linux-master
|
drivers/input/ff-memless.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Helpers for matrix keyboard bindings
*
* Copyright (C) 2012 Google, Inc
*
* Author:
* Olof Johansson <[email protected]>
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/types.h>
/* Decode one KEY() entry into the keymap and mark the keycode capability.
 * Returns false (with an error log) when the row/col is out of range.
 */
static bool matrix_keypad_map_key(struct input_dev *input_dev,
				  unsigned int rows, unsigned int cols,
				  unsigned int row_shift, unsigned int key)
{
	unsigned short *keymap = input_dev->keycode;
	unsigned int row = KEY_ROW(key);
	unsigned int col = KEY_COL(key);
	unsigned short code = KEY_VAL(key);
	if (row >= rows || col >= cols) {
		dev_err(input_dev->dev.parent,
			"%s: invalid keymap entry 0x%x (row: %d, col: %d, rows: %d, cols: %d)\n",
			__func__, key, row, col, rows, cols);
		return false;
	}
	keymap[MATRIX_SCAN_CODE(row, col, row_shift)] = code;
	/* advertise the keycode on the input device */
	__set_bit(code, input_dev->keybit);
	return true;
}
/**
 * matrix_keypad_parse_properties() - Read properties of matrix keypad
 *
 * @dev: Device containing properties
 * @rows: Returns number of matrix rows
 * @cols: Returns number of matrix columns
 * @return 0 if OK, <0 on error
 */
int matrix_keypad_parse_properties(struct device *dev,
				   unsigned int *rows, unsigned int *cols)
{
	*rows = 0;
	*cols = 0;

	/* Missing properties leave the outputs at zero */
	device_property_read_u32(dev, "keypad,num-rows", rows);
	device_property_read_u32(dev, "keypad,num-columns", cols);

	if (*rows == 0 || *cols == 0) {
		dev_err(dev, "number of keypad rows/columns not specified\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(matrix_keypad_parse_properties);
/* Read the keymap from the firmware property @propname (defaults to
 * "linux,keymap") and populate the input device's keymap with it.
 * Returns 0 on success or a negative errno.
 */
static int matrix_keypad_parse_keymap(const char *propname,
				      unsigned int rows, unsigned int cols,
				      struct input_dev *input_dev)
{
	struct device *dev = input_dev->dev.parent;
	unsigned int row_shift = get_count_order(cols);
	unsigned int max_keys = rows << row_shift;
	u32 *keys;
	int i;
	int size;
	int retval;
	if (!propname)
		propname = "linux,keymap";
	size = device_property_count_u32(dev, propname);
	if (size <= 0) {
		dev_err(dev, "missing or malformed property %s: %d\n",
			propname, size);
		return size < 0 ? size : -EINVAL;
	}
	if (size > max_keys) {
		dev_err(dev, "%s size overflow (%d vs max %u)\n",
			propname, size, max_keys);
		return -EINVAL;
	}
	/* temporary buffer for the raw KEY() encoded entries */
	keys = kmalloc_array(size, sizeof(u32), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;
	retval = device_property_read_u32_array(dev, propname, keys, size);
	if (retval) {
		dev_err(dev, "failed to read %s property: %d\n",
			propname, retval);
		goto out;
	}
	for (i = 0; i < size; i++) {
		if (!matrix_keypad_map_key(input_dev, rows, cols,
					   row_shift, keys[i])) {
			retval = -EINVAL;
			goto out;
		}
	}
	retval = 0;
out:
	kfree(keys);
	return retval;
}
/**
 * matrix_keypad_build_keymap - convert platform keymap into matrix keymap
 * @keymap_data: keymap supplied by the platform code
 * @keymap_name: name of device tree property containing keymap (if device
 *	tree support is enabled).
 * @rows: number of rows in target keymap array
 * @cols: number of cols in target keymap array
 * @keymap: expanded version of keymap that is suitable for use by
 * matrix keyboard driver
 * @input_dev: input devices for which we are setting up the keymap
 *
 * This function converts platform keymap (encoded with KEY() macro) into
 * an array of keycodes that is suitable for using in a standard matrix
 * keyboard driver that uses row and col as indices.
 *
 * If @keymap_data is not supplied and device tree support is enabled
 * it will attempt load the keymap from property specified by @keymap_name
 * argument (or "linux,keymap" if @keymap_name is %NULL).
 *
 * If @keymap is %NULL the function will automatically allocate managed
 * block of memory to store the keymap. This memory will be associated with
 * the parent device and automatically freed when device unbinds from the
 * driver.
 *
 * Callers are expected to set up input_dev->dev.parent before calling this
 * function.
 */
int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
			       const char *keymap_name,
			       unsigned int rows, unsigned int cols,
			       unsigned short *keymap,
			       struct input_dev *input_dev)
{
	unsigned int row_shift = get_count_order(cols);
	size_t max_keys = rows << row_shift;
	int i;
	int error;
	if (WARN_ON(!input_dev->dev.parent))
		return -EINVAL;
	if (!keymap) {
		/* managed allocation, freed automatically on driver unbind */
		keymap = devm_kcalloc(input_dev->dev.parent,
				      max_keys, sizeof(*keymap),
				      GFP_KERNEL);
		if (!keymap) {
			dev_err(input_dev->dev.parent,
				"Unable to allocate memory for keymap");
			return -ENOMEM;
		}
	}
	input_dev->keycode = keymap;
	input_dev->keycodesize = sizeof(*keymap);
	input_dev->keycodemax = max_keys;
	__set_bit(EV_KEY, input_dev->evbit);
	if (keymap_data) {
		/* platform-supplied keymap takes precedence */
		for (i = 0; i < keymap_data->keymap_size; i++) {
			unsigned int key = keymap_data->keymap[i];
			if (!matrix_keypad_map_key(input_dev, rows, cols,
						   row_shift, key))
				return -EINVAL;
		}
	} else {
		error = matrix_keypad_parse_keymap(keymap_name, rows, cols,
						   input_dev);
		if (error)
			return error;
	}
	/* KEY_RESERVED entries must not be advertised as capabilities */
	__clear_bit(KEY_RESERVED, input_dev->keybit);
	return 0;
}
EXPORT_SYMBOL(matrix_keypad_build_keymap);
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/matrix-keymap.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support for polling mode for input devices.
*/
#include <linux/device.h>
#include <linux/input.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "input-poller.h"
/* Polling state attached to an input device that uses polled mode */
struct input_dev_poller {
	void (*poll)(struct input_dev *dev);	/* driver's sampling callback */
	unsigned int poll_interval; /* msec */
	unsigned int poll_interval_max; /* msec */
	unsigned int poll_interval_min; /* msec */
	struct input_dev *input;
	struct delayed_work work;	/* re-queued after each poll */
};
/* Queue the next poll on the freezable system workqueue */
static void input_dev_poller_queue_work(struct input_dev_poller *poller)
{
	unsigned long delay;
	delay = msecs_to_jiffies(poller->poll_interval);
	/* for long intervals, align wakeups to reduce timer noise */
	if (delay >= HZ)
		delay = round_jiffies_relative(delay);
	queue_delayed_work(system_freezable_wq, &poller->work, delay);
}
/* Work handler: sample the device, then re-arm for the next interval */
static void input_dev_poller_work(struct work_struct *work)
{
	struct input_dev_poller *poller =
		container_of(work, struct input_dev_poller, work.work);
	poller->poll(poller->input);
	input_dev_poller_queue_work(poller);
}
/* Fill in default polling parameters that the driver left unset */
void input_dev_poller_finalize(struct input_dev_poller *poller)
{
	/* default interval: 500 ms */
	if (poller->poll_interval == 0)
		poller->poll_interval = 500;

	/* maximum defaults to the configured interval */
	if (poller->poll_interval_max == 0)
		poller->poll_interval_max = poller->poll_interval;
}
void input_dev_poller_start(struct input_dev_poller *poller)
{
	/* A zero interval means polling is disabled */
	if (poller->poll_interval == 0)
		return;

	/* Deliver one immediate sample, then continue periodically */
	poller->poll(poller->input);
	input_dev_poller_queue_work(poller);
}
/* Stop polling; waits for an in-flight poll to finish */
void input_dev_poller_stop(struct input_dev_poller *poller)
{
	cancel_delayed_work_sync(&poller->work);
}
/* Allocate and attach a poller to @dev, using @poll_fn as the sampling
 * callback. Returns 0 on success or -ENOMEM.
 */
int input_setup_polling(struct input_dev *dev,
			void (*poll_fn)(struct input_dev *dev))
{
	struct input_dev_poller *poller;
	poller = kzalloc(sizeof(*poller), GFP_KERNEL);
	if (!poller) {
		/*
		 * We want to show message even though kzalloc() may have
		 * printed backtrace as knowing what instance of input
		 * device we were dealing with is helpful.
		 */
		dev_err(dev->dev.parent ?: &dev->dev,
			"%s: unable to allocate poller structure\n", __func__);
		return -ENOMEM;
	}
	INIT_DELAYED_WORK(&poller->work, input_dev_poller_work);
	poller->input = dev;
	poller->poll = poll_fn;
	dev->poller = poller;
	return 0;
}
EXPORT_SYMBOL(input_setup_polling);
/* Helper for the interval setters: complain if polling was never set up */
static bool input_dev_ensure_poller(struct input_dev *dev)
{
	if (dev->poller)
		return true;

	dev_err(dev->dev.parent ?: &dev->dev,
		"poller structure has not been set up\n");
	return false;
}
/* Set the current polling interval (msec); no-op if polling is not set up */
void input_set_poll_interval(struct input_dev *dev, unsigned int interval)
{
	if (input_dev_ensure_poller(dev))
		dev->poller->poll_interval = interval;
}
EXPORT_SYMBOL(input_set_poll_interval);
/* Set the minimum user-selectable polling interval (msec) */
void input_set_min_poll_interval(struct input_dev *dev, unsigned int interval)
{
	if (input_dev_ensure_poller(dev))
		dev->poller->poll_interval_min = interval;
}
EXPORT_SYMBOL(input_set_min_poll_interval);
/* Set the maximum user-selectable polling interval (msec) */
void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval)
{
	if (input_dev_ensure_poller(dev))
		dev->poller->poll_interval_max = interval;
}
EXPORT_SYMBOL(input_set_max_poll_interval);
/* Return the current polling interval (msec), or -EINVAL if the device
 * does not use polled mode.
 */
int input_get_poll_interval(struct input_dev *dev)
{
	if (!dev->poller)
		return -EINVAL;
	return dev->poller->poll_interval;
}
EXPORT_SYMBOL(input_get_poll_interval);
/* SYSFS interface */
/* "poll" attribute show: current polling interval in msec */
static ssize_t input_dev_get_poll_interval(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct input_dev *input = to_input_dev(dev);
	return sprintf(buf, "%d\n", input->poller->poll_interval);
}
/* "poll" attribute store: validate against min/max, update the interval
 * and, if the device is currently open, restart polling with the new rate.
 */
static ssize_t input_dev_set_poll_interval(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct input_dev *input = to_input_dev(dev);
	struct input_dev_poller *poller = input->poller;
	unsigned int interval;
	int err;
	err = kstrtouint(buf, 0, &interval);
	if (err)
		return err;
	if (interval < poller->poll_interval_min)
		return -EINVAL;
	if (interval > poller->poll_interval_max)
		return -EINVAL;
	/* input->mutex serializes against open/close of the device */
	mutex_lock(&input->mutex);
	poller->poll_interval = interval;
	if (input_device_enabled(input)) {
		cancel_delayed_work_sync(&poller->work);
		/* interval 0 disables polling entirely */
		if (poller->poll_interval > 0)
			input_dev_poller_queue_work(poller);
	}
	mutex_unlock(&input->mutex);
	return count;
}
static DEVICE_ATTR(poll, 0644,
		   input_dev_get_poll_interval, input_dev_set_poll_interval);
/* "max" attribute show: maximum allowed polling interval in msec */
static ssize_t input_dev_get_poll_max(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct input_dev *input = to_input_dev(dev);
	return sprintf(buf, "%d\n", input->poller->poll_interval_max);
}
static DEVICE_ATTR(max, 0444, input_dev_get_poll_max, NULL);
/* "min" attribute show: minimum allowed polling interval in msec */
static ssize_t input_dev_get_poll_min(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct input_dev *input = to_input_dev(dev);
	return sprintf(buf, "%d\n", input->poller->poll_interval_min);
}
static DEVICE_ATTR(min, 0444, input_dev_get_poll_min, NULL);
/* Hide the poll/min/max attributes on devices that do not use polling */
static umode_t input_poller_attrs_visible(struct kobject *kobj,
					  struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct input_dev *input = to_input_dev(dev);

	if (!input->poller)
		return 0;

	return attr->mode;
}
/* sysfs attributes exposed on polled input devices */
static struct attribute *input_poller_attrs[] = {
	&dev_attr_poll.attr,
	&dev_attr_max.attr,
	&dev_attr_min.attr,
	NULL
};
/* group registered by the input core; visibility gated on dev->poller */
struct attribute_group input_poller_attribute_group = {
	.is_visible	= input_poller_attrs_visible,
	.attrs		= input_poller_attrs,
};
|
linux-master
|
drivers/input/input-poller.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Event char devices, giving access to raw input device events.
*
* Copyright (c) 1999-2002 Vojtech Pavlik
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define EVDEV_MINOR_BASE 64
#define EVDEV_MINORS 32
#define EVDEV_MIN_BUFFER_SIZE 64U
#define EVDEV_BUF_PACKETS 8
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input/mt.h>
#include <linux/major.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include "input-compat.h"
/* One evdev char device instance attached to an input device */
struct evdev {
	int open;			/* open count */
	struct input_handle handle;
	struct evdev_client __rcu *grab;	/* client holding EVIOCGRAB, if any */
	struct list_head client_list;
	spinlock_t client_lock; /* protects client_list */
	struct mutex mutex;	/* serializes open/close/ioctl paths */
	struct device dev;
	struct cdev cdev;
	bool exist;		/* cleared when the input device goes away */
};
/* Per-open-file state: a ring buffer of events plus filtering options */
struct evdev_client {
	unsigned int head;	/* ring buffer write position */
	unsigned int tail;	/* ring buffer read position */
	unsigned int packet_head; /* [future] position of the first element of next packet */
	spinlock_t buffer_lock; /* protects access to buffer, head and tail */
	wait_queue_head_t wait;
	struct fasync_struct *fasync;
	struct evdev *evdev;
	struct list_head node;
	enum input_clock_type clk_type;	/* timestamp clock chosen via ioctl */
	bool revoked;		/* EVIOCREVOKE was issued on this client */
	unsigned long *evmasks[EV_CNT];	/* [0] filters types, [t] filters codes */
	unsigned int bufsize;	/* ring size, always a power of two */
	struct input_event buffer[];
};
/* Number of valid event codes for @type, or 0 for unknown types */
static size_t evdev_get_mask_cnt(unsigned int type)
{
	static const size_t counts[EV_CNT] = {
		/* EV_SYN==0 is EV_CNT, _not_ SYN_CNT, see EVIOCGBIT */
		[EV_SYN]	= EV_CNT,
		[EV_KEY]	= KEY_CNT,
		[EV_REL]	= REL_CNT,
		[EV_ABS]	= ABS_CNT,
		[EV_MSC]	= MSC_CNT,
		[EV_SW]		= SW_CNT,
		[EV_LED]	= LED_CNT,
		[EV_SND]	= SND_CNT,
		[EV_FF]		= FF_CNT,
	};

	if (type >= EV_CNT)
		return 0;

	return counts[type];
}
/* requires the buffer lock to be held */
/* Return true when the client's event masks exclude (@type, @code) */
static bool __evdev_is_filtered(struct evdev_client *client,
				unsigned int type,
				unsigned int code)
{
	unsigned long *mask;
	size_t cnt;
	/* EV_SYN and unknown codes are never filtered */
	if (type == EV_SYN || type >= EV_CNT)
		return false;
	/* first test whether the type is filtered */
	mask = client->evmasks[0];
	if (mask && !test_bit(type, mask))
		return true;
	/* unknown values are never filtered */
	cnt = evdev_get_mask_cnt(type);
	if (!cnt || code >= cnt)
		return false;
	/* then test the per-type code mask, if one was installed */
	mask = client->evmasks[type];
	return mask && !test_bit(code, mask);
}
/* flush queued events of type @type, caller must hold client->buffer_lock */
/* Compacts the ring buffer in place: drops all events of @type and any
 * SYN_REPORT groups that become empty as a result.
 */
static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
{
	unsigned int i, head, num;
	unsigned int mask = client->bufsize - 1;
	bool is_report;
	struct input_event *ev;
	BUG_ON(type == EV_SYN);
	/* compaction writes back into the ring starting at the tail */
	head = client->tail;
	client->packet_head = client->tail;
	/* init to 1 so a leading SYN_REPORT will not be dropped */
	num = 1;
	for (i = client->tail; i != client->head; i = (i + 1) & mask) {
		ev = &client->buffer[i];
		is_report = ev->type == EV_SYN && ev->code == SYN_REPORT;
		if (ev->type == type) {
			/* drop matched entry */
			continue;
		} else if (is_report && !num) {
			/* drop empty SYN_REPORT groups */
			continue;
		} else if (head != i) {
			/* move entry to fill the gap */
			client->buffer[head] = *ev;
		}
		num++;
		head = (head + 1) & mask;
		if (is_report) {
			/* group complete: reset counter, advance packet head */
			num = 0;
			client->packet_head = head;
		}
	}
	client->head = head;
}
/* Append an EV_SYN/SYN_DROPPED event, timestamped with the client's
 * clock; caller must hold client->buffer_lock.
 */
static void __evdev_queue_syn_dropped(struct evdev_client *client)
{
	ktime_t *ev_time = input_get_timestamp(client->evdev->handle.dev);
	struct timespec64 ts = ktime_to_timespec64(ev_time[client->clk_type]);
	struct input_event ev;
	ev.input_event_sec = ts.tv_sec;
	ev.input_event_usec = ts.tv_nsec / NSEC_PER_USEC;
	ev.type = EV_SYN;
	ev.code = SYN_DROPPED;
	ev.value = 0;
	client->buffer[client->head++] = ev;
	/* bufsize is a power of two: wrap via mask */
	client->head &= client->bufsize - 1;
	if (unlikely(client->head == client->tail)) {
		/* drop queue but keep our SYN_DROPPED event */
		client->tail = (client->head - 1) & (client->bufsize - 1);
		client->packet_head = client->tail;
	}
}
/* Locked wrapper around __evdev_queue_syn_dropped() */
static void evdev_queue_syn_dropped(struct evdev_client *client)
{
	unsigned long flags;
	spin_lock_irqsave(&client->buffer_lock, flags);
	__evdev_queue_syn_dropped(client);
	spin_unlock_irqrestore(&client->buffer_lock, flags);
}
/* Switch the clock used to timestamp events for this client (EVIOCSCLOCKID).
 * Returns 0 on success or -EINVAL for an unsupported clock id.
 */
static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
{
	unsigned long flags;
	enum input_clock_type clk_type;
	switch (clkid) {
	case CLOCK_REALTIME:
		clk_type = INPUT_CLK_REAL;
		break;
	case CLOCK_MONOTONIC:
		clk_type = INPUT_CLK_MONO;
		break;
	case CLOCK_BOOTTIME:
		clk_type = INPUT_CLK_BOOT;
		break;
	default:
		return -EINVAL;
	}
	if (client->clk_type != clk_type) {
		client->clk_type = clk_type;
		/*
		 * Flush pending events and queue SYN_DROPPED event,
		 * but only if the queue is not empty.
		 * (mixed-clock timestamps in one queue would be meaningless)
		 */
		spin_lock_irqsave(&client->buffer_lock, flags);
		if (client->head != client->tail) {
			client->packet_head = client->head = client->tail;
			__evdev_queue_syn_dropped(client);
		}
		spin_unlock_irqrestore(&client->buffer_lock, flags);
	}
	return 0;
}
/*
 * Append one event to the client ring buffer, handling overflow by
 * dropping old events and queueing SYN_DROPPED.  Caller must hold
 * client->buffer_lock.
 */
static void __pass_event(struct evdev_client *client,
			 const struct input_event *event)
{
	client->buffer[client->head++] = *event;
	client->head &= client->bufsize - 1;

	if (unlikely(client->head == client->tail)) {
		/*
		 * This effectively "drops" all unconsumed events, leaving
		 * EV_SYN/SYN_DROPPED plus the newest event in the queue.
		 */
		client->tail = (client->head - 2) & (client->bufsize - 1);

		client->buffer[client->tail] = (struct input_event) {
			.input_event_sec = event->input_event_sec,
			.input_event_usec = event->input_event_usec,
			.type = EV_SYN,
			.code = SYN_DROPPED,
			.value = 0,
		};

		client->packet_head = client->tail;
	}

	if (event->type == EV_SYN && event->code == SYN_REPORT) {
		/* Packet complete: expose it to readers and signal SIGIO. */
		client->packet_head = client->head;
		kill_fasync(&client->fasync, SIGIO, POLL_IN);
	}
}
/*
 * Deliver a batch of input values to one client, applying its event
 * mask and timestamping with the client's selected clock.  Wakes up
 * readers only when a non-empty packet is completed by SYN_REPORT.
 */
static void evdev_pass_values(struct evdev_client *client,
			const struct input_value *vals, unsigned int count,
			ktime_t *ev_time)
{
	const struct input_value *v;
	struct input_event event;
	struct timespec64 ts;
	bool wakeup = false;

	if (client->revoked)
		return;

	ts = ktime_to_timespec64(ev_time[client->clk_type]);
	event.input_event_sec = ts.tv_sec;
	event.input_event_usec = ts.tv_nsec / NSEC_PER_USEC;

	/* Interrupts are disabled, just acquire the lock. */
	spin_lock(&client->buffer_lock);

	for (v = vals; v != vals + count; v++) {
		if (__evdev_is_filtered(client, v->type, v->code))
			continue;

		if (v->type == EV_SYN && v->code == SYN_REPORT) {
			/* drop empty SYN_REPORT */
			if (client->packet_head == client->head)
				continue;

			wakeup = true;
		}

		event.type = v->type;
		event.code = v->code;
		event.value = v->value;
		__pass_event(client, &event);
	}

	spin_unlock(&client->buffer_lock);

	if (wakeup)
		wake_up_interruptible_poll(&client->wait,
			EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM);
}
/*
* Pass incoming events to all connected clients.
*/
/*
 * Pass incoming events to all connected clients, or only to the
 * grabbing client when the device is grabbed.
 */
static void evdev_events(struct input_handle *handle,
			 const struct input_value *vals, unsigned int count)
{
	struct evdev *evdev = handle->private;
	ktime_t *ev_time = input_get_timestamp(handle->dev);
	struct evdev_client *grabbed;

	rcu_read_lock();

	grabbed = rcu_dereference(evdev->grab);
	if (grabbed) {
		evdev_pass_values(grabbed, vals, count, ev_time);
	} else {
		struct evdev_client *c;

		list_for_each_entry_rcu(c, &evdev->client_list, node)
			evdev_pass_values(c, vals, count, ev_time);
	}

	rcu_read_unlock();
}
/*
* Pass incoming event to all connected clients.
*/
/* Single-event entry point: wrap the value and hand off to evdev_events(). */
static void evdev_event(struct input_handle *handle,
			unsigned int type, unsigned int code, int value)
{
	struct input_value single[] = { { type, code, value } };

	evdev_events(handle, single, 1);
}
/* Standard fasync hook: (de)register for SIGIO delivery. */
static int evdev_fasync(int fd, struct file *file, int on)
{
	struct evdev_client *c = file->private_data;

	return fasync_helper(fd, file, on, &c->fasync);
}
/* Device release callback: drop input device reference and free evdev. */
static void evdev_free(struct device *dev)
{
	struct evdev *evdev = container_of(dev, struct evdev, dev);

	input_put_device(evdev->handle.dev);
	kfree(evdev);
}
/*
* Grabs an event device (along with underlying input device).
* This function is called with evdev->mutex taken.
*/
/*
 * Grabs an event device (along with underlying input device) for
 * exclusive delivery to @client.  Called with evdev->mutex taken.
 */
static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
{
	int err;

	if (evdev->grab)
		return -EBUSY;

	err = input_grab_device(&evdev->handle);
	if (err)
		return err;

	rcu_assign_pointer(evdev->grab, client);

	return 0;
}
/*
 * Release a grab previously taken by @client.  Called with evdev->mutex
 * held (asserted via lockdep in rcu_dereference_protected).  Returns
 * -EINVAL if @client does not hold the grab.
 */
static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
{
	struct evdev_client *grab = rcu_dereference_protected(evdev->grab,
					lockdep_is_held(&evdev->mutex));

	if (grab != client)
		return -EINVAL;

	rcu_assign_pointer(evdev->grab, NULL);
	/* Wait for concurrent evdev_events() readers before releasing. */
	synchronize_rcu();
	input_release_device(&evdev->handle);

	return 0;
}
/* Add @client to the RCU-protected list of listeners. */
static void evdev_attach_client(struct evdev *evdev,
				struct evdev_client *client)
{
	spin_lock(&evdev->client_lock);
	list_add_tail_rcu(&client->node, &evdev->client_list);
	spin_unlock(&evdev->client_lock);
}
/*
 * Remove @client from the listener list and wait for in-flight RCU
 * readers so the caller may free it afterwards.
 */
static void evdev_detach_client(struct evdev *evdev,
				struct evdev_client *client)
{
	spin_lock(&evdev->client_lock);
	list_del_rcu(&client->node);
	spin_unlock(&evdev->client_lock);

	synchronize_rcu();
}
/*
 * Bump the open count and open the underlying input device on the
 * first open.  Fails with -ENODEV once the device has been marked
 * non-existent.
 */
static int evdev_open_device(struct evdev *evdev)
{
	int retval;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (!evdev->exist)
		retval = -ENODEV;
	else if (!evdev->open++) {
		/* First opener actually opens the input device. */
		retval = input_open_device(&evdev->handle);
		if (retval)
			evdev->open--;	/* undo the increment on failure */
	}

	mutex_unlock(&evdev->mutex);
	return retval;
}
/* Drop one open reference; last closer closes the input device. */
static void evdev_close_device(struct evdev *evdev)
{
	mutex_lock(&evdev->mutex);

	if (evdev->exist) {
		if (!--evdev->open)
			input_close_device(&evdev->handle);
	}

	mutex_unlock(&evdev->mutex);
}
/*
* Wake up users waiting for IO so they can disconnect from
* dead device.
*/
/*
 * Wake up users waiting for IO so they can disconnect from
 * dead device.
 */
static void evdev_hangup(struct evdev *evdev)
{
	struct evdev_client *c;

	spin_lock(&evdev->client_lock);

	list_for_each_entry(c, &evdev->client_list, node) {
		kill_fasync(&c->fasync, SIGIO, POLL_HUP);
		wake_up_interruptible_poll(&c->wait, EPOLLHUP | EPOLLERR);
	}

	spin_unlock(&evdev->client_lock);
}
/*
 * Release the file: flush, ungrab, detach from the client list, free
 * per-client masks and memory, and drop the device open count.
 */
static int evdev_release(struct inode *inode, struct file *file)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	unsigned int i;

	mutex_lock(&evdev->mutex);

	if (evdev->exist && !client->revoked)
		input_flush_device(&evdev->handle, file);

	/* Harmless if this client does not hold the grab (-EINVAL). */
	evdev_ungrab(evdev, client);

	mutex_unlock(&evdev->mutex);

	/* Waits for RCU readers; safe to free the client afterwards. */
	evdev_detach_client(evdev, client);

	for (i = 0; i < EV_CNT; ++i)
		bitmap_free(client->evmasks[i]);

	kvfree(client);

	evdev_close_device(evdev);

	return 0;
}
/*
 * Size the per-client ring buffer from the device's events-per-packet
 * hint, with a floor of EVDEV_MIN_BUFFER_SIZE, rounded up to a power
 * of two (required by the head/tail mask arithmetic).
 */
static unsigned int evdev_compute_buffer_size(struct input_dev *dev)
{
	unsigned int n_events = dev->hint_events_per_packet * EVDEV_BUF_PACKETS;

	if (n_events < EVDEV_MIN_BUFFER_SIZE)
		n_events = EVDEV_MIN_BUFFER_SIZE;

	return roundup_pow_of_two(n_events);
}
/*
 * Open handler: allocate a client (with its trailing ring buffer),
 * attach it, and open the underlying input device.
 */
static int evdev_open(struct inode *inode, struct file *file)
{
	struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
	unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
	struct evdev_client *client;
	int error;

	/* kvzalloc: buffer may be large; zeroed so masks/indices start clean */
	client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	init_waitqueue_head(&client->wait);
	client->bufsize = bufsize;
	spin_lock_init(&client->buffer_lock);
	client->evdev = evdev;
	evdev_attach_client(evdev, client);

	error = evdev_open_device(evdev);
	if (error)
		goto err_free_client;

	file->private_data = client;
	stream_open(inode, file);

	return 0;

 err_free_client:
	/* detach waits for RCU readers before we free the client */
	evdev_detach_client(evdev, client);
	kvfree(client);
	return error;
}
/*
 * Write handler: inject whole input_events from userspace into the
 * device.  Partial trailing bytes are ignored; count == 0 is allowed
 * and only performs the error checks.
 */
static ssize_t evdev_write(struct file *file, const char __user *buffer,
			   size_t count, loff_t *ppos)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_event event;
	int retval = 0;

	/* must supply at least one full event (unless count == 0) */
	if (count != 0 && count < input_event_size())
		return -EINVAL;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (!evdev->exist || client->revoked) {
		retval = -ENODEV;
		goto out;
	}

	while (retval + input_event_size() <= count) {
		if (input_event_from_user(buffer + retval, &event)) {
			retval = -EFAULT;
			goto out;
		}
		retval += input_event_size();

		input_inject_event(&evdev->handle,
				   event.type, event.code, event.value);
		/* be nice to the rest of the system on long writes */
		cond_resched();
	}

 out:
	mutex_unlock(&evdev->mutex);
	return retval;
}
/*
 * Pop the next event from the client queue into @event.  Only events
 * belonging to completed packets (up to packet_head) are handed out.
 * Returns non-zero when an event was fetched.
 */
static int evdev_fetch_next_event(struct evdev_client *client,
				  struct input_event *event)
{
	int fetched = 0;

	spin_lock_irq(&client->buffer_lock);

	if (client->packet_head != client->tail) {
		*event = client->buffer[client->tail];
		client->tail = (client->tail + 1) & (client->bufsize - 1);
		fetched = 1;
	}

	spin_unlock_irq(&client->buffer_lock);

	return fetched;
}
/*
 * Read handler: copy out as many complete events as fit into @count
 * bytes, blocking (unless O_NONBLOCK) until at least one complete
 * packet is available, the device dies, or access is revoked.
 */
static ssize_t evdev_read(struct file *file, char __user *buffer,
			  size_t count, loff_t *ppos)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_event event;
	size_t read = 0;
	int error;

	if (count != 0 && count < input_event_size())
		return -EINVAL;

	for (;;) {
		if (!evdev->exist || client->revoked)
			return -ENODEV;

		if (client->packet_head == client->tail &&
		    (file->f_flags & O_NONBLOCK))
			return -EAGAIN;

		/*
		 * count == 0 is special - no IO is done but we check
		 * for error conditions (see above).
		 */
		if (count == 0)
			break;

		while (read + input_event_size() <= count &&
		       evdev_fetch_next_event(client, &event)) {

			if (input_event_to_user(buffer + read, &event))
				return -EFAULT;

			read += input_event_size();
		}

		if (read)
			break;

		if (!(file->f_flags & O_NONBLOCK)) {
			/* sleep until data, device death, or revocation */
			error = wait_event_interruptible(client->wait,
					client->packet_head != client->tail ||
					!evdev->exist || client->revoked);
			if (error)
				return error;
		}
	}

	return read;
}
/* No kernel lock - fine */
/* No kernel lock - fine */
static __poll_t evdev_poll(struct file *file, poll_table *wait)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	__poll_t mask;

	poll_wait(file, &client->wait, wait);

	/* Writable while alive and not revoked; otherwise report hangup. */
	mask = (evdev->exist && !client->revoked) ?
		(EPOLLOUT | EPOLLWRNORM) : (EPOLLHUP | EPOLLERR);

	if (client->packet_head != client->tail)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
#ifdef CONFIG_COMPAT
#define BITS_PER_LONG_COMPAT (sizeof(compat_long_t) * 8)
#define BITS_TO_LONGS_COMPAT(x) ((((x) - 1) / BITS_PER_LONG_COMPAT) + 1)
#ifdef __BIG_ENDIAN
/*
 * Big-endian variant: copy a kernel bitmap to userspace, truncated to
 * @maxlen bytes.  For compat callers each native 64-bit long must be
 * emitted as two 32-bit compat longs in swapped order, hence the
 * "i + 1 - ((i % 2) << 1)" index (maps 0,1,2,3 -> 1,0,3,2).
 * Returns the number of bytes copied or -EFAULT.
 */
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len, i;

	if (compat) {
		len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
		if (len > maxlen)
			len = maxlen;

		for (i = 0; i < len / sizeof(compat_long_t); i++)
			if (copy_to_user((compat_long_t __user *) p + i,
					 (compat_long_t *) bits +
						i + 1 - ((i % 2) << 1),
					 sizeof(compat_long_t)))
				return -EFAULT;
	} else {
		len = BITS_TO_LONGS(maxbit) * sizeof(long);
		if (len > maxlen)
			len = maxlen;

		if (copy_to_user(p, bits, len))
			return -EFAULT;
	}

	return len;
}
/*
 * Big-endian variant: read a bitmap from userspace into @bits.  The
 * compat path undoes the half-word swap performed by bits_to_user();
 * an odd number of compat longs leaves the upper half of the last
 * native long untouched, so it is zeroed explicitly.
 * Returns the number of bytes consumed or a negative errno.
 */
static int bits_from_user(unsigned long *bits, unsigned int maxbit,
			  unsigned int maxlen, const void __user *p, int compat)
{
	int len, i;

	if (compat) {
		if (maxlen % sizeof(compat_long_t))
			return -EINVAL;

		len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
		if (len > maxlen)
			len = maxlen;

		for (i = 0; i < len / sizeof(compat_long_t); i++)
			if (copy_from_user((compat_long_t *) bits +
						i + 1 - ((i % 2) << 1),
					   (compat_long_t __user *) p + i,
					   sizeof(compat_long_t)))
				return -EFAULT;
		if (i % 2)
			/* zero the unwritten upper half of the last long */
			*((compat_long_t *) bits + i - 1) = 0;

	} else {
		if (maxlen % sizeof(long))
			return -EINVAL;

		len = BITS_TO_LONGS(maxbit) * sizeof(long);
		if (len > maxlen)
			len = maxlen;

		if (copy_from_user(bits, p, len))
			return -EFAULT;
	}

	return len;
}
#else
/*
 * Little-endian variant: compat and native layouts agree, so a single
 * bounded copy suffices.  Returns bytes copied or -EFAULT.
 */
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	size_t chunk_size = compat ? sizeof(compat_long_t) : sizeof(long);
	int len;

	len = compat ? BITS_TO_LONGS_COMPAT(maxbit) : BITS_TO_LONGS(maxbit);
	len *= chunk_size;
	if (len > maxlen)
		len = maxlen;

	return copy_to_user(p, bits, len) ? -EFAULT : len;
}
/*
 * Little-endian variant: read a bitmap from userspace.  @maxlen must
 * be a multiple of the caller's word size.  Returns bytes consumed or
 * a negative errno.
 */
static int bits_from_user(unsigned long *bits, unsigned int maxbit,
			  unsigned int maxlen, const void __user *p, int compat)
{
	int len;

	if (compat) {
		if (maxlen % sizeof(compat_long_t))
			return -EINVAL;
		len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
	} else {
		if (maxlen % sizeof(long))
			return -EINVAL;
		len = BITS_TO_LONGS(maxbit) * sizeof(long);
	}

	if (len > maxlen)
		len = maxlen;

	return copy_from_user(bits, p, len) ? -EFAULT : len;
}
#endif /* __BIG_ENDIAN */
#else
/* !CONFIG_COMPAT variant: @compat is ignored; plain bounded copy out. */
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int nbytes = BITS_TO_LONGS(maxbit) * sizeof(long);

	if (nbytes > maxlen)
		nbytes = maxlen;

	return copy_to_user(p, bits, nbytes) ? -EFAULT : nbytes;
}
/* !CONFIG_COMPAT variant: @compat is ignored; plain bounded copy in. */
static int bits_from_user(unsigned long *bits, unsigned int maxbit,
			  unsigned int maxlen, const void __user *p, int compat)
{
	int nbytes;

	if (maxlen % sizeof(long))
		return -EINVAL;

	nbytes = BITS_TO_LONGS(maxbit) * sizeof(long);
	if (nbytes > maxlen)
		nbytes = maxlen;

	return copy_from_user(bits, p, nbytes) ? -EFAULT : nbytes;
}
#endif /* CONFIG_COMPAT */
/*
 * Copy a NUL-terminated string to userspace, truncated to @maxlen
 * bytes.  Returns bytes copied, -ENOENT for a NULL string, or -EFAULT.
 */
static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
{
	size_t len;

	if (!str)
		return -ENOENT;

	len = strlen(str) + 1;	/* include the terminating NUL */
	if (len > maxlen)
		len = maxlen;

	return copy_to_user(p, str, len) ? -EFAULT : (int)len;
}
/*
 * EVIOCGBIT: report the capability bitmap for event @type (0 selects
 * the map of supported event types itself).  @size bounds the copy.
 */
static int handle_eviocgbit(struct input_dev *dev,
			    unsigned int type, unsigned int size,
			    void __user *p, int compat_mode)
{
	unsigned long *bits;
	int len;

	switch (type) {

	case      0: bits = dev->evbit;  len = EV_MAX;  break;
	case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
	case EV_REL: bits = dev->relbit; len = REL_MAX; break;
	case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
	case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
	case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
	case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
	case EV_FF:  bits = dev->ffbit;  len = FF_MAX;  break;
	case EV_SW:  bits = dev->swbit;  len = SW_MAX;  break;
	default: return -EINVAL;
	}

	return bits_to_user(bits, len, size, p, compat_mode);
}
/*
 * Legacy EVIOCGKEYCODE: userspace passes int[2] = { scancode, keycode }.
 * The scancode occupies the first int of ke.scancode[]; the resolved
 * keycode is written back to the second int.
 */
static int evdev_handle_get_keycode(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke = {
		.len	= sizeof(unsigned int),
		.flags	= 0,
	};
	int __user *ip = (int __user *)p;
	int error;

	/* legacy case */
	if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
		return -EFAULT;

	error = input_get_keycode(dev, &ke);
	if (error)
		return error;

	/* second int of the userspace pair receives the keycode */
	if (put_user(ke.keycode, ip + 1))
		return -EFAULT;

	return 0;
}
/*
 * EVIOCGKEYCODE_V2: full input_keymap_entry round-trip — read the
 * query, resolve it, write the whole structure back.
 */
static int evdev_handle_get_keycode_v2(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry entry;
	int error;

	if (copy_from_user(&entry, p, sizeof(entry)))
		return -EFAULT;

	error = input_get_keycode(dev, &entry);
	if (error)
		return error;

	return copy_to_user(p, &entry, sizeof(entry)) ? -EFAULT : 0;
}
/*
 * Legacy EVIOCSKEYCODE: userspace passes int[2] = { scancode, keycode }.
 */
static int evdev_handle_set_keycode(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke = {
		.len	= sizeof(unsigned int),
		.flags	= 0,
	};
	int __user *ip = (int __user *)p;

	/* first int: scancode, stored in ke.scancode[] */
	if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
		return -EFAULT;

	/* second int: new keycode */
	if (get_user(ke.keycode, ip + 1))
		return -EFAULT;

	return input_set_keycode(dev, &ke);
}
/*
 * EVIOCSKEYCODE_V2: validate the scancode length before letting the
 * input core install the new mapping.
 */
static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry entry;

	if (copy_from_user(&entry, p, sizeof(entry)))
		return -EFAULT;

	if (entry.len > sizeof(entry.scancode))
		return -EINVAL;

	return input_set_keycode(dev, &entry);
}
/*
* If we transfer state to the user, we should flush all pending events
* of the same type from the client's queue. Otherwise, they might end up
* with duplicate events, which can screw up client's state tracking.
* If bits_to_user fails after flushing the queue, we queue a SYN_DROPPED
* event so user-space will notice missing events.
*
* LOCKING:
* We need to take event_lock before buffer_lock to avoid dead-locks. But we
* need the even_lock only to guarantee consistent state. We can safely release
* it while flushing the queue. This allows input-core to handle filters while
* we flush the queue.
*/
static int evdev_handle_get_val(struct evdev_client *client,
				struct input_dev *dev, unsigned int type,
				unsigned long *bits, unsigned int maxbit,
				unsigned int maxlen, void __user *p,
				int compat)
{
	int ret;
	unsigned long *mem;

	mem = bitmap_alloc(maxbit, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/* event_lock before buffer_lock — see lock-ordering comment above */
	spin_lock_irq(&dev->event_lock);
	spin_lock(&client->buffer_lock);

	/* snapshot device state while it cannot change */
	bitmap_copy(mem, bits, maxbit);

	/* event_lock can be dropped; flushing needs only buffer_lock */
	spin_unlock(&dev->event_lock);

	__evdev_flush_queue(client, type);

	spin_unlock_irq(&client->buffer_lock);

	ret = bits_to_user(mem, maxbit, maxlen, p, compat);
	if (ret < 0)
		/* queue was already flushed; tell userspace events were lost */
		evdev_queue_syn_dropped(client);

	bitmap_free(mem);

	return ret;
}
/*
 * EVIOCGMTSLOTS: userspace passes { __u32 code; __s32 values[]; } and
 * receives the per-slot values for the requested ABS_MT_* code.
 */
static int evdev_handle_mt_request(struct input_dev *dev,
				   unsigned int size,
				   int __user *ip)
{
	const struct input_mt *mt = dev->mt;
	unsigned int code;
	int max_slots;
	int i;

	if (get_user(code, &ip[0]))
		return -EFAULT;

	if (!mt || !input_is_mt_value(code))
		return -EINVAL;

	/*
	 * NOTE(review): if size < sizeof(__u32) this subtraction wraps in
	 * unsigned arithmetic before the divide — presumably _IOC_SIZE
	 * guarantees size >= 4 here; confirm against the ioctl dispatch.
	 */
	max_slots = (size - sizeof(__u32)) / sizeof(__s32);
	for (i = 0; i < mt->num_slots && i < max_slots; i++) {
		int value = input_mt_get_value(&mt->slots[i], code);

		if (put_user(value, &ip[1 + i]))
			return -EFAULT;
	}

	return 0;
}
/*
 * EVIOCREVOKE: permanently cut this client off from the device.
 * The revoked flag is set first so readers/writers fail with -ENODEV,
 * then waiters are kicked so they observe it.
 */
static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
			struct file *file)
{
	client->revoked = true;
	evdev_ungrab(evdev, client);	/* no-op unless this client grabbed */
	input_flush_device(&evdev->handle, file);
	wake_up_interruptible_poll(&client->wait, EPOLLHUP | EPOLLERR);

	return 0;
}
/* must be called with evdev-mutex held */
static int evdev_set_mask(struct evdev_client *client,
			  unsigned int type,
			  const void __user *codes,
			  u32 codes_size,
			  int compat)
{
	unsigned long flags, *mask, *oldmask;
	size_t cnt;
	int error;

	/* we allow unknown types and 'codes_size > size' for forward-compat */
	cnt = evdev_get_mask_cnt(type);
	if (!cnt)
		return 0;	/* unknown type: silently accepted */

	mask = bitmap_zalloc(cnt, GFP_KERNEL);
	if (!mask)
		return -ENOMEM;

	error = bits_from_user(mask, cnt - 1, codes_size, codes, compat);
	if (error < 0) {
		bitmap_free(mask);
		return error;
	}

	/* swap in the new mask under buffer_lock; filter checks race-free */
	spin_lock_irqsave(&client->buffer_lock, flags);
	oldmask = client->evmasks[type];
	client->evmasks[type] = mask;
	spin_unlock_irqrestore(&client->buffer_lock, flags);

	bitmap_free(oldmask);

	return 0;
}
/* must be called with evdev-mutex held */
/*
 * EVIOCGMASK helper: copy the client's event mask for @type out to
 * userspace.  A client with no mask installed behaves as "all events
 * enabled", so a fake all-ones mask is synthesized.  Bytes beyond the
 * kernel mask size are zeroed for forward compatibility.
 *
 * Fix: the fill loop used a signed `int i` against the size_t
 * xfer_size (derived from a u32 codes_size), a signed/unsigned
 * comparison that could overflow for very large requests; use size_t.
 */
static int evdev_get_mask(struct evdev_client *client,
			  unsigned int type,
			  void __user *codes,
			  u32 codes_size,
			  int compat)
{
	unsigned long *mask;
	size_t cnt, size, xfer_size;
	size_t i;
	int error;

	/* we allow unknown types and 'codes_size > size' for forward-compat */
	cnt = evdev_get_mask_cnt(type);
	size = sizeof(unsigned long) * BITS_TO_LONGS(cnt);
	xfer_size = min_t(size_t, codes_size, size);

	if (cnt > 0) {
		mask = client->evmasks[type];
		if (mask) {
			error = bits_to_user(mask, cnt - 1,
					     xfer_size, codes, compat);
			if (error < 0)
				return error;
		} else {
			/* fake mask with all bits set */
			for (i = 0; i < xfer_size; i++)
				if (put_user(0xffU, (u8 __user *)codes + i))
					return -EFAULT;
		}
	}

	/* zero the tail userspace asked for beyond what the kernel has */
	if (xfer_size < codes_size)
		if (clear_user(codes + xfer_size, codes_size - xfer_size))
			return -EFAULT;

	return 0;
}
/*
 * Main ioctl dispatcher, called with evdev->mutex held and the device
 * known to exist.  Handles fixed-size commands first, then
 * size-parameterized ones, then the EVIOCGBIT/EVIOCGABS/EVIOCSABS
 * families whose command number encodes the event/axis code.
 */
static long evdev_do_ioctl(struct file *file, unsigned int cmd,
			   void __user *p, int compat_mode)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_dev *dev = evdev->handle.dev;
	struct input_absinfo abs;
	struct input_mask mask;
	struct ff_effect effect;
	int __user *ip = (int __user *)p;
	unsigned int i, t, u, v;
	unsigned int size;
	int error;

	/* First we check for fixed-length commands */
	switch (cmd) {

	case EVIOCGVERSION:
		return put_user(EV_VERSION, ip);

	case EVIOCGID:
		if (copy_to_user(p, &dev->id, sizeof(struct input_id)))
			return -EFAULT;
		return 0;

	case EVIOCGREP:
		/* autorepeat parameters: only valid if EV_REP is supported */
		if (!test_bit(EV_REP, dev->evbit))
			return -ENOSYS;
		if (put_user(dev->rep[REP_DELAY], ip))
			return -EFAULT;
		if (put_user(dev->rep[REP_PERIOD], ip + 1))
			return -EFAULT;
		return 0;

	case EVIOCSREP:
		if (!test_bit(EV_REP, dev->evbit))
			return -ENOSYS;
		if (get_user(u, ip))
			return -EFAULT;
		if (get_user(v, ip + 1))
			return -EFAULT;

		input_inject_event(&evdev->handle, EV_REP, REP_DELAY, u);
		input_inject_event(&evdev->handle, EV_REP, REP_PERIOD, v);

		return 0;

	case EVIOCRMFF:
		/* the effect id is passed in the argument itself */
		return input_ff_erase(dev, (int)(unsigned long) p, file);

	case EVIOCGEFFECTS:
		i = test_bit(EV_FF, dev->evbit) ?
				dev->ff->max_effects : 0;
		if (put_user(i, ip))
			return -EFAULT;
		return 0;

	case EVIOCGRAB:
		/* non-zero arg grabs, zero arg ungrabs */
		if (p)
			return evdev_grab(evdev, client);
		else
			return evdev_ungrab(evdev, client);

	case EVIOCREVOKE:
		if (p)
			return -EINVAL;
		else
			return evdev_revoke(evdev, client, file);

	case EVIOCGMASK: {
		void __user *codes_ptr;

		if (copy_from_user(&mask, p, sizeof(mask)))
			return -EFAULT;

		codes_ptr = (void __user *)(unsigned long)mask.codes_ptr;
		return evdev_get_mask(client,
				      mask.type, codes_ptr, mask.codes_size,
				      compat_mode);
	}

	case EVIOCSMASK: {
		const void __user *codes_ptr;

		if (copy_from_user(&mask, p, sizeof(mask)))
			return -EFAULT;

		codes_ptr = (const void __user *)(unsigned long)mask.codes_ptr;
		return evdev_set_mask(client,
				      mask.type, codes_ptr, mask.codes_size,
				      compat_mode);
	}

	case EVIOCSCLOCKID:
		if (copy_from_user(&i, p, sizeof(unsigned int)))
			return -EFAULT;

		return evdev_set_clk_type(client, i);

	case EVIOCGKEYCODE:
		return evdev_handle_get_keycode(dev, p);

	case EVIOCSKEYCODE:
		return evdev_handle_set_keycode(dev, p);

	case EVIOCGKEYCODE_V2:
		return evdev_handle_get_keycode_v2(dev, p);

	case EVIOCSKEYCODE_V2:
		return evdev_handle_set_keycode_v2(dev, p);
	}

	size = _IOC_SIZE(cmd);

	/* Now check variable-length commands */
#define EVIOC_MASK_SIZE(nr)	((nr) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
	switch (EVIOC_MASK_SIZE(cmd)) {

	case EVIOCGPROP(0):
		return bits_to_user(dev->propbit, INPUT_PROP_MAX,
				    size, p, compat_mode);

	case EVIOCGMTSLOTS(0):
		return evdev_handle_mt_request(dev, size, ip);

	case EVIOCGKEY(0):
		return evdev_handle_get_val(client, dev, EV_KEY, dev->key,
					    KEY_MAX, size, p, compat_mode);

	case EVIOCGLED(0):
		return evdev_handle_get_val(client, dev, EV_LED, dev->led,
					    LED_MAX, size, p, compat_mode);

	case EVIOCGSND(0):
		return evdev_handle_get_val(client, dev, EV_SND, dev->snd,
					    SND_MAX, size, p, compat_mode);

	case EVIOCGSW(0):
		return evdev_handle_get_val(client, dev, EV_SW, dev->sw,
					    SW_MAX, size, p, compat_mode);

	case EVIOCGNAME(0):
		return str_to_user(dev->name, size, p);

	case EVIOCGPHYS(0):
		return str_to_user(dev->phys, size, p);

	case EVIOCGUNIQ(0):
		return str_to_user(dev->uniq, size, p);

	case EVIOC_MASK_SIZE(EVIOCSFF):
		if (input_ff_effect_from_user(p, size, &effect))
			return -EFAULT;

		error = input_ff_upload(dev, &effect, file);
		if (error)
			return error;

		/* report the (possibly newly assigned) effect id back */
		if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
			return -EFAULT;

		return 0;
	}

	/* Multi-number variable-length handlers */
	if (_IOC_TYPE(cmd) != 'E')
		return -EINVAL;

	if (_IOC_DIR(cmd) == _IOC_READ) {

		/* EVIOCGBIT(type, len): type encoded in the command number */
		if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0)))
			return handle_eviocgbit(dev,
						_IOC_NR(cmd) & EV_MAX, size,
						p, compat_mode);

		/* EVIOCGABS(axis): axis encoded in the command number */
		if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {

			if (!dev->absinfo)
				return -EINVAL;

			t = _IOC_NR(cmd) & ABS_MAX;
			abs = dev->absinfo[t];

			if (copy_to_user(p, &abs, min_t(size_t,
					size, sizeof(struct input_absinfo))))
				return -EFAULT;

			return 0;
		}
	}

	if (_IOC_DIR(cmd) == _IOC_WRITE) {

		if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {

			if (!dev->absinfo)
				return -EINVAL;

			t = _IOC_NR(cmd) & ABS_MAX;

			if (copy_from_user(&abs, p, min_t(size_t,
					size, sizeof(struct input_absinfo))))
				return -EFAULT;

			/* older userspace may pass a struct without resolution */
			if (size < sizeof(struct input_absinfo))
				abs.resolution = 0;

			/* We can't change number of reserved MT slots */
			if (t == ABS_MT_SLOT)
				return -EINVAL;

			/*
			 * Take event lock to ensure that we are not
			 * changing device parameters in the middle
			 * of event.
			 */
			spin_lock_irq(&dev->event_lock);
			dev->absinfo[t] = abs;
			spin_unlock_irq(&dev->event_lock);

			return 0;
		}
	}

	return -EINVAL;
}
/*
 * Common ioctl entry: serialize against open/close/disconnect via
 * evdev->mutex and reject dead or revoked clients before dispatching.
 */
static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
				void __user *p, int compat_mode)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	int retval;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (!evdev->exist || client->revoked) {
		retval = -ENODEV;
		goto out;
	}

	retval = evdev_do_ioctl(file, cmd, p, compat_mode);

 out:
	mutex_unlock(&evdev->mutex);
	return retval;
}
static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return evdev_ioctl_handler(file, cmd, (void __user *)arg, 0);
}
#ifdef CONFIG_COMPAT
static long evdev_ioctl_compat(struct file *file,
unsigned int cmd, unsigned long arg)
{
return evdev_ioctl_handler(file, cmd, compat_ptr(arg), 1);
}
#endif
/* File operations for /dev/input/eventN character devices. */
static const struct file_operations evdev_fops = {
	.owner		= THIS_MODULE,
	.read		= evdev_read,
	.write		= evdev_write,
	.poll		= evdev_poll,
	.open		= evdev_open,
	.release	= evdev_release,
	.unlocked_ioctl	= evdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= evdev_ioctl_compat,
#endif
	.fasync		= evdev_fasync,
	.llseek		= no_llseek,	/* event stream: seeking is meaningless */
};
/*
* Mark device non-existent. This disables writes, ioctls and
* prevents new users from opening the device. Already posted
* blocking reads will stay, however new ones will fail.
*/
/*
 * Mark device non-existent under evdev->mutex so in-flight operations
 * serialized on the mutex observe the flag consistently.
 */
static void evdev_mark_dead(struct evdev *evdev)
{
	mutex_lock(&evdev->mutex);
	evdev->exist = false;
	mutex_unlock(&evdev->mutex);
}
/*
 * Tear down a dying evdev: mark it dead, wake up all waiters, and
 * close the underlying input device if any client still had it open.
 */
static void evdev_cleanup(struct evdev *evdev)
{
	struct input_handle *handle = &evdev->handle;

	evdev_mark_dead(evdev);
	evdev_hangup(evdev);

	/* evdev is marked dead so no one else accesses evdev->open */
	if (evdev->open) {
		input_flush_device(handle, NULL);
		input_close_device(handle);
	}
}
/*
* Create new evdev device. Note that input core serializes calls
* to connect and disconnect.
*/
/*
 * Create new evdev device. Note that input core serializes calls
 * to connect and disconnect.
 */
static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
			 const struct input_device_id *id)
{
	struct evdev *evdev;
	int minor;
	int dev_no;
	int error;

	minor = input_get_new_minor(EVDEV_MINOR_BASE, EVDEV_MINORS, true);
	if (minor < 0) {
		error = minor;
		pr_err("failed to reserve new minor: %d\n", error);
		return error;
	}

	evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL);
	if (!evdev) {
		error = -ENOMEM;
		goto err_free_minor;
	}

	INIT_LIST_HEAD(&evdev->client_list);
	spin_lock_init(&evdev->client_lock);
	mutex_init(&evdev->mutex);
	evdev->exist = true;

	dev_no = minor;
	/* Normalize device number if it falls into legacy range */
	if (dev_no < EVDEV_MINOR_BASE + EVDEV_MINORS)
		dev_no -= EVDEV_MINOR_BASE;
	dev_set_name(&evdev->dev, "event%d", dev_no);

	evdev->handle.dev = input_get_device(dev);	/* hold a reference */
	evdev->handle.name = dev_name(&evdev->dev);
	evdev->handle.handler = handler;
	evdev->handle.private = evdev;

	evdev->dev.devt = MKDEV(INPUT_MAJOR, minor);
	evdev->dev.class = &input_class;
	evdev->dev.parent = &dev->dev;
	evdev->dev.release = evdev_free;	/* frees evdev on last put */
	device_initialize(&evdev->dev);

	error = input_register_handle(&evdev->handle);
	if (error)
		goto err_free_evdev;

	cdev_init(&evdev->cdev, &evdev_fops);

	error = cdev_device_add(&evdev->cdev, &evdev->dev);
	if (error)
		goto err_cleanup_evdev;

	return 0;

 err_cleanup_evdev:
	evdev_cleanup(evdev);
	input_unregister_handle(&evdev->handle);
 err_free_evdev:
	put_device(&evdev->dev);	/* release callback frees evdev */
 err_free_minor:
	input_free_minor(minor);
	return error;
}
/*
 * Device went away: remove the char device first so no new opens can
 * race cleanup, then wake/close existing users and drop references.
 */
static void evdev_disconnect(struct input_handle *handle)
{
	struct evdev *evdev = handle->private;

	cdev_device_del(&evdev->cdev, &evdev->dev);

	evdev_cleanup(evdev);

	input_free_minor(MINOR(evdev->dev.devt));
	input_unregister_handle(handle);
	put_device(&evdev->dev);
}
/* evdev attaches to every input device in the system. */
static const struct input_device_id evdev_ids[] = {
	{ .driver_info = 1 },	/* Matches all devices */
	{ },			/* Terminating zero entry */
};

MODULE_DEVICE_TABLE(input, evdev_ids);
/* Input-core handler registration for the evdev interface. */
static struct input_handler evdev_handler = {
	.event		= evdev_event,		/* single-event path */
	.events		= evdev_events,		/* batched path */
	.connect	= evdev_connect,
	.disconnect	= evdev_disconnect,
	.legacy_minors	= true,
	.minor		= EVDEV_MINOR_BASE,
	.name		= "evdev",
	.id_table	= evdev_ids,
};
/* Module init: register the evdev handler with the input core. */
static int __init evdev_init(void)
{
	return input_register_handler(&evdev_handler);
}
/* Module exit: unregister the handler; input core tears down handles. */
static void __exit evdev_exit(void)
{
	input_unregister_handler(&evdev_handler);
}
module_init(evdev_init);
module_exit(evdev_exit);
MODULE_AUTHOR("Vojtech Pavlik <[email protected]>");
MODULE_DESCRIPTION("Input driver event char devices");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/evdev.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Joystick device driver for the input driver suite.
*
* Copyright (c) 1999-2002 Vojtech Pavlik
* Copyright (c) 1999 Colin Van Dyke
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/joystick.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/cdev.h>
MODULE_AUTHOR("Vojtech Pavlik <[email protected]>");
MODULE_DESCRIPTION("Joystick device interfaces");
MODULE_LICENSE("GPL");
#define JOYDEV_MINOR_BASE 0
#define JOYDEV_MINORS 16
#define JOYDEV_BUFFER_SIZE 64
/* Per-device state for one /dev/input/jsN node. */
struct joydev {
	int open;				/* open count */
	struct input_handle handle;
	wait_queue_head_t wait;			/* readers sleep here */
	struct list_head client_list;
	spinlock_t client_lock; /* protects client_list */
	struct mutex mutex;			/* serializes open/close/ioctl */
	struct device dev;
	struct cdev cdev;
	bool exist;				/* cleared on disconnect */

	struct js_corr corr[ABS_CNT];		/* per-axis correction */
	struct JS_DATA_SAVE_TYPE glue;		/* legacy v0.x interface state */
	int nabs;				/* number of mapped axes */
	int nkey;				/* number of mapped buttons */
	__u16 keymap[KEY_MAX - BTN_MISC + 1];	/* keycode -> button number */
	__u16 keypam[KEY_MAX - BTN_MISC + 1];	/* button number -> keycode */
	__u8 absmap[ABS_CNT];			/* axis code -> axis number */
	__u8 abspam[ABS_CNT];			/* axis number -> axis code */
	__s16 abs[ABS_CNT];			/* last corrected axis values */
};
/* Per-open-file state: a fixed-size ring of js_events. */
struct joydev_client {
	struct js_event buffer[JOYDEV_BUFFER_SIZE];
	int head;	/* producer index */
	int tail;	/* consumer index */
	int startup;	/* progress through initial state replay */
	spinlock_t buffer_lock; /* protects access to buffer, head and tail */
	struct fasync_struct *fasync;
	struct joydev *joydev;
	struct list_head node;
};
/*
 * Apply axis correction to a raw value and clamp the result to the
 * joystick API range [-32767, 32767].  Unknown correction types yield 0.
 */
static int joydev_correct(int value, struct js_corr *corr)
{
	switch (corr->type) {

	case JS_CORR_NONE:
		break;

	case JS_CORR_BROKEN:
		/* piecewise-linear with a dead zone between coef[0..1] */
		if (value > corr->coef[0]) {
			if (value < corr->coef[1])
				value = 0;
			else
				value = (corr->coef[3] *
					 (value - corr->coef[1])) >> 14;
		} else {
			value = (corr->coef[2] *
				 (value - corr->coef[0])) >> 14;
		}
		break;

	default:
		return 0;
	}

	return clamp(value, -32767, 32767);
}
/*
 * Queue one js_event for a client.  The write is only committed (head
 * advanced) once the client has finished its initial state replay; a
 * full ring restarts the replay so the client can resynchronize.
 */
static void joydev_pass_event(struct joydev_client *client,
			      struct js_event *event)
{
	struct joydev *joydev = client->joydev;

	/*
	 * IRQs already disabled, just acquire the lock
	 */
	spin_lock(&client->buffer_lock);

	client->buffer[client->head] = *event;

	if (client->startup == joydev->nabs + joydev->nkey) {
		client->head++;
		client->head &= JOYDEV_BUFFER_SIZE - 1;
		if (client->tail == client->head)
			client->startup = 0;	/* overflow: replay state */
	}

	spin_unlock(&client->buffer_lock);

	kill_fasync(&client->fasync, SIGIO, POLL_IN);
}
/*
 * Input-core event callback: translate EV_KEY/EV_ABS events into
 * js_events and fan them out to all clients.  Other event types,
 * autorepeat (value == 2), and unchanged axis values are ignored.
 */
static void joydev_event(struct input_handle *handle,
			 unsigned int type, unsigned int code, int value)
{
	struct joydev *joydev = handle->private;
	struct joydev_client *client;
	struct js_event event;

	switch (type) {

	case EV_KEY:
		if (code < BTN_MISC || value == 2)
			return;
		event.type = JS_EVENT_BUTTON;
		event.number = joydev->keymap[code - BTN_MISC];
		event.value = value;
		break;

	case EV_ABS:
		event.type = JS_EVENT_AXIS;
		event.number = joydev->absmap[code];
		event.value = joydev_correct(value,
					&joydev->corr[event.number]);
		if (event.value == joydev->abs[event.number])
			return;		/* no change after correction */
		joydev->abs[event.number] = event.value;
		break;

	default:
		return;
	}

	event.time = jiffies_to_msecs(jiffies);

	rcu_read_lock();
	list_for_each_entry_rcu(client, &joydev->client_list, node)
		joydev_pass_event(client, &event);
	rcu_read_unlock();

	wake_up_interruptible(&joydev->wait);
}
/* Standard fasync hook: (de)register for SIGIO delivery. */
static int joydev_fasync(int fd, struct file *file, int on)
{
	struct joydev_client *c = file->private_data;

	return fasync_helper(fd, file, on, &c->fasync);
}
/* Device release callback: drop input device reference and free joydev. */
static void joydev_free(struct device *dev)
{
	struct joydev *joydev = container_of(dev, struct joydev, dev);

	input_put_device(joydev->handle.dev);
	kfree(joydev);
}
/* Add @client to the RCU-protected list of listeners. */
static void joydev_attach_client(struct joydev *joydev,
				 struct joydev_client *client)
{
	spin_lock(&joydev->client_lock);
	list_add_tail_rcu(&client->node, &joydev->client_list);
	spin_unlock(&joydev->client_lock);
}
/*
 * Remove @client from the listener list and wait for in-flight RCU
 * readers so the caller may free it afterwards.
 */
static void joydev_detach_client(struct joydev *joydev,
				 struct joydev_client *client)
{
	spin_lock(&joydev->client_lock);
	list_del_rcu(&client->node);
	spin_unlock(&joydev->client_lock);

	synchronize_rcu();
}
/* Re-read and re-correct all mapped axes from current device state. */
static void joydev_refresh_state(struct joydev *joydev)
{
	struct input_dev *dev = joydev->handle.dev;
	int axis;

	for (axis = 0; axis < joydev->nabs; axis++) {
		int raw = input_abs_get_val(dev, joydev->abspam[axis]);

		joydev->abs[axis] = joydev_correct(raw, &joydev->corr[axis]);
	}
}
/*
 * Bump the open count; the first opener opens the input device and
 * snapshots current axis state.  Fails with -ENODEV once disconnected.
 */
static int joydev_open_device(struct joydev *joydev)
{
	int retval;

	retval = mutex_lock_interruptible(&joydev->mutex);
	if (retval)
		return retval;

	if (!joydev->exist)
		retval = -ENODEV;
	else if (!joydev->open++) {
		retval = input_open_device(&joydev->handle);
		if (retval)
			joydev->open--;	/* undo the increment on failure */
		else
			joydev_refresh_state(joydev);
	}

	mutex_unlock(&joydev->mutex);
	return retval;
}
/* Drop one open reference; last closer closes the input device. */
static void joydev_close_device(struct joydev *joydev)
{
	mutex_lock(&joydev->mutex);

	if (joydev->exist) {
		if (!--joydev->open)
			input_close_device(&joydev->handle);
	}

	mutex_unlock(&joydev->mutex);
}
/*
 * Wake up users waiting for IO so they can disconnect from
 * dead device.
 */
static void joydev_hangup(struct joydev *joydev)
{
	struct joydev_client *client;

	/* Post SIGIO/POLL_HUP to every attached client ... */
	spin_lock(&joydev->client_lock);
	list_for_each_entry(client, &joydev->client_list, node)
		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
	spin_unlock(&joydev->client_lock);

	/* ... and kick blocked readers/pollers out of joydev->wait. */
	wake_up_interruptible(&joydev->wait);
}
/*
 * Release (close) one client: detach it from the notification list
 * (which includes an RCU grace period) before freeing it, then drop
 * the device open reference.
 */
static int joydev_release(struct inode *inode, struct file *file)
{
	struct joydev_client *client = file->private_data;
	struct joydev *joydev = client->joydev;

	joydev_detach_client(joydev, client);
	kfree(client);

	joydev_close_device(joydev);

	return 0;
}
/*
 * Open entry point: allocate per-client state, attach it to the
 * device's client list and make sure the underlying input device
 * is opened.  On failure the client is detached and freed again.
 */
static int joydev_open(struct inode *inode, struct file *file)
{
	struct joydev *joydev =
			container_of(inode->i_cdev, struct joydev, cdev);
	struct joydev_client *client;
	int error;

	client = kzalloc(sizeof(struct joydev_client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	spin_lock_init(&client->buffer_lock);
	client->joydev = joydev;
	joydev_attach_client(joydev, client);

	error = joydev_open_device(joydev);
	if (error)
		goto err_free_client;

	file->private_data = client;
	/* Non-seekable stream semantics for read/poll. */
	stream_open(inode, file);

	return 0;

 err_free_client:
	joydev_detach_client(joydev, client);
	kfree(client);
	return error;
}
/*
 * Synthesize the next JS_EVENT_INIT event describing the device's
 * initial state (buttons first, then axes).  client->startup counts
 * how many such events have been sent so far.  Returns non-zero if
 * an event was produced in *event.
 */
static int joydev_generate_startup_event(struct joydev_client *client,
					 struct input_dev *input,
					 struct js_event *event)
{
	struct joydev *joydev = client->joydev;
	int have_event;

	spin_lock_irq(&client->buffer_lock);

	have_event = client->startup < joydev->nabs + joydev->nkey;

	if (have_event) {

		event->time = jiffies_to_msecs(jiffies);
		if (client->startup < joydev->nkey) {
			/* Button init events come first. */
			event->type = JS_EVENT_BUTTON | JS_EVENT_INIT;
			event->number = client->startup;
			event->value = !!test_bit(joydev->keypam[event->number],
						  input->key);
		} else {
			/* Then the (already corrected) axis values. */
			event->type = JS_EVENT_AXIS | JS_EVENT_INIT;
			event->number = client->startup - joydev->nkey;
			event->value = joydev->abs[event->number];
		}

		client->startup++;
	}

	spin_unlock_irq(&client->buffer_lock);

	return have_event;
}
/*
 * Pop the oldest event from the client's ring buffer into *event.
 * Returns non-zero if an event was available.  The buffer size is a
 * power of two, hence the mask on the tail index.
 */
static int joydev_fetch_next_event(struct joydev_client *client,
				   struct js_event *event)
{
	int have_event;

	spin_lock_irq(&client->buffer_lock);

	have_event = client->head != client->tail;
	if (have_event) {
		*event = client->buffer[client->tail++];
		client->tail &= JOYDEV_BUFFER_SIZE - 1;
	}

	spin_unlock_irq(&client->buffer_lock);

	return have_event;
}
/*
 * Old joystick interface
 */
/*
 * Serve a read in the legacy JS_DATA_TYPE format: a snapshot of up to
 * 32 buttons plus the first two axes, scaled by the JS_CORR shifts.
 * Reading in this format also resets the reader's event queue.
 */
static ssize_t joydev_0x_read(struct joydev_client *client,
			      struct input_dev *input,
			      char __user *buf)
{
	struct joydev *joydev = client->joydev;
	struct JS_DATA_TYPE data;
	int i;

	/* Hold the input core's event lock for a consistent snapshot. */
	spin_lock_irq(&input->event_lock);

	/*
	 * Get device state
	 */
	for (data.buttons = i = 0; i < 32 && i < joydev->nkey; i++)
		data.buttons |=
			test_bit(joydev->keypam[i], input->key) ? (1 << i) : 0;
	data.x = (joydev->abs[0] / 256 + 128) >> joydev->glue.JS_CORR.x;
	data.y = (joydev->abs[1] / 256 + 128) >> joydev->glue.JS_CORR.y;

	/*
	 * Reset reader's event queue
	 */
	spin_lock(&client->buffer_lock);
	client->startup = 0;
	client->tail = client->head;
	spin_unlock(&client->buffer_lock);

	spin_unlock_irq(&input->event_lock);

	if (copy_to_user(buf, &data, sizeof(struct JS_DATA_TYPE)))
		return -EFAULT;

	return sizeof(struct JS_DATA_TYPE);
}
/*
 * True when the client has anything to read: either unsent startup
 * (init) events or queued events in its ring buffer.
 */
static inline int joydev_data_pending(struct joydev_client *client)
{
	struct joydev *joydev = client->joydev;

	if (client->head != client->tail)
		return 1;

	return client->startup < joydev->nabs + joydev->nkey;
}
/*
 * Read entry point.  A read of exactly sizeof(struct JS_DATA_TYPE) is
 * served through the legacy 0.x interface; otherwise js_event records
 * are returned: pending startup (init) events first, then queued
 * events, as many as fit into the caller's buffer.
 */
static ssize_t joydev_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct joydev_client *client = file->private_data;
	struct joydev *joydev = client->joydev;
	struct input_dev *input = joydev->handle.dev;
	struct js_event event;
	int retval;

	if (!joydev->exist)
		return -ENODEV;

	if (count < sizeof(struct js_event))
		return -EINVAL;

	/* Legacy interface is selected purely by the read size. */
	if (count == sizeof(struct JS_DATA_TYPE))
		return joydev_0x_read(client, input, buf);

	if (!joydev_data_pending(client) && (file->f_flags & O_NONBLOCK))
		return -EAGAIN;

	/* Block until data arrives or the device goes away. */
	retval = wait_event_interruptible(joydev->wait,
			!joydev->exist || joydev_data_pending(client));
	if (retval)
		return retval;

	if (!joydev->exist)
		return -ENODEV;

	/* retval doubles as the byte count written so far (starts at 0). */
	while (retval + sizeof(struct js_event) <= count &&
	       joydev_generate_startup_event(client, input, &event)) {

		if (copy_to_user(buf + retval, &event, sizeof(struct js_event)))
			return -EFAULT;

		retval += sizeof(struct js_event);
	}

	while (retval + sizeof(struct js_event) <= count &&
	       joydev_fetch_next_event(client, &event)) {

		if (copy_to_user(buf + retval, &event, sizeof(struct js_event)))
			return -EFAULT;

		retval += sizeof(struct js_event);
	}

	return retval;
}
/* No kernel lock - fine */
/*
 * Poll entry point: readable when the client has pending data,
 * HUP/ERR once the device has been disconnected.
 */
static __poll_t joydev_poll(struct file *file, poll_table *wait)
{
	struct joydev_client *client = file->private_data;
	struct joydev *joydev = client->joydev;

	poll_wait(file, &joydev->wait, wait);
	return (joydev_data_pending(client) ? (EPOLLIN | EPOLLRDNORM) : 0) |
		(joydev->exist ? 0 : (EPOLLHUP | EPOLLERR));
}
/*
 * JSIOCSAXMAP: install a user-supplied axis remapping table.  The
 * user length is clamped to the size of abspam, the map is validated
 * (every entry must be a valid ABS_* code) before being committed,
 * and the reverse map (absmap) is rebuilt from it.
 */
static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
				     void __user *argp, size_t len)
{
	__u8 *abspam;
	int i;
	int retval = 0;

	len = min(len, sizeof(joydev->abspam));

	/* Validate the map. */
	abspam = memdup_user(argp, len);
	if (IS_ERR(abspam))
		return PTR_ERR(abspam);

	for (i = 0; i < len && i < joydev->nabs; i++) {
		if (abspam[i] > ABS_MAX) {
			retval = -EINVAL;
			goto out;
		}
	}

	memcpy(joydev->abspam, abspam, len);

	for (i = 0; i < joydev->nabs; i++)
		joydev->absmap[joydev->abspam[i]] = i;

 out:
	kfree(abspam);
	return retval;
}
/*
 * JSIOCSBTNMAP: install a user-supplied button remapping table.  The
 * length must be a whole number of __u16 entries and is clamped to
 * the size of keypam; entries must lie in [BTN_MISC, KEY_MAX].  On
 * success the reverse map (keymap) is rebuilt.
 */
static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
				      void __user *argp, size_t len)
{
	__u16 *keypam;
	int i;
	int retval = 0;

	if (len % sizeof(*keypam))
		return -EINVAL;

	len = min(len, sizeof(joydev->keypam));

	/* Validate the map. */
	keypam = memdup_user(argp, len);
	if (IS_ERR(keypam))
		return PTR_ERR(keypam);

	for (i = 0; i < (len / 2) && i < joydev->nkey; i++) {
		if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC) {
			retval = -EINVAL;
			goto out;
		}
	}

	memcpy(joydev->keypam, keypam, len);

	for (i = 0; i < joydev->nkey; i++)
		joydev->keymap[joydev->keypam[i] - BTN_MISC] = i;

 out:
	kfree(keypam);
	return retval;
}
/*
 * Common ioctl handling shared between the native and compat paths.
 * Caller holds joydev->mutex.  Returns -EINVAL for unknown commands.
 */
static int joydev_ioctl_common(struct joydev *joydev,
			       unsigned int cmd, void __user *argp)
{
	struct input_dev *dev = joydev->handle.dev;
	size_t len;
	int i;
	const char *name;

	/* Process fixed-sized commands. */
	switch (cmd) {

	case JS_SET_CAL:
		return copy_from_user(&joydev->glue.JS_CORR, argp,
				sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0;

	case JS_GET_CAL:
		return copy_to_user(argp, &joydev->glue.JS_CORR,
				sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0;

	case JS_SET_TIMEOUT:
		return get_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);

	case JS_GET_TIMEOUT:
		return put_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);

	case JSIOCGVERSION:
		return put_user(JS_VERSION, (__u32 __user *) argp);

	case JSIOCGAXES:
		return put_user(joydev->nabs, (__u8 __user *) argp);

	case JSIOCGBUTTONS:
		return put_user(joydev->nkey, (__u8 __user *) argp);

	case JSIOCSCORR:
		if (copy_from_user(joydev->corr, argp,
			      sizeof(joydev->corr[0]) * joydev->nabs))
			return -EFAULT;

		/* Re-correct current axis values with the new parameters. */
		for (i = 0; i < joydev->nabs; i++) {
			int val = input_abs_get_val(dev, joydev->abspam[i]);
			joydev->abs[i] = joydev_correct(val, &joydev->corr[i]);
		}
		return 0;

	case JSIOCGCORR:
		return copy_to_user(argp, joydev->corr,
			sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0;

	}

	/*
	 * Process variable-sized commands (the axis and button map commands
	 * are considered variable-sized to decouple them from the values of
	 * ABS_MAX and KEY_MAX).
	 */
	switch (cmd & ~IOCSIZE_MASK) {

	case (JSIOCSAXMAP & ~IOCSIZE_MASK):
		return joydev_handle_JSIOCSAXMAP(joydev, argp, _IOC_SIZE(cmd));

	case (JSIOCGAXMAP & ~IOCSIZE_MASK):
		len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->abspam));
		return copy_to_user(argp, joydev->abspam, len) ? -EFAULT : len;

	case (JSIOCSBTNMAP & ~IOCSIZE_MASK):
		return joydev_handle_JSIOCSBTNMAP(joydev, argp, _IOC_SIZE(cmd));

	case (JSIOCGBTNMAP & ~IOCSIZE_MASK):
		len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->keypam));
		return copy_to_user(argp, joydev->keypam, len) ? -EFAULT : len;

	case JSIOCGNAME(0):
		name = dev->name;
		if (!name)
			return 0;

		/* Copy at most the requested size, including the NUL. */
		len = min_t(size_t, _IOC_SIZE(cmd), strlen(name) + 1);
		return copy_to_user(argp, name, len) ? -EFAULT : len;
	}

	return -EINVAL;
}
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl path.  The legacy "glue" commands use long
 * fields whose layout differs between 32-bit and 64-bit ABIs, so
 * JS_SET/GET_TIMELIMIT and JS_SET/GET_ALL are translated through s32
 * and JS_DATA_SAVE_TYPE_32 here; everything else is layout-identical
 * and handled by joydev_ioctl_common().
 */
static long joydev_compat_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	struct joydev_client *client = file->private_data;
	struct joydev *joydev = client->joydev;
	void __user *argp = (void __user *)arg;
	s32 tmp32;
	struct JS_DATA_SAVE_TYPE_32 ds32;
	int retval;

	retval = mutex_lock_interruptible(&joydev->mutex);
	if (retval)
		return retval;

	if (!joydev->exist) {
		retval = -ENODEV;
		goto out;
	}

	switch (cmd) {

	case JS_SET_TIMELIMIT:
		retval = get_user(tmp32, (s32 __user *) arg);
		if (retval == 0)
			joydev->glue.JS_TIMELIMIT = tmp32;
		break;

	case JS_GET_TIMELIMIT:
		tmp32 = joydev->glue.JS_TIMELIMIT;
		retval = put_user(tmp32, (s32 __user *) arg);
		break;

	case JS_SET_ALL:
		/* Copy the 32-bit layout in, then expand field by field. */
		retval = copy_from_user(&ds32, argp,
					sizeof(ds32)) ? -EFAULT : 0;
		if (retval == 0) {
			joydev->glue.JS_TIMEOUT    = ds32.JS_TIMEOUT;
			joydev->glue.BUSY          = ds32.BUSY;
			joydev->glue.JS_EXPIRETIME = ds32.JS_EXPIRETIME;
			joydev->glue.JS_TIMELIMIT  = ds32.JS_TIMELIMIT;
			joydev->glue.JS_SAVE       = ds32.JS_SAVE;
			joydev->glue.JS_CORR       = ds32.JS_CORR;
		}
		break;

	case JS_GET_ALL:
		/* Narrow field by field into the 32-bit layout. */
		ds32.JS_TIMEOUT    = joydev->glue.JS_TIMEOUT;
		ds32.BUSY          = joydev->glue.BUSY;
		ds32.JS_EXPIRETIME = joydev->glue.JS_EXPIRETIME;
		ds32.JS_TIMELIMIT  = joydev->glue.JS_TIMELIMIT;
		ds32.JS_SAVE       = joydev->glue.JS_SAVE;
		ds32.JS_CORR       = joydev->glue.JS_CORR;

		retval = copy_to_user(argp, &ds32, sizeof(ds32)) ? -EFAULT : 0;
		break;

	default:
		retval = joydev_ioctl_common(joydev, cmd, argp);
		break;
	}

 out:
	mutex_unlock(&joydev->mutex);
	return retval;
}
#endif /* CONFIG_COMPAT */
/*
 * Native ioctl path.  Handles the legacy long-sized "glue" commands
 * directly and defers everything else to joydev_ioctl_common(), all
 * under joydev->mutex so ioctls see a consistent device state.
 */
static long joydev_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	struct joydev_client *client = file->private_data;
	struct joydev *joydev = client->joydev;
	void __user *argp = (void __user *)arg;
	int retval;

	retval = mutex_lock_interruptible(&joydev->mutex);
	if (retval)
		return retval;

	if (!joydev->exist) {
		retval = -ENODEV;
		goto out;
	}

	switch (cmd) {

	case JS_SET_TIMELIMIT:
		retval = get_user(joydev->glue.JS_TIMELIMIT,
				  (long __user *) arg);
		break;

	case JS_GET_TIMELIMIT:
		retval = put_user(joydev->glue.JS_TIMELIMIT,
				  (long __user *) arg);
		break;

	case JS_SET_ALL:
		retval = copy_from_user(&joydev->glue, argp,
					sizeof(joydev->glue)) ? -EFAULT : 0;
		break;

	case JS_GET_ALL:
		retval = copy_to_user(argp, &joydev->glue,
				      sizeof(joydev->glue)) ? -EFAULT : 0;
		break;

	default:
		retval = joydev_ioctl_common(joydev, cmd, argp);
		break;
	}
 out:
	mutex_unlock(&joydev->mutex);
	return retval;
}
/* File operations for the /dev/input/jsX character devices. */
static const struct file_operations joydev_fops = {
	.owner		= THIS_MODULE,
	.read		= joydev_read,
	.poll		= joydev_poll,
	.open		= joydev_open,
	.release	= joydev_release,
	.unlocked_ioctl	= joydev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= joydev_compat_ioctl,
#endif
	.fasync		= joydev_fasync,
	.llseek		= no_llseek,
};
/*
 * Mark device non-existent. This disables writes, ioctls and
 * prevents new users from opening the device. Already posted
 * blocking reads will stay, however new ones will fail.
 */
static void joydev_mark_dead(struct joydev *joydev)
{
	/* Take the mutex so in-flight ioctls/opens finish first. */
	mutex_lock(&joydev->mutex);
	joydev->exist = false;
	mutex_unlock(&joydev->mutex);
}
/*
 * Tear down an active joydev instance: mark it dead, notify all
 * clients, and close the input device if anyone still had it open.
 */
static void joydev_cleanup(struct joydev *joydev)
{
	struct input_handle *handle = &joydev->handle;

	joydev_mark_dead(joydev);
	joydev_hangup(joydev);

	/* joydev is marked dead so no one else accesses joydev->open */
	if (joydev->open)
		input_close_device(handle);
}
/*
 * These codes are copied from hid-ids.h, unfortunately there is no common
 * usb_ids/bt_ids.h header.
 */
#define USB_VENDOR_ID_SONY			0x054c
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER	0x0268
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER	0x05c4
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2	0x09cc
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE	0x0ba0

#define USB_VENDOR_ID_THQ			0x20d6
#define USB_DEVICE_ID_THQ_PS3_UDRAW		0xcb17

#define USB_VENDOR_ID_NINTENDO		0x057e
#define USB_DEVICE_ID_NINTENDO_JOYCONL	0x2006
#define USB_DEVICE_ID_NINTENDO_JOYCONR	0x2007
#define USB_DEVICE_ID_NINTENDO_PROCON	0x2009
#define USB_DEVICE_ID_NINTENDO_CHRGGRIP	0x200E

/*
 * Blacklist-table initializer matching a specific vendor/product pair
 * that also advertises INPUT_PROP_ACCELEROMETER.
 */
#define ACCEL_DEV(vnd, prd)						\
	{								\
		.flags = INPUT_DEVICE_ID_MATCH_VENDOR |			\
				INPUT_DEVICE_ID_MATCH_PRODUCT |		\
				INPUT_DEVICE_ID_MATCH_PROPBIT,		\
		.vendor = (vnd),					\
		.product = (prd),					\
		.propbit = { BIT_MASK(INPUT_PROP_ACCELEROMETER) },	\
	}
/* Devices joydev refuses to bind to; table is terminated by flags == 0. */
static const struct input_device_id joydev_blacklist[] = {
	/* Avoid touchpads and touchscreens */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
	},
	/* Avoid tablets, digitisers and similar devices */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_DIGI) },
	},
	/* Disable accelerometers on composite devices */
	ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER),
	ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
	ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
	ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
	ACCEL_DEV(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW),
	ACCEL_DEV(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_PROCON),
	ACCEL_DEV(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_CHRGGRIP),
	ACCEL_DEV(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_JOYCONL),
	ACCEL_DEV(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_JOYCONR),
	{ /* sentinel */ }
};
/*
 * Check the device against the table of devices joydev must not bind
 * to (touch devices, composite-device accelerometers, ...).
 */
static bool joydev_dev_is_blacklisted(struct input_dev *dev)
{
	const struct input_device_id *entry = joydev_blacklist;

	while (entry->flags) {
		if (input_match_device_id(dev, entry)) {
			dev_dbg(&dev->dev,
				"joydev: blacklisting '%s'\n", dev->name);
			return true;
		}
		entry++;
	}

	return false;
}
/*
 * Heuristically decide whether @dev is an absolute pointing device
 * (virtual mouse/tablet) rather than a joystick; see the detailed
 * criteria in the comment below.  jd_scratch is reused for the evbit,
 * absbit and keybit comparisons in turn.
 */
static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
{
	DECLARE_BITMAP(jd_scratch, KEY_CNT);
	bool ev_match = false;

	/* KEY_CNT must be the largest so the scratch bitmap fits all uses. */
	BUILD_BUG_ON(ABS_CNT > KEY_CNT || EV_CNT > KEY_CNT);

	/*
	 * Virtualization (VMware, etc) and remote management (HP
	 * ILO2) solutions use absolute coordinates for their virtual
	 * pointing devices so that there is one-to-one relationship
	 * between pointer position on the host screen and virtual
	 * guest screen, and so their mice use ABS_X, ABS_Y and 3
	 * primary button events. This clashes with what joydev
	 * considers to be joysticks (a device with at minimum ABS_X
	 * axis).
	 *
	 * Here we are trying to separate absolute mice from
	 * joysticks. A device is, for joystick detection purposes,
	 * considered to be an absolute mouse if the following is
	 * true:
	 *
	 * 1) Event types are exactly
	 *      EV_ABS, EV_KEY and EV_SYN
	 *    or
	 *      EV_ABS, EV_KEY, EV_SYN and EV_MSC
	 *    or
	 *      EV_ABS, EV_KEY, EV_SYN, EV_MSC and EV_REL.
	 * 2) Absolute events are exactly ABS_X and ABS_Y.
	 * 3) Keys are exactly BTN_LEFT, BTN_RIGHT and BTN_MIDDLE.
	 * 4) Device is not on "Amiga" bus.
	 */

	bitmap_zero(jd_scratch, EV_CNT);
	/* VMware VMMouse, HP ILO2 */
	__set_bit(EV_ABS, jd_scratch);
	__set_bit(EV_KEY, jd_scratch);
	__set_bit(EV_SYN, jd_scratch);
	if (bitmap_equal(jd_scratch, dev->evbit, EV_CNT))
		ev_match = true;

	/* HP ILO2, AMI BMC firmware */
	__set_bit(EV_MSC, jd_scratch);
	if (bitmap_equal(jd_scratch, dev->evbit, EV_CNT))
		ev_match = true;

	/* VMware Virtual USB Mouse, QEMU USB Tablet, ATEN BMC firmware */
	__set_bit(EV_REL, jd_scratch);
	if (bitmap_equal(jd_scratch, dev->evbit, EV_CNT))
		ev_match = true;

	if (!ev_match)
		return false;

	/* Axes must be exactly ABS_X and ABS_Y. */
	bitmap_zero(jd_scratch, ABS_CNT);
	__set_bit(ABS_X, jd_scratch);
	__set_bit(ABS_Y, jd_scratch);
	if (!bitmap_equal(dev->absbit, jd_scratch, ABS_CNT))
		return false;

	/* Keys must be exactly the three primary mouse buttons. */
	bitmap_zero(jd_scratch, KEY_CNT);
	__set_bit(BTN_LEFT, jd_scratch);
	__set_bit(BTN_RIGHT, jd_scratch);
	__set_bit(BTN_MIDDLE, jd_scratch);

	if (!bitmap_equal(dev->keybit, jd_scratch, KEY_CNT))
		return false;

	/*
	 * Amiga joystick (amijoy) historically uses left/middle/right
	 * button events.
	 */
	if (dev->id.bustype == BUS_AMIGA)
		return false;

	return true;
}
/*
 * Handler match callback: accept a device only if it is neither on
 * the blacklist nor an absolute pointing device.
 */
static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
{
	return !joydev_dev_is_blacklisted(dev) &&
	       !joydev_dev_is_absolute_mouse(dev);
}
/*
 * Handler connect callback: create a new joydev instance for @dev.
 * Reserves a minor, builds the axis/button maps and correction
 * coefficients, then registers the input handle and the jsX char
 * device.  Error paths unwind in strict reverse order.
 */
static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
			  const struct input_device_id *id)
{
	struct joydev *joydev;
	int i, j, t, minor, dev_no;
	int error;

	minor = input_get_new_minor(JOYDEV_MINOR_BASE, JOYDEV_MINORS, true);
	if (minor < 0) {
		error = minor;
		pr_err("failed to reserve new minor: %d\n", error);
		return error;
	}

	joydev = kzalloc(sizeof(struct joydev), GFP_KERNEL);
	if (!joydev) {
		error = -ENOMEM;
		goto err_free_minor;
	}

	INIT_LIST_HEAD(&joydev->client_list);
	spin_lock_init(&joydev->client_lock);
	mutex_init(&joydev->mutex);
	init_waitqueue_head(&joydev->wait);
	joydev->exist = true;

	dev_no = minor;
	/* Normalize device number if it falls into legacy range */
	if (dev_no < JOYDEV_MINOR_BASE + JOYDEV_MINORS)
		dev_no -= JOYDEV_MINOR_BASE;
	dev_set_name(&joydev->dev, "js%d", dev_no);

	/* Reference dropped in joydev_free(). */
	joydev->handle.dev = input_get_device(dev);
	joydev->handle.name = dev_name(&joydev->dev);
	joydev->handle.handler = handler;
	joydev->handle.private = joydev;

	/* Build axis map: one slot per advertised ABS_* code. */
	for_each_set_bit(i, dev->absbit, ABS_CNT) {
		joydev->absmap[i] = joydev->nabs;
		joydev->abspam[joydev->nabs] = i;
		joydev->nabs++;
	}

	/* Joystick buttons (BTN_JOYSTICK and above) get low numbers ... */
	for (i = BTN_JOYSTICK - BTN_MISC; i < KEY_MAX - BTN_MISC + 1; i++)
		if (test_bit(i + BTN_MISC, dev->keybit)) {
			joydev->keymap[i] = joydev->nkey;
			joydev->keypam[joydev->nkey] = i + BTN_MISC;
			joydev->nkey++;
		}

	/* ... then the remaining BTN_MISC..BTN_JOYSTICK range. */
	for (i = 0; i < BTN_JOYSTICK - BTN_MISC; i++)
		if (test_bit(i + BTN_MISC, dev->keybit)) {
			joydev->keymap[i] = joydev->nkey;
			joydev->keypam[joydev->nkey] = i + BTN_MISC;
			joydev->nkey++;
		}

	/* Derive default correction coefficients from the axis limits. */
	for (i = 0; i < joydev->nabs; i++) {
		j = joydev->abspam[i];
		if (input_abs_get_max(dev, j) == input_abs_get_min(dev, j)) {
			joydev->corr[i].type = JS_CORR_NONE;
			continue;
		}
		joydev->corr[i].type = JS_CORR_BROKEN;
		joydev->corr[i].prec = input_abs_get_fuzz(dev, j);

		t = (input_abs_get_max(dev, j) + input_abs_get_min(dev, j)) / 2;
		joydev->corr[i].coef[0] = t - input_abs_get_flat(dev, j);
		joydev->corr[i].coef[1] = t + input_abs_get_flat(dev, j);

		t = (input_abs_get_max(dev, j) - input_abs_get_min(dev, j)) / 2
			- 2 * input_abs_get_flat(dev, j);
		if (t) {
			joydev->corr[i].coef[2] = (1 << 29) / t;
			joydev->corr[i].coef[3] = (1 << 29) / t;
		}
	}

	joydev->dev.devt = MKDEV(INPUT_MAJOR, minor);
	joydev->dev.class = &input_class;
	joydev->dev.parent = &dev->dev;
	joydev->dev.release = joydev_free;
	device_initialize(&joydev->dev);

	error = input_register_handle(&joydev->handle);
	if (error)
		goto err_free_joydev;

	cdev_init(&joydev->cdev, &joydev_fops);

	error = cdev_device_add(&joydev->cdev, &joydev->dev);
	if (error)
		goto err_cleanup_joydev;

	return 0;

 err_cleanup_joydev:
	joydev_cleanup(joydev);
	input_unregister_handle(&joydev->handle);
 err_free_joydev:
	/* put_device() triggers joydev_free() via dev.release. */
	put_device(&joydev->dev);
 err_free_minor:
	input_free_minor(minor);
	return error;
}
/*
 * Handler disconnect callback: unregister the char device, mark the
 * instance dead and wake all clients, then drop the registration
 * and the final device reference (which frees the structure).
 */
static void joydev_disconnect(struct input_handle *handle)
{
	struct joydev *joydev = handle->private;

	cdev_device_del(&joydev->cdev, &joydev->dev);
	joydev_cleanup(joydev);
	input_free_minor(MINOR(joydev->dev.devt));
	input_unregister_handle(handle);
	put_device(&joydev->dev);
}
/*
 * Devices joydev is interested in: anything with a joystick-looking
 * absolute axis or a joystick/gamepad button (further filtered by
 * joydev_match()).
 */
static const struct input_device_id joydev_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { BIT_MASK(ABS_X) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { BIT_MASK(ABS_Z) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { BIT_MASK(ABS_WHEEL) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { BIT_MASK(ABS_THROTTLE) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = {[BIT_WORD(BTN_JOYSTICK)] = BIT_MASK(BTN_JOYSTICK) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(BTN_GAMEPAD)] = BIT_MASK(BTN_GAMEPAD) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(BTN_TRIGGER_HAPPY)] = BIT_MASK(BTN_TRIGGER_HAPPY) },
	},
	{ }	/* Terminating entry */
};

MODULE_DEVICE_TABLE(input, joydev_ids);
/* Input-core handler registration for joydev. */
static struct input_handler joydev_handler = {
	.event		= joydev_event,
	.match		= joydev_match,
	.connect	= joydev_connect,
	.disconnect	= joydev_disconnect,
	.legacy_minors	= true,
	.minor		= JOYDEV_MINOR_BASE,
	.name		= "joydev",
	.id_table	= joydev_ids,
};
/* Module init: register the handler with the input core. */
static int __init joydev_init(void)
{
	return input_register_handler(&joydev_handler);
}
/* Module exit: unregister the handler from the input core. */
static void __exit joydev_exit(void)
{
	input_unregister_handler(&joydev_handler);
}

module_init(joydev_init);
module_exit(joydev_exit);
|
linux-master
|
drivers/input/joydev.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* LED support for the input layer
*
* Copyright 2010-2015 Samuel Thibault <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/leds.h>
#include <linux/input.h>
/* Keyboard-lock LED triggers only exist when the VT layer is built in. */
#if IS_ENABLED(CONFIG_VT)
#define VT_TRIGGER(_name)	.trigger = _name
#else
#define VT_TRIGGER(_name)	.trigger = NULL
#endif
/*
 * LED class-device name and optional default trigger for each LED_*
 * code.  Codes without a name here are not exposed as LED devices.
 */
static const struct {
	const char *name;
	const char *trigger;
} input_led_info[LED_CNT] = {
	[LED_NUML]	= { "numlock", VT_TRIGGER("kbd-numlock") },
	[LED_CAPSL]	= { "capslock", VT_TRIGGER("kbd-capslock") },
	[LED_SCROLLL]	= { "scrolllock", VT_TRIGGER("kbd-scrolllock") },
	[LED_COMPOSE]	= { "compose" },
	[LED_KANA]	= { "kana", VT_TRIGGER("kbd-kanalock") },
	[LED_SLEEP]	= { "sleep" } ,
	[LED_SUSPEND]	= { "suspend" },
	[LED_MUTE]	= { "mute" },
	[LED_MISC]	= { "misc" },
	[LED_MAIL]	= { "mail" },
	[LED_CHARGING]	= { "charging" },
};
/* One registered LED class device backed by an input device LED. */
struct input_led {
	struct led_classdev cdev;
	struct input_handle *handle;
	unsigned int code; /* One of LED_* constants */
};

/* Per-input-device state: the handle plus all exposed LEDs. */
struct input_leds {
	struct input_handle handle;
	unsigned int num_leds;
	struct input_led leds[];	/* flexible array, num_leds entries */
};
/* Report the LED's current state as tracked by the input core. */
static enum led_brightness input_leds_brightness_get(struct led_classdev *cdev)
{
	struct input_led *led = container_of(cdev, struct input_led, cdev);
	struct input_dev *input = led->handle->dev;

	if (test_bit(led->code, input->led))
		return cdev->max_brightness;

	return 0;
}
/*
 * Drive the LED by injecting an EV_LED event into the input device;
 * any non-zero brightness turns the LED on.
 */
static void input_leds_brightness_set(struct led_classdev *cdev,
				      enum led_brightness brightness)
{
	struct input_led *led = container_of(cdev, struct input_led, cdev);

	input_inject_event(led->handle, EV_LED, led->code, !!brightness);
}
/*
 * Intentionally empty: this handler only drives LEDs, it does not
 * consume input events.  A non-NULL .event is still required by the
 * input core.
 */
static void input_leds_event(struct input_handle *handle, unsigned int type,
			     unsigned int code, int value)
{
}
static int input_leds_get_count(struct input_dev *dev)
{
unsigned int led_code;
int count = 0;
for_each_set_bit(led_code, dev->ledbit, LED_CNT)
if (input_led_info[led_code].name)
count++;
return count;
}
/*
 * Handler connect callback: register one LED class device per known
 * LED advertised by @dev.  Refuses devices without any known LED.
 * On failure, already-registered LEDs are unwound in reverse order.
 */
static int input_leds_connect(struct input_handler *handler,
			      struct input_dev *dev,
			      const struct input_device_id *id)
{
	struct input_leds *leds;
	struct input_led *led;
	unsigned int num_leds;
	unsigned int led_code;
	int led_no;
	int error;

	num_leds = input_leds_get_count(dev);
	if (!num_leds)
		return -ENXIO;

	leds = kzalloc(struct_size(leds, leds, num_leds), GFP_KERNEL);
	if (!leds)
		return -ENOMEM;

	leds->num_leds = num_leds;

	leds->handle.dev = dev;
	leds->handle.handler = handler;
	leds->handle.name = "leds";
	leds->handle.private = leds;

	error = input_register_handle(&leds->handle);
	if (error)
		goto err_free_mem;

	/* Open the device so we can inject EV_LED events. */
	error = input_open_device(&leds->handle);
	if (error)
		goto err_unregister_handle;

	led_no = 0;
	for_each_set_bit(led_code, dev->ledbit, LED_CNT) {
		if (!input_led_info[led_code].name)
			continue;

		led = &leds->leds[led_no];
		led->handle = &leds->handle;
		led->code = led_code;

		/* LED name is "<input device>::<led name>". */
		led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
					   dev_name(&dev->dev),
					   input_led_info[led_code].name);
		if (!led->cdev.name) {
			error = -ENOMEM;
			goto err_unregister_leds;
		}

		led->cdev.max_brightness = 1;
		led->cdev.brightness_get = input_leds_brightness_get;
		led->cdev.brightness_set = input_leds_brightness_set;
		led->cdev.default_trigger = input_led_info[led_code].trigger;

		error = led_classdev_register(&dev->dev, &led->cdev);
		if (error) {
			dev_err(&dev->dev, "failed to register LED %s: %d\n",
				led->cdev.name, error);
			kfree(led->cdev.name);
			goto err_unregister_leds;
		}

		led_no++;
	}

	return 0;

err_unregister_leds:
	/* Unwind only the LEDs registered so far. */
	while (--led_no >= 0) {
		struct input_led *led = &leds->leds[led_no];

		led_classdev_unregister(&led->cdev);
		kfree(led->cdev.name);
	}

	input_close_device(&leds->handle);

err_unregister_handle:
	input_unregister_handle(&leds->handle);

err_free_mem:
	kfree(leds);
	return error;
}
/*
 * Handler disconnect callback: unregister every LED class device and
 * free the per-device state.
 */
static void input_leds_disconnect(struct input_handle *handle)
{
	struct input_leds *leds = handle->private;
	int i;

	for (i = 0; i < leds->num_leds; i++) {
		struct input_led *led = &leds->leds[i];

		led_classdev_unregister(&led->cdev);
		kfree(led->cdev.name);	/* allocated by kasprintf() */
	}

	input_close_device(handle);
	input_unregister_handle(handle);

	kfree(leds);
}
/* Match any input device that advertises EV_LED capability. */
static const struct input_device_id input_leds_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_LED) },
	},
	{ },
};
MODULE_DEVICE_TABLE(input, input_leds_ids);
/* Input-core handler registration for the LED bridge. */
static struct input_handler input_leds_handler = {
	.event =	input_leds_event,
	.connect =	input_leds_connect,
	.disconnect =	input_leds_disconnect,
	.name =		"leds",
	.id_table =	input_leds_ids,
};
/* Module init: register the LED bridge handler with the input core. */
static int __init input_leds_init(void)
{
	return input_register_handler(&input_leds_handler);
}
module_init(input_leds_init);
/* Module exit: unregister the LED bridge handler. */
static void __exit input_leds_exit(void)
{
	input_unregister_handler(&input_leds_handler);
}
module_exit(input_leds_exit);
MODULE_AUTHOR("Samuel Thibault <[email protected]>");
MODULE_AUTHOR("Dmitry Torokhov <[email protected]>");
MODULE_DESCRIPTION("Input -> LEDs Bridge");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/input/input-leds.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic support for sparse keymaps
*
* Copyright (c) 2009 Dmitry Torokhov
*
* Derived from wistron button driver:
* Copyright (C) 2005 Miloslav Trmac <[email protected]>
* Copyright (C) 2005 Bernhard Rosenkraenzer <[email protected]>
* Copyright (C) 2005 Dmitry Torokhov <[email protected]>
*/
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/module.h>
#include <linux/slab.h>
MODULE_AUTHOR("Dmitry Torokhov <[email protected]>");
MODULE_DESCRIPTION("Generic support for sparse keymaps");
MODULE_LICENSE("GPL v2");
static unsigned int sparse_keymap_get_key_index(struct input_dev *dev,
const struct key_entry *k)
{
struct key_entry *key;
unsigned int idx = 0;
for (key = dev->keycode; key->type != KE_END; key++) {
if (key->type == KE_KEY) {
if (key == k)
break;
idx++;
}
}
return idx;
}
/*
 * Return the @index-th KE_KEY entry of the device's keymap, or NULL
 * if there are fewer KE_KEY entries than that.
 */
static struct key_entry *sparse_keymap_entry_by_index(struct input_dev *dev,
						      unsigned int index)
{
	struct key_entry *key = dev->keycode;
	unsigned int seen = 0;

	for (; key->type != KE_END; key++) {
		if (key->type != KE_KEY)
			continue;
		if (seen++ == index)
			return key;
	}

	return NULL;
}
/**
 * sparse_keymap_entry_from_scancode - perform sparse keymap lookup
 * @dev: Input device using sparse keymap
 * @code: Scan code
 *
 * This function is used to perform &struct key_entry lookup in an
 * input device using sparse keymap.  Returns the first entry whose
 * code matches, regardless of entry type, or %NULL if none matches.
 */
struct key_entry *sparse_keymap_entry_from_scancode(struct input_dev *dev,
						    unsigned int code)
{
	struct key_entry *key = dev->keycode;

	while (key->type != KE_END) {
		if (key->code == code)
			return key;
		key++;
	}

	return NULL;
}
EXPORT_SYMBOL(sparse_keymap_entry_from_scancode);
/**
 * sparse_keymap_entry_from_keycode - perform sparse keymap lookup
 * @dev: Input device using sparse keymap
 * @keycode: Key code
 *
 * This function is used to perform &struct key_entry lookup in an
 * input device using sparse keymap.  Returns the first KE_KEY entry
 * bound to @keycode, or %NULL if there is none.
 */
struct key_entry *sparse_keymap_entry_from_keycode(struct input_dev *dev,
						   unsigned int keycode)
{
	struct key_entry *key = dev->keycode;

	while (key->type != KE_END) {
		if (key->type == KE_KEY && key->keycode == keycode)
			return key;
		key++;
	}

	return NULL;
}
EXPORT_SYMBOL(sparse_keymap_entry_from_keycode);
/*
 * Locate a keymap entry either by position (INPUT_KEYMAP_BY_INDEX)
 * or by scan code; returns NULL when the lookup cannot be performed.
 */
static struct key_entry *sparse_keymap_locate(struct input_dev *dev,
					const struct input_keymap_entry *ke)
{
	unsigned int scancode;

	if (ke->flags & INPUT_KEYMAP_BY_INDEX)
		return sparse_keymap_entry_by_index(dev, ke->index);

	if (input_scancode_to_scalar(ke, &scancode) == 0)
		return sparse_keymap_entry_from_scancode(dev, scancode);

	return NULL;
}
/*
 * getkeycode() implementation installed by sparse_keymap_setup():
 * fill @ke (keycode, index, raw scan code) from the matching KE_KEY
 * entry, or return -EINVAL when there is no such entry.
 */
static int sparse_keymap_getkeycode(struct input_dev *dev,
				    struct input_keymap_entry *ke)
{
	const struct key_entry *key;

	if (dev->keycode) {
		key = sparse_keymap_locate(dev, ke);
		if (key && key->type == KE_KEY) {
			ke->keycode = key->keycode;
			if (!(ke->flags & INPUT_KEYMAP_BY_INDEX))
				ke->index =
					sparse_keymap_get_key_index(dev, key);
			ke->len = sizeof(key->code);
			memcpy(ke->scancode, &key->code, sizeof(key->code));

			return 0;
		}
	}

	return -EINVAL;
}
/*
 * setkeycode() implementation installed by sparse_keymap_setup():
 * rebind the located KE_KEY entry to the new keycode, update the
 * device's keybit map, and clear the old keycode's bit if no other
 * entry still emits it.
 */
static int sparse_keymap_setkeycode(struct input_dev *dev,
				    const struct input_keymap_entry *ke,
				    unsigned int *old_keycode)
{
	struct key_entry *key;

	if (dev->keycode) {
		key = sparse_keymap_locate(dev, ke);
		if (key && key->type == KE_KEY) {
			*old_keycode = key->keycode;
			key->keycode = ke->keycode;
			set_bit(ke->keycode, dev->keybit);
			if (!sparse_keymap_entry_from_keycode(dev, *old_keycode))
				clear_bit(*old_keycode, dev->keybit);

			return 0;
		}
	}

	return -EINVAL;
}
/**
 * sparse_keymap_setup - set up sparse keymap for an input device
 * @dev: Input device
 * @keymap: Keymap in form of array of &key_entry structures ending
 *	with %KE_END type entry
 * @setup: Function that can be used to adjust keymap entries
 *	depending on device's needs, may be %NULL
 *
 * The function calculates size and allocates copy of the original
 * keymap after which sets up input device event bits appropriately.
 * The allocated copy of the keymap is automatically freed when it
 * is no longer needed.
 */
int sparse_keymap_setup(struct input_dev *dev,
			const struct key_entry *keymap,
			int (*setup)(struct input_dev *, struct key_entry *))
{
	size_t map_size = 1; /* to account for the last KE_END entry */
	const struct key_entry *e;
	struct key_entry *map, *entry;
	int i;
	int error;

	for (e = keymap; e->type != KE_END; e++)
		map_size++;

	/* devm allocation: the copy is freed with the device. */
	map = devm_kmemdup(&dev->dev, keymap, map_size * sizeof(*map),
			   GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	for (i = 0; i < map_size; i++) {
		entry = &map[i];

		/* Let the driver tweak the copied entry first. */
		if (setup) {
			error = setup(dev, entry);
			if (error)
				return error;
		}

		/* Advertise the capability implied by the entry type. */
		switch (entry->type) {
		case KE_KEY:
			__set_bit(EV_KEY, dev->evbit);
			__set_bit(entry->keycode, dev->keybit);
			break;

		case KE_SW:
		case KE_VSW:
			__set_bit(EV_SW, dev->evbit);
			__set_bit(entry->sw.code, dev->swbit);
			break;
		}
	}

	if (test_bit(EV_KEY, dev->evbit)) {
		/* KEY_UNKNOWN + MSC_SCAN are used to report unmapped codes. */
		__set_bit(KEY_UNKNOWN, dev->keybit);
		__set_bit(EV_MSC, dev->evbit);
		__set_bit(MSC_SCAN, dev->mscbit);
	}

	dev->keycode = map;
	dev->keycodemax = map_size;
	dev->getkeycode = sparse_keymap_getkeycode;
	dev->setkeycode = sparse_keymap_setkeycode;

	return 0;
}
EXPORT_SYMBOL(sparse_keymap_setup);
/**
 * sparse_keymap_report_entry - report event corresponding to given key entry
 * @dev: Input device for which event should be reported
 * @ke: key entry describing event
 * @value: Value that should be reported (ignored by %KE_SW entries)
 * @autorelease: Signals whether release event should be emitted for %KE_KEY
 *	entries right after reporting press event, ignored by all other
 *	entries
 *
 * This function is used to report input event described by given
 * &struct key_entry.
 */
void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *ke,
				unsigned int value, bool autorelease)
{
	switch (ke->type) {
	case KE_KEY:
		/* Raw scan code first, then the translated key event. */
		input_event(dev, EV_MSC, MSC_SCAN, ke->code);
		input_report_key(dev, ke->keycode, value);
		input_sync(dev);
		if (value && autorelease) {
			input_report_key(dev, ke->keycode, 0);
			input_sync(dev);
		}
		break;

	case KE_SW:
		/* KE_SW always reports the value stored in the entry. */
		value = ke->sw.value;
		fallthrough;

	case KE_VSW:
		input_report_switch(dev, ke->sw.code, value);
		input_sync(dev);
		break;
	}
}
EXPORT_SYMBOL(sparse_keymap_report_entry);
/**
 * sparse_keymap_report_event - report event corresponding to given scancode
 * @dev: Input device using sparse keymap
 * @code: Scan code
 * @value: Value that should be reported (ignored by %KE_SW entries)
 * @autorelease: Signals whether release event should be emitted for %KE_KEY
 *	entries right after reporting press event, ignored by all other
 *	entries
 *
 * This function is used to perform lookup in an input device using sparse
 * keymap and report corresponding event. Returns %true if lookup was
 * successful and %false otherwise.
 */
bool sparse_keymap_report_event(struct input_dev *dev, unsigned int code,
				unsigned int value, bool autorelease)
{
	const struct key_entry *ke =
		sparse_keymap_entry_from_scancode(dev, code);
	struct key_entry unknown_ke;

	if (ke) {
		sparse_keymap_report_entry(dev, ke, value, autorelease);
		return true;
	}

	/* Report an unknown key event as a debugging aid */
	unknown_ke.type = KE_KEY;
	unknown_ke.code = code;
	unknown_ke.keycode = KEY_UNKNOWN;
	sparse_keymap_report_entry(dev, &unknown_ke, value, true);

	return false;
}
EXPORT_SYMBOL(sparse_keymap_report_event);
|
linux-master
|
drivers/input/sparse-keymap.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Input Multitouch Library
*
* Copyright (c) 2008-2010 Henrik Rydberg
*/
#include <linux/input/mt.h>
#include <linux/export.h>
#include <linux/slab.h>
#include "input-core-private.h"
#define TRKID_SGN ((TRKID_MAX + 1) >> 1)
/*
 * Copy the axis parameters of @src onto @dst and enable @dst, clearing
 * the fuzz value on the copy.  No-op if @src is not a configured axis.
 */
static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
{
	if (!dev->absinfo || !test_bit(src, dev->absbit))
		return;

	dev->absinfo[dst] = dev->absinfo[src];
	dev->absinfo[dst].fuzz = 0;
	__set_bit(dst, dev->absbit);
}
/**
 * input_mt_init_slots() - initialize MT input slots
 * @dev: input device supporting MT events and finger tracking
 * @num_slots: number of slots used by the device
 * @flags: mt tasks to handle in core
 *
 * This function allocates all necessary memory for MT slot handling
 * in the input device, prepares the ABS_MT_SLOT and
 * ABS_MT_TRACKING_ID events for use and sets up appropriate buffers.
 * Depending on the flags set, it also performs pointer emulation and
 * frame synchronization.
 *
 * May be called repeatedly. Returns -EINVAL if attempting to
 * reinitialize with a different number of slots.
 */
int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
			unsigned int flags)
{
	struct input_mt *mt = dev->mt;
	int i;

	if (!num_slots)
		return 0;

	/* Repeated call: only valid with the same slot count. */
	if (mt)
		return mt->num_slots != num_slots ? -EINVAL : 0;

	/* struct input_mt carries a flexible array of num_slots slots. */
	mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
	if (!mt)
		goto err_mem;

	mt->num_slots = num_slots;
	mt->flags = flags;
	input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
	input_set_abs_params(dev, ABS_MT_TRACKING_ID, 0, TRKID_MAX, 0, 0);

	if (flags & (INPUT_MT_POINTER | INPUT_MT_DIRECT)) {
		/* Mirror the MT axes onto the legacy single-touch axes. */
		__set_bit(EV_KEY, dev->evbit);
		__set_bit(BTN_TOUCH, dev->keybit);

		copy_abs(dev, ABS_X, ABS_MT_POSITION_X);
		copy_abs(dev, ABS_Y, ABS_MT_POSITION_Y);
		copy_abs(dev, ABS_PRESSURE, ABS_MT_PRESSURE);
	}
	if (flags & INPUT_MT_POINTER) {
		/* Finger-count keys, capped by the number of slots. */
		__set_bit(BTN_TOOL_FINGER, dev->keybit);
		__set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
		if (num_slots >= 3)
			__set_bit(BTN_TOOL_TRIPLETAP, dev->keybit);
		if (num_slots >= 4)
			__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
		if (num_slots >= 5)
			__set_bit(BTN_TOOL_QUINTTAP, dev->keybit);
		__set_bit(INPUT_PROP_POINTER, dev->propbit);
	}
	if (flags & INPUT_MT_DIRECT)
		__set_bit(INPUT_PROP_DIRECT, dev->propbit);
	if (flags & INPUT_MT_SEMI_MT)
		__set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
	if (flags & INPUT_MT_TRACK) {
		/* num_slots x num_slots work matrix for slot assignment. */
		unsigned int n2 = num_slots * num_slots;
		mt->red = kcalloc(n2, sizeof(*mt->red), GFP_KERNEL);
		if (!mt->red)
			goto err_mem;
	}

	/* Mark slots as 'inactive' */
	for (i = 0; i < num_slots; i++)
		input_mt_set_value(&mt->slots[i], ABS_MT_TRACKING_ID, -1);
	/* Mark slots as 'unused' */
	mt->frame = 1;

	dev->mt = mt;
	return 0;
err_mem:
	/* mt->red is NULL (or mt itself is) on this path — no leak. */
	kfree(mt);
	return -ENOMEM;
}
EXPORT_SYMBOL(input_mt_init_slots);
/**
* input_mt_destroy_slots() - frees the MT slots of the input device
* @dev: input device with allocated MT slots
*
* This function is only needed in error path as the input core will
* automatically free the MT slots when the device is destroyed.
*/
void input_mt_destroy_slots(struct input_dev *dev)
{
if (dev->mt) {
kfree(dev->mt->red);
kfree(dev->mt);
}
dev->mt = NULL;
}
EXPORT_SYMBOL(input_mt_destroy_slots);
/**
 * input_mt_report_slot_state() - report contact state
 * @dev: input device with allocated MT slots
 * @tool_type: the tool type to use in this slot
 * @active: true if contact is active, false otherwise
 *
 * Reports a contact via ABS_MT_TRACKING_ID, and optionally
 * ABS_MT_TOOL_TYPE. If active is true and the slot is currently
 * inactive, a new tracking id is assigned to the slot. The tool
 * type is only reported if the corresponding absbit field is set.
 *
 * Returns true if contact is active.
 */
bool input_mt_report_slot_state(struct input_dev *dev,
				unsigned int tool_type, bool active)
{
	struct input_mt *mt = dev->mt;
	struct input_mt_slot *slot;
	int id;

	if (!mt)
		return false;

	/* Stamp the currently selected slot as seen in this frame. */
	slot = &mt->slots[mt->slot];
	slot->frame = mt->frame;

	if (!active) {
		/* Tracking id -1 deactivates the contact. */
		input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
		return false;
	}

	/* Reuse the existing tracking id, or mint one for a new contact. */
	id = input_mt_get_value(slot, ABS_MT_TRACKING_ID);
	if (id < 0)
		id = input_mt_new_trkid(mt);

	input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id);
	input_event(dev, EV_ABS, ABS_MT_TOOL_TYPE, tool_type);
	return true;
}
EXPORT_SYMBOL(input_mt_report_slot_state);
/**
 * input_mt_report_finger_count() - report contact count
 * @dev: input device with allocated MT slots
 * @count: the number of contacts
 *
 * Reports the contact count via BTN_TOOL_FINGER, BTN_TOOL_DOUBLETAP,
 * BTN_TOOL_TRIPLETAP, BTN_TOOL_QUADTAP and BTN_TOOL_QUINTTAP: exactly
 * one of them is pressed for counts 1..5.
 *
 * The input core ensures only the KEY events already setup for
 * this device will produce output.
 */
void input_mt_report_finger_count(struct input_dev *dev, int count)
{
	static const unsigned int tool_key[] = {
		BTN_TOOL_FINGER,
		BTN_TOOL_DOUBLETAP,
		BTN_TOOL_TRIPLETAP,
		BTN_TOOL_QUADTAP,
		BTN_TOOL_QUINTTAP,
	};
	int i;

	/* Emit the keys in ascending order, asserting only the match. */
	for (i = 0; i < 5; i++)
		input_event(dev, EV_KEY, tool_key[i], count == i + 1);
}
EXPORT_SYMBOL(input_mt_report_finger_count);
/**
 * input_mt_report_pointer_emulation() - common pointer emulation
 * @dev: input device with allocated MT slots
 * @use_count: report number of active contacts as finger count
 *
 * Performs legacy pointer emulation via BTN_TOUCH, ABS_X, ABS_Y and
 * ABS_PRESSURE. Touchpad finger count is emulated if use_count is true.
 *
 * The input core ensures only the KEY and ABS axes already setup for
 * this device will produce output.
 */
void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
{
	struct input_mt *mt = dev->mt;
	struct input_mt_slot *oldest;
	int oldid, count, i;

	if (!mt)
		return;

	/*
	 * Find the oldest active contact: tracking ids are assigned from a
	 * circular counter, so "older" is decided by the sign bit of the
	 * wrapped difference (TRKID_SGN).  Also count active contacts.
	 */
	oldest = NULL;
	oldid = mt->trkid;
	count = 0;

	for (i = 0; i < mt->num_slots; ++i) {
		struct input_mt_slot *ps = &mt->slots[i];
		int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID);

		if (id < 0)
			continue;
		if ((id - oldid) & TRKID_SGN) {
			oldest = ps;
			oldid = id;
		}

		count++;
	}

	input_event(dev, EV_KEY, BTN_TOUCH, count > 0);

	if (use_count) {
		if (count == 0 &&
		    !test_bit(ABS_MT_DISTANCE, dev->absbit) &&
		    test_bit(ABS_DISTANCE, dev->absbit) &&
		    input_abs_get_val(dev, ABS_DISTANCE) != 0) {
			/*
			 * Force reporting BTN_TOOL_FINGER for devices that
			 * only report general hover (and not per-contact
			 * distance) when contact is in proximity but not
			 * on the surface.
			 */
			count = 1;
		}

		input_mt_report_finger_count(dev, count);
	}

	if (oldest) {
		/* Legacy single-touch position follows the oldest contact. */
		int x = input_mt_get_value(oldest, ABS_MT_POSITION_X);
		int y = input_mt_get_value(oldest, ABS_MT_POSITION_Y);

		input_event(dev, EV_ABS, ABS_X, x);
		input_event(dev, EV_ABS, ABS_Y, y);

		if (test_bit(ABS_MT_PRESSURE, dev->absbit)) {
			int p = input_mt_get_value(oldest, ABS_MT_PRESSURE);
			input_event(dev, EV_ABS, ABS_PRESSURE, p);
		}
	} else {
		/* No contact: report zero pressure, leave position alone. */
		if (test_bit(ABS_MT_PRESSURE, dev->absbit))
			input_event(dev, EV_ABS, ABS_PRESSURE, 0);
	}
}
EXPORT_SYMBOL(input_mt_report_pointer_emulation);
/*
 * Deactivate every slot that is active but was not stamped during the
 * current frame.  Injects events via input_handle_event(), so the
 * caller must hold dev->event_lock.
 */
static void __input_mt_drop_unused(struct input_dev *dev, struct input_mt *mt)
{
	int i;

	lockdep_assert_held(&dev->event_lock);

	for (i = 0; i < mt->num_slots; i++) {
		if (input_mt_is_active(&mt->slots[i]) &&
		    !input_mt_is_used(mt, &mt->slots[i])) {
			/* Select the slot, then clear its tracking id. */
			input_handle_event(dev, EV_ABS, ABS_MT_SLOT, i);
			input_handle_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
		}
	}
}
/**
 * input_mt_drop_unused() - Inactivate slots not seen in this frame
 * @dev: input device with allocated MT slots
 *
 * Lift all slots not seen since the last call to this function.
 */
void input_mt_drop_unused(struct input_dev *dev)
{
	struct input_mt *mt = dev->mt;
	unsigned long flags;

	if (!mt)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);

	__input_mt_drop_unused(dev, mt);
	mt->frame++;

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
EXPORT_SYMBOL(input_mt_drop_unused);
/**
 * input_mt_release_slots() - Deactivate all slots
 * @dev: input device with allocated MT slots
 *
 * Lift all active slots.  Caller must hold dev->event_lock.
 */
void input_mt_release_slots(struct input_dev *dev)
{
	struct input_mt *mt = dev->mt;

	lockdep_assert_held(&dev->event_lock);

	if (mt) {
		/* This will effectively mark all slots unused. */
		mt->frame++;
		__input_mt_drop_unused(dev, mt);

		if (test_bit(ABS_PRESSURE, dev->absbit))
			input_handle_event(dev, EV_ABS, ABS_PRESSURE, 0);

		/*
		 * Advance the frame once more so slot frame stamps from the
		 * drop above do not read as current — NOTE(review): presumed
		 * intent, confirm against input_mt_is_used().
		 */
		mt->frame++;
	}
}
/**
 * input_mt_sync_frame() - synchronize mt frame
 * @dev: input device with allocated MT slots
 *
 * Close the frame and prepare the internal state for a new one.
 * Depending on the flags, marks unused slots as inactive and performs
 * pointer emulation.
 */
void input_mt_sync_frame(struct input_dev *dev)
{
	struct input_mt *mt = dev->mt;
	bool emulate_count;

	if (!mt)
		return;

	if (mt->flags & INPUT_MT_DROP_UNUSED) {
		unsigned long flags;

		spin_lock_irqsave(&dev->event_lock, flags);
		__input_mt_drop_unused(dev, mt);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* Finger-count emulation only for pointers that are not semi-mt. */
	emulate_count = (mt->flags & INPUT_MT_POINTER) &&
			!(mt->flags & INPUT_MT_SEMI_MT);
	input_mt_report_pointer_emulation(dev, emulate_count);

	mt->frame++;
}
EXPORT_SYMBOL(input_mt_sync_frame);
/*
 * One dual-adjustment step over a row or column of the cost matrix.
 * The range @begin..@end is walked with stride @step; the two smallest
 * entries determine the offset subtracted from every entry.
 */
static int adjust_dual(int *begin, int step, int *end, int eq, int mu)
{
	int lowest, second, delta;
	int *cur;

	if (begin == end)
		return 0;

	lowest = *begin;
	cur = begin + step;
	second = (cur == end) ? lowest + 1 : *cur;

	/* Track the smallest and second-smallest entries. */
	while (cur != end) {
		if (*cur < lowest) {
			second = lowest;
			lowest = *cur;
		} else if (*cur < second) {
			second = *cur;
		}
		cur += step;
	}

	delta = (lowest + second + 1) / 2;
	if (delta == 0 || (delta > mu && (!eq || mu > 0)))
		return 0;

	/* Improve convergence for positive matrices by penalizing overcovers */
	if (second < 0 && mu <= 0)
		delta *= 2;

	for (cur = begin; cur != end; cur += step)
		*cur -= delta;

	return (delta < second && second <= 0) ||
	       (lowest >= 0 && lowest < delta);
}
/*
 * Iteratively reduce the @nr x @nc cost matrix @w (@nrc == nr * nc
 * entries) by alternating stride-@nr and stride-1 dual adjustments,
 * stopping early once a full pass of row adjustments reports no change.
 */
static void find_reduced_matrix(int *w, int nr, int nc, int nrc, int mu)
{
	int i, k, sum;

	/* At most nrc rounds; typically terminates much sooner. */
	for (k = 0; k < nrc; k++) {
		/* Adjust along stride nr (one pass per offset 0..nr-1). */
		for (i = 0; i < nr; i++)
			adjust_dual(w + i, nr, w + i + nrc, nr <= nc, mu);
		/* Adjust each contiguous block of nr entries. */
		sum = 0;
		for (i = 0; i < nrc; i += nr)
			sum += adjust_dual(w + i, 1, w + i + nr, nc <= nr, mu);
		if (!sum)
			break;
	}
}
/*
 * Build the cost matrix for slot assignment: one row per active slot,
 * one entry per reported position, holding the squared euclidean
 * distance minus @mu.  Returns the number of entries written.
 */
static int input_mt_set_matrix(struct input_mt *mt,
			       const struct input_mt_pos *pos, int num_pos,
			       int mu)
{
	struct input_mt_slot *slot;
	int *entry = mt->red;

	for (slot = mt->slots; slot != mt->slots + mt->num_slots; slot++) {
		const struct input_mt_pos *p;
		int sx, sy;

		if (!input_mt_is_active(slot))
			continue;

		sx = input_mt_get_value(slot, ABS_MT_POSITION_X);
		sy = input_mt_get_value(slot, ABS_MT_POSITION_Y);

		for (p = pos; p != pos + num_pos; p++) {
			int dx = sx - p->x;
			int dy = sy - p->y;

			*entry++ = dx * dx + dy * dy - mu;
		}
	}

	return entry - mt->red;
}
/*
 * Translate the reduced cost matrix into a slot assignment: each
 * position gets the matching active slot (negative matrix entry),
 * and leftover positions are parked in inactive slots.
 */
static void input_mt_set_slots(struct input_mt *mt,
			       int *slots, int num_pos)
{
	struct input_mt_slot *s;
	int *row = mt->red;
	int i;

	/* Start with every position unassigned. */
	for (i = 0; i < num_pos; i++)
		slots[i] = -1;

	/* First pass: bind positions to their matching active slots. */
	for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
		if (!input_mt_is_active(s))
			continue;

		for (i = 0; i < num_pos; i++) {
			if (row[i] < 0) {
				slots[i] = s - mt->slots;
				break;
			}
		}

		row += num_pos;
	}

	/* Second pass: place remaining positions in inactive slots. */
	for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
		if (input_mt_is_active(s))
			continue;

		for (i = 0; i < num_pos; i++) {
			if (slots[i] < 0) {
				slots[i] = s - mt->slots;
				break;
			}
		}
	}
}
/**
 * input_mt_assign_slots() - perform a best-match assignment
 * @dev: input device with allocated MT slots
 * @slots: the slot assignment to be filled
 * @pos: the position array to match
 * @num_pos: number of positions
 * @dmax: maximum ABS_MT_POSITION displacement (zero for infinite)
 *
 * Performs a best match against the current contacts and returns
 * the slot assignment list. New contacts are assigned to unused
 * slots.
 *
 * The assignments are balanced so that all coordinate displacements are
 * below the euclidian distance dmax. If no such assignment can be found,
 * some contacts are assigned to unused slots.
 *
 * Returns zero on success, or negative error in case of failure.
 */
int input_mt_assign_slots(struct input_dev *dev, int *slots,
			  const struct input_mt_pos *pos, int num_pos,
			  int dmax)
{
	struct input_mt *mt = dev->mt;
	int mu = 2 * dmax * dmax;	/* distance threshold in matrix units */
	int nrc;

	/* mt->red is only allocated when INPUT_MT_TRACK was requested. */
	if (!mt || !mt->red)
		return -ENXIO;
	if (num_pos > mt->num_slots)
		return -EINVAL;
	if (num_pos < 1)
		return 0;

	/* nrc / num_pos is the number of active-slot rows in the matrix. */
	nrc = input_mt_set_matrix(mt, pos, num_pos, mu);
	find_reduced_matrix(mt->red, num_pos, nrc / num_pos, nrc, mu);
	input_mt_set_slots(mt, slots, num_pos);

	return 0;
}
EXPORT_SYMBOL(input_mt_assign_slots);
/**
 * input_mt_get_slot_by_key() - return slot matching key
 * @dev: input device with allocated MT slots
 * @key: the key of the sought slot
 *
 * Returns the slot of the given key, if it exists, otherwise
 * set the key on the first unused slot and return.
 *
 * If no available slot can be found, -1 is returned.
 * Note that for this function to work properly, input_mt_sync_frame() has
 * to be called at each frame.
 */
int input_mt_get_slot_by_key(struct input_dev *dev, int key)
{
	struct input_mt *mt = dev->mt;
	int i;

	if (!mt)
		return -1;

	/* Prefer the active slot already carrying this key. */
	for (i = 0; i < mt->num_slots; i++)
		if (input_mt_is_active(&mt->slots[i]) &&
		    mt->slots[i].key == key)
			return i;

	/* Otherwise claim the first slot that is neither active nor used. */
	for (i = 0; i < mt->num_slots; i++) {
		struct input_mt_slot *s = &mt->slots[i];

		if (!input_mt_is_active(s) && !input_mt_is_used(mt, s)) {
			s->key = key;
			return i;
		}
	}

	return -1;
}
EXPORT_SYMBOL(input_mt_get_slot_by_key);
|
linux-master
|
drivers/input/input-mt.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* 32bit compatibility wrappers for the input subsystem.
*
* Very heavily based on evdev.c - Copyright (c) 1999-2002 Vojtech Pavlik
*/
#include <linux/export.h>
#include <linux/uaccess.h>
#include "input-compat.h"
#ifdef CONFIG_COMPAT
/*
 * Copy one input_event from userspace, converting from the 32-bit
 * compat layout when the caller is a 32-bit task without 64-bit time.
 */
int input_event_from_user(const char __user *buffer,
			  struct input_event *event)
{
	struct input_event_compat compat_event;

	/* Native layout: a straight copy suffices. */
	if (!in_compat_syscall() || COMPAT_USE_64BIT_TIME) {
		if (copy_from_user(event, buffer, sizeof(struct input_event)))
			return -EFAULT;
		return 0;
	}

	if (copy_from_user(&compat_event, buffer,
			   sizeof(struct input_event_compat)))
		return -EFAULT;

	/* Widen the compat fields into the native event. */
	event->input_event_sec = compat_event.sec;
	event->input_event_usec = compat_event.usec;
	event->type = compat_event.type;
	event->code = compat_event.code;
	event->value = compat_event.value;

	return 0;
}
/*
 * Copy one input_event to userspace, converting to the 32-bit compat
 * layout when the caller is a 32-bit task without 64-bit time.
 */
int input_event_to_user(char __user *buffer,
			const struct input_event *event)
{
	struct input_event_compat compat_event;

	/* Native layout: a straight copy suffices. */
	if (!in_compat_syscall() || COMPAT_USE_64BIT_TIME) {
		if (copy_to_user(buffer, event, sizeof(struct input_event)))
			return -EFAULT;
		return 0;
	}

	/* Narrow the native event into the compat fields. */
	compat_event.sec = event->input_event_sec;
	compat_event.usec = event->input_event_usec;
	compat_event.type = event->type;
	compat_event.code = event->code;
	compat_event.value = event->value;

	if (copy_to_user(buffer, &compat_event,
			 sizeof(struct input_event_compat)))
		return -EFAULT;

	return 0;
}
/*
 * Copy a force-feedback effect from userspace, handling the 32-bit
 * compat layout in place.  @size must match the layout the caller used.
 */
int input_ff_effect_from_user(const char __user *buffer, size_t size,
			      struct ff_effect *effect)
{
	if (in_compat_syscall()) {
		struct ff_effect_compat *compat_effect;

		if (size != sizeof(struct ff_effect_compat))
			return -EINVAL;

		/*
		 * It so happens that the pointer which needs to be changed
		 * is the last field in the structure, so we can retrieve the
		 * whole thing and replace just the pointer.
		 */

		/* Deliberately alias the native buffer with the compat view. */
		compat_effect = (struct ff_effect_compat *)effect;

		if (copy_from_user(compat_effect, buffer,
				   sizeof(struct ff_effect_compat)))
			return -EFAULT;

		/* Only custom periodic effects carry a user pointer. */
		if (compat_effect->type == FF_PERIODIC &&
		    compat_effect->u.periodic.waveform == FF_CUSTOM)
			effect->u.periodic.custom_data =
				compat_ptr(compat_effect->u.periodic.custom_data);
	} else {
		if (size != sizeof(struct ff_effect))
			return -EINVAL;

		if (copy_from_user(effect, buffer, sizeof(struct ff_effect)))
			return -EFAULT;
	}

	return 0;
}
#else
/* Non-compat build: copy one input_event straight from userspace. */
int input_event_from_user(const char __user *buffer,
			  struct input_event *event)
{
	return copy_from_user(event, buffer,
			      sizeof(struct input_event)) ? -EFAULT : 0;
}
/* Non-compat build: copy one input_event straight to userspace. */
int input_event_to_user(char __user *buffer,
			const struct input_event *event)
{
	return copy_to_user(buffer, event,
			    sizeof(struct input_event)) ? -EFAULT : 0;
}
/* Non-compat build: copy a ff_effect from userspace, checking @size. */
int input_ff_effect_from_user(const char __user *buffer, size_t size,
			      struct ff_effect *effect)
{
	if (size != sizeof(struct ff_effect))
		return -EINVAL;

	return copy_from_user(effect, buffer,
			      sizeof(struct ff_effect)) ? -EFAULT : 0;
}
#endif /* CONFIG_COMPAT */
EXPORT_SYMBOL_GPL(input_event_from_user);
EXPORT_SYMBOL_GPL(input_event_to_user);
EXPORT_SYMBOL_GPL(input_ff_effect_from_user);
|
linux-master
|
drivers/input/input-compat.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Helpers for ChromeOS Vivaldi keyboard function row mapping
*
* Copyright (C) 2022 Google, Inc
*/
#include <linux/export.h>
#include <linux/input/vivaldi-fmap.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
/**
 * vivaldi_function_row_physmap_show - Print vivaldi function row physmap attribute
 * @data: The vivaldi function row map
 * @buf: Buffer to print the function row physmap to
 *
 * Formats the scancodes as space-separated hex values followed by a
 * newline.  Returns the number of bytes written (0 when the map is empty).
 */
ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
					  char *buf)
{
	const u32 *physmap = data->function_row_physmap;
	ssize_t written = 0;
	int i;

	if (!data->num_function_row_keys)
		return 0;

	for (i = 0; i < data->num_function_row_keys; i++)
		written += scnprintf(buf + written, PAGE_SIZE - written,
				     "%s%02X", written ? " " : "", physmap[i]);

	if (written)
		written += scnprintf(buf + written, PAGE_SIZE - written, "\n");

	return written;
}
EXPORT_SYMBOL_GPL(vivaldi_function_row_physmap_show);
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/vivaldi-fmap.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 1999-2001 Vojtech Pavlik
*/
/*
* Input driver event debug module - dumps all events into syslog
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/init.h>
#include <linux/device.h>
MODULE_AUTHOR("Vojtech Pavlik <[email protected]>");
MODULE_DESCRIPTION("Input driver event debug module");
MODULE_LICENSE("GPL");
/* Log every event passing through the handle to the syslog (debug level). */
static void evbug_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
{
	printk(KERN_DEBUG pr_fmt("Event. Dev: %s, Type: %d, Code: %d, Value: %d\n"),
	       dev_name(&handle->dev->dev), type, code, value);
}
/*
 * Attach the evbug handler to a newly matched input device: allocate a
 * handle, register it and open the device so events start flowing.
 * Returns 0 on success or a negative errno, unwinding on failure.
 */
static int evbug_connect(struct input_handler *handler, struct input_dev *dev,
			 const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "evbug";

	error = input_register_handle(handle);
	if (error)
		goto err_free_handle;

	/* Opening the device enables event delivery to this handle. */
	error = input_open_device(handle);
	if (error)
		goto err_unregister_handle;

	printk(KERN_DEBUG pr_fmt("Connected device: %s (%s at %s)\n"),
	       dev_name(&dev->dev),
	       dev->name ?: "unknown",
	       dev->phys ?: "unknown");

	return 0;

err_unregister_handle:
	input_unregister_handle(handle);
err_free_handle:
	kfree(handle);
	return error;
}
/* Detach from a device: log, close, unregister and free the handle. */
static void evbug_disconnect(struct input_handle *handle)
{
	printk(KERN_DEBUG pr_fmt("Disconnected device: %s\n"),
	       dev_name(&handle->dev->dev));

	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
/* Match table: driver_info = 1 with no match flags matches every device. */
static const struct input_device_id evbug_ids[] = {
	{ .driver_info = 1 },	/* Matches all devices */
	{ },			/* Terminating zero entry */
};

MODULE_DEVICE_TABLE(input, evbug_ids);

/* Handler description registered with the input core. */
static struct input_handler evbug_handler = {
	.event =	evbug_event,
	.connect =	evbug_connect,
	.disconnect =	evbug_disconnect,
	.name =		"evbug",
	.id_table =	evbug_ids,
};
/* Module entry: register the evbug handler with the input core. */
static int __init evbug_init(void)
{
	return input_register_handler(&evbug_handler);
}

/* Module exit: unregister the handler (disconnects all handles). */
static void __exit evbug_exit(void)
{
	input_unregister_handler(&evbug_handler);
}
module_init(evbug_init);
module_exit(evbug_exit);
|
linux-master
|
drivers/input/evbug.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Force feedback support for Linux input subsystem
*
* Copyright (c) 2006 Anssi Hannula <[email protected]>
* Copyright (c) 2006 Dmitry Torokhov <[email protected]>
*/
/* #define DEBUG */
#include <linux/input.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
/*
 * Check that the effect_id is a valid effect and whether the user
 * is the owner
 */
static int check_effect_access(struct ff_device *ff, int effect_id,
			       struct file *file)
{
	if (effect_id < 0 || effect_id >= ff->max_effects)
		return -EINVAL;

	/* Unallocated slots are not valid effects. */
	if (!ff->effect_owners[effect_id])
		return -EINVAL;

	/* A NULL @file skips the ownership check. */
	if (file && ff->effect_owners[effect_id] != file)
		return -EACCES;

	return 0;
}
/*
 * Checks whether 2 effects can be combined together
 */
static inline int check_effects_compatible(struct ff_effect *e1,
					   struct ff_effect *e2)
{
	if (e1->type != e2->type)
		return 0;

	/* Only periodic effects additionally require matching waveforms. */
	if (e1->type != FF_PERIODIC)
		return 1;

	return e1->u.periodic.waveform == e2->u.periodic.waveform;
}
/*
 * Convert an effect into compatible one
 */
static int compat_effect(struct ff_device *ff, struct ff_effect *effect)
{
	int magnitude;

	if (effect->type != FF_RUMBLE) {
		/* Let driver handle conversion */
		return 0;
	}

	/* Rumble can only be emulated on hardware with periodic effects. */
	if (!test_bit(FF_PERIODIC, ff->ffbit))
		return -EINVAL;

	/*
	 * calculate magnitude of sine wave as average of rumble's
	 * 2/3 of strong magnitude and 1/3 of weak magnitude
	 */
	magnitude = effect->u.rumble.strong_magnitude / 3 +
		    effect->u.rumble.weak_magnitude / 6;

	/* Rewrite the effect in place as a plain sine. */
	effect->type = FF_PERIODIC;
	effect->u.periodic.waveform = FF_SINE;
	effect->u.periodic.period = 50;
	effect->u.periodic.magnitude = magnitude;
	effect->u.periodic.offset = 0;
	effect->u.periodic.phase = 0;
	effect->u.periodic.envelope.attack_length = 0;
	effect->u.periodic.envelope.attack_level = 0;
	effect->u.periodic.envelope.fade_length = 0;
	effect->u.periodic.envelope.fade_level = 0;

	return 0;
}
/**
 * input_ff_upload() - upload effect into force-feedback device
 * @dev: input device
 * @effect: effect to be uploaded
 * @file: owner of the effect
 *
 * Validates the effect, converts it if the hardware cannot play the
 * requested type directly, allocates an id for new effects (@effect->id
 * == -1) and hands the effect to the driver's upload() method.
 * Returns 0 on success or a negative errno.
 */
int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
		    struct file *file)
{
	struct ff_device *ff = dev->ff;
	struct ff_effect *old;
	int ret = 0;
	int id;

	if (!test_bit(EV_FF, dev->evbit))
		return -ENOSYS;

	/* dev->ffbit holds the advertised types (incl. emulated ones). */
	if (effect->type < FF_EFFECT_MIN || effect->type > FF_EFFECT_MAX ||
	    !test_bit(effect->type, dev->ffbit)) {
		dev_dbg(&dev->dev, "invalid or not supported effect type in upload\n");
		return -EINVAL;
	}

	if (effect->type == FF_PERIODIC &&
	    (effect->u.periodic.waveform < FF_WAVEFORM_MIN ||
	     effect->u.periodic.waveform > FF_WAVEFORM_MAX ||
	     !test_bit(effect->u.periodic.waveform, dev->ffbit))) {
		dev_dbg(&dev->dev, "invalid or not supported wave form in upload\n");
		return -EINVAL;
	}

	/* ff->ffbit holds what the hardware really supports — convert. */
	if (!test_bit(effect->type, ff->ffbit)) {
		ret = compat_effect(ff, effect);
		if (ret)
			return ret;
	}

	mutex_lock(&ff->mutex);

	if (effect->id == -1) {
		/* New effect: claim the first free id. */
		for (id = 0; id < ff->max_effects; id++)
			if (!ff->effect_owners[id])
				break;

		if (id >= ff->max_effects) {
			ret = -ENOSPC;
			goto out;
		}

		effect->id = id;
		old = NULL;
	} else {
		/* Replacing an existing effect: caller must own it. */
		id = effect->id;

		ret = check_effect_access(ff, id, file);
		if (ret)
			goto out;

		old = &ff->effects[id];

		if (!check_effects_compatible(effect, old)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = ff->upload(dev, effect, old);
	if (ret)
		goto out;

	/* Publish the effect under event_lock so playback sees it whole. */
	spin_lock_irq(&dev->event_lock);
	ff->effects[id] = *effect;
	ff->effect_owners[id] = file;
	spin_unlock_irq(&dev->event_lock);

 out:
	mutex_unlock(&ff->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(input_ff_upload);
/*
 * Erases the effect if the requester is also the effect owner. The mutex
 * should already be locked before calling this function.
 */
static int erase_effect(struct input_dev *dev, int effect_id,
			struct file *file)
{
	struct ff_device *ff = dev->ff;
	int error;

	error = check_effect_access(ff, effect_id, file);
	if (error)
		return error;

	/* Stop playback and drop ownership atomically w.r.t. events. */
	spin_lock_irq(&dev->event_lock);
	ff->playback(dev, effect_id, 0);
	ff->effect_owners[effect_id] = NULL;
	spin_unlock_irq(&dev->event_lock);

	if (ff->erase) {
		error = ff->erase(dev, effect_id);
		if (error) {
			/* Driver refused: restore ownership so the effect
			 * remains usable by its owner. */
			spin_lock_irq(&dev->event_lock);
			ff->effect_owners[effect_id] = file;
			spin_unlock_irq(&dev->event_lock);

			return error;
		}
	}

	return 0;
}
/**
 * input_ff_erase - erase a force-feedback effect from device
 * @dev: input device to erase effect from
 * @effect_id: id of the effect to be erased
 * @file: purported owner of the request
 *
 * This function erases a force-feedback effect from specified device.
 * The effect will only be erased if it was uploaded through the same
 * file handle that is requesting erase.
 */
int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file)
{
	int ret;

	if (!test_bit(EV_FF, dev->evbit))
		return -ENOSYS;

	mutex_lock(&dev->ff->mutex);
	ret = erase_effect(dev, effect_id, file);
	mutex_unlock(&dev->ff->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(input_ff_erase);
/*
 * input_ff_flush - erase all effects owned by a file handle
 * @dev: input device to erase effect from
 * @file: purported owner of the effects
 *
 * This function erases all force-feedback effects associated with
 * the given owner from specified device. Note that @file may be %NULL,
 * in which case all effects will be erased.
 */
int input_ff_flush(struct input_dev *dev, struct file *file)
{
	struct ff_device *ff = dev->ff;
	int effect_id;

	dev_dbg(&dev->dev, "flushing now\n");

	mutex_lock(&ff->mutex);

	/* Best effort: per-effect errors are intentionally ignored. */
	for (effect_id = 0; effect_id < ff->max_effects; effect_id++)
		erase_effect(dev, effect_id, file);

	mutex_unlock(&ff->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(input_ff_flush);
/**
 * input_ff_event() - generic handler for force-feedback events
 * @dev: input device to send the effect to
 * @type: event type (anything but EV_FF is ignored)
 * @code: event code — FF_GAIN, FF_AUTOCENTER, or an effect id to play
 * @value: event value (gain/autocenter level, or playback count)
 */
int input_ff_event(struct input_dev *dev, unsigned int type,
		   unsigned int code, int value)
{
	struct ff_device *ff = dev->ff;

	if (type != EV_FF)
		return 0;

	switch (code) {
	case FF_GAIN:
		/* Gain values are limited to 16 bits. */
		if (!test_bit(FF_GAIN, dev->ffbit) || value > 0xffffU)
			break;

		ff->set_gain(dev, value);
		break;

	case FF_AUTOCENTER:
		if (!test_bit(FF_AUTOCENTER, dev->ffbit) || value > 0xffffU)
			break;

		ff->set_autocenter(dev, value);
		break;

	default:
		/* Any other code is treated as an effect id; ownership is
		 * not checked here (NULL file). */
		if (check_effect_access(ff, code, NULL) == 0)
			ff->playback(dev, code, value);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(input_ff_event);
/**
 * input_ff_create() - create force-feedback device
 * @dev: input device supporting force-feedback
 * @max_effects: maximum number of effects supported by the device
 *
 * This function allocates all necessary memory for a force feedback
 * portion of an input device and installs all default handlers.
 * @dev->ffbit should be already set up before calling this function.
 * Once ff device is created you need to setup its upload, erase,
 * playback and other handlers before registering input device
 */
int input_ff_create(struct input_dev *dev, unsigned int max_effects)
{
	struct ff_device *ff;
	size_t ff_dev_size;
	int i;

	if (!max_effects) {
		dev_err(&dev->dev, "cannot allocate device without any effects\n");
		return -EINVAL;
	}

	if (max_effects > FF_MAX_EFFECTS) {
		dev_err(&dev->dev, "cannot allocate more than FF_MAX_EFFECTS effects\n");
		return -EINVAL;
	}

	/* ff_device is followed by the effect_owners[] pointer array. */
	ff_dev_size = sizeof(struct ff_device) +
				max_effects * sizeof(struct file *);
	if (ff_dev_size < max_effects) /* overflow */
		return -EINVAL;

	ff = kzalloc(ff_dev_size, GFP_KERNEL);
	if (!ff)
		return -ENOMEM;

	ff->effects = kcalloc(max_effects, sizeof(struct ff_effect),
			      GFP_KERNEL);
	if (!ff->effects) {
		kfree(ff);
		return -ENOMEM;
	}

	ff->max_effects = max_effects;
	mutex_init(&ff->mutex);

	/* Install the generic FF entry points on the device. */
	dev->ff = ff;
	dev->flush = input_ff_flush;
	dev->event = input_ff_event;
	__set_bit(EV_FF, dev->evbit);

	/* Copy "true" bits into ff device bitmap */
	for_each_set_bit(i, dev->ffbit, FF_CNT)
		__set_bit(i, ff->ffbit);

	/* we can emulate RUMBLE with periodic effects */
	if (test_bit(FF_PERIODIC, ff->ffbit))
		__set_bit(FF_RUMBLE, dev->ffbit);

	return 0;
}
EXPORT_SYMBOL_GPL(input_ff_create);
/**
 * input_ff_destroy() - frees force feedback portion of input device
 * @dev: input device supporting force feedback
 *
 * This function is only needed in error path as input core will
 * automatically free force feedback structures when device is
 * destroyed.
 */
void input_ff_destroy(struct input_dev *dev)
{
	struct ff_device *ff = dev->ff;

	__clear_bit(EV_FF, dev->evbit);

	if (!ff)
		return;

	/* Give the driver a chance to tear down its private state. */
	if (ff->destroy)
		ff->destroy(ff);

	kfree(ff->private);
	kfree(ff->effects);
	kfree(ff);
	dev->ff = NULL;
}
EXPORT_SYMBOL_GPL(input_ff_destroy);
|
linux-master
|
drivers/input/ff-core.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* The input core
*
* Copyright (c) 1999-2002 Vojtech Pavlik
*/
#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
#include <linux/init.h>
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/pm.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include "input-compat.h"
#include "input-core-private.h"
#include "input-poller.h"
MODULE_AUTHOR("Vojtech Pavlik <[email protected]>");
MODULE_DESCRIPTION("Input core");
MODULE_LICENSE("GPL");
#define INPUT_MAX_CHAR_DEVICES 1024
#define INPUT_FIRST_DYNAMIC_DEV 256
static DEFINE_IDA(input_ida);
static LIST_HEAD(input_dev_list);
static LIST_HEAD(input_handler_list);
/*
* input_mutex protects access to both input_dev_list and input_handler_list.
* This also causes input_[un]register_device and input_[un]register_handler
* be mutually exclusive which simplifies locking in drivers implementing
* input handlers.
*/
static DEFINE_MUTEX(input_mutex);
static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
/* Highest valid event code for each event type (unlisted types are 0). */
static const unsigned int input_max_code[EV_CNT] = {
	[EV_KEY] = KEY_MAX,
	[EV_REL] = REL_MAX,
	[EV_ABS] = ABS_MAX,
	[EV_MSC] = MSC_MAX,
	[EV_SW] = SW_MAX,
	[EV_LED] = LED_MAX,
	[EV_SND] = SND_MAX,
	[EV_FF] = FF_MAX,
};
/* True when @code is within range and its bit is set in bitmap @bm. */
static inline int is_event_supported(unsigned int code,
				     unsigned long *bm, unsigned int max)
{
	if (code > max)
		return 0;

	return test_bit(code, bm);
}
/*
 * Apply hysteresis ("fuzz") filtering to an absolute axis value:
 * small deviations from the previous value are suppressed or blended
 * toward it, larger ones pass through unchanged.
 */
static int input_defuzz_abs_event(int value, int old_val, int fuzz)
{
	if (!fuzz)
		return value;

	/* Within fuzz/2 of the old value: treat as no movement at all. */
	if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
		return old_val;

	/* Within fuzz: weigh the old value 3:1 against the new one. */
	if (value > old_val - fuzz && value < old_val + fuzz)
		return (old_val * 3 + value) / 4;

	/* Within 2*fuzz: average old and new. */
	if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
		return (old_val + value) / 2;

	return value;
}
/*
 * Arm the software autorepeat timer for @code, provided the device has
 * EV_REP enabled, non-zero repeat parameters and a timer installed.
 */
static void input_start_autorepeat(struct input_dev *dev, int code)
{
	if (!test_bit(EV_REP, dev->evbit))
		return;
	if (!dev->rep[REP_PERIOD] || !dev->rep[REP_DELAY])
		return;
	if (!dev->timer.function)
		return;

	dev->repeat_key = code;
	mod_timer(&dev->timer,
		  jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
}
/* Cancel any pending software autorepeat for this device. */
static void input_stop_autorepeat(struct input_dev *dev)
{
	del_timer(&dev->timer);
}
/*
 * Pass event first through all filters and then, if event has not been
 * filtered out, through all open handles. This function is called with
 * dev->event_lock held and interrupts disabled.
 *
 * Returns the number of events that survived filtering.
 */
static unsigned int input_to_handler(struct input_handle *handle,
				     struct input_value *vals, unsigned int count)
{
	struct input_handler *handler = handle->handler;
	struct input_value *end = vals;
	struct input_value *v;

	if (handler->filter) {
		/* Compact the array in place, keeping unfiltered events. */
		for (v = vals; v != vals + count; v++) {
			if (handler->filter(handle, v->type, v->code, v->value))
				continue;
			if (end != v)
				*end = *v;
			end++;
		}
		count = end - vals;
	}

	if (!count)
		return 0;

	/* Prefer the batch interface when the handler provides one. */
	if (handler->events)
		handler->events(handle, vals, count);
	else if (handler->event)
		for (v = vals; v != vals + count; v++)
			handler->event(handle, v->type, v->code, v->value);

	return count;
}
/*
 * Pass values first through all filters and then, if event has not been
 * filtered out, through all open handles. This function is called with
 * dev->event_lock held and interrupts disabled.
 */
static void input_pass_values(struct input_dev *dev,
			      struct input_value *vals, unsigned int count)
{
	struct input_handle *handle;
	struct input_value *v;

	lockdep_assert_held(&dev->event_lock);

	if (!count)
		return;

	rcu_read_lock();

	handle = rcu_dereference(dev->grab);
	if (handle) {
		/* A grabbing handle receives events exclusively. */
		count = input_to_handler(handle, vals, count);
	} else {
		/* Otherwise fan out to every open handle; stop once a
		 * filter chain has consumed all events. */
		list_for_each_entry_rcu(handle, &dev->h_list, d_node)
			if (handle->open) {
				count = input_to_handler(handle, vals, count);
				if (!count)
					break;
			}
	}

	rcu_read_unlock();

	/* trigger auto repeat for key events */
	if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
		for (v = vals; v != vals + count; v++) {
			/* value 2 is an autorepeat event itself — skip. */
			if (v->type == EV_KEY && v->value != 2) {
				if (v->value)
					input_start_autorepeat(dev, v->code);
				else
					input_stop_autorepeat(dev);
			}
		}
	}
}
/*
 * Dispositions returned by input_get_disposition() et al., describing
 * how a single event should be routed.
 */
#define INPUT_IGNORE_EVENT 0 /* drop the event entirely */
#define INPUT_PASS_TO_HANDLERS 1 /* queue for attached handlers */
#define INPUT_PASS_TO_DEVICE 2 /* echo back into dev->event() */
#define INPUT_SLOT 4 /* emit a staged ABS_MT_SLOT event first */
#define INPUT_FLUSH 8 /* flush queued values to handlers */
#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
/*
 * Handle an EV_ABS event: stage ABS_MT_SLOT changes, de-fuzz and
 * de-duplicate values against the stored per-axis (or per-slot) state,
 * and flush a pending slot change when touch data for a new slot
 * arrives.  Returns the INPUT_* disposition for the event.
 */
static int input_handle_abs_event(struct input_dev *dev,
				  unsigned int code, int *pval)
{
	struct input_mt *mt = dev->mt;
	bool is_new_slot = false;
	bool is_mt_event;
	int *pold;

	if (code == ABS_MT_SLOT) {
		/*
		 * "Stage" the event; we'll flush it later, when we
		 * get actual touch data.
		 */
		if (mt && *pval >= 0 && *pval < mt->num_slots)
			mt->slot = *pval;

		return INPUT_IGNORE_EVENT;
	}

	is_mt_event = input_is_mt_value(code);

	if (!is_mt_event) {
		/* Ordinary axis - compare against the device-wide value. */
		pold = &dev->absinfo[code].value;
	} else if (mt) {
		/* Slotted MT axis - compare against the current slot. */
		pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
		is_new_slot = mt->slot != dev->absinfo[ABS_MT_SLOT].value;
	} else {
		/*
		 * Bypass filtering for multi-touch events when
		 * not employing slots.
		 */
		pold = NULL;
	}

	if (pold) {
		*pval = input_defuzz_abs_event(*pval, *pold,
						dev->absinfo[code].fuzz);
		/* Unchanged after de-fuzzing - suppress the event. */
		if (*pold == *pval)
			return INPUT_IGNORE_EVENT;

		*pold = *pval;
	}

	/* Flush pending "slot" event */
	if (is_new_slot) {
		dev->absinfo[ABS_MT_SLOT].value = mt->slot;
		return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
	}

	return INPUT_PASS_TO_HANDLERS;
}
/*
 * Decide what to do with a single event: update device state (key/sw/
 * led/snd bitmaps, abs values, repeat settings) and return an INPUT_*
 * disposition mask telling the caller where to route the event.
 * Called with dev->event_lock held.  May rewrite *pval (abs de-fuzz).
 */
static int input_get_disposition(struct input_dev *dev,
			  unsigned int type, unsigned int code, int *pval)
{
	int disposition = INPUT_IGNORE_EVENT;
	int value = *pval;

	/* filter-out events from inhibited devices */
	if (dev->inhibited)
		return INPUT_IGNORE_EVENT;

	switch (type) {

	case EV_SYN:
		switch (code) {
		case SYN_CONFIG:
			disposition = INPUT_PASS_TO_ALL;
			break;

		case SYN_REPORT:
			disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
			break;
		case SYN_MT_REPORT:
			disposition = INPUT_PASS_TO_HANDLERS;
			break;
		}
		break;

	case EV_KEY:
		if (is_event_supported(code, dev->keybit, KEY_MAX)) {

			/* auto-repeat bypasses state updates */
			if (value == 2) {
				disposition = INPUT_PASS_TO_HANDLERS;
				break;
			}

			/* Pass only genuine key state transitions. */
			if (!!test_bit(code, dev->key) != !!value) {

				__change_bit(code, dev->key);
				disposition = INPUT_PASS_TO_HANDLERS;
			}
		}
		break;

	case EV_SW:
		if (is_event_supported(code, dev->swbit, SW_MAX) &&
		    !!test_bit(code, dev->sw) != !!value) {

			__change_bit(code, dev->sw);
			disposition = INPUT_PASS_TO_HANDLERS;
		}
		break;

	case EV_ABS:
		if (is_event_supported(code, dev->absbit, ABS_MAX))
			disposition = input_handle_abs_event(dev, code, &value);

		break;

	case EV_REL:
		/* Zero-valued relative motion carries no information. */
		if (is_event_supported(code, dev->relbit, REL_MAX) && value)
			disposition = INPUT_PASS_TO_HANDLERS;

		break;

	case EV_MSC:
		if (is_event_supported(code, dev->mscbit, MSC_MAX))
			disposition = INPUT_PASS_TO_ALL;

		break;

	case EV_LED:
		if (is_event_supported(code, dev->ledbit, LED_MAX) &&
		    !!test_bit(code, dev->led) != !!value) {

			__change_bit(code, dev->led);
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_SND:
		if (is_event_supported(code, dev->sndbit, SND_MAX)) {

			/* Track state but always pass the event along. */
			if (!!test_bit(code, dev->snd) != !!value)
				__change_bit(code, dev->snd);
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_REP:
		if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
			dev->rep[code] = value;
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_FF:
		if (value >= 0)
			disposition = INPUT_PASS_TO_ALL;
		break;

	case EV_PWR:
		disposition = INPUT_PASS_TO_ALL;
		break;
	}

	*pval = value;
	return disposition;
}
/*
 * Route a single event according to @disposition: optionally echo it
 * into the device's own event() callback and/or append it to the
 * dev->vals queue for later delivery to handlers, flushing the queue
 * on SYN_REPORT or when it is about to overflow.
 */
static void input_event_dispose(struct input_dev *dev, int disposition,
				unsigned int type, unsigned int code, int value)
{
	if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
		dev->event(dev, type, code, value);

	if (!dev->vals)
		return;

	if (disposition & INPUT_PASS_TO_HANDLERS) {
		struct input_value *v;

		/* Emit the staged ABS_MT_SLOT event before the value itself. */
		if (disposition & INPUT_SLOT) {
			v = &dev->vals[dev->num_vals++];
			v->type = EV_ABS;
			v->code = ABS_MT_SLOT;
			v->value = dev->mt->slot;
		}

		v = &dev->vals[dev->num_vals++];
		v->type = type;
		v->code = code;
		v->value = value;
	}

	if (disposition & INPUT_FLUSH) {
		/* A queue holding only the SYN event itself is dropped. */
		if (dev->num_vals >= 2)
			input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
		/*
		 * Reset the timestamp on flush so we won't end up
		 * with a stale one. Note we only need to reset the
		 * monolithic one as we use its presence when deciding
		 * whether to generate a synthetic timestamp.
		 */
		dev->timestamp[INPUT_CLK_MONO] = ktime_set(0, 0);
	} else if (dev->num_vals >= dev->max_vals - 2) {
		/* Queue nearly full - synthesize a SYN and flush early. */
		dev->vals[dev->num_vals++] = input_value_sync;
		input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
	}
}
/*
 * Process a single event: compute its disposition, feed it to the
 * entropy pool and route it.  Must be called with dev->event_lock held.
 */
void input_handle_event(struct input_dev *dev,
			unsigned int type, unsigned int code, int value)
{
	int disposition;

	lockdep_assert_held(&dev->event_lock);

	disposition = input_get_disposition(dev, type, code, &value);
	if (disposition != INPUT_IGNORE_EVENT) {
		/* SYN markers carry no entropy worth mixing in. */
		if (type != EV_SYN)
			add_input_randomness(type, code, value);

		input_event_dispose(dev, disposition, type, code, value);
	}
}
/**
* input_event() - report new input event
* @dev: device that generated the event
* @type: type of the event
* @code: event code
* @value: value of the event
*
* This function should be used by drivers implementing various input
* devices to report input events. See also input_inject_event().
*
* NOTE: input_event() may be safely used right after input device was
* allocated with input_allocate_device(), even before it is registered
* with input_register_device(), but the event will not reach any of the
* input handlers. Such early invocation of input_event() may be used
* to 'seed' initial state of a switch or initial position of absolute
* axis, etc.
*/
void input_event(struct input_dev *dev,
		 unsigned int type, unsigned int code, int value)
{
	unsigned long flags;

	/* Silently drop event types the device does not advertise. */
	if (is_event_supported(type, dev->evbit, EV_MAX)) {

		spin_lock_irqsave(&dev->event_lock, flags);
		input_handle_event(dev, type, code, value);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_event);
/**
* input_inject_event() - send input event from input handler
* @handle: input handle to send event through
* @type: type of the event
* @code: event code
* @value: value of the event
*
* Similar to input_event() but will ignore event if device is
* "grabbed" and handle injecting event is not the one that owns
* the device.
*/
void input_inject_event(struct input_handle *handle,
			unsigned int type, unsigned int code, int value)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grab;
	unsigned long flags;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {
		spin_lock_irqsave(&dev->event_lock, flags);

		/*
		 * Only deliver if the device is not grabbed, or if the
		 * injecting handle is the grabber itself.
		 */
		rcu_read_lock();
		grab = rcu_dereference(dev->grab);
		if (!grab || grab == handle)
			input_handle_event(dev, type, code, value);
		rcu_read_unlock();

		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_inject_event);
/**
* input_alloc_absinfo - allocates array of input_absinfo structs
* @dev: the input device emitting absolute events
*
* If the absinfo struct the caller asked for is already allocated, this
 * function will not do anything.
*/
void input_alloc_absinfo(struct input_dev *dev)
{
	/* Already allocated by an earlier call - nothing to do. */
	if (dev->absinfo)
		return;

	/* kcalloc() zeroes the array, so every axis starts out at 0. */
	dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
	if (!dev->absinfo) {
		dev_err(dev->dev.parent ?: &dev->dev,
			"%s: unable to allocate memory\n", __func__);
		/*
		 * We will handle this allocation failure in
		 * input_register_device() when we refuse to register input
		 * device with ABS bits but without absinfo.
		 */
	}
}
EXPORT_SYMBOL(input_alloc_absinfo);
/*
 * Declare that @dev produces absolute events on @axis and record the
 * axis limits.  Quietly does nothing if the absinfo array could not be
 * allocated; input_register_device() rejects such devices later.
 */
void input_set_abs_params(struct input_dev *dev, unsigned int axis,
			  int min, int max, int fuzz, int flat)
{
	struct input_absinfo *info;

	/* Advertise the EV_ABS capability and the specific axis. */
	__set_bit(EV_ABS, dev->evbit);
	__set_bit(axis, dev->absbit);

	input_alloc_absinfo(dev);
	if (!dev->absinfo)
		return;

	info = &dev->absinfo[axis];
	info->minimum = min;
	info->maximum = max;
	info->fuzz = fuzz;
	info->flat = flat;
}
EXPORT_SYMBOL(input_set_abs_params);
/**
* input_copy_abs - Copy absinfo from one input_dev to another
* @dst: Destination input device to copy the abs settings to
* @dst_axis: ABS_* value selecting the destination axis
* @src: Source input device to copy the abs settings from
* @src_axis: ABS_* value selecting the source axis
*
* Set absinfo for the selected destination axis by copying it from
* the specified source input device's source axis.
* This is useful to e.g. setup a pen/stylus input-device for combined
* touchscreen/pen hardware where the pen uses the same coordinates as
* the touchscreen.
*/
void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
		    const struct input_dev *src, unsigned int src_axis)
{
	/* src must have EV_ABS and src_axis set */
	if (WARN_ON(!(test_bit(EV_ABS, src->evbit) &&
		      test_bit(src_axis, src->absbit))))
		return;

	/*
	 * input_alloc_absinfo() may have failed for the source. Our caller is
	 * expected to catch this when registering the input devices, which may
	 * happen after the input_copy_abs() call.
	 */
	if (!src->absinfo)
		return;

	/* Mark EV_ABS + dst_axis and allocate dst->absinfo if needed. */
	input_set_capability(dst, EV_ABS, dst_axis);
	if (!dst->absinfo)
		return;

	/* Whole-struct assignment copies min/max/fuzz/flat etc. at once. */
	dst->absinfo[dst_axis] = src->absinfo[src_axis];
}
EXPORT_SYMBOL(input_copy_abs);
/**
* input_grab_device - grabs device for exclusive use
* @handle: input handle that wants to own the device
*
* When a device is grabbed by an input handle all events generated by
* the device are delivered only to this handle. Also events injected
* by other input handles are ignored while device is grabbed.
*/
int input_grab_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	/* Interruptible: a signal aborts the grab attempt. */
	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	/* Only one handle may hold the grab at a time. */
	if (dev->grab) {
		retval = -EBUSY;
		goto out;
	}

	rcu_assign_pointer(dev->grab, handle);

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_grab_device);
/*
 * Drop @handle's grab on its device, if it is the current grabber, and
 * restart all other open handles so they can resynchronize their
 * state.  Must be called with dev->mutex held.
 */
static void __input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grabber;

	grabber = rcu_dereference_protected(dev->grab,
					    lockdep_is_held(&dev->mutex));
	if (grabber == handle) {
		rcu_assign_pointer(dev->grab, NULL);
		/* Make sure input_pass_values() notices that grab is gone */
		synchronize_rcu();

		list_for_each_entry(handle, &dev->h_list, d_node)
			if (handle->open && handle->handler->start)
				handle->handler->start(handle);
	}
}
/**
 * input_release_device - release previously grabbed device
 * @handle: input handle that owns the device
 *
 * Releases previously grabbed device so that other input handles can
 * start receiving input events. Upon release all handlers attached
 * to the device have their start() method called so they have a chance
 * to synchronize device state with the rest of the system.
 */
void input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);
	__input_release_device(handle);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_release_device);
/**
 * input_open_device - open input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to start receiving events from given input device.
 */
int input_open_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	/* Device is being unregistered - refuse new opens. */
	if (dev->going_away) {
		retval = -ENODEV;
		goto out;
	}

	handle->open++;

	/* Only the first (non-inhibited) opener starts the hardware. */
	if (dev->users++ || dev->inhibited) {
		/*
		 * Device is already opened and/or inhibited,
		 * so we can exit immediately and report success.
		 */
		goto out;
	}

	if (dev->open) {
		retval = dev->open(dev);
		if (retval) {
			/* Undo the counters bumped above. */
			dev->users--;
			handle->open--;
			/*
			 * Make sure we are not delivering any more events
			 * through this handle
			 */
			synchronize_rcu();
			goto out;
		}
	}

	if (dev->poller)
		input_dev_poller_start(dev->poller);

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_open_device);
/*
 * Pass @file to the device's flush() callback, if any, under
 * dev->mutex.  Returns 0 when the device has no flush callback, or an
 * error from the callback / interrupted lock acquisition.
 */
int input_flush_device(struct input_handle *handle, struct file *file)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->flush)
		retval = dev->flush(dev, file);

	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_flush_device);
/**
 * input_close_device - close input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to stop receiving events from given input device.
 */
void input_close_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);

	/* Closing implicitly drops any grab held by this handle. */
	__input_release_device(handle);

	/* Last user gone (and not inhibited): stop the hardware. */
	if (!--dev->users && !dev->inhibited) {
		if (dev->poller)
			input_dev_poller_stop(dev->poller);
		if (dev->close)
			dev->close(dev);
	}

	if (!--handle->open) {
		/*
		 * synchronize_rcu() makes sure that input_pass_values()
		 * completed and that no more input events are delivered
		 * through this handle
		 */
		synchronize_rcu();
	}

	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_close_device);
/*
 * Simulate keyup events for all keys that are marked as pressed.
 * The function must be called with dev->event_lock held.
 * Returns true if any release event was generated, so the caller
 * knows whether a SYN_REPORT should follow.
 */
static bool input_dev_release_keys(struct input_dev *dev)
{
	bool need_sync = false;
	int code;

	lockdep_assert_held(&dev->event_lock);

	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
		/* Release every key currently reported as down. */
		for_each_set_bit(code, dev->key, KEY_CNT) {
			input_handle_event(dev, EV_KEY, code, 0);
			need_sync = true;
		}
	}

	return need_sync;
}
/*
 * Prepare device for unregistering
 */
static void input_disconnect_device(struct input_dev *dev)
{
	struct input_handle *handle;

	/*
	 * Mark device as going away. Note that we take dev->mutex here
	 * not to protect access to dev->going_away but rather to ensure
	 * that there are no threads in the middle of input_open_device()
	 */
	mutex_lock(&dev->mutex);
	dev->going_away = true;
	mutex_unlock(&dev->mutex);

	spin_lock_irq(&dev->event_lock);

	/*
	 * Simulate keyup events for all pressed keys so that handlers
	 * are not left with "stuck" keys. The driver may continue to
	 * generate events even after we are done here but they will not
	 * reach any handlers.
	 */
	if (input_dev_release_keys(dev))
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);

	/* Marking handles closed makes input_to_handler() skip them. */
	list_for_each_entry(handle, &dev->h_list, d_node)
		handle->open = 0;

	spin_unlock_irq(&dev->event_lock);
}
/**
* input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
* @ke: keymap entry containing scancode to be converted.
* @scancode: pointer to the location where converted scancode should
* be stored.
*
 * This function is used to convert scancode stored in &struct input_keymap_entry
* into scalar form understood by legacy keymap handling methods. These
* methods expect scancodes to be represented as 'unsigned int'.
*/
int input_scancode_to_scalar(const struct input_keymap_entry *ke,
unsigned int *scancode)
{
switch (ke->len) {
case 1:
*scancode = *((u8 *)ke->scancode);
break;
case 2:
*scancode = *((u16 *)ke->scancode);
break;
case 4:
*scancode = *((u32 *)ke->scancode);
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(input_scancode_to_scalar);
/*
* Those routines handle the default case where no [gs]etkeycode() is
* defined. In this case, an array indexed by the scancode is used.
*/
static unsigned int input_fetch_keycode(struct input_dev *dev,
unsigned int index)
{
switch (dev->keycodesize) {
case 1:
return ((u8 *)dev->keycode)[index];
case 2:
return ((u16 *)dev->keycode)[index];
default:
return ((u32 *)dev->keycode)[index];
}
}
/*
 * Default getkeycode() for drivers using a flat scancode-indexed
 * keymap: resolve the map index (given directly or derived from the
 * scancode) and report the keycode stored there, filling @ke with the
 * normalized index/scancode as well.
 */
static int input_default_getkeycode(struct input_dev *dev,
				    struct input_keymap_entry *ke)
{
	unsigned int index;
	int error;

	if (!dev->keycodesize)
		return -EINVAL;

	if (ke->flags & INPUT_KEYMAP_BY_INDEX)
		index = ke->index;
	else {
		error = input_scancode_to_scalar(ke, &index);
		if (error)
			return error;
	}

	if (index >= dev->keycodemax)
		return -EINVAL;

	ke->keycode = input_fetch_keycode(dev, index);
	ke->index = index;
	/* Report the scalar index back as the canonical scancode. */
	ke->len = sizeof(index);
	memcpy(ke->scancode, &index, sizeof(index));

	return 0;
}
/*
 * Default setkeycode() for drivers using a flat scancode-indexed
 * keymap: store the new keycode at the resolved index, return the
 * previous one via @old_keycode and keep dev->keybit in sync with the
 * keymap contents.
 */
static int input_default_setkeycode(struct input_dev *dev,
				    const struct input_keymap_entry *ke,
				    unsigned int *old_keycode)
{
	unsigned int index;
	int error;
	int i;

	if (!dev->keycodesize)
		return -EINVAL;

	if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
		index = ke->index;
	} else {
		error = input_scancode_to_scalar(ke, &index);
		if (error)
			return error;
	}

	if (index >= dev->keycodemax)
		return -EINVAL;

	/* Reject keycodes that do not fit in the keymap element width. */
	if (dev->keycodesize < sizeof(ke->keycode) &&
			(ke->keycode >> (dev->keycodesize * 8)))
		return -EINVAL;

	switch (dev->keycodesize) {
		case 1: {
			u8 *k = (u8 *)dev->keycode;
			*old_keycode = k[index];
			k[index] = ke->keycode;
			break;
		}
		case 2: {
			u16 *k = (u16 *)dev->keycode;
			*old_keycode = k[index];
			k[index] = ke->keycode;
			break;
		}
		default: {
			u32 *k = (u32 *)dev->keycode;
			*old_keycode = k[index];
			k[index] = ke->keycode;
			break;
		}
	}

	/* Drop the old keycode's capability bit unless still mapped. */
	if (*old_keycode <= KEY_MAX) {
		__clear_bit(*old_keycode, dev->keybit);
		for (i = 0; i < dev->keycodemax; i++) {
			if (input_fetch_keycode(dev, i) == *old_keycode) {
				__set_bit(*old_keycode, dev->keybit);
				/* Setting the bit twice is useless, so break */
				break;
			}
		}
	}

	__set_bit(ke->keycode, dev->keybit);

	return 0;
}
/**
* input_get_keycode - retrieve keycode currently mapped to a given scancode
* @dev: input device which keymap is being queried
* @ke: keymap entry
*
* This function should be called by anyone interested in retrieving current
* keymap. Presently evdev handlers use it.
*/
int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
{
	unsigned long flags;
	int retval;

	/* event_lock serializes keymap access with incoming events. */
	spin_lock_irqsave(&dev->event_lock, flags);
	retval = dev->getkeycode(dev, ke);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return retval;
}
EXPORT_SYMBOL(input_get_keycode);
/**
* input_set_keycode - attribute a keycode to a given scancode
* @dev: input device which keymap is being updated
* @ke: new keymap entry
*
* This function should be called by anyone needing to update current
* keymap. Presently keyboard and evdev handlers use it.
*/
int input_set_keycode(struct input_dev *dev,
		      const struct input_keymap_entry *ke)
{
	unsigned long flags;
	unsigned int old_keycode;
	int retval;

	if (ke->keycode > KEY_MAX)
		return -EINVAL;

	/* event_lock keeps the keymap stable w.r.t. incoming events. */
	spin_lock_irqsave(&dev->event_lock, flags);

	retval = dev->setkeycode(dev, ke, &old_keycode);
	if (retval)
		goto out;

	/* Make sure KEY_RESERVED did not get enabled. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/*
	 * Simulate keyup event if keycode is not present
	 * in the keymap anymore
	 */
	if (old_keycode > KEY_MAX) {
		dev_warn(dev->dev.parent ?: &dev->dev,
			 "%s: got too big old keycode %#x\n",
			 __func__, old_keycode);
	} else if (test_bit(EV_KEY, dev->evbit) &&
		   !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
		   __test_and_clear_bit(old_keycode, dev->key)) {
		/*
		 * We have to use input_event_dispose() here directly instead
		 * of input_handle_event() because the key we want to release
		 * here is considered no longer supported by the device and
		 * input_handle_event() will ignore it.
		 */
		input_event_dispose(dev, INPUT_PASS_TO_HANDLERS,
				    EV_KEY, old_keycode, 0);
		input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH,
				    EV_SYN, SYN_REPORT, 1);
	}

 out:
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return retval;
}
EXPORT_SYMBOL(input_set_keycode);
/*
 * Check whether @dev matches @id: every id field guarded by a MATCH
 * flag must be equal, and every capability bitmap in @id must be a
 * subset of the corresponding bitmap of the device.
 */
bool input_match_device_id(const struct input_dev *dev,
			   const struct input_device_id *id)
{
	if ((id->flags & INPUT_DEVICE_ID_MATCH_BUS) &&
	    id->bustype != dev->id.bustype)
		return false;

	if ((id->flags & INPUT_DEVICE_ID_MATCH_VENDOR) &&
	    id->vendor != dev->id.vendor)
		return false;

	if ((id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT) &&
	    id->product != dev->id.product)
		return false;

	if ((id->flags & INPUT_DEVICE_ID_MATCH_VERSION) &&
	    id->version != dev->id.version)
		return false;

	return bitmap_subset(id->evbit, dev->evbit, EV_MAX) &&
	       bitmap_subset(id->keybit, dev->keybit, KEY_MAX) &&
	       bitmap_subset(id->relbit, dev->relbit, REL_MAX) &&
	       bitmap_subset(id->absbit, dev->absbit, ABS_MAX) &&
	       bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) &&
	       bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) &&
	       bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) &&
	       bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) &&
	       bitmap_subset(id->swbit, dev->swbit, SW_MAX) &&
	       bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX);
}
EXPORT_SYMBOL(input_match_device_id);
/*
 * Scan @handler's id table for the first entry matching @dev, also
 * honouring the handler's optional match() callback.  Returns the
 * matching entry or NULL.
 */
static const struct input_device_id *input_match_device(struct input_handler *handler,
							struct input_dev *dev)
{
	const struct input_device_id *id;

	/* An all-zero entry (no flags, no driver_info) terminates the table. */
	for (id = handler->id_table; id->flags || id->driver_info; id++) {
		if (input_match_device_id(dev, id) &&
		    (!handler->match || handler->match(handler, dev))) {
			return id;
		}
	}

	return NULL;
}
/*
 * Try to bind @handler to @dev: look for a matching id table entry
 * and, if found, call the handler's connect() method.  Returns 0 on
 * success, -ENODEV when there is no match, or the connect() error.
 */
static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
{
	const struct input_device_id *id;
	int error;

	id = input_match_device(handler, dev);
	if (!id)
		return -ENODEV;

	error = handler->connect(handler, dev, id);
	/* -ENODEV from connect() is a normal "not interested" answer. */
	if (error && error != -ENODEV)
		pr_err("failed to attach handler %s to device %s, error: %d\n",
		       handler->name, kobject_name(&dev->dev.kobj), error);

	return error;
}
#ifdef CONFIG_COMPAT
/*
 * Format @bits in hex into @buf.  For a compat (32-bit) reader each
 * native long is printed as two space-separated 32-bit words so the
 * bitmap layout matches what a 32-bit kernel would produce.
 * NOTE(review): the "bits >> 32" assumes unsigned long is 64 bits,
 * which holds in this CONFIG_COMPAT branch on 64-bit kernels - confirm
 * if this is ever built elsewhere.  Returns the number of characters
 * written; 0 when @bits is zero and @skip_empty is set.
 */
static int input_bits_to_string(char *buf, int buf_size,
				unsigned long bits, bool skip_empty)
{
	int len = 0;

	if (in_compat_syscall()) {
		u32 dword = bits >> 32;
		if (dword || !skip_empty)
			len += snprintf(buf, buf_size, "%x ", dword);

		dword = bits & 0xffffffffUL;
		/* Always print the low word once the high word was printed. */
		if (dword || !skip_empty || len)
			len += snprintf(buf + len, max(buf_size - len, 0),
					"%x", dword);
	} else {
		if (bits || !skip_empty)
			len += snprintf(buf, buf_size, "%lx", bits);
	}

	return len;
}
#else /* !CONFIG_COMPAT */
/*
 * Format @bits in lowercase hex into @buf.  Writes nothing and returns
 * 0 when @bits is zero and @skip_empty is set; otherwise returns the
 * snprintf() result (number of characters that would be written).
 */
static int input_bits_to_string(char *buf, int buf_size,
				unsigned long bits, bool skip_empty)
{
	if (!bits && skip_empty)
		return 0;

	return snprintf(buf, buf_size, "%lx", bits);
}
#endif
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_bus_input_dir;
static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait);
/* Generation counter, bumped whenever the device list changes. */
static int input_devices_state;
/*
 * Bump the generation counter and wake pollers of
 * /proc/bus/input/devices so they notice the change.
 */
static inline void input_wakeup_procfs_readers(void)
{
	input_devices_state++;
	wake_up(&input_devices_poll_wait);
}
/*
 * poll() for /proc/bus/input/devices: report readable whenever the
 * device list generation changed since this file last saw it (the
 * last-seen generation is cached in file->f_version).
 */
static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &input_devices_poll_wait, wait);
	if (file->f_version != input_devices_state) {
		file->f_version = input_devices_state;
		return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}
/*
 * Iterator state for the /proc/bus/input seq files, packed into the
 * seq_file private pointer (see the BUILD_BUG_ON in the start
 * methods): current position plus whether input_mutex was acquired.
 */
union input_seq_state {
	struct {
		unsigned short pos;
		bool mutex_acquired;
	};
	void *p;
};
/*
 * seq start() for /proc/bus/input/devices: take input_mutex (recording
 * whether we got it so stop() knows what to release) and position the
 * iterator in the global device list.
 */
static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;
	int error;

	/* We need to fit into seq->private pointer */
	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));

	error = mutex_lock_interruptible(&input_mutex);
	if (error) {
		state->mutex_acquired = false;
		return ERR_PTR(error);
	}

	state->mutex_acquired = true;

	return seq_list_start(&input_dev_list, *pos);
}
/* seq next(): advance to the following entry of the device list. */
static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &input_dev_list, pos);
}
/*
 * Common seq stop() for both proc files: drop input_mutex, but only if
 * the corresponding start() actually managed to take it.
 */
static void input_seq_stop(struct seq_file *seq, void *v)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;

	if (state->mutex_acquired)
		mutex_unlock(&input_mutex);
}
/*
 * Print "B: <name>=<hex words>" for a capability bitmap, most
 * significant word first, suppressing leading all-zero words; a single
 * '0' is printed when the whole bitmap is empty.
 */
static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
				   unsigned long *bitmap, int max)
{
	int i;
	bool skip_empty = true;
	/* Large enough for one long in hex plus separator/NUL. */
	char buf[18];

	seq_printf(seq, "B: %s=", name);

	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
		if (input_bits_to_string(buf, sizeof(buf),
					 bitmap[i], skip_empty)) {
			/* First printed word disables further skipping. */
			skip_empty = false;
			seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
		}
	}

	/*
	 * If no output was produced print a single 0.
	 */
	if (skip_empty)
		seq_putc(seq, '0');

	seq_putc(seq, '\n');
}
/*
 * Emit one device record of /proc/bus/input/devices: identity lines
 * (I/N/P/S/U), the list of attached handlers (H) and one bitmap line
 * (B) per supported event type.
 */
static int input_devices_seq_show(struct seq_file *seq, void *v)
{
	struct input_dev *dev = container_of(v, struct input_dev, node);
	const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	struct input_handle *handle;

	seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
		   dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);

	seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
	seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
	seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
	seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
	seq_puts(seq, "H: Handlers=");

	list_for_each_entry(handle, &dev->h_list, d_node)
		seq_printf(seq, "%s ", handle->name);
	seq_putc(seq, '\n');

	input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);

	/* Only print per-type bitmaps for event types the device has. */
	input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
	if (test_bit(EV_KEY, dev->evbit))
		input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
	if (test_bit(EV_REL, dev->evbit))
		input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
	if (test_bit(EV_ABS, dev->evbit))
		input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
	if (test_bit(EV_MSC, dev->evbit))
		input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
	if (test_bit(EV_LED, dev->evbit))
		input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
	if (test_bit(EV_SND, dev->evbit))
		input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
	if (test_bit(EV_FF, dev->evbit))
		input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
	if (test_bit(EV_SW, dev->evbit))
		input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);

	seq_putc(seq, '\n');

	kfree(path);
	return 0;
}
/* seq_file iterator for /proc/bus/input/devices. */
static const struct seq_operations input_devices_seq_ops = {
	.start	= input_devices_seq_start,
	.next	= input_devices_seq_next,
	.stop	= input_seq_stop,
	.show	= input_devices_seq_show,
};
/* open() for /proc/bus/input/devices - plain seq_file setup. */
static int input_proc_devices_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &input_devices_seq_ops);
}
/* proc_ops for /proc/bus/input/devices (pollable for hotplug). */
static const struct proc_ops input_devices_proc_ops = {
	.proc_open	= input_proc_devices_open,
	.proc_poll	= input_proc_devices_poll,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};
/*
 * seq start() for /proc/bus/input/handlers: like the devices variant,
 * but additionally records the position so show() can print it as the
 * handler number.
 */
static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;
	int error;

	/* We need to fit into seq->private pointer */
	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));

	error = mutex_lock_interruptible(&input_mutex);
	if (error) {
		state->mutex_acquired = false;
		return ERR_PTR(error);
	}

	state->mutex_acquired = true;
	state->pos = *pos;

	return seq_list_start(&input_handler_list, *pos);
}
/* seq next(): advance the handler list iterator and keep pos in sync. */
static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;

	state->pos = *pos + 1;
	return seq_list_next(v, &input_handler_list, pos);
}
/*
 * Emit one line of /proc/bus/input/handlers:
 * "N: Number=<pos> Name=<name> [(filter)] [Minor=<minor>]".
 */
static int input_handlers_seq_show(struct seq_file *seq, void *v)
{
	struct input_handler *handler = container_of(v, struct input_handler, node);
	union input_seq_state *state = (union input_seq_state *)&seq->private;

	seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
	if (handler->filter)
		seq_puts(seq, " (filter)");
	if (handler->legacy_minors)
		seq_printf(seq, " Minor=%d", handler->minor);
	seq_putc(seq, '\n');

	return 0;
}
/* seq_file iterator for /proc/bus/input/handlers. */
static const struct seq_operations input_handlers_seq_ops = {
	.start	= input_handlers_seq_start,
	.next	= input_handlers_seq_next,
	.stop	= input_seq_stop,
	.show	= input_handlers_seq_show,
};
/* open() for /proc/bus/input/handlers - plain seq_file setup. */
static int input_proc_handlers_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &input_handlers_seq_ops);
}
/* proc_ops for /proc/bus/input/handlers (read-only, not pollable). */
static const struct proc_ops input_handlers_proc_ops = {
	.proc_open	= input_proc_handlers_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};
/*
 * Create /proc/bus/input with its "devices" and "handlers" entries.
 * On partial failure the already-created entries are removed via the
 * goto cleanup chain.  Returns 0 or -ENOMEM.
 */
static int __init input_proc_init(void)
{
	struct proc_dir_entry *entry;

	proc_bus_input_dir = proc_mkdir("bus/input", NULL);
	if (!proc_bus_input_dir)
		return -ENOMEM;

	entry = proc_create("devices", 0, proc_bus_input_dir,
			    &input_devices_proc_ops);
	if (!entry)
		goto fail1;

	entry = proc_create("handlers", 0, proc_bus_input_dir,
			    &input_handlers_proc_ops);
	if (!entry)
		goto fail2;

	return 0;

 fail2:	remove_proc_entry("devices", proc_bus_input_dir);
 fail1: remove_proc_entry("bus/input", NULL);
	return -ENOMEM;
}
/* Tear down the /proc/bus/input hierarchy created by input_proc_init(). */
static void input_proc_exit(void)
{
	remove_proc_entry("devices", proc_bus_input_dir);
	remove_proc_entry("handlers", proc_bus_input_dir);
	remove_proc_entry("bus/input", NULL);
}
#else /* !CONFIG_PROC_FS */
/* procfs disabled: the notification and setup hooks become no-ops. */
static inline void input_wakeup_procfs_readers(void) { }
static inline int input_proc_init(void) { return 0; }
static inline void input_proc_exit(void) { }
#endif
/*
 * INPUT_DEV_STRING_ATTR_SHOW - generate a sysfs show() routine and a
 * read-only DEVICE_ATTR for one of the input_dev string members.  The
 * macro argument names both the attribute and the struct member, so
 * "input_dev->name" below expands to ->name, ->phys or ->uniq.
 */
#define INPUT_DEV_STRING_ATTR_SHOW(name)				\
static ssize_t input_dev_show_##name(struct device *dev,		\
				     struct device_attribute *attr,	\
				     char *buf)				\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
									\
	return scnprintf(buf, PAGE_SIZE, "%s\n",			\
			 input_dev->name ? input_dev->name : "");	\
}									\
static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)
INPUT_DEV_STRING_ATTR_SHOW(name);
INPUT_DEV_STRING_ATTR_SHOW(phys);
INPUT_DEV_STRING_ATTR_SHOW(uniq);
/*
 * Append "<name><N>,<N>,..." to @buf, listing the set bits of @bm in
 * [min_bit, max_bit) as uppercase hex - one segment of the modalias
 * string.  Returns the number of characters that would have been
 * written (snprintf semantics); output is clamped to @size.
 */
static int input_print_modalias_bits(char *buf, int size,
				     char name, const unsigned long *bm,
				     unsigned int min_bit, unsigned int max_bit)
{
	int len = 0, i;

	len += snprintf(buf, max(size, 0), "%c", name);
	for (i = min_bit; i < max_bit; i++)
		if (bm[BIT_WORD(i)] & BIT_MASK(i))
			len += snprintf(buf + len, max(size - len, 0), "%X,", i);
	return len;
}
/*
 * Build the full modalias string for @id:
 * "input:b<bus>v<vendor>p<product>e<version>-e..k..r..a..m..l..s..f..w.."
 * with one input_print_modalias_bits() segment per capability bitmap.
 * Returns the would-be length (snprintf semantics).
 */
static int input_print_modalias(char *buf, int size, const struct input_dev *id,
				int add_cr)
{
	int len;

	len = snprintf(buf, max(size, 0),
		       "input:b%04Xv%04Xp%04Xe%04X-",
		       id->id.bustype, id->id.vendor,
		       id->id.product, id->id.version);

	len += input_print_modalias_bits(buf + len, size - len,
				'e', id->evbit, 0, EV_MAX);
	/* Keys below KEY_MIN_INTERESTING are not useful for matching. */
	len += input_print_modalias_bits(buf + len, size - len,
				'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'r', id->relbit, 0, REL_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'a', id->absbit, 0, ABS_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'm', id->mscbit, 0, MSC_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'l', id->ledbit, 0, LED_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				's', id->sndbit, 0, SND_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'f', id->ffbit, 0, FF_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'w', id->swbit, 0, SW_MAX);

	if (add_cr)
		len += snprintf(buf + len, max(size - len, 0), "\n");

	return len;
}
/* sysfs "modalias" attribute: the modalias string plus newline. */
static ssize_t input_dev_show_modalias(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct input_dev *id = to_input_dev(dev);
	ssize_t len;

	len = input_print_modalias(buf, PAGE_SIZE, id, 1);

	/* len may exceed PAGE_SIZE (snprintf semantics) - clamp it. */
	return min_t(int, len, PAGE_SIZE);
}
static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
int max, int add_cr);
/* sysfs "properties" attribute: the propbit bitmap in hex words. */
static ssize_t input_dev_show_properties(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct input_dev *input_dev = to_input_dev(dev);
	int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
				     INPUT_PROP_MAX, true);
	return min_t(int, len, PAGE_SIZE);
}
static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);
static int input_inhibit_device(struct input_dev *dev);
static int input_uninhibit_device(struct input_dev *dev);
/* sysfs "inhibited" read: current inhibit state as "0" or "1". */
static ssize_t inhibited_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct input_dev *input_dev = to_input_dev(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", input_dev->inhibited);
}
/*
 * sysfs "inhibited" write: parse a boolean and (un)inhibit the device
 * accordingly, propagating any error from the transition helpers.
 */
static ssize_t inhibited_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t len)
{
	struct input_dev *input_dev = to_input_dev(dev);
	ssize_t rv;
	bool inhibited;

	if (kstrtobool(buf, &inhibited))
		return -EINVAL;

	if (inhibited)
		rv = input_inhibit_device(input_dev);
	else
		rv = input_uninhibit_device(input_dev);

	if (rv != 0)
		return rv;

	return len;
}
static DEVICE_ATTR_RW(inhibited);
/* Default sysfs attributes present on every input device. */
static struct attribute *input_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_phys.attr,
	&dev_attr_uniq.attr,
	&dev_attr_modalias.attr,
	&dev_attr_properties.attr,
	&dev_attr_inhibited.attr,
	NULL
};
static const struct attribute_group input_dev_attr_group = {
	.attrs	= input_dev_attrs,
};
/*
 * Generates a sysfs show routine and read-only attribute for one field
 * of struct input_id (bustype/vendor/product/version), printed as a
 * 4-digit hex number.
 */
#define INPUT_DEV_ID_ATTR(name)						\
static ssize_t input_dev_show_id_##name(struct device *dev,		\
					struct device_attribute *attr,	\
					char *buf)			\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
	return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name);	\
}									\
static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)
INPUT_DEV_ID_ATTR(bustype);
INPUT_DEV_ID_ATTR(vendor);
INPUT_DEV_ID_ATTR(product);
INPUT_DEV_ID_ATTR(version);
/* Grouped under the "id" subdirectory of the input device. */
static struct attribute *input_dev_id_attrs[] = {
	&dev_attr_bustype.attr,
	&dev_attr_vendor.attr,
	&dev_attr_product.attr,
	&dev_attr_version.attr,
	NULL
};
static const struct attribute_group input_dev_id_attr_group = {
	.name = "id",
	.attrs = input_dev_id_attrs,
};
/*
 * Print @bitmap into @buf as space-separated words, most significant
 * word first, suppressing leading all-zero words. Emits a lone "0" if
 * the whole bitmap is empty and appends "\n" when @add_cr is set.
 * Returns the would-be length (snprintf semantics: may exceed
 * @buf_size on truncation).
 */
static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
			      int max, int add_cr)
{
	int i;
	int len = 0;
	bool skip_empty = true;
	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
		len += input_bits_to_string(buf + len, max(buf_size - len, 0),
					    bitmap[i], skip_empty);
		if (len) {
			/* First non-zero word seen: print all later words. */
			skip_empty = false;
			if (i > 0)
				len += snprintf(buf + len, max(buf_size - len, 0), " ");
		}
	}
	/*
	 * If no output was produced print a single 0.
	 */
	if (len == 0)
		len = snprintf(buf, buf_size, "%d", 0);
	if (add_cr)
		len += snprintf(buf + len, max(buf_size - len, 0), "\n");
	return len;
}
/*
 * Generates a sysfs show routine and attribute printing one capability
 * bitmap (dev->bm##bit) of the device, e.g. keybit/relbit/absbit.
 */
#define INPUT_DEV_CAP_ATTR(ev, bm)					\
static ssize_t input_dev_show_cap_##bm(struct device *dev,		\
				       struct device_attribute *attr,	\
				       char *buf)			\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
	int len = input_print_bitmap(buf, PAGE_SIZE,			\
				     input_dev->bm##bit, ev##_MAX,	\
				     true);				\
	return min_t(int, len, PAGE_SIZE);				\
}									\
static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
INPUT_DEV_CAP_ATTR(EV, ev);
INPUT_DEV_CAP_ATTR(KEY, key);
INPUT_DEV_CAP_ATTR(REL, rel);
INPUT_DEV_CAP_ATTR(ABS, abs);
INPUT_DEV_CAP_ATTR(MSC, msc);
INPUT_DEV_CAP_ATTR(LED, led);
INPUT_DEV_CAP_ATTR(SND, snd);
INPUT_DEV_CAP_ATTR(FF, ff);
INPUT_DEV_CAP_ATTR(SW, sw);
/* Grouped under the "capabilities" subdirectory of the input device. */
static struct attribute *input_dev_caps_attrs[] = {
	&dev_attr_ev.attr,
	&dev_attr_key.attr,
	&dev_attr_rel.attr,
	&dev_attr_abs.attr,
	&dev_attr_msc.attr,
	&dev_attr_led.attr,
	&dev_attr_snd.attr,
	&dev_attr_ff.attr,
	&dev_attr_sw.attr,
	NULL
};
static const struct attribute_group input_dev_caps_attr_group = {
	.name = "capabilities",
	.attrs = input_dev_caps_attrs,
};
/* All sysfs attribute groups installed on every input device. */
static const struct attribute_group *input_dev_attr_groups[] = {
	&input_dev_attr_group,
	&input_dev_id_attr_group,
	&input_dev_caps_attr_group,
	&input_poller_attribute_group,
	NULL
};
/*
 * Device-core release callback: frees the input device and everything
 * hanging off it once the last reference is dropped. Drops the module
 * reference taken in input_allocate_device().
 */
static void input_dev_release(struct device *device)
{
	struct input_dev *dev = to_input_dev(device);
	input_ff_destroy(dev);
	input_mt_destroy_slots(dev);
	kfree(dev->poller);
	kfree(dev->absinfo);
	kfree(dev->vals);
	kfree(dev);
	module_put(THIS_MODULE);
}
/*
* Input uevent interface - loading event handlers based on
* device bitfields.
*/
/*
 * Add a "<name><bitmap>" uevent variable. add_uevent_var() stores the
 * prefix including its NUL terminator; the bitmap text is then printed
 * starting at that NUL (env->buflen - 1) so it becomes part of the
 * same variable. Returns -ENOMEM if the env buffer cannot hold it.
 */
static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
				   const char *name, const unsigned long *bitmap, int max)
{
	int len;
	if (add_uevent_var(env, "%s", name))
		return -ENOMEM;
	len = input_print_bitmap(&env->buf[env->buflen - 1],
				 sizeof(env->buf) - env->buflen,
				 bitmap, max, false);
	if (len >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;
	env->buflen += len;
	return 0;
}
/*
 * Add the "MODALIAS=<alias>" uevent variable, using the same
 * append-after-the-prefix-NUL technique as input_add_uevent_bm_var().
 */
static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
					 const struct input_dev *dev)
{
	int len;
	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;
	len = input_print_modalias(&env->buf[env->buflen - 1],
				   sizeof(env->buf) - env->buflen,
				   dev, 0);
	if (len >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;
	env->buflen += len;
	return 0;
}
/*
 * Convenience wrappers used by input_dev_uevent(); each returns from
 * the enclosing function with the error code if adding the variable
 * fails.
 */
#define INPUT_ADD_HOTPLUG_VAR(fmt, val...)				\
	do {								\
		int err = add_uevent_var(env, fmt, val);		\
		if (err)						\
			return err;					\
	} while (0)
#define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max)				\
	do {								\
		int err = input_add_uevent_bm_var(env, name, bm, max);	\
		if (err)						\
			return err;					\
	} while (0)
#define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev)				\
	do {								\
		int err = input_add_uevent_modalias_var(env, dev);	\
		if (err)						\
			return err;					\
	} while (0)
/*
 * uevent callback: describe the device to userspace (udev/modprobe)
 * via PRODUCT/NAME/PHYS/UNIQ strings, per-event-type capability
 * bitmaps (only for event types the device actually supports) and the
 * MODALIAS string. Any helper failure aborts with its error code via
 * the INPUT_ADD_HOTPLUG_* macros.
 */
static int input_dev_uevent(const struct device *device, struct kobj_uevent_env *env)
{
	const struct input_dev *dev = to_input_dev(device);
	INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
			      dev->id.bustype, dev->id.vendor,
			      dev->id.product, dev->id.version);
	if (dev->name)
		INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name);
	if (dev->phys)
		INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys);
	if (dev->uniq)
		INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);
	INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);
	INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
	if (test_bit(EV_KEY, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
	if (test_bit(EV_REL, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX);
	if (test_bit(EV_ABS, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX);
	if (test_bit(EV_MSC, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX);
	if (test_bit(EV_LED, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX);
	if (test_bit(EV_SND, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX);
	if (test_bit(EV_FF, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX);
	if (test_bit(EV_SW, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);
	INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);
	return 0;
}
/*
 * Replay (on=true) or clear (on=false) the state of every set bit in
 * a state bitmap (dev->bits) through the device's ->event() callback,
 * provided the device declares the event type at all. When turning
 * off, only currently-active bits generate events.
 */
#define INPUT_DO_TOGGLE(dev, type, bits, on)				\
	do {								\
		int i;							\
		bool active;						\
									\
		if (!test_bit(EV_##type, dev->evbit))			\
			break;						\
									\
		for_each_set_bit(i, dev->bits##bit, type##_CNT) {	\
			active = test_bit(i, dev->bits);		\
			if (!active && !on)				\
				continue;				\
									\
			dev->event(dev, EV_##type, i, on ? active : 0); \
		}							\
	} while (0)
/*
 * Restore (activate=true) or silence (activate=false) LED and sound
 * state on the hardware, and re-send autorepeat settings when
 * activating. No-op for devices without an ->event() handler.
 * Caller must hold dev->event_lock.
 */
static void input_dev_toggle(struct input_dev *dev, bool activate)
{
	if (!dev->event)
		return;
	INPUT_DO_TOGGLE(dev, LED, led, activate);
	INPUT_DO_TOGGLE(dev, SND, snd, activate);
	if (activate && test_bit(EV_REP, dev->evbit)) {
		dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
		dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
	}
}
/**
 * input_reset_device() - reset/restore the state of input device
 * @dev: input device whose state needs to be reset
 *
 * This function tries to reset the state of an opened input device and
 * bring internal state and state of the hardware in sync with each other.
 * We mark all keys as released, restore LED state, repeat rate, etc.
 */
void input_reset_device(struct input_dev *dev)
{
	unsigned long flags;
	mutex_lock(&dev->mutex);
	spin_lock_irqsave(&dev->event_lock, flags);
	input_dev_toggle(dev, true);
	/* Release any stuck keys and flush the release events out. */
	if (input_dev_release_keys(dev))
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
	spin_unlock_irqrestore(&dev->event_lock, flags);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_reset_device);
/*
 * Inhibit the device: close it if it has users, stop polling, release
 * all pressed keys/contacts and turn off LEDs/sounds, then mark it
 * inhibited. Idempotent; always returns 0.
 */
static int input_inhibit_device(struct input_dev *dev)
{
	mutex_lock(&dev->mutex);
	if (dev->inhibited)
		goto out;
	if (dev->users) {
		if (dev->close)
			dev->close(dev);
		if (dev->poller)
			input_dev_poller_stop(dev->poller);
	}
	spin_lock_irq(&dev->event_lock);
	input_mt_release_slots(dev);
	input_dev_release_keys(dev);
	/* Flush the synthetic release events to handlers. */
	input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
	input_dev_toggle(dev, false);
	spin_unlock_irq(&dev->event_lock);
	dev->inhibited = true;
out:
	mutex_unlock(&dev->mutex);
	return 0;
}
/*
 * Undo input_inhibit_device(): reopen the device if it has users,
 * restart polling and restore LED/sound state. Returns the ->open()
 * error if reopening fails, in which case the device stays inhibited.
 */
static int input_uninhibit_device(struct input_dev *dev)
{
	int ret = 0;
	mutex_lock(&dev->mutex);
	if (!dev->inhibited)
		goto out;
	if (dev->users) {
		if (dev->open) {
			ret = dev->open(dev);
			if (ret)
				goto out;
		}
		if (dev->poller)
			input_dev_poller_start(dev->poller);
	}
	dev->inhibited = false;
	spin_lock_irq(&dev->event_lock);
	input_dev_toggle(dev, true);
	spin_unlock_irq(&dev->event_lock);
out:
	mutex_unlock(&dev->mutex);
	return ret;
}
/* PM suspend: release pressed keys and quiesce LEDs/sounds. */
static int input_dev_suspend(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);
	spin_lock_irq(&input_dev->event_lock);
	/*
	 * Keys that are pressed now are unlikely to be
	 * still pressed when we resume.
	 */
	if (input_dev_release_keys(input_dev))
		input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
	/* Turn off LEDs and sounds, if any are active. */
	input_dev_toggle(input_dev, false);
	spin_unlock_irq(&input_dev->event_lock);
	return 0;
}
/* PM resume: re-apply LED/sound state saved in the device bitmaps. */
static int input_dev_resume(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);
	spin_lock_irq(&input_dev->event_lock);
	/* Restore state of LEDs and sounds, if any were active. */
	input_dev_toggle(input_dev, true);
	spin_unlock_irq(&input_dev->event_lock);
	return 0;
}
/* PM freeze (hibernation): release keys but keep LED/sound state. */
static int input_dev_freeze(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);
	spin_lock_irq(&input_dev->event_lock);
	/*
	 * Keys that are pressed now are unlikely to be
	 * still pressed when we resume.
	 */
	if (input_dev_release_keys(input_dev))
		input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
	spin_unlock_irq(&input_dev->event_lock);
	return 0;
}
/* PM poweroff: silence LEDs/sounds before the machine goes down. */
static int input_dev_poweroff(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);
	spin_lock_irq(&input_dev->event_lock);
	/* Turn off LEDs and sounds, if any are active. */
	input_dev_toggle(input_dev, false);
	spin_unlock_irq(&input_dev->event_lock);
	return 0;
}
/* Power-management callbacks shared by all input devices. */
static const struct dev_pm_ops input_dev_pm_ops = {
	.suspend	= input_dev_suspend,
	.resume		= input_dev_resume,
	.freeze		= input_dev_freeze,
	.poweroff	= input_dev_poweroff,
	.restore	= input_dev_resume,
};
/* Device type shared by all input devices. */
static const struct device_type input_dev_type = {
	.groups		= input_dev_attr_groups,
	.release	= input_dev_release,
	.uevent		= input_dev_uevent,
	.pm		= pm_sleep_ptr(&input_dev_pm_ops),
};
/* Place device nodes under /dev/input/. */
static char *input_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
}
struct class input_class = {
	.name		= "input",
	.devnode	= input_devnode,
};
EXPORT_SYMBOL_GPL(input_class);
/**
 * input_allocate_device - allocate memory for new input device
 *
 * Returns prepared struct input_dev or %NULL.
 *
 * NOTE: Use input_free_device() to free devices that have not been
 * registered; input_unregister_device() should be used for already
 * registered devices.
 */
struct input_dev *input_allocate_device(void)
{
	/* Monotonic counter used to name devices input0, input1, ... */
	static atomic_t input_no = ATOMIC_INIT(-1);
	struct input_dev *dev;
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev) {
		dev->dev.type = &input_dev_type;
		dev->dev.class = &input_class;
		device_initialize(&dev->dev);
		mutex_init(&dev->mutex);
		spin_lock_init(&dev->event_lock);
		timer_setup(&dev->timer, NULL, 0);
		INIT_LIST_HEAD(&dev->h_list);
		INIT_LIST_HEAD(&dev->node);
		dev_set_name(&dev->dev, "input%lu",
			     (unsigned long)atomic_inc_return(&input_no));
		/* Pin the module; released in input_dev_release(). */
		__module_get(THIS_MODULE);
	}
	return dev;
}
EXPORT_SYMBOL(input_allocate_device);
/* devres payload: just a pointer to the managed input device. */
struct input_devres {
	struct input_dev *input;
};
/* devres match: true when this resource wraps the given input device. */
static int devm_input_device_match(struct device *dev, void *res, void *data)
{
	struct input_devres *devres = res;
	return devres->input == data;
}
/* devres release: drop the reference taken at allocation time. */
static void devm_input_device_release(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;
	dev_dbg(dev, "%s: dropping reference to %s\n",
		__func__, dev_name(&input->dev));
	input_put_device(input);
}
/**
 * devm_input_allocate_device - allocate managed input device
 * @dev: device owning the input device being created
 *
 * Returns prepared struct input_dev or %NULL.
 *
 * Managed input devices do not need to be explicitly unregistered or
 * freed as it will be done automatically when owner device unbinds from
 * its driver (or binding fails). Once managed input device is allocated,
 * it is ready to be set up and registered in the same fashion as regular
 * input device. There are no special devm_input_device_[un]register()
 * variants, regular ones work with both managed and unmanaged devices,
 * should you need them. In most cases however, managed input device need
 * not be explicitly unregistered or freed.
 *
 * NOTE: the owner device is set up as parent of input device and users
 * should not override it.
 */
struct input_dev *devm_input_allocate_device(struct device *dev)
{
	struct input_dev *input;
	struct input_devres *devres;
	devres = devres_alloc(devm_input_device_release,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return NULL;
	input = input_allocate_device();
	if (!input) {
		devres_free(devres);
		return NULL;
	}
	input->dev.parent = dev;
	input->devres_managed = true;
	devres->input = input;
	devres_add(dev, devres);
	return input;
}
EXPORT_SYMBOL(devm_input_allocate_device);
/**
 * input_free_device - free memory occupied by input_dev structure
 * @dev: input device to free
 *
 * This function should only be used if input_register_device()
 * was not called yet or if it failed. Once device was registered
 * use input_unregister_device() and memory will be freed once last
 * reference to the device is dropped.
 *
 * Device should be allocated by input_allocate_device().
 *
 * NOTE: If there are references to the input device then memory
 * will not be freed until last reference is dropped.
 */
void input_free_device(struct input_dev *dev)
{
	if (dev) {
		/* Managed devices also need their devres entry removed. */
		if (dev->devres_managed)
			WARN_ON(devres_destroy(dev->dev.parent,
						devm_input_device_release,
						devm_input_device_match,
						dev));
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_free_device);
/**
 * input_set_timestamp - set timestamp for input events
 * @dev: input device to set timestamp for
 * @timestamp: the time at which the event has occurred
 *   in CLOCK_MONOTONIC
 *
 * This function is intended to provide to the input system a more
 * accurate time of when an event actually occurred. The driver should
 * call this function as soon as a timestamp is acquired ensuring
 * clock conversions in input_set_timestamp are done correctly.
 *
 * The system entering suspend state between timestamp acquisition and
 * calling input_set_timestamp can result in inaccurate conversions.
 */
void input_set_timestamp(struct input_dev *dev, ktime_t timestamp)
{
	/* Store the monotonic time and its realtime/boottime views. */
	dev->timestamp[INPUT_CLK_MONO] = timestamp;
	dev->timestamp[INPUT_CLK_REAL] = ktime_mono_to_real(timestamp);
	dev->timestamp[INPUT_CLK_BOOT] = ktime_mono_to_any(timestamp,
							   TK_OFFS_BOOT);
}
EXPORT_SYMBOL(input_set_timestamp);
/**
 * input_get_timestamp - get timestamp for input events
 * @dev: input device to get timestamp from
 *
 * A valid timestamp is a timestamp of non-zero value.
 */
ktime_t *input_get_timestamp(struct input_dev *dev)
{
	const ktime_t invalid_timestamp = ktime_set(0, 0);
	/* Lazily stamp "now" if the driver did not supply a time. */
	if (!ktime_compare(dev->timestamp[INPUT_CLK_MONO], invalid_timestamp))
		input_set_timestamp(dev, ktime_get());
	return dev->timestamp;
}
EXPORT_SYMBOL(input_get_timestamp);
/**
 * input_set_capability - mark device as capable of a certain event
 * @dev: device that is capable of emitting or accepting event
 * @type: type of the event (EV_KEY, EV_REL, etc...)
 * @code: event code
 *
 * In addition to setting up corresponding bit in appropriate capability
 * bitmap the function also adjusts dev->evbit.
 */
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
	/* Reject codes beyond the declared maximum for this event type. */
	if (type < EV_CNT && input_max_code[type] &&
	    code > input_max_code[type]) {
		pr_err("%s: invalid code %u for type %u\n", __func__, code,
		       type);
		dump_stack();
		return;
	}
	switch (type) {
	case EV_KEY:
		__set_bit(code, dev->keybit);
		break;
	case EV_REL:
		__set_bit(code, dev->relbit);
		break;
	case EV_ABS:
		/* EV_ABS additionally needs per-axis absinfo storage. */
		input_alloc_absinfo(dev);
		__set_bit(code, dev->absbit);
		break;
	case EV_MSC:
		__set_bit(code, dev->mscbit);
		break;
	case EV_SW:
		__set_bit(code, dev->swbit);
		break;
	case EV_LED:
		__set_bit(code, dev->ledbit);
		break;
	case EV_SND:
		__set_bit(code, dev->sndbit);
		break;
	case EV_FF:
		__set_bit(code, dev->ffbit);
		break;
	case EV_PWR:
		/* do nothing */
		break;
	default:
		pr_err("%s: unknown type %u (code %u)\n", __func__, type, code);
		dump_stack();
		return;
	}
	__set_bit(type, dev->evbit);
}
EXPORT_SYMBOL(input_set_capability);
/*
 * Estimate how many events the device can generate in a single packet
 * (i.e. between EV_SYN/SYN_REPORT markers) so that an adequately sized
 * value buffer can be allocated in input_register_device().
 */
static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
{
	int mt_slots;
	int i;
	unsigned int events;

	if (dev->mt) {
		mt_slots = dev->mt->num_slots;
	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
		/*
		 * No MT slot state allocated (yet): estimate the slot
		 * count from the tracking-id range, clamped to a sane
		 * window. Note: the original chained these two
		 * statements with an accidental comma operator; a
		 * regular semicolon is used here (same behavior).
		 */
		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
		mt_slots = clamp(mt_slots, 2, 32);
	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
		mt_slots = 2;
	} else {
		mt_slots = 0;
	}

	events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */

	/* Each MT axis may fire once per slot; other axes once each. */
	if (test_bit(EV_ABS, dev->evbit))
		for_each_set_bit(i, dev->absbit, ABS_CNT)
			events += input_is_mt_axis(i) ? mt_slots : 1;

	if (test_bit(EV_REL, dev->evbit))
		events += bitmap_weight(dev->relbit, REL_CNT);

	/* Make room for KEY and MSC events */
	events += 7;

	return events;
}
/* Zero a capability bitmap when its event type is not in dev->evbit. */
#define INPUT_CLEANSE_BITMASK(dev, type, bits)				\
	do {								\
		if (!test_bit(EV_##type, dev->evbit))			\
			memset(dev->bits##bit, 0,			\
				sizeof(dev->bits##bit));		\
	} while (0)
/*
 * Drop stale capability bits for event types the device does not
 * declare, so userspace never sees contradictory capabilities.
 */
static void input_cleanse_bitmasks(struct input_dev *dev)
{
	INPUT_CLEANSE_BITMASK(dev, KEY, key);
	INPUT_CLEANSE_BITMASK(dev, REL, rel);
	INPUT_CLEANSE_BITMASK(dev, ABS, abs);
	INPUT_CLEANSE_BITMASK(dev, MSC, msc);
	INPUT_CLEANSE_BITMASK(dev, LED, led);
	INPUT_CLEANSE_BITMASK(dev, SND, snd);
	INPUT_CLEANSE_BITMASK(dev, FF, ff);
	INPUT_CLEANSE_BITMASK(dev, SW, sw);
}
/*
 * Core of device unregistration: detach all handlers, stop the
 * autorepeat timer, remove the device from the global list and delete
 * it from the device hierarchy. Does not drop the final reference.
 */
static void __input_unregister_device(struct input_dev *dev)
{
	struct input_handle *handle, *next;
	input_disconnect_device(dev);
	mutex_lock(&input_mutex);
	list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
		handle->handler->disconnect(handle);
	WARN_ON(!list_empty(&dev->h_list));
	del_timer_sync(&dev->timer);
	list_del_init(&dev->node);
	input_wakeup_procfs_readers();
	mutex_unlock(&input_mutex);
	device_del(&dev->dev);
}
/* devres action: unregister a managed input device on owner unbind. */
static void devm_input_device_unregister(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;
	dev_dbg(dev, "%s: unregistering device %s\n",
		__func__, dev_name(&input->dev));
	__input_unregister_device(input);
}
/*
 * Generate software autorepeat event. Note that we take
 * dev->event_lock here to avoid racing with input_event
 * which may cause keys get "stuck".
 */
static void input_repeat_key(struct timer_list *t)
{
	struct input_dev *dev = from_timer(dev, t, timer);
	unsigned long flags;
	spin_lock_irqsave(&dev->event_lock, flags);
	/* Only repeat while the key is still down and still supported. */
	if (!dev->inhibited &&
	    test_bit(dev->repeat_key, dev->key) &&
	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
		input_set_timestamp(dev, ktime_get());
		/* Value 2 marks an autorepeat key event. */
		input_handle_event(dev, EV_KEY, dev->repeat_key, 2);
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
		if (dev->rep[REP_PERIOD])
			mod_timer(&dev->timer, jiffies +
					msecs_to_jiffies(dev->rep[REP_PERIOD]));
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
/**
 * input_enable_softrepeat - enable software autorepeat
 * @dev: input device
 * @delay: repeat delay
 * @period: repeat period
 *
 * Enable software autorepeat on the input device.
 */
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
	dev->timer.function = input_repeat_key;
	dev->rep[REP_DELAY] = delay;
	dev->rep[REP_PERIOD] = period;
}
EXPORT_SYMBOL(input_enable_softrepeat);
/* True when the device is open and not inhibited; needs dev->mutex. */
bool input_device_enabled(struct input_dev *dev)
{
	lockdep_assert_held(&dev->mutex);
	return !dev->inhibited && dev->users > 0;
}
EXPORT_SYMBOL_GPL(input_device_enabled);
/**
 * input_register_device - register device with input core
 * @dev: device to be registered
 *
 * This function registers device with input core. The device must be
 * allocated with input_allocate_device() and all it's capabilities
 * set up before registering.
 * If function fails the device must be freed with input_free_device().
 * Once device has been successfully registered it can be unregistered
 * with input_unregister_device(); input_free_device() should not be
 * called in this case.
 *
 * Note that this function is also used to register managed input devices
 * (ones allocated with devm_input_allocate_device()). Such managed input
 * devices need not be explicitly unregistered or freed, their tear down
 * is controlled by the devres infrastructure. It is also worth noting
 * that tear down of managed input devices is internally a 2-step process:
 * registered managed input device is first unregistered, but stays in
 * memory and can still handle input_event() calls (although events will
 * not be delivered anywhere). The freeing of managed input device will
 * happen later, when devres stack is unwound to the point where device
 * allocation was made.
 */
int input_register_device(struct input_dev *dev)
{
	struct input_devres *devres = NULL;
	struct input_handler *handler;
	unsigned int packet_size;
	const char *path;
	int error;
	if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
		dev_err(&dev->dev,
			"Absolute device without dev->absinfo, refusing to register\n");
		return -EINVAL;
	}
	/* Managed devices get a devres entry that unregisters them. */
	if (dev->devres_managed) {
		devres = devres_alloc(devm_input_device_unregister,
				      sizeof(*devres), GFP_KERNEL);
		if (!devres)
			return -ENOMEM;
		devres->input = dev;
	}
	/* Every input device generates EV_SYN/SYN_REPORT events. */
	__set_bit(EV_SYN, dev->evbit);
	/* KEY_RESERVED is not supposed to be transmitted to userspace. */
	__clear_bit(KEY_RESERVED, dev->keybit);
	/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
	input_cleanse_bitmasks(dev);
	/* Size the per-packet value buffer from the capabilities. */
	packet_size = input_estimate_events_per_packet(dev);
	if (dev->hint_events_per_packet < packet_size)
		dev->hint_events_per_packet = packet_size;
	dev->max_vals = dev->hint_events_per_packet + 2;
	dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
	if (!dev->vals) {
		error = -ENOMEM;
		goto err_devres_free;
	}
	/*
	 * If delay and period are pre-set by the driver, then autorepeating
	 * is handled by the driver itself and we don't do it in input.c.
	 */
	if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
		input_enable_softrepeat(dev, 250, 33);
	if (!dev->getkeycode)
		dev->getkeycode = input_default_getkeycode;
	if (!dev->setkeycode)
		dev->setkeycode = input_default_setkeycode;
	if (dev->poller)
		input_dev_poller_finalize(dev->poller);
	error = device_add(&dev->dev);
	if (error)
		goto err_free_vals;
	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	pr_info("%s as %s\n",
		dev->name ? dev->name : "Unspecified device",
		path ? path : "N/A");
	kfree(path);
	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		goto err_device_del;
	/* Publish the device and offer it to all existing handlers. */
	list_add_tail(&dev->node, &input_dev_list);
	list_for_each_entry(handler, &input_handler_list, node)
		input_attach_handler(dev, handler);
	input_wakeup_procfs_readers();
	mutex_unlock(&input_mutex);
	if (dev->devres_managed) {
		dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
			__func__, dev_name(&dev->dev));
		devres_add(dev->dev.parent, devres);
	}
	return 0;
err_device_del:
	device_del(&dev->dev);
err_free_vals:
	kfree(dev->vals);
	dev->vals = NULL;
err_devres_free:
	devres_free(devres);
	return error;
}
EXPORT_SYMBOL(input_register_device);
/**
 * input_unregister_device - unregister previously registered device
 * @dev: device to be unregistered
 *
 * This function unregisters an input device. Once device is unregistered
 * the caller should not try to access it as it may get freed at any moment.
 */
void input_unregister_device(struct input_dev *dev)
{
	if (dev->devres_managed) {
		/* Remove the devres entry that would unregister us again. */
		WARN_ON(devres_destroy(dev->dev.parent,
					devm_input_device_unregister,
					devm_input_device_match,
					dev));
		__input_unregister_device(dev);
		/*
		 * We do not do input_put_device() here because it will be done
		 * when 2nd devres fires up.
		 */
	} else {
		__input_unregister_device(dev);
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_unregister_device);
/**
 * input_register_handler - register a new input handler
 * @handler: handler to be registered
 *
 * This function registers a new input handler (interface) for input
 * devices in the system and attaches it to all input devices that
 * are compatible with the handler.
 */
int input_register_handler(struct input_handler *handler)
{
	struct input_dev *dev;
	int error;
	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		return error;
	INIT_LIST_HEAD(&handler->h_list);
	list_add_tail(&handler->node, &input_handler_list);
	/* Offer every existing device to the new handler. */
	list_for_each_entry(dev, &input_dev_list, node)
		input_attach_handler(dev, handler);
	input_wakeup_procfs_readers();
	mutex_unlock(&input_mutex);
	return 0;
}
EXPORT_SYMBOL(input_register_handler);
/**
 * input_unregister_handler - unregisters an input handler
 * @handler: handler to be unregistered
 *
 * This function disconnects a handler from its input devices and
 * removes it from lists of known handlers.
 */
void input_unregister_handler(struct input_handler *handler)
{
	struct input_handle *handle, *next;
	mutex_lock(&input_mutex);
	list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
		handler->disconnect(handle);
	WARN_ON(!list_empty(&handler->h_list));
	list_del_init(&handler->node);
	input_wakeup_procfs_readers();
	mutex_unlock(&input_mutex);
}
EXPORT_SYMBOL(input_unregister_handler);
/**
 * input_handler_for_each_handle - handle iterator
 * @handler: input handler to iterate
 * @data: data for the callback
 * @fn: function to be called for each handle
 *
 * Iterate over @handler's list of handles, and call @fn for each,
 * passing it @data; stop when @fn returns a non-zero value. The
 * function is using RCU to traverse the list and therefore may be
 * used in atomic contexts. The @fn callback is invoked from RCU
 * critical section and thus must not sleep.
 */
int input_handler_for_each_handle(struct input_handler *handler, void *data,
				  int (*fn)(struct input_handle *, void *))
{
	struct input_handle *handle;
	int retval = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
		retval = fn(handle, data);
		if (retval)
			break;
	}
	rcu_read_unlock();
	return retval;
}
EXPORT_SYMBOL(input_handler_for_each_handle);
/**
 * input_register_handle - register a new input handle
 * @handle: handle to register
 *
 * This function puts a new input handle onto device's
 * and handler's lists so that events can flow through
 * it once it is opened using input_open_device().
 *
 * This function is supposed to be called from handler's
 * connect() method.
 */
int input_register_handle(struct input_handle *handle)
{
	struct input_handler *handler = handle->handler;
	struct input_dev *dev = handle->dev;
	int error;
	/*
	 * We take dev->mutex here to prevent race with
	 * input_release_device().
	 */
	error = mutex_lock_interruptible(&dev->mutex);
	if (error)
		return error;
	/*
	 * Filters go to the head of the list, normal handlers
	 * to the tail.
	 */
	if (handler->filter)
		list_add_rcu(&handle->d_node, &dev->h_list);
	else
		list_add_tail_rcu(&handle->d_node, &dev->h_list);
	mutex_unlock(&dev->mutex);
	/*
	 * Since we are supposed to be called from ->connect()
	 * which is mutually exclusive with ->disconnect()
	 * we can't be racing with input_unregister_handle()
	 * and so separate lock is not needed here.
	 */
	list_add_tail_rcu(&handle->h_node, &handler->h_list);
	if (handler->start)
		handler->start(handle);
	return 0;
}
EXPORT_SYMBOL(input_register_handle);
/**
 * input_unregister_handle - unregister an input handle
 * @handle: handle to unregister
 *
 * This function removes input handle from device's
 * and handler's lists.
 *
 * This function is supposed to be called from handler's
 * disconnect() method.
 */
void input_unregister_handle(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	list_del_rcu(&handle->h_node);
	/*
	 * Take dev->mutex to prevent race with input_release_device().
	 */
	mutex_lock(&dev->mutex);
	list_del_rcu(&handle->d_node);
	mutex_unlock(&dev->mutex);
	/* Wait for in-flight RCU readers before the handle goes away. */
	synchronize_rcu();
}
EXPORT_SYMBOL(input_unregister_handle);
/**
 * input_get_new_minor - allocates a new input minor number
 * @legacy_base: beginning of the legacy range to be searched
 * @legacy_num: size of legacy range
 * @allow_dynamic: whether we can also take ID from the dynamic range
 *
 * This function allocates a new device minor from the input major
 * namespace. Caller can request legacy minor by specifying @legacy_base
 * and @legacy_num parameters and whether ID can be allocated from
 * dynamic range if there are no free IDs in legacy range.
 */
int input_get_new_minor(int legacy_base, unsigned int legacy_num,
			bool allow_dynamic)
{
	/*
	 * This function should be called from input handler's ->connect()
	 * methods, which are serialized with input_mutex, so no additional
	 * locking is needed here.
	 */
	if (legacy_base >= 0) {
		int minor = ida_simple_get(&input_ida,
					   legacy_base,
					   legacy_base + legacy_num,
					   GFP_KERNEL);
		if (minor >= 0 || !allow_dynamic)
			return minor;
	}
	return ida_simple_get(&input_ida,
			      INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
			      GFP_KERNEL);
}
EXPORT_SYMBOL(input_get_new_minor);
/**
 * input_free_minor - release previously allocated minor
 * @minor: minor to be released
 *
 * This function releases previously allocated input minor so that it can be
 * reused later.
 */
void input_free_minor(unsigned int minor)
{
	ida_simple_remove(&input_ida, minor);
}
EXPORT_SYMBOL(input_free_minor);
/*
 * Subsystem init: register the "input" class, the procfs entries and
 * the input character device region, unwinding in reverse on failure.
 */
static int __init input_init(void)
{
	int err;
	err = class_register(&input_class);
	if (err) {
		pr_err("unable to register input_dev class\n");
		return err;
	}
	err = input_proc_init();
	if (err)
		goto fail1;
	err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				     INPUT_MAX_CHAR_DEVICES, "input");
	if (err) {
		pr_err("unable to register char major %d", INPUT_MAJOR);
		goto fail2;
	}
	return 0;
 fail2:	input_proc_exit();
 fail1:	class_unregister(&input_class);
	return err;
}
/* Subsystem teardown: release everything input_init() set up. */
static void __exit input_exit(void)
{
	input_proc_exit();
	unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				 INPUT_MAX_CHAR_DEVICES);
	class_unregister(&input_class);
}
subsys_initcall(input_init);
module_exit(input_exit);
|
linux-master
|
drivers/input/input.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Input Power Event -> APM Bridge
*
* Copyright (c) 2007 Richard Purdie
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/apm-emulation.h>
/* Translate a power-related keycode into the matching APM request. */
static void system_power_event(unsigned int keycode)
{
	/* Only KEY_SUSPEND is handled; other keycodes are ignored. */
	if (keycode == KEY_SUSPEND) {
		apm_queue_event(APM_USER_SUSPEND);
		pr_info("Requesting system suspend...\n");
	}
}
/* Input-core event callback: forward EV_PWR key presses to APM. */
static void apmpower_event(struct input_handle *handle, unsigned int type,
			   unsigned int code, int value)
{
	/* only react on key down events */
	if (value != 1)
		return;

	if (type == EV_PWR)
		system_power_event(code);
}
/*
 * Handler ->connect(): create a handle for a matching EV_PWR device,
 * register it and open the device so events start flowing. Cleans up
 * the handle on any failure.
 */
static int apmpower_connect(struct input_handler *handler,
			    struct input_dev *dev,
			    const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;
	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;
	handle->dev = dev;
	handle->handler = handler;
	handle->name = "apm-power";
	error = input_register_handle(handle);
	if (error) {
		pr_err("Failed to register input power handler, error %d\n",
		       error);
		kfree(handle);
		return error;
	}
	error = input_open_device(handle);
	if (error) {
		pr_err("Failed to open input power device, error %d\n", error);
		input_unregister_handle(handle);
		kfree(handle);
		return error;
	}
	return 0;
}
/* Handler ->disconnect(): undo apmpower_connect() in reverse order. */
static void apmpower_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
/* Match any input device that declares EV_PWR capability. */
static const struct input_device_id apmpower_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_PWR) },
	},
	{ },
};
MODULE_DEVICE_TABLE(input, apmpower_ids);
static struct input_handler apmpower_handler = {
	.event =	apmpower_event,
	.connect =	apmpower_connect,
	.disconnect =	apmpower_disconnect,
	.name =		"apm-power",
	.id_table =	apmpower_ids,
};
/* Module init/exit: (un)register the handler with the input core. */
static int __init apmpower_init(void)
{
	return input_register_handler(&apmpower_handler);
}
static void __exit apmpower_exit(void)
{
	input_unregister_handler(&apmpower_handler);
}
module_init(apmpower_init);
module_exit(apmpower_exit);
MODULE_AUTHOR("Richard Purdie <[email protected]>");
MODULE_DESCRIPTION("Input Power Event -> APM Bridge");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/apm-power.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic helper functions for touchscreens and other two-dimensional
* pointing devices
*
* Copyright (c) 2014 Sebastian Reichel <[email protected]>
*/
#include <linux/property.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
#include <linux/module.h>
/*
 * Read an unsigned device property into *value, falling back to
 * @default_value when the property is absent or unreadable. Returns
 * true iff the property was actually present.
 */
static bool touchscreen_get_prop_u32(struct device *dev,
				     const char *property,
				     unsigned int default_value,
				     unsigned int *value)
{
	u32 prop_val;

	if (device_property_read_u32(dev, property, &prop_val)) {
		*value = default_value;
		return false;
	}

	*value = prop_val;
	return true;
}
/*
 * Apply min/max/fuzz to one absolute axis of @dev.
 *
 * The axis must already be set up by the driver (present in absbit);
 * otherwise a warning is logged and the values are ignored, since
 * dev->absinfo[axis] would not be meaningful.
 */
static void touchscreen_set_params(struct input_dev *dev,
				   unsigned long axis,
				   int min, int max, int fuzz)
{
	struct input_absinfo *absinfo;

	if (!test_bit(axis, dev->absbit)) {
		dev_warn(&dev->dev,
			 "Parameters are specified but the axis %lu is not set up\n",
			 axis);
		return;
	}

	absinfo = &dev->absinfo[axis];
	absinfo->minimum = min;
	absinfo->maximum = max;
	absinfo->fuzz = fuzz;
}
/**
 * touchscreen_parse_properties - parse common touchscreen properties
 * @input: input device that should be parsed
 * @multitouch: specifies whether parsed properties should be applied to
 *	single-touch or multi-touch axes
 * @prop: pointer to a struct touchscreen_properties into which to store
 *	axis swap and invert info for use with touchscreen_report_x_y();
 *	or %NULL
 *
 * This function parses common properties for touchscreens and sets up the
 * input device accordingly. The function keeps previously set up default
 * values if no value is specified.
 */
void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
				  struct touchscreen_properties *prop)
{
	struct device *dev = input->dev.parent;
	struct input_absinfo *absinfo;
	unsigned int axis, axis_x, axis_y;
	unsigned int minimum, maximum, fuzz;
	bool data_present;

	input_alloc_absinfo(input);
	if (!input->absinfo)
		return;

	axis_x = multitouch ? ABS_MT_POSITION_X : ABS_X;
	axis_y = multitouch ? ABS_MT_POSITION_Y : ABS_Y;

	/*
	 * "touchscreen-size-*" is a count of positions, hence the +1 when
	 * deriving the default from the current max, and the -1 when
	 * writing the parsed size back as a maximum coordinate.
	 */
	data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-x",
						input_abs_get_min(input, axis_x),
						&minimum);
	data_present |= touchscreen_get_prop_u32(dev, "touchscreen-size-x",
						 input_abs_get_max(input,
								   axis_x) + 1,
						 &maximum);
	data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
						 input_abs_get_fuzz(input, axis_x),
						 &fuzz);
	if (data_present)
		touchscreen_set_params(input, axis_x, minimum, maximum - 1, fuzz);

	data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-y",
						input_abs_get_min(input, axis_y),
						&minimum);
	data_present |= touchscreen_get_prop_u32(dev, "touchscreen-size-y",
						 input_abs_get_max(input,
								   axis_y) + 1,
						 &maximum);
	data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
						 input_abs_get_fuzz(input, axis_y),
						 &fuzz);
	if (data_present)
		touchscreen_set_params(input, axis_y, minimum, maximum - 1, fuzz);

	axis = multitouch ? ABS_MT_PRESSURE : ABS_PRESSURE;
	data_present = touchscreen_get_prop_u32(dev,
						"touchscreen-max-pressure",
						input_abs_get_max(input, axis),
						&maximum);
	data_present |= touchscreen_get_prop_u32(dev,
						 "touchscreen-fuzz-pressure",
						 input_abs_get_fuzz(input, axis),
						 &fuzz);
	if (data_present)
		touchscreen_set_params(input, axis, 0, maximum, fuzz);

	if (!prop)
		return;

	prop->max_x = input_abs_get_max(input, axis_x);
	prop->max_y = input_abs_get_max(input, axis_y);

	prop->invert_x =
		device_property_read_bool(dev, "touchscreen-inverted-x");
	if (prop->invert_x) {
		/* shift the range to start at 0 so max - value stays valid */
		absinfo = &input->absinfo[axis_x];
		absinfo->maximum -= absinfo->minimum;
		absinfo->minimum = 0;
	}

	prop->invert_y =
		device_property_read_bool(dev, "touchscreen-inverted-y");
	if (prop->invert_y) {
		absinfo = &input->absinfo[axis_y];
		absinfo->maximum -= absinfo->minimum;
		absinfo->minimum = 0;
	}

	prop->swap_x_y =
		device_property_read_bool(dev, "touchscreen-swapped-x-y");
	if (prop->swap_x_y)
		swap(input->absinfo[axis_x], input->absinfo[axis_y]);
}
EXPORT_SYMBOL(touchscreen_parse_properties);
/*
 * Apply axis inversion and swapping from @prop to a coordinate pair.
 * Inversion happens first, in the original axis orientation; the swap,
 * if requested, is applied last.
 */
static void
touchscreen_apply_prop_to_x_y(const struct touchscreen_properties *prop,
			      unsigned int *x, unsigned int *y)
{
	if (prop->invert_x)
		*x = prop->max_x - *x;

	if (prop->invert_y)
		*y = prop->max_y - *y;

	if (prop->swap_x_y)
		swap(*x, *y);
}
/**
 * touchscreen_set_mt_pos - Set input_mt_pos coordinates
 * @pos: input_mt_pos to set coordinates of
 * @prop: pointer to a struct touchscreen_properties
 * @x: X coordinate to store in pos
 * @y: Y coordinate to store in pos
 *
 * Adjust the passed in x and y values applying any axis inversion and
 * swapping requested in the passed in touchscreen_properties and store
 * the result in a struct input_mt_pos.
 */
void touchscreen_set_mt_pos(struct input_mt_pos *pos,
			    const struct touchscreen_properties *prop,
			    unsigned int x, unsigned int y)
{
	/* x/y are by-value copies, so transforming them is side-effect free */
	touchscreen_apply_prop_to_x_y(prop, &x, &y);
	pos->x = x;
	pos->y = y;
}
EXPORT_SYMBOL(touchscreen_set_mt_pos);
/**
 * touchscreen_report_pos - Report touchscreen coordinates
 * @input: input_device to report coordinates for
 * @prop: pointer to a struct touchscreen_properties
 * @x: X coordinate to report
 * @y: Y coordinate to report
 * @multitouch: Report coordinates on single-touch or multi-touch axes
 *
 * Adjust the passed in x and y values applying any axis inversion and
 * swapping requested in the passed in touchscreen_properties and then
 * report the resulting coordinates on the input_dev's x and y axis.
 */
void touchscreen_report_pos(struct input_dev *input,
			    const struct touchscreen_properties *prop,
			    unsigned int x, unsigned int y,
			    bool multitouch)
{
	touchscreen_apply_prop_to_x_y(prop, &x, &y);
	input_report_abs(input, multitouch ? ABS_MT_POSITION_X : ABS_X, x);
	input_report_abs(input, multitouch ? ABS_MT_POSITION_Y : ABS_Y, y);
}
EXPORT_SYMBOL(touchscreen_report_pos);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Helper functions for touchscreens and other devices");
|
linux-master
|
drivers/input/touchscreen.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Input driver to ExplorerPS/2 device driver module.
*
* Copyright (c) 1999-2002 Vojtech Pavlik
* Copyright (c) 2004 Dmitry Torokhov
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MOUSEDEV_MINOR_BASE 32
#define MOUSEDEV_MINORS 31
#define MOUSEDEV_MIX 63
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/kernel.h>
MODULE_AUTHOR("Vojtech Pavlik <[email protected]>");
MODULE_DESCRIPTION("Mouse (ExplorerPS/2) device interfaces");
MODULE_LICENSE("GPL");
#ifndef CONFIG_INPUT_MOUSEDEV_SCREEN_X
#define CONFIG_INPUT_MOUSEDEV_SCREEN_X 1024
#endif
#ifndef CONFIG_INPUT_MOUSEDEV_SCREEN_Y
#define CONFIG_INPUT_MOUSEDEV_SCREEN_Y 768
#endif
static int xres = CONFIG_INPUT_MOUSEDEV_SCREEN_X;
module_param(xres, uint, 0644);
MODULE_PARM_DESC(xres, "Horizontal screen resolution");
static int yres = CONFIG_INPUT_MOUSEDEV_SCREEN_Y;
module_param(yres, uint, 0644);
MODULE_PARM_DESC(yres, "Vertical screen resolution");
static unsigned tap_time = 200;
module_param(tap_time, uint, 0644);
MODULE_PARM_DESC(tap_time, "Tap time for touchpads in absolute mode (msecs)");
/* Raw device state accumulated between EV_SYN reports. */
struct mousedev_hw_data {
	int dx, dy, dz;		/* relative motion since last report */
	int x, y;		/* scaled absolute position */
	int abs_event;		/* nonzero if x/y carry fresh absolute data */
	unsigned long buttons;	/* button bitmask, bit 0 = left */
};

/* One character device node ("mouseN" or the "mice" multiplexer). */
struct mousedev {
	int open;			/* open count */
	struct input_handle handle;
	wait_queue_head_t wait;		/* readers block here */
	struct list_head client_list;
	spinlock_t client_lock; /* protects client_list */
	struct mutex mutex;		/* serializes open/close and 'exist' */
	struct device dev;
	struct cdev cdev;
	bool exist;			/* cleared when input device goes away */

	struct list_head mixdev_node;	/* entry in mousedev_mix_list */
	bool opened_by_mixdev;		/* opened on behalf of "mice" */

	struct mousedev_hw_data packet;	/* packet being assembled */
	unsigned int pkt_count;		/* reports seen in current touch */
	int old_x[4], old_y[4];		/* history for touchpad averaging */
	int frac_dx, frac_dy;		/* sub-pixel remainders (touchpad) */
	unsigned long touch;		/* jiffies of touch start, 0 = idle */

	int (*open_device)(struct mousedev *mousedev);
	void (*close_device)(struct mousedev *mousedev);
};

/* PS/2 protocol flavour a client has negotiated via magic knock sequences. */
enum mousedev_emul {
	MOUSEDEV_EMUL_PS2,	/* plain 3-byte PS/2 */
	MOUSEDEV_EMUL_IMPS,	/* IntelliMouse, 4 bytes with wheel */
	MOUSEDEV_EMUL_EXPS	/* Explorer, 4 bytes, wheel + 5 buttons */
};

/* One queued motion delta as seen by a reader. */
struct mousedev_motion {
	int dx, dy, dz;
	unsigned long buttons;
};

#define PACKET_QUEUE_LEN	16

/* Per-open-file state: packet queue, emulated protocol, reply buffer. */
struct mousedev_client {
	struct fasync_struct *fasync;
	struct mousedev *mousedev;
	struct list_head node;

	struct mousedev_motion packets[PACKET_QUEUE_LEN];
	unsigned int head, tail;	/* queue indices, head == tail -> one slot */
	spinlock_t packet_lock;		/* protects packets and ps2 reply buffer */
	int pos_x, pos_y;		/* absolute position tracked per client */

	u8 ps2[6];			/* pending PS/2 reply bytes */
	unsigned char ready, buffer, bufsiz;
	unsigned char imexseq, impsseq;	/* progress through knock sequences */
	enum mousedev_emul mode;
	unsigned long last_buttons;
};

#define MOUSEDEV_SEQ_LEN	6

/* Sample-rate command sequences that switch a client to IMPS/EXPS mode. */
static unsigned char mousedev_imps_seq[] = { 0xf3, 200, 0xf3, 100, 0xf3, 80 };
static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };

static struct mousedev *mousedev_mix;		/* the shared "mice" device */
static LIST_HEAD(mousedev_mix_list);

/* Position history ring accessors, indexed back from the current packet. */
#define fx(i)  (mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
#define fy(i)  (mousedev->old_y[(mousedev->pkt_count - (i)) & 03])
/*
 * Convert absolute touchpad coordinates into relative motion.
 *
 * Differences are taken against the position two reports back (fx(2)/
 * fy(2)) and scaled to a nominal 256-unit range; sub-unit remainders are
 * carried in frac_dx/frac_dy (fixed point, denominator 128) so slow
 * movement is not lost to truncation.  Only active while a touch is in
 * progress and at least two reports have been seen.
 */
static void mousedev_touchpad_event(struct input_dev *dev,
				    struct mousedev *mousedev,
				    unsigned int code, int value)
{
	int size, tmp;
	enum { FRACTION_DENOM = 128 };

	switch (code) {
	case ABS_X:
		fx(0) = value;
		if (mousedev->touch && mousedev->pkt_count >= 2) {
			size = input_abs_get_max(dev, ABS_X) -
					input_abs_get_min(dev, ABS_X);
			if (size == 0)
				size = 256 * 2;

			tmp = ((value - fx(2)) * 256 * FRACTION_DENOM) / size;
			tmp += mousedev->frac_dx;
			mousedev->packet.dx = tmp / FRACTION_DENOM;
			mousedev->frac_dx =
				tmp - mousedev->packet.dx * FRACTION_DENOM;
		}
		break;

	case ABS_Y:
		fy(0) = value;
		if (mousedev->touch && mousedev->pkt_count >= 2) {
			/* use X size for ABS_Y to keep the same scale */
			size = input_abs_get_max(dev, ABS_X) -
					input_abs_get_min(dev, ABS_X);
			if (size == 0)
				size = 256 * 2;

			tmp = -((value - fy(2)) * 256 * FRACTION_DENOM) / size;
			tmp += mousedev->frac_dy;
			mousedev->packet.dy = tmp / FRACTION_DENOM;
			mousedev->frac_dy = tmp -
				mousedev->packet.dy * FRACTION_DENOM;
		}
		break;
	}
}
/*
 * Map an absolute ABS_X/ABS_Y event (tablet-style device) onto the
 * virtual screen (xres x yres), clamping to the device's range first.
 * Y is flipped because input devices count up while screens count down.
 */
static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
			       unsigned int code, int value)
{
	int min, max, size;

	switch (code) {
	case ABS_X:
		min = input_abs_get_min(dev, ABS_X);
		max = input_abs_get_max(dev, ABS_X);
		size = max - min;
		if (size == 0)
			size = xres ? : 1;

		value = clamp(value, min, max);

		mousedev->packet.x = ((value - min) * xres) / size;
		mousedev->packet.abs_event = 1;
		break;

	case ABS_Y:
		min = input_abs_get_min(dev, ABS_Y);
		max = input_abs_get_max(dev, ABS_Y);
		size = max - min;
		if (size == 0)
			size = yres ? : 1;

		value = clamp(value, min, max);

		mousedev->packet.y = yres - ((value - min) * yres) / size;
		mousedev->packet.abs_event = 1;
		break;
	}
}
static void mousedev_rel_event(struct mousedev *mousedev,
unsigned int code, int value)
{
switch (code) {
case REL_X:
mousedev->packet.dx += value;
break;
case REL_Y:
mousedev->packet.dy -= value;
break;
case REL_WHEEL:
mousedev->packet.dz -= value;
break;
}
}
/*
 * Translate a button/key event into one of five mouse buttons and
 * record it both in this device's packet and in the shared multiplexer
 * (mousedev_mix) packet, so "mice" readers see it too.  Unknown keys
 * are ignored.
 */
static void mousedev_key_event(struct mousedev *mousedev,
			       unsigned int code, int value)
{
	int index;

	switch (code) {

	case BTN_TOUCH:
	case BTN_0:
	case BTN_LEFT:		index = 0; break;

	case BTN_STYLUS:
	case BTN_1:
	case BTN_RIGHT:		index = 1; break;

	case BTN_2:
	case BTN_FORWARD:
	case BTN_STYLUS2:
	case BTN_MIDDLE:	index = 2; break;

	case BTN_3:
	case BTN_BACK:
	case BTN_SIDE:		index = 3; break;

	case BTN_4:
	case BTN_EXTRA:		index = 4; break;

	default:		return;
	}

	if (value) {
		set_bit(index, &mousedev->packet.buttons);
		set_bit(index, &mousedev_mix->packet.buttons);
	} else {
		clear_bit(index, &mousedev->packet.buttons);
		clear_bit(index, &mousedev_mix->packet.buttons);
	}
}
/*
 * Fan a finished hardware packet out to every client of @mousedev.
 *
 * Walks the client list under RCU and merges the packet into each
 * client's per-client queue under its packet_lock (plain spin_lock:
 * interrupts are already disabled on this path, see comment below).
 * If the button state changed, the current queue slot is "sealed" and
 * a fresh one started so button transitions are not coalesced away.
 * Absolute coordinates are converted to per-client deltas against the
 * client's tracked position.  Readers are woken once, after the loop.
 */
static void mousedev_notify_readers(struct mousedev *mousedev,
				    struct mousedev_hw_data *packet)
{
	struct mousedev_client *client;
	struct mousedev_motion *p;
	unsigned int new_head;
	int wake_readers = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(client, &mousedev->client_list, node) {

		/* Just acquire the lock, interrupts already disabled */
		spin_lock(&client->packet_lock);

		p = &client->packets[client->head];
		if (client->ready && p->buttons != mousedev->packet.buttons) {
			new_head = (client->head + 1) % PACKET_QUEUE_LEN;
			if (new_head != client->tail) {
				/* advance only if the queue has room */
				p = &client->packets[client->head = new_head];
				memset(p, 0, sizeof(struct mousedev_motion));
			}
		}

		if (packet->abs_event) {
			/* turn absolute position into a delta for this client */
			p->dx += packet->x - client->pos_x;
			p->dy += packet->y - client->pos_y;
			client->pos_x = packet->x;
			client->pos_y = packet->y;
		}

		client->pos_x += packet->dx;
		client->pos_x = clamp_val(client->pos_x, 0, xres);

		client->pos_y += packet->dy;
		client->pos_y = clamp_val(client->pos_y, 0, yres);

		p->dx += packet->dx;
		p->dy += packet->dy;
		p->dz += packet->dz;
		p->buttons = mousedev->packet.buttons;

		if (p->dx || p->dy || p->dz ||
		    p->buttons != client->last_buttons)
			client->ready = 1;

		spin_unlock(&client->packet_lock);

		if (client->ready) {
			kill_fasync(&client->fasync, SIGIO, POLL_IN);
			wake_readers = 1;
		}
	}
	rcu_read_unlock();

	if (wake_readers)
		wake_up_interruptible(&mousedev->wait);
}
/*
 * Handle BTN_TOUCH transitions on a touchpad.
 *
 * On touch start, record the timestamp.  On release, if the touch was
 * shorter than tap_time, emulate a left-button click by pulsing bit 0
 * and notifying readers twice (press then release); then reset the
 * per-touch averaging state.
 */
static void mousedev_touchpad_touch(struct mousedev *mousedev, int value)
{
	if (!value) {
		if (mousedev->touch &&
		    time_before(jiffies,
				mousedev->touch + msecs_to_jiffies(tap_time))) {
			/*
			 * Toggle left button to emulate tap.
			 * We rely on the fact that mousedev_mix always has 0
			 * motion packet so we won't mess current position.
			 */
			set_bit(0, &mousedev->packet.buttons);
			set_bit(0, &mousedev_mix->packet.buttons);
			mousedev_notify_readers(mousedev, &mousedev_mix->packet);
			mousedev_notify_readers(mousedev_mix,
						&mousedev_mix->packet);
			clear_bit(0, &mousedev->packet.buttons);
			clear_bit(0, &mousedev_mix->packet.buttons);
		}
		mousedev->touch = mousedev->pkt_count = 0;
		mousedev->frac_dx = 0;
		mousedev->frac_dy = 0;

	} else if (!mousedev->touch)
		mousedev->touch = jiffies;
}
/*
 * Input-core event callback: dispatch one event to the appropriate
 * translator, and on SYN_REPORT flush the assembled packet to this
 * device's readers and to the "mice" multiplexer, then reset the
 * relative fields for the next report.
 */
static void mousedev_event(struct input_handle *handle,
			   unsigned int type, unsigned int code, int value)
{
	struct mousedev *mousedev = handle->private;

	switch (type) {

	case EV_ABS:
		/* Ignore joysticks */
		if (test_bit(BTN_TRIGGER, handle->dev->keybit))
			return;

		/* BTN_TOOL_FINGER marks a touchpad (relative emulation) */
		if (test_bit(BTN_TOOL_FINGER, handle->dev->keybit))
			mousedev_touchpad_event(handle->dev,
						mousedev, code, value);
		else
			mousedev_abs_event(handle->dev, mousedev, code, value);

		break;

	case EV_REL:
		mousedev_rel_event(mousedev, code, value);
		break;

	case EV_KEY:
		if (value != 2) {	/* ignore autorepeat */
			if (code == BTN_TOUCH &&
			    test_bit(BTN_TOOL_FINGER, handle->dev->keybit))
				mousedev_touchpad_touch(mousedev, value);
			else
				mousedev_key_event(mousedev, code, value);
		}
		break;

	case EV_SYN:
		if (code == SYN_REPORT) {
			if (mousedev->touch) {
				mousedev->pkt_count++;
				/*
				 * Input system eats duplicate events,
				 * but we need all of them to do correct
				 * averaging so apply present one forward
				 */
				fx(0) = fx(1);
				fy(0) = fy(1);
			}

			mousedev_notify_readers(mousedev, &mousedev->packet);
			mousedev_notify_readers(mousedev_mix, &mousedev->packet);

			mousedev->packet.dx = mousedev->packet.dy =
				mousedev->packet.dz = 0;
			mousedev->packet.abs_event = 0;
		}
		break;
	}
}
/* fasync(2) support: register/unregister a client for SIGIO delivery. */
static int mousedev_fasync(int fd, struct file *file, int on)
{
	struct mousedev_client *client = file->private_data;

	return fasync_helper(fd, file, on, &client->fasync);
}

/*
 * struct device release callback: drops the input device reference
 * taken in mousedev_create() and frees the mousedev itself.
 */
static void mousedev_free(struct device *dev)
{
	struct mousedev *mousedev = container_of(dev, struct mousedev, dev);

	input_put_device(mousedev->handle.dev);
	kfree(mousedev);
}
/*
 * Open the underlying input device on the first user; later users just
 * bump the open count.  Fails with -ENODEV once the device is marked
 * dead.  The open count is rolled back if input_open_device() fails.
 */
static int mousedev_open_device(struct mousedev *mousedev)
{
	int retval;

	retval = mutex_lock_interruptible(&mousedev->mutex);
	if (retval)
		return retval;

	if (!mousedev->exist)
		retval = -ENODEV;
	else if (!mousedev->open++) {
		retval = input_open_device(&mousedev->handle);
		if (retval)
			mousedev->open--;
	}

	mutex_unlock(&mousedev->mutex);
	return retval;
}
/* Drop one user; close the input device when the last one goes away. */
static void mousedev_close_device(struct mousedev *mousedev)
{
	mutex_lock(&mousedev->mutex);

	if (mousedev->exist && !--mousedev->open)
		input_close_device(&mousedev->handle);

	mutex_unlock(&mousedev->mutex);
}
/*
 * Open all available devices so they can all be multiplexed in one
 * stream. Note that this function is called with mousedev_mix->mutex
 * held.
 */
static int mixdev_open_devices(struct mousedev *mixdev)
{
	int error;

	error = mutex_lock_interruptible(&mixdev->mutex);
	if (error)
		return error;

	if (!mixdev->open++) {
		struct mousedev *mousedev;

		/* open every member device; individual failures are skipped */
		list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
			if (!mousedev->opened_by_mixdev) {
				if (mousedev_open_device(mousedev))
					continue;

				mousedev->opened_by_mixdev = true;
			}
		}
	}

	mutex_unlock(&mixdev->mutex);
	return 0;
}
/*
 * Close all devices that were opened as part of multiplexed
 * device. Note that this function is called with mousedev_mix->mutex
 * held.
 */
static void mixdev_close_devices(struct mousedev *mixdev)
{
	mutex_lock(&mixdev->mutex);

	if (!--mixdev->open) {
		struct mousedev *mousedev;

		list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
			if (mousedev->opened_by_mixdev) {
				mousedev->opened_by_mixdev = false;
				mousedev_close_device(mousedev);
			}
		}
	}

	mutex_unlock(&mixdev->mutex);
}
/* Add a client to the device's RCU-protected reader list. */
static void mousedev_attach_client(struct mousedev *mousedev,
				   struct mousedev_client *client)
{
	spin_lock(&mousedev->client_lock);
	list_add_tail_rcu(&client->node, &mousedev->client_list);
	spin_unlock(&mousedev->client_lock);
}

/*
 * Remove a client from the reader list and wait for concurrent RCU
 * readers (mousedev_notify_readers()) to finish before the caller may
 * free the client.
 */
static void mousedev_detach_client(struct mousedev *mousedev,
				   struct mousedev_client *client)
{
	spin_lock(&mousedev->client_lock);
	list_del_rcu(&client->node);
	spin_unlock(&mousedev->client_lock);
	synchronize_rcu();
}
/* release(2): detach and free the client, then drop the device user. */
static int mousedev_release(struct inode *inode, struct file *file)
{
	struct mousedev_client *client = file->private_data;
	struct mousedev *mousedev = client->mousedev;

	mousedev_detach_client(mousedev, client);
	kfree(client);

	mousedev->close_device(mousedev);

	return 0;
}
/*
 * open(2): allocate per-file client state, attach it to the mousedev
 * (either the one backing this cdev, or the "mice" multiplexer when
 * opened via the psaux misc device) and open the underlying device(s).
 * The client starts centered on the virtual screen.
 */
static int mousedev_open(struct inode *inode, struct file *file)
{
	struct mousedev_client *client;
	struct mousedev *mousedev;
	int error;

#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
	if (imajor(inode) == MISC_MAJOR)
		mousedev = mousedev_mix;
	else
#endif
		mousedev = container_of(inode->i_cdev, struct mousedev, cdev);

	client = kzalloc(sizeof(struct mousedev_client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	spin_lock_init(&client->packet_lock);
	client->pos_x = xres / 2;
	client->pos_y = yres / 2;
	client->mousedev = mousedev;
	mousedev_attach_client(mousedev, client);

	error = mousedev->open_device(mousedev);
	if (error)
		goto err_free_client;

	file->private_data = client;
	stream_open(inode, file);

	return 0;

 err_free_client:
	mousedev_detach_client(mousedev, client);
	kfree(client);
	return error;
}
/*
 * Render the oldest queued motion entry as a PS/2 byte sequence into
 * @ps2_data, consuming as much motion as fits in one packet (deltas
 * are clamped and the remainder left queued).  Sets client->bufsiz to
 * the packet length (3 or 4 bytes depending on emulation mode).  When
 * the entry is fully drained, either advance the tail or, if the queue
 * is empty, mark the client not ready.
 *
 * Must be called with client->packet_lock held.
 */
static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data)
{
	struct mousedev_motion *p = &client->packets[client->tail];
	s8 dx, dy, dz;

	dx = clamp_val(p->dx, -127, 127);
	p->dx -= dx;
	dy = clamp_val(p->dy, -127, 127);
	p->dy -= dy;

	/* byte 0: sync bit 3, sign bits of dx/dy, low three buttons */
	ps2_data[0] = BIT(3);
	ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2);
	ps2_data[0] |= p->buttons & 0x07;
	ps2_data[1] = dx;
	ps2_data[2] = dy;

	switch (client->mode) {
	case MOUSEDEV_EMUL_EXPS:
		/* 4-bit wheel plus buttons 4/5 in the fourth byte */
		dz = clamp_val(p->dz, -7, 7);
		p->dz -= dz;
		ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1);
		client->bufsiz = 4;
		break;

	case MOUSEDEV_EMUL_IMPS:
		dz = clamp_val(p->dz, -127, 127);
		p->dz -= dz;
		ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
			       ((p->buttons & 0x08) >> 1);
		ps2_data[3] = dz;
		client->bufsiz = 4;
		break;

	case MOUSEDEV_EMUL_PS2:
	default:
		/* plain PS/2 has no wheel; discard accumulated dz */
		p->dz = 0;
		ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
			       ((p->buttons & 0x08) >> 1);
		client->bufsiz = 3;
		break;
	}

	if (!p->dx && !p->dy && !p->dz) {
		if (client->tail == client->head) {
			client->ready = 0;
			client->last_buttons = p->buttons;
		} else
			client->tail = (client->tail + 1) % PACKET_QUEUE_LEN;
	}
}
/*
 * Build the emulated PS/2 reply for a command byte written by the
 * client.  Every command is acknowledged with 0xfa; recognized ones
 * append their payload.  Called with client->packet_lock held (see
 * mousedev_write()).
 */
static void mousedev_generate_response(struct mousedev_client *client,
					int command)
{
	client->ps2[0] = 0xfa; /* ACK */

	switch (command) {

	case 0xeb: /* Poll */
		mousedev_packet(client, &client->ps2[1]);
		client->bufsiz++; /* account for leading ACK */
		break;

	case 0xf2: /* Get ID */
		switch (client->mode) {
		case MOUSEDEV_EMUL_PS2:
			client->ps2[1] = 0;
			break;
		case MOUSEDEV_EMUL_IMPS:
			client->ps2[1] = 3;
			break;
		case MOUSEDEV_EMUL_EXPS:
			client->ps2[1] = 4;
			break;
		}
		client->bufsiz = 2;
		break;

	case 0xe9: /* Get info */
		client->ps2[1] = 0x60; client->ps2[2] = 3; client->ps2[3] = 200;
		client->bufsiz = 4;
		break;

	case 0xff: /* Reset */
		/* back to plain PS/2 mode; 0xaa 0x00 is the self-test reply */
		client->impsseq = client->imexseq = 0;
		client->mode = MOUSEDEV_EMUL_PS2;
		client->ps2[1] = 0xaa; client->ps2[2] = 0x00;
		client->bufsiz = 3;
		break;

	default:
		client->bufsiz = 1;
		break;
	}
	client->buffer = client->bufsiz;
}
/*
 * write(2): interpret each byte as a PS/2 command.  Tracks the
 * IntelliMouse/Explorer "knock" sequences (sample-rate settings) to
 * switch the client's emulation mode, queues the reply via
 * mousedev_generate_response(), then signals readers.
 */
static ssize_t mousedev_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct mousedev_client *client = file->private_data;
	unsigned char c;
	unsigned int i;

	for (i = 0; i < count; i++) {

		if (get_user(c, buffer + i))
			return -EFAULT;

		spin_lock_irq(&client->packet_lock);

		if (c == mousedev_imex_seq[client->imexseq]) {
			if (++client->imexseq == MOUSEDEV_SEQ_LEN) {
				client->imexseq = 0;
				client->mode = MOUSEDEV_EMUL_EXPS;
			}
		} else
			client->imexseq = 0;

		if (c == mousedev_imps_seq[client->impsseq]) {
			if (++client->impsseq == MOUSEDEV_SEQ_LEN) {
				client->impsseq = 0;
				client->mode = MOUSEDEV_EMUL_IMPS;
			}
		} else
			client->impsseq = 0;

		mousedev_generate_response(client, c);

		spin_unlock_irq(&client->packet_lock);
		cond_resched();
	}

	kill_fasync(&client->fasync, SIGIO, POLL_IN);
	wake_up_interruptible(&client->mousedev->wait);

	return count;
}
/*
 * read(2): block (unless O_NONBLOCK) until reply bytes or motion are
 * available, render a fresh PS/2 packet into ps2[] if needed, and copy
 * out up to @count of the buffered bytes.  The user copy happens
 * outside the spinlock, from a stack snapshot.
 */
static ssize_t mousedev_read(struct file *file, char __user *buffer,
			     size_t count, loff_t *ppos)
{
	struct mousedev_client *client = file->private_data;
	struct mousedev *mousedev = client->mousedev;
	u8 data[sizeof(client->ps2)];
	int retval = 0;

	if (!client->ready && !client->buffer && mousedev->exist &&
	    (file->f_flags & O_NONBLOCK))
		return -EAGAIN;

	retval = wait_event_interruptible(mousedev->wait,
			!mousedev->exist || client->ready || client->buffer);
	if (retval)
		return retval;

	if (!mousedev->exist)
		return -ENODEV;

	spin_lock_irq(&client->packet_lock);

	if (!client->buffer && client->ready) {
		mousedev_packet(client, client->ps2);
		client->buffer = client->bufsiz;
	}

	if (count > client->buffer)
		count = client->buffer;

	/* bytes are consumed from the front; buffer counts what's left */
	memcpy(data, client->ps2 + client->bufsiz - client->buffer, count);
	client->buffer -= count;

	spin_unlock_irq(&client->packet_lock);

	if (copy_to_user(buffer, data, count))
		return -EFAULT;

	return count;
}
/* No kernel lock - fine */
static __poll_t mousedev_poll(struct file *file, poll_table *wait)
{
	struct mousedev_client *client = file->private_data;
	struct mousedev *mousedev = client->mousedev;
	__poll_t mask;

	poll_wait(file, &mousedev->wait, wait);

	/* writable while the device exists; hung up once it is gone */
	mask = mousedev->exist ? EPOLLOUT | EPOLLWRNORM : EPOLLHUP | EPOLLERR;
	if (client->ready || client->buffer)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

static const struct file_operations mousedev_fops = {
	.owner = THIS_MODULE,
	.read = mousedev_read,
	.write = mousedev_write,
	.poll = mousedev_poll,
	.open = mousedev_open,
	.release = mousedev_release,
	.fasync = mousedev_fasync,
	.llseek = noop_llseek,
};
/*
 * Mark device non-existent. This disables writes, ioctls and
 * prevents new users from opening the device. Already posted
 * blocking reads will stay, however new ones will fail.
 */
static void mousedev_mark_dead(struct mousedev *mousedev)
{
	mutex_lock(&mousedev->mutex);
	mousedev->exist = false;
	mutex_unlock(&mousedev->mutex);
}

/*
 * Wake up users waiting for IO so they can disconnect from
 * dead device.
 */
static void mousedev_hangup(struct mousedev *mousedev)
{
	struct mousedev_client *client;

	spin_lock(&mousedev->client_lock);
	list_for_each_entry(client, &mousedev->client_list, node)
		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
	spin_unlock(&mousedev->client_lock);

	wake_up_interruptible(&mousedev->wait);
}

/*
 * Tear down a mousedev: mark it dead, kick waiting readers, and close
 * the input device if anyone still holds it open.
 */
static void mousedev_cleanup(struct mousedev *mousedev)
{
	struct input_handle *handle = &mousedev->handle;

	mousedev_mark_dead(mousedev);
	mousedev_hangup(mousedev);

	/* mousedev is marked dead so no one else accesses mousedev->open */
	if (mousedev->open)
		input_close_device(handle);
}
/*
 * Reserve a character-device minor for a new mousedev node.
 *
 * The multiplexer always gets the fixed MOUSEDEV_MIX minor; regular
 * nodes are allocated starting from the legacy mouse range.  Returns
 * the minor number or a negative errno (already logged).
 */
static int mousedev_reserve_minor(bool mixdev)
{
	int minor;

	minor = mixdev ?
		input_get_new_minor(MOUSEDEV_MIX, 1, false) :
		input_get_new_minor(MOUSEDEV_MINOR_BASE,
				    MOUSEDEV_MINORS, true);

	if (minor < 0) {
		if (mixdev)
			pr_err("failed to reserve mixdev minor: %d\n", minor);
		else
			pr_err("failed to reserve new minor: %d\n", minor);
	}

	return minor;
}
/*
 * Allocate and register one mousedev node.
 *
 * For @mixdev the node is the shared "mice" device (no input handle of
 * its own, open/close fan out to member devices); otherwise it is a
 * per-device "mouseN" node bound to @dev via an input handle.  Returns
 * the new mousedev or an ERR_PTR; the error path unwinds in strict
 * reverse order of setup.
 */
static struct mousedev *mousedev_create(struct input_dev *dev,
					struct input_handler *handler,
					bool mixdev)
{
	struct mousedev *mousedev;
	int minor;
	int error;

	minor = mousedev_reserve_minor(mixdev);
	if (minor < 0) {
		error = minor;
		goto err_out;
	}

	mousedev = kzalloc(sizeof(struct mousedev), GFP_KERNEL);
	if (!mousedev) {
		error = -ENOMEM;
		goto err_free_minor;
	}

	INIT_LIST_HEAD(&mousedev->client_list);
	INIT_LIST_HEAD(&mousedev->mixdev_node);
	spin_lock_init(&mousedev->client_lock);
	mutex_init(&mousedev->mutex);
	/* mixdev's mutex nests inside member devices' mutexes */
	lockdep_set_subclass(&mousedev->mutex,
			     mixdev ? SINGLE_DEPTH_NESTING : 0);
	init_waitqueue_head(&mousedev->wait);

	if (mixdev) {
		dev_set_name(&mousedev->dev, "mice");

		mousedev->open_device = mixdev_open_devices;
		mousedev->close_device = mixdev_close_devices;
	} else {
		int dev_no = minor;
		/* Normalize device number if it falls into legacy range */
		if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS)
			dev_no -= MOUSEDEV_MINOR_BASE;
		dev_set_name(&mousedev->dev, "mouse%d", dev_no);

		mousedev->open_device = mousedev_open_device;
		mousedev->close_device = mousedev_close_device;
	}

	mousedev->exist = true;
	mousedev->handle.dev = input_get_device(dev);
	mousedev->handle.name = dev_name(&mousedev->dev);
	mousedev->handle.handler = handler;
	mousedev->handle.private = mousedev;

	mousedev->dev.class = &input_class;
	if (dev)
		mousedev->dev.parent = &dev->dev;
	mousedev->dev.devt = MKDEV(INPUT_MAJOR, minor);
	mousedev->dev.release = mousedev_free;
	device_initialize(&mousedev->dev);

	if (!mixdev) {
		error = input_register_handle(&mousedev->handle);
		if (error)
			goto err_free_mousedev;
	}

	cdev_init(&mousedev->cdev, &mousedev_fops);

	error = cdev_device_add(&mousedev->cdev, &mousedev->dev);
	if (error)
		goto err_cleanup_mousedev;

	return mousedev;

 err_cleanup_mousedev:
	mousedev_cleanup(mousedev);
	if (!mixdev)
		input_unregister_handle(&mousedev->handle);
 err_free_mousedev:
	put_device(&mousedev->dev);
 err_free_minor:
	input_free_minor(minor);
 err_out:
	return ERR_PTR(error);
}
/*
 * Reverse of mousedev_create(): remove the cdev, mark the device dead,
 * release its minor and handle (the multiplexer has no registered
 * handle), and drop the final device reference.
 */
static void mousedev_destroy(struct mousedev *mousedev)
{
	cdev_device_del(&mousedev->cdev, &mousedev->dev);
	mousedev_cleanup(mousedev);
	input_free_minor(MINOR(mousedev->dev.devt));
	if (mousedev != mousedev_mix)
		input_unregister_handle(&mousedev->handle);
	put_device(&mousedev->dev);
}
/*
 * Add a new mousedev to the "mice" multiplexer.  If the multiplexer is
 * already open, the device is opened immediately on its behalf.  Takes
 * a reference for the list entry.
 */
static int mixdev_add_device(struct mousedev *mousedev)
{
	int retval;

	retval = mutex_lock_interruptible(&mousedev_mix->mutex);
	if (retval)
		return retval;

	if (mousedev_mix->open) {
		retval = mousedev_open_device(mousedev);
		if (retval)
			goto out;

		mousedev->opened_by_mixdev = true;
	}

	get_device(&mousedev->dev);
	list_add_tail(&mousedev->mixdev_node, &mousedev_mix_list);

 out:
	mutex_unlock(&mousedev_mix->mutex);
	return retval;
}
/*
 * Remove a mousedev from the multiplexer, closing it first if the
 * multiplexer had opened it, and drop the list's device reference.
 */
static void mixdev_remove_device(struct mousedev *mousedev)
{
	mutex_lock(&mousedev_mix->mutex);

	if (mousedev->opened_by_mixdev) {
		mousedev->opened_by_mixdev = false;
		mousedev_close_device(mousedev);
	}

	list_del_init(&mousedev->mixdev_node);
	mutex_unlock(&mousedev_mix->mutex);

	put_device(&mousedev->dev);
}
/*
 * Input-core connect callback: create a "mouseN" node for the matched
 * device and enroll it in the "mice" multiplexer.
 */
static int mousedev_connect(struct input_handler *handler,
			    struct input_dev *dev,
			    const struct input_device_id *id)
{
	struct mousedev *mousedev;
	int error;

	mousedev = mousedev_create(dev, handler, false);
	if (IS_ERR(mousedev))
		return PTR_ERR(mousedev);

	error = mixdev_add_device(mousedev);
	if (error) {
		mousedev_destroy(mousedev);
		return error;
	}

	return 0;
}

/* Input-core disconnect callback: undo mousedev_connect(). */
static void mousedev_disconnect(struct input_handle *handle)
{
	struct mousedev *mousedev = handle->private;

	mixdev_remove_device(mousedev);
	mousedev_destroy(mousedev);
}
/* Device classes mousedev attaches to: mice, wheels, tablets, touchpads. */
static const struct input_device_id mousedev_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT |
				INPUT_DEVICE_ID_MATCH_RELBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) },
		.keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) },
		.relbit = { BIT_MASK(REL_X) | BIT_MASK(REL_Y) },
	},	/* A mouse like device, at least one button,
		   two relative axes */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_RELBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) },
		.relbit = { BIT_MASK(REL_WHEEL) },
	},	/* A separate scrollwheel */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT |
				INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},	/* A tablet like device, at least touch detection,
		   two absolute axes */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT |
				INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
		.keybit = { [BIT_WORD(BTN_TOOL_FINGER)] =
				BIT_MASK(BTN_TOOL_FINGER) },
		.absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) |
				BIT_MASK(ABS_PRESSURE) |
				BIT_MASK(ABS_TOOL_WIDTH) },
	},	/* A touchpad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			INPUT_DEVICE_ID_MATCH_KEYBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
		.keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) },
		.absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},	/* Mouse-like device with absolute X and Y but ordinary
		   clicks, like hp ILO2 High Performance mouse */

	{ },	/* Terminating entry */
};

MODULE_DEVICE_TABLE(input, mousedev_ids);
/* Input-core handler descriptor; legacy_minors keeps mouseN numbering. */
static struct input_handler mousedev_handler = {
	.event = mousedev_event,
	.connect = mousedev_connect,
	.disconnect = mousedev_disconnect,
	.legacy_minors = true,
	.minor = MOUSEDEV_MINOR_BASE,
	.name = "mousedev",
	.id_table = mousedev_ids,
};
#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
#include <linux/miscdevice.h>
static struct miscdevice psaux_mouse = {
.minor = PSMOUSE_MINOR,
.name = "psaux",
.fops = &mousedev_fops,
};
static bool psaux_registered;
static void __init mousedev_psaux_register(void)
{
int error;
error = misc_register(&psaux_mouse);
if (error)
pr_warn("could not register psaux device, error: %d\n",
error);
else
psaux_registered = true;
}
static void __exit mousedev_psaux_unregister(void)
{
if (psaux_registered)
misc_deregister(&psaux_mouse);
}
#else
static inline void mousedev_psaux_register(void) { }
static inline void mousedev_psaux_unregister(void) { }
#endif
/*
 * Module init: create the shared "mice" multiplexer first, then
 * register the handler (which creates per-device nodes) and the
 * optional legacy psaux device.
 */
static int __init mousedev_init(void)
{
	int error;

	mousedev_mix = mousedev_create(NULL, &mousedev_handler, true);
	if (IS_ERR(mousedev_mix))
		return PTR_ERR(mousedev_mix);

	error = input_register_handler(&mousedev_handler);
	if (error) {
		mousedev_destroy(mousedev_mix);
		return error;
	}

	mousedev_psaux_register();

	pr_info("PS/2 mouse device common for all mice\n");

	return 0;
}

/* Module exit: tear everything down in reverse order of init. */
static void __exit mousedev_exit(void)
{
	mousedev_psaux_unregister();
	input_unregister_handler(&mousedev_handler);
	mousedev_destroy(mousedev_mix);
}

module_init(mousedev_init);
module_exit(mousedev_exit);
|
linux-master
|
drivers/input/mousedev.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* A driver for the Griffin Technology, Inc. "PowerMate" USB controller dial.
*
* v1.1, (c)2002 William R Sowerbutts <[email protected]>
*
 * This device is an anodised aluminium knob which connects over USB. It can measure
* clockwise and anticlockwise rotation. The dial also acts as a pushbutton with
* a spring for automatic release. The base contains a pair of LEDs which illuminate
* the translucent base. It rotates without limit and reports its relative rotation
* back to the host when polled by the USB controller.
*
* Testing with the knob I have has shown that it measures approximately 94 "clicks"
* for one full rotation. Testing with my High Speed Rotation Actuator (ok, it was
* a variable speed cordless electric drill) has shown that the device can measure
* speeds of up to 7 clicks either clockwise or anticlockwise between pollings from
* the host. If it counts more than 7 clicks before it is polled, it will wrap back
* to zero and start counting again. This was at quite high speed, however, almost
* certainly faster than the human hand could turn it. Griffin say that it loses a
* pulse or two on a direction change; the granularity is so fine that I never
* noticed this in practice.
*
* The device's microcontroller can be programmed to set the LED to either a constant
* intensity, or to a rhythmic pulsing. Several patterns and speeds are available.
*
* Griffin were very happy to provide documentation and free hardware for development.
*
* Some userspace tools are available on the web: http://sowerbutts.com/powermate/
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/usb/input.h>
#define POWERMATE_VENDOR 0x077d /* Griffin Technology, Inc. */
#define POWERMATE_PRODUCT_NEW 0x0410 /* Griffin PowerMate */
#define POWERMATE_PRODUCT_OLD 0x04AA /* Griffin soundKnob */
#define CONTOUR_VENDOR 0x05f3 /* Contour Design, Inc. */
#define CONTOUR_JOG 0x0240 /* Jog and Shuttle */
/* these are the command codes we send to the device */
#define SET_STATIC_BRIGHTNESS 0x01
#define SET_PULSE_ASLEEP 0x02
#define SET_PULSE_AWAKE 0x03
#define SET_PULSE_MODE 0x04
/* these refer to bits in the powermate_device's requires_update field. */
#define UPDATE_STATIC_BRIGHTNESS (1<<0)
#define UPDATE_PULSE_ASLEEP (1<<1)
#define UPDATE_PULSE_AWAKE (1<<2)
#define UPDATE_PULSE_MODE (1<<3)
/* at least two versions of the hardware exist, with differing payload
sizes. the first three bytes always contain the "interesting" data in
the relevant format. */
#define POWERMATE_PAYLOAD_SIZE_MAX 6
#define POWERMATE_PAYLOAD_SIZE_MIN 3
/* Per-device state for one connected PowerMate/SoundKnob */
struct powermate_device {
	signed char *data;		/* interrupt URB payload (DMA-coherent buffer) */
	dma_addr_t data_dma;		/* DMA address of @data */
	struct urb *irq, *config;	/* interrupt-in URB and async control URB */
	struct usb_ctrlrequest *configcr; /* setup packet used by @config */
	struct usb_device *udev;
	struct usb_interface *intf;
	struct input_dev *input;
	spinlock_t lock;		/* guards requires_update and LED state below */
	int static_brightness;		/* LED brightness (0-255) when not pulsing */
	int pulse_speed;		/* 0-510; 255 = normal (see powermate_sync_state) */
	int pulse_table;		/* pulse waveform table index (0-2) */
	int pulse_asleep;		/* boolean: pulse while host asleep */
	int pulse_awake;		/* boolean: pulse constantly while awake */
	int requires_update; // physical settings which are out of sync
	char phys[64];			/* physical path reported via input_dev->phys */
};
static char pm_name_powermate[] = "Griffin PowerMate";
static char pm_name_soundknob[] = "Griffin SoundKnob";
static void powermate_config_complete(struct urb *urb);
/*
 * Callback for data arriving from the PowerMate over the USB interrupt pipe.
 * Runs in URB-completion (atomic) context.  Payload byte 0 bit 0 is the
 * pushbutton state, byte 1 is the signed relative rotation since the last
 * poll.  The URB is resubmitted unless it has been shut down.
 */
static void powermate_irq(struct urb *urb)
{
	struct powermate_device *pm = urb->context;
	struct device *dev = &pm->intf->dev;
	int retval;

	switch (urb->status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up: do NOT resubmit */
		dev_dbg(dev, "%s - urb shutting down with status: %d\n",
			__func__, urb->status);
		return;
	default:
		/* transient error: skip this sample but keep polling */
		dev_dbg(dev, "%s - nonzero urb status received: %d\n",
			__func__, urb->status);
		goto exit;
	}

	/* handle updates to device state */
	input_report_key(pm->input, BTN_0, pm->data[0] & 0x01);
	input_report_rel(pm->input, REL_DIAL, pm->data[1]);
	input_sync(pm->input);

exit:
	retval = usb_submit_urb (urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "%s - usb_submit_urb failed with result: %d\n",
			__func__, retval);
}
/*
 * Decide if we need to issue a control message and do so.  Must be called
 * with pm->lock taken.
 *
 * Only one control URB may be in flight at a time and each URB carries a
 * single setting, so pending updates are dribbled out one per completion:
 * the highest-priority bit in requires_update is translated into a vendor
 * control request and submitted; powermate_config_complete() calls back
 * here for the next one.
 *
 * Fix: both error printk()s were missing their trailing '\n', which made
 * them run into the next kernel log line.
 */
static void powermate_sync_state(struct powermate_device *pm)
{
	if (pm->requires_update == 0)
		return; /* no updates are required */
	if (pm->config->status == -EINPROGRESS)
		return; /* an update is already in progress; it'll issue this update when it completes */

	if (pm->requires_update & UPDATE_PULSE_ASLEEP){
		pm->configcr->wValue = cpu_to_le16( SET_PULSE_ASLEEP );
		pm->configcr->wIndex = cpu_to_le16( pm->pulse_asleep ? 1 : 0 );
		pm->requires_update &= ~UPDATE_PULSE_ASLEEP;
	}else if (pm->requires_update & UPDATE_PULSE_AWAKE){
		pm->configcr->wValue = cpu_to_le16( SET_PULSE_AWAKE );
		pm->configcr->wIndex = cpu_to_le16( pm->pulse_awake ? 1 : 0 );
		pm->requires_update &= ~UPDATE_PULSE_AWAKE;
	}else if (pm->requires_update & UPDATE_PULSE_MODE){
		int op, arg;
		/* the powermate takes an operation and an argument for its pulse algorithm.
		   the operation can be:
		   0: divide the speed
		   1: pulse at normal speed
		   2: multiply the speed
		   the argument only has an effect for operations 0 and 2, and ranges between
		   1 (least effect) to 255 (maximum effect).

		   thus, several states are equivalent and are coalesced into one state.

		   we map this onto a range from 0 to 510, with:
		   0 -- 254    -- use divide (0 = slowest)
		   255         -- use normal speed
		   256 -- 510  -- use multiple (510 = fastest).

		   Only values of 'arg' quite close to 255 are particularly useful/spectacular.
		*/
		if (pm->pulse_speed < 255) {
			op = 0;                   // divide
			arg = 255 - pm->pulse_speed;
		} else if (pm->pulse_speed > 255) {
			op = 2;                   // multiply
			arg = pm->pulse_speed - 255;
		} else {
			op = 1;                   // normal speed
			arg = 0;                  // can be any value
		}
		pm->configcr->wValue = cpu_to_le16( (pm->pulse_table << 8) | SET_PULSE_MODE );
		pm->configcr->wIndex = cpu_to_le16( (arg << 8) | op );
		pm->requires_update &= ~UPDATE_PULSE_MODE;
	} else if (pm->requires_update & UPDATE_STATIC_BRIGHTNESS) {
		pm->configcr->wValue = cpu_to_le16( SET_STATIC_BRIGHTNESS );
		pm->configcr->wIndex = cpu_to_le16( pm->static_brightness );
		pm->requires_update &= ~UPDATE_STATIC_BRIGHTNESS;
	} else {
		printk(KERN_ERR "powermate: unknown update required\n");
		pm->requires_update = 0; /* fudge the bug */
		return;
	}

/*	printk("powermate: %04x %04x\n", pm->configcr->wValue, pm->configcr->wIndex); */

	pm->configcr->bRequestType = 0x41; /* vendor request */
	pm->configcr->bRequest = 0x01;
	pm->configcr->wLength = 0;

	usb_fill_control_urb(pm->config, pm->udev, usb_sndctrlpipe(pm->udev, 0),
			     (void *) pm->configcr, NULL, 0,
			     powermate_config_complete, pm);

	if (usb_submit_urb(pm->config, GFP_ATOMIC))
		printk(KERN_ERR "powermate: usb_submit_urb(config) failed\n");
}
/* Called when our asynchronous control message completes. We may need to issue another immediately */
static void powermate_config_complete(struct urb *urb)
{
	struct powermate_device *pm = urb->context;
	unsigned long flags;

	if (urb->status)
		printk(KERN_ERR "powermate: config urb returned %d\n", urb->status);

	/* chain to the next pending LED/pulse update, if any, under the lock */
	spin_lock_irqsave(&pm->lock, flags);
	powermate_sync_state(pm);
	spin_unlock_irqrestore(&pm->lock, flags);
}
/*
 * Record the requested LED configuration and kick off a hardware sync if
 * anything actually changed.  All range clamping happens here so callers
 * may pass raw user-supplied values.
 */
static void powermate_pulse_led(struct powermate_device *pm, int static_brightness, int pulse_speed,
				int pulse_table, int pulse_asleep, int pulse_awake)
{
	unsigned long irqflags;

	/* clamp the requested values into the ranges the hardware accepts */
	if (pulse_speed < 0)
		pulse_speed = 0;
	else if (pulse_speed > 510)
		pulse_speed = 510;

	if (pulse_table < 0)
		pulse_table = 0;
	else if (pulse_table > 2)
		pulse_table = 2;

	pulse_asleep = !!pulse_asleep;
	pulse_awake = !!pulse_awake;

	spin_lock_irqsave(&pm->lock, irqflags);

	/* flag only the settings that actually differ from the cached state */
	if (static_brightness != pm->static_brightness) {
		pm->static_brightness = static_brightness;
		pm->requires_update |= UPDATE_STATIC_BRIGHTNESS;
	}
	if (pulse_asleep != pm->pulse_asleep) {
		pm->pulse_asleep = pulse_asleep;
		pm->requires_update |= (UPDATE_PULSE_ASLEEP | UPDATE_STATIC_BRIGHTNESS);
	}
	if (pulse_awake != pm->pulse_awake) {
		pm->pulse_awake = pulse_awake;
		pm->requires_update |= (UPDATE_PULSE_AWAKE | UPDATE_STATIC_BRIGHTNESS);
	}
	if (pulse_speed != pm->pulse_speed || pulse_table != pm->pulse_table) {
		pm->pulse_speed = pulse_speed;
		pm->pulse_table = pulse_table;
		pm->requires_update |= UPDATE_PULSE_MODE;
	}

	powermate_sync_state(pm);

	spin_unlock_irqrestore(&pm->lock, irqflags);
}
/*
 * Callback from the Input layer when userspace writes an event to
 * configure the LED.  Only EV_MSC/MSC_PULSELED is handled; the 32-bit
 * value packs all LED parameters:
 *   bits  0-7 : LED brightness
 *   bits  8-16: pulsing speed modifier (0-510; 255 = standard)
 *   bits 17-18: pulse table (0, 1, 2 valid)
 *   bit  19   : pulse whilst asleep?
 *   bit  20   : pulse constantly?
 */
static int powermate_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int _value)
{
	struct powermate_device *pm = input_get_drvdata(dev);
	unsigned int command = (unsigned int)_value;

	if (type != EV_MSC || code != MSC_PULSELED)
		return 0;

	powermate_pulse_led(pm,
			    command & 0xFF,		/* brightness */
			    (command >> 8) & 0x1FF,	/* pulse speed */
			    (command >> 17) & 0x3,	/* pulse table */
			    (command >> 19) & 0x1,	/* pulse asleep */
			    (command >> 20) & 0x1);	/* pulse awake */

	return 0;
}
/*
 * Allocate the DMA-coherent interrupt payload buffer and the control
 * setup packet.  Returns 0 on success or -ENOMEM on failure; partial
 * allocations are released by the caller via powermate_free_buffers().
 *
 * Fix: the usb_alloc_coherent() failure path returned the bare magic
 * value -1 instead of a proper errno, inconsistent with the kmalloc path.
 */
static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_device *pm)
{
	pm->data = usb_alloc_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX,
				      GFP_KERNEL, &pm->data_dma);
	if (!pm->data)
		return -ENOMEM;

	pm->configcr = kmalloc(sizeof(*(pm->configcr)), GFP_KERNEL);
	if (!pm->configcr)
		return -ENOMEM;

	return 0;
}
/* Release the DMA-coherent payload buffer and the control setup packet */
static void powermate_free_buffers(struct usb_device *udev, struct powermate_device *pm)
{
	usb_free_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX,
			  pm->data, pm->data_dma);
	kfree(pm->configcr);
}
/*
 * Called whenever a USB device matching one in our supported devices table
 * is connected.  Validates the first endpoint, allocates all per-device
 * resources, starts the interrupt URB, registers the input device, and
 * programs the default LED state.  Error paths unwind in strict reverse
 * order via the fail1..fail5 labels.
 */
static int powermate_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev (intf);
	struct usb_host_interface *interface;
	struct usb_endpoint_descriptor *endpoint;
	struct powermate_device *pm;
	struct input_dev *input_dev;
	int pipe, maxp;
	int error = -ENOMEM;

	/* the device must expose at least one interrupt-in endpoint */
	interface = intf->cur_altsetting;
	if (interface->desc.bNumEndpoints < 1)
		return -EINVAL;

	endpoint = &interface->endpoint[0].desc;
	if (!usb_endpoint_is_int_in(endpoint))
		return -EIO;

	/* class request 0x0a to the interface (looks like HID SET_IDLE);
	   return value deliberately ignored -- NOTE(review): presumed
	   best-effort, confirm against the device documentation */
	usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
		0x0a, USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		0, interface->desc.bInterfaceNumber, NULL, 0,
		USB_CTRL_SET_TIMEOUT);

	pm = kzalloc(sizeof(struct powermate_device), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!pm || !input_dev)
		goto fail1;

	if (powermate_alloc_buffers(udev, pm))
		goto fail2;

	pm->irq = usb_alloc_urb(0, GFP_KERNEL);
	if (!pm->irq)
		goto fail2;

	pm->config = usb_alloc_urb(0, GFP_KERNEL);
	if (!pm->config)
		goto fail3;

	pm->udev = udev;
	pm->intf = intf;
	pm->input = input_dev;

	usb_make_path(udev, pm->phys, sizeof(pm->phys));
	strlcat(pm->phys, "/input0", sizeof(pm->phys));

	spin_lock_init(&pm->lock);

	/* pick a human-readable name based on the product id */
	switch (le16_to_cpu(udev->descriptor.idProduct)) {
	case POWERMATE_PRODUCT_NEW:
		input_dev->name = pm_name_powermate;
		break;
	case POWERMATE_PRODUCT_OLD:
		input_dev->name = pm_name_soundknob;
		break;
	default:
		input_dev->name = pm_name_soundknob;
		printk(KERN_WARNING "powermate: unknown product id %04x\n",
		       le16_to_cpu(udev->descriptor.idProduct));
	}

	input_dev->phys = pm->phys;
	usb_to_input_id(udev, &input_dev->id);
	input_dev->dev.parent = &intf->dev;

	input_set_drvdata(input_dev, pm);

	input_dev->event = powermate_input_event;

	/* button, relative dial, and the MSC_PULSELED configuration channel */
	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) |
		BIT_MASK(EV_MSC);
	input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0);
	input_dev->relbit[BIT_WORD(REL_DIAL)] = BIT_MASK(REL_DIAL);
	input_dev->mscbit[BIT_WORD(MSC_PULSELED)] = BIT_MASK(MSC_PULSELED);

	/* get a handle to the interrupt data pipe */
	pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
	maxp = usb_maxpacket(udev, pipe);

	/* tolerate unexpected endpoint sizes, but never read past our buffer */
	if (maxp < POWERMATE_PAYLOAD_SIZE_MIN || maxp > POWERMATE_PAYLOAD_SIZE_MAX) {
		printk(KERN_WARNING "powermate: Expected payload of %d--%d bytes, found %d bytes!\n",
			POWERMATE_PAYLOAD_SIZE_MIN, POWERMATE_PAYLOAD_SIZE_MAX, maxp);
		maxp = POWERMATE_PAYLOAD_SIZE_MAX;
	}

	usb_fill_int_urb(pm->irq, udev, pipe, pm->data,
			 maxp, powermate_irq,
			 pm, endpoint->bInterval);
	pm->irq->transfer_dma = pm->data_dma;
	pm->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	/* register our interrupt URB with the USB system */
	if (usb_submit_urb(pm->irq, GFP_KERNEL)) {
		error = -EIO;
		goto fail4;
	}

	error = input_register_device(pm->input);
	if (error)
		goto fail5;

	/* force an update of everything */
	pm->requires_update = UPDATE_PULSE_ASLEEP | UPDATE_PULSE_AWAKE | UPDATE_PULSE_MODE | UPDATE_STATIC_BRIGHTNESS;
	powermate_pulse_led(pm, 0x80, 255, 0, 1, 0); // set default pulse parameters

	usb_set_intfdata(intf, pm);
	return 0;

 fail5:	usb_kill_urb(pm->irq);
 fail4:	usb_free_urb(pm->config);
 fail3:	usb_free_urb(pm->irq);
 fail2:	powermate_free_buffers(udev, pm);
 fail1:	input_free_device(input_dev);
	kfree(pm);
	return error;
}
/* Called when a USB device we've accepted ownership of is removed */
static void powermate_disconnect(struct usb_interface *intf)
{
	struct powermate_device *pm = usb_get_intfdata (intf);

	usb_set_intfdata(intf, NULL);
	if (pm) {
		/* stop queuing further config updates before killing the URB */
		pm->requires_update = 0;
		usb_kill_urb(pm->irq);
		input_unregister_device(pm->input);
		/* NOTE(review): the config URB is freed without an explicit
		   usb_kill_urb(); presumably any in-flight completion is
		   harmless once requires_update is zero -- confirm */
		usb_free_urb(pm->irq);
		usb_free_urb(pm->config);
		powermate_free_buffers(interface_to_usbdev(intf), pm);

		kfree(pm);
	}
}
/* USB ids handled by this driver */
static const struct usb_device_id powermate_devices[] = {
	{ USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_NEW) },
	{ USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_OLD) },
	{ USB_DEVICE(CONTOUR_VENDOR, CONTOUR_JOG) },
	{ } /* Terminating entry */
};

MODULE_DEVICE_TABLE (usb, powermate_devices);

static struct usb_driver powermate_driver = {
	.name =		"powermate",
	.probe =	powermate_probe,
	.disconnect =	powermate_disconnect,
	.id_table =	powermate_devices,
};

module_usb_driver(powermate_driver);

MODULE_AUTHOR( "William R Sowerbutts" );
MODULE_DESCRIPTION( "Griffin Technology, Inc PowerMate driver" );
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/powermate.c
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) STMicroelectronics 2018
// Author: Pascal Paillet <[email protected]> for STMicroelectronics.
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mfd/stpmic1.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
/**
 * struct stpmic1_onkey - OnKey data
 * @input_dev: pointer to input device
 * @irq_falling: irq fired when the key is pressed (falling edge)
 * @irq_rising: irq fired when the key is released (rising edge)
 */
struct stpmic1_onkey {
	struct input_dev *input_dev;
	int irq_falling;
	int irq_rising;
};
/* Threaded IRQ handler: falling edge means the power key was pressed */
static irqreturn_t onkey_falling_irq(int irq, void *ponkey)
{
	struct stpmic1_onkey *onkey = ponkey;

	input_report_key(onkey->input_dev, KEY_POWER, 1);
	pm_wakeup_event(onkey->input_dev->dev.parent, 0);
	input_sync(onkey->input_dev);

	return IRQ_HANDLED;
}
/* Threaded IRQ handler: rising edge means the power key was released */
static irqreturn_t onkey_rising_irq(int irq, void *ponkey)
{
	struct stpmic1_onkey *onkey = ponkey;

	input_report_key(onkey->input_dev, KEY_POWER, 0);
	pm_wakeup_event(onkey->input_dev->dev.parent, 0);
	input_sync(onkey->input_dev);

	return IRQ_HANDLED;
}
/*
 * Probe: fetch both edge IRQs, program the PMIC turn-off behaviour from
 * device properties, register an input device reporting KEY_POWER, and
 * enable wakeup.  All resources are device-managed, so there is no
 * explicit error unwinding.
 */
static int stpmic1_onkey_probe(struct platform_device *pdev)
{
	struct stpmic1 *pmic = dev_get_drvdata(pdev->dev.parent);
	struct device *dev = &pdev->dev;
	struct input_dev *input_dev;
	struct stpmic1_onkey *onkey;
	unsigned int val, reg = 0;
	int error;

	onkey = devm_kzalloc(dev, sizeof(*onkey), GFP_KERNEL);
	if (!onkey)
		return -ENOMEM;

	onkey->irq_falling = platform_get_irq_byname(pdev, "onkey-falling");
	if (onkey->irq_falling < 0)
		return onkey->irq_falling;

	onkey->irq_rising = platform_get_irq_byname(pdev, "onkey-rising");
	if (onkey->irq_rising < 0)
		return onkey->irq_rising;

	/* optional long-press power-off: 1..16 s, encoded as (16 - val) */
	if (!device_property_read_u32(dev, "power-off-time-sec", &val)) {
		if (val > 0 && val <= 16) {
			dev_dbg(dev, "power-off-time=%d seconds\n", val);
			reg |= PONKEY_PWR_OFF;
			reg |= ((16 - val) & PONKEY_TURNOFF_TIMER_MASK);
		} else {
			dev_err(dev, "power-off-time-sec out of range\n");
			return -EINVAL;
		}
	}

	if (device_property_present(dev, "st,onkey-clear-cc-flag"))
		reg |= PONKEY_CC_FLAG_CLEAR;

	error = regmap_update_bits(pmic->regmap, PKEY_TURNOFF_CR,
				   PONKEY_TURNOFF_MASK, reg);
	if (error) {
		dev_err(dev, "PKEY_TURNOFF_CR write failed: %d\n", error);
		return error;
	}

	/* optionally deactivate the internal pull-up on the key pad */
	if (device_property_present(dev, "st,onkey-pu-inactive")) {
		error = regmap_update_bits(pmic->regmap, PADS_PULL_CR,
					   PONKEY_PU_INACTIVE,
					   PONKEY_PU_INACTIVE);
		if (error) {
			dev_err(dev, "ONKEY Pads configuration failed: %d\n",
				error);
			return error;
		}
	}

	input_dev = devm_input_allocate_device(dev);
	if (!input_dev) {
		dev_err(dev, "Can't allocate Pwr Onkey Input Device\n");
		return -ENOMEM;
	}

	input_dev->name = "pmic_onkey";
	input_dev->phys = "pmic_onkey/input0";

	input_set_capability(input_dev, EV_KEY, KEY_POWER);

	onkey->input_dev = input_dev;

	/* interrupt is nested in a thread */
	error = devm_request_threaded_irq(dev, onkey->irq_falling, NULL,
					  onkey_falling_irq, IRQF_ONESHOT,
					  dev_name(dev), onkey);
	if (error) {
		dev_err(dev, "Can't get IRQ Onkey Falling: %d\n", error);
		return error;
	}

	error = devm_request_threaded_irq(dev, onkey->irq_rising, NULL,
					  onkey_rising_irq, IRQF_ONESHOT,
					  dev_name(dev), onkey);
	if (error) {
		dev_err(dev, "Can't get IRQ Onkey Rising: %d\n", error);
		return error;
	}

	error = input_register_device(input_dev);
	if (error) {
		dev_err(dev, "Can't register power button: %d\n", error);
		return error;
	}

	platform_set_drvdata(pdev, onkey);
	device_init_wakeup(dev, true);

	return 0;
}
/* PM suspend: arm both edge IRQs as wakeup sources when allowed */
static int stpmic1_onkey_suspend(struct device *dev)
{
	struct stpmic1_onkey *onkey =
		platform_get_drvdata(to_platform_device(dev));

	if (device_may_wakeup(dev)) {
		enable_irq_wake(onkey->irq_falling);
		enable_irq_wake(onkey->irq_rising);
	}

	return 0;
}
/* PM resume: disarm the wakeup IRQs enabled in suspend */
static int stpmic1_onkey_resume(struct device *dev)
{
	struct stpmic1_onkey *onkey =
		platform_get_drvdata(to_platform_device(dev));

	if (device_may_wakeup(dev)) {
		disable_irq_wake(onkey->irq_falling);
		disable_irq_wake(onkey->irq_rising);
	}

	return 0;
}
/* Sleep-only PM ops (no runtime PM) */
static DEFINE_SIMPLE_DEV_PM_OPS(stpmic1_onkey_pm,
				stpmic1_onkey_suspend,
				stpmic1_onkey_resume);

static const struct of_device_id of_stpmic1_onkey_match[] = {
	{ .compatible = "st,stpmic1-onkey" },
	{ },
};

MODULE_DEVICE_TABLE(of, of_stpmic1_onkey_match);

static struct platform_driver stpmic1_onkey_driver = {
	.probe = stpmic1_onkey_probe,
	.driver = {
		.name = "stpmic1_onkey",
		.of_match_table = of_match_ptr(of_stpmic1_onkey_match),
		.pm = pm_sleep_ptr(&stpmic1_onkey_pm),
	},
};
module_platform_driver(stpmic1_onkey_driver);

MODULE_DESCRIPTION("Onkey driver for STPMIC1");
MODULE_AUTHOR("Pascal Paillet <[email protected]>");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/input/misc/stpmic1_onkey.c
|
/*
* Retu power button driver.
*
* Copyright (C) 2004-2010 Nokia Corporation
*
* Original code written by Ari Saastamoinen, Juha Yrjölä and Felipe Balbi.
* Rewritten by Aaro Koskinen.
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/retu.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#define RETU_STATUS_PWRONX (1 << 5)
/* Threaded IRQ handler: read the button state from the PMIC and report it.
 * PWRONX is active-low, so a clear bit means the button is held down. */
static irqreturn_t retu_pwrbutton_irq(int irq, void *_pwr)
{
	struct input_dev *idev = _pwr;
	struct retu_dev *rdev = input_get_drvdata(idev);

	input_report_key(idev, KEY_POWER,
			 !(retu_read(rdev, RETU_REG_STATUS) &
			   RETU_STATUS_PWRONX));
	input_sync(idev);

	return IRQ_HANDLED;
}
/*
 * Probe: allocate a device-managed input device reporting KEY_POWER,
 * hook up the PMIC interrupt, and register.  Everything is devm-managed,
 * so no explicit cleanup is needed.
 */
static int retu_pwrbutton_probe(struct platform_device *pdev)
{
	struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
	struct input_dev *idev;
	int irq;
	int error;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	idev = devm_input_allocate_device(&pdev->dev);
	if (!idev)
		return -ENOMEM;

	idev->name = "retu-pwrbutton";
	idev->dev.parent = &pdev->dev;

	input_set_capability(idev, EV_KEY, KEY_POWER);
	input_set_drvdata(idev, rdev);

	error = devm_request_threaded_irq(&pdev->dev, irq,
					  NULL, retu_pwrbutton_irq,
					  IRQF_ONESHOT,
					  "retu-pwrbutton", idev);
	if (error)
		return error;

	/* registration is the final step; its result is our result */
	return input_register_device(idev);
}
/* Platform driver glue; matched by name against the MFD cell */
static struct platform_driver retu_pwrbutton_driver = {
	.probe		= retu_pwrbutton_probe,
	.driver		= {
		.name	= "retu-pwrbutton",
	},
};
module_platform_driver(retu_pwrbutton_driver);

MODULE_ALIAS("platform:retu-pwrbutton");
MODULE_DESCRIPTION("Retu Power Button");
MODULE_AUTHOR("Ari Saastamoinen");
MODULE_AUTHOR("Felipe Balbi");
MODULE_AUTHOR("Aaro Koskinen <[email protected]>");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/retu-pwrbutton.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MAX8997-haptic controller driver
*
* Copyright (C) 2012 Samsung Electronics
* Donggeun Kim <[email protected]>
*
* This program is not provided / owned by Maxim Integrated Products.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/pwm.h>
#include <linux/input.h>
#include <linux/mfd/max8997-private.h>
#include <linux/mfd/max8997.h>
#include <linux/regulator/consumer.h>
/* Haptic configuration 2 register */
#define MAX8997_MOTOR_TYPE_SHIFT 7
#define MAX8997_ENABLE_SHIFT 6
#define MAX8997_MODE_SHIFT 5
/* Haptic driver configuration register */
#define MAX8997_CYCLE_SHIFT 6
#define MAX8997_SIG_PERIOD_SHIFT 4
#define MAX8997_SIG_DUTY_SHIFT 2
#define MAX8997_PWM_DUTY_SHIFT 0
/* Per-device state for the MAX8997 haptic motor controller */
struct max8997_haptic {
	struct device *dev;
	struct i2c_client *client;	/* haptic block's I2C client */
	struct input_dev *input_dev;
	struct regulator *regulator;	/* "inmotor" motor supply */

	struct work_struct work;	/* defers FF playback out of event context */
	struct mutex mutex;		/* serializes enable/disable paths */

	bool enabled;			/* motor currently powered */
	unsigned int level;		/* requested magnitude from the FF effect */

	struct pwm_device *pwm;		/* only used in MAX8997_EXTERNAL_MODE */
	unsigned int pwm_period;
	enum max8997_haptic_pwm_divisor pwm_divisor;
	enum max8997_haptic_motor_type type;
	enum max8997_haptic_pulse_mode mode;

	/* internal-mode pattern selection (pattern 0-3 and its parameters) */
	unsigned int internal_mode_pattern;
	unsigned int pattern_cycle;
	unsigned int pattern_signal_period;
};
/*
 * Program the vibration strength.
 *
 * External mode drives an external PWM: duty = period * level / 100.
 * Internal mode maps level onto a 6-bit duty index (1..64) and writes it
 * to the SIGPWMDC register matching the selected pattern.
 *
 * NOTE(review): both formulas assume level is a 0-100 percentage, but
 * chip->level is set from the raw FF rumble magnitude in
 * max8997_haptic_play_effect() -- confirm the expected range.
 */
static int max8997_haptic_set_duty_cycle(struct max8997_haptic *chip)
{
	int ret = 0;

	if (chip->mode == MAX8997_EXTERNAL_MODE) {
		unsigned int duty = chip->pwm_period * chip->level / 100;
		ret = pwm_config(chip->pwm, duty, chip->pwm_period);
	} else {
		u8 duty_index = 0;

		duty_index = DIV_ROUND_UP(chip->level * 64, 100);

		/* each internal pattern has its own duty-cycle register */
		switch (chip->internal_mode_pattern) {
		case 0:
			max8997_write_reg(chip->client,
				MAX8997_HAPTIC_REG_SIGPWMDC1, duty_index);
			break;
		case 1:
			max8997_write_reg(chip->client,
				MAX8997_HAPTIC_REG_SIGPWMDC2, duty_index);
			break;
		case 2:
			max8997_write_reg(chip->client,
				MAX8997_HAPTIC_REG_SIGPWMDC3, duty_index);
			break;
		case 3:
			max8997_write_reg(chip->client,
				MAX8997_HAPTIC_REG_SIGPWMDC4, duty_index);
			break;
		default:
			break;
		}
	}
	return ret;
}
/*
 * Write the haptic configuration registers.
 *
 * CONF2 always receives motor type, enable bit, mode, and PWM divisor.
 * In internal mode, when enabling, the driver-configuration register and
 * the per-pattern cycle/signal registers are also programmed.  Patterns
 * 0/2 occupy the high nibble and patterns 1/3 the low byte of the shared
 * CYCLECONF registers.
 */
static void max8997_haptic_configure(struct max8997_haptic *chip)
{
	u8 value;

	value = chip->type << MAX8997_MOTOR_TYPE_SHIFT |
		chip->enabled << MAX8997_ENABLE_SHIFT |
		chip->mode << MAX8997_MODE_SHIFT | chip->pwm_divisor;
	max8997_write_reg(chip->client, MAX8997_HAPTIC_REG_CONF2, value);

	if (chip->mode == MAX8997_INTERNAL_MODE && chip->enabled) {
		/* select the same pattern index in all four DRVCONF fields */
		value = chip->internal_mode_pattern << MAX8997_CYCLE_SHIFT |
			chip->internal_mode_pattern << MAX8997_SIG_PERIOD_SHIFT |
			chip->internal_mode_pattern << MAX8997_SIG_DUTY_SHIFT |
			chip->internal_mode_pattern << MAX8997_PWM_DUTY_SHIFT;
		max8997_write_reg(chip->client,
			MAX8997_HAPTIC_REG_DRVCONF, value);

		switch (chip->internal_mode_pattern) {
		case 0:
			value = chip->pattern_cycle << 4;
			max8997_write_reg(chip->client,
				MAX8997_HAPTIC_REG_CYCLECONF1, value);
			value = chip->pattern_signal_period;
			max8997_write_reg(chip->client,
				MAX8997_HAPTIC_REG_SIGCONF1, value);
			break;

		case 1:
			value = chip->pattern_cycle;
			max8997_write_reg(chip->client,
				MAX8997_HAPTIC_REG_CYCLECONF1, value);
			value = chip->pattern_signal_period;
			max8997_write_reg(chip->client,
				MAX8997_HAPTIC_REG_SIGCONF2, value);
			break;

		case 2:
			value = chip->pattern_cycle << 4;
			max8997_write_reg(chip->client,
				MAX8997_HAPTIC_REG_CYCLECONF2, value);
			value = chip->pattern_signal_period;
			max8997_write_reg(chip->client,
				MAX8997_HAPTIC_REG_SIGCONF3, value);
			break;

		case 3:
			value = chip->pattern_cycle;
			max8997_write_reg(chip->client,
				MAX8997_HAPTIC_REG_CYCLECONF2, value);
			value = chip->pattern_signal_period;
			max8997_write_reg(chip->client,
				MAX8997_HAPTIC_REG_SIGCONF4, value);
			break;

		default:
			break;
		}
	}
}
/*
 * Start (or retune) the motor.  The duty cycle is refreshed every call so
 * a changed level takes effect while already running; the power-up
 * sequence (regulator -> registers -> PWM) only runs on the off->on
 * transition.  Serialized against disable by chip->mutex.
 */
static void max8997_haptic_enable(struct max8997_haptic *chip)
{
	int error;

	mutex_lock(&chip->mutex);

	error = max8997_haptic_set_duty_cycle(chip);
	if (error) {
		dev_err(chip->dev, "set_pwm_cycle failed, error: %d\n", error);
		goto out;
	}

	if (!chip->enabled) {
		error = regulator_enable(chip->regulator);
		if (error) {
			dev_err(chip->dev, "Failed to enable regulator\n");
			goto out;
		}
		max8997_haptic_configure(chip);
		if (chip->mode == MAX8997_EXTERNAL_MODE) {
			error = pwm_enable(chip->pwm);
			if (error) {
				dev_err(chip->dev, "Failed to enable PWM\n");
				/* undo the regulator enable on PWM failure */
				regulator_disable(chip->regulator);
				goto out;
			}
		}
		chip->enabled = true;
	}

out:
	mutex_unlock(&chip->mutex);
}
/* Stop the motor: update registers, stop the PWM (external mode only),
 * then cut the motor supply.  No-op when already disabled. */
static void max8997_haptic_disable(struct max8997_haptic *chip)
{
	mutex_lock(&chip->mutex);

	if (!chip->enabled)
		goto unlock;

	chip->enabled = false;
	max8997_haptic_configure(chip);
	if (chip->mode == MAX8997_EXTERNAL_MODE)
		pwm_disable(chip->pwm);
	regulator_disable(chip->regulator);

unlock:
	mutex_unlock(&chip->mutex);
}
/* Deferred FF playback: a zero level means stop, anything else (re)starts */
static void max8997_haptic_play_effect_work(struct work_struct *work)
{
	struct max8997_haptic *chip =
		container_of(work, struct max8997_haptic, work);

	if (!chip->level)
		max8997_haptic_disable(chip);
	else
		max8997_haptic_enable(chip);
}
/* FF callback: record the requested magnitude (strong preferred, weak as
 * fallback) and defer the hardware access to the workqueue. */
static int max8997_haptic_play_effect(struct input_dev *dev, void *data,
				      struct ff_effect *effect)
{
	struct max8997_haptic *chip = input_get_drvdata(dev);

	chip->level = effect->u.rumble.strong_magnitude ?
			effect->u.rumble.strong_magnitude :
			effect->u.rumble.weak_magnitude;

	schedule_work(&chip->work);

	return 0;
}
/* input close handler: flush any queued playback work, then stop the motor */
static void max8997_haptic_close(struct input_dev *dev)
{
	struct max8997_haptic *chip = input_get_drvdata(dev);

	cancel_work_sync(&chip->work);
	max8997_haptic_disable(chip);
}
/*
 * Probe: validate platform data, allocate chip + input device, acquire
 * the PWM (external mode only) and the motor regulator, create a memless
 * force-feedback device, and register it.  Error paths unwind via the
 * goto chain in reverse acquisition order.
 */
static int max8997_haptic_probe(struct platform_device *pdev)
{
	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
	const struct max8997_platform_data *pdata =
					dev_get_platdata(iodev->dev);
	const struct max8997_haptic_platform_data *haptic_pdata = NULL;
	struct max8997_haptic *chip;
	struct input_dev *input_dev;
	int error;

	if (pdata)
		haptic_pdata = pdata->haptic_pdata;

	if (!haptic_pdata) {
		dev_err(&pdev->dev, "no haptic platform data\n");
		return -EINVAL;
	}

	chip = kzalloc(sizeof(struct max8997_haptic), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!chip || !input_dev) {
		dev_err(&pdev->dev, "unable to allocate memory\n");
		error = -ENOMEM;
		goto err_free_mem;
	}

	INIT_WORK(&chip->work, max8997_haptic_play_effect_work);
	mutex_init(&chip->mutex);

	/* copy the static configuration from platform data */
	chip->client = iodev->haptic;
	chip->dev = &pdev->dev;
	chip->input_dev = input_dev;
	chip->pwm_period = haptic_pdata->pwm_period;
	chip->type = haptic_pdata->type;
	chip->mode = haptic_pdata->mode;
	chip->pwm_divisor = haptic_pdata->pwm_divisor;

	switch (chip->mode) {
	case MAX8997_INTERNAL_MODE:
		chip->internal_mode_pattern =
				haptic_pdata->internal_mode_pattern;
		chip->pattern_cycle = haptic_pdata->pattern_cycle;
		chip->pattern_signal_period =
				haptic_pdata->pattern_signal_period;
		break;

	case MAX8997_EXTERNAL_MODE:
		chip->pwm = pwm_get(&pdev->dev, NULL);
		if (IS_ERR(chip->pwm)) {
			error = PTR_ERR(chip->pwm);
			dev_err(&pdev->dev,
				"unable to request PWM for haptic, error: %d\n",
				error);
			goto err_free_mem;
		}

		/*
		 * FIXME: pwm_apply_args() should be removed when switching to
		 * the atomic PWM API.
		 */
		pwm_apply_args(chip->pwm);
		break;

	default:
		dev_err(&pdev->dev,
			"Invalid chip mode specified (%d)\n", chip->mode);
		error = -EINVAL;
		goto err_free_mem;
	}

	chip->regulator = regulator_get(&pdev->dev, "inmotor");
	if (IS_ERR(chip->regulator)) {
		error = PTR_ERR(chip->regulator);
		dev_err(&pdev->dev,
			"unable to get regulator, error: %d\n",
			error);
		goto err_free_pwm;
	}

	input_dev->name = "max8997-haptic";
	input_dev->id.version = 1;
	input_dev->dev.parent = &pdev->dev;
	input_dev->close = max8997_haptic_close;
	input_set_drvdata(input_dev, chip);
	input_set_capability(input_dev, EV_FF, FF_RUMBLE);

	error = input_ff_create_memless(input_dev, NULL,
				max8997_haptic_play_effect);
	if (error) {
		dev_err(&pdev->dev,
			"unable to create FF device, error: %d\n",
			error);
		goto err_put_regulator;
	}

	error = input_register_device(input_dev);
	if (error) {
		dev_err(&pdev->dev,
			"unable to register input device, error: %d\n",
			error);
		goto err_destroy_ff;
	}

	platform_set_drvdata(pdev, chip);
	return 0;

err_destroy_ff:
	input_ff_destroy(input_dev);
err_put_regulator:
	regulator_put(chip->regulator);
err_free_pwm:
	if (chip->mode == MAX8997_EXTERNAL_MODE)
		pwm_put(chip->pwm);
err_free_mem:
	input_free_device(input_dev);
	kfree(chip);
	return error;
}
/*
 * Remove: unregister the input device (which also frees it), then release
 * the regulator, PWM, and chip.  NOTE(review): pending work is presumably
 * flushed via the input core invoking ->close() during unregistration --
 * confirm for the never-opened case.
 */
static int max8997_haptic_remove(struct platform_device *pdev)
{
	struct max8997_haptic *chip = platform_get_drvdata(pdev);

	input_unregister_device(chip->input_dev);
	regulator_put(chip->regulator);

	if (chip->mode == MAX8997_EXTERNAL_MODE)
		pwm_put(chip->pwm);

	kfree(chip);

	return 0;
}
/* PM suspend: simply make sure the motor is off before sleeping */
static int max8997_haptic_suspend(struct device *dev)
{
	struct max8997_haptic *chip =
		platform_get_drvdata(to_platform_device(dev));

	max8997_haptic_disable(chip);

	return 0;
}
/* Sleep-only PM ops: stop the motor on suspend, nothing to do on resume */
static DEFINE_SIMPLE_DEV_PM_OPS(max8997_haptic_pm_ops,
				max8997_haptic_suspend, NULL);

static const struct platform_device_id max8997_haptic_id[] = {
	{ "max8997-haptic", 0 },
	{ },
};
MODULE_DEVICE_TABLE(platform, max8997_haptic_id);

static struct platform_driver max8997_haptic_driver = {
	.driver	= {
		.name	= "max8997-haptic",
		.pm	= pm_sleep_ptr(&max8997_haptic_pm_ops),
	},
	.probe		= max8997_haptic_probe,
	.remove		= max8997_haptic_remove,
	.id_table	= max8997_haptic_id,
};
module_platform_driver(max8997_haptic_driver);

MODULE_AUTHOR("Donggeun Kim <[email protected]>");
MODULE_DESCRIPTION("max8997_haptic driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/max8997_haptic.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AD714X CapTouch Programmable Controller driver (SPI bus)
*
* Copyright 2009-2011 Analog Devices Inc.
*/
#include <linux/input.h> /* BUS_SPI */
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/pm.h>
#include <linux/types.h>
#include "ad714x.h"
#define AD714x_SPI_CMD_PREFIX 0xE000 /* bits 15:11 */
#define AD714x_SPI_READ BIT(10)
/*
 * Read @len consecutive 16-bit registers starting at @reg into @data.
 *
 * One SPI message with two transfers: the big-endian command word
 * (prefix | READ | reg) is clocked out first, then @len words are clocked
 * into chip->xfer_buf[1..].  Results are byte-swapped to CPU order.
 * Returns 0 on success or a negative SPI error.
 */
static int ad714x_spi_read(struct ad714x_chip *chip,
			   unsigned short reg, unsigned short *data, size_t len)
{
	struct spi_device *spi = to_spi_device(chip->dev);
	struct spi_message message;
	struct spi_transfer xfer[2];
	int i;
	int error;

	spi_message_init(&message);
	memset(xfer, 0, sizeof(xfer));

	chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX |
					AD714x_SPI_READ | reg);
	xfer[0].tx_buf = &chip->xfer_buf[0];
	xfer[0].len = sizeof(chip->xfer_buf[0]);
	spi_message_add_tail(&xfer[0], &message);

	xfer[1].rx_buf = &chip->xfer_buf[1];
	xfer[1].len = sizeof(chip->xfer_buf[1]) * len;
	spi_message_add_tail(&xfer[1], &message);

	error = spi_sync(spi, &message);
	if (unlikely(error)) {
		dev_err(chip->dev, "SPI read error: %d\n", error);
		return error;
	}

	for (i = 0; i < len; i++)
		data[i] = be16_to_cpu(chip->xfer_buf[i + 1]);

	return 0;
}
static int ad714x_spi_write(struct ad714x_chip *chip,
unsigned short reg, unsigned short data)
{
struct spi_device *spi = to_spi_device(chip->dev);
int error;
chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | reg);
chip->xfer_buf[1] = cpu_to_be16(data);
error = spi_write(spi, (u8 *)chip->xfer_buf,
2 * sizeof(*chip->xfer_buf));
if (unlikely(error)) {
dev_err(chip->dev, "SPI write error: %d\n", error);
return error;
}
return 0;
}
/*
 * SPI probe: configure the bus for 8-bit words, then delegate all chip
 * setup to the bus-agnostic ad714x core with our read/write accessors.
 */
static int ad714x_spi_probe(struct spi_device *spi)
{
	struct ad714x_chip *chip;
	int err;

	spi->bits_per_word = 8;
	err = spi_setup(spi);
	if (err < 0)
		return err;

	chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq,
			    ad714x_spi_read, ad714x_spi_write);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	spi_set_drvdata(spi, chip);

	return 0;
}
/* SPI driver glue; PM ops come from the shared ad714x core */
static struct spi_driver ad714x_spi_driver = {
	.driver = {
		.name	= "ad714x_captouch",
		.pm	= pm_sleep_ptr(&ad714x_pm),
	},
	.probe		= ad714x_spi_probe,
};
module_spi_driver(ad714x_spi_driver);

MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor SPI Bus Driver");
MODULE_AUTHOR("Barry Song <[email protected]>");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/ad714x-spi.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* rotary_encoder.c
*
* (c) 2009 Daniel Mack <[email protected]>
* Copyright (C) 2011 Johan Hovold <[email protected]>
*
* state machine code inspired by code from Tim Ruetz
*
* A generic driver for rotary encoders connected to GPIO lines.
* See file:Documentation/input/devices/rotary-encoder.rst for more information
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/property.h>
#define DRV_NAME "rotary-encoder"
/* How the two quadrature lines encode the position */
enum rotary_encoder_encoding {
	ROTENC_GRAY,	/* gray code: adjacent states differ by one bit */
	ROTENC_BINARY,	/* plain binary encoding */
};
/* Per-device state for one GPIO-connected rotary encoder */
struct rotary_encoder {
	struct input_dev *input;

	struct mutex access_mutex;	/* serializes the (threaded) IRQ handlers */

	u32 steps;			/* detents per revolution (abs axis range) */
	u32 axis;			/* REL_* or ABS_* axis code to report on */
	bool relative_axis;		/* report REL events instead of ABS */
	bool rollover;			/* abs position wraps instead of saturating */
	enum rotary_encoder_encoding encoding;

	unsigned int pos;		/* current absolute position */

	struct gpio_descs *gpios;	/* the quadrature input lines */

	unsigned int *irq;		/* one IRQ per GPIO line */

	bool armed;			/* full-period state machine: armed flag */
	signed char dir;		/* 1 - clockwise, -1 - CCW */

	unsigned int last_stable;	/* half-period mode: last even state seen */
};
/*
 * Sample all encoder GPIO lines and pack them, MSB first, into an integer.
 *
 * For Gray encoding each sampled bit is conditionally inverted based on
 * the previously accumulated bit, converting the result to plain binary.
 * Only the two least significant bits are returned; they are what the
 * IRQ state machines operate on.
 */
static unsigned int rotary_encoder_get_state(struct rotary_encoder *encoder)
{
	int i;
	unsigned int ret = 0;

	for (i = 0; i < encoder->gpios->ndescs; ++i) {
		int val = gpiod_get_value_cansleep(encoder->gpios->desc[i]);

		/* convert from gray encoding to normal */
		if (encoder->encoding == ROTENC_GRAY && ret & 1)
			val = !val;

		ret = ret << 1 | val;
	}

	return ret & 3;
}
/*
 * Translate the latched direction (encoder->dir) into an input event.
 *
 * Relative axes report the signed step directly; absolute axes maintain
 * an internal position that is clamped to [0, steps] or, with rollover
 * enabled, wrapped modulo the number of steps.
 */
static void rotary_encoder_report_event(struct rotary_encoder *encoder)
{
	if (encoder->relative_axis) {
		input_report_rel(encoder->input,
				 encoder->axis, encoder->dir);
	} else {
		unsigned int pos = encoder->pos;

		if (encoder->dir < 0) {
			/* turning counter-clockwise */
			if (encoder->rollover)
				pos += encoder->steps;
			if (pos)
				pos--;
		} else {
			/* turning clockwise */
			if (encoder->rollover || pos < encoder->steps)
				pos++;
		}

		if (encoder->rollover)
			pos %= encoder->steps;

		encoder->pos = pos;
		input_report_abs(encoder->input, encoder->axis, encoder->pos);
	}

	input_sync(encoder->input);
}
/*
 * Full-period IRQ handler (one reported event per period).
 *
 * State machine over the 2-bit line state: 0x2 arms the encoder,
 * 0x1/0x3 latch the direction while armed, and returning to the rest
 * state 0x0 reports the event and disarms again.
 */
static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
{
	struct rotary_encoder *encoder = dev_id;
	unsigned int state;

	mutex_lock(&encoder->access_mutex);

	state = rotary_encoder_get_state(encoder);

	switch (state) {
	case 0x0:
		if (encoder->armed) {
			rotary_encoder_report_event(encoder);
			encoder->armed = false;
		}
		break;
	case 0x1:
	case 0x3:
		if (encoder->armed)
			encoder->dir = 2 - state;	/* 0x1 -> +1, 0x3 -> -1 */
		break;
	case 0x2:
		encoder->armed = true;
		break;
	}

	mutex_unlock(&encoder->access_mutex);

	return IRQ_HANDLED;
}
/*
 * Half-period IRQ handler (two reported events per period).
 *
 * Odd (transitional) states only update the direction estimate relative
 * to the last stable state; a new even (stable) state reports movement.
 */
static irqreturn_t rotary_encoder_half_period_irq(int irq, void *dev_id)
{
	struct rotary_encoder *encoder = dev_id;
	unsigned int state;

	mutex_lock(&encoder->access_mutex);

	state = rotary_encoder_get_state(encoder);

	if (state & 1) {
		/* In transition: direction follows from the last stable state. */
		encoder->dir = ((encoder->last_stable - state + 1) % 4) - 1;
	} else {
		if (state != encoder->last_stable) {
			rotary_encoder_report_event(encoder);
			encoder->last_stable = state;
		}
	}

	mutex_unlock(&encoder->access_mutex);

	return IRQ_HANDLED;
}
/*
 * Quarter-period IRQ handler (every edge is a step).
 *
 * Direction follows from whether the new 2-bit state is the successor
 * or the predecessor of the previous one (mod 4); any other transition
 * is treated as bounce and ignored.
 */
static irqreturn_t rotary_encoder_quarter_period_irq(int irq, void *dev_id)
{
	struct rotary_encoder *encoder = dev_id;
	unsigned int state;

	mutex_lock(&encoder->access_mutex);

	state = rotary_encoder_get_state(encoder);

	if ((encoder->last_stable + 1) % 4 == state)
		encoder->dir = 1;
	else if (encoder->last_stable == (state + 1) % 4)
		encoder->dir = -1;
	else
		goto out;	/* spurious/bounced transition */

	rotary_encoder_report_event(encoder);

out:
	encoder->last_stable = state;
	mutex_unlock(&encoder->access_mutex);

	return IRQ_HANDLED;
}
/*
 * Probe: parse firmware properties, acquire the GPIO lines, select the
 * IRQ handler matching the configured steps-per-period, request one IRQ
 * per line and register the input device.
 */
static int rotary_encoder_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rotary_encoder *encoder;
	struct input_dev *input;
	irq_handler_t handler;
	u32 steps_per_period;
	unsigned int i;
	int err;

	encoder = devm_kzalloc(dev, sizeof(struct rotary_encoder), GFP_KERNEL);
	if (!encoder)
		return -ENOMEM;

	mutex_init(&encoder->access_mutex);

	device_property_read_u32(dev, "rotary-encoder,steps", &encoder->steps);

	err = device_property_read_u32(dev, "rotary-encoder,steps-per-period",
				       &steps_per_period);
	if (err) {
		/*
		 * The 'half-period' property has been deprecated, you must
		 * use 'steps-per-period' and set an appropriate value, but
		 * we still need to parse it to maintain compatibility. If
		 * neither property is present we fall back to the one step
		 * per period behavior.
		 */
		steps_per_period = device_property_read_bool(dev,
					"rotary-encoder,half-period") ? 2 : 1;
	}

	encoder->rollover =
		device_property_read_bool(dev, "rotary-encoder,rollover");

	/* Gray encoding is the default; an absent property selects it too. */
	if (!device_property_present(dev, "rotary-encoder,encoding") ||
	    !device_property_match_string(dev, "rotary-encoder,encoding",
					  "gray")) {
		dev_info(dev, "gray");
		encoder->encoding = ROTENC_GRAY;
	} else if (!device_property_match_string(dev, "rotary-encoder,encoding",
						 "binary")) {
		dev_info(dev, "binary");
		encoder->encoding = ROTENC_BINARY;
	} else {
		dev_err(dev, "unknown encoding setting\n");
		return -EINVAL;
	}

	device_property_read_u32(dev, "linux,axis", &encoder->axis);
	encoder->relative_axis =
		device_property_read_bool(dev, "rotary-encoder,relative-axis");

	encoder->gpios = devm_gpiod_get_array(dev, NULL, GPIOD_IN);
	if (IS_ERR(encoder->gpios))
		return dev_err_probe(dev, PTR_ERR(encoder->gpios), "unable to get gpios\n");
	if (encoder->gpios->ndescs < 2) {
		dev_err(dev, "not enough gpios found\n");
		return -EINVAL;
	}

	input = devm_input_allocate_device(dev);
	if (!input)
		return -ENOMEM;

	encoder->input = input;
	input->name = pdev->name;
	input->id.bustype = BUS_HOST;

	if (encoder->relative_axis)
		input_set_capability(input, EV_REL, encoder->axis);
	else
		input_set_abs_params(input,
				     encoder->axis, 0, encoder->steps, 0, 1);

	/*
	 * The selector is halved for every GPIO line beyond the first two,
	 * so only 4/2/1 effective steps-per-period are valid.
	 */
	switch (steps_per_period >> (encoder->gpios->ndescs - 2)) {
	case 4:
		handler = &rotary_encoder_quarter_period_irq;
		encoder->last_stable = rotary_encoder_get_state(encoder);
		break;
	case 2:
		handler = &rotary_encoder_half_period_irq;
		encoder->last_stable = rotary_encoder_get_state(encoder);
		break;
	case 1:
		handler = &rotary_encoder_irq;
		break;
	default:
		dev_err(dev, "'%d' is not a valid steps-per-period value\n",
			steps_per_period);
		return -EINVAL;
	}

	encoder->irq =
		devm_kcalloc(dev,
			     encoder->gpios->ndescs, sizeof(*encoder->irq),
			     GFP_KERNEL);
	if (!encoder->irq)
		return -ENOMEM;

	for (i = 0; i < encoder->gpios->ndescs; ++i) {
		encoder->irq[i] = gpiod_to_irq(encoder->gpios->desc[i]);

		err = devm_request_threaded_irq(dev, encoder->irq[i],
				NULL, handler,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
				IRQF_ONESHOT,
				DRV_NAME, encoder);
		if (err) {
			dev_err(dev, "unable to request IRQ %d (gpio#%d)\n",
				encoder->irq[i], i);
			return err;
		}
	}

	err = input_register_device(input);
	if (err) {
		dev_err(dev, "failed to register input device\n");
		return err;
	}

	device_init_wakeup(dev,
			   device_property_read_bool(dev, "wakeup-source"));

	platform_set_drvdata(pdev, encoder);

	return 0;
}
/* Arm every encoder interrupt as a wakeup source before suspending. */
static int rotary_encoder_suspend(struct device *dev)
{
	struct rotary_encoder *encoder = dev_get_drvdata(dev);
	unsigned int n;

	if (!device_may_wakeup(dev))
		return 0;

	for (n = 0; n < encoder->gpios->ndescs; n++)
		enable_irq_wake(encoder->irq[n]);

	return 0;
}
/* Undo the wakeup arming done in rotary_encoder_suspend(). */
static int rotary_encoder_resume(struct device *dev)
{
	struct rotary_encoder *encoder = dev_get_drvdata(dev);
	unsigned int n;

	if (!device_may_wakeup(dev))
		return 0;

	for (n = 0; n < encoder->gpios->ndescs; n++)
		disable_irq_wake(encoder->irq[n]);

	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(rotary_encoder_pm_ops,
rotary_encoder_suspend, rotary_encoder_resume);
#ifdef CONFIG_OF
static const struct of_device_id rotary_encoder_of_match[] = {
{ .compatible = "rotary-encoder", },
{ },
};
MODULE_DEVICE_TABLE(of, rotary_encoder_of_match);
#endif
static struct platform_driver rotary_encoder_driver = {
.probe = rotary_encoder_probe,
.driver = {
.name = DRV_NAME,
.pm = pm_sleep_ptr(&rotary_encoder_pm_ops),
.of_match_table = of_match_ptr(rotary_encoder_of_match),
}
};
module_platform_driver(rotary_encoder_driver);
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_DESCRIPTION("GPIO rotary encoder driver");
MODULE_AUTHOR("Daniel Mack <[email protected]>, Johan Hovold");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/input/misc/rotary_encoder.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support for the S1 button on Routerboard 532
*
* Copyright (C) 2009 Phil Sutter <[email protected]>
*/
#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <asm/mach-rc32434/gpio.h>
#include <asm/mach-rc32434/rb.h>
#define DRV_NAME "rb532-button"
#define RB532_BTN_RATE 100 /* msec */
#define RB532_BTN_KSYM BTN_0
/* The S1 button state is provided by GPIO pin 1. But as this
* pin is also used for uart input as alternate function, the
* operational modes must be switched first:
* 1) disable uart using set_latch_u5()
* 2) turn off alternate function implicitly through
* gpio_direction_input()
* 3) read the GPIO's current value
* 4) undo step 2 by enabling alternate function (in this
* mode the GPIO direction is fixed, so no change needed)
* 5) turn on uart again
* The GPIO value occurs to be inverted, so pin high means
* button is not pressed.
*/
/* Read the S1 button; the required mode-switch dance is described above. */
static bool rb532_button_pressed(void)
{
	int val;

	set_latch_u5(0, LO_FOFF);		/* step 1: disable uart */

	gpio_direction_input(GPIO_BTN_S1);	/* step 2: leave alt function */
	val = gpio_get_value(GPIO_BTN_S1);	/* step 3: sample the pin */
	rb532_gpio_set_func(GPIO_BTN_S1);	/* step 4: restore alt function */

	set_latch_u5(LO_FOFF, 0);		/* step 5: re-enable uart */

	return !val;	/* pin is active low: high means not pressed */
}
/* Poll callback: forward the current button state as a key event. */
static void rb532_button_poll(struct input_dev *input)
{
	input_report_key(input, RB532_BTN_KSYM, rb532_button_pressed());
	input_sync(input);
}
/* Register a polled input device for the S1 button. */
static int rb532_button_probe(struct platform_device *pdev)
{
	struct input_dev *input;
	int error;

	input = devm_input_allocate_device(&pdev->dev);
	if (!input)
		return -ENOMEM;

	input->name = "rb532 button";
	input->phys = "rb532/button0";
	input->id.bustype = BUS_HOST;

	input_set_capability(input, EV_KEY, RB532_BTN_KSYM);

	error = input_setup_polling(input, rb532_button_poll);
	if (error)
		return error;

	input_set_poll_interval(input, RB532_BTN_RATE);

	error = input_register_device(input);
	if (error)
		return error;

	return 0;
}
static struct platform_driver rb532_button_driver = {
.probe = rb532_button_probe,
.driver = {
.name = DRV_NAME,
},
};
module_platform_driver(rb532_button_driver);
MODULE_AUTHOR("Phil Sutter <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Support for S1 button on Routerboard 532");
MODULE_ALIAS("platform:" DRV_NAME);
|
linux-master
|
drivers/input/misc/rb532_button.c
|
// SPDX-License-Identifier: GPL-2.0+
//
// Power Button driver for RAVE SP
//
// Copyright (C) 2017 Zodiac Inflight Innovations
//
//
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/rave-sp.h>
#include <linux/platform_device.h>
#define RAVE_SP_EVNT_BUTTON_PRESS (RAVE_SP_EVNT_BASE + 0x00)
struct rave_sp_power_button {
struct input_dev *idev;
struct notifier_block nb;
};
/*
 * RAVE SP event notifier: translate button-press events into KEY_POWER
 * input reports; all other events are passed along the chain untouched.
 */
static int rave_sp_power_button_event(struct notifier_block *nb,
				      unsigned long action, void *data)
{
	struct rave_sp_power_button *pb =
		container_of(nb, struct rave_sp_power_button, nb);
	const u8 event = rave_sp_action_unpack_event(action);
	const u8 value = rave_sp_action_unpack_value(action);
	struct input_dev *idev = pb->idev;

	if (event == RAVE_SP_EVNT_BUTTON_PRESS) {
		input_report_key(idev, KEY_POWER, value);
		input_sync(idev);
		/* Event consumed: stop notifier chain traversal. */
		return NOTIFY_STOP;
	}

	return NOTIFY_DONE;
}
/* Probe: allocate the input device and hook into the RAVE SP event chain. */
static int rave_sp_pwrbutton_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rave_sp_power_button *pb;
	struct input_dev *idev;
	int error;

	pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
	if (!pb)
		return -ENOMEM;

	idev = devm_input_allocate_device(dev);
	if (!idev)
		return -ENOMEM;

	idev->name = pdev->name;

	input_set_capability(idev, EV_KEY, KEY_POWER);

	error = input_register_device(idev);
	if (error)
		return error;

	pb->idev = idev;
	pb->nb.notifier_call = rave_sp_power_button_event;
	/* Run ahead of default-priority (0) notifiers. */
	pb->nb.priority = 128;

	error = devm_rave_sp_register_event_notifier(dev, &pb->nb);
	if (error)
		return error;

	return 0;
}
static const struct of_device_id rave_sp_pwrbutton_of_match[] = {
{ .compatible = "zii,rave-sp-pwrbutton" },
{}
};
static struct platform_driver rave_sp_pwrbutton_driver = {
.probe = rave_sp_pwrbutton_probe,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = rave_sp_pwrbutton_of_match,
},
};
module_platform_driver(rave_sp_pwrbutton_driver);
MODULE_DEVICE_TABLE(of, rave_sp_pwrbutton_of_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrey Vostrikov <[email protected]>");
MODULE_AUTHOR("Nikita Yushchenko <[email protected]>");
MODULE_AUTHOR("Andrey Smirnov <[email protected]>");
MODULE_DESCRIPTION("RAVE SP Power Button driver");
|
linux-master
|
drivers/input/misc/rave-sp-pwrbutton.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
*
* A generic driver to read multiple gpio lines and translate the
* encoded numeric value into an input event.
*/
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
struct gpio_decoder {
struct gpio_descs *input_gpios;
struct device *dev;
u32 axis;
u32 last_stable;
};
/*
 * Read all input GPIOs and combine them, MSB first, into one integer.
 * Returns the decoded value or a negative errno if a line read fails.
 */
static int gpio_decoder_get_gpios_state(struct gpio_decoder *decoder)
{
	struct gpio_descs *gpios = decoder->input_gpios;
	unsigned int ret = 0;
	int i, val;

	for (i = 0; i < gpios->ndescs; i++) {
		val = gpiod_get_value_cansleep(gpios->desc[i]);
		if (val < 0) {
			dev_err(decoder->dev,
				"Error reading gpio %d: %d\n",
				desc_to_gpio(gpios->desc[i]), val);
			return val;
		}

		val = !!val;	/* normalize to 0/1 */
		ret = (ret << 1) | val;
	}

	return ret;
}
/* Poll callback: report the decoded value whenever it changes. */
static void gpio_decoder_poll_gpios(struct input_dev *input)
{
	struct gpio_decoder *decoder = input_get_drvdata(input);
	int state;

	state = gpio_decoder_get_gpios_state(decoder);
	/* Read errors (state < 0) are skipped; we retry on the next poll. */
	if (state >= 0 && state != decoder->last_stable) {
		input_report_abs(input, decoder->axis, state);
		input_sync(input);
		decoder->last_stable = state;
	}
}
/* Probe: acquire input GPIOs and register a polled absolute-axis device. */
static int gpio_decoder_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct gpio_decoder *decoder;
	struct input_dev *input;
	u32 max;
	int err;

	decoder = devm_kzalloc(dev, sizeof(*decoder), GFP_KERNEL);
	if (!decoder)
		return -ENOMEM;

	decoder->dev = dev;
	device_property_read_u32(dev, "linux,axis", &decoder->axis);

	decoder->input_gpios = devm_gpiod_get_array(dev, NULL, GPIOD_IN);
	if (IS_ERR(decoder->input_gpios)) {
		dev_err(dev, "unable to acquire input gpios\n");
		return PTR_ERR(decoder->input_gpios);
	}

	if (decoder->input_gpios->ndescs < 2) {
		dev_err(dev, "not enough gpios found\n");
		return -EINVAL;
	}

	/* Default maximum is all lines set, unless overridden in firmware. */
	if (device_property_read_u32(dev, "decoder-max-value", &max))
		max = (1U << decoder->input_gpios->ndescs) - 1;

	input = devm_input_allocate_device(dev);
	if (!input)
		return -ENOMEM;

	input_set_drvdata(input, decoder);

	input->name = pdev->name;
	input->id.bustype = BUS_HOST;
	input_set_abs_params(input, decoder->axis, 0, max, 0, 0);

	err = input_setup_polling(input, gpio_decoder_poll_gpios);
	if (err) {
		dev_err(dev, "failed to set up polling\n");
		return err;
	}

	err = input_register_device(input);
	if (err) {
		dev_err(dev, "failed to register input device\n");
		return err;
	}

	return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id gpio_decoder_of_match[] = {
{ .compatible = "gpio-decoder", },
{ },
};
MODULE_DEVICE_TABLE(of, gpio_decoder_of_match);
#endif
static struct platform_driver gpio_decoder_driver = {
.probe = gpio_decoder_probe,
.driver = {
.name = "gpio-decoder",
.of_match_table = of_match_ptr(gpio_decoder_of_match),
}
};
module_platform_driver(gpio_decoder_driver);
MODULE_DESCRIPTION("GPIO decoder input driver");
MODULE_AUTHOR("Vignesh R <[email protected]>");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/input/misc/gpio_decoder.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010-2011, 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2014, Sony Mobile Communications Inc.
*/
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#define PON_REV2 0x01
#define PON_SUBTYPE 0x05
#define PON_SUBTYPE_PRIMARY 0x01
#define PON_SUBTYPE_SECONDARY 0x02
#define PON_SUBTYPE_1REG 0x03
#define PON_SUBTYPE_GEN2_PRIMARY 0x04
#define PON_SUBTYPE_GEN2_SECONDARY 0x05
#define PON_SUBTYPE_GEN3_PBS 0x08
#define PON_SUBTYPE_GEN3_HLOS 0x09
#define PON_RT_STS 0x10
#define PON_KPDPWR_N_SET BIT(0)
#define PON_RESIN_N_SET BIT(1)
#define PON_GEN3_RESIN_N_SET BIT(6)
#define PON_GEN3_KPDPWR_N_SET BIT(7)
#define PON_PS_HOLD_RST_CTL 0x5a
#define PON_PS_HOLD_RST_CTL2 0x5b
#define PON_PS_HOLD_ENABLE BIT(7)
#define PON_PS_HOLD_TYPE_MASK 0x0f
#define PON_PS_HOLD_TYPE_WARM_RESET 1
#define PON_PS_HOLD_TYPE_SHUTDOWN 4
#define PON_PS_HOLD_TYPE_HARD_RESET 7
#define PON_PULL_CTL 0x70
#define PON_KPDPWR_PULL_UP BIT(1)
#define PON_RESIN_PULL_UP BIT(0)
#define PON_DBC_CTL 0x71
#define PON_DBC_DELAY_MASK_GEN1 0x7
#define PON_DBC_DELAY_MASK_GEN2 0xf
#define PON_DBC_SHIFT_GEN1 6
#define PON_DBC_SHIFT_GEN2 14
struct pm8941_data {
unsigned int pull_up_bit;
unsigned int status_bit;
bool supports_ps_hold_poff_config;
bool supports_debounce_config;
bool has_pon_pbs;
const char *name;
const char *phys;
};
struct pm8941_pwrkey {
struct device *dev;
int irq;
u32 baseaddr;
u32 pon_pbs_baseaddr;
struct regmap *regmap;
struct input_dev *input;
unsigned int revision;
unsigned int subtype;
struct notifier_block reboot_notifier;
u32 code;
u32 sw_debounce_time_us;
ktime_t sw_debounce_end_time;
bool last_status;
const struct pm8941_data *data;
};
/*
 * Reboot notifier: program the PS_HOLD reset behavior (shutdown, warm
 * reset or hard reset) to match the requested system transition.
 *
 * The enable bit is cleared before reconfiguring the reset type and set
 * again afterwards, with a mandatory delay between the writes.
 */
static int pm8941_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	struct pm8941_pwrkey *pwrkey = container_of(nb, struct pm8941_pwrkey,
						    reboot_notifier);
	unsigned int enable_reg;
	unsigned int reset_type;
	int error;

	/* PMICs with revision 0 have the enable bit in same register as ctrl */
	if (pwrkey->revision == 0)
		enable_reg = PON_PS_HOLD_RST_CTL;
	else
		enable_reg = PON_PS_HOLD_RST_CTL2;

	error = regmap_update_bits(pwrkey->regmap,
				   pwrkey->baseaddr + enable_reg,
				   PON_PS_HOLD_ENABLE,
				   0);
	if (error)
		dev_err(pwrkey->dev,
			"unable to clear ps hold reset enable: %d\n",
			error);

	/*
	 * Updates of PON_PS_HOLD_ENABLE requires 3 sleep cycles between
	 * writes.
	 */
	usleep_range(100, 1000);

	switch (code) {
	case SYS_HALT:
	case SYS_POWER_OFF:
		reset_type = PON_PS_HOLD_TYPE_SHUTDOWN;
		break;
	case SYS_RESTART:
	default:
		if (reboot_mode == REBOOT_WARM)
			reset_type = PON_PS_HOLD_TYPE_WARM_RESET;
		else
			reset_type = PON_PS_HOLD_TYPE_HARD_RESET;
		break;
	}

	error = regmap_update_bits(pwrkey->regmap,
				   pwrkey->baseaddr + PON_PS_HOLD_RST_CTL,
				   PON_PS_HOLD_TYPE_MASK,
				   reset_type);
	if (error)
		dev_err(pwrkey->dev, "unable to set ps hold reset type: %d\n",
			error);

	error = regmap_update_bits(pwrkey->regmap,
				   pwrkey->baseaddr + enable_reg,
				   PON_PS_HOLD_ENABLE,
				   PON_PS_HOLD_ENABLE);
	if (error)
		dev_err(pwrkey->dev, "unable to re-set enable: %d\n", error);

	return NOTIFY_DONE;
}
/*
 * Power key IRQ handler: read the real-time status bit, apply software
 * debounce and report press/release events.
 */
static irqreturn_t pm8941_pwrkey_irq(int irq, void *_data)
{
	struct pm8941_pwrkey *pwrkey = _data;
	unsigned int sts;
	int err;

	/* Drop events that arrive inside the software debounce window. */
	if (pwrkey->sw_debounce_time_us) {
		if (ktime_before(ktime_get(), pwrkey->sw_debounce_end_time)) {
			dev_dbg(pwrkey->dev,
				"ignoring key event received before debounce end %llu us\n",
				pwrkey->sw_debounce_end_time);
			return IRQ_HANDLED;
		}
	}

	err = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_RT_STS, &sts);
	if (err)
		return IRQ_HANDLED;

	sts &= pwrkey->data->status_bit;

	/* A release opens a new debounce window. */
	if (pwrkey->sw_debounce_time_us && !sts)
		pwrkey->sw_debounce_end_time = ktime_add_us(ktime_get(),
						pwrkey->sw_debounce_time_us);

	/*
	 * Simulate a press event in case a release event occurred without a
	 * corresponding press event.
	 */
	if (!pwrkey->last_status && !sts) {
		input_report_key(pwrkey->input, pwrkey->code, 1);
		input_sync(pwrkey->input);
	}
	pwrkey->last_status = sts;

	input_report_key(pwrkey->input, pwrkey->code, sts);
	input_sync(pwrkey->input);

	return IRQ_HANDLED;
}
static int pm8941_pwrkey_sw_debounce_init(struct pm8941_pwrkey *pwrkey)
{
unsigned int val, addr, mask;
int error;
if (pwrkey->data->has_pon_pbs && !pwrkey->pon_pbs_baseaddr) {
dev_err(pwrkey->dev,
"PON_PBS address missing, can't read HW debounce time\n");
return 0;
}
if (pwrkey->pon_pbs_baseaddr)
addr = pwrkey->pon_pbs_baseaddr + PON_DBC_CTL;
else
addr = pwrkey->baseaddr + PON_DBC_CTL;
error = regmap_read(pwrkey->regmap, addr, &val);
if (error)
return error;
if (pwrkey->subtype >= PON_SUBTYPE_GEN2_PRIMARY)
mask = 0xf;
else
mask = 0x7;
pwrkey->sw_debounce_time_us =
2 * USEC_PER_SEC / (1 << (mask - (val & mask)));
dev_dbg(pwrkey->dev, "SW debounce time = %u us\n",
pwrkey->sw_debounce_time_us);
return 0;
}
/* Allow the power key interrupt to wake the system while suspended. */
static int pm8941_pwrkey_suspend(struct device *dev)
{
	struct pm8941_pwrkey *pwrkey = dev_get_drvdata(dev);

	if (!device_may_wakeup(dev))
		return 0;

	enable_irq_wake(pwrkey->irq);
	return 0;
}
/* Undo the wakeup arming done in pm8941_pwrkey_suspend(). */
static int pm8941_pwrkey_resume(struct device *dev)
{
	struct pm8941_pwrkey *pwrkey = dev_get_drvdata(dev);

	if (!device_may_wakeup(dev))
		return 0;

	disable_irq_wake(pwrkey->irq);
	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(pm8941_pwr_key_pm_ops,
pm8941_pwrkey_suspend, pm8941_pwrkey_resume);
/*
 * Probe: locate the PON regmap and base address(es), read chip revision
 * and subtype, configure debounce and pull-up, request the IRQ and
 * register the input device (plus an optional reboot notifier).
 */
static int pm8941_pwrkey_probe(struct platform_device *pdev)
{
	struct pm8941_pwrkey *pwrkey;
	bool pull_up;
	struct device *parent;
	struct device_node *regmap_node;
	const __be32 *addr;
	u32 req_delay, mask, delay_shift;
	int error;

	/* Default debounce is 15625 us when not specified in DT. */
	if (of_property_read_u32(pdev->dev.of_node, "debounce", &req_delay))
		req_delay = 15625;

	if (req_delay > 2000000 || req_delay == 0) {
		dev_err(&pdev->dev, "invalid debounce time: %u\n", req_delay);
		return -EINVAL;
	}

	pull_up = of_property_read_bool(pdev->dev.of_node, "bias-pull-up");

	pwrkey = devm_kzalloc(&pdev->dev, sizeof(*pwrkey), GFP_KERNEL);
	if (!pwrkey)
		return -ENOMEM;

	pwrkey->dev = &pdev->dev;
	pwrkey->data = of_device_get_match_data(&pdev->dev);

	parent = pdev->dev.parent;
	regmap_node = pdev->dev.of_node;
	pwrkey->regmap = dev_get_regmap(parent, NULL);
	if (!pwrkey->regmap) {
		regmap_node = parent->of_node;
		/*
		 * We failed to get regmap for parent. Let's see if we are
		 * a child of pon node and read regmap and reg from its
		 * parent.
		 */
		pwrkey->regmap = dev_get_regmap(parent->parent, NULL);
		if (!pwrkey->regmap) {
			dev_err(&pdev->dev, "failed to locate regmap\n");
			return -ENODEV;
		}
	}

	addr = of_get_address(regmap_node, 0, NULL, NULL);
	if (!addr) {
		dev_err(&pdev->dev, "reg property missing\n");
		return -EINVAL;
	}
	pwrkey->baseaddr = be32_to_cpup(addr);

	if (pwrkey->data->has_pon_pbs) {
		/* PON_PBS base address is optional */
		addr = of_get_address(regmap_node, 1, NULL, NULL);
		if (addr)
			pwrkey->pon_pbs_baseaddr = be32_to_cpup(addr);
	}

	pwrkey->irq = platform_get_irq(pdev, 0);
	if (pwrkey->irq < 0)
		return pwrkey->irq;

	error = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_REV2,
			    &pwrkey->revision);
	if (error) {
		dev_err(&pdev->dev, "failed to read revision: %d\n", error);
		return error;
	}

	error = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_SUBTYPE,
			    &pwrkey->subtype);
	if (error) {
		dev_err(&pdev->dev, "failed to read subtype: %d\n", error);
		return error;
	}

	error = of_property_read_u32(pdev->dev.of_node, "linux,code",
				     &pwrkey->code);
	if (error) {
		dev_dbg(&pdev->dev,
			"no linux,code assuming power (%d)\n", error);
		pwrkey->code = KEY_POWER;
	}

	pwrkey->input = devm_input_allocate_device(&pdev->dev);
	if (!pwrkey->input) {
		dev_dbg(&pdev->dev, "unable to allocate input device\n");
		return -ENOMEM;
	}

	input_set_capability(pwrkey->input, EV_KEY, pwrkey->code);

	pwrkey->input->name = pwrkey->data->name;
	pwrkey->input->phys = pwrkey->data->phys;

	if (pwrkey->data->supports_debounce_config) {
		/* Convert the requested delay (us) into the register field. */
		if (pwrkey->subtype >= PON_SUBTYPE_GEN2_PRIMARY) {
			mask = PON_DBC_DELAY_MASK_GEN2;
			delay_shift = PON_DBC_SHIFT_GEN2;
		} else {
			mask = PON_DBC_DELAY_MASK_GEN1;
			delay_shift = PON_DBC_SHIFT_GEN1;
		}

		req_delay = (req_delay << delay_shift) / USEC_PER_SEC;
		req_delay = ilog2(req_delay);

		error = regmap_update_bits(pwrkey->regmap,
					   pwrkey->baseaddr + PON_DBC_CTL,
					   mask,
					   req_delay);
		if (error) {
			dev_err(&pdev->dev, "failed to set debounce: %d\n",
				error);
			return error;
		}
	}

	error = pm8941_pwrkey_sw_debounce_init(pwrkey);
	if (error)
		return error;

	if (pwrkey->data->pull_up_bit) {
		error = regmap_update_bits(pwrkey->regmap,
					   pwrkey->baseaddr + PON_PULL_CTL,
					   pwrkey->data->pull_up_bit,
					   pull_up ? pwrkey->data->pull_up_bit :
						     0);
		if (error) {
			dev_err(&pdev->dev, "failed to set pull: %d\n", error);
			return error;
		}
	}

	error = devm_request_threaded_irq(&pdev->dev, pwrkey->irq,
					  NULL, pm8941_pwrkey_irq,
					  IRQF_ONESHOT,
					  pwrkey->data->name, pwrkey);
	if (error) {
		dev_err(&pdev->dev, "failed requesting IRQ: %d\n", error);
		return error;
	}

	error = input_register_device(pwrkey->input);
	if (error) {
		dev_err(&pdev->dev, "failed to register input device: %d\n",
			error);
		return error;
	}

	if (pwrkey->data->supports_ps_hold_poff_config) {
		pwrkey->reboot_notifier.notifier_call = pm8941_reboot_notify;
		error = register_reboot_notifier(&pwrkey->reboot_notifier);
		if (error) {
			dev_err(&pdev->dev, "failed to register reboot notifier: %d\n",
				error);
			return error;
		}
	}

	platform_set_drvdata(pdev, pwrkey);
	device_init_wakeup(&pdev->dev, 1);

	return 0;
}
/* Unbind: drop the reboot notifier if probe registered one. */
static int pm8941_pwrkey_remove(struct platform_device *pdev)
{
	struct pm8941_pwrkey *priv = platform_get_drvdata(pdev);

	if (priv->data->supports_ps_hold_poff_config)
		unregister_reboot_notifier(&priv->reboot_notifier);

	return 0;
}
static const struct pm8941_data pwrkey_data = {
.pull_up_bit = PON_KPDPWR_PULL_UP,
.status_bit = PON_KPDPWR_N_SET,
.name = "pm8941_pwrkey",
.phys = "pm8941_pwrkey/input0",
.supports_ps_hold_poff_config = true,
.supports_debounce_config = true,
.has_pon_pbs = false,
};
static const struct pm8941_data resin_data = {
.pull_up_bit = PON_RESIN_PULL_UP,
.status_bit = PON_RESIN_N_SET,
.name = "pm8941_resin",
.phys = "pm8941_resin/input0",
.supports_ps_hold_poff_config = true,
.supports_debounce_config = true,
.has_pon_pbs = false,
};
static const struct pm8941_data pon_gen3_pwrkey_data = {
.status_bit = PON_GEN3_KPDPWR_N_SET,
.name = "pmic_pwrkey",
.phys = "pmic_pwrkey/input0",
.supports_ps_hold_poff_config = false,
.supports_debounce_config = false,
.has_pon_pbs = true,
};
static const struct pm8941_data pon_gen3_resin_data = {
.status_bit = PON_GEN3_RESIN_N_SET,
.name = "pmic_resin",
.phys = "pmic_resin/input0",
.supports_ps_hold_poff_config = false,
.supports_debounce_config = false,
.has_pon_pbs = true,
};
static const struct of_device_id pm8941_pwr_key_id_table[] = {
{ .compatible = "qcom,pm8941-pwrkey", .data = &pwrkey_data },
{ .compatible = "qcom,pm8941-resin", .data = &resin_data },
{ .compatible = "qcom,pmk8350-pwrkey", .data = &pon_gen3_pwrkey_data },
{ .compatible = "qcom,pmk8350-resin", .data = &pon_gen3_resin_data },
{ }
};
MODULE_DEVICE_TABLE(of, pm8941_pwr_key_id_table);
static struct platform_driver pm8941_pwrkey_driver = {
.probe = pm8941_pwrkey_probe,
.remove = pm8941_pwrkey_remove,
.driver = {
.name = "pm8941-pwrkey",
.pm = pm_sleep_ptr(&pm8941_pwr_key_pm_ops),
.of_match_table = of_match_ptr(pm8941_pwr_key_id_table),
},
};
module_platform_driver(pm8941_pwrkey_driver);
MODULE_DESCRIPTION("PM8941 Power Key driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/input/misc/pm8941-pwrkey.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ADLX345/346 Three-Axis Digital Accelerometers (I2C Interface)
*
* Enter bugs at http://blackfin.uclinux.org/
*
* Copyright (C) 2009 Michael Hennerich, Analog Devices Inc.
*/
#include <linux/input.h> /* BUS_I2C */
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/types.h>
#include <linux/pm.h>
#include "adxl34x.h"
/* Read one register via SMBus byte-data access. */
static int adxl34x_smbus_read(struct device *dev, unsigned char reg)
{
	struct i2c_client *client = to_i2c_client(dev);

	return i2c_smbus_read_byte_data(client, reg);
}
/* Write one register via SMBus byte-data access. */
static int adxl34x_smbus_write(struct device *dev,
			       unsigned char reg, unsigned char val)
{
	struct i2c_client *client = to_i2c_client(dev);

	return i2c_smbus_write_byte_data(client, reg, val);
}
/* Read @count consecutive registers via an SMBus I2C block transfer. */
static int adxl34x_smbus_read_block(struct device *dev,
				    unsigned char reg, int count,
				    void *buf)
{
	struct i2c_client *client = to_i2c_client(dev);

	return i2c_smbus_read_i2c_block_data(client, reg, count, buf);
}
/*
 * Read @count registers starting at @reg using raw I2C transfers.
 *
 * Fallback for adapters without SMBus I2C-block support: a one-byte
 * write selects the start register, then a plain read fetches the data.
 *
 * Returns 0 on success or a negative errno.
 */
static int adxl34x_i2c_read_block(struct device *dev,
				  unsigned char reg, int count,
				  void *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	int ret;

	/*
	 * Select the start register.  (Restores "&reg", which had been
	 * corrupted into the mojibake character "®" — invalid C.)
	 */
	ret = i2c_master_send(client, &reg, 1);
	if (ret < 0)
		return ret;

	ret = i2c_master_recv(client, buf, count);
	if (ret < 0)
		return ret;

	/* A short read is an I/O error, not success. */
	if (ret != count)
		return -EIO;

	return 0;
}
static const struct adxl34x_bus_ops adxl34x_smbus_bops = {
.bustype = BUS_I2C,
.write = adxl34x_smbus_write,
.read = adxl34x_smbus_read,
.read_block = adxl34x_smbus_read_block,
};
static const struct adxl34x_bus_ops adxl34x_i2c_bops = {
.bustype = BUS_I2C,
.write = adxl34x_smbus_write,
.read = adxl34x_smbus_read,
.read_block = adxl34x_i2c_read_block,
};
/* I2C probe: verify adapter capabilities and hand off to the core driver. */
static int adxl34x_i2c_probe(struct i2c_client *client)
{
	struct adxl34x *ac;
	int error;

	/*
	 * Note: i2c_check_functionality() returns a capability flag
	 * (non-zero when supported), not an error code, despite the
	 * variable's name.
	 */
	error = i2c_check_functionality(client->adapter,
					I2C_FUNC_SMBUS_BYTE_DATA);
	if (!error) {
		dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
		return -EIO;
	}

	/* Prefer SMBus block reads when the adapter supports them. */
	ac = adxl34x_probe(&client->dev, client->irq, false,
			   i2c_check_functionality(client->adapter,
						   I2C_FUNC_SMBUS_READ_I2C_BLOCK) ?
				&adxl34x_smbus_bops : &adxl34x_i2c_bops);
	if (IS_ERR(ac))
		return PTR_ERR(ac);

	i2c_set_clientdata(client, ac);

	return 0;
}
/* Unbind: delegate teardown to the adxl34x core. */
static void adxl34x_i2c_remove(struct i2c_client *client)
{
	struct adxl34x *ac = i2c_get_clientdata(client);

	adxl34x_remove(ac);
}
static const struct i2c_device_id adxl34x_id[] = {
{ "adxl34x", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adxl34x_id);
static const struct of_device_id adxl34x_of_id[] = {
/*
* The ADXL346 is backward-compatible with the ADXL345. Differences are
* handled by runtime detection of the device model, there's thus no
* need for listing the "adi,adxl346" compatible value explicitly.
*/
{ .compatible = "adi,adxl345", },
/*
* Deprecated, DT nodes should use one or more of the device-specific
* compatible values "adi,adxl345" and "adi,adxl346".
*/
{ .compatible = "adi,adxl34x", },
{ }
};
MODULE_DEVICE_TABLE(of, adxl34x_of_id);
static struct i2c_driver adxl34x_driver = {
.driver = {
.name = "adxl34x",
.pm = pm_sleep_ptr(&adxl34x_pm),
.of_match_table = adxl34x_of_id,
},
.probe = adxl34x_i2c_probe,
.remove = adxl34x_i2c_remove,
.id_table = adxl34x_id,
};
module_i2c_driver(adxl34x_driver);
MODULE_AUTHOR("Michael Hennerich <[email protected]>");
MODULE_DESCRIPTION("ADXL345/346 Three-Axis Digital Accelerometer I2C Bus Driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/adxl34x-i2c.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014, National Instruments Corp. All rights reserved.
*
* Driver for NI Ettus Research USRP E3x0 Button Driver
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/slab.h>
/* "release" IRQ: report the power button as released. */
static irqreturn_t e3x0_button_release_handler(int irq, void *data)
{
	struct input_dev *idev = data;

	input_report_key(idev, KEY_POWER, 0);
	input_sync(idev);

	return IRQ_HANDLED;
}
/* "press" IRQ: report the power button as pressed and flag a wakeup event. */
static irqreturn_t e3x0_button_press_handler(int irq, void *data)
{
	struct input_dev *idev = data;

	input_report_key(idev, KEY_POWER, 1);
	pm_wakeup_event(idev->dev.parent, 0);
	input_sync(idev);

	return IRQ_HANDLED;
}
/* Make the "press" interrupt a wakeup source while suspended. */
static int e3x0_button_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	if (!device_may_wakeup(dev))
		return 0;

	enable_irq_wake(platform_get_irq_byname(pdev, "press"));
	return 0;
}
/* Undo the wakeup arming done in e3x0_button_suspend(). */
static int e3x0_button_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	if (!device_may_wakeup(dev))
		return 0;

	disable_irq_wake(platform_get_irq_byname(pdev, "press"));
	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(e3x0_button_pm_ops,
e3x0_button_suspend, e3x0_button_resume);
/* Probe: look up the press/release IRQs, request both, register input. */
static int e3x0_button_probe(struct platform_device *pdev)
{
	struct input_dev *input;
	int irq_press, irq_release;
	int error;

	irq_press = platform_get_irq_byname(pdev, "press");
	if (irq_press < 0)
		return irq_press;

	irq_release = platform_get_irq_byname(pdev, "release");
	if (irq_release < 0)
		return irq_release;

	input = devm_input_allocate_device(&pdev->dev);
	if (!input)
		return -ENOMEM;

	input->name = "NI Ettus Research USRP E3x0 Button Driver";
	input->phys = "e3x0_button/input0";
	input->dev.parent = &pdev->dev;

	input_set_capability(input, EV_KEY, KEY_POWER);

	error = devm_request_irq(&pdev->dev, irq_press,
				 e3x0_button_press_handler, 0,
				 "e3x0-button", input);
	if (error) {
		dev_err(&pdev->dev, "Failed to request 'press' IRQ#%d: %d\n",
			irq_press, error);
		return error;
	}

	error = devm_request_irq(&pdev->dev, irq_release,
				 e3x0_button_release_handler, 0,
				 "e3x0-button", input);
	if (error) {
		dev_err(&pdev->dev, "Failed to request 'release' IRQ#%d: %d\n",
			irq_release, error);
		return error;
	}

	error = input_register_device(input);
	if (error) {
		dev_err(&pdev->dev, "Can't register input device: %d\n", error);
		return error;
	}

	device_init_wakeup(&pdev->dev, 1);
	return 0;
}
#ifdef CONFIG_OF
/* Device-tree binding: matches "ettus,e3x0-button". */
static const struct of_device_id e3x0_button_match[] = {
        { .compatible = "ettus,e3x0-button", },
        { }
};
MODULE_DEVICE_TABLE(of, e3x0_button_match);
#endif

static struct platform_driver e3x0_button_driver = {
        .driver = {
                .name = "e3x0-button",
                .of_match_table = of_match_ptr(e3x0_button_match),
                .pm = pm_sleep_ptr(&e3x0_button_pm_ops),
        },
        .probe = e3x0_button_probe,
};
module_platform_driver(e3x0_button_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Moritz Fischer <[email protected]>");
MODULE_DESCRIPTION("NI Ettus Research USRP E3x0 Button driver");
MODULE_ALIAS("platform:e3x0-button");
|
linux-master
|
drivers/input/misc/e3x0-button.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for the VoIP USB phones with CM109 chipsets.
*
* Copyright (C) 2007 - 2008 Alfred E. Heggestad <[email protected]>
*/
/*
* Tested devices:
* - Komunikate KIP1000
* - Genius G-talk
* - Allied-Telesis Corega USBPH01
* - ...
*
* This driver is based on the yealink.c driver
*
* Thanks to:
* - Authors of yealink.c
* - Thomas Reitmayr
* - Oliver Neukum for good review comments and code
* - Shaun Jackman <[email protected]> for Genius G-talk keymap
* - Dmitry Torokhov for valuable input and review
*
* Todo:
* - Read/write EEPROM
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/usb/input.h>
#define DRIVER_VERSION "20080805"
#define DRIVER_AUTHOR "Alfred E. Heggestad"
#define DRIVER_DESC "CM109 phone driver"
/* Keymap selection; settable only at module load time (read-only in sysfs). */
static char *phone = "kip1000";
module_param(phone, charp, S_IRUSR);
MODULE_PARM_DESC(phone, "Phone name {kip1000, gtalk, usbph01, atcom}");
enum {
        /* HID Registers */
        HID_IR0 = 0x00, /* Record/Playback-mute button, Volume up/down */
        HID_IR1 = 0x01, /* GPI, generic registers or EEPROM_DATA0 */
        HID_IR2 = 0x02, /* Generic registers or EEPROM_DATA1 */
        HID_IR3 = 0x03, /* Generic registers or EEPROM_CTRL */
        HID_OR0 = 0x00, /* Mapping control, buzzer, SPDIF (offset 0x04) */
        HID_OR1 = 0x01, /* GPO - General Purpose Output */
        HID_OR2 = 0x02, /* Set GPIO to input/output mode */
        HID_OR3 = 0x03, /* SPDIF status channel or EEPROM_CTRL */

        /* HID_IR0 bit masks */
        RECORD_MUTE   = 1 << 3,
        PLAYBACK_MUTE = 1 << 2,
        VOLUME_DOWN   = 1 << 1,
        VOLUME_UP     = 1 << 0,

        /* HID_OR0 */
        /* bits 7-6
           0: HID_OR1-2 are used for GPO; HID_OR0, 3 are used for buzzer
              and SPDIF
           1: HID_OR0-3 are used as generic HID registers
           2: Values written to HID_OR0-3 are also mapped to MCU_CTRL,
              EEPROM_DATA0-1, EEPROM_CTRL (see Note)
           3: Reserved
         */
        HID_OR_GPO_BUZ_SPDIF   = 0 << 6,
        HID_OR_GENERIC_HID_REG = 1 << 6,
        HID_OR_MAP_MCU_EEPROM  = 2 << 6,

        BUZZER_ON = 1 << 5,

        /* up to 256 normal keys, up to 15 special key combinations */
        KEYMAP_SIZE = 256 + 15,
};

/* CM109 protocol packet: 4 raw HID register bytes on the wire */
struct cm109_ctl_packet {
        u8 byte[4];
} __attribute__ ((packed));

enum { USB_PKT_LEN = sizeof(struct cm109_ctl_packet) };

/* CM109 device structure */
struct cm109_dev {
        struct input_dev *idev;  /* input device */
        struct usb_device *udev; /* usb device */
        struct usb_interface *intf;

        /* irq input channel (key state from the device) */
        struct cm109_ctl_packet *irq_data;
        dma_addr_t irq_dma;
        struct urb *urb_irq;

        /* control output channel (scan column / buzzer to the device) */
        struct cm109_ctl_packet *ctl_data;
        dma_addr_t ctl_dma;
        struct usb_ctrlrequest *ctl_req;
        struct urb *urb_ctl;
        /*
         * The 3 bitfields below are protected by ctl_submit_lock.
         * They have to be separate since they are accessed from IRQ
         * context.
         */
        unsigned irq_urb_pending:1;     /* irq_urb is in flight */
        unsigned ctl_urb_pending:1;     /* ctl_urb is in flight */
        unsigned buzzer_pending:1;      /* need to issue buzz command */
        spinlock_t ctl_submit_lock;

        unsigned char buzzer_state;     /* on/off */

        /* flags */
        unsigned open:1;
        unsigned resetting:1;
        unsigned shutdown:1;

        /* This mutex protects writes to the above flags */
        struct mutex pm_mutex;

        unsigned short keymap[KEYMAP_SIZE];

        char phys[64];          /* physical device path */
        int key_code;           /* last reported key, -1 when none pressed */
        int keybit;             /* 0xf=new scan 1,2,4,8=scan columns */
        u8 gpi;                 /* Cached value of GPI (high nibble) */
};
/******************************************************************************
* CM109 key interface
*****************************************************************************/
/*
 * Map a "special" scan code (volume/mute bits, offset past the normal
 * 0x00-0xff range) to an input keycode; KEY_RESERVED when not special.
 */
static unsigned short special_keymap(int code)
{
        if (code <= 0xff)
                return KEY_RESERVED;

        switch (code - 0xff) {
        case RECORD_MUTE:
                return KEY_MICMUTE;
        case PLAYBACK_MUTE:
                return KEY_MUTE;
        case VOLUME_DOWN:
                return KEY_VOLUMEDOWN;
        case VOLUME_UP:
                return KEY_VOLUMEUP;
        default:
                return KEY_RESERVED;
        }
}
/* Map device buttons to internal key events.
*
* The "up" and "down" keys, are symbolised by arrows on the button.
* The "pickup" and "hangup" keys are symbolised by a green and red phone
* on the button.
Komunikate KIP1000 Keyboard Matrix
-> -- 1 -- 2 -- 3 --> GPI pin 4 (0x10)
| | | |
<- -- 4 -- 5 -- 6 --> GPI pin 5 (0x20)
| | | |
END - 7 -- 8 -- 9 --> GPI pin 6 (0x40)
| | | |
OK -- * -- 0 -- # --> GPI pin 7 (0x80)
| | | |
/|\ /|\ /|\ /|\
| | | |
GPO
pin: 3 2 1 0
0x8 0x4 0x2 0x1
*/
/* Komunikate KIP1000 keymap, ordered by scancode (GPO column | GPI row). */
static unsigned short keymap_kip1000(int scancode)
{
        switch (scancode) {                             /* phone key: */
        case 0x11: return KEY_NUMERIC_3;                /* 3 */
        case 0x12: return KEY_NUMERIC_2;                /* 2 */
        case 0x14: return KEY_NUMERIC_1;                /* 1 */
        case 0x18: return KEY_RIGHT;                    /* OUT */
        case 0x21: return KEY_NUMERIC_6;                /* 6 */
        case 0x22: return KEY_NUMERIC_5;                /* 5 */
        case 0x24: return KEY_NUMERIC_4;                /* 4 */
        case 0x28: return KEY_LEFT;                     /* IN */
        case 0x41: return KEY_NUMERIC_9;                /* 9 */
        case 0x42: return KEY_NUMERIC_8;                /* 8 */
        case 0x44: return KEY_NUMERIC_7;                /* 7 */
        case 0x48: return KEY_ESC;                      /* hangup */
        case 0x81: return KEY_NUMERIC_POUND;            /* # */
        case 0x82: return KEY_NUMERIC_0;                /* 0 */
        case 0x84: return KEY_NUMERIC_STAR;             /* * */
        case 0x88: return KEY_ENTER;                    /* pickup */
        default:   return special_keymap(scancode);
        }
}
/*
Contributed by Shaun Jackman <[email protected]>
Genius G-Talk keyboard matrix
0 1 2 3
4: 0 4 8 Talk
5: 1 5 9 End
6: 2 6 # Up
7: 3 7 * Down
*/
/* Genius G-Talk keymap, ordered by scancode (see matrix comment above). */
static unsigned short keymap_gtalk(int scancode)
{
        switch (scancode) {
        case 0x11: return KEY_NUMERIC_0;
        case 0x12: return KEY_NUMERIC_4;
        case 0x14: return KEY_NUMERIC_8;
        case 0x18: return KEY_ENTER;            /* Talk (green handset) */
        case 0x21: return KEY_NUMERIC_1;
        case 0x22: return KEY_NUMERIC_5;
        case 0x24: return KEY_NUMERIC_9;
        case 0x28: return KEY_ESC;              /* End (red handset) */
        case 0x41: return KEY_NUMERIC_2;
        case 0x42: return KEY_NUMERIC_6;
        case 0x44: return KEY_NUMERIC_POUND;    /* # */
        case 0x48: return KEY_UP;               /* Menu up (rocker switch) */
        case 0x81: return KEY_NUMERIC_3;
        case 0x82: return KEY_NUMERIC_7;
        case 0x84: return KEY_NUMERIC_STAR;     /* * */
        case 0x88: return KEY_DOWN;             /* Menu down (rocker switch) */
        default:   return special_keymap(scancode);
        }
}
/*
* Keymap for Allied-Telesis Corega USBPH01
* http://www.alliedtelesis-corega.com/2/1344/1437/1360/chprd.html
*
* Contributed by [email protected]
*/
/* Allied-Telesis Corega USBPH01 keymap, ordered by scancode. */
static unsigned short keymap_usbph01(int scancode)
{
        switch (scancode) {
        case 0x11: return KEY_NUMERIC_0;        /* 0 */
        case 0x12: return KEY_NUMERIC_4;        /* 4 */
        case 0x14: return KEY_NUMERIC_8;        /* 8 */
        case 0x18: return KEY_ENTER;            /* pickup */
        case 0x21: return KEY_NUMERIC_1;        /* 1 */
        case 0x22: return KEY_NUMERIC_5;        /* 5 */
        case 0x24: return KEY_NUMERIC_9;        /* 9 */
        case 0x28: return KEY_ESC;              /* hangup */
        case 0x41: return KEY_NUMERIC_2;        /* 2 */
        case 0x42: return KEY_NUMERIC_6;        /* 6 */
        case 0x44: return KEY_NUMERIC_POUND;    /* # */
        case 0x48: return KEY_LEFT;             /* IN */
        case 0x81: return KEY_NUMERIC_3;        /* 3 */
        case 0x82: return KEY_NUMERIC_7;        /* 7 */
        case 0x84: return KEY_NUMERIC_STAR;     /* * */
        case 0x88: return KEY_RIGHT;            /* OUT */
        default:   return special_keymap(scancode);
        }
}
/*
* Keymap for ATCom AU-100
* http://www.atcom.cn/products.html
* http://www.packetizer.com/products/au100/
* http://www.voip-info.org/wiki/view/AU-100
*
* Contributed by [email protected]
*/
/* ATCom AU-100 keymap, ordered by scancode. */
static unsigned short keymap_atcom(int scancode)
{
        switch (scancode) {                     /* phone key: */
        case 0x11: return KEY_NUMERIC_1;        /* 1 */
        case 0x12: return KEY_NUMERIC_2;        /* 2 */
        case 0x14: return KEY_NUMERIC_3;        /* 3 */
        case 0x18: return KEY_ENTER;            /* pickup */
        case 0x21: return KEY_NUMERIC_4;        /* 4 */
        case 0x22: return KEY_NUMERIC_5;        /* 5 */
        case 0x24: return KEY_NUMERIC_6;        /* 6 */
        case 0x28: return KEY_ESC;              /* hangup */
        case 0x41: return KEY_NUMERIC_7;        /* 7 */
        case 0x42: return KEY_NUMERIC_8;        /* 8 */
        case 0x44: return KEY_NUMERIC_9;        /* 9 */
        case 0x48: return KEY_LEFT;             /* left arrow */
        case 0x81: return KEY_NUMERIC_STAR;     /* * */
        case 0x82: return KEY_NUMERIC_0;        /* 0 */
        case 0x84: return KEY_NUMERIC_POUND;    /* # */
        case 0x88: return KEY_RIGHT;            /* right arrow */
        default:   return special_keymap(scancode);
        }
}
/* Active keymap; chosen at module load from the "phone" parameter. */
static unsigned short (*keymap)(int) = keymap_kip1000;
/*
 * Release the previously reported key (if any), press the new one and
 * flush both through a single input_sync().  key < 0 means "no key".
 */
static void report_key(struct cm109_dev *dev, int key)
{
        struct input_dev *input = dev->idev;

        if (dev->key_code >= 0)
                input_report_key(input, dev->key_code, 0);      /* old key up */

        dev->key_code = key;

        if (key >= 0)
                input_report_key(input, key, 1);                /* new key down */

        input_sync(input);
}
/*
 * Converts data of special key presses (volume, mute) into events
 * for the input subsystem, sends press-n-release for mute keys.
 */
static void cm109_report_special(struct cm109_dev *dev)
{
        static const u8 autorelease = RECORD_MUTE | PLAYBACK_MUTE;
        struct input_dev *idev = dev->idev;
        u8 data = dev->irq_data->byte[HID_IR0];
        unsigned short keycode;
        int i;

        for (i = 0; i < 4; i++) {
                /* special keys live past the 0xff normal-scancode range */
                keycode = dev->keymap[0xff + BIT(i)];
                if (keycode == KEY_RESERVED)
                        continue;

                input_report_key(idev, keycode, data & BIT(i));
                if (data & autorelease & BIT(i)) {
                        /* mute buttons: synthesize an immediate release */
                        input_sync(idev);
                        input_report_key(idev, keycode, 0);
                }
        }

        input_sync(idev);
}
/******************************************************************************
* CM109 usb communication interface
*****************************************************************************/
/*
 * Refresh the buzzer bit in the ctl packet and submit the ctl URB.
 * Callers hold ctl_submit_lock and have already set ctl_urb_pending.
 */
static void cm109_submit_buzz_toggle(struct cm109_dev *dev)
{
        int error;

        if (dev->buzzer_state)
                dev->ctl_data->byte[HID_OR0] |= BUZZER_ON;
        else
                dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON;

        error = usb_submit_urb(dev->urb_ctl, GFP_ATOMIC);
        if (error)
                dev_err(&dev->intf->dev,
                        "%s: usb_submit_urb (urb_ctl) failed %d\n",
                        __func__, error);
}
/*
 * IRQ URB completion: decode one key-state packet, then (unless shutting
 * down) resubmit the ctl URB with the next scan column / buzzer state,
 * keeping the ctl/irq URB ping-pong running.
 */
static void cm109_urb_irq_callback(struct urb *urb)
{
        struct cm109_dev *dev = urb->context;
        const int status = urb->status;
        int error;
        unsigned long flags;

        dev_dbg(&dev->intf->dev, "### URB IRQ: [0x%02x 0x%02x 0x%02x 0x%02x] keybit=0x%02x\n",
                dev->irq_data->byte[0],
                dev->irq_data->byte[1],
                dev->irq_data->byte[2],
                dev->irq_data->byte[3],
                dev->keybit);

        if (status) {
                if (status == -ESHUTDOWN)
                        return;
                dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
                                    __func__, status);
                goto out;
        }

        /* Special keys */
        cm109_report_special(dev);

        /* Scan key column */
        if (dev->keybit == 0xf) {
                /* Any changes ? */
                if ((dev->gpi & 0xf0) == (dev->irq_data->byte[HID_IR1] & 0xf0))
                        goto out;

                dev->gpi = dev->irq_data->byte[HID_IR1] & 0xf0;
                dev->keybit = 0x1;      /* change seen: start column scan */
        } else {
                report_key(dev, dev->keymap[dev->irq_data->byte[HID_IR1]]);

                dev->keybit <<= 1;      /* advance to next column */
                if (dev->keybit > 0x8)
                        dev->keybit = 0xf;      /* scan done: back to change detect */
        }

 out:
        spin_lock_irqsave(&dev->ctl_submit_lock, flags);

        dev->irq_urb_pending = 0;

        if (likely(!dev->shutdown)) {
                /* rebuild ctl packet: buzzer bit plus the chosen scan column */
                if (dev->buzzer_state)
                        dev->ctl_data->byte[HID_OR0] |= BUZZER_ON;
                else
                        dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON;

                dev->ctl_data->byte[HID_OR1] = dev->keybit;
                dev->ctl_data->byte[HID_OR2] = dev->keybit;

                dev->buzzer_pending = 0;
                dev->ctl_urb_pending = 1;

                error = usb_submit_urb(dev->urb_ctl, GFP_ATOMIC);
                if (error)
                        dev_err(&dev->intf->dev,
                                "%s: usb_submit_urb (urb_ctl) failed %d\n",
                                __func__, error);
        }

        spin_unlock_irqrestore(&dev->ctl_submit_lock, flags);
}
/*
 * Ctl URB completion: either re-issue a pending buzzer command (also
 * retried when this transfer failed) or submit the irq URB to fetch the
 * next key-state packet.
 */
static void cm109_urb_ctl_callback(struct urb *urb)
{
        struct cm109_dev *dev = urb->context;
        const int status = urb->status;
        int error;
        unsigned long flags;

        dev_dbg(&dev->intf->dev, "### URB CTL: [0x%02x 0x%02x 0x%02x 0x%02x]\n",
                dev->ctl_data->byte[0],
                dev->ctl_data->byte[1],
                dev->ctl_data->byte[2],
                dev->ctl_data->byte[3]);

        if (status) {
                if (status == -ESHUTDOWN)
                        return;
                dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
                                    __func__, status);
        }

        spin_lock_irqsave(&dev->ctl_submit_lock, flags);

        dev->ctl_urb_pending = 0;

        if (likely(!dev->shutdown)) {
                if (dev->buzzer_pending || status) {
                        dev->buzzer_pending = 0;
                        dev->ctl_urb_pending = 1;
                        cm109_submit_buzz_toggle(dev);
                } else if (likely(!dev->irq_urb_pending)) {
                        /* ask for key data */
                        dev->irq_urb_pending = 1;
                        error = usb_submit_urb(dev->urb_irq, GFP_ATOMIC);
                        if (error)
                                dev_err(&dev->intf->dev,
                                        "%s: usb_submit_urb (urb_irq) failed %d\n",
                                        __func__, error);
                }
        }

        spin_unlock_irqrestore(&dev->ctl_submit_lock, flags);
}
/*
 * Request a buzzer state change without sleeping: if a ctl URB is already
 * in flight, flag buzzer_pending and let its completion resubmit;
 * otherwise submit the buzz command directly.
 */
static void cm109_toggle_buzzer_async(struct cm109_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->ctl_submit_lock, flags);

        if (dev->ctl_urb_pending) {
                /* URB completion will resubmit */
                dev->buzzer_pending = 1;
        } else {
                dev->ctl_urb_pending = 1;
                cm109_submit_buzz_toggle(dev);
        }

        spin_unlock_irqrestore(&dev->ctl_submit_lock, flags);
}
/*
 * Set the buzzer on/off with a blocking control transfer; used once URB
 * traffic has been stopped (close/suspend/reset).  May sleep.
 */
static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on)
{
        int error;

        if (on)
                dev->ctl_data->byte[HID_OR0] |= BUZZER_ON;
        else
                dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON;

        error = usb_control_msg(dev->udev,
                                usb_sndctrlpipe(dev->udev, 0),
                                dev->ctl_req->bRequest,
                                dev->ctl_req->bRequestType,
                                le16_to_cpu(dev->ctl_req->wValue),
                                le16_to_cpu(dev->ctl_req->wIndex),
                                dev->ctl_data,
                                USB_PKT_LEN, USB_CTRL_SET_TIMEOUT);
        /* -EINTR is expected when the transfer is interrupted; stay quiet */
        if (error < 0 && error != -EINTR)
                dev_err(&dev->intf->dev, "%s: usb_control_msg() failed %d\n",
                        __func__, error);
}
/*
 * Stop all URB traffic: flag shutdown so completion handlers stop
 * resubmitting, kill both URBs, then silence the buzzer synchronously.
 */
static void cm109_stop_traffic(struct cm109_dev *dev)
{
        dev->shutdown = 1;
        /*
         * Make sure other CPUs see this
         */
        smp_wmb();

        usb_kill_urb(dev->urb_ctl);
        usb_kill_urb(dev->urb_irq);

        cm109_toggle_buzzer_sync(dev, 0);

        dev->shutdown = 0;
        smp_wmb();
}
/*
 * Re-issue the buzzer command after resume/reset; submitting the ctl URB
 * also restarts the regular key-polling cycle.  No-op unless open.
 */
static void cm109_restore_state(struct cm109_dev *dev)
{
        if (!dev->open)
                return;

        /* Restore buzzer state; this also kicks regular URB submission. */
        cm109_toggle_buzzer_async(dev);
}
/******************************************************************************
* input event interface
*****************************************************************************/
/*
 * Input open: autoresume the interface, reset scan/buzzer state and
 * submit the initial ctl URB that starts the ctl/irq URB ping-pong.
 */
static int cm109_input_open(struct input_dev *idev)
{
        struct cm109_dev *dev = input_get_drvdata(idev);
        int error;

        error = usb_autopm_get_interface(dev->intf);
        if (error < 0) {
                dev_err(&idev->dev, "%s - cannot autoresume, result %d\n",
                        __func__, error);
                return error;
        }

        mutex_lock(&dev->pm_mutex);

        dev->buzzer_state = 0;
        dev->key_code = -1;     /* no keys pressed */
        dev->keybit = 0xf;      /* start in change-detect mode */

        /* issue INIT */
        dev->ctl_data->byte[HID_OR0] = HID_OR_GPO_BUZ_SPDIF;
        dev->ctl_data->byte[HID_OR1] = dev->keybit;
        dev->ctl_data->byte[HID_OR2] = dev->keybit;
        dev->ctl_data->byte[HID_OR3] = 0x00;

        dev->ctl_urb_pending = 1;
        error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL);
        if (error) {
                dev->ctl_urb_pending = 0;
                dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n",
                        __func__, error);
        } else {
                dev->open = 1;
        }

        mutex_unlock(&dev->pm_mutex);

        /* drop the autopm reference again if we failed to start */
        if (error)
                usb_autopm_put_interface(dev->intf);

        return error;
}
/*
 * Input close: stop URB traffic and the buzzer, then release the autopm
 * reference taken in cm109_input_open().
 */
static void cm109_input_close(struct input_dev *idev)
{
        struct cm109_dev *dev = input_get_drvdata(idev);

        mutex_lock(&dev->pm_mutex);

        /*
         * Once we are here event delivery is stopped so we
         * don't need to worry about someone starting buzzer
         * again
         */
        cm109_stop_traffic(dev);
        dev->open = 0;

        mutex_unlock(&dev->pm_mutex);

        usb_autopm_put_interface(dev->intf);
}
/*
 * Input event handler: only EV_SND SND_TONE/SND_BELL are accepted; they
 * drive the buzzer (deferred while a USB reset is in progress).
 */
static int cm109_input_ev(struct input_dev *idev, unsigned int type,
                          unsigned int code, int value)
{
        struct cm109_dev *dev = input_get_drvdata(idev);

        dev_dbg(&dev->intf->dev,
                "input_ev: type=%u code=%u value=%d\n", type, code, value);

        if (type != EV_SND)
                return -EINVAL;

        if (code != SND_TONE && code != SND_BELL)
                return -EINVAL;

        dev->buzzer_state = !!value;
        if (!dev->resetting)
                cm109_toggle_buzzer_async(dev);

        return 0;
}
/******************************************************************************
* Linux interface and usb initialisation
*****************************************************************************/
/* Per-device-id driver info: just a human-readable product name. */
struct driver_info {
        char *name;
};

static const struct driver_info info_cm109 = {
        .name = "CM109 USB driver",
};

enum {
        VENDOR_ID        = 0x0d8c, /* C-Media Electronics */
        PRODUCT_ID_CM109 = 0x000e, /* CM109 defines range 0x0008 - 0x000f */
};

/* table of devices that work with this driver */
static const struct usb_device_id cm109_usb_table[] = {
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
                                USB_DEVICE_ID_MATCH_INT_INFO,
                .idVendor = VENDOR_ID,
                .idProduct = PRODUCT_ID_CM109,
                .bInterfaceClass = USB_CLASS_HID,
                .bInterfaceSubClass = 0,
                .bInterfaceProtocol = 0,
                .driver_info = (kernel_ulong_t) &info_cm109
        },
        /* you can add more devices here with product ID 0x0008 - 0x000f */
        { }
};
/*
 * Free all per-device resources.  Safe on partially-initialized state
 * (the helpers accept NULL), so probe error paths can call it directly.
 */
static void cm109_usb_cleanup(struct cm109_dev *dev)
{
        kfree(dev->ctl_req);
        usb_free_coherent(dev->udev, USB_PKT_LEN, dev->ctl_data, dev->ctl_dma);
        usb_free_coherent(dev->udev, USB_PKT_LEN, dev->irq_data, dev->irq_dma);

        usb_free_urb(dev->urb_irq);     /* parameter validation in core/urb */
        usb_free_urb(dev->urb_ctl);     /* parameter validation in core/urb */
        kfree(dev);
}
/* Disconnect: unregistering the input device stops I/O via close(). */
static void cm109_usb_disconnect(struct usb_interface *interface)
{
        struct cm109_dev *dev = usb_get_intfdata(interface);

        usb_set_intfdata(interface, NULL);
        input_unregister_device(dev->idev);
        cm109_usb_cleanup(dev);
}
/*
 * Probe: validate the interrupt-IN endpoint, allocate DMA-coherent
 * buffers and URBs for the irq/ctl channels, build the keymap and
 * register the input device.
 */
static int cm109_usb_probe(struct usb_interface *intf,
                           const struct usb_device_id *id)
{
        struct usb_device *udev = interface_to_usbdev(intf);
        struct driver_info *nfo = (struct driver_info *)id->driver_info;
        struct usb_host_interface *interface;
        struct usb_endpoint_descriptor *endpoint;
        struct cm109_dev *dev;
        struct input_dev *input_dev = NULL;
        int ret, pipe, i;
        int error = -ENOMEM;

        interface = intf->cur_altsetting;

        if (interface->desc.bNumEndpoints < 1)
                return -ENODEV;

        endpoint = &interface->endpoint[0].desc;
        if (!usb_endpoint_is_int_in(endpoint))
                return -ENODEV;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        spin_lock_init(&dev->ctl_submit_lock);
        mutex_init(&dev->pm_mutex);

        dev->udev = udev;
        dev->intf = intf;

        dev->idev = input_dev = input_allocate_device();
        if (!input_dev)
                goto err_out;

        /* allocate usb buffers */
        dev->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN,
                                           GFP_KERNEL, &dev->irq_dma);
        if (!dev->irq_data)
                goto err_out;

        dev->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN,
                                           GFP_KERNEL, &dev->ctl_dma);
        if (!dev->ctl_data)
                goto err_out;

        dev->ctl_req = kmalloc(sizeof(*(dev->ctl_req)), GFP_KERNEL);
        if (!dev->ctl_req)
                goto err_out;

        /* allocate urb structures */
        dev->urb_irq = usb_alloc_urb(0, GFP_KERNEL);
        if (!dev->urb_irq)
                goto err_out;

        dev->urb_ctl = usb_alloc_urb(0, GFP_KERNEL);
        if (!dev->urb_ctl)
                goto err_out;

        /* get a handle to the interrupt data pipe */
        pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
        ret = usb_maxpacket(udev, pipe);
        if (ret != USB_PKT_LEN)
                dev_err(&intf->dev, "invalid payload size %d, expected %d\n",
                        ret, USB_PKT_LEN);      /* complain but carry on */

        /* initialise irq urb */
        usb_fill_int_urb(dev->urb_irq, udev, pipe, dev->irq_data,
                         USB_PKT_LEN,
                         cm109_urb_irq_callback, dev, endpoint->bInterval);
        dev->urb_irq->transfer_dma = dev->irq_dma;
        dev->urb_irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
        dev->urb_irq->dev = udev;

        /* initialise ctl urb */
        dev->ctl_req->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE |
                                        USB_DIR_OUT;
        dev->ctl_req->bRequest = USB_REQ_SET_CONFIGURATION;
        dev->ctl_req->wValue = cpu_to_le16(0x200);
        dev->ctl_req->wIndex = cpu_to_le16(interface->desc.bInterfaceNumber);
        dev->ctl_req->wLength = cpu_to_le16(USB_PKT_LEN);

        usb_fill_control_urb(dev->urb_ctl, udev, usb_sndctrlpipe(udev, 0),
                             (void *)dev->ctl_req, dev->ctl_data, USB_PKT_LEN,
                             cm109_urb_ctl_callback, dev);
        dev->urb_ctl->transfer_dma = dev->ctl_dma;
        dev->urb_ctl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
        dev->urb_ctl->dev = udev;

        /* find out the physical bus location */
        usb_make_path(udev, dev->phys, sizeof(dev->phys));
        strlcat(dev->phys, "/input0", sizeof(dev->phys));

        /* register settings for the input device */
        input_dev->name = nfo->name;
        input_dev->phys = dev->phys;
        usb_to_input_id(udev, &input_dev->id);
        input_dev->dev.parent = &intf->dev;

        input_set_drvdata(input_dev, dev);
        input_dev->open = cm109_input_open;
        input_dev->close = cm109_input_close;
        input_dev->event = cm109_input_ev;

        input_dev->keycode = dev->keymap;
        input_dev->keycodesize = sizeof(unsigned char);
        input_dev->keycodemax = ARRAY_SIZE(dev->keymap);

        input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_SND);
        input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);

        /* register available key events */
        for (i = 0; i < KEYMAP_SIZE; i++) {
                unsigned short k = keymap(i);
                dev->keymap[i] = k;
                __set_bit(k, input_dev->keybit);
        }
        __clear_bit(KEY_RESERVED, input_dev->keybit);

        error = input_register_device(dev->idev);
        if (error)
                goto err_out;

        usb_set_intfdata(intf, dev);

        return 0;

 err_out:
        input_free_device(input_dev);
        cm109_usb_cleanup(dev);
        return error;
}
/* USB suspend: quiesce all URB traffic under pm_mutex. */
static int cm109_usb_suspend(struct usb_interface *intf, pm_message_t message)
{
        struct cm109_dev *dev = usb_get_intfdata(intf);

        dev_info(&intf->dev, "cm109: usb_suspend (event=%d)\n", message.event);

        mutex_lock(&dev->pm_mutex);
        cm109_stop_traffic(dev);
        mutex_unlock(&dev->pm_mutex);

        return 0;
}
/* USB resume (also used as reset_resume): restart traffic if opened. */
static int cm109_usb_resume(struct usb_interface *intf)
{
        struct cm109_dev *dev = usb_get_intfdata(intf);

        dev_info(&intf->dev, "cm109: usb_resume\n");

        mutex_lock(&dev->pm_mutex);
        cm109_restore_state(dev);
        mutex_unlock(&dev->pm_mutex);

        return 0;
}
/*
 * pre_reset: stop traffic before the USB core resets the device.
 * NOTE: intentionally returns with pm_mutex held; the matching unlock is
 * in cm109_usb_post_reset().
 */
static int cm109_usb_pre_reset(struct usb_interface *intf)
{
        struct cm109_dev *dev = usb_get_intfdata(intf);

        mutex_lock(&dev->pm_mutex);

        /*
         * Make sure input events don't try to toggle buzzer
         * while we are resetting
         */
        dev->resetting = 1;
        smp_wmb();

        cm109_stop_traffic(dev);

        return 0;
}
/* post_reset: restore state and drop the mutex taken in pre_reset(). */
static int cm109_usb_post_reset(struct usb_interface *intf)
{
        struct cm109_dev *dev = usb_get_intfdata(intf);

        dev->resetting = 0;
        smp_wmb();

        cm109_restore_state(dev);

        mutex_unlock(&dev->pm_mutex);

        return 0;
}
static struct usb_driver cm109_driver = {
        .name = "cm109",
        .probe = cm109_usb_probe,
        .disconnect = cm109_usb_disconnect,
        .suspend = cm109_usb_suspend,
        .resume = cm109_usb_resume,
        .reset_resume = cm109_usb_resume,       /* resume path rebuilds state */
        .pre_reset = cm109_usb_pre_reset,
        .post_reset = cm109_usb_post_reset,
        .id_table = cm109_usb_table,
        .supports_autosuspend = 1,
};
/*
 * Resolve the "phone" module parameter to one of the keymap functions;
 * -EINVAL (failing module load) for an unknown phone name.
 */
static int __init cm109_select_keymap(void)
{
        /* Load the phone keymap */
        if (!strcasecmp(phone, "kip1000")) {
                keymap = keymap_kip1000;
                printk(KERN_INFO KBUILD_MODNAME ": "
                        "Keymap for Komunikate KIP1000 phone loaded\n");
        } else if (!strcasecmp(phone, "gtalk")) {
                keymap = keymap_gtalk;
                printk(KERN_INFO KBUILD_MODNAME ": "
                        "Keymap for Genius G-talk phone loaded\n");
        } else if (!strcasecmp(phone, "usbph01")) {
                keymap = keymap_usbph01;
                printk(KERN_INFO KBUILD_MODNAME ": "
                        "Keymap for Allied-Telesis Corega USBPH01 phone loaded\n");
        } else if (!strcasecmp(phone, "atcom")) {
                keymap = keymap_atcom;
                printk(KERN_INFO KBUILD_MODNAME ": "
                        "Keymap for ATCom AU-100 phone loaded\n");
        } else {
                printk(KERN_ERR KBUILD_MODNAME ": "
                        "Unsupported phone: %s\n", phone);
                return -EINVAL;
        }

        return 0;
}
/* Module init: pick the keymap first, then register the USB driver. */
static int __init cm109_init(void)
{
        int err;

        err = cm109_select_keymap();
        if (err)
                return err;

        err = usb_register(&cm109_driver);
        if (err)
                return err;

        printk(KERN_INFO KBUILD_MODNAME ": "
                DRIVER_DESC ": " DRIVER_VERSION " (C) " DRIVER_AUTHOR "\n");

        return 0;
}

static void __exit cm109_exit(void)
{
        usb_deregister(&cm109_driver);
}

module_init(cm109_init);
module_exit(cm109_exit);

MODULE_DEVICE_TABLE(usb, cm109_usb_table);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/cm109.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for a keypad w/16 buttons connected to a PCF8574 I2C I/O expander
*
* Copyright 2005-2008 Analog Devices Inc.
*/
#include <linux/module.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#define DRV_NAME "pcf8574_keypad"
/*
 * Scancode (as produced by read_state()) -> keycode table for the
 * 16-button pad; [0] stays KEY_RESERVED and is masked out at probe time.
 */
static const unsigned char pcf8574_kp_btncode[] = {
        [0] = KEY_RESERVED,
        [1] = KEY_ENTER,
        [2] = KEY_BACKSLASH,
        [3] = KEY_0,
        [4] = KEY_RIGHTBRACE,
        [5] = KEY_C,
        [6] = KEY_9,
        [7] = KEY_8,
        [8] = KEY_7,
        [9] = KEY_B,
        [10] = KEY_6,
        [11] = KEY_5,
        [12] = KEY_4,
        [13] = KEY_A,
        [14] = KEY_3,
        [15] = KEY_2,
        [16] = KEY_1
};

/* Per-client driver state. */
struct kp_data {
        unsigned short btncode[ARRAY_SIZE(pcf8574_kp_btncode)]; /* live keymap */
        struct input_dev *idev;
        struct i2c_client *client;
        char name[64];                  /* input device name */
        char phys[32];                  /* input device phys path */
        unsigned char laststate;        /* last scancode seen by IRQ handler */
};
/*
 * Scan the keypad through the PCF8574: write 0xf0 and read back the high
 * nibble, then write 0x0f and read the low nibble (both inverted, active
 * low), and encode the two highest-set-bit positions into one scancode
 * of the form ((a - 1) * 4) + b.
 * NOTE(review): which nibble is rows vs columns depends on board wiring;
 * not verifiable from this file.
 */
static short read_state(struct kp_data *lp)
{
        unsigned char x, y, a, b;

        i2c_smbus_write_byte(lp->client, 240);  /* 240 == 0xf0 */
        x = 0xF & (~(i2c_smbus_read_byte(lp->client) >> 4));

        i2c_smbus_write_byte(lp->client, 15);   /* 15 == 0x0f */
        y = 0xF & (~i2c_smbus_read_byte(lp->client));

        /* a/b end up as 1 + index of the highest set bit in each nibble */
        for (a = 0; x > 0; a++)
                x = x >> 1;
        for (b = 0; y > 0; b++)
                y = y >> 1;

        return ((a - 1) * 4) + b;
}
/*
 * Threaded IRQ handler: on a state change, report a key press when the
 * new scancode is within the keymap, otherwise a release of the key that
 * corresponds to the previous state.
 */
static irqreturn_t pcf8574_kp_irq_handler(int irq, void *dev_id)
{
        struct kp_data *lp = dev_id;
        unsigned char nextstate = read_state(lp);

        if (lp->laststate != nextstate) {
                int key_down = nextstate < ARRAY_SIZE(lp->btncode);
                unsigned short keycode = key_down ?
                        lp->btncode[nextstate] : lp->btncode[lp->laststate];

                input_report_key(lp->idev, keycode, key_down);
                input_sync(lp->idev);

                lp->laststate = nextstate;
        }

        return IRQ_HANDLED;
}
/*
 * Probe: verify the PCF8574 responds, set up the input device and keymap,
 * read the initial key state and claim the expander's interrupt line.
 *
 * Fix: the keycode bound check used to test the freshly kzalloc'ed
 * destination entry (always 0, so always <= KEY_MAX) instead of the
 * source table entry, making the KEY_MAX validation a no-op.
 */
static int pcf8574_kp_probe(struct i2c_client *client)
{
        int i, ret;
        struct input_dev *idev;
        struct kp_data *lp;

        /* sanity check: a failed write means no chip on this address */
        if (i2c_smbus_write_byte(client, 240) < 0) {
                dev_err(&client->dev, "probe: write fail\n");
                return -ENODEV;
        }

        lp = kzalloc(sizeof(*lp), GFP_KERNEL);
        if (!lp)
                return -ENOMEM;

        idev = input_allocate_device();
        if (!idev) {
                dev_err(&client->dev, "Can't allocate input device\n");
                ret = -ENOMEM;
                goto fail_allocate;
        }

        lp->idev = idev;
        lp->client = client;

        idev->evbit[0] = BIT_MASK(EV_KEY);
        idev->keycode = lp->btncode;
        idev->keycodesize = sizeof(lp->btncode[0]);
        idev->keycodemax = ARRAY_SIZE(lp->btncode);

        for (i = 0; i < ARRAY_SIZE(pcf8574_kp_btncode); i++) {
                /* validate the keycode from the source table (was checking
                 * the zero-initialized destination, i.e. always true) */
                if (pcf8574_kp_btncode[i] <= KEY_MAX) {
                        lp->btncode[i] = pcf8574_kp_btncode[i];
                        __set_bit(lp->btncode[i], idev->keybit);
                }
        }
        __clear_bit(KEY_RESERVED, idev->keybit);

        /* bounded copies instead of sprintf() into fixed-size buffers */
        snprintf(lp->name, sizeof(lp->name), DRV_NAME);
        snprintf(lp->phys, sizeof(lp->phys), "kp_data/input0");

        idev->name = lp->name;
        idev->phys = lp->phys;
        idev->id.bustype = BUS_I2C;
        idev->id.vendor = 0x0001;
        idev->id.product = 0x0001;
        idev->id.version = 0x0100;

        lp->laststate = read_state(lp);

        ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler,
                                   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                   DRV_NAME, lp);
        if (ret) {
                dev_err(&client->dev, "IRQ %d is not free\n", client->irq);
                goto fail_free_device;
        }

        ret = input_register_device(idev);
        if (ret) {
                dev_err(&client->dev, "input_register_device() failed\n");
                goto fail_free_irq;
        }

        i2c_set_clientdata(client, lp);
        return 0;

 fail_free_irq:
        free_irq(client->irq, lp);
 fail_free_device:
        input_free_device(idev);
 fail_allocate:
        kfree(lp);

        return ret;
}
/* Unbind: release the IRQ first so the handler cannot run during teardown. */
static void pcf8574_kp_remove(struct i2c_client *client)
{
        struct kp_data *lp = i2c_get_clientdata(client);

        free_irq(client->irq, lp);

        input_unregister_device(lp->idev);
        kfree(lp);
}
/* System resume: re-enable the keypad IRQ masked during suspend. */
static int pcf8574_kp_resume(struct device *dev)
{
        struct i2c_client *client = to_i2c_client(dev);

        enable_irq(client->irq);

        return 0;
}

/* System suspend: mask the keypad IRQ so the handler cannot run. */
static int pcf8574_kp_suspend(struct device *dev)
{
        struct i2c_client *client = to_i2c_client(dev);

        disable_irq(client->irq);

        return 0;
}
/* Sleep PM ops: mask/unmask the keypad IRQ across suspend/resume. */
static DEFINE_SIMPLE_DEV_PM_OPS(pcf8574_kp_pm_ops,
                                pcf8574_kp_suspend, pcf8574_kp_resume);

/* I2C device-id table; no device-tree match table is provided. */
static const struct i2c_device_id pcf8574_kp_id[] = {
        { DRV_NAME, 0 },
        { }
};
MODULE_DEVICE_TABLE(i2c, pcf8574_kp_id);

static struct i2c_driver pcf8574_kp_driver = {
        .driver = {
                .name  = DRV_NAME,
                .pm = pm_sleep_ptr(&pcf8574_kp_pm_ops),
        },
        .probe    = pcf8574_kp_probe,
        .remove   = pcf8574_kp_remove,
        .id_table = pcf8574_kp_id,
};
module_i2c_driver(pcf8574_kp_driver);

MODULE_AUTHOR("Michael Hennerich");
MODULE_DESCRIPTION("Keypad input driver for 16 keys connected to PCF8574");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/pcf8574_keypad.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* DRV2665 haptics driver family
*
* Author: Dan Murphy <[email protected]>
*
* Copyright: (C) 2015 Texas Instruments, Inc.
*/
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
/* Control registers */
#define DRV2665_STATUS  0x00
#define DRV2665_CTRL_1  0x01
#define DRV2665_CTRL_2  0x02
#define DRV2665_FIFO    0x0b

/* Status Register */
#define DRV2665_FIFO_FULL       BIT(0)
#define DRV2665_FIFO_EMPTY      BIT(1)

/* Control 1 Register */
#define DRV2665_25_VPP_GAIN     0x00
#define DRV2665_50_VPP_GAIN     0x01
#define DRV2665_75_VPP_GAIN     0x02
#define DRV2665_100_VPP_GAIN    0x03
#define DRV2665_DIGITAL_IN      0xfc
#define DRV2665_ANALOG_IN       BIT(2)

/* Control 2 Register */
#define DRV2665_BOOST_EN        BIT(1)
#define DRV2665_STANDBY         BIT(6)
#define DRV2665_DEV_RST         BIT(7)
#define DRV2665_5_MS_IDLE_TOUT  0x00
#define DRV2665_10_MS_IDLE_TOUT 0x04
#define DRV2665_15_MS_IDLE_TOUT 0x08
#define DRV2665_20_MS_IDLE_TOUT 0x0c

/**
 * struct drv2665_data -
 * @input_dev: Pointer to the input device
 * @client: Pointer to the I2C client
 * @regmap: Register map of the device
 * @work: Work item used to off load the enable/disable of the vibration
 * @regulator: Pointer to the regulator for the IC
 */
struct drv2665_data {
        struct input_dev *input_dev;
        struct i2c_client *client;
        struct regmap *regmap;
        struct work_struct work;
        struct regulator *regulator;
};

/* 8kHz Sine wave to stream to the FIFO */
static const u8 drv2665_sine_wave_form[] = {
        0x00, 0x10, 0x20, 0x2e, 0x3c, 0x48, 0x53, 0x5b, 0x61, 0x65, 0x66,
        0x65, 0x61, 0x5b, 0x53, 0x48, 0x3c, 0x2e, 0x20, 0x10,
        0x00, 0xf0, 0xe0, 0xd2, 0xc4, 0xb8, 0xad, 0xa5, 0x9f, 0x9b, 0x9a,
        0x9b, 0x9f, 0xa5, 0xad, 0xb8, 0xc4, 0xd2, 0xe0, 0xf0, 0x00,
};

/* Register values after device reset */
static const struct reg_default drv2665_reg_defs[] = {
        { DRV2665_STATUS, 0x02 },
        { DRV2665_CTRL_1, 0x28 },
        { DRV2665_CTRL_2, 0x40 },
        { DRV2665_FIFO, 0x00 },
};
/*
 * Deferred work: poll the status register and, when the device FIFO has
 * drained, stream another period of the sine wave into it.
 */
static void drv2665_worker(struct work_struct *work)
{
        struct drv2665_data *haptics =
                container_of(work, struct drv2665_data, work);
        unsigned int read_buf;
        int error;

        error = regmap_read(haptics->regmap, DRV2665_STATUS, &read_buf);
        if (error) {
                dev_err(&haptics->client->dev,
                        "Failed to read status: %d\n", error);
                return;
        }

        if (read_buf & DRV2665_FIFO_EMPTY) {
                error = regmap_bulk_write(haptics->regmap,
                                          DRV2665_FIFO,
                                          drv2665_sine_wave_form,
                                          ARRAY_SIZE(drv2665_sine_wave_form));
                if (error) {
                        dev_err(&haptics->client->dev,
                                "Failed to write FIFO: %d\n", error);
                        return;
                }
        }
}
/*
 * ff-memless play callback: only schedules the worker, which performs the
 * (potentially sleeping) regmap I/O outside this callback.
 */
static int drv2665_haptics_play(struct input_dev *input, void *data,
                                struct ff_effect *effect)
{
        struct drv2665_data *haptics = input_get_drvdata(input);

        schedule_work(&haptics->work);

        return 0;
}
/* Input close: stop any queued FIFO refill work and put the chip in standby. */
static void drv2665_close(struct input_dev *input)
{
        struct drv2665_data *haptics = input_get_drvdata(input);
        int error;

        cancel_work_sync(&haptics->work);

        error = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2,
                                   DRV2665_STANDBY, DRV2665_STANDBY);
        if (error)
                dev_err(&haptics->client->dev,
                        "Failed to enter standby mode: %d\n", error);
}
/* One-time init sequence: 10 ms idle timeout and 25 Vpp gain. */
static const struct reg_sequence drv2665_init_regs[] = {
        { DRV2665_CTRL_2, 0 | DRV2665_10_MS_IDLE_TOUT },
        { DRV2665_CTRL_1, DRV2665_25_VPP_GAIN },
};
static int drv2665_init(struct drv2665_data *haptics)
{
int error;
error = regmap_register_patch(haptics->regmap,
drv2665_init_regs,
ARRAY_SIZE(drv2665_init_regs));
if (error) {
dev_err(&haptics->client->dev,
"Failed to write init registers: %d\n",
error);
return error;
}
return 0;
}
/* 8-bit address/value map with no caching: every access hits the device. */
static const struct regmap_config drv2665_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .max_register = DRV2665_FIFO,
        .reg_defaults = drv2665_reg_defs,
        .num_reg_defaults = ARRAY_SIZE(drv2665_reg_defs),
        .cache_type = REGCACHE_NONE,
};
/*
 * Probe: acquire the vbat regulator, create the ff-memless input device,
 * set up regmap, apply the init register patch and register the device.
 * All allocations are devm-managed, so error paths just return.
 */
static int drv2665_probe(struct i2c_client *client)
{
        struct drv2665_data *haptics;
        int error;

        haptics = devm_kzalloc(&client->dev, sizeof(*haptics), GFP_KERNEL);
        if (!haptics)
                return -ENOMEM;

        haptics->regulator = devm_regulator_get(&client->dev, "vbat");
        if (IS_ERR(haptics->regulator)) {
                error = PTR_ERR(haptics->regulator);
                dev_err(&client->dev,
                        "unable to get regulator, error: %d\n", error);
                return error;
        }

        haptics->input_dev = devm_input_allocate_device(&client->dev);
        if (!haptics->input_dev) {
                dev_err(&client->dev, "Failed to allocate input device\n");
                return -ENOMEM;
        }

        haptics->input_dev->name = "drv2665:haptics";
        haptics->input_dev->dev.parent = client->dev.parent;
        haptics->input_dev->close = drv2665_close;
        input_set_drvdata(haptics->input_dev, haptics);
        input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);

        error = input_ff_create_memless(haptics->input_dev, NULL,
                                        drv2665_haptics_play);
        if (error) {
                dev_err(&client->dev, "input_ff_create() failed: %d\n",
                        error);
                return error;
        }

        INIT_WORK(&haptics->work, drv2665_worker);

        haptics->client = client;
        i2c_set_clientdata(client, haptics);

        haptics->regmap = devm_regmap_init_i2c(client, &drv2665_regmap_config);
        if (IS_ERR(haptics->regmap)) {
                error = PTR_ERR(haptics->regmap);
                dev_err(&client->dev, "Failed to allocate register map: %d\n",
                        error);
                return error;
        }

        error = drv2665_init(haptics);
        if (error) {
                dev_err(&client->dev, "Device init failed: %d\n", error);
                return error;
        }

        error = input_register_device(haptics->input_dev);
        if (error) {
                dev_err(&client->dev, "couldn't register input device: %d\n",
                        error);
                return error;
        }

        return 0;
}
/*
 * System suspend: if the input device is open, put the chip in standby
 * and cut the vbat supply. Partial failures are unwound so the device is
 * left in a consistent state.
 */
static int drv2665_suspend(struct device *dev)
{
	struct drv2665_data *haptics = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&haptics->input_dev->mutex);

	if (input_device_enabled(haptics->input_dev)) {
		ret = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2,
					 DRV2665_STANDBY, DRV2665_STANDBY);
		if (ret) {
			dev_err(dev, "Failed to set standby mode\n");
			/* standby failed: still drop the supply before bailing */
			regulator_disable(haptics->regulator);
			goto out;
		}

		ret = regulator_disable(haptics->regulator);
		if (ret) {
			dev_err(dev, "Failed to disable regulator\n");
			/* undo standby so the device stays usable */
			regmap_update_bits(haptics->regmap,
					   DRV2665_CTRL_2,
					   DRV2665_STANDBY, 0);
		}
	}

out:
	mutex_unlock(&haptics->input_dev->mutex);
	return ret;
}
/*
 * System resume: mirror of drv2665_suspend() - re-enable the supply and
 * clear standby when the input device is open, unwinding on failure.
 */
static int drv2665_resume(struct device *dev)
{
	struct drv2665_data *haptics = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&haptics->input_dev->mutex);

	if (input_device_enabled(haptics->input_dev)) {
		ret = regulator_enable(haptics->regulator);
		if (ret) {
			dev_err(dev, "Failed to enable regulator\n");
			goto out;
		}

		ret = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2,
					 DRV2665_STANDBY, 0);
		if (ret) {
			dev_err(dev, "Failed to unset standby mode\n");
			/* standby clear failed: drop the supply again */
			regulator_disable(haptics->regulator);
			goto out;
		}
	}

out:
	mutex_unlock(&haptics->input_dev->mutex);
	return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(drv2665_pm_ops, drv2665_suspend, drv2665_resume);

/* I2C device-id table */
static const struct i2c_device_id drv2665_id[] = {
	{ "drv2665", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, drv2665_id);

#ifdef CONFIG_OF
/* Device-tree match table */
static const struct of_device_id drv2665_of_match[] = {
	{ .compatible = "ti,drv2665", },
	{ }
};
MODULE_DEVICE_TABLE(of, drv2665_of_match);
#endif

static struct i2c_driver drv2665_driver = {
	.probe = drv2665_probe,
	.driver = {
		.name = "drv2665-haptics",
		.of_match_table = of_match_ptr(drv2665_of_match),
		.pm = pm_sleep_ptr(&drv2665_pm_ops),
	},
	.id_table = drv2665_id,
};

module_i2c_driver(drv2665_driver);

MODULE_DESCRIPTION("TI DRV2665 haptics driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Murphy <[email protected]>");
|
linux-master
|
drivers/input/misc/drv2665.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Atmel Atmegaxx Capacitive Touch Button Driver
*
* Copyright (C) 2016 Google, inc.
*/
/*
* It's irrelevant that the HW used to develop captouch driver is based
* on Atmega88PA part and uses QtouchADC parts for sensing touch.
* Calling this driver "captouch" is an arbitrary way to distinguish
* the protocol this driver supported by other atmel/qtouch drivers.
*
* Captouch driver supports a newer/different version of the I2C
* registers/commands than the qt1070.c driver.
* Don't let the similarity of the general driver structure fool you.
*
* For raw i2c access from userspace, use i2cset/i2cget
* to poke at /dev/i2c-N devices.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
/* Maximum number of buttons supported */
#define MAX_NUM_OF_BUTTONS 8
/* Registers */
#define REG_KEY1_THRESHOLD 0x02
#define REG_KEY2_THRESHOLD 0x03
#define REG_KEY3_THRESHOLD 0x04
#define REG_KEY4_THRESHOLD 0x05
#define REG_KEY1_REF_H 0x20
#define REG_KEY1_REF_L 0x21
#define REG_KEY2_REF_H 0x22
#define REG_KEY2_REF_L 0x23
#define REG_KEY3_REF_H 0x24
#define REG_KEY3_REF_L 0x25
#define REG_KEY4_REF_H 0x26
#define REG_KEY4_REF_L 0x27
#define REG_KEY1_DLT_H 0x30
#define REG_KEY1_DLT_L 0x31
#define REG_KEY2_DLT_H 0x32
#define REG_KEY2_DLT_L 0x33
#define REG_KEY3_DLT_H 0x34
#define REG_KEY3_DLT_L 0x35
#define REG_KEY4_DLT_H 0x36
#define REG_KEY4_DLT_L 0x37
#define REG_KEY_STATE 0x3C
/*
 * struct atmel_captouch_device - per-device driver state
 * @client:   I2C slave device client pointer (kernel-doc previously said
 *            "@i2c_client", which did not match the member name)
 * @input:    Input device pointer
 * @num_btn:  Number of buttons
 * @keycodes: map of button# to KeyCode
 * @prev_btn: Previous key state to detect button "press" or "release"
 * @xfer_buf: I2C transfer buffer
 */
struct atmel_captouch_device {
	struct i2c_client *client;
	struct input_dev *input;
	u32 num_btn;
	u32 keycodes[MAX_NUM_OF_BUTTONS];
	u8 prev_btn;
	u8 xfer_buf[8] ____cacheline_aligned;
};
/*
* Read from I2C slave device
* The protocol is that the client has to provide both the register address
* and the length, and while reading back the device would prepend the data
* with address and length for verification.
*/
/*
 * Read from I2C slave device.
 * The protocol is that the client has to provide both the register address
 * and the length, and while reading back the device would prepend the data
 * with address and length for verification.
 *
 * Returns 0 on success, -EINVAL if @len exceeds the transfer buffer,
 * -ECOMM if the echoed register address mismatches, or a negative errno
 * from i2c_transfer(). NOTE(review): only the echoed address is checked,
 * not the echoed length byte - confirm that is intentional.
 */
static int atmel_read(struct atmel_captouch_device *capdev,
		      u8 reg, u8 *data, size_t len)
{
	struct i2c_client *client = capdev->client;
	struct device *dev = &client->dev;
	struct i2c_msg msg[2];
	int err;

	/* 2 bytes of the buffer are reserved for the addr/len echo */
	if (len > sizeof(capdev->xfer_buf) - 2)
		return -EINVAL;

	capdev->xfer_buf[0] = reg;
	capdev->xfer_buf[1] = len;

	msg[0].addr = client->addr;
	msg[0].flags = 0;
	msg[0].buf = capdev->xfer_buf;
	msg[0].len = 2;

	msg[1].addr = client->addr;
	msg[1].flags = I2C_M_RD;
	msg[1].buf = capdev->xfer_buf;
	msg[1].len = len + 2;

	err = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
	if (err != ARRAY_SIZE(msg))
		return err < 0 ? err : -EIO;

	if (capdev->xfer_buf[0] != reg) {
		dev_err(dev,
			"I2C read error: register address does not match (%#02x vs %02x)\n",
			capdev->xfer_buf[0], reg);
		return -ECOMM;
	}

	memcpy(data, &capdev->xfer_buf[2], len);

	return 0;
}
/*
* Handle interrupt and report the key changes to the input system.
* Multi-touch can be supported; however, it really depends on whether
* the device can multi-touch.
*/
/*
 * Threaded IRQ handler: read the button state register, report every bit
 * that changed since the previous read, and sync. Always returns
 * IRQ_HANDLED, even when the I2C read fails (failure is only logged).
 */
static irqreturn_t atmel_captouch_isr(int irq, void *data)
{
	struct atmel_captouch_device *capdev = data;
	struct device *dev = &capdev->client->dev;
	int error;
	int i;
	u8 new_btn;
	u8 changed_btn;

	error = atmel_read(capdev, REG_KEY_STATE, &new_btn, 1);
	if (error) {
		dev_err(dev, "failed to read button state: %d\n", error);
		goto out;
	}

	dev_dbg(dev, "%s: button state %#02x\n", __func__, new_btn);

	/* XOR yields exactly the buttons whose state flipped */
	changed_btn = new_btn ^ capdev->prev_btn;
	capdev->prev_btn = new_btn;

	for (i = 0; i < capdev->num_btn; i++) {
		if (changed_btn & BIT(i))
			input_report_key(capdev->input,
					 capdev->keycodes[i],
					 new_btn & BIT(i));
	}

	input_sync(capdev->input);

out:
	return IRQ_HANDLED;
}
/*
* Probe function to setup the device, input system and interrupt
*/
static int atmel_captouch_probe(struct i2c_client *client)
{
struct atmel_captouch_device *capdev;
struct device *dev = &client->dev;
struct device_node *node;
int i;
int err;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_I2C_BLOCK)) {
dev_err(dev, "needed i2c functionality is not supported\n");
return -EINVAL;
}
capdev = devm_kzalloc(dev, sizeof(*capdev), GFP_KERNEL);
if (!capdev)
return -ENOMEM;
capdev->client = client;
err = atmel_read(capdev, REG_KEY_STATE,
&capdev->prev_btn, sizeof(capdev->prev_btn));
if (err) {
dev_err(dev, "failed to read initial button state: %d\n", err);
return err;
}
capdev->input = devm_input_allocate_device(dev);
if (!capdev->input) {
dev_err(dev, "failed to allocate input device\n");
return -ENOMEM;
}
capdev->input->id.bustype = BUS_I2C;
capdev->input->id.product = 0x880A;
capdev->input->id.version = 0;
capdev->input->name = "ATMegaXX Capacitive Button Controller";
__set_bit(EV_KEY, capdev->input->evbit);
node = dev->of_node;
if (!node) {
dev_err(dev, "failed to find matching node in device tree\n");
return -EINVAL;
}
if (of_property_read_bool(node, "autorepeat"))
__set_bit(EV_REP, capdev->input->evbit);
capdev->num_btn = of_property_count_u32_elems(node, "linux,keymap");
if (capdev->num_btn > MAX_NUM_OF_BUTTONS)
capdev->num_btn = MAX_NUM_OF_BUTTONS;
err = of_property_read_u32_array(node, "linux,keycodes",
capdev->keycodes,
capdev->num_btn);
if (err) {
dev_err(dev,
"failed to read linux,keycode property: %d\n", err);
return err;
}
for (i = 0; i < capdev->num_btn; i++)
__set_bit(capdev->keycodes[i], capdev->input->keybit);
capdev->input->keycode = capdev->keycodes;
capdev->input->keycodesize = sizeof(capdev->keycodes[0]);
capdev->input->keycodemax = capdev->num_btn;
err = input_register_device(capdev->input);
if (err)
return err;
err = devm_request_threaded_irq(dev, client->irq,
NULL, atmel_captouch_isr,
IRQF_ONESHOT,
"atmel_captouch", capdev);
if (err) {
dev_err(dev, "failed to request irq %d: %d\n",
client->irq, err);
return err;
}
return 0;
}
/* Device-tree match table */
static const struct of_device_id atmel_captouch_of_id[] = {
	{
		.compatible = "atmel,captouch",
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_captouch_of_id);

/* I2C device-id table */
static const struct i2c_device_id atmel_captouch_id[] = {
	{ "atmel_captouch", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, atmel_captouch_id);

static struct i2c_driver atmel_captouch_driver = {
	.probe = atmel_captouch_probe,
	.id_table = atmel_captouch_id,
	.driver = {
		.name = "atmel_captouch",
		.of_match_table = atmel_captouch_of_id,
	},
};
module_i2c_driver(atmel_captouch_driver);

/* Module information */
MODULE_AUTHOR("Hung-yu Wu <[email protected]>");
MODULE_DESCRIPTION("Atmel ATmegaXX Capacitance Touch Sensor I2C Driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/input/misc/atmel_captouch.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* DRV260X haptics driver family
*
* Author: Dan Murphy <[email protected]>
*
* Copyright: (C) 2014 Texas Instruments, Inc.
*/
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
#include <dt-bindings/input/ti-drv260x.h>
#define DRV260X_STATUS 0x0
#define DRV260X_MODE 0x1
#define DRV260X_RT_PB_IN 0x2
#define DRV260X_LIB_SEL 0x3
#define DRV260X_WV_SEQ_1 0x4
#define DRV260X_WV_SEQ_2 0x5
#define DRV260X_WV_SEQ_3 0x6
#define DRV260X_WV_SEQ_4 0x7
#define DRV260X_WV_SEQ_5 0x8
#define DRV260X_WV_SEQ_6 0x9
#define DRV260X_WV_SEQ_7 0xa
#define DRV260X_WV_SEQ_8 0xb
#define DRV260X_GO 0xc
#define DRV260X_OVERDRIVE_OFF 0xd
#define DRV260X_SUSTAIN_P_OFF 0xe
#define DRV260X_SUSTAIN_N_OFF 0xf
#define DRV260X_BRAKE_OFF 0x10
#define DRV260X_A_TO_V_CTRL 0x11
#define DRV260X_A_TO_V_MIN_INPUT 0x12
#define DRV260X_A_TO_V_MAX_INPUT 0x13
#define DRV260X_A_TO_V_MIN_OUT 0x14
#define DRV260X_A_TO_V_MAX_OUT 0x15
#define DRV260X_RATED_VOLT 0x16
#define DRV260X_OD_CLAMP_VOLT 0x17
#define DRV260X_CAL_COMP 0x18
#define DRV260X_CAL_BACK_EMF 0x19
#define DRV260X_FEEDBACK_CTRL 0x1a
#define DRV260X_CTRL1 0x1b
#define DRV260X_CTRL2 0x1c
#define DRV260X_CTRL3 0x1d
#define DRV260X_CTRL4 0x1e
#define DRV260X_CTRL5 0x1f
#define DRV260X_LRA_LOOP_PERIOD 0x20
#define DRV260X_VBAT_MON 0x21
#define DRV260X_LRA_RES_PERIOD 0x22
#define DRV260X_MAX_REG 0x23
#define DRV260X_GO_BIT 0x01
/* Library Selection */
#define DRV260X_LIB_SEL_MASK 0x07
#define DRV260X_LIB_SEL_RAM 0x0
#define DRV260X_LIB_SEL_OD 0x1
#define DRV260X_LIB_SEL_40_60 0x2
#define DRV260X_LIB_SEL_60_80 0x3
#define DRV260X_LIB_SEL_100_140 0x4
#define DRV260X_LIB_SEL_140_PLUS 0x5
#define DRV260X_LIB_SEL_HIZ_MASK 0x10
#define DRV260X_LIB_SEL_HIZ_EN 0x01
#define DRV260X_LIB_SEL_HIZ_DIS 0
/* Mode register */
#define DRV260X_STANDBY (1 << 6)
#define DRV260X_STANDBY_MASK 0x40
#define DRV260X_INTERNAL_TRIGGER 0x00
#define DRV260X_EXT_TRIGGER_EDGE 0x01
#define DRV260X_EXT_TRIGGER_LEVEL 0x02
#define DRV260X_PWM_ANALOG_IN 0x03
#define DRV260X_AUDIOHAPTIC 0x04
#define DRV260X_RT_PLAYBACK 0x05
#define DRV260X_DIAGNOSTICS 0x06
#define DRV260X_AUTO_CAL 0x07
/* Audio to Haptics Control */
#define DRV260X_AUDIO_HAPTICS_PEAK_10MS (0 << 2)
#define DRV260X_AUDIO_HAPTICS_PEAK_20MS (1 << 2)
#define DRV260X_AUDIO_HAPTICS_PEAK_30MS (2 << 2)
#define DRV260X_AUDIO_HAPTICS_PEAK_40MS (3 << 2)
#define DRV260X_AUDIO_HAPTICS_FILTER_100HZ 0x00
#define DRV260X_AUDIO_HAPTICS_FILTER_125HZ 0x01
#define DRV260X_AUDIO_HAPTICS_FILTER_150HZ 0x02
#define DRV260X_AUDIO_HAPTICS_FILTER_200HZ 0x03
/* Min/Max Input/Output Voltages */
#define DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT 0x19
#define DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT 0x64
#define DRV260X_AUDIO_HAPTICS_MIN_OUT_VOLT 0x19
#define DRV260X_AUDIO_HAPTICS_MAX_OUT_VOLT 0xFF
/* Feedback register */
#define DRV260X_FB_REG_ERM_MODE 0x7f
#define DRV260X_FB_REG_LRA_MODE (1 << 7)
#define DRV260X_BRAKE_FACTOR_MASK 0x1f
#define DRV260X_BRAKE_FACTOR_2X (1 << 0)
#define DRV260X_BRAKE_FACTOR_3X (2 << 4)
#define DRV260X_BRAKE_FACTOR_4X (3 << 4)
#define DRV260X_BRAKE_FACTOR_6X (4 << 4)
#define DRV260X_BRAKE_FACTOR_8X (5 << 4)
#define DRV260X_BRAKE_FACTOR_16 (6 << 4)
#define DRV260X_BRAKE_FACTOR_DIS (7 << 4)
#define DRV260X_LOOP_GAIN_LOW 0xf3
#define DRV260X_LOOP_GAIN_MED (1 << 2)
#define DRV260X_LOOP_GAIN_HIGH (2 << 2)
#define DRV260X_LOOP_GAIN_VERY_HIGH (3 << 2)
#define DRV260X_BEMF_GAIN_0 0xfc
#define DRV260X_BEMF_GAIN_1 (1 << 0)
#define DRV260X_BEMF_GAIN_2 (2 << 0)
#define DRV260X_BEMF_GAIN_3 (3 << 0)
/* Control 1 register */
#define DRV260X_AC_CPLE_EN (1 << 5)
#define DRV260X_STARTUP_BOOST (1 << 7)
/* Control 2 register */
#define DRV260X_IDISS_TIME_45 0
#define DRV260X_IDISS_TIME_75 (1 << 0)
#define DRV260X_IDISS_TIME_150 (1 << 1)
#define DRV260X_IDISS_TIME_225 0x03
#define DRV260X_BLANK_TIME_45 (0 << 2)
#define DRV260X_BLANK_TIME_75 (1 << 2)
#define DRV260X_BLANK_TIME_150 (2 << 2)
#define DRV260X_BLANK_TIME_225 (3 << 2)
#define DRV260X_SAMP_TIME_150 (0 << 4)
#define DRV260X_SAMP_TIME_200 (1 << 4)
#define DRV260X_SAMP_TIME_250 (2 << 4)
#define DRV260X_SAMP_TIME_300 (3 << 4)
#define DRV260X_BRAKE_STABILIZER (1 << 6)
#define DRV260X_UNIDIR_IN (0 << 7)
#define DRV260X_BIDIR_IN (1 << 7)
/* Control 3 Register */
#define DRV260X_LRA_OPEN_LOOP (1 << 0)
#define DRV260X_ANALOG_IN (1 << 1)
#define DRV260X_LRA_DRV_MODE (1 << 2)
#define DRV260X_RTP_UNSIGNED_DATA (1 << 3)
#define DRV260X_SUPPLY_COMP_DIS (1 << 4)
#define DRV260X_ERM_OPEN_LOOP (1 << 5)
#define DRV260X_NG_THRESH_0 (0 << 6)
#define DRV260X_NG_THRESH_2 (1 << 6)
#define DRV260X_NG_THRESH_4 (2 << 6)
#define DRV260X_NG_THRESH_8 (3 << 6)
/* Control 4 Register */
#define DRV260X_AUTOCAL_TIME_150MS (0 << 4)
#define DRV260X_AUTOCAL_TIME_250MS (1 << 4)
#define DRV260X_AUTOCAL_TIME_500MS (2 << 4)
#define DRV260X_AUTOCAL_TIME_1000MS (3 << 4)
/**
* struct drv260x_data -
* @input_dev: Pointer to the input device
* @client: Pointer to the I2C client
* @regmap: Register map of the device
* @work: Work item used to off load the enable/disable of the vibration
* @enable_gpio: Pointer to the gpio used for enable/disabling
* @regulator: Pointer to the regulator for the IC
* @magnitude: Magnitude of the vibration event
* @mode: The operating mode of the IC (LRA_NO_CAL, ERM or LRA)
* @library: The vibration library to be used
* @rated_voltage: The rated_voltage of the actuator
* @overdrive_voltage: The over drive voltage of the actuator
**/
struct drv260x_data {
struct input_dev *input_dev;
struct i2c_client *client;
struct regmap *regmap;
struct work_struct work;
struct gpio_desc *enable_gpio;
struct regulator *regulator;
u8 magnitude;
u32 mode;
u32 library;
int rated_voltage;
int overdrive_voltage;
};
#define DRV260X_DEF_RATED_VOLT 0x90
#define DRV260X_DEF_OD_CLAMP_VOLT 0x90
/*
* Rated and Overdriver Voltages:
* Calculated using the formula r = v * 255 / 5.6
* where r is what will be written to the register
* and v is the rated or overdriver voltage of the actuator
*/
/*
 * Convert a rated/overdrive voltage in millivolts to its register
 * encoding using the datasheet formula r = v * 255 / 5.6 V
 * (i.e. millivolts * 255 / 5600, truncated).
 */
static int drv260x_calculate_voltage(unsigned int voltage)
{
	unsigned int scaled = voltage * 255;

	return scaled / 5600;
}
/*
 * Deferred work handler: assert the enable GPIO, wait the
 * datasheet-mandated 250us before I2C access, switch the chip to
 * real-time playback mode and write the current magnitude.
 */
static void drv260x_worker(struct work_struct *work)
{
	struct drv260x_data *haptics = container_of(work, struct drv260x_data, work);
	int error;

	gpiod_set_value(haptics->enable_gpio, 1);

	/* Data sheet says to wait 250us before trying to communicate */
	udelay(250);

	error = regmap_write(haptics->regmap,
			     DRV260X_MODE, DRV260X_RT_PLAYBACK);
	if (error) {
		dev_err(&haptics->client->dev,
			"Failed to write set mode: %d\n", error);
	} else {
		error = regmap_write(haptics->regmap,
				     DRV260X_RT_PB_IN, haptics->magnitude);
		if (error)
			dev_err(&haptics->client->dev,
				"Failed to set magnitude: %d\n", error);
	}
}
/*
 * Force-feedback "play" callback: pick the strong magnitude when
 * non-zero, otherwise the weak one (both zero gives zero), scale the
 * u16 value down to the 8-bit register range, and defer the I2C
 * writes to the worker.
 */
static int drv260x_haptics_play(struct input_dev *input, void *data,
				struct ff_effect *effect)
{
	struct drv260x_data *haptics = input_get_drvdata(input);
	unsigned int strong = effect->u.rumble.strong_magnitude;
	unsigned int weak = effect->u.rumble.weak_magnitude;

	haptics->mode = DRV260X_LRA_NO_CAL_MODE;

	/* Scale u16 magnitude into u8 register value */
	haptics->magnitude = (strong ? strong : weak) >> 8;

	schedule_work(&haptics->work);

	return 0;
}
/*
 * Input close() handler: stop pending playback work, put the chip in
 * standby and de-assert the enable GPIO. Failure is only logged.
 */
static void drv260x_close(struct input_dev *input)
{
	struct drv260x_data *haptics = input_get_drvdata(input);
	int error;

	cancel_work_sync(&haptics->work);

	error = regmap_write(haptics->regmap, DRV260X_MODE, DRV260X_STANDBY);
	if (error)
		dev_err(&haptics->client->dev,
			"Failed to enter standby mode: %d\n", error);

	gpiod_set_value(haptics->enable_gpio, 0);
}
/* Register patch applied before LRA auto-calibration */
static const struct reg_sequence drv260x_lra_cal_regs[] = {
	{ DRV260X_MODE, DRV260X_AUTO_CAL },
	{ DRV260X_CTRL3, DRV260X_NG_THRESH_2 | DRV260X_RTP_UNSIGNED_DATA },
	{ DRV260X_FEEDBACK_CTRL, DRV260X_FB_REG_LRA_MODE |
		DRV260X_BRAKE_FACTOR_4X | DRV260X_LOOP_GAIN_HIGH },
};

/* Register patch for LRA operation without auto-calibration */
static const struct reg_sequence drv260x_lra_init_regs[] = {
	{ DRV260X_MODE, DRV260X_RT_PLAYBACK },
	{ DRV260X_A_TO_V_CTRL, DRV260X_AUDIO_HAPTICS_PEAK_20MS |
		DRV260X_AUDIO_HAPTICS_FILTER_125HZ },
	{ DRV260X_A_TO_V_MIN_INPUT, DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT },
	{ DRV260X_A_TO_V_MAX_INPUT, DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT },
	{ DRV260X_A_TO_V_MIN_OUT, DRV260X_AUDIO_HAPTICS_MIN_OUT_VOLT },
	{ DRV260X_A_TO_V_MAX_OUT, DRV260X_AUDIO_HAPTICS_MAX_OUT_VOLT },
	{ DRV260X_FEEDBACK_CTRL, DRV260X_FB_REG_LRA_MODE |
		DRV260X_BRAKE_FACTOR_2X | DRV260X_LOOP_GAIN_MED |
		DRV260X_BEMF_GAIN_3 },
	{ DRV260X_CTRL1, DRV260X_STARTUP_BOOST },
	{ DRV260X_CTRL2, DRV260X_SAMP_TIME_250 },
	{ DRV260X_CTRL3, DRV260X_NG_THRESH_2 | DRV260X_RTP_UNSIGNED_DATA | DRV260X_ANALOG_IN },
	{ DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS },
};

/* Register patch applied before ERM auto-calibration */
static const struct reg_sequence drv260x_erm_cal_regs[] = {
	{ DRV260X_MODE, DRV260X_AUTO_CAL },
	{ DRV260X_A_TO_V_MIN_INPUT, DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT },
	{ DRV260X_A_TO_V_MAX_INPUT, DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT },
	{ DRV260X_A_TO_V_MIN_OUT, DRV260X_AUDIO_HAPTICS_MIN_OUT_VOLT },
	{ DRV260X_A_TO_V_MAX_OUT, DRV260X_AUDIO_HAPTICS_MAX_OUT_VOLT },
	{ DRV260X_FEEDBACK_CTRL, DRV260X_BRAKE_FACTOR_3X |
		DRV260X_LOOP_GAIN_MED | DRV260X_BEMF_GAIN_2 },
	{ DRV260X_CTRL1, DRV260X_STARTUP_BOOST },
	{ DRV260X_CTRL2, DRV260X_SAMP_TIME_250 | DRV260X_BLANK_TIME_75 |
		DRV260X_IDISS_TIME_75 },
	{ DRV260X_CTRL3, DRV260X_NG_THRESH_2 | DRV260X_RTP_UNSIGNED_DATA },
	{ DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS },
};
/*
 * One-time device init: program the rated and overdrive clamp voltages,
 * apply the mode-specific register patch, and for the calibration modes
 * kick auto-calibration via the GO bit, polling until the chip clears it.
 * Returns 0 on success or a negative errno.
 */
static int drv260x_init(struct drv260x_data *haptics)
{
	int error;
	unsigned int cal_buf;

	error = regmap_write(haptics->regmap,
			     DRV260X_RATED_VOLT, haptics->rated_voltage);
	if (error) {
		dev_err(&haptics->client->dev,
			"Failed to write DRV260X_RATED_VOLT register: %d\n",
			error);
		return error;
	}

	error = regmap_write(haptics->regmap,
			     DRV260X_OD_CLAMP_VOLT, haptics->overdrive_voltage);
	if (error) {
		dev_err(&haptics->client->dev,
			"Failed to write DRV260X_OD_CLAMP_VOLT register: %d\n",
			error);
		return error;
	}

	switch (haptics->mode) {
	case DRV260X_LRA_MODE:
		error = regmap_register_patch(haptics->regmap,
					      drv260x_lra_cal_regs,
					      ARRAY_SIZE(drv260x_lra_cal_regs));
		if (error) {
			dev_err(&haptics->client->dev,
				"Failed to write LRA calibration registers: %d\n",
				error);
			return error;
		}

		break;

	case DRV260X_ERM_MODE:
		error = regmap_register_patch(haptics->regmap,
					      drv260x_erm_cal_regs,
					      ARRAY_SIZE(drv260x_erm_cal_regs));
		if (error) {
			dev_err(&haptics->client->dev,
				"Failed to write ERM calibration registers: %d\n",
				error);
			return error;
		}

		error = regmap_update_bits(haptics->regmap, DRV260X_LIB_SEL,
					   DRV260X_LIB_SEL_MASK,
					   haptics->library);
		if (error) {
			dev_err(&haptics->client->dev,
				"Failed to write DRV260X_LIB_SEL register: %d\n",
				error);
			return error;
		}

		break;

	default:
		/* remaining mode (probe limits mode to the LRA..ERM range) */
		error = regmap_register_patch(haptics->regmap,
					      drv260x_lra_init_regs,
					      ARRAY_SIZE(drv260x_lra_init_regs));
		if (error) {
			dev_err(&haptics->client->dev,
				"Failed to write LRA init registers: %d\n",
				error);
			return error;
		}

		error = regmap_update_bits(haptics->regmap, DRV260X_LIB_SEL,
					   DRV260X_LIB_SEL_MASK,
					   haptics->library);
		if (error) {
			dev_err(&haptics->client->dev,
				"Failed to write DRV260X_LIB_SEL register: %d\n",
				error);
			return error;
		}

		/* No need to set GO bit here */
		return 0;
	}

	/* Start auto-calibration; the chip clears GO when it finishes */
	error = regmap_write(haptics->regmap, DRV260X_GO, DRV260X_GO_BIT);
	if (error) {
		dev_err(&haptics->client->dev,
			"Failed to write GO register: %d\n",
			error);
		return error;
	}

	/*
	 * NOTE(review): this poll has no timeout - a GO bit that never
	 * clears would loop forever. Confirm the hardware guarantees
	 * completion.
	 */
	do {
		usleep_range(15000, 15500);
		error = regmap_read(haptics->regmap, DRV260X_GO, &cal_buf);
		if (error) {
			dev_err(&haptics->client->dev,
				"Failed to read GO register: %d\n",
				error);
			return error;
		}
	} while (cal_buf == DRV260X_GO_BIT);

	return 0;
}
/* 8-bit register/value map; no register caching (REGCACHE_NONE) */
static const struct regmap_config drv260x_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = DRV260X_MAX_REG,
	.cache_type = REGCACHE_NONE,
};
/*
 * Probe: read and validate the mode/library device properties, compute
 * the voltage register values, acquire regulator/GPIO, set up the
 * input/FF device, create the regmap, run device init and register the
 * input device. All resources are devm-managed.
 */
static int drv260x_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct drv260x_data *haptics;
	u32 voltage;
	int error;

	haptics = devm_kzalloc(dev, sizeof(*haptics), GFP_KERNEL);
	if (!haptics)
		return -ENOMEM;

	error = device_property_read_u32(dev, "mode", &haptics->mode);
	if (error) {
		dev_err(dev, "Can't fetch 'mode' property: %d\n", error);
		return error;
	}

	if (haptics->mode < DRV260X_LRA_MODE ||
	    haptics->mode > DRV260X_ERM_MODE) {
		dev_err(dev, "Vibrator mode is invalid: %i\n", haptics->mode);
		return -EINVAL;
	}

	error = device_property_read_u32(dev, "library-sel", &haptics->library);
	if (error) {
		dev_err(dev, "Can't fetch 'library-sel' property: %d\n", error);
		return error;
	}

	if (haptics->library < DRV260X_LIB_EMPTY ||
	    haptics->library > DRV260X_ERM_LIB_F) {
		dev_err(dev,
			"Library value is invalid: %i\n", haptics->library);
		return -EINVAL;
	}

	/* mode and library must agree: LRA mode needs an LRA library... */
	if (haptics->mode == DRV260X_LRA_MODE &&
	    haptics->library != DRV260X_LIB_EMPTY &&
	    haptics->library != DRV260X_LIB_LRA) {
		dev_err(dev, "LRA Mode with ERM Library mismatch\n");
		return -EINVAL;
	}

	/* ...and ERM mode needs an ERM library */
	if (haptics->mode == DRV260X_ERM_MODE &&
	    (haptics->library == DRV260X_LIB_EMPTY ||
	     haptics->library == DRV260X_LIB_LRA)) {
		dev_err(dev, "ERM Mode with LRA Library mismatch\n");
		return -EINVAL;
	}

	/* Voltage properties are optional; fall back to defaults */
	error = device_property_read_u32(dev, "vib-rated-mv", &voltage);
	haptics->rated_voltage = error ? DRV260X_DEF_RATED_VOLT :
					 drv260x_calculate_voltage(voltage);

	error = device_property_read_u32(dev, "vib-overdrive-mv", &voltage);
	haptics->overdrive_voltage = error ? DRV260X_DEF_OD_CLAMP_VOLT :
					     drv260x_calculate_voltage(voltage);

	haptics->regulator = devm_regulator_get(dev, "vbat");
	if (IS_ERR(haptics->regulator)) {
		error = PTR_ERR(haptics->regulator);
		dev_err(dev, "unable to get regulator, error: %d\n", error);
		return error;
	}

	haptics->enable_gpio = devm_gpiod_get_optional(dev, "enable",
						       GPIOD_OUT_HIGH);
	if (IS_ERR(haptics->enable_gpio))
		return PTR_ERR(haptics->enable_gpio);

	haptics->input_dev = devm_input_allocate_device(dev);
	if (!haptics->input_dev) {
		dev_err(dev, "Failed to allocate input device\n");
		return -ENOMEM;
	}

	haptics->input_dev->name = "drv260x:haptics";
	haptics->input_dev->close = drv260x_close;
	input_set_drvdata(haptics->input_dev, haptics);
	input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);

	error = input_ff_create_memless(haptics->input_dev, NULL,
					drv260x_haptics_play);
	if (error) {
		dev_err(dev, "input_ff_create() failed: %d\n", error);
		return error;
	}

	INIT_WORK(&haptics->work, drv260x_worker);

	haptics->client = client;
	i2c_set_clientdata(client, haptics);

	haptics->regmap = devm_regmap_init_i2c(client, &drv260x_regmap_config);
	if (IS_ERR(haptics->regmap)) {
		error = PTR_ERR(haptics->regmap);
		dev_err(dev, "Failed to allocate register map: %d\n", error);
		return error;
	}

	error = drv260x_init(haptics);
	if (error) {
		dev_err(dev, "Device init failed: %d\n", error);
		return error;
	}

	/* Register last so callbacks only fire on a fully set-up device */
	error = input_register_device(haptics->input_dev);
	if (error) {
		dev_err(dev, "couldn't register input device: %d\n", error);
		return error;
	}

	return 0;
}
/*
 * System suspend: if the input device is open, enter standby, drop the
 * enable GPIO and disable the supply; undo standby if the regulator
 * cannot be disabled.
 */
static int drv260x_suspend(struct device *dev)
{
	struct drv260x_data *haptics = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&haptics->input_dev->mutex);

	if (input_device_enabled(haptics->input_dev)) {
		ret = regmap_update_bits(haptics->regmap,
					 DRV260X_MODE,
					 DRV260X_STANDBY_MASK,
					 DRV260X_STANDBY);
		if (ret) {
			dev_err(dev, "Failed to set standby mode\n");
			goto out;
		}

		gpiod_set_value(haptics->enable_gpio, 0);

		ret = regulator_disable(haptics->regulator);
		if (ret) {
			dev_err(dev, "Failed to disable regulator\n");
			/* re-clear standby so the device stays usable */
			regmap_update_bits(haptics->regmap,
					   DRV260X_MODE,
					   DRV260X_STANDBY_MASK, 0);
		}
	}

out:
	mutex_unlock(&haptics->input_dev->mutex);
	return ret;
}
/*
 * System resume: mirror of drv260x_suspend() - re-enable the supply,
 * clear standby and raise the enable GPIO, unwinding on failure.
 */
static int drv260x_resume(struct device *dev)
{
	struct drv260x_data *haptics = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&haptics->input_dev->mutex);

	if (input_device_enabled(haptics->input_dev)) {
		ret = regulator_enable(haptics->regulator);
		if (ret) {
			dev_err(dev, "Failed to enable regulator\n");
			goto out;
		}

		ret = regmap_update_bits(haptics->regmap,
					 DRV260X_MODE,
					 DRV260X_STANDBY_MASK, 0);
		if (ret) {
			dev_err(dev, "Failed to unset standby mode\n");
			/* standby clear failed: drop the supply again */
			regulator_disable(haptics->regulator);
			goto out;
		}

		gpiod_set_value(haptics->enable_gpio, 1);
	}

out:
	mutex_unlock(&haptics->input_dev->mutex);
	return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(drv260x_pm_ops, drv260x_suspend, drv260x_resume);

/* I2C device-id table */
static const struct i2c_device_id drv260x_id[] = {
	{ "drv2605l", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, drv260x_id);

/* Device-tree match table (whole DRV260x family) */
static const struct of_device_id drv260x_of_match[] = {
	{ .compatible = "ti,drv2604", },
	{ .compatible = "ti,drv2604l", },
	{ .compatible = "ti,drv2605", },
	{ .compatible = "ti,drv2605l", },
	{ }
};
MODULE_DEVICE_TABLE(of, drv260x_of_match);

static struct i2c_driver drv260x_driver = {
	.probe = drv260x_probe,
	.driver = {
		.name = "drv260x-haptics",
		.of_match_table = drv260x_of_match,
		.pm = pm_sleep_ptr(&drv260x_pm_ops),
	},
	.id_table = drv260x_id,
};
module_i2c_driver(drv260x_driver);

MODULE_DESCRIPTION("TI DRV260x haptics driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Murphy <[email protected]>");
|
linux-master
|
drivers/input/misc/drv260x.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* CPCAP Power Button Input Driver
*
* Copyright (C) 2017 Sebastian Reichel <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/mfd/motorola-cpcap.h>
#define CPCAP_IRQ_ON 23
#define CPCAP_IRQ_ON_BITMASK (1 << (CPCAP_IRQ_ON % 16))
/*
 * Per-device state: regmap of the parent CPCAP MFD, the input device
 * used to report KEY_POWER, and the platform device for logging/wakeup.
 */
struct cpcap_power_button {
	struct regmap *regmap;
	struct input_dev *idev;
	struct device *dev;
};
/*
 * IRQ handler: sense the current button level via the CPCAP virtual IRQ,
 * signal a wakeup event and report KEY_POWER with that level. Returns
 * IRQ_HANDLED even when the sense read fails (failure is only logged).
 */
static irqreturn_t powerbutton_irq(int irq, void *_button)
{
	struct cpcap_power_button *button = _button;
	int val;

	val = cpcap_sense_virq(button->regmap, irq);
	if (val < 0) {
		dev_err(button->dev, "irq read failed: %d", val);
		return IRQ_HANDLED;
	}

	pm_wakeup_event(button->dev, 0);
	input_report_key(button->idev, KEY_POWER, val);
	input_sync(button->idev);

	return IRQ_HANDLED;
}
/*
 * Probe: fetch the IRQ and the parent MFD's regmap, set up the KEY_POWER
 * input device, request the threaded IRQ and mark the device as a wakeup
 * source. devm_kmalloc (not kzalloc) is fine here: every member of
 * struct cpcap_power_button is assigned before use.
 */
static int cpcap_power_button_probe(struct platform_device *pdev)
{
	struct cpcap_power_button *button;
	int irq;
	int err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	button = devm_kmalloc(&pdev->dev, sizeof(*button), GFP_KERNEL);
	if (!button)
		return -ENOMEM;

	button->idev = devm_input_allocate_device(&pdev->dev);
	if (!button->idev)
		return -ENOMEM;

	/* regmap is owned by the parent CPCAP MFD device */
	button->regmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!button->regmap)
		return -ENODEV;

	button->dev = &pdev->dev;

	button->idev->name = "cpcap-pwrbutton";
	button->idev->phys = "cpcap-pwrbutton/input0";
	input_set_capability(button->idev, EV_KEY, KEY_POWER);

	err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
		powerbutton_irq, IRQF_ONESHOT, "cpcap_pwrbutton", button);
	if (err < 0) {
		dev_err(&pdev->dev, "IRQ request failed: %d\n", err);
		return err;
	}

	err = input_register_device(button->idev);
	if (err) {
		dev_err(&pdev->dev, "Input register failed: %d\n", err);
		return err;
	}

	device_init_wakeup(&pdev->dev, true);

	return 0;
}
#ifdef CONFIG_OF
/* Device-tree match table */
static const struct of_device_id cpcap_pwrbutton_dt_match_table[] = {
	{ .compatible = "motorola,cpcap-pwrbutton" },
	{},
};
MODULE_DEVICE_TABLE(of, cpcap_pwrbutton_dt_match_table);
#endif

static struct platform_driver cpcap_power_button_driver = {
	.probe = cpcap_power_button_probe,
	.driver = {
		.name = "cpcap-pwrbutton",
		.of_match_table = of_match_ptr(cpcap_pwrbutton_dt_match_table),
	},
};
module_platform_driver(cpcap_power_button_driver);

MODULE_ALIAS("platform:cpcap-pwrbutton");
MODULE_DESCRIPTION("CPCAP Power Button");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Reichel <[email protected]>");
|
linux-master
|
drivers/input/misc/cpcap-pwrbutton.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/log2.h>
#include <linux/of.h>
#define PON_CNTL_1 0x1C
#define PON_CNTL_PULL_UP BIT(7)
#define PON_CNTL_TRIG_DELAY_MASK (0x7)
#define PON_CNTL_1_PULL_UP_EN 0xe0
#define PON_CNTL_1_USB_PWR_EN 0x10
#define PON_CNTL_1_WD_EN_RESET 0x08
#define PM8058_SLEEP_CTRL 0x02b
#define PM8921_SLEEP_CTRL 0x10a
#define SLEEP_CTRL_SMPL_EN_RESET 0x04
/* Regulator master enable addresses */
#define REG_PM8058_VREG_EN_MSM 0x018
#define REG_PM8058_VREG_EN_GRP_5_4 0x1c8
/* Regulator control registers for shutdown/reset */
#define PM8058_S0_CTRL 0x004
#define PM8058_S1_CTRL 0x005
#define PM8058_S3_CTRL 0x111
#define PM8058_L21_CTRL 0x120
#define PM8058_L22_CTRL 0x121
#define PM8058_REGULATOR_ENABLE_MASK 0x80
#define PM8058_REGULATOR_ENABLE 0x80
#define PM8058_REGULATOR_DISABLE 0x00
#define PM8058_REGULATOR_PULL_DOWN_MASK 0x40
#define PM8058_REGULATOR_PULL_DOWN_EN 0x40
/* Buck CTRL register */
#define PM8058_SMPS_LEGACY_VREF_SEL 0x20
#define PM8058_SMPS_LEGACY_VPROG_MASK 0x1f
#define PM8058_SMPS_ADVANCED_BAND_MASK 0xC0
#define PM8058_SMPS_ADVANCED_BAND_SHIFT 6
#define PM8058_SMPS_ADVANCED_VPROG_MASK 0x3f
/* Buck TEST2 registers for shutdown/reset */
#define PM8058_S0_TEST2 0x084
#define PM8058_S1_TEST2 0x085
#define PM8058_S3_TEST2 0x11a
#define PM8058_REGULATOR_BANK_WRITE 0x80
#define PM8058_REGULATOR_BANK_MASK 0x70
#define PM8058_REGULATOR_BANK_SHIFT 4
#define PM8058_REGULATOR_BANK_SEL(n) ((n) << PM8058_REGULATOR_BANK_SHIFT)
/* Buck TEST2 register bank 1 */
#define PM8058_SMPS_LEGACY_VLOW_SEL 0x01
/* Buck TEST2 register bank 7 */
#define PM8058_SMPS_ADVANCED_MODE_MASK 0x02
#define PM8058_SMPS_ADVANCED_MODE 0x02
#define PM8058_SMPS_LEGACY_MODE 0x00
/**
 * struct pmic8xxx_pwrkey - pmic8xxx pwrkey information
 * @key_press_irq: key press irq number; armed as a wakeup source in the
 *                 suspend handler when the device may wake the system
 * @regmap: device regmap used for all PMIC register accesses
 * @shutdown_fn: chip-specific shutdown configuration function, taken from
 *               the OF match data; the bool argument is true on restart
 *               and false on power-off. May be NULL.
 */
struct pmic8xxx_pwrkey {
	int key_press_irq;
	struct regmap *regmap;
	int (*shutdown_fn)(struct pmic8xxx_pwrkey *, bool);
};
/* IRQ handler: power key pressed -- report KEY_POWER down. */
static irqreturn_t pwrkey_press_irq(int irq, void *_pwr)
{
	struct input_dev *input = _pwr;

	input_report_key(input, KEY_POWER, 1);
	input_sync(input);

	return IRQ_HANDLED;
}
/* IRQ handler: power key released -- report KEY_POWER up. */
static irqreturn_t pwrkey_release_irq(int irq, void *_pwr)
{
	struct input_dev *input = _pwr;

	input_report_key(input, KEY_POWER, 0);
	input_sync(input);

	return IRQ_HANDLED;
}
static int pmic8xxx_pwrkey_suspend(struct device *dev)
{
struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(pwrkey->key_press_irq);
return 0;
}
static int pmic8xxx_pwrkey_resume(struct device *dev)
{
struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
disable_irq_wake(pwrkey->key_press_irq);
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops,
pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume);
/*
 * Driver .shutdown hook: run the chip-specific shutdown_fn (if any) and
 * program PON_CNTL_1 so that the next PS_HOLD low transition performs
 * either a reset (watchdog-enable bit set) or a power-off (bit cleared).
 */
static void pmic8xxx_pwrkey_shutdown(struct platform_device *pdev)
{
	struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev);
	int error;
	u8 mask, val;
	/* Restart vs. power-off is derived from the global system state. */
	bool reset = system_state == SYSTEM_RESTART;

	if (pwrkey->shutdown_fn) {
		error = pwrkey->shutdown_fn(pwrkey, reset);
		if (error)
			return; /* chip-specific prep failed; leave PON_CNTL_1 alone */
	}

	/*
	 * Select action to perform (reset or shutdown) when PS_HOLD goes low.
	 * Also ensure that KPD, CBL0, and CBL1 pull ups are enabled and that
	 * USB charging is enabled.
	 */
	mask = PON_CNTL_1_PULL_UP_EN | PON_CNTL_1_USB_PWR_EN;
	mask |= PON_CNTL_1_WD_EN_RESET;
	val = mask;
	if (!reset)
		val &= ~PON_CNTL_1_WD_EN_RESET;

	regmap_update_bits(pwrkey->regmap, PON_CNTL_1, mask, val);
}
/*
* Set an SMPS regulator to be disabled in its CTRL register, but enabled
* in the master enable register. Also set it's pull down enable bit.
* Take care to make sure that the output voltage doesn't change if switching
* from advanced mode to legacy mode.
*/
/*
 * Set an SMPS regulator to be disabled in its CTRL register, but enabled
 * in the master enable register. Also set its pull down enable bit.
 * Take care to make sure that the output voltage doesn't change if
 * switching from advanced mode to legacy mode.
 *
 * Fix: the register-read argument had been corrupted to a "registered
 * trademark" mojibake character; restored to the intended &reg.
 *
 * Returns 0 on success or a negative errno from the regmap accessors,
 * -EPERM if the regulator's band field says it is already disabled.
 */
static int pm8058_disable_smps_locally_set_pull_down(struct regmap *regmap,
		u16 ctrl_addr, u16 test2_addr, u16 master_enable_addr,
		u8 master_enable_bit)
{
	int error;
	u8 vref_sel, vlow_sel, band, vprog, bank;
	unsigned int reg;

	/* Select TEST2 bank 7, which holds the advanced/legacy mode bit. */
	bank = PM8058_REGULATOR_BANK_SEL(7);
	error = regmap_write(regmap, test2_addr, bank);
	if (error)
		return error;

	error = regmap_read(regmap, test2_addr, &reg);
	if (error)
		return error;

	reg &= PM8058_SMPS_ADVANCED_MODE_MASK;

	/* Check if in advanced mode. */
	if (reg == PM8058_SMPS_ADVANCED_MODE) {
		/* Determine current output voltage. */
		error = regmap_read(regmap, ctrl_addr, &reg);
		if (error)
			return error;

		band = reg & PM8058_SMPS_ADVANCED_BAND_MASK;
		band >>= PM8058_SMPS_ADVANCED_BAND_SHIFT;
		/* Map the advanced-mode band to legacy VREF/VLOW selects. */
		switch (band) {
		case 3:
			vref_sel = 0;
			vlow_sel = 0;
			break;
		case 2:
			vref_sel = PM8058_SMPS_LEGACY_VREF_SEL;
			vlow_sel = 0;
			break;
		case 1:
			vref_sel = PM8058_SMPS_LEGACY_VREF_SEL;
			vlow_sel = PM8058_SMPS_LEGACY_VLOW_SEL;
			break;
		default:
			pr_err("%s: regulator already disabled\n", __func__);
			return -EPERM;
		}

		vprog = reg & PM8058_SMPS_ADVANCED_VPROG_MASK;
		/* Round up if fine step is in use. */
		vprog = (vprog + 1) >> 1;
		if (vprog > PM8058_SMPS_LEGACY_VPROG_MASK)
			vprog = PM8058_SMPS_LEGACY_VPROG_MASK;

		/* Set VLOW_SEL bit. */
		bank = PM8058_REGULATOR_BANK_SEL(1);
		error = regmap_write(regmap, test2_addr, bank);
		if (error)
			return error;

		error = regmap_update_bits(regmap, test2_addr,
			PM8058_REGULATOR_BANK_WRITE | PM8058_REGULATOR_BANK_MASK
				| PM8058_SMPS_LEGACY_VLOW_SEL,
			PM8058_REGULATOR_BANK_WRITE |
			PM8058_REGULATOR_BANK_SEL(1) | vlow_sel);
		if (error)
			return error;

		/* Switch to legacy mode */
		bank = PM8058_REGULATOR_BANK_SEL(7);
		error = regmap_write(regmap, test2_addr, bank);
		if (error)
			return error;
		error = regmap_update_bits(regmap, test2_addr,
					   PM8058_REGULATOR_BANK_WRITE |
					   PM8058_REGULATOR_BANK_MASK |
					   PM8058_SMPS_ADVANCED_MODE_MASK,
					   PM8058_REGULATOR_BANK_WRITE |
					   PM8058_REGULATOR_BANK_SEL(7) |
					   PM8058_SMPS_LEGACY_MODE);
		if (error)
			return error;

		/* Enable locally, enable pull down, keep voltage the same. */
		error = regmap_update_bits(regmap, ctrl_addr,
			PM8058_REGULATOR_ENABLE_MASK |
			PM8058_REGULATOR_PULL_DOWN_MASK |
			PM8058_SMPS_LEGACY_VREF_SEL |
			PM8058_SMPS_LEGACY_VPROG_MASK,
			PM8058_REGULATOR_ENABLE | PM8058_REGULATOR_PULL_DOWN_EN
				| vref_sel | vprog);
		if (error)
			return error;
	}

	/* Enable in master control register. */
	error = regmap_update_bits(regmap, master_enable_addr,
				   master_enable_bit, master_enable_bit);
	if (error)
		return error;

	/* Disable locally and enable pull down. */
	return regmap_update_bits(regmap, ctrl_addr,
		PM8058_REGULATOR_ENABLE_MASK | PM8058_REGULATOR_PULL_DOWN_MASK,
		PM8058_REGULATOR_DISABLE | PM8058_REGULATOR_PULL_DOWN_EN);
}
/*
 * Disable an LDO locally (CTRL register) while keeping it enabled in the
 * master enable register, and enable its pull down. Counterpart of
 * pm8058_disable_smps_locally_set_pull_down() for LDOs, which need no
 * mode/voltage fix-up. Returns 0 or a negative regmap error.
 */
static int pm8058_disable_ldo_locally_set_pull_down(struct regmap *regmap,
		u16 ctrl_addr, u16 master_enable_addr, u8 master_enable_bit)
{
	int error;

	/* Enable LDO in master control register. */
	error = regmap_update_bits(regmap, master_enable_addr,
				   master_enable_bit, master_enable_bit);
	if (error)
		return error;

	/* Disable LDO in CTRL register and set pull down */
	return regmap_update_bits(regmap, ctrl_addr,
		PM8058_REGULATOR_ENABLE_MASK | PM8058_REGULATOR_PULL_DOWN_MASK,
		PM8058_REGULATOR_DISABLE | PM8058_REGULATOR_PULL_DOWN_EN);
}
/*
 * PM8058-specific shutdown preparation (used as ->shutdown_fn).
 * On power-off, puts key rails into a safe pulled-down state; in both
 * cases fixes up LDO22 and programs the SMPL bit according to @reset.
 * Returns 0 or a negative regmap error.
 */
static int pm8058_pwrkey_shutdown(struct pmic8xxx_pwrkey *pwrkey, bool reset)
{
	int error;
	struct regmap *regmap = pwrkey->regmap;
	u8 mask, val;

	/* When shutting down, enable active pulldowns on important rails. */
	if (!reset) {
		/* Disable SMPS's 0,1,3 locally and set pulldown enable bits. */
		/* NOTE: return values deliberately ignored -- best effort on shutdown. */
		pm8058_disable_smps_locally_set_pull_down(regmap,
			PM8058_S0_CTRL, PM8058_S0_TEST2,
			REG_PM8058_VREG_EN_MSM, BIT(7));
		pm8058_disable_smps_locally_set_pull_down(regmap,
			PM8058_S1_CTRL, PM8058_S1_TEST2,
			REG_PM8058_VREG_EN_MSM, BIT(6));
		pm8058_disable_smps_locally_set_pull_down(regmap,
			PM8058_S3_CTRL, PM8058_S3_TEST2,
			REG_PM8058_VREG_EN_GRP_5_4, BIT(7) | BIT(4));
		/* Disable LDO 21 locally and set pulldown enable bit. */
		pm8058_disable_ldo_locally_set_pull_down(regmap,
			PM8058_L21_CTRL, REG_PM8058_VREG_EN_GRP_5_4,
			BIT(1));
	}

	/*
	 * Fix-up: Set regulator LDO22 to 1.225 V in high power mode. Leave its
	 * pull-down state intact. This ensures a safe shutdown.
	 */
	error = regmap_update_bits(regmap, PM8058_L22_CTRL, 0xbf, 0x93);
	if (error)
		return error;

	/* Enable SMPL if resetting is desired */
	mask = SLEEP_CTRL_SMPL_EN_RESET;
	val = 0;
	if (reset)
		val = mask;
	return regmap_update_bits(regmap, PM8058_SLEEP_CTRL, mask, val);
}
/*
 * PM8921-specific shutdown preparation (used as ->shutdown_fn):
 * only the SMPL bit needs programming, set iff a reset is desired.
 */
static int pm8921_pwrkey_shutdown(struct pmic8xxx_pwrkey *pwrkey, bool reset)
{
	u8 mask = SLEEP_CTRL_SMPL_EN_RESET;
	u8 val = reset ? mask : 0;

	return regmap_update_bits(pwrkey->regmap, PM8921_SLEEP_CTRL,
				  mask, val);
}
/*
 * Probe: read DT properties, program the trigger delay and pull-up in
 * PON_CNTL_1, register the input device and the press/release IRQs.
 *
 * Fix: the return values of platform_get_irq() were never checked; a
 * missing IRQ resource previously flowed (as a negative number) into
 * devm_request_irq(). Bail out early with the error code instead.
 */
static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
{
	struct input_dev *pwr;
	int key_release_irq = platform_get_irq(pdev, 0);
	int key_press_irq = platform_get_irq(pdev, 1);
	int err;
	unsigned int delay;
	unsigned int pon_cntl;
	struct regmap *regmap;
	struct pmic8xxx_pwrkey *pwrkey;
	u32 kpd_delay;
	bool pull_up;

	/* platform_get_irq() already logged the failure; just propagate. */
	if (key_release_irq < 0)
		return key_release_irq;
	if (key_press_irq < 0)
		return key_press_irq;

	/* Debounce defaults to 15625 us when the DT property is absent. */
	if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
		kpd_delay = 15625;

	/* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
	if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
		dev_err(&pdev->dev, "invalid power key trigger delay\n");
		return -EINVAL;
	}

	pull_up = of_property_read_bool(pdev->dev.of_node, "pull-up");

	regmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!regmap) {
		dev_err(&pdev->dev, "failed to locate regmap for the device\n");
		return -ENODEV;
	}

	pwrkey = devm_kzalloc(&pdev->dev, sizeof(*pwrkey), GFP_KERNEL);
	if (!pwrkey)
		return -ENOMEM;

	pwrkey->shutdown_fn = of_device_get_match_data(&pdev->dev);
	pwrkey->regmap = regmap;
	pwrkey->key_press_irq = key_press_irq;

	pwr = devm_input_allocate_device(&pdev->dev);
	if (!pwr) {
		dev_dbg(&pdev->dev, "Can't allocate power button\n");
		return -ENOMEM;
	}

	input_set_capability(pwr, EV_KEY, KEY_POWER);

	pwr->name = "pmic8xxx_pwrkey";
	pwr->phys = "pmic8xxx_pwrkey/input0";

	/* Convert microseconds to the 3-bit log2 delay field. */
	delay = (kpd_delay << 6) / USEC_PER_SEC;
	delay = ilog2(delay);

	err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
	if (err < 0) {
		dev_err(&pdev->dev, "failed reading PON_CNTL_1 err=%d\n", err);
		return err;
	}

	pon_cntl &= ~PON_CNTL_TRIG_DELAY_MASK;
	pon_cntl |= (delay & PON_CNTL_TRIG_DELAY_MASK);
	if (pull_up)
		pon_cntl |= PON_CNTL_PULL_UP;
	else
		pon_cntl &= ~PON_CNTL_PULL_UP;

	err = regmap_write(regmap, PON_CNTL_1, pon_cntl);
	if (err < 0) {
		dev_err(&pdev->dev, "failed writing PON_CNTL_1 err=%d\n", err);
		return err;
	}

	err = devm_request_irq(&pdev->dev, key_press_irq, pwrkey_press_irq,
			       IRQF_TRIGGER_RISING,
			       "pmic8xxx_pwrkey_press", pwr);
	if (err) {
		dev_err(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
			key_press_irq, err);
		return err;
	}

	err = devm_request_irq(&pdev->dev, key_release_irq, pwrkey_release_irq,
			       IRQF_TRIGGER_RISING,
			       "pmic8xxx_pwrkey_release", pwr);
	if (err) {
		dev_err(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
			key_release_irq, err);
		return err;
	}

	err = input_register_device(pwr);
	if (err) {
		dev_err(&pdev->dev, "Can't register power key: %d\n", err);
		return err;
	}

	platform_set_drvdata(pdev, pwrkey);
	device_init_wakeup(&pdev->dev, 1);

	return 0;
}
/*
 * OF match table; .data carries the chip-specific shutdown_fn picked up
 * via of_device_get_match_data() in probe.
 */
static const struct of_device_id pm8xxx_pwr_key_id_table[] = {
	{ .compatible = "qcom,pm8058-pwrkey", .data = &pm8058_pwrkey_shutdown },
	{ .compatible = "qcom,pm8921-pwrkey", .data = &pm8921_pwrkey_shutdown },
	{ }
};
MODULE_DEVICE_TABLE(of, pm8xxx_pwr_key_id_table);

static struct platform_driver pmic8xxx_pwrkey_driver = {
	.probe = pmic8xxx_pwrkey_probe,
	.shutdown = pmic8xxx_pwrkey_shutdown,
	.driver = {
		.name = "pm8xxx-pwrkey",
		.pm = pm_sleep_ptr(&pm8xxx_pwr_key_pm_ops),
		.of_match_table = pm8xxx_pwr_key_id_table,
	},
};
module_platform_driver(pmic8xxx_pwrkey_driver);

MODULE_ALIAS("platform:pmic8xxx_pwrkey");
MODULE_DESCRIPTION("PMIC8XXX Power Key driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Trilok Soni <[email protected]>");
|
linux-master
|
drivers/input/misc/pmic8xxx-pwrkey.c
|
// SPDX-License-Identifier: GPL-2.0+
/*
* GPIO vibrator driver
*
* Copyright (C) 2019 Luca Weiss <[email protected]>
*
* Based on PWM vibrator driver:
* Copyright (C) 2017 Collabora Ltd.
*
* Based on previous work from:
* Copyright (C) 2012 Dmitry Torokhov <[email protected]>
*
* Based on PWM beeper driver:
* Copyright (C) 2010, Lars-Peter Clausen <[email protected]>
*/
#include <linux/gpio/consumer.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
/*
 * struct gpio_vibrator - per-device state for the GPIO vibrator
 * @input:     FF-capable input device registered in probe
 * @gpio:      "enable" GPIO that switches the motor on/off
 * @vcc:       supply regulator, enabled only while vibrating
 * @play_work: deferred worker that applies @running to the hardware
 *             (FF callbacks may not sleep, regulator/gpiod calls may)
 * @running:   desired motor state as requested by the FF effect
 * @vcc_on:    tracks whether @vcc is currently enabled by us
 */
struct gpio_vibrator {
	struct input_dev *input;
	struct gpio_desc *gpio;
	struct regulator *vcc;

	struct work_struct play_work;
	bool running;
	bool vcc_on;
};
/* Power the rumble motor: enable VCC (once) and drive the enable GPIO high. */
static int gpio_vibrator_start(struct gpio_vibrator *vibrator)
{
	struct device *dev = vibrator->input->dev.parent;
	int err;

	if (!vibrator->vcc_on) {
		err = regulator_enable(vibrator->vcc);
		if (err) {
			dev_err(dev, "failed to enable regulator: %d\n", err);
			return err;
		}
		vibrator->vcc_on = true;
	}

	gpiod_set_value_cansleep(vibrator->gpio, 1);

	return 0;
}
/* Stop the motor and drop the supply if we were holding it enabled. */
static void gpio_vibrator_stop(struct gpio_vibrator *vibrator)
{
	gpiod_set_value_cansleep(vibrator->gpio, 0);

	if (!vibrator->vcc_on)
		return;

	regulator_disable(vibrator->vcc);
	vibrator->vcc_on = false;
}
/* Worker: apply the most recently requested state to the hardware. */
static void gpio_vibrator_play_work(struct work_struct *work)
{
	struct gpio_vibrator *vib =
		container_of(work, struct gpio_vibrator, play_work);

	if (!vib->running)
		gpio_vibrator_stop(vib);
	else
		gpio_vibrator_start(vib);
}
/*
 * FF "play" callback: a nonzero rumble magnitude (strong preferred,
 * weak as fallback) turns the motor on; zero turns it off. The actual
 * GPIO/regulator work is deferred because this callback may not sleep.
 */
static int gpio_vibrator_play_effect(struct input_dev *dev, void *data,
				     struct ff_effect *effect)
{
	struct gpio_vibrator *vib = input_get_drvdata(dev);
	int magnitude = effect->u.rumble.strong_magnitude;

	if (!magnitude)
		magnitude = effect->u.rumble.weak_magnitude;

	vib->running = magnitude;
	schedule_work(&vib->play_work);

	return 0;
}
/* Input close: flush pending work, silence the motor, clear the state. */
static void gpio_vibrator_close(struct input_dev *input)
{
	struct gpio_vibrator *vib = input_get_drvdata(input);

	cancel_work_sync(&vib->play_work);
	gpio_vibrator_stop(vib);
	vib->running = false;
}
/*
 * Probe: acquire the vcc regulator and enable GPIO, set up the FF
 * input device and register it. All resources are devm-managed, so
 * there is no explicit error-unwind or remove path.
 */
static int gpio_vibrator_probe(struct platform_device *pdev)
{
	struct gpio_vibrator *vibrator;
	int err;

	vibrator = devm_kzalloc(&pdev->dev, sizeof(*vibrator), GFP_KERNEL);
	if (!vibrator)
		return -ENOMEM;

	vibrator->input = devm_input_allocate_device(&pdev->dev);
	if (!vibrator->input)
		return -ENOMEM;

	vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc");
	if (IS_ERR(vibrator->vcc))
		return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->vcc),
				     "Failed to request regulator\n");

	/* Requested LOW so the motor stays off until an effect is played. */
	vibrator->gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(vibrator->gpio))
		return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->gpio),
				     "Failed to request main gpio\n");

	INIT_WORK(&vibrator->play_work, gpio_vibrator_play_work);

	vibrator->input->name = "gpio-vibrator";
	vibrator->input->id.bustype = BUS_HOST;
	vibrator->input->close = gpio_vibrator_close;

	input_set_drvdata(vibrator->input, vibrator);
	input_set_capability(vibrator->input, EV_FF, FF_RUMBLE);

	err = input_ff_create_memless(vibrator->input, NULL,
				      gpio_vibrator_play_effect);
	if (err) {
		dev_err(&pdev->dev, "Couldn't create FF dev: %d\n", err);
		return err;
	}

	err = input_register_device(vibrator->input);
	if (err) {
		dev_err(&pdev->dev, "Couldn't register input dev: %d\n", err);
		return err;
	}

	platform_set_drvdata(pdev, vibrator);

	return 0;
}
/*
 * Suspend: make sure no worker is in flight and the motor is off.
 * ->running is kept so resume can restore the previous state.
 */
static int gpio_vibrator_suspend(struct device *dev)
{
	struct gpio_vibrator *vib =
		platform_get_drvdata(to_platform_device(dev));

	cancel_work_sync(&vib->play_work);
	if (vib->running)
		gpio_vibrator_stop(vib);

	return 0;
}
/* Resume: restart the motor if it was requested on before suspend. */
static int gpio_vibrator_resume(struct device *dev)
{
	struct gpio_vibrator *vib =
		platform_get_drvdata(to_platform_device(dev));

	if (vib->running)
		gpio_vibrator_start(vib);

	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(gpio_vibrator_pm_ops,
				gpio_vibrator_suspend, gpio_vibrator_resume);

#ifdef CONFIG_OF
static const struct of_device_id gpio_vibra_dt_match_table[] = {
	{ .compatible = "gpio-vibrator" },
	{}
};
MODULE_DEVICE_TABLE(of, gpio_vibra_dt_match_table);
#endif

/* Platform driver; binds via DT "gpio-vibrator" or the platform name. */
static struct platform_driver gpio_vibrator_driver = {
	.probe	= gpio_vibrator_probe,
	.driver	= {
		.name	= "gpio-vibrator",
		.pm	= pm_sleep_ptr(&gpio_vibrator_pm_ops),
		.of_match_table = of_match_ptr(gpio_vibra_dt_match_table),
	},
};
module_platform_driver(gpio_vibrator_driver);

MODULE_AUTHOR("Luca Weiss <[email protected]>");
MODULE_DESCRIPTION("GPIO vibrator driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:gpio-vibrator");
|
linux-master
|
drivers/input/misc/gpio-vibra.c
|
// SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0-or-later
/*
* Dell Wyse 3020 a.k.a. "Ariel" Power Button Driver
*
* Copyright (C) 2020 Lubomir Rintel
*/
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
/*
 * EC response header byte layout (as decoded by the macros below):
 * bits 0-1: rolling message counter, bits 2-3: payload size,
 * bits 4-7: message type.
 */
#define RESP_COUNTER(response)	(response.header & 0x3)
#define RESP_SIZE(response)	((response.header >> 2) & 0x3)
#define RESP_TYPE(response)	((response.header >> 4) & 0xf)

/* Raw 5-byte response as clocked out of the EC over SPI. */
struct ec_input_response {
	u8 reserved;
	u8 header;
	u8 data[3];
} __packed;

/*
 * Driver state: the SPI client, the input device reporting KEY_POWER,
 * and the last seen message counter (used to detect stale reads).
 */
struct ariel_pwrbutton {
	struct spi_device *client;
	struct input_dev *input;
	u8 msg_counter;
};
/*
 * Perform one full-duplex SPI transfer: clock out the fixed read-request
 * bytes while clocking the EC's response into @response. The request and
 * response are the same length, enforced at compile time.
 * Returns 0 or a negative errno from spi_sync_transfer().
 */
static int ec_input_read(struct ariel_pwrbutton *priv,
			 struct ec_input_response *response)
{
	u8 read_request[] = { 0x00, 0x5a, 0xa5, 0x00, 0x00 };
	struct spi_device *spi = priv->client;
	struct spi_transfer t = {
		.tx_buf = read_request,
		.rx_buf = response,
		.len = sizeof(read_request),
	};

	compiletime_assert(sizeof(read_request) == sizeof(*response),
			   "SPI xfer request/response size mismatch");

	return spi_sync_transfer(spi, &t, 1);
}
/*
 * Threaded IRQ handler: read the EC response, discard stale or non-keyboard
 * messages, and translate scan codes 0x74/0xf4 into KEY_POWER press/release.
 * Always returns IRQ_HANDLED; errors are only logged.
 */
static irqreturn_t ec_input_interrupt(int irq, void *dev_id)
{
	struct ariel_pwrbutton *priv = dev_id;
	struct spi_device *spi = priv->client;
	struct ec_input_response response;
	int error;
	int i;

	error = ec_input_read(priv, &response);
	if (error < 0) {
		dev_err(&spi->dev, "EC read failed: %d\n", error);
		goto out;
	}

	/* Unchanged counter means we re-read the previous message. */
	if (priv->msg_counter == RESP_COUNTER(response)) {
		dev_warn(&spi->dev, "No new data to read?\n");
		goto out;
	}

	priv->msg_counter = RESP_COUNTER(response);

	/* Only types 0x3 and 0xc carry keyboard data we care about. */
	if (RESP_TYPE(response) != 0x3 && RESP_TYPE(response) != 0xc) {
		dev_dbg(&spi->dev, "Ignoring message that's not kbd data\n");
		goto out;
	}

	for (i = 0; i < RESP_SIZE(response); i++) {
		switch (response.data[i]) {
		case 0x74:
			/* power button pressed */
			input_report_key(priv->input, KEY_POWER, 1);
			input_sync(priv->input);
			break;
		case 0xf4:
			/* power button released */
			input_report_key(priv->input, KEY_POWER, 0);
			input_sync(priv->input);
			break;
		default:
			dev_dbg(&spi->dev, "Unknown scan code: %02x\n",
				response.data[i]);
		}
	}

out:
	return IRQ_HANDLED;
}
/*
 * Probe: register the KEY_POWER input device, do one initial EC read to
 * seed the message counter, then request the threaded IRQ. Everything is
 * devm-managed, so no remove callback is needed.
 */
static int ariel_pwrbutton_probe(struct spi_device *spi)
{
	struct ec_input_response response;
	struct ariel_pwrbutton *priv;
	int error;

	if (!spi->irq) {
		dev_err(&spi->dev, "Missing IRQ.\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->client = spi;
	spi_set_drvdata(spi, priv);

	priv->input = devm_input_allocate_device(&spi->dev);
	if (!priv->input)
		return -ENOMEM;
	priv->input->name = "Power Button";
	priv->input->dev.parent = &spi->dev;
	input_set_capability(priv->input, EV_KEY, KEY_POWER);
	error = input_register_device(priv->input);
	if (error) {
		dev_err(&spi->dev, "error registering input device: %d\n", error);
		return error;
	}

	/* Initial read seeds msg_counter so the IRQ handler can spot stale data. */
	error = ec_input_read(priv, &response);
	if (error < 0) {
		dev_err(&spi->dev, "EC read failed: %d\n", error);
		return error;
	}
	priv->msg_counter = RESP_COUNTER(response);

	/* Threaded handler: the SPI transfer in the handler may sleep. */
	error = devm_request_threaded_irq(&spi->dev, spi->irq, NULL,
					  ec_input_interrupt,
					  IRQF_ONESHOT,
					  "Ariel EC Input", priv);
	if (error) {
		dev_err(&spi->dev, "Failed to request IRQ %d: %d\n",
			spi->irq, error);
		return error;
	}

	return 0;
}
static const struct of_device_id ariel_pwrbutton_of_match[] = {
	{ .compatible = "dell,wyse-ariel-ec-input" },
	{ }
};
MODULE_DEVICE_TABLE(of, ariel_pwrbutton_of_match);

static const struct spi_device_id ariel_pwrbutton_spi_ids[] = {
	{ .name = "wyse-ariel-ec-input" },
	{ }
};
MODULE_DEVICE_TABLE(spi, ariel_pwrbutton_spi_ids);

/* SPI driver; matches via DT compatible or plain SPI device id. */
static struct spi_driver ariel_pwrbutton_driver = {
	.driver = {
		.name = "dell-wyse-ariel-ec-input",
		.of_match_table = ariel_pwrbutton_of_match,
	},
	.probe = ariel_pwrbutton_probe,
	.id_table = ariel_pwrbutton_spi_ids,
};
module_spi_driver(ariel_pwrbutton_driver);

MODULE_AUTHOR("Lubomir Rintel <[email protected]>");
MODULE_DESCRIPTION("Dell Wyse 3020 Power Button Input Driver");
MODULE_LICENSE("Dual BSD/GPL");
|
linux-master
|
drivers/input/misc/ariel-pwrbutton.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ON pin driver for Dialog DA9052 PMICs
*
* Copyright(c) 2012 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <[email protected]>
*/
#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
/*
 * struct da9052_onkey - driver state
 * @da9052: parent MFD device (register access and IRQ services)
 * @input:  input device reporting KEY_POWER
 * @work:   delayed worker polling the pin state, since no deassertion
 *          interrupt is generated by the hardware
 */
struct da9052_onkey {
	struct da9052 *da9052;
	struct input_dev *input;
	struct delayed_work work;
};
/*
 * Read STATUS_A and report the current ONKEY state. While the key stays
 * pressed, re-schedules itself every 50 ms to poll for the release,
 * because the hardware only interrupts on assertion.
 */
static void da9052_onkey_query(struct da9052_onkey *onkey)
{
	int ret;

	ret = da9052_reg_read(onkey->da9052, DA9052_STATUS_A_REG);
	if (ret < 0) {
		dev_err(onkey->da9052->dev,
			"Failed to read onkey event err=%d\n", ret);
	} else {
		/*
		 * Since interrupt for deassertion of ONKEY pin is not
		 * generated, onkey event state determines the onkey
		 * button state.
		 */
		bool pressed = !(ret & DA9052_STATUSA_NONKEY);

		input_report_key(onkey->input, KEY_POWER, pressed);
		input_sync(onkey->input);

		/*
		 * Interrupt is generated only when the ONKEY pin
		 * is asserted. Hence the deassertion of the pin
		 * is simulated through work queue.
		 */
		if (pressed)
			schedule_delayed_work(&onkey->work,
						msecs_to_jiffies(50));
	}
}
/* Delayed-work callback: re-poll the ONKEY pin state. */
static void da9052_onkey_work(struct work_struct *work)
{
	struct da9052_onkey *onkey =
		container_of(work, struct da9052_onkey, work.work);

	da9052_onkey_query(onkey);
}
/* NONKEY interrupt (assertion only): query and report the pin state. */
static irqreturn_t da9052_onkey_irq(int irq, void *data)
{
	da9052_onkey_query((struct da9052_onkey *)data);

	return IRQ_HANDLED;
}
/*
 * Probe: allocate state + input device (checked jointly), request the
 * NONKEY IRQ from the MFD core, and register the input device.
 * On failure, resources are released in reverse order via gotos.
 */
static int da9052_onkey_probe(struct platform_device *pdev)
{
	struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
	struct da9052_onkey *onkey;
	struct input_dev *input_dev;
	int error;

	if (!da9052) {
		dev_err(&pdev->dev, "Failed to get the driver's data\n");
		return -EINVAL;
	}

	/* Both allocations are checked together below. */
	onkey = kzalloc(sizeof(*onkey), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!onkey || !input_dev) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		error = -ENOMEM;
		goto err_free_mem;
	}

	onkey->input = input_dev;
	onkey->da9052 = da9052;
	INIT_DELAYED_WORK(&onkey->work, da9052_onkey_work);

	input_dev->name = "da9052-onkey";
	input_dev->phys = "da9052-onkey/input0";
	input_dev->dev.parent = &pdev->dev;

	input_dev->evbit[0] = BIT_MASK(EV_KEY);
	__set_bit(KEY_POWER, input_dev->keybit);

	error = da9052_request_irq(onkey->da9052, DA9052_IRQ_NONKEY, "ONKEY",
			    da9052_onkey_irq, onkey);
	if (error < 0) {
		dev_err(onkey->da9052->dev,
			"Failed to register ONKEY IRQ: %d\n", error);
		goto err_free_mem;
	}

	error = input_register_device(onkey->input);
	if (error) {
		dev_err(&pdev->dev, "Unable to register input device, %d\n",
			error);
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, onkey);
	return 0;

err_free_irq:
	da9052_free_irq(onkey->da9052, DA9052_IRQ_NONKEY, onkey);
	cancel_delayed_work_sync(&onkey->work);
err_free_mem:
	input_free_device(input_dev);
	kfree(onkey);

	return error;
}
/*
 * Remove: free the IRQ first so no new work can be scheduled, then flush
 * the poller before unregistering and freeing everything.
 */
static int da9052_onkey_remove(struct platform_device *pdev)
{
	struct da9052_onkey *onkey = platform_get_drvdata(pdev);

	da9052_free_irq(onkey->da9052, DA9052_IRQ_NONKEY, onkey);
	cancel_delayed_work_sync(&onkey->work);

	input_unregister_device(onkey->input);
	kfree(onkey);

	return 0;
}
/* Platform driver glue; the MFD core instantiates "da9052-onkey". */
static struct platform_driver da9052_onkey_driver = {
	.probe	= da9052_onkey_probe,
	.remove	= da9052_onkey_remove,
	.driver = {
		.name	= "da9052-onkey",
	},
};
module_platform_driver(da9052_onkey_driver);

MODULE_AUTHOR("David Dajun Chen <[email protected]>");
MODULE_DESCRIPTION("Onkey driver for DA9052");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da9052-onkey");
|
linux-master
|
drivers/input/misc/da9052_onkey.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/* NXP PCF50633 Input Driver
*
* (C) 2006-2008 by Openmoko, Inc.
* Author: Balaji Rao <[email protected]>
* All rights reserved.
*
* Broken down from monstrous PCF50633 driver mainly by
* Harald Welte, Andy Green and Werner Almesberger
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/mfd/pcf50633/core.h>
/* OOCSTAT bit 0 reflects the ONKEY pin state. */
#define PCF50633_OOCSTAT_ONKEY	0x01
#define PCF50633_REG_OOCSTAT	0x12
#define PCF50633_REG_OOCMODE	0x10

/* Driver state: parent PMU and the input device reporting KEY_POWER. */
struct pcf50633_input {
	struct pcf50633 *pcf;
	struct input_dev *input_dev;
};
/*
 * Shared handler for the ONKEYF (falling) and ONKEYR (rising) PMU irqs.
 * The OOCSTAT ONKEY bit is cross-checked against which edge fired so a
 * single press/release event is reported, never both.
 */
static void
pcf50633_input_irq(int irq, void *data)
{
	struct pcf50633_input *input;
	int onkey_released;

	input = data;

	/* We report only one event depending on the key press status */
	onkey_released = pcf50633_reg_read(input->pcf, PCF50633_REG_OOCSTAT)
						& PCF50633_OOCSTAT_ONKEY;

	if (irq == PCF50633_IRQ_ONKEYF && !onkey_released)
		input_report_key(input->input_dev, KEY_POWER, 1);
	else if (irq == PCF50633_IRQ_ONKEYR && onkey_released)
		input_report_key(input->input_dev, KEY_POWER, 0);

	input_sync(input->input_dev);
}
/*
 * Probe: allocate state and input device, register the input device,
 * then hook the handler onto both ONKEY edge interrupts of the PMU.
 */
static int pcf50633_input_probe(struct platform_device *pdev)
{
	struct pcf50633_input *input;
	struct input_dev *input_dev;
	int ret;

	input = kzalloc(sizeof(*input), GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	input_dev = input_allocate_device();
	if (!input_dev) {
		kfree(input);
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, input);
	input->pcf = dev_to_pcf50633(pdev->dev.parent);
	input->input_dev = input_dev;

	input_dev->name = "PCF50633 PMU events";
	input_dev->id.bustype = BUS_I2C;
	input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_PWR);
	set_bit(KEY_POWER, input_dev->keybit);

	ret = input_register_device(input_dev);
	if (ret) {
		input_free_device(input_dev);
		kfree(input);
		return ret;
	}
	/* Rising and falling ONKEY edges share one handler. */
	pcf50633_register_irq(input->pcf, PCF50633_IRQ_ONKEYR,
				pcf50633_input_irq, input);
	pcf50633_register_irq(input->pcf, PCF50633_IRQ_ONKEYF,
				pcf50633_input_irq, input);

	return 0;
}
/* Remove: detach both ONKEY irqs before tearing down the input device. */
static int pcf50633_input_remove(struct platform_device *pdev)
{
	struct pcf50633_input *data = platform_get_drvdata(pdev);

	pcf50633_free_irq(data->pcf, PCF50633_IRQ_ONKEYR);
	pcf50633_free_irq(data->pcf, PCF50633_IRQ_ONKEYF);

	input_unregister_device(data->input_dev);
	kfree(data);

	return 0;
}
/* Platform driver glue; the PMU MFD core instantiates "pcf50633-input". */
static struct platform_driver pcf50633_input_driver = {
	.driver = {
		.name = "pcf50633-input",
	},
	.probe = pcf50633_input_probe,
	.remove = pcf50633_input_remove,
};
module_platform_driver(pcf50633_input_driver);

MODULE_AUTHOR("Balaji Rao <[email protected]>");
MODULE_DESCRIPTION("PCF50633 input driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pcf50633-input");
|
linux-master
|
drivers/input/misc/pcf50633-input.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AD714X CapTouch Programmable Controller driver (I2C bus)
*
* Copyright 2009-2011 Analog Devices Inc.
*/
#include <linux/input.h> /* BUS_I2C */
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pm.h>
#include "ad714x.h"
/*
 * Write one 16-bit register over I2C: big-endian register address
 * followed by big-endian data, staged in the chip's xfer_buf.
 * Returns 0 on success or the negative i2c_master_send() error.
 */
static int ad714x_i2c_write(struct ad714x_chip *chip,
			    unsigned short reg, unsigned short data)
{
	struct i2c_client *client = to_i2c_client(chip->dev);
	int error;

	chip->xfer_buf[0] = cpu_to_be16(reg);
	chip->xfer_buf[1] = cpu_to_be16(data);

	error = i2c_master_send(client, (u8 *)chip->xfer_buf,
				2 * sizeof(*chip->xfer_buf));
	if (unlikely(error < 0)) {
		dev_err(&client->dev, "I2C write error: %d\n", error);
		return error;
	}

	return 0;
}
/*
 * Read @len consecutive 16-bit registers starting at @reg: send the
 * big-endian register address, receive into xfer_buf, then convert each
 * word to host byte order into @data.
 * Returns 0 on success or the first negative I2C error encountered.
 */
static int ad714x_i2c_read(struct ad714x_chip *chip,
			   unsigned short reg, unsigned short *data, size_t len)
{
	struct i2c_client *client = to_i2c_client(chip->dev);
	int i;
	int error;

	chip->xfer_buf[0] = cpu_to_be16(reg);

	error = i2c_master_send(client, (u8 *)chip->xfer_buf,
				sizeof(*chip->xfer_buf));
	if (error >= 0)
		error = i2c_master_recv(client, (u8 *)chip->xfer_buf,
					len * sizeof(*chip->xfer_buf));

	if (unlikely(error < 0)) {
		dev_err(&client->dev, "I2C read error: %d\n", error);
		return error;
	}

	for (i = 0; i < len; i++)
		data[i] = be16_to_cpu(chip->xfer_buf[i]);

	return 0;
}
/* Bind: delegate to the bus-independent core with the I2C accessors. */
static int ad714x_i2c_probe(struct i2c_client *client)
{
	struct ad714x_chip *chip = ad714x_probe(&client->dev, BUS_I2C,
						client->irq,
						ad714x_i2c_read,
						ad714x_i2c_write);

	if (IS_ERR(chip))
		return PTR_ERR(chip);

	i2c_set_clientdata(client, chip);

	return 0;
}
/* All supported AD714x/AD7147/AD7148 variants share the same binding. */
static const struct i2c_device_id ad714x_id[] = {
	{ "ad7142_captouch", 0 },
	{ "ad7143_captouch", 0 },
	{ "ad7147_captouch", 0 },
	{ "ad7147a_captouch", 0 },
	{ "ad7148_captouch", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, ad714x_id);

static struct i2c_driver ad714x_i2c_driver = {
	.driver = {
		.name = "ad714x_captouch",
		.pm   = pm_sleep_ptr(&ad714x_pm),
	},
	.probe = ad714x_i2c_probe,
	.id_table = ad714x_id,
};
module_i2c_driver(ad714x_i2c_driver);

MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor I2C Bus Driver");
MODULE_AUTHOR("Barry Song <[email protected]>");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/ad714x-i2c.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Supports for the button array on SoC tablets originally running
* Windows 8.
*
* (C) Copyright 2014 Intel Corporation
*/
#include <linux/module.h>
#include <linux/input.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio_keys.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
/* Module override to force level-triggered IRQs (see dmi_use_low_level_irq). */
static bool use_low_level_irq;
module_param(use_low_level_irq, bool, 0444);
MODULE_PARM_DESC(use_low_level_irq, "Use low-level triggered IRQ instead of edge triggered");

/*
 * Description of one button: its name, index into the ACPI GPIO
 * resources, the input event it generates, and its electrical/wakeup
 * properties. Consumed when building gpio_keys platform data.
 */
struct soc_button_info {
	const char *name;
	int acpi_index;
	unsigned int event_type;
	unsigned int event_code;
	bool autorepeat;
	bool wakeup;
	bool active_low;
};

/* Per-ACPI-HID data: button table plus an optional applicability check. */
struct soc_device_data {
	const struct soc_button_info *button_info;
	int (*check)(struct device *dev);
};

/*
 * Some of the buttons like volume up/down are auto repeat, while others
 * are not. To support both, we register two platform devices, and put
 * buttons into them based on whether the key should be auto repeat.
 */
#define BUTTON_TYPES	2

struct soc_button_data {
	struct platform_device *children[BUTTON_TYPES];
};
/*
* Some 2-in-1s which use the soc_button_array driver have this ugly issue in
* their DSDT where the _LID method modifies the irq-type settings of the GPIOs
* used for the power and home buttons. The intend of this AML code is to
* disable these buttons when the lid is closed.
* The AML does this by directly poking the GPIO controllers registers. This is
* problematic because when re-enabling the irq, which happens whenever _LID
* gets called with the lid open (e.g. on boot and on resume), it sets the
* irq-type to IRQ_TYPE_LEVEL_LOW. Where as the gpio-keys driver programs the
* type to, and expects it to be, IRQ_TYPE_EDGE_BOTH.
* To work around this we don't set gpio_keys_button.gpio on these 2-in-1s,
* instead we get the irq for the GPIO ourselves, configure it as
* IRQ_TYPE_LEVEL_LOW (to match how the _LID AML code configures it) and pass
* the irq in gpio_keys_button.irq. Below is a list of affected devices.
*/
/* Matched with dmi_check_system() when deciding the IRQ trigger type. */
static const struct dmi_system_id dmi_use_low_level_irq[] = {
	{
		/*
		 * Acer Switch 10 SW5-012. _LID method messes with home- and
		 * power-button GPIO IRQ settings. When (re-)enabling the irq
		 * it ors in its own flags without clearing the previous set
		 * ones, leading to an irq-type of IRQ_TYPE_LEVEL_LOW |
		 * IRQ_TYPE_LEVEL_HIGH causing a continuous interrupt storm.
		 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
		},
	},
	{
		/* Acer Switch V 10 SW5-017, same issue as Acer Switch 10 SW5-012. */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
			DMI_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
		},
	},
	{
		/*
		 * Acer One S1003. _LID method messes with power-button GPIO
		 * IRQ settings, leading to a non working power-button.
		 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
			DMI_MATCH(DMI_PRODUCT_NAME, "One S1003"),
		},
	},
	{
		/*
		 * Lenovo Yoga Tab2 1051F/1051L, something messes with the home-button
		 * IRQ settings, leading to a non working home-button.
		 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "60073"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "1051"),
		},
	},
	{} /* Terminating entry */
};

/*
 * Some devices have a wrong entry which points to a GPIO which is
 * required in another driver, so this driver must not claim it.
 * driver_data holds the ACPI index of the entry to skip.
 */
static const struct dmi_system_id dmi_invalid_acpi_index[] = {
	{
		/*
		 * Lenovo Yoga Book X90F / X90L, the PNP0C40 home button entry
		 * points to a GPIO which is not a home button and which is
		 * required by the lenovo-yogabook driver.
		 */
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
		},
		.driver_data = (void *)1l,
	},
	{} /* Terminating entry */
};
/*
 * Get the Nth GPIO number from the ACPI object.
 * Resolves both the legacy GPIO number and the corresponding IRQ via
 * a temporary descriptor, which is released before returning.
 * Returns 0 on success or the gpiod_get_index() error.
 */
static int soc_button_lookup_gpio(struct device *dev, int acpi_index,
				  int *gpio_ret, int *irq_ret)
{
	struct gpio_desc *desc;

	desc = gpiod_get_index(dev, NULL, acpi_index, GPIOD_ASIS);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	*gpio_ret = desc_to_gpio(desc);
	/* May legitimately be negative; the caller checks for that. */
	*irq_ret = gpiod_to_irq(desc);

	/* Only the numbers are needed; drop the descriptor reference. */
	gpiod_put(desc);

	return 0;
}
/*
 * Build and register one "gpio-keys" child covering either the autorepeat
 * buttons (volume) or the non-autorepeat buttons (power/home/...).
 * Returns the new platform device or an ERR_PTR; -ENODEV means no button
 * of the requested kind had a usable GPIO.
 */
static struct platform_device *
soc_button_device_create(struct platform_device *pdev,
			 const struct soc_button_info *button_info,
			 bool autorepeat)
{
	const struct soc_button_info *info;
	struct platform_device *pd;
	struct gpio_keys_button *gpio_keys;
	struct gpio_keys_platform_data *gpio_keys_pdata;
	const struct dmi_system_id *dmi_id;
	int invalid_acpi_index = -1;
	int error, gpio, irq;
	int n_buttons = 0;

	/* First pass: count buttons of the requested kind. */
	for (info = button_info; info->name; info++)
		if (info->autorepeat == autorepeat)
			n_buttons++;

	/* Single allocation: pdata immediately followed by the button array. */
	gpio_keys_pdata = devm_kzalloc(&pdev->dev,
				       sizeof(*gpio_keys_pdata) +
					sizeof(*gpio_keys) * n_buttons,
				       GFP_KERNEL);
	if (!gpio_keys_pdata)
		return ERR_PTR(-ENOMEM);

	gpio_keys = (void *)(gpio_keys_pdata + 1);
	n_buttons = 0;

	/* Some boards have one bogus GPIO entry which must be skipped. */
	dmi_id = dmi_first_match(dmi_invalid_acpi_index);
	if (dmi_id)
		invalid_acpi_index = (long)dmi_id->driver_data;

	for (info = button_info; info->name; info++) {
		if (info->autorepeat != autorepeat)
			continue;

		if (info->acpi_index == invalid_acpi_index)
			continue;

		error = soc_button_lookup_gpio(&pdev->dev, info->acpi_index, &gpio, &irq);
		if (error || irq < 0) {
			/*
			 * Skip GPIO if not present. Note we deliberately
			 * ignore -EPROBE_DEFER errors here. On some devices
			 * Intel is using so called virtual GPIOs which are not
			 * GPIOs at all but some way for AML code to check some
			 * random status bits without need a custom opregion.
			 * In some cases the resources table we parse points to
			 * such a virtual GPIO, since these are not real GPIOs
			 * we do not have a driver for these so they will never
			 * show up, therefore we ignore -EPROBE_DEFER.
			 */
			continue;
		}

		/* See dmi_use_low_level_irq[] comment */
		if (!autorepeat && (use_low_level_irq ||
				    dmi_check_system(dmi_use_low_level_irq))) {
			irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
			gpio_keys[n_buttons].irq = irq;
			/* Tell gpio-keys to use the IRQ directly, no GPIO */
			gpio_keys[n_buttons].gpio = -ENOENT;
		} else {
			gpio_keys[n_buttons].gpio = gpio;
		}

		gpio_keys[n_buttons].type = info->event_type;
		gpio_keys[n_buttons].code = info->event_code;
		gpio_keys[n_buttons].active_low = info->active_low;
		gpio_keys[n_buttons].desc = info->name;
		gpio_keys[n_buttons].wakeup = info->wakeup;
		/* These devices often use cheap buttons, use 50 ms debounce */
		gpio_keys[n_buttons].debounce_interval = 50;
		n_buttons++;
	}

	if (n_buttons == 0) {
		error = -ENODEV;
		goto err_free_mem;
	}

	gpio_keys_pdata->buttons = gpio_keys;
	gpio_keys_pdata->nbuttons = n_buttons;
	gpio_keys_pdata->rep = autorepeat;

	pd = platform_device_register_resndata(&pdev->dev, "gpio-keys",
					       PLATFORM_DEVID_AUTO, NULL, 0,
					       gpio_keys_pdata,
					       sizeof(*gpio_keys_pdata));
	error = PTR_ERR_OR_ZERO(pd);
	if (error) {
		dev_err(&pdev->dev,
			"failed registering gpio-keys: %d\n", error);
		goto err_free_mem;
	}

	return pd;

err_free_mem:
	devm_kfree(&pdev->dev, gpio_keys_pdata);
	return ERR_PTR(error);
}
/*
 * Extract an integer from an ACPI object; returns -1 when the object is
 * not an integer. Note the 64-bit ACPI value is truncated to int — the
 * descriptor fields parsed here are small, so that is fine in practice.
 */
static int soc_button_get_acpi_object_int(const union acpi_object *obj)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -1;

	return obj->integer.value;
}
/*
 * Parse a single ACPI0011 _DSD button descriptor package into *info.
 * Returns 0 on success, -ENODEV when the package is malformed or does
 * not belong to the given collection.
 */
static int soc_button_parse_btn_desc(struct device *dev,
				     const union acpi_object *desc,
				     int collection_uid,
				     struct soc_button_info *info)
{
	int upage, usage;

	if (desc->type != ACPI_TYPE_PACKAGE ||
	    desc->package.count != 5 ||
	    /* First byte should be 1 (control) */
	    soc_button_get_acpi_object_int(&desc->package.elements[0]) != 1 ||
	    /* Third byte should be collection uid */
	    soc_button_get_acpi_object_int(&desc->package.elements[2]) !=
							collection_uid) {
		dev_err(dev, "Invalid ACPI Button Descriptor\n");
		return -ENODEV;
	}

	/* Defaults; overridden below for specific usages. */
	info->event_type = EV_KEY;
	info->active_low = true;
	info->acpi_index =
		soc_button_get_acpi_object_int(&desc->package.elements[1]);
	upage = soc_button_get_acpi_object_int(&desc->package.elements[3]);
	usage = soc_button_get_acpi_object_int(&desc->package.elements[4]);

	/*
	 * The UUID: fa6bd625-9ce8-470d-a2c7-b3ca36c4282e descriptors use HID
	 * usage page and usage codes, but otherwise the device is not HID
	 * compliant: it uses one irq per button instead of generating HID
	 * input reports and some buttons should generate wakeups whereas
	 * others should not, so we cannot use the HID subsystem.
	 *
	 * Luckily all devices only use a few usage page + usage combinations,
	 * so we can simply check for the known combinations here.
	 */
	if (upage == 0x01 && usage == 0x81) {
		info->name = "power";
		info->event_code = KEY_POWER;
		info->wakeup = true;
	} else if (upage == 0x01 && usage == 0xca) {
		info->name = "rotation lock switch";
		info->event_type = EV_SW;
		info->event_code = SW_ROTATE_LOCK;
	} else if (upage == 0x07 && usage == 0xe3) {
		info->name = "home";
		info->event_code = KEY_LEFTMETA;
		info->wakeup = true;
	} else if (upage == 0x0c && usage == 0xe9) {
		info->name = "volume_up";
		info->event_code = KEY_VOLUMEUP;
		info->autorepeat = true;
	} else if (upage == 0x0c && usage == 0xea) {
		info->name = "volume_down";
		info->event_code = KEY_VOLUMEDOWN;
		info->autorepeat = true;
	} else {
		/* Unknown but well-formed: keep a placeholder, don't fail. */
		dev_warn(dev, "Unknown button index %d upage %02x usage %02x, ignoring\n",
			 info->acpi_index, upage, usage);
		info->name = "unknown";
		info->event_code = KEY_RESERVED;
	}

	return 0;
}
/* ACPI0011 _DSD btns descriptors UUID: fa6bd625-9ce8-470d-a2c7-b3ca36c4282e */
/* The first three GUID fields are stored little-endian, hence the byte order. */
static const u8 btns_desc_uuid[16] = {
	0x25, 0xd6, 0x6b, 0xfa, 0xe8, 0x9c, 0x0d, 0x47,
	0xa2, 0xc7, 0xb3, 0xca, 0x36, 0xc4, 0x28, 0x2e
};
/*
 * Parse ACPI0011 _DSD button descriptors into a devm-allocated,
 * empty-entry-terminated soc_button_info array. Returns an ERR_PTR on
 * any parse failure; the caller frees the array via devm_kfree() when
 * it is no longer needed.
 */
static struct soc_button_info *soc_button_get_button_info(struct device *dev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
	const union acpi_object *desc, *el0, *uuid, *btns_desc = NULL;
	struct soc_button_info *button_info;
	acpi_status status;
	int i, btn, collection_uid = -1;

	status = acpi_evaluate_object_typed(ACPI_HANDLE(dev), "_DSD", NULL,
					    &buf, ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "ACPI _DSD object not found\n");
		return ERR_PTR(-ENODEV);
	}

	/* Look for the Button Descriptors UUID among the (uuid, package) pairs */
	desc = buf.pointer;
	for (i = 0; (i + 1) < desc->package.count; i += 2) {
		uuid = &desc->package.elements[i];

		if (uuid->type != ACPI_TYPE_BUFFER ||
		    uuid->buffer.length != 16 ||
		    desc->package.elements[i + 1].type != ACPI_TYPE_PACKAGE) {
			break;
		}

		if (memcmp(uuid->buffer.pointer, btns_desc_uuid, 16) == 0) {
			btns_desc = &desc->package.elements[i + 1];
			break;
		}
	}

	if (!btns_desc) {
		dev_err(dev, "ACPI Button Descriptors not found\n");
		button_info = ERR_PTR(-ENODEV);
		goto out;
	}

	/* The first package describes the collection */
	el0 = &btns_desc->package.elements[0];
	if (el0->type == ACPI_TYPE_PACKAGE &&
	    el0->package.count == 5 &&
	    /* First byte should be 0 (collection) */
	    soc_button_get_acpi_object_int(&el0->package.elements[0]) == 0 &&
	    /* Third byte should be 0 (top level collection) */
	    soc_button_get_acpi_object_int(&el0->package.elements[2]) == 0) {
		collection_uid = soc_button_get_acpi_object_int(
						&el0->package.elements[1]);
	}

	if (collection_uid == -1) {
		dev_err(dev, "Invalid Button Collection Descriptor\n");
		button_info = ERR_PTR(-ENODEV);
		goto out;
	}

	/* There are package.count - 1 buttons + 1 terminating empty entry */
	button_info = devm_kcalloc(dev, btns_desc->package.count,
				   sizeof(*button_info), GFP_KERNEL);
	if (!button_info) {
		button_info = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* Parse the button descriptors (element 0 was the collection) */
	for (i = 1, btn = 0; i < btns_desc->package.count; i++, btn++) {
		if (soc_button_parse_btn_desc(dev,
					      &btns_desc->package.elements[i],
					      collection_uid,
					      &button_info[btn])) {
			button_info = ERR_PTR(-ENODEV);
			goto out;
		}
	}

out:
	kfree(buf.pointer);
	return button_info;
}
/* Unregister whichever gpio-keys children were created at probe time. */
static int soc_button_remove(struct platform_device *pdev)
{
	struct soc_button_data *priv = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < BUTTON_TYPES; i++) {
		if (!priv->children[i])
			continue;

		platform_device_unregister(priv->children[i]);
	}

	return 0;
}
/*
 * Bind the ACPI button device: obtain the button table (static per-HID
 * data, or parsed from the ACPI0011 _DSD), then create up to two
 * gpio-keys children — one for autorepeat buttons, one for the rest.
 */
static int soc_button_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct soc_device_data *device_data;
	const struct soc_button_info *button_info;
	struct soc_button_data *priv;
	struct platform_device *pd;
	int i;
	int error;

	device_data = acpi_device_get_match_data(dev);
	if (device_data && device_data->check) {
		/* Device-specific gate, e.g. MSHW0040 OEM platform revision */
		error = device_data->check(dev);
		if (error)
			return error;
	}

	if (device_data && device_data->button_info) {
		button_info = device_data->button_info;
	} else {
		/* ACPI0011: buttons are described by _DSD descriptors */
		button_info = soc_button_get_button_info(dev);
		if (IS_ERR(button_info))
			return PTR_ERR(button_info);
	}

	/* Bail out early when the device has no GPIO resources at all */
	error = gpiod_count(dev, NULL);
	if (error < 0) {
		dev_dbg(dev, "no GPIO attached, ignoring...\n");
		return -ENODEV;
	}

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);

	for (i = 0; i < BUTTON_TYPES; i++) {
		/* i == 0 creates the autorepeat (volume) child */
		pd = soc_button_device_create(pdev, button_info, i == 0);
		if (IS_ERR(pd)) {
			error = PTR_ERR(pd);
			/* -ENODEV just means "no buttons of this kind" */
			if (error != -ENODEV) {
				soc_button_remove(pdev);
				return error;
			}
			continue;
		}

		priv->children[i] = pd;
	}

	if (!priv->children[0] && !priv->children[1])
		return -ENODEV;

	/* A dynamically parsed table is no longer needed once children exist */
	if (!device_data || !device_data->button_info)
		devm_kfree(dev, button_info);

	return 0;
}
/*
 * Definition of buttons on the tablet. The ACPI index of each button
 * is defined in section 2.8.7.2 of "Windows ACPI Design Guide for SoC
 * Platforms"
 */
static const struct soc_button_info soc_button_PNP0C40[] = {
	{ "power", 0, EV_KEY, KEY_POWER, false, true, true },
	{ "home", 1, EV_KEY, KEY_LEFTMETA, false, true, true },
	{ "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false, true },
	{ "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false, true },
	{ "rotation_lock", 4, EV_KEY, KEY_ROTATE_LOCK_TOGGLE, false, false, true },
	{ }
};

static const struct soc_device_data soc_device_PNP0C40 = {
	.button_info = soc_button_PNP0C40,
};

/* INT33D3 / ID9001: a single tablet-mode switch, reported as EV_SW */
static const struct soc_button_info soc_button_INT33D3[] = {
	{ "tablet_mode", 0, EV_SW, SW_TABLET_MODE, false, false, false },
	{ }
};

static const struct soc_device_data soc_device_INT33D3 = {
	.button_info = soc_button_INT33D3,
};
/*
 * Button info for Microsoft Surface 3 (non pro), this is identical to
 * the PNP0C40 info except that the home button is active-high.
 *
 * The Surface 3 Pro also has a MSHW0028 ACPI device, but that uses a custom
 * version of the drivers/platform/x86/intel/hid.c 5 button array ACPI API
 * instead. A check() callback is not necessary though as the Surface 3 Pro
 * MSHW0028 ACPI device's resource table does not contain any GPIOs.
 */
static const struct soc_button_info soc_button_MSHW0028[] = {
	{ "power", 0, EV_KEY, KEY_POWER, false, true, true },
	{ "home", 1, EV_KEY, KEY_LEFTMETA, false, true, false },
	{ "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false, true },
	{ "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false, true },
	{ }
};

static const struct soc_device_data soc_device_MSHW0028 = {
	.button_info = soc_button_MSHW0028,
};
/*
 * Special device check for Surface Book 2 and Surface Pro (2017).
 * Both the Surface Pro 4 (surfacepro3_button.c) and the above mentioned
 * devices use MSHW0040 for power and volume buttons, however the way they
 * have to be addressed differs. Make sure that we only load this driver
 * for the correct devices by checking the OEM Platform Revision provided by
 * the _DSM method.
 */
#define MSHW0040_DSM_REVISION		0x01
#define MSHW0040_DSM_GET_OMPR		0x02	/* get OEM Platform Revision */
static const guid_t MSHW0040_DSM_UUID =
	GUID_INIT(0x6fd05c69, 0xcde3, 0x49f4, 0x95, 0xed, 0xab, 0x16, 0x65,
		  0x49, 0x80, 0x35);
/*
 * check() callback for MSHW0040: only Surface Book 2 / Surface Pro (2017)
 * report a nonzero OEM Platform Revision via _DSM; older models must be
 * handled by surfacepro3_button.c instead, so refuse them with -ENODEV.
 */
static int soc_device_check_MSHW0040(struct device *dev)
{
	acpi_handle handle = ACPI_HANDLE(dev);
	union acpi_object *result;
	u64 oem_platform_rev = 0;	/* valid revisions are nonzero */

	/* get OEM platform revision */
	result = acpi_evaluate_dsm_typed(handle, &MSHW0040_DSM_UUID,
					 MSHW0040_DSM_REVISION,
					 MSHW0040_DSM_GET_OMPR, NULL,
					 ACPI_TYPE_INTEGER);
	if (result) {
		oem_platform_rev = result->integer.value;
		ACPI_FREE(result);
	}

	/*
	 * If the revision is zero here, the _DSM evaluation has failed. This
	 * indicates that we have a Pro 4 or Book 1 and this driver should not
	 * be used.
	 */
	if (oem_platform_rev == 0)
		return -ENODEV;

	dev_dbg(dev, "OEM Platform Revision %llu\n", oem_platform_rev);

	return 0;
}
/*
 * Button infos for Microsoft Surface Book 2 and Surface Pro (2017).
 * Obtained from DSDT/testing. Note the ACPI indices are non-contiguous
 * (0, 2, 4) on these models.
 */
static const struct soc_button_info soc_button_MSHW0040[] = {
	{ "power", 0, EV_KEY, KEY_POWER, false, true, true },
	{ "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false, true },
	{ "volume_down", 4, EV_KEY, KEY_VOLUMEDOWN, true, false, true },
	{ }
};

static const struct soc_device_data soc_device_MSHW0040 = {
	.button_info = soc_button_MSHW0040,
	.check = soc_device_check_MSHW0040,
};
static const struct acpi_device_id soc_button_acpi_match[] = {
	{ "PNP0C40", (unsigned long)&soc_device_PNP0C40 },
	{ "INT33D3", (unsigned long)&soc_device_INT33D3 },
	{ "ID9001", (unsigned long)&soc_device_INT33D3 },
	/* ACPI0011: button info is parsed from the _DSD at probe time */
	{ "ACPI0011", 0 },

	/* Microsoft Surface Devices (3rd, 5th and 6th generation) */
	{ "MSHW0028", (unsigned long)&soc_device_MSHW0028 },
	{ "MSHW0040", (unsigned long)&soc_device_MSHW0040 },

	{ }
};
MODULE_DEVICE_TABLE(acpi, soc_button_acpi_match);
/* Platform driver glue; devices are matched purely via the ACPI table. */
static struct platform_driver soc_button_driver = {
	.probe = soc_button_probe,
	.remove = soc_button_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = ACPI_PTR(soc_button_acpi_match),
	},
};
module_platform_driver(soc_button_driver);

MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/soc_button_array.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* User level driver support for input subsystem
*
* Heavily based on evdev.c by Vojtech Pavlik
*
* Author: Aristeu Sergio Rozanski Filho <[email protected]>
*
* Changes/Revisions:
* 0.4 01/09/2014 (Benjamin Tissoires <[email protected]>)
* - add UI_GET_SYSNAME ioctl
* 0.3 09/04/2006 (Anssi Hannula <[email protected]>)
* - updated ff support for the changes in kernel interface
* - added MODULE_VERSION
* 0.2 16/10/2004 (Micah Dowty <[email protected]>)
* - added force feedback support
* - added UI_SET_PHYS
* 0.1 20/06/2002
* - first public version
*/
#include <uapi/linux/uinput.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/overflow.h>
#include <linux/input/mt.h>
#include "../input-compat.h"
#define UINPUT_NAME		"uinput"
#define UINPUT_BUFFER_SIZE	16
#define UINPUT_NUM_REQUESTS	16
/* Injected timestamps may lag the current time by at most this many seconds */
#define UINPUT_TIMESTAMP_ALLOWED_OFFSET_SECS	10

/* Device lifecycle: freshly opened -> setup written -> registered */
enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED };

/*
 * One force-feedback request handed off to userspace; the submitting
 * thread sleeps on 'done' until the request is completed (or the device
 * is torn down, see uinput_flush_requests()).
 */
struct uinput_request {
	unsigned int		id;	/* slot index in udev->requests[] */
	unsigned int		code;	/* UI_FF_UPLOAD, UI_FF_ERASE */
	int			retval;
	struct completion	done;

	union {
		unsigned int	effect_id;
		struct {
			struct ff_effect *effect;
			struct ff_effect *old;
		} upload;
	} u;
};
/* Per-open-file state: one uinput fd corresponds to one virtual device. */
struct uinput_device {
	struct input_dev	*dev;
	struct mutex		mutex;	/* serializes state changes and I/O */
	enum uinput_state	state;
	wait_queue_head_t	waitq;	/* readers wait here for new events */
	unsigned char		ready;
	unsigned char		head;	/* ring-buffer producer index */
	unsigned char		tail;	/* ring-buffer consumer index */
	struct input_event	buff[UINPUT_BUFFER_SIZE];
	unsigned int		ff_effects_max;

	struct uinput_request	*requests[UINPUT_NUM_REQUESTS];
	wait_queue_head_t	requests_waitq;	/* waits for a free slot */
	spinlock_t		requests_lock;	/* protects requests[] */
};
/*
 * input_dev->event() callback: timestamp the event, append it to the
 * fd's ring buffer and wake readers. There is no fullness check, so on
 * overflow the oldest unread slots are silently overwritten.
 */
static int uinput_dev_event(struct input_dev *dev,
			    unsigned int type, unsigned int code, int value)
{
	struct uinput_device *udev = input_get_drvdata(dev);
	struct timespec64 ts;

	ktime_get_ts64(&ts);

	udev->buff[udev->head] = (struct input_event) {
		.input_event_sec = ts.tv_sec,
		.input_event_usec = ts.tv_nsec / NSEC_PER_USEC,
		.type = type,
		.code = code,
		.value = value,
	};

	udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE;

	wake_up_interruptible(&udev->waitq);

	return 0;
}
/*
 * Atomically reserve a request slot/ID for the given request.
 * Returns true when a free slot was claimed, false when all
 * UINPUT_NUM_REQUESTS slots are busy.
 */
static bool uinput_request_alloc_id(struct uinput_device *udev,
				    struct uinput_request *request)
{
	unsigned int id;
	bool reserved = false;

	spin_lock(&udev->requests_lock);

	for (id = 0; id < UINPUT_NUM_REQUESTS; id++) {
		if (!udev->requests[id]) {
			request->id = id;
			udev->requests[id] = request;
			reserved = true;
			break;
		}
	}

	spin_unlock(&udev->requests_lock);

	return reserved;
}
/* Look up an in-flight request by ID; NULL if the ID is out of range or unused. */
static struct uinput_request *uinput_request_find(struct uinput_device *udev,
						  unsigned int id)
{
	/* Find an input request, by ID. Returns NULL if the ID isn't valid. */
	if (id >= UINPUT_NUM_REQUESTS)
		return NULL;

	return udev->requests[id];
}
/*
 * Reserve a request slot, sleeping interruptibly until one frees up.
 * Returns 0 on success or -ERESTARTSYS when interrupted by a signal.
 */
static int uinput_request_reserve_slot(struct uinput_device *udev,
				       struct uinput_request *request)
{
	/* Allocate slot. If none are available right away, wait. */
	return wait_event_interruptible(udev->requests_waitq,
					uinput_request_alloc_id(udev, request));
}
/* Free a request slot and wake anyone blocked in uinput_request_reserve_slot(). */
static void uinput_request_release_slot(struct uinput_device *udev,
					unsigned int id)
{
	/* Mark slot as available */
	spin_lock(&udev->requests_lock);
	udev->requests[id] = NULL;
	spin_unlock(&udev->requests_lock);

	wake_up(&udev->requests_waitq);
}
/*
 * Notify userspace of a pending FF request by injecting an EV_UINPUT
 * event carrying the request ID. Fails with -ENODEV when the device is
 * not (or no longer) in the created state.
 */
static int uinput_request_send(struct uinput_device *udev,
			       struct uinput_request *request)
{
	int retval;

	retval = mutex_lock_interruptible(&udev->mutex);
	if (retval)
		return retval;

	if (udev->state != UIST_CREATED) {
		retval = -ENODEV;
		goto out;
	}

	init_completion(&request->done);

	/*
	 * Tell our userspace application about this new request
	 * by queueing an input event.
	 */
	uinput_dev_event(udev->dev, EV_UINPUT, request->code, request->id);

 out:
	mutex_unlock(&udev->mutex);
	return retval;
}
/*
 * Full request round-trip: reserve a slot, hand the request to userspace
 * and wait up to 30 seconds for completion. Returns the request's result,
 * -ETIMEDOUT on timeout, or the slot/send error. The slot is always
 * released before returning.
 */
static int uinput_request_submit(struct uinput_device *udev,
				 struct uinput_request *request)
{
	int retval;

	retval = uinput_request_reserve_slot(udev, request);
	if (retval)
		return retval;

	retval = uinput_request_send(udev, request);
	if (retval)
		goto out;

	if (!wait_for_completion_timeout(&request->done, 30 * HZ)) {
		retval = -ETIMEDOUT;
		goto out;
	}

	retval = request->retval;

 out:
	uinput_request_release_slot(udev, request->id);
	return retval;
}
/*
 * Fail all outstanding requests so handlers don't wait for the userspace
 * to finish processing them. Called from device teardown.
 */
static void uinput_flush_requests(struct uinput_device *udev)
{
	struct uinput_request *request;
	int i;

	spin_lock(&udev->requests_lock);

	for (i = 0; i < UINPUT_NUM_REQUESTS; i++) {
		request = udev->requests[i];
		if (request) {
			/* Wake the waiter in uinput_request_submit() */
			request->retval = -ENODEV;
			complete(&request->done);
		}
	}

	spin_unlock(&udev->requests_lock);
}
/* FF callbacks that simply forward the operation to userspace as EV_FF events */

static void uinput_dev_set_gain(struct input_dev *dev, u16 gain)
{
	uinput_dev_event(dev, EV_FF, FF_GAIN, gain);
}

static void uinput_dev_set_autocenter(struct input_dev *dev, u16 magnitude)
{
	uinput_dev_event(dev, EV_FF, FF_AUTOCENTER, magnitude);
}

static int uinput_dev_playback(struct input_dev *dev, int effect_id, int value)
{
	return uinput_dev_event(dev, EV_FF, effect_id, value);
}
/*
 * ff->upload handler: forward the new effect (and the one it replaces,
 * if any) to userspace via the request mechanism and wait for the result.
 */
static int uinput_dev_upload_effect(struct input_dev *dev,
				    struct ff_effect *effect,
				    struct ff_effect *old)
{
	struct uinput_device *udev = input_get_drvdata(dev);
	struct uinput_request request;

	/*
	 * uinput driver does not currently support periodic effects with
	 * custom waveform since it does not have a way to pass buffer of
	 * samples (custom_data) to userspace. If ever there is a device
	 * supporting custom waveforms we would need to define an additional
	 * ioctl (UI_UPLOAD_SAMPLES) but for now we just bail out.
	 */
	if (effect->type == FF_PERIODIC &&
	    effect->u.periodic.waveform == FF_CUSTOM)
		return -EINVAL;

	request.code = UI_FF_UPLOAD;
	request.u.upload.effect = effect;
	request.u.upload.old = old;

	return uinput_request_submit(udev, &request);
}
/* ff->erase handler: ask userspace to drop the effect with the given id. */
static int uinput_dev_erase_effect(struct input_dev *dev, int effect_id)
{
	struct uinput_device *udev = input_get_drvdata(dev);
	struct uinput_request request;

	if (!test_bit(EV_FF, dev->evbit))
		return -ENOSYS;

	request.code = UI_FF_ERASE;
	request.u.effect_id = effect_id;

	return uinput_request_submit(udev, &request);
}
/* input_dev->flush handler: only forward when a real file is behind the call. */
static int uinput_dev_flush(struct input_dev *dev, struct file *file)
{
	/*
	 * If we are called with file == NULL that means we are tearing
	 * down the device, and therefore we can not handle FF erase
	 * requests: either we are handling UI_DEV_DESTROY (and holding
	 * the udev->mutex), or the file descriptor is closed and there is
	 * nobody on the other side anymore.
	 */
	return file ? input_ff_flush(dev, file) : 0;
}
/*
 * Tear down the virtual input device, if one exists. A registered device
 * first has its outstanding FF requests failed; an unregistered one is
 * simply freed. The name/phys strings are owned by uinput and must be
 * released after the device itself is gone.
 */
static void uinput_destroy_device(struct uinput_device *udev)
{
	const char *name, *phys;
	struct input_dev *dev = udev->dev;
	enum uinput_state old_state = udev->state;

	udev->state = UIST_NEW_DEVICE;

	if (dev) {
		name = dev->name;
		phys = dev->phys;
		if (old_state == UIST_CREATED) {
			uinput_flush_requests(udev);
			input_unregister_device(dev);
		} else {
			input_free_device(dev);
		}
		kfree(name);
		kfree(phys);
		udev->dev = NULL;
	}
}
/*
 * UI_DEV_CREATE: finalize and register the input device built up via the
 * setup ioctls. Sets up MT slots when ABS_MT_SLOT is advertised and wires
 * in the force-feedback callbacks when ff_effects_max was configured.
 * On failure the partially constructed device is destroyed.
 */
static int uinput_create_device(struct uinput_device *udev)
{
	struct input_dev *dev = udev->dev;
	int error, nslot;

	if (udev->state != UIST_SETUP_COMPLETE) {
		printk(KERN_DEBUG "%s: write device info first\n", UINPUT_NAME);
		return -EINVAL;
	}

	if (test_bit(EV_ABS, dev->evbit)) {
		input_alloc_absinfo(dev);
		if (!dev->absinfo) {
			error = -EINVAL;
			goto fail1;
		}

		if (test_bit(ABS_MT_SLOT, dev->absbit)) {
			nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
			error = input_mt_init_slots(dev, nslot, 0);
			if (error)
				goto fail1;
		} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
			/* Multitouch without slots: size event packets generously */
			input_set_events_per_packet(dev, 60);
		}
	}

	if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) {
		printk(KERN_DEBUG "%s: ff_effects_max should be non-zero when FF_BIT is set\n",
		       UINPUT_NAME);
		error = -EINVAL;
		goto fail1;
	}

	if (udev->ff_effects_max) {
		error = input_ff_create(dev, udev->ff_effects_max);
		if (error)
			goto fail1;

		dev->ff->upload = uinput_dev_upload_effect;
		dev->ff->erase = uinput_dev_erase_effect;
		dev->ff->playback = uinput_dev_playback;
		dev->ff->set_gain = uinput_dev_set_gain;
		dev->ff->set_autocenter = uinput_dev_set_autocenter;
		/*
		 * The standard input_ff_flush() implementation does
		 * not quite work for uinput as we can't reasonably
		 * handle FF requests during device teardown.
		 */
		dev->flush = uinput_dev_flush;
	}

	dev->event = uinput_dev_event;

	input_set_drvdata(udev->dev, udev);

	error = input_register_device(udev->dev);
	if (error)
		goto fail2;

	udev->state = UIST_CREATED;

	return 0;

	/* registration failed: ff resources are still ours to destroy */
 fail2:	input_ff_destroy(dev);
 fail1: uinput_destroy_device(udev);
	return error;
}
/*
 * open(): allocate per-fd state only; the actual input device is
 * constructed later through ioctls or a legacy write().
 */
static int uinput_open(struct inode *inode, struct file *file)
{
	struct uinput_device *newdev;

	newdev = kzalloc(sizeof(struct uinput_device), GFP_KERNEL);
	if (!newdev)
		return -ENOMEM;

	mutex_init(&newdev->mutex);
	spin_lock_init(&newdev->requests_lock);
	init_waitqueue_head(&newdev->requests_waitq);
	init_waitqueue_head(&newdev->waitq);
	newdev->state = UIST_NEW_DEVICE;

	file->private_data = newdev;
	stream_open(inode, file);

	return 0;
}
/*
 * Sanity-check one abs axis: max must not be below min (unless both are
 * 0, i.e. the axis is unused) and flat must fit within the range.
 */
static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
				   const struct input_absinfo *abs)
{
	int min, max, range;

	min = abs->minimum;
	max = abs->maximum;

	if ((min != 0 || max != 0) && max < min) {
		printk(KERN_DEBUG
		       "%s: invalid abs[%02x] min:%d max:%d\n",
		       UINPUT_NAME, code, min, max);
		return -EINVAL;
	}

	/* check_sub_overflow also guards against max-min overflowing int */
	if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
		printk(KERN_DEBUG
		       "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
		       UINPUT_NAME, code, abs->flat, min, max);
		return -EINVAL;
	}

	return 0;
}
/* Verify every advertised ABS axis has sane absinfo before registration. */
static int uinput_validate_absbits(struct input_dev *dev)
{
	unsigned int cnt;
	int error;

	if (!test_bit(EV_ABS, dev->evbit))
		return 0;

	/*
	 * Check if absmin/absmax/absfuzz/absflat are sane.
	 */

	for_each_set_bit(cnt, dev->absbit, ABS_CNT) {
		if (!dev->absinfo)
			return -EINVAL;

		error = uinput_validate_absinfo(dev, cnt, &dev->absinfo[cnt]);
		if (error)
			return error;
	}

	return 0;
}
/*
 * UI_DEV_SETUP: copy device identity (id, name, ff_effects_max) from
 * userspace. Rejected once the device has been created. The name is
 * duplicated (capped to UINPUT_MAX_NAME_SIZE); any previous name is freed.
 */
static int uinput_dev_setup(struct uinput_device *udev,
			    struct uinput_setup __user *arg)
{
	struct uinput_setup setup;
	struct input_dev *dev;

	if (udev->state == UIST_CREATED)
		return -EINVAL;

	if (copy_from_user(&setup, arg, sizeof(setup)))
		return -EFAULT;

	if (!setup.name[0])
		return -EINVAL;

	dev = udev->dev;
	dev->id = setup.id;
	udev->ff_effects_max = setup.ff_effects_max;

	kfree(dev->name);
	dev->name = kstrndup(setup.name, UINPUT_MAX_NAME_SIZE, GFP_KERNEL);
	if (!dev->name)
		return -ENOMEM;

	udev->state = UIST_SETUP_COMPLETE;
	return 0;
}
/*
 * UI_ABS_SETUP (variable size): install absinfo for one axis. 'size' may
 * be smaller than the kernel struct for older userspace; the zero-filled
 * remainder then acts as defaults.
 */
static int uinput_abs_setup(struct uinput_device *udev,
			    struct uinput_setup __user *arg, size_t size)
{
	struct uinput_abs_setup setup = {};
	struct input_dev *dev;
	int error;

	if (size > sizeof(setup))
		return -E2BIG;

	if (udev->state == UIST_CREATED)
		return -EINVAL;

	if (copy_from_user(&setup, arg, size))
		return -EFAULT;

	if (setup.code > ABS_MAX)
		return -ERANGE;

	dev = udev->dev;

	error = uinput_validate_absinfo(dev, setup.code, &setup.absinfo);
	if (error)
		return error;

	input_alloc_absinfo(dev);
	if (!dev->absinfo)
		return -ENOMEM;

	set_bit(setup.code, dev->absbit);
	dev->absinfo[setup.code] = setup.absinfo;
	return 0;
}
/*
 * Legacy device setup via write() of a full struct uinput_user_dev:
 * copies identity and abs parameters, validates them and advances the
 * device to the setup-complete state. Returns 'count' on success.
 */
static int uinput_setup_device_legacy(struct uinput_device *udev,
				      const char __user *buffer, size_t count)
{
	struct uinput_user_dev *user_dev;
	struct input_dev *dev;
	int i;
	int retval;

	if (count != sizeof(struct uinput_user_dev))
		return -EINVAL;

	if (!udev->dev) {
		udev->dev = input_allocate_device();
		if (!udev->dev)
			return -ENOMEM;
	}

	dev = udev->dev;

	user_dev = memdup_user(buffer, sizeof(struct uinput_user_dev));
	if (IS_ERR(user_dev))
		return PTR_ERR(user_dev);

	udev->ff_effects_max = user_dev->ff_effects_max;

	/* Ensure name is filled in */
	if (!user_dev->name[0]) {
		retval = -EINVAL;
		goto exit;
	}

	kfree(dev->name);
	dev->name = kstrndup(user_dev->name, UINPUT_MAX_NAME_SIZE,
			     GFP_KERNEL);
	if (!dev->name) {
		retval = -ENOMEM;
		goto exit;
	}

	dev->id.bustype	= user_dev->id.bustype;
	dev->id.vendor	= user_dev->id.vendor;
	dev->id.product	= user_dev->id.product;
	dev->id.version	= user_dev->id.version;

	for (i = 0; i < ABS_CNT; i++) {
		input_abs_set_max(dev, i, user_dev->absmax[i]);
		input_abs_set_min(dev, i, user_dev->absmin[i]);
		input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
		input_abs_set_flat(dev, i, user_dev->absflat[i]);
	}

	retval = uinput_validate_absbits(dev);
	if (retval < 0)
		goto exit;

	udev->state = UIST_SETUP_COMPLETE;
	retval = count;

 exit:
	kfree(user_dev);
	return retval;
}
/*
 * Returns true if the given timestamp is valid (i.e., if all the following
 * conditions are satisfied), false otherwise.
 * 1) given timestamp is positive
 * 2) it's within the allowed offset before the current time
 * 3) it's not in the future
 */
static bool is_valid_timestamp(const ktime_t timestamp)
{
	ktime_t now, oldest;

	/* Reject zero and negative timestamps outright. */
	if (ktime_compare(timestamp, ktime_set(0, 0)) <= 0)
		return false;

	now = ktime_get();
	oldest = ktime_sub(now,
			   ktime_set(UINPUT_TIMESTAMP_ALLOWED_OFFSET_SECS, 0));

	/* Accept only timestamps within [now - offset, now]. */
	return !ktime_after(oldest, timestamp) &&
	       !ktime_after(timestamp, now);
}
/*
 * write() path once the device exists: parse a stream of input_event
 * structs and feed them to the input core. A valid (recent, non-future)
 * user-supplied timestamp overrides the event time.
 */
static ssize_t uinput_inject_events(struct uinput_device *udev,
				    const char __user *buffer, size_t count)
{
	struct input_event ev;
	size_t bytes = 0;
	ktime_t timestamp;

	if (count != 0 && count < input_event_size())
		return -EINVAL;

	while (bytes + input_event_size() <= count) {
		/*
		 * Note that even if some events were fetched successfully
		 * we are still going to return EFAULT instead of partial
		 * count to let userspace know that it got it's buffers
		 * all wrong.
		 */
		if (input_event_from_user(buffer + bytes, &ev))
			return -EFAULT;

		timestamp = ktime_set(ev.input_event_sec, ev.input_event_usec * NSEC_PER_USEC);
		if (is_valid_timestamp(timestamp))
			input_set_timestamp(udev->dev, timestamp);

		input_event(udev->dev, ev.type, ev.code, ev.value);
		bytes += input_event_size();
		cond_resched();
	}

	return bytes;
}
/*
 * write(): before device creation this carries the legacy setup struct;
 * afterwards it injects input events.
 */
static ssize_t uinput_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	struct uinput_device *udev = file->private_data;
	int retval;

	if (count == 0)
		return 0;

	retval = mutex_lock_interruptible(&udev->mutex);
	if (retval)
		return retval;

	retval = udev->state == UIST_CREATED ?
			uinput_inject_events(udev, buffer, count) :
			uinput_setup_device_legacy(udev, buffer, count);

	mutex_unlock(&udev->mutex);

	return retval;
}
/*
 * Pop one event from the ring buffer, if any, under dev->event_lock —
 * the input core holds that same lock around ->event() calls, which
 * serializes the consumer against the producer in uinput_dev_event().
 */
static bool uinput_fetch_next_event(struct uinput_device *udev,
				    struct input_event *event)
{
	bool have_event;

	spin_lock_irq(&udev->dev->event_lock);

	have_event = udev->head != udev->tail;
	if (have_event) {
		*event = udev->buff[udev->tail];
		udev->tail = (udev->tail + 1) % UINPUT_BUFFER_SIZE;
	}

	spin_unlock_irq(&udev->dev->event_lock);

	return have_event;
}
/* Drain as many whole events into the user buffer as fit in 'count'. */
static ssize_t uinput_events_to_user(struct uinput_device *udev,
				     char __user *buffer, size_t count)
{
	struct input_event event;
	size_t read = 0;

	while (read + input_event_size() <= count &&
	       uinput_fetch_next_event(udev, &event)) {

		if (input_event_to_user(buffer + read, &event))
			return -EFAULT;

		read += input_event_size();
	}

	return read;
}
/*
 * read(): return whole events only; buffers smaller than one event are
 * rejected with -EINVAL (zero-length reads return 0). Without O_NONBLOCK
 * this blocks until an event arrives or the device is destroyed.
 */
static ssize_t uinput_read(struct file *file, char __user *buffer,
			   size_t count, loff_t *ppos)
{
	struct uinput_device *udev = file->private_data;
	ssize_t retval;

	if (count != 0 && count < input_event_size())
		return -EINVAL;

	do {
		retval = mutex_lock_interruptible(&udev->mutex);
		if (retval)
			return retval;

		if (udev->state != UIST_CREATED)
			retval = -ENODEV;
		else if (udev->head == udev->tail &&
			 (file->f_flags & O_NONBLOCK))
			retval = -EAGAIN;
		else
			retval = uinput_events_to_user(udev, buffer, count);

		mutex_unlock(&udev->mutex);

		if (retval || count == 0)
			break;

		/* Buffer was empty: wait for data (or device teardown) */
		if (!(file->f_flags & O_NONBLOCK))
			retval = wait_event_interruptible(udev->waitq,
						  udev->head != udev->tail ||
						  udev->state != UIST_CREATED);
	} while (retval == 0);

	return retval;
}
/* poll(): always writable; readable when the ring buffer is non-empty. */
static __poll_t uinput_poll(struct file *file, poll_table *wait)
{
	struct uinput_device *udev = file->private_data;
	__poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uinput is always writable */

	poll_wait(file, &udev->waitq, wait);

	if (udev->head != udev->tail)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
/* release(): tear down the virtual device and free the per-fd state. */
static int uinput_release(struct inode *inode, struct file *file)
{
	struct uinput_device *udev = file->private_data;

	uinput_destroy_device(udev);
	kfree(udev);

	return 0;
}
#ifdef CONFIG_COMPAT
/*
 * 32-bit layout of struct uinput_ff_upload: ff_effect ends with a
 * pointer (custom_data), so its compat size differs from the native one.
 */
struct uinput_ff_upload_compat {
	__u32			request_id;
	__s32			retval;
	struct ff_effect_compat	effect;
	struct ff_effect_compat	old;
};
/* Copy a uinput_ff_upload out to userspace, honoring the compat layout. */
static int uinput_ff_upload_to_user(char __user *buffer,
				    const struct uinput_ff_upload *ff_up)
{
	if (in_compat_syscall()) {
		struct uinput_ff_upload_compat ff_up_compat;

		ff_up_compat.request_id = ff_up->request_id;
		ff_up_compat.retval = ff_up->retval;
		/*
		 * It so happens that the pointer that gives us the trouble
		 * is the last field in the structure. Since we don't support
		 * custom waveforms in uinput anyway we can just copy the whole
		 * thing (to the compat size) and ignore the pointer.
		 */
		memcpy(&ff_up_compat.effect, &ff_up->effect,
		       sizeof(struct ff_effect_compat));
		memcpy(&ff_up_compat.old, &ff_up->old,
		       sizeof(struct ff_effect_compat));

		if (copy_to_user(buffer, &ff_up_compat,
				 sizeof(struct uinput_ff_upload_compat)))
			return -EFAULT;
	} else {
		if (copy_to_user(buffer, ff_up,
				 sizeof(struct uinput_ff_upload)))
			return -EFAULT;
	}

	return 0;
}
/* Copy a uinput_ff_upload in from userspace, honoring the compat layout. */
static int uinput_ff_upload_from_user(const char __user *buffer,
				      struct uinput_ff_upload *ff_up)
{
	if (in_compat_syscall()) {
		struct uinput_ff_upload_compat ff_up_compat;

		if (copy_from_user(&ff_up_compat, buffer,
				   sizeof(struct uinput_ff_upload_compat)))
			return -EFAULT;

		ff_up->request_id = ff_up_compat.request_id;
		ff_up->retval = ff_up_compat.retval;
		/* Same trick as in uinput_ff_upload_to_user(): the trailing
		 * pointer is ignored, so a compat-sized copy suffices. */
		memcpy(&ff_up->effect, &ff_up_compat.effect,
		       sizeof(struct ff_effect_compat));
		memcpy(&ff_up->old, &ff_up_compat.old,
		       sizeof(struct ff_effect_compat));
	} else {
		if (copy_from_user(ff_up, buffer,
				   sizeof(struct uinput_ff_upload)))
			return -EFAULT;
	}

	return 0;
}
#else
/* Non-compat kernels: plain full-size copies suffice. */
static int uinput_ff_upload_to_user(char __user *buffer,
				    const struct uinput_ff_upload *ff_up)
{
	if (copy_to_user(buffer, ff_up, sizeof(struct uinput_ff_upload)))
		return -EFAULT;

	return 0;
}

static int uinput_ff_upload_from_user(const char __user *buffer,
				      struct uinput_ff_upload *ff_up)
{
	if (copy_from_user(ff_up, buffer, sizeof(struct uinput_ff_upload)))
		return -EFAULT;

	return 0;
}
#endif
/*
 * Validate and set one capability bit on the device under construction.
 * Fails with -EINVAL once the device has been created or when the bit
 * number exceeds _max. Relies on 'udev' being in scope at the call site.
 */
#define uinput_set_bit(_arg, _bit, _max)		\
({							\
	int __ret = 0;					\
	if (udev->state == UIST_CREATED)		\
		__ret =  -EINVAL;			\
	else if ((_arg) > (_max))			\
		__ret = -EINVAL;			\
	else set_bit((_arg), udev->dev->_bit);		\
	__ret;						\
})
/*
 * Copy a NUL-terminated string to userspace, truncating to maxlen but
 * always NUL-terminating the result. Returns the number of bytes copied
 * (including the terminator), -ENOENT when there is no string, -EINVAL
 * for a zero-sized buffer, or -EFAULT on a failed copy.
 */
static int uinput_str_to_user(void __user *dest, const char *str,
			      unsigned int maxlen)
{
	char __user *p = dest;
	int len, ret;

	if (!str)
		return -ENOENT;

	if (maxlen == 0)
		return -EINVAL;

	len = strlen(str) + 1;
	if (len > maxlen)
		len = maxlen;

	ret = copy_to_user(p, str, len);
	if (ret)
		return -EFAULT;

	/* force terminating '\0' */
	ret = put_user(0, p + len - 1);
	return ret ? -EFAULT : len;
}
/*
 * Common ioctl dispatcher shared by the native and compat entry points.
 * @p is the userspace argument pointer, already converted via compat_ptr()
 * on the compat path.  All work happens under udev->mutex; every case
 * exits through the 'out' label so the mutex is always released.
 */
static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
				 unsigned long arg, void __user *p)
{
	int retval;
	struct uinput_device *udev = file->private_data;
	struct uinput_ff_upload ff_up;
	struct uinput_ff_erase ff_erase;
	struct uinput_request *req;
	char *phys;
	const char *name;
	unsigned int size;
	retval = mutex_lock_interruptible(&udev->mutex);
	if (retval)
		return retval;
	/* lazily allocate the input device on the first ioctl */
	if (!udev->dev) {
		udev->dev = input_allocate_device();
		if (!udev->dev) {
			retval = -ENOMEM;
			goto out;
		}
	}
	/* fixed-size commands first */
	switch (cmd) {
	case UI_GET_VERSION:
		if (put_user(UINPUT_VERSION, (unsigned int __user *)p))
			retval = -EFAULT;
		goto out;
	case UI_DEV_CREATE:
		retval = uinput_create_device(udev);
		goto out;
	case UI_DEV_DESTROY:
		uinput_destroy_device(udev);
		goto out;
	case UI_DEV_SETUP:
		retval = uinput_dev_setup(udev, p);
		goto out;
	/* UI_ABS_SETUP is handled in the variable size ioctls */
	case UI_SET_EVBIT:
		retval = uinput_set_bit(arg, evbit, EV_MAX);
		goto out;
	case UI_SET_KEYBIT:
		retval = uinput_set_bit(arg, keybit, KEY_MAX);
		goto out;
	case UI_SET_RELBIT:
		retval = uinput_set_bit(arg, relbit, REL_MAX);
		goto out;
	case UI_SET_ABSBIT:
		retval = uinput_set_bit(arg, absbit, ABS_MAX);
		goto out;
	case UI_SET_MSCBIT:
		retval = uinput_set_bit(arg, mscbit, MSC_MAX);
		goto out;
	case UI_SET_LEDBIT:
		retval = uinput_set_bit(arg, ledbit, LED_MAX);
		goto out;
	case UI_SET_SNDBIT:
		retval = uinput_set_bit(arg, sndbit, SND_MAX);
		goto out;
	case UI_SET_FFBIT:
		retval = uinput_set_bit(arg, ffbit, FF_MAX);
		goto out;
	case UI_SET_SWBIT:
		retval = uinput_set_bit(arg, swbit, SW_MAX);
		goto out;
	case UI_SET_PROPBIT:
		retval = uinput_set_bit(arg, propbit, INPUT_PROP_MAX);
		goto out;
	case UI_SET_PHYS:
		if (udev->state == UIST_CREATED) {
			retval = -EINVAL;
			goto out;
		}
		/* bounded copy of the user-supplied phys string */
		phys = strndup_user(p, 1024);
		if (IS_ERR(phys)) {
			retval = PTR_ERR(phys);
			goto out;
		}
		kfree(udev->dev->phys);
		udev->dev->phys = phys;
		goto out;
	case UI_BEGIN_FF_UPLOAD:
		retval = uinput_ff_upload_from_user(p, &ff_up);
		if (retval)
			goto out;
		req = uinput_request_find(udev, ff_up.request_id);
		if (!req || req->code != UI_FF_UPLOAD ||
		    !req->u.upload.effect) {
			retval = -EINVAL;
			goto out;
		}
		/* hand the pending effect (and any old one) back to userspace */
		ff_up.retval = 0;
		ff_up.effect = *req->u.upload.effect;
		if (req->u.upload.old)
			ff_up.old = *req->u.upload.old;
		else
			memset(&ff_up.old, 0, sizeof(struct ff_effect));
		retval = uinput_ff_upload_to_user(p, &ff_up);
		goto out;
	case UI_BEGIN_FF_ERASE:
		if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
			retval = -EFAULT;
			goto out;
		}
		req = uinput_request_find(udev, ff_erase.request_id);
		if (!req || req->code != UI_FF_ERASE) {
			retval = -EINVAL;
			goto out;
		}
		ff_erase.retval = 0;
		ff_erase.effect_id = req->u.effect_id;
		if (copy_to_user(p, &ff_erase, sizeof(ff_erase))) {
			retval = -EFAULT;
			goto out;
		}
		goto out;
	case UI_END_FF_UPLOAD:
		retval = uinput_ff_upload_from_user(p, &ff_up);
		if (retval)
			goto out;
		req = uinput_request_find(udev, ff_up.request_id);
		if (!req || req->code != UI_FF_UPLOAD ||
		    !req->u.upload.effect) {
			retval = -EINVAL;
			goto out;
		}
		/* wake the waiter that started this FF request */
		req->retval = ff_up.retval;
		complete(&req->done);
		goto out;
	case UI_END_FF_ERASE:
		if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
			retval = -EFAULT;
			goto out;
		}
		req = uinput_request_find(udev, ff_erase.request_id);
		if (!req || req->code != UI_FF_ERASE) {
			retval = -EINVAL;
			goto out;
		}
		req->retval = ff_erase.retval;
		complete(&req->done);
		goto out;
	}
	size = _IOC_SIZE(cmd);
	/* Now check variable-length commands */
	switch (cmd & ~IOCSIZE_MASK) {
	case UI_GET_SYSNAME(0):
		if (udev->state != UIST_CREATED) {
			retval = -ENOENT;
			goto out;
		}
		name = dev_name(&udev->dev->dev);
		retval = uinput_str_to_user(p, name, size);
		goto out;
	case UI_ABS_SETUP & ~IOCSIZE_MASK:
		retval = uinput_abs_setup(udev, p, size);
		goto out;
	}
	retval = -EINVAL;
 out:
	mutex_unlock(&udev->mutex);
	return retval;
}
static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return uinput_ioctl_handler(file, cmd, arg, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
/*
* These IOCTLs change their size and thus their numbers between
* 32 and 64 bits.
*/
#define UI_SET_PHYS_COMPAT \
_IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
#define UI_BEGIN_FF_UPLOAD_COMPAT \
_IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload_compat)
#define UI_END_FF_UPLOAD_COMPAT \
_IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload_compat)
/*
 * Compat ioctl entry point: remap the size-dependent command numbers to
 * their native equivalents, then share the common handler.  The pointer
 * argument is converted with compat_ptr().
 */
static long uinput_compat_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	if (cmd == UI_SET_PHYS_COMPAT)
		cmd = UI_SET_PHYS;
	else if (cmd == UI_BEGIN_FF_UPLOAD_COMPAT)
		cmd = UI_BEGIN_FF_UPLOAD;
	else if (cmd == UI_END_FF_UPLOAD_COMPAT)
		cmd = UI_END_FF_UPLOAD;

	return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
}
#endif
/* File operations for /dev/uinput. */
static const struct file_operations uinput_fops = {
	.owner		= THIS_MODULE,
	.open		= uinput_open,
	.release	= uinput_release,
	.read		= uinput_read,
	.write		= uinput_write,
	.poll		= uinput_poll,
	.unlocked_ioctl	= uinput_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= uinput_compat_ioctl,
#endif
	.llseek		= no_llseek,
};
static struct miscdevice uinput_misc = {
.fops = &uinput_fops,
.minor = UINPUT_MINOR,
.name = UINPUT_NAME,
};
module_misc_device(uinput_misc);
MODULE_ALIAS_MISCDEV(UINPUT_MINOR);
MODULE_ALIAS("devname:" UINPUT_NAME);
MODULE_AUTHOR("Aristeu Sergio Rozanski Filho");
MODULE_DESCRIPTION("User level driver support for input subsystem");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/uinput.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* twl6040-vibra.c - TWL6040 Vibrator driver
*
* Author: Jorge Eduardo Candelaria <[email protected]>
* Author: Misael Lopez Cruz <[email protected]>
*
* Copyright: (C) 2011 Texas Instruments, Inc.
*
* Based on twl4030-vibra.c by Henrik Saari <[email protected]>
* Felipe Balbi <[email protected]>
* Jari Vanhala <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/workqueue.h>
#include <linux/input.h>
#include <linux/mfd/twl6040.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#define EFFECT_DIR_180_DEG 0x8000
/* Recommended modulation index 85% */
#define TWL6040_VIBRA_MOD 85
#define TWL6040_NUM_SUPPLIES 2
/* Per-device state for the TWL6040 vibra function. */
struct vibra_info {
	struct device *dev;
	struct input_dev *input_dev;
	struct work_struct play_work;	/* applies FF effects outside IRQ context */
	int irq;
	bool enabled;			/* vibrator drivers currently powered */
	int weak_speed;			/* rumble weak magnitude from userspace */
	int strong_speed;		/* rumble strong magnitude from userspace */
	int direction;			/* +1 forward, -1 reverse */
	unsigned int vibldrv_res;	/* left driver resistance (from DT) */
	unsigned int vibrdrv_res;	/* right driver resistance (from DT) */
	unsigned int viblmotor_res;	/* left motor resistance (from DT) */
	unsigned int vibrmotor_res;	/* right motor resistance (from DT) */
	struct regulator_bulk_data supplies[TWL6040_NUM_SUPPLIES];
	struct twl6040 *twl6040;
};
/*
 * Overcurrent interrupt: read the chip status and disable whichever
 * vibrator driver (left/right) reported an overcurrent condition.
 */
static irqreturn_t twl6040_vib_irq_handler(int irq, void *data)
{
	struct vibra_info *info = data;
	struct twl6040 *twl6040 = info->twl6040;
	u8 status;
	status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
	if (status & TWL6040_VIBLOCDET) {
		dev_warn(info->dev, "Left Vibrator overcurrent detected\n");
		twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLL,
				   TWL6040_VIBENA);
	}
	if (status & TWL6040_VIBROCDET) {
		dev_warn(info->dev, "Right Vibrator overcurrent detected\n");
		twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLR,
				   TWL6040_VIBENA);
	}
	return IRQ_HANDLED;
}
/*
 * Power up both vibrator drivers: enable the supply regulators, power the
 * twl6040 core, then set VIBENA on both channels.  On early silicon an
 * errata workaround briefly disables overcurrent protection first.
 * Sets info->enabled on success; on regulator failure it returns with the
 * vibrator left disabled (callers only see info->enabled unchanged).
 */
static void twl6040_vibra_enable(struct vibra_info *info)
{
	struct twl6040 *twl6040 = info->twl6040;
	int ret;
	ret = regulator_bulk_enable(ARRAY_SIZE(info->supplies), info->supplies);
	if (ret) {
		dev_err(info->dev, "failed to enable regulators %d\n", ret);
		return;
	}
	twl6040_power(info->twl6040, 1);
	if (twl6040_get_revid(twl6040) <= TWL6040_REV_ES1_1) {
		/*
		 * ERRATA: Disable overcurrent protection for at least
		 * 3ms when enabling vibrator drivers to avoid false
		 * overcurrent detection
		 */
		twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL,
				  TWL6040_VIBENA | TWL6040_VIBCTRL);
		twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR,
				  TWL6040_VIBENA | TWL6040_VIBCTRL);
		usleep_range(3000, 3500);
	}
	twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL,
			  TWL6040_VIBENA);
	twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR,
			  TWL6040_VIBENA);
	info->enabled = true;
}
/*
 * Reverse of twl6040_vibra_enable(): clear both channel control registers,
 * drop the core power reference and disable the supply regulators.
 */
static void twl6040_vibra_disable(struct vibra_info *info)
{
	struct twl6040 *twl6040 = info->twl6040;
	twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL, 0x00);
	twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR, 0x00);
	twl6040_power(info->twl6040, 0);
	regulator_bulk_disable(ARRAY_SIZE(info->supplies), info->supplies);
	info->enabled = false;
}
/*
 * Convert a requested speed (0..USHRT_MAX) into a VIBDAT register code.
 * The maximum usable code follows from the output swing allowed by the
 * supply voltage (mV) and the driver/motor resistive divider, with the
 * recommended modulation index applied.  For direction == -1 the result
 * wraps to the two's complement within the u8, reversing the motor.
 */
static u8 twl6040_vibra_code(int vddvib, int vibdrv_res, int motor_res,
			     int speed, int direction)
{
	int swing_mv;
	int max_code;

	/* peak output swing across the motor, in mV */
	swing_mv = (vddvib * motor_res * TWL6040_VIBRA_MOD) /
		   (100 * (vibdrv_res + motor_res));

	/* each VIBDAT step is worth 50 mV; clamp to the register maximum */
	max_code = swing_mv / 50;
	if (max_code > TWL6040_VIBDAT_MAX)
		max_code = TWL6040_VIBDAT_MAX;

	/* scale the requested speed into the permitted code range */
	return (u8)((speed * max_code) / USHRT_MAX) * direction;
}
/*
 * Program VIBDATL/VIBDATR from the cached weak/strong speeds.  The supply
 * voltage of each channel is read back from its regulator (uV -> mV) so
 * the code computation tracks the actual rail.
 */
static void twl6040_vibra_set_effect(struct vibra_info *info)
{
	struct twl6040 *twl6040 = info->twl6040;
	u8 vibdatl, vibdatr;
	int volt;
	/* weak motor */
	volt = regulator_get_voltage(info->supplies[0].consumer) / 1000;
	vibdatl = twl6040_vibra_code(volt, info->vibldrv_res,
				     info->viblmotor_res,
				     info->weak_speed, info->direction);
	/* strong motor */
	volt = regulator_get_voltage(info->supplies[1].consumer) / 1000;
	vibdatr = twl6040_vibra_code(volt, info->vibrdrv_res,
				     info->vibrmotor_res,
				     info->strong_speed, info->direction);
	twl6040_reg_write(twl6040, TWL6040_REG_VIBDATL, vibdatl);
	twl6040_reg_write(twl6040, TWL6040_REG_VIBDATR, vibdatr);
}
/*
 * Deferred FF playback: refuses to touch the hardware while the vibra
 * path is routed to audio, otherwise powers the drivers up/down as needed
 * and applies the current effect levels.
 */
static void vibra_play_work(struct work_struct *work)
{
	struct vibra_info *info = container_of(work,
				struct vibra_info, play_work);
	int ret;
	/* Do not allow effect, while the routing is set to use audio */
	ret = twl6040_get_vibralr_status(info->twl6040);
	if (ret & TWL6040_VIBSEL) {
		dev_info(info->dev, "Vibra is configured for audio\n");
		return;
	}
	if (info->weak_speed || info->strong_speed) {
		if (!info->enabled)
			twl6040_vibra_enable(info);
		twl6040_vibra_set_effect(info);
	} else if (info->enabled)
		twl6040_vibra_disable(info);
}
/*
 * ff-memless play callback: cache the rumble magnitudes and direction,
 * then defer the actual register writes to the workqueue (this callback
 * may run in atomic context).
 */
static int vibra_play(struct input_dev *input, void *data,
		      struct ff_effect *effect)
{
	struct vibra_info *info = input_get_drvdata(input);

	/* angles of 180 degrees and beyond reverse the motor */
	info->direction = (effect->direction < EFFECT_DIR_180_DEG) ? 1 : -1;
	info->weak_speed = effect->u.rumble.weak_magnitude;
	info->strong_speed = effect->u.rumble.strong_magnitude;
	schedule_work(&info->play_work);

	return 0;
}
/*
 * Input close callback: flush any pending playback work, then power the
 * vibrator down if it is still running.
 */
static void twl6040_vibra_close(struct input_dev *input)
{
	struct vibra_info *info = input_get_drvdata(input);
	cancel_work_sync(&info->play_work);
	if (info->enabled)
		twl6040_vibra_disable(info);
}
/*
 * System suspend: stop any queued playback and make sure the vibrator is
 * powered off before the system goes down.
 */
static int twl6040_vibra_suspend(struct device *dev)
{
	struct vibra_info *info = dev_get_drvdata(dev);

	cancel_work_sync(&info->play_work);

	if (info->enabled)
		twl6040_vibra_disable(info);

	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops,
twl6040_vibra_suspend, NULL);
/*
 * twl6040_vibra_probe - bind the vibra function of the twl6040 MFD
 *
 * Reads driver/motor resistances and optional supply voltages from the
 * "vibra" child node of the parent device, requests the overcurrent IRQ,
 * acquires the VDDVIBL/VDDVIBR regulators (owned by the MFD core when
 * booted from DT) and registers a memless force-feedback input device.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fix: propagate the error code from platform_get_irq() instead of
 * flattening it to -EINVAL, so -EPROBE_DEFER is not lost.
 */
static int twl6040_vibra_probe(struct platform_device *pdev)
{
	struct device *twl6040_core_dev = pdev->dev.parent;
	struct device_node *twl6040_core_node;
	struct vibra_info *info;
	int vddvibl_uV = 0;
	int vddvibr_uV = 0;
	int error;

	twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
						 "vibra");
	if (!twl6040_core_node) {
		dev_err(&pdev->dev, "parent of node is missing?\n");
		return -EINVAL;
	}

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		of_node_put(twl6040_core_node);
		dev_err(&pdev->dev, "couldn't allocate memory\n");
		return -ENOMEM;
	}

	info->dev = &pdev->dev;
	info->twl6040 = dev_get_drvdata(pdev->dev.parent);

	of_property_read_u32(twl6040_core_node, "ti,vibldrv-res",
			     &info->vibldrv_res);
	of_property_read_u32(twl6040_core_node, "ti,vibrdrv-res",
			     &info->vibrdrv_res);
	of_property_read_u32(twl6040_core_node, "ti,viblmotor-res",
			     &info->viblmotor_res);
	of_property_read_u32(twl6040_core_node, "ti,vibrmotor-res",
			     &info->vibrmotor_res);
	of_property_read_u32(twl6040_core_node, "ti,vddvibl-uV", &vddvibl_uV);
	of_property_read_u32(twl6040_core_node, "ti,vddvibr-uV", &vddvibr_uV);

	of_node_put(twl6040_core_node);

	/* at least one of driver/motor resistance is needed per channel */
	if ((!info->vibldrv_res && !info->viblmotor_res) ||
	    (!info->vibrdrv_res && !info->vibrmotor_res)) {
		dev_err(info->dev, "invalid vibra driver/motor resistance\n");
		return -EINVAL;
	}

	info->irq = platform_get_irq(pdev, 0);
	if (info->irq < 0)
		return info->irq;	/* keep -EPROBE_DEFER and friends */

	error = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
					  twl6040_vib_irq_handler,
					  IRQF_ONESHOT,
					  "twl6040_irq_vib", info);
	if (error) {
		dev_err(info->dev, "VIB IRQ request failed: %d\n", error);
		return error;
	}

	info->supplies[0].supply = "vddvibl";
	info->supplies[1].supply = "vddvibr";
	/*
	 * When booted with Device tree the regulators are attached to the
	 * parent device (twl6040 MFD core)
	 */
	error = devm_regulator_bulk_get(twl6040_core_dev,
					ARRAY_SIZE(info->supplies),
					info->supplies);
	if (error) {
		dev_err(info->dev, "couldn't get regulators %d\n", error);
		return error;
	}

	if (vddvibl_uV) {
		error = regulator_set_voltage(info->supplies[0].consumer,
					      vddvibl_uV, vddvibl_uV);
		if (error) {
			dev_err(info->dev, "failed to set VDDVIBL volt %d\n",
				error);
			return error;
		}
	}

	if (vddvibr_uV) {
		error = regulator_set_voltage(info->supplies[1].consumer,
					      vddvibr_uV, vddvibr_uV);
		if (error) {
			dev_err(info->dev, "failed to set VDDVIBR volt %d\n",
				error);
			return error;
		}
	}

	INIT_WORK(&info->play_work, vibra_play_work);

	info->input_dev = devm_input_allocate_device(&pdev->dev);
	if (!info->input_dev) {
		dev_err(info->dev, "couldn't allocate input device\n");
		return -ENOMEM;
	}

	input_set_drvdata(info->input_dev, info);

	info->input_dev->name = "twl6040:vibrator";
	info->input_dev->id.version = 1;
	info->input_dev->close = twl6040_vibra_close;
	__set_bit(FF_RUMBLE, info->input_dev->ffbit);

	error = input_ff_create_memless(info->input_dev, NULL, vibra_play);
	if (error) {
		dev_err(info->dev, "couldn't register vibrator to FF\n");
		return error;
	}

	error = input_register_device(info->input_dev);
	if (error) {
		dev_err(info->dev, "couldn't register input device\n");
		return error;
	}

	platform_set_drvdata(pdev, info);

	return 0;
}
static struct platform_driver twl6040_vibra_driver = {
.probe = twl6040_vibra_probe,
.driver = {
.name = "twl6040-vibra",
.pm = pm_sleep_ptr(&twl6040_vibra_pm_ops),
},
};
module_platform_driver(twl6040_vibra_driver);
MODULE_ALIAS("platform:twl6040-vibra");
MODULE_DESCRIPTION("TWL6040 Vibra driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jorge Eduardo Candelaria <[email protected]>");
MODULE_AUTHOR("Misael Lopez Cruz <[email protected]>");
|
linux-master
|
drivers/input/misc/twl6040-vibra.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Implements I2C interface for VTI CMA300_D0x Accelerometer driver
*
* Copyright (C) 2010 Texas Instruments
* Author: Hemanth V <[email protected]>
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/input/cma3000.h>
#include "cma3000_d0x.h"
/*
 * Bus write op for the CMA3000 core: write one register over SMBus,
 * logging @msg on failure.  Returns the i2c_smbus result (negative errno
 * on failure).
 */
static int cma3000_i2c_set(struct device *dev,
			   u8 reg, u8 val, char *msg)
{
	struct i2c_client *client = to_i2c_client(dev);
	int ret = i2c_smbus_write_byte_data(client, reg, val);

	if (ret < 0)
		dev_err(&client->dev,
			"%s failed (%s, %d)\n", __func__, msg, ret);

	return ret;
}
/*
 * Bus read op for the CMA3000 core: read one register over SMBus,
 * logging @msg on failure.  Returns the register value or a negative
 * errno.
 */
static int cma3000_i2c_read(struct device *dev, u8 reg, char *msg)
{
	struct i2c_client *client = to_i2c_client(dev);
	int ret = i2c_smbus_read_byte_data(client, reg);

	if (ret < 0)
		dev_err(&client->dev,
			"%s failed (%s, %d)\n", __func__, msg, ret);

	return ret;
}
static const struct cma3000_bus_ops cma3000_i2c_bops = {
.bustype = BUS_I2C,
#define CMA3000_BUSI2C (0 << 4)
.ctrl_mod = CMA3000_BUSI2C,
.read = cma3000_i2c_read,
.write = cma3000_i2c_set,
};
/*
 * Probe: hand off to the bus-agnostic core with the I2C bus ops; the core
 * returns the per-device state or an ERR_PTR.
 */
static int cma3000_i2c_probe(struct i2c_client *client)
{
	struct cma3000_accl_data *data;
	data = cma3000_init(&client->dev, client->irq, &cma3000_i2c_bops);
	if (IS_ERR(data))
		return PTR_ERR(data);
	i2c_set_clientdata(client, data);
	return 0;
}
/* Remove: tear down the state created by cma3000_init(). */
static void cma3000_i2c_remove(struct i2c_client *client)
{
	struct cma3000_accl_data *data = i2c_get_clientdata(client);
	cma3000_exit(data);
}
/* System suspend: delegate to the bus-agnostic core. */
static int cma3000_i2c_suspend(struct device *dev)
{
	struct cma3000_accl_data *data =
			i2c_get_clientdata(to_i2c_client(dev));

	cma3000_suspend(data);

	return 0;
}
/* System resume: delegate to the bus-agnostic core. */
static int cma3000_i2c_resume(struct device *dev)
{
	struct cma3000_accl_data *data =
			i2c_get_clientdata(to_i2c_client(dev));

	cma3000_resume(data);

	return 0;
}
static const struct dev_pm_ops cma3000_i2c_pm_ops = {
.suspend = cma3000_i2c_suspend,
.resume = cma3000_i2c_resume,
};
static const struct i2c_device_id cma3000_i2c_id[] = {
{ "cma3000_d01", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, cma3000_i2c_id);
static struct i2c_driver cma3000_i2c_driver = {
.probe = cma3000_i2c_probe,
.remove = cma3000_i2c_remove,
.id_table = cma3000_i2c_id,
.driver = {
.name = "cma3000_i2c_accl",
.pm = pm_sleep_ptr(&cma3000_i2c_pm_ops),
},
};
module_i2c_driver(cma3000_i2c_driver);
MODULE_DESCRIPTION("CMA3000-D0x Accelerometer I2C Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hemanth V <[email protected]>");
|
linux-master
|
drivers/input/misc/cma3000_d0x_i2c.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Rockchip RK805 PMIC Power Key driver
*
* Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
*
* Author: Joseph Chen <[email protected]>
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
/* Falling-edge interrupt: the power key was pressed. */
static irqreturn_t pwrkey_fall_irq(int irq, void *_pwr)
{
	struct input_dev *pwr = _pwr;
	input_report_key(pwr, KEY_POWER, 1);
	input_sync(pwr);
	return IRQ_HANDLED;
}
/* Rising-edge interrupt: the power key was released. */
static irqreturn_t pwrkey_rise_irq(int irq, void *_pwr)
{
	struct input_dev *pwr = _pwr;
	input_report_key(pwr, KEY_POWER, 0);
	input_sync(pwr);
	return IRQ_HANDLED;
}
/*
 * Probe: allocate an input device reporting KEY_POWER and wire the two
 * edge interrupts (fall = press, rise = release) provided by the RK805
 * PMIC.  Returns 0 or a negative errno (IRQ lookup errors, including
 * -EPROBE_DEFER, are propagated).
 */
static int rk805_pwrkey_probe(struct platform_device *pdev)
{
	struct input_dev *pwr;
	int fall_irq, rise_irq;
	int err;
	pwr = devm_input_allocate_device(&pdev->dev);
	if (!pwr) {
		dev_err(&pdev->dev, "Can't allocate power button\n");
		return -ENOMEM;
	}
	pwr->name = "rk805 pwrkey";
	pwr->phys = "rk805_pwrkey/input0";
	pwr->id.bustype = BUS_HOST;
	input_set_capability(pwr, EV_KEY, KEY_POWER);
	fall_irq = platform_get_irq(pdev, 0);
	if (fall_irq < 0)
		return fall_irq;
	rise_irq = platform_get_irq(pdev, 1);
	if (rise_irq < 0)
		return rise_irq;
	/*
	 * NOTE(review): the IRQs are devm-managed against the input
	 * device's embedded struct device (&pwr->dev), not &pdev->dev —
	 * confirm this lifetime is intended.
	 */
	err = devm_request_any_context_irq(&pwr->dev, fall_irq,
					   pwrkey_fall_irq,
					   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					   "rk805_pwrkey_fall", pwr);
	if (err < 0) {
		dev_err(&pdev->dev, "Can't register fall irq: %d\n", err);
		return err;
	}
	err = devm_request_any_context_irq(&pwr->dev, rise_irq,
					   pwrkey_rise_irq,
					   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					   "rk805_pwrkey_rise", pwr);
	if (err < 0) {
		dev_err(&pdev->dev, "Can't register rise irq: %d\n", err);
		return err;
	}
	err = input_register_device(pwr);
	if (err) {
		dev_err(&pdev->dev, "Can't register power button: %d\n", err);
		return err;
	}
	platform_set_drvdata(pdev, pwr);
	device_init_wakeup(&pdev->dev, true);
	return 0;
}
static struct platform_driver rk805_pwrkey_driver = {
.probe = rk805_pwrkey_probe,
.driver = {
.name = "rk805-pwrkey",
},
};
module_platform_driver(rk805_pwrkey_driver);
MODULE_ALIAS("platform:rk805-pwrkey");
MODULE_AUTHOR("Joseph Chen <[email protected]>");
MODULE_DESCRIPTION("RK805 PMIC Power Key driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/rk805-pwrkey.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) ST-Ericsson SA 2010
*
* Author: Sundar Iyer <[email protected]> for ST-Ericsson
*
* AB8500 Power-On Key handler
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/of.h>
#include <linux/slab.h>
/**
* struct ab8500_ponkey - ab8500 ponkey information
* @idev: pointer to input device
* @ab8500: ab8500 parent
* @irq_dbf: irq number for falling transition
* @irq_dbr: irq number for rising transition
*/
struct ab8500_ponkey {
struct input_dev *idev;
struct ab8500 *ab8500;
int irq_dbf;
int irq_dbr;
};
/* AB8500 gives us an interrupt when ONKEY is held */
/* AB8500 gives us an interrupt when ONKEY is held */
static irqreturn_t ab8500_ponkey_handler(int irq, void *data)
{
	struct ab8500_ponkey *ponkey = data;

	/* debounce-falling IRQ means pressed, debounce-rising means released */
	if (irq == ponkey->irq_dbf || irq == ponkey->irq_dbr)
		input_report_key(ponkey->idev, KEY_POWER,
				 irq == ponkey->irq_dbf);

	input_sync(ponkey->idev);

	return IRQ_HANDLED;
}
/*
 * Probe: look up the two named debounce IRQs, allocate the state and a
 * devm input device reporting KEY_POWER, and attach the shared handler to
 * both edges.  Returns 0 or a negative errno (IRQ lookup errors are
 * propagated, preserving -EPROBE_DEFER).
 */
static int ab8500_ponkey_probe(struct platform_device *pdev)
{
	struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
	struct ab8500_ponkey *ponkey;
	struct input_dev *input;
	int irq_dbf, irq_dbr;
	int error;
	irq_dbf = platform_get_irq_byname(pdev, "ONKEY_DBF");
	if (irq_dbf < 0)
		return irq_dbf;
	irq_dbr = platform_get_irq_byname(pdev, "ONKEY_DBR");
	if (irq_dbr < 0)
		return irq_dbr;
	ponkey = devm_kzalloc(&pdev->dev, sizeof(struct ab8500_ponkey),
			      GFP_KERNEL);
	if (!ponkey)
		return -ENOMEM;
	input = devm_input_allocate_device(&pdev->dev);
	if (!input)
		return -ENOMEM;
	ponkey->idev = input;
	ponkey->ab8500 = ab8500;
	ponkey->irq_dbf = irq_dbf;
	ponkey->irq_dbr = irq_dbr;
	input->name = "AB8500 POn(PowerOn) Key";
	input->dev.parent = &pdev->dev;
	input_set_capability(input, EV_KEY, KEY_POWER);
	/* same handler for both IRQs; it tells press/release apart by irq */
	error = devm_request_any_context_irq(&pdev->dev, ponkey->irq_dbf,
					     ab8500_ponkey_handler, 0,
					     "ab8500-ponkey-dbf", ponkey);
	if (error < 0) {
		dev_err(ab8500->dev, "Failed to request dbf IRQ#%d: %d\n",
			ponkey->irq_dbf, error);
		return error;
	}
	error = devm_request_any_context_irq(&pdev->dev, ponkey->irq_dbr,
					     ab8500_ponkey_handler, 0,
					     "ab8500-ponkey-dbr", ponkey);
	if (error < 0) {
		dev_err(ab8500->dev, "Failed to request dbr IRQ#%d: %d\n",
			ponkey->irq_dbr, error);
		return error;
	}
	error = input_register_device(ponkey->idev);
	if (error) {
		dev_err(ab8500->dev, "Can't register input device: %d\n", error);
		return error;
	}
	return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id ab8500_ponkey_match[] = {
{ .compatible = "stericsson,ab8500-ponkey", },
{}
};
MODULE_DEVICE_TABLE(of, ab8500_ponkey_match);
#endif
static struct platform_driver ab8500_ponkey_driver = {
.driver = {
.name = "ab8500-poweron-key",
.of_match_table = of_match_ptr(ab8500_ponkey_match),
},
.probe = ab8500_ponkey_probe,
};
module_platform_driver(ab8500_ponkey_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sundar Iyer <[email protected]>");
MODULE_DESCRIPTION("ST-Ericsson AB8500 Power-ON(Pon) Key driver");
|
linux-master
|
drivers/input/misc/ab8500-ponkey.c
|
/*
* wm831x-on.c - WM831X ON pin driver
*
* Copyright (C) 2009 Wolfson Microelectronics plc
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/mfd/wm831x/core.h>
/* Per-device state for the WM831x ON pin. */
struct wm831x_on {
	struct input_dev *dev;		/* reports KEY_POWER */
	struct delayed_work work;	/* polls the pin until deasserted */
	struct wm831x *wm831x;
};
/*
* The chip gives us an interrupt when the ON pin is asserted but we
* then need to poll to see when the pin is deasserted.
*/
/*
 * The chip gives us an interrupt when the ON pin is asserted but we
 * then need to poll to see when the pin is deasserted.
 */
static void wm831x_poll_on(struct work_struct *work)
{
	struct wm831x_on *wm831x_on = container_of(work, struct wm831x_on,
						   work.work);
	struct wm831x *wm831x = wm831x_on->wm831x;
	int poll, ret;
	ret = wm831x_reg_read(wm831x, WM831X_ON_PIN_CONTROL);
	if (ret >= 0) {
		/* pin is active low: status bit clear means still pressed */
		poll = !(ret & WM831X_ON_PIN_STS);
		input_report_key(wm831x_on->dev, KEY_POWER, poll);
		input_sync(wm831x_on->dev);
	} else {
		dev_err(wm831x->dev, "Failed to read ON status: %d\n", ret);
		poll = 1;
	}
	/* keep polling (100 jiffies apart) until the pin is released */
	if (poll)
		schedule_delayed_work(&wm831x_on->work, 100);
}
/* ON pin asserted: kick off the polling loop immediately. */
static irqreturn_t wm831x_on_irq(int irq, void *data)
{
	struct wm831x_on *wm831x_on = data;
	schedule_delayed_work(&wm831x_on->work, 0);
	return IRQ_HANDLED;
}
/*
 * Probe: allocate state and a devm input device for KEY_POWER, request
 * the (non-devm) ON pin IRQ and register the input device.  The goto
 * cleanup chain releases the IRQ if input registration fails; the devm
 * allocations clean up automatically.
 */
static int wm831x_on_probe(struct platform_device *pdev)
{
	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
	struct wm831x_on *wm831x_on;
	/* translate the local IRQ index into the wm831x IRQ domain */
	int irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
	int ret;
	wm831x_on = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_on),
				 GFP_KERNEL);
	if (!wm831x_on) {
		dev_err(&pdev->dev, "Can't allocate data\n");
		return -ENOMEM;
	}
	wm831x_on->wm831x = wm831x;
	INIT_DELAYED_WORK(&wm831x_on->work, wm831x_poll_on);
	wm831x_on->dev = devm_input_allocate_device(&pdev->dev);
	if (!wm831x_on->dev) {
		dev_err(&pdev->dev, "Can't allocate input dev\n");
		ret = -ENOMEM;
		goto err;
	}
	wm831x_on->dev->evbit[0] = BIT_MASK(EV_KEY);
	wm831x_on->dev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
	wm831x_on->dev->name = "wm831x_on";
	wm831x_on->dev->phys = "wm831x_on/input0";
	wm831x_on->dev->dev.parent = &pdev->dev;
	ret = request_threaded_irq(irq, NULL, wm831x_on_irq,
				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				   "wm831x_on",
				   wm831x_on);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to request IRQ: %d\n", ret);
		goto err_input_dev;
	}
	ret = input_register_device(wm831x_on->dev);
	if (ret) {
		dev_dbg(&pdev->dev, "Can't register input device: %d\n", ret);
		goto err_irq;
	}
	platform_set_drvdata(pdev, wm831x_on);
	return 0;
 err_irq:
	free_irq(irq, wm831x_on);
 err_input_dev:
 err:
	return ret;
}
/*
 * Remove: release the IRQ first so no new polling work can be queued,
 * then flush the poller.
 */
static int wm831x_on_remove(struct platform_device *pdev)
{
	struct wm831x_on *wm831x_on = platform_get_drvdata(pdev);

	free_irq(platform_get_irq(pdev, 0), wm831x_on);
	cancel_delayed_work_sync(&wm831x_on->work);

	return 0;
}
static struct platform_driver wm831x_on_driver = {
.probe = wm831x_on_probe,
.remove = wm831x_on_remove,
.driver = {
.name = "wm831x-on",
},
};
module_platform_driver(wm831x_on_driver);
MODULE_ALIAS("platform:wm831x-on");
MODULE_DESCRIPTION("WM831x ON pin");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Brown <[email protected]>");
|
linux-master
|
drivers/input/misc/wm831x-on.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*/
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#define VIB_MAX_LEVEL_mV (3100)
#define VIB_MIN_LEVEL_mV (1200)
#define VIB_MAX_LEVELS (VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV)
#define MAX_FF_SPEED 0xff
struct pm8xxx_regs {
unsigned int enable_addr;
unsigned int enable_mask;
unsigned int drv_addr;
unsigned int drv_mask;
unsigned int drv_shift;
unsigned int drv_en_manual_mask;
};
static const struct pm8xxx_regs pm8058_regs = {
.drv_addr = 0x4A,
.drv_mask = 0xf8,
.drv_shift = 3,
.drv_en_manual_mask = 0xfc,
};
static struct pm8xxx_regs pm8916_regs = {
.enable_addr = 0xc046,
.enable_mask = BIT(7),
.drv_addr = 0xc041,
.drv_mask = 0x1F,
.drv_shift = 0,
.drv_en_manual_mask = 0,
};
/**
* struct pm8xxx_vib - structure to hold vibrator data
* @vib_input_dev: input device supporting force feedback
* @work: work structure to set the vibration parameters
* @regmap: regmap for register read/write
* @regs: registers' info
* @speed: speed of vibration set from userland
* @active: state of vibrator
* @level: level of vibration to set in the chip
* @reg_vib_drv: regs->drv_addr register value
*/
struct pm8xxx_vib {
struct input_dev *vib_input_dev;
struct work_struct work;
struct regmap *regmap;
const struct pm8xxx_regs *regs;
int speed;
int level;
bool active;
u8 reg_vib_drv;
};
/**
* pm8xxx_vib_set - handler to start/stop vibration
* @vib: pointer to vibrator structure
* @on: state to set
*/
static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
{
	int rc;
	unsigned int val = vib->reg_vib_drv;
	const struct pm8xxx_regs *regs = vib->regs;
	if (on)
		val |= (vib->level << regs->drv_shift) & regs->drv_mask;
	else
		val &= ~regs->drv_mask;
	rc = regmap_write(vib->regmap, regs->drv_addr, val);
	if (rc < 0)
		return rc;
	/* cache the value written so the next update starts from it */
	vib->reg_vib_drv = val;
	/* variants with a separate enable register (e.g. pm8916) gate it here */
	if (regs->enable_mask)
		rc = regmap_update_bits(vib->regmap, regs->enable_addr,
					regs->enable_mask, on ? ~0 : 0);
	return rc;
}
/**
* pm8xxx_work_handler - worker to set vibration level
* @work: pointer to work_struct
*/
static void pm8xxx_work_handler(struct work_struct *work)
{
	struct pm8xxx_vib *vib = container_of(work, struct pm8xxx_vib, work);
	const struct pm8xxx_regs *regs = vib->regs;
	int rc;
	unsigned int val;
	/* bail out early if the drive register cannot be accessed;
	 * the value read is otherwise unused */
	rc = regmap_read(vib->regmap, regs->drv_addr, &val);
	if (rc < 0)
		return;
	/*
	 * pmic vibrator supports voltage ranges from 1.2 to 3.1V, so
	 * scale the level to fit into these ranges.
	 */
	if (vib->speed) {
		vib->active = true;
		vib->level = ((VIB_MAX_LEVELS * vib->speed) / MAX_FF_SPEED) +
				VIB_MIN_LEVEL_mV;
		vib->level /= 100;
	} else {
		vib->active = false;
		vib->level = VIB_MIN_LEVEL_mV / 100;
	}
	pm8xxx_vib_set(vib, vib->active);
}
/**
* pm8xxx_vib_close - callback of input close callback
* @dev: input device pointer
*
* Turns off the vibrator.
*/
static void pm8xxx_vib_close(struct input_dev *dev)
{
	struct pm8xxx_vib *vib = input_get_drvdata(dev);
	/* flush any queued level update, then force the motor off */
	cancel_work_sync(&vib->work);
	if (vib->active)
		pm8xxx_vib_set(vib, false);
}
/**
* pm8xxx_vib_play_effect - function to handle vib effects.
* @dev: input device pointer
* @data: data of effect
* @effect: effect to play
*
* Currently this driver supports only rumble effects.
*/
static int pm8xxx_vib_play_effect(struct input_dev *dev, void *data,
				  struct ff_effect *effect)
{
	struct pm8xxx_vib *vib = input_get_drvdata(dev);
	/* strong magnitude takes precedence; weak is halved (>>9 vs >>8) */
	vib->speed = effect->u.rumble.strong_magnitude >> 8;
	if (!vib->speed)
		vib->speed = effect->u.rumble.weak_magnitude >> 9;
	/* defer register access to process context */
	schedule_work(&vib->work);
	return 0;
}
/*
 * Probe: grab the parent's regmap, switch the chip to manual drive mode,
 * and register a memless rumble force-feedback input device.
 * Returns 0 or a negative errno.
 */
static int pm8xxx_vib_probe(struct platform_device *pdev)
{
	struct pm8xxx_vib *vib;
	struct input_dev *input_dev;
	int error;
	unsigned int val;
	const struct pm8xxx_regs *regs;
	vib = devm_kzalloc(&pdev->dev, sizeof(*vib), GFP_KERNEL);
	if (!vib)
		return -ENOMEM;
	vib->regmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!vib->regmap)
		return -ENODEV;
	input_dev = devm_input_allocate_device(&pdev->dev);
	if (!input_dev)
		return -ENOMEM;
	INIT_WORK(&vib->work, pm8xxx_work_handler);
	vib->vib_input_dev = input_dev;
	/* NOTE(review): match data is not NULL-checked — assumes binding
	 * only ever happens through the OF match table below */
	regs = of_device_get_match_data(&pdev->dev);
	/* operate in manual mode */
	error = regmap_read(vib->regmap, regs->drv_addr, &val);
	if (error < 0)
		return error;
	val &= regs->drv_en_manual_mask;
	error = regmap_write(vib->regmap, regs->drv_addr, val);
	if (error < 0)
		return error;
	vib->regs = regs;
	vib->reg_vib_drv = val;
	input_dev->name = "pm8xxx_vib_ffmemless";
	input_dev->id.version = 1;
	input_dev->close = pm8xxx_vib_close;
	input_set_drvdata(input_dev, vib);
	input_set_capability(vib->vib_input_dev, EV_FF, FF_RUMBLE);
	error = input_ff_create_memless(input_dev, NULL,
					pm8xxx_vib_play_effect);
	if (error) {
		dev_err(&pdev->dev,
			"couldn't register vibrator as FF device\n");
		return error;
	}
	error = input_register_device(input_dev);
	if (error) {
		dev_err(&pdev->dev, "couldn't register input device\n");
		return error;
	}
	platform_set_drvdata(pdev, vib);
	return 0;
}
/* System suspend: make sure the motor is off while the system sleeps. */
static int pm8xxx_vib_suspend(struct device *dev)
{
	struct pm8xxx_vib *vib = dev_get_drvdata(dev);

	/* Turn off the vibrator */
	pm8xxx_vib_set(vib, false);
	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(pm8xxx_vib_pm_ops, pm8xxx_vib_suspend, NULL);
static const struct of_device_id pm8xxx_vib_id_table[] = {
{ .compatible = "qcom,pm8058-vib", .data = &pm8058_regs },
{ .compatible = "qcom,pm8921-vib", .data = &pm8058_regs },
{ .compatible = "qcom,pm8916-vib", .data = &pm8916_regs },
{ }
};
MODULE_DEVICE_TABLE(of, pm8xxx_vib_id_table);
static struct platform_driver pm8xxx_vib_driver = {
.probe = pm8xxx_vib_probe,
.driver = {
.name = "pm8xxx-vib",
.pm = pm_sleep_ptr(&pm8xxx_vib_pm_ops),
.of_match_table = pm8xxx_vib_id_table,
},
};
module_platform_driver(pm8xxx_vib_driver);
MODULE_ALIAS("platform:pm8xxx_vib");
MODULE_DESCRIPTION("PMIC8xxx vibrator driver based on ff-memless framework");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Amy Maloche <[email protected]>");
|
linux-master
|
drivers/input/misc/pm8xxx-vibrator.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 Richtek Technology Corp.
* Author: ChiYuan Huang <[email protected]>
*/
#include <linux/bits.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#define RT5120_REG_INTSTAT 0x1E
#define RT5120_PWRKEYSTAT_MASK BIT(7)
/* Per-device state for the RT5120 power-key driver. */
struct rt5120_priv {
	struct regmap *regmap;		/* regmap owned by the parent PMIC */
	struct input_dev *input;	/* reports KEY_POWER events */
};
static irqreturn_t rt5120_pwrkey_handler(int irq, void *devid)
{
struct rt5120_priv *priv = devid;
unsigned int stat;
int error;
error = regmap_read(priv->regmap, RT5120_REG_INTSTAT, &stat);
if (error)
return IRQ_NONE;
input_report_key(priv->input, KEY_POWER,
!(stat & RT5120_PWRKEYSTAT_MASK));
input_sync(priv->input);
return IRQ_HANDLED;
}
/*
 * Bind the RT5120 power-key cell: locate the parent PMIC's regmap,
 * fetch the press/release IRQs from the platform device, register a
 * KEY_POWER input device, and hook both IRQs to one shared handler.
 * Returns 0 on success or a negative errno.
 */
static int rt5120_pwrkey_probe(struct platform_device *pdev)
{
	struct rt5120_priv *priv;
	struct device *dev = &pdev->dev;
	int press_irq, release_irq;
	int error;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* The regmap is provided by the parent MFD device. */
	priv->regmap = dev_get_regmap(dev->parent, NULL);
	if (!priv->regmap) {
		dev_err(dev, "Failed to init regmap\n");
		return -ENODEV;
	}

	press_irq = platform_get_irq_byname(pdev, "pwrkey-press");
	if (press_irq < 0)
		return press_irq;

	release_irq = platform_get_irq_byname(pdev, "pwrkey-release");
	if (release_irq < 0)
		return release_irq;

	/* Make input device be device resource managed */
	priv->input = devm_input_allocate_device(dev);
	if (!priv->input)
		return -ENOMEM;

	priv->input->name = "rt5120_pwrkey";
	priv->input->phys = "rt5120_pwrkey/input0";
	priv->input->id.bustype = BUS_I2C;
	input_set_capability(priv->input, EV_KEY, KEY_POWER);

	error = input_register_device(priv->input);
	if (error) {
		dev_err(dev, "Failed to register input device: %d\n", error);
		return error;
	}

	/*
	 * Both edges use the same threaded handler; it reads INTSTAT
	 * to decide whether the key is pressed or released.
	 */
	error = devm_request_threaded_irq(dev, press_irq,
					  NULL, rt5120_pwrkey_handler,
					  0, "pwrkey-press", priv);
	if (error) {
		dev_err(dev,
			"Failed to register pwrkey press irq: %d\n", error);
		return error;
	}

	error = devm_request_threaded_irq(dev, release_irq,
					  NULL, rt5120_pwrkey_handler,
					  0, "pwrkey-release", priv);
	if (error) {
		dev_err(dev,
			"Failed to register pwrkey release irq: %d\n", error);
		return error;
	}

	return 0;
}
static const struct of_device_id r5120_pwrkey_match_table[] = {
{ .compatible = "richtek,rt5120-pwrkey" },
{}
};
MODULE_DEVICE_TABLE(of, r5120_pwrkey_match_table);
static struct platform_driver rt5120_pwrkey_driver = {
.driver = {
.name = "rt5120-pwrkey",
.of_match_table = r5120_pwrkey_match_table,
},
.probe = rt5120_pwrkey_probe,
};
module_platform_driver(rt5120_pwrkey_driver);
MODULE_AUTHOR("ChiYuan Huang <[email protected]>");
MODULE_DESCRIPTION("Richtek RT5120 power key driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/rt5120-pwrkey.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Kionix, Inc.
* Written by Chris Hudson <[email protected]>
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input/kxtj9.h>
#define NAME "kxtj9"
#define G_MAX 8000
/* OUTPUT REGISTERS */
#define XOUT_L 0x06
#define WHO_AM_I 0x0F
/* CONTROL REGISTERS */
#define INT_REL 0x1A
#define CTRL_REG1 0x1B
#define INT_CTRL1 0x1E
#define DATA_CTRL 0x21
/* CONTROL REGISTER 1 BITS */
#define PC1_OFF 0x7F
#define PC1_ON (1 << 7)
/* Data ready funtion enable bit: set during probe if using irq mode */
#define DRDYE (1 << 5)
/* DATA CONTROL REGISTER BITS */
#define ODR12_5F 0
#define ODR25F 1
#define ODR50F 2
#define ODR100F 3
#define ODR200F 4
#define ODR400F 5
#define ODR800F 6
/* INTERRUPT CONTROL REGISTER 1 BITS */
/* Set these during probe if using irq mode */
#define KXTJ9_IEL (1 << 3)
#define KXTJ9_IEA (1 << 4)
#define KXTJ9_IEN (1 << 5)
/* INPUT_ABS CONSTANTS */
#define FUZZ 3
#define FLAT 3
/* RESUME STATE INDICES */
#define RES_DATA_CTRL 0
#define RES_CTRL_REG1 1
#define RES_INT_CTRL1 2
#define RESUME_ENTRIES 3
/*
 * The following table lists the maximum appropriate poll interval for each
 * available output data rate. The table is scanned in order: the first
 * entry whose cutoff (ms) exceeds the requested interval is selected;
 * the terminating cutoff of 0 makes ODR12.5 the fallback for any longer
 * interval (see kxtj9_update_odr()).
 */
static const struct {
	unsigned int cutoff;	/* intervals below this (ms) use the rate */
	u8 mask;		/* DATA_CTRL ODR bits */
} kxtj9_odr_table[] = {
	{ 3,	ODR800F },
	{ 5,	ODR400F },
	{ 10,	ODR200F },
	{ 20,	ODR100F },
	{ 40,	ODR50F  },
	{ 80,	ODR25F  },
	{ 0,	ODR12_5F},
};
/* Driver instance state. */
struct kxtj9_data {
	struct i2c_client *client;
	struct kxtj9_platform_data pdata;	/* copy of board data */
	struct input_dev *input_dev;
	unsigned int last_poll_interval;	/* current poll interval (ms) */
	u8 shift;	/* right-shift applied to raw samples (per g-range) */
	u8 ctrl_reg1;	/* cached CTRL_REG1 value */
	u8 data_ctrl;	/* cached DATA_CTRL (ODR) value */
	u8 int_ctrl;	/* cached INT_CTRL1 value (irq mode only) */
};
/*
 * Read @len bytes starting at register @addr using a combined
 * write/read i2c transaction. Returns the i2c_transfer() result:
 * number of messages transferred (2) on success or a negative errno.
 */
static int kxtj9_i2c_read(struct kxtj9_data *tj9, u8 addr, u8 *data, int len)
{
	struct i2c_client *client = tj9->client;
	struct i2c_msg msgs[2];

	/* First message: select the register address. */
	msgs[0].addr = client->addr;
	msgs[0].flags = client->flags;
	msgs[0].len = 1;
	msgs[0].buf = &addr;

	/* Second message: read back the payload. */
	msgs[1].addr = client->addr;
	msgs[1].flags = client->flags | I2C_M_RD;
	msgs[1].len = len;
	msgs[1].buf = data;

	return i2c_transfer(client->adapter, msgs, 2);
}
/*
 * Read the six output registers (xL..zH), apply the platform axis
 * map/negation and the resolution shift, and push one sample to the
 * input layer. On bus error nothing is reported.
 */
static void kxtj9_report_acceleration_data(struct kxtj9_data *tj9)
{
	s16 acc_data[3]; /* Data bytes from hardware xL, xH, yL, yH, zL, zH */
	s16 x, y, z;
	int err;

	err = kxtj9_i2c_read(tj9, XOUT_L, (u8 *)acc_data, 6);
	if (err < 0) {
		dev_err(&tj9->client->dev, "accelerometer data read failed\n");
		/*
		 * Bail out: acc_data is uninitialized on failure, so the
		 * original fall-through reported garbage stack contents.
		 */
		return;
	}

	x = le16_to_cpu(acc_data[tj9->pdata.axis_map_x]);
	y = le16_to_cpu(acc_data[tj9->pdata.axis_map_y]);
	z = le16_to_cpu(acc_data[tj9->pdata.axis_map_z]);

	/* Drop the unused low-order bits for the configured g-range. */
	x >>= tj9->shift;
	y >>= tj9->shift;
	z >>= tj9->shift;

	input_report_abs(tj9->input_dev, ABS_X, tj9->pdata.negate_x ? -x : x);
	input_report_abs(tj9->input_dev, ABS_Y, tj9->pdata.negate_y ? -y : y);
	input_report_abs(tj9->input_dev, ABS_Z, tj9->pdata.negate_z ? -z : z);
	input_sync(tj9->input_dev);
}
static irqreturn_t kxtj9_isr(int irq, void *dev)
{
struct kxtj9_data *tj9 = dev;
int err;
/* data ready is the only possible interrupt type */
kxtj9_report_acceleration_data(tj9);
err = i2c_smbus_read_byte_data(tj9->client, INT_REL);
if (err < 0)
dev_err(&tj9->client->dev,
"error clearing interrupt status: %d\n", err);
return IRQ_HANDLED;
}
/*
 * Cache the sample shift and CTRL_REG1 range bits for @new_g_range.
 * Only updates the cached register value; the caller writes it out.
 * Returns -EINVAL for an unknown range.
 */
static int kxtj9_update_g_range(struct kxtj9_data *tj9, u8 new_g_range)
{
	u8 shift;

	switch (new_g_range) {
	case KXTJ9_G_2G:
		shift = 4;
		break;
	case KXTJ9_G_4G:
		shift = 3;
		break;
	case KXTJ9_G_8G:
		shift = 2;
		break;
	default:
		return -EINVAL;
	}

	tj9->shift = shift;
	/* Clear the old range bits, then merge in the new ones. */
	tj9->ctrl_reg1 &= 0xe7;
	tj9->ctrl_reg1 |= new_g_range;

	return 0;
}
/*
 * Program the output data rate best matching @poll_interval.
 * CTRL_REG1 is written to 0 first and restored afterwards; the
 * part apparently requires PC1 cleared while configuration
 * registers change (cf. the comment in kxtj9_enable()).
 */
static int kxtj9_update_odr(struct kxtj9_data *tj9, unsigned int poll_interval)
{
	int err;
	int i;

	/* Use the lowest ODR that can support the requested poll interval */
	for (i = 0; i < ARRAY_SIZE(kxtj9_odr_table); i++) {
		tj9->data_ctrl = kxtj9_odr_table[i].mask;
		if (poll_interval < kxtj9_odr_table[i].cutoff)
			break;
	}

	/* Take the part out of operating mode before reconfiguring. */
	err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, 0);
	if (err < 0)
		return err;

	err = i2c_smbus_write_byte_data(tj9->client, DATA_CTRL, tj9->data_ctrl);
	if (err < 0)
		return err;

	/* Restore the cached CTRL_REG1 (may re-enable PC1). */
	err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, tj9->ctrl_reg1);
	if (err < 0)
		return err;

	return 0;
}
/* Invoke the board's power-on hook, if one was provided. */
static int kxtj9_device_power_on(struct kxtj9_data *tj9)
{
	if (!tj9->pdata.power_on)
		return 0;

	return tj9->pdata.power_on();
}
static void kxtj9_device_power_off(struct kxtj9_data *tj9)
{
int err;
tj9->ctrl_reg1 &= PC1_OFF;
err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, tj9->ctrl_reg1);
if (err < 0)
dev_err(&tj9->client->dev, "soft power off failed\n");
if (tj9->pdata.power_off)
tj9->pdata.power_off();
}
/*
 * Power up and fully (re)configure the sensor: clear PC1, program the
 * interrupt control (irq mode only), the g-range and the ODR, enable
 * outputs and finally clear any stale interrupt. On a late failure
 * the device is powered back off.
 */
static int kxtj9_enable(struct kxtj9_data *tj9)
{
	int err;

	err = kxtj9_device_power_on(tj9);
	if (err < 0)
		return err;

	/* ensure that PC1 is cleared before updating control registers */
	err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, 0);
	if (err < 0)
		return err;

	/* only write INT_CTRL_REG1 if in irq mode */
	if (tj9->client->irq) {
		err = i2c_smbus_write_byte_data(tj9->client,
						INT_CTRL1, tj9->int_ctrl);
		if (err < 0)
			return err;
	}

	err = kxtj9_update_g_range(tj9, tj9->pdata.g_range);
	if (err < 0)
		return err;

	/* turn on outputs */
	tj9->ctrl_reg1 |= PC1_ON;
	err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, tj9->ctrl_reg1);
	if (err < 0)
		return err;

	err = kxtj9_update_odr(tj9, tj9->last_poll_interval);
	if (err < 0)
		return err;

	/* clear initial interrupt if in irq mode */
	if (tj9->client->irq) {
		err = i2c_smbus_read_byte_data(tj9->client, INT_REL);
		if (err < 0) {
			dev_err(&tj9->client->dev,
				"error clearing interrupt: %d\n", err);
			goto fail;
		}
	}

	return 0;

fail:
	kxtj9_device_power_off(tj9);
	return err;
}
/* Counterpart of kxtj9_enable(): powers the device down. */
static void kxtj9_disable(struct kxtj9_data *tj9)
{
	kxtj9_device_power_off(tj9);
}
/* input core open(): power up and configure the sensor. */
static int kxtj9_input_open(struct input_dev *input)
{
	return kxtj9_enable(input_get_drvdata(input));
}
/* input core close(): power the sensor back down. */
static void kxtj9_input_close(struct input_dev *dev)
{
	kxtj9_disable(input_get_drvdata(dev));
}
/*
* When IRQ mode is selected, we need to provide an interface to allow the user
* to change the output data rate of the part. For consistency, we are using
* the set_poll method, which accepts a poll interval in milliseconds, and then
* calls update_odr() while passing this value as an argument. In IRQ mode, the
* data outputs will not be read AT the requested poll interval, rather, the
* lowest ODR that can support the requested interval. The client application
* will be responsible for retrieving data from the input node at the desired
* interval.
*/
/* sysfs 'poll' show: returns the currently selected poll interval (in ms). */
static ssize_t kxtj9_get_poll(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct kxtj9_data *tj9 = i2c_get_clientdata(client);

	/* last_poll_interval is unsigned int: use %u, not %d. */
	return sprintf(buf, "%u\n", tj9->last_poll_interval);
}
/*
 * sysfs 'poll' store: select a new poll interval (in ms). This
 * attribute is only registered in irq mode (see kxtj9_probe()), so
 * client->irq is valid here. Returns @count on success or a negative
 * errno (previously kxtj9_update_odr() failures were silently
 * reported as success).
 */
static ssize_t kxtj9_set_poll(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct kxtj9_data *tj9 = i2c_get_clientdata(client);
	struct input_dev *input_dev = tj9->input_dev;
	unsigned int interval;
	int error;

	error = kstrtouint(buf, 10, &interval);
	if (error < 0)
		return error;

	/* Lock the device to prevent races with open/close (and itself) */
	mutex_lock(&input_dev->mutex);

	disable_irq(client->irq);

	/*
	 * Set current interval to the greater of the minimum interval or
	 * the requested interval
	 */
	tj9->last_poll_interval = max(interval, tj9->pdata.min_interval);

	/* Propagate ODR programming failures to the caller. */
	error = kxtj9_update_odr(tj9, tj9->last_poll_interval);

	enable_irq(client->irq);
	mutex_unlock(&input_dev->mutex);

	return error ?: count;
}
static DEVICE_ATTR(poll, S_IRUGO|S_IWUSR, kxtj9_get_poll, kxtj9_set_poll);
static struct attribute *kxtj9_attributes[] = {
&dev_attr_poll.attr,
NULL
};
static struct attribute_group kxtj9_attribute_group = {
.attrs = kxtj9_attributes
};
/* Polled-mode callback: sample the sensor and track interval changes. */
static void kxtj9_poll(struct input_dev *input)
{
	struct kxtj9_data *tj9 = input_get_drvdata(input);
	unsigned int interval = input_get_poll_interval(input);

	kxtj9_report_acceleration_data(tj9);

	/* Reprogram the ODR if userspace changed the poll interval. */
	if (interval != tj9->last_poll_interval) {
		kxtj9_update_odr(tj9, interval);
		tj9->last_poll_interval = interval;
	}
}
/* devm action: invoke the board's exit hook on driver teardown. */
static void kxtj9_platform_exit(void *data)
{
	struct kxtj9_data *tj9 = data;

	if (tj9->pdata.exit)
		tj9->pdata.exit();
}
/*
 * Briefly power the part up and check WHO_AM_I. IDs 0x07 and 0x08 are
 * accepted. Returns 0 on a match, -EIO on a mismatch, or a negative
 * errno from the power hook / bus access. The device is powered back
 * off in all cases.
 */
static int kxtj9_verify(struct kxtj9_data *tj9)
{
	int retval;

	retval = kxtj9_device_power_on(tj9);
	if (retval < 0)
		return retval;

	retval = i2c_smbus_read_byte_data(tj9->client, WHO_AM_I);
	if (retval < 0) {
		dev_err(&tj9->client->dev, "read err int source\n");
		goto out;
	}

	retval = (retval == 0x07 || retval == 0x08) ? 0 : -EIO;

out:
	kxtj9_device_power_off(tj9);
	return retval;
}
/*
 * Probe: validate adapter capabilities and platform data, verify the
 * chip via WHO_AM_I, then register the input device. With an IRQ
 * wired up the driver runs in data-ready interrupt mode and exposes
 * the 'poll' sysfs attribute; otherwise it falls back to input-core
 * polling.
 */
static int kxtj9_probe(struct i2c_client *client)
{
	const struct kxtj9_platform_data *pdata =
		dev_get_platdata(&client->dev);
	struct kxtj9_data *tj9;
	struct input_dev *input_dev;
	int err;

	/* Needs raw i2c transfers (reads) and SMBus byte access (writes). */
	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_err(&client->dev, "client is not i2c capable\n");
		return -ENXIO;
	}

	if (!pdata) {
		dev_err(&client->dev, "platform data is NULL; exiting\n");
		return -EINVAL;
	}

	tj9 = devm_kzalloc(&client->dev, sizeof(*tj9), GFP_KERNEL);
	if (!tj9) {
		dev_err(&client->dev,
			"failed to allocate memory for module data\n");
		return -ENOMEM;
	}

	tj9->client = client;
	tj9->pdata = *pdata;

	if (pdata->init) {
		err = pdata->init();
		if (err < 0)
			return err;
	}

	/* Ensure the platform exit hook runs on any later failure/unbind. */
	err = devm_add_action_or_reset(&client->dev, kxtj9_platform_exit, tj9);
	if (err)
		return err;

	err = kxtj9_verify(tj9);
	if (err < 0) {
		dev_err(&client->dev, "device not recognized\n");
		return err;
	}

	i2c_set_clientdata(client, tj9);

	tj9->ctrl_reg1 = tj9->pdata.res_12bit | tj9->pdata.g_range;
	tj9->last_poll_interval = tj9->pdata.init_interval;

	input_dev = devm_input_allocate_device(&client->dev);
	if (!input_dev) {
		dev_err(&client->dev, "input device allocate failed\n");
		return -ENOMEM;
	}

	input_set_drvdata(input_dev, tj9);
	tj9->input_dev = input_dev;

	input_dev->name = "kxtj9_accel";
	input_dev->id.bustype = BUS_I2C;

	input_dev->open = kxtj9_input_open;
	input_dev->close = kxtj9_input_close;
	input_set_abs_params(input_dev, ABS_X, -G_MAX, G_MAX, FUZZ, FLAT);
	input_set_abs_params(input_dev, ABS_Y, -G_MAX, G_MAX, FUZZ, FLAT);
	input_set_abs_params(input_dev, ABS_Z, -G_MAX, G_MAX, FUZZ, FLAT);

	/* No interrupt line: let the input core poll the device. */
	if (client->irq <= 0) {
		err = input_setup_polling(input_dev, kxtj9_poll);
		if (err)
			return err;
	}

	err = input_register_device(input_dev);
	if (err) {
		dev_err(&client->dev,
			"unable to register input polled device %s: %d\n",
			input_dev->name, err);
		return err;
	}

	if (client->irq) {
		/* If in irq mode, populate INT_CTRL_REG1 and enable DRDY. */
		tj9->int_ctrl |= KXTJ9_IEN | KXTJ9_IEA | KXTJ9_IEL;
		tj9->ctrl_reg1 |= DRDYE;

		err = devm_request_threaded_irq(&client->dev, client->irq,
						NULL, kxtj9_isr,
						IRQF_TRIGGER_RISING |
							IRQF_ONESHOT,
						"kxtj9-irq", tj9);
		if (err) {
			dev_err(&client->dev, "request irq failed: %d\n", err);
			return err;
		}

		/* The 'poll' attribute is only exposed in irq mode. */
		err = devm_device_add_group(&client->dev,
					    &kxtj9_attribute_group);
		if (err) {
			dev_err(&client->dev, "sysfs create failed: %d\n", err);
			return err;
		}
	}

	return 0;
}
/* System sleep: power the sensor down if the input device is open. */
static int kxtj9_suspend(struct device *dev)
{
	struct kxtj9_data *tj9 = i2c_get_clientdata(to_i2c_client(dev));
	struct input_dev *input_dev = tj9->input_dev;

	mutex_lock(&input_dev->mutex);

	if (input_device_enabled(input_dev))
		kxtj9_disable(tj9);

	mutex_unlock(&input_dev->mutex);

	return 0;
}
/* System resume: re-enable the sensor if the input device is open. */
static int kxtj9_resume(struct device *dev)
{
	struct kxtj9_data *tj9 = i2c_get_clientdata(to_i2c_client(dev));
	struct input_dev *input_dev = tj9->input_dev;

	mutex_lock(&input_dev->mutex);

	if (input_device_enabled(input_dev))
		kxtj9_enable(tj9);

	mutex_unlock(&input_dev->mutex);

	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(kxtj9_pm_ops, kxtj9_suspend, kxtj9_resume);
static const struct i2c_device_id kxtj9_id[] = {
{ NAME, 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, kxtj9_id);
static struct i2c_driver kxtj9_driver = {
.driver = {
.name = NAME,
.pm = pm_sleep_ptr(&kxtj9_pm_ops),
},
.probe = kxtj9_probe,
.id_table = kxtj9_id,
};
module_i2c_driver(kxtj9_driver);
MODULE_DESCRIPTION("KXTJ9 accelerometer driver");
MODULE_AUTHOR("Chris Hudson <[email protected]>");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/kxtj9.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2010, Lars-Peter Clausen <[email protected]>
* PWM beeper driver
*/
#include <linux/input.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
/* State for one PWM-driven beeper. */
struct pwm_beeper {
	struct input_dev *input;
	struct pwm_device *pwm;
	struct regulator *amplifier;	/* "amp" supply for an amplifier */
	struct work_struct work;	/* applies period changes */
	unsigned long period;		/* requested PWM period (ns); 0 = off */
	unsigned int bell_frequency;	/* frequency used for SND_BELL (Hz) */
	bool suspended;			/* suppresses work scheduling in sleep */
	bool amplifier_on;		/* tracks regulator enable state */
};
#define HZ_TO_NANOSECONDS(x) (1000000000UL/(x))
/*
 * Start beeping: apply the requested period (ns) at 50% duty cycle
 * and make sure the amplifier supply is enabled. On regulator failure
 * the PWM is switched back off.
 */
static int pwm_beeper_on(struct pwm_beeper *beeper, unsigned long period)
{
	struct pwm_state state;
	int error;

	pwm_get_state(beeper->pwm, &state);
	state.enabled = true;
	state.period = period;
	pwm_set_relative_duty_cycle(&state, 50, 100);

	error = pwm_apply_state(beeper->pwm, &state);
	if (error)
		return error;

	/* Amplifier already powered: nothing more to do. */
	if (beeper->amplifier_on)
		return 0;

	error = regulator_enable(beeper->amplifier);
	if (error) {
		pwm_disable(beeper->pwm);
		return error;
	}

	beeper->amplifier_on = true;
	return 0;
}
/* Stop the beep: drop the amplifier supply (if held) and disable PWM. */
static void pwm_beeper_off(struct pwm_beeper *beeper)
{
	if (beeper->amplifier_on) {
		beeper->amplifier_on = false;
		regulator_disable(beeper->amplifier);
	}

	pwm_disable(beeper->pwm);
}
/* Deferred worker: apply the most recently requested period. */
static void pwm_beeper_work(struct work_struct *work)
{
	struct pwm_beeper *beeper = container_of(work, struct pwm_beeper, work);
	unsigned long period = READ_ONCE(beeper->period);

	if (!period)
		pwm_beeper_off(beeper);
	else
		pwm_beeper_on(beeper, period);
}
/*
 * Input EV_SND handler: translate SND_BELL/SND_TONE into a PWM period
 * and kick the worker. This runs under the input device's event_lock
 * (cf. the comment in pwm_beeper_suspend()), so the actual
 * PWM/regulator calls are deferred to the workqueue.
 */
static int pwm_beeper_event(struct input_dev *input,
			    unsigned int type, unsigned int code, int value)
{
	struct pwm_beeper *beeper = input_get_drvdata(input);

	if (type != EV_SND || value < 0)
		return -EINVAL;

	switch (code) {
	case SND_BELL:
		/* bell is a tone at the configured fixed frequency */
		value = value ? beeper->bell_frequency : 0;
		break;
	case SND_TONE:
		break;
	default:
		return -EINVAL;
	}

	/* value is the frequency in Hz; 0 means silence. */
	if (value == 0)
		beeper->period = 0;
	else
		beeper->period = HZ_TO_NANOSECONDS(value);

	if (!beeper->suspended)
		schedule_work(&beeper->work);

	return 0;
}
/* Flush any pending work, then force the beeper off. */
static void pwm_beeper_stop(struct pwm_beeper *beeper)
{
	cancel_work_sync(&beeper->work);
	pwm_beeper_off(beeper);
}
/* input core close(): silence the beeper and flush pending work. */
static void pwm_beeper_close(struct input_dev *input)
{
	pwm_beeper_stop(input_get_drvdata(input));
}
/*
 * Probe: acquire the PWM (forcing it off initially), the "amp"
 * regulator and the optional "beeper-hz" property, then register an
 * EV_SND input device handling SND_TONE and SND_BELL.
 */
static int pwm_beeper_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pwm_beeper *beeper;
	struct pwm_state state;
	u32 bell_frequency;
	int error;

	beeper = devm_kzalloc(dev, sizeof(*beeper), GFP_KERNEL);
	if (!beeper)
		return -ENOMEM;

	beeper->pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(beeper->pwm))
		return dev_err_probe(dev, PTR_ERR(beeper->pwm), "Failed to request PWM device\n");

	/* Sync up PWM state and ensure it is off. */
	pwm_init_state(beeper->pwm, &state);
	state.enabled = false;
	error = pwm_apply_state(beeper->pwm, &state);
	if (error) {
		dev_err(dev, "failed to apply initial PWM state: %d\n",
			error);
		return error;
	}

	beeper->amplifier = devm_regulator_get(dev, "amp");
	if (IS_ERR(beeper->amplifier))
		return dev_err_probe(dev, PTR_ERR(beeper->amplifier),
				     "Failed to get 'amp' regulator\n");

	INIT_WORK(&beeper->work, pwm_beeper_work);

	/* "beeper-hz" selects the SND_BELL frequency; default is 1 kHz. */
	error = device_property_read_u32(dev, "beeper-hz", &bell_frequency);
	if (error) {
		bell_frequency = 1000;
		dev_dbg(dev,
			"failed to parse 'beeper-hz' property, using default: %uHz\n",
			bell_frequency);
	}

	beeper->bell_frequency = bell_frequency;

	beeper->input = devm_input_allocate_device(dev);
	if (!beeper->input) {
		dev_err(dev, "Failed to allocate input device\n");
		return -ENOMEM;
	}

	beeper->input->name = "pwm-beeper";
	beeper->input->phys = "pwm/input0";
	beeper->input->id.bustype = BUS_HOST;
	beeper->input->id.vendor = 0x001f;
	beeper->input->id.product = 0x0001;
	beeper->input->id.version = 0x0100;

	input_set_capability(beeper->input, EV_SND, SND_TONE);
	input_set_capability(beeper->input, EV_SND, SND_BELL);

	beeper->input->event = pwm_beeper_event;
	beeper->input->close = pwm_beeper_close;

	input_set_drvdata(beeper->input, beeper);

	error = input_register_device(beeper->input);
	if (error) {
		dev_err(dev, "Failed to register input device: %d\n", error);
		return error;
	}

	platform_set_drvdata(pdev, beeper);

	return 0;
}
/* System sleep: block new work submissions, then stop the beeper. */
static int pwm_beeper_suspend(struct device *dev)
{
	struct pwm_beeper *beeper = dev_get_drvdata(dev);

	/*
	 * Spinlock is taken here is not to protect write to
	 * beeper->suspended, but to ensure that pwm_beeper_event
	 * does not re-submit work once flag is set.
	 */
	spin_lock_irq(&beeper->input->event_lock);
	beeper->suspended = true;
	spin_unlock_irq(&beeper->input->event_lock);

	pwm_beeper_stop(beeper);

	return 0;
}
/* System resume: clear the suspend flag and let the worker decide. */
static int pwm_beeper_resume(struct device *dev)
{
	struct pwm_beeper *beeper = dev_get_drvdata(dev);

	/* Pairs with the event_lock usage in pwm_beeper_suspend(). */
	spin_lock_irq(&beeper->input->event_lock);
	beeper->suspended = false;
	spin_unlock_irq(&beeper->input->event_lock);

	/* Let worker figure out if we should resume beeping */
	schedule_work(&beeper->work);

	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(pwm_beeper_pm_ops,
pwm_beeper_suspend, pwm_beeper_resume);
#ifdef CONFIG_OF
static const struct of_device_id pwm_beeper_match[] = {
{ .compatible = "pwm-beeper", },
{ },
};
MODULE_DEVICE_TABLE(of, pwm_beeper_match);
#endif
static struct platform_driver pwm_beeper_driver = {
.probe = pwm_beeper_probe,
.driver = {
.name = "pwm-beeper",
.pm = pm_sleep_ptr(&pwm_beeper_pm_ops),
.of_match_table = of_match_ptr(pwm_beeper_match),
},
};
module_platform_driver(pwm_beeper_driver);
MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>");
MODULE_DESCRIPTION("PWM beeper driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pwm-beeper");
|
linux-master
|
drivers/input/misc/pwm-beeper.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers/usb/input/yealink.c
*
* Copyright (c) 2005 Henk Vergonet <[email protected]>
*/
/*
* Description:
* Driver for the USB-P1K voip usb phone.
* This device is produced by Yealink Network Technology Co Ltd
* but may be branded under several names:
* - Yealink usb-p1k
* - Tiptel 115
* - ...
*
* This driver is based on:
* - the usbb2k-api http://savannah.nongnu.org/projects/usbb2k-api/
* - information from http://memeteau.free.fr/usbb2k
* - the xpad-driver drivers/input/joystick/xpad.c
*
* Thanks to:
* - Olivier Vandorpe, for providing the usbb2k-api.
* - Martin Diehl, for spotting my memory allocation bug.
*
* History:
* 20050527 henk First version, functional keyboard. Keyboard events
* will pop-up on the ../input/eventX bus.
* 20050531 henk Added led, LCD, dialtone and sysfs interface.
* 20050610 henk Cleanups, make it ready for public consumption.
* 20050630 henk Cleanups, fixes in response to comments.
* 20050701 henk sysfs write serialisation, fix potential unload races
* 20050801 henk Added ringtone, restructure USB
* 20050816 henk Merge 2.6.13-rc6
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <linux/usb/input.h>
#include <linux/map_to_7segment.h>
#include "yealink.h"
#define DRIVER_VERSION "yld-20051230"
#define YEALINK_POLLING_FREQUENCY 10 /* in [Hz] */
/*
 * Shadow of the phone's user-visible state. Two instances live in
 * struct yealink_dev: 'master' holds the desired state, 'copy' the
 * last state pushed to the device.
 */
struct yld_status {
	u8 lcd[24];	/* LCD segment/icon data */
	u8 led;
	u8 dialtone;
	u8 ringtone;
	u8 keynum;	/* key scan counter reported by the device */
} __attribute__ ((packed));
/*
* Register the LCD segment and icon map
*/
#define _LOC(k,l) { .a = (k), .m = (l) }
#define _SEG(t, a, am, b, bm, c, cm, d, dm, e, em, f, fm, g, gm) \
{ .type = (t), \
.u = { .s = { _LOC(a, am), _LOC(b, bm), _LOC(c, cm), \
_LOC(d, dm), _LOC(e, em), _LOC(g, gm), \
_LOC(f, fm) } } }
#define _PIC(t, h, hm, n) \
{ .type = (t), \
.u = { .p = { .name = (n), .a = (h), .m = (hm) } } }
static const struct lcd_segment_map {
char type;
union {
struct pictogram_map {
u8 a,m;
char name[10];
} p;
struct segment_map {
u8 a,m;
} s[7];
} u;
} lcdMap[] = {
#include "yealink.h"
};
/* Per-device state for one Yealink USB phone. */
struct yealink_dev {
	struct input_dev *idev;		/* input device */
	struct usb_device *udev;	/* usb device */
	struct usb_interface *intf;	/* usb interface */

	/* irq input channel */
	struct yld_ctl_packet	*irq_data;
	dma_addr_t		irq_dma;
	struct urb		*urb_irq;

	/* control output channel */
	struct yld_ctl_packet	*ctl_data;
	dma_addr_t		ctl_dma;
	struct usb_ctrlrequest	*ctl_req;
	struct urb		*urb_ctl;

	char phys[64];			/* physical device path */

	u8 lcdMap[ARRAY_SIZE(lcdMap)];	/* state of LCD, LED ... */
	int key_code;			/* last reported key */

	unsigned int shutdown:1;	/* set while killing URBs on close */

	int	stat_ix;		/* next index scanned in master/copy */
	union {
		struct yld_status s;
		u8		  b[sizeof(struct yld_status)];
	} master, copy;			/* desired vs. device-known state */
};
/*******************************************************************************
* Yealink lcd interface
******************************************************************************/
/*
* Register a default 7 segment character set
*/
static SEG7_DEFAULT_MAP(map_seg7);
/*
 * Display a char at LCD element position @el.
 *
 * '\t' and '\n' are placeholders and do not overwrite the original text
 * (the old comment said '\9', but the code tests '\t').
 * A space will always hide an icon.
 * Returns 0 on success, -EINVAL if @el is out of range.
 */
static int setChar(struct yealink_dev *yld, int el, int chr)
{
	int i, a, m, val;

	if (el >= ARRAY_SIZE(lcdMap))
		return -EINVAL;

	/* Placeholder characters leave the current content untouched. */
	if (chr == '\t' || chr == '\n')
		return 0;

	yld->lcdMap[el] = chr;

	/* Pictogram element: one addr/mask pair toggles the icon. */
	if (lcdMap[el].type == '.') {
		a = lcdMap[el].u.p.a;
		m = lcdMap[el].u.p.m;
		if (chr != ' ')
			yld->master.b[a] |= m;
		else
			yld->master.b[a] &= ~m;
		return 0;
	}

	/* 7-segment element: set/clear one bit per segment. */
	val = map_to_seg7(&map_seg7, chr);
	for (i = 0; i < ARRAY_SIZE(lcdMap[0].u.s); i++) {
		m = lcdMap[el].u.s[i].m;
		if (m == 0)
			continue;
		a = lcdMap[el].u.s[i].a;
		if (val & 1)
			yld->master.b[a] |= m;
		else
			yld->master.b[a] &= ~m;
		val = val >> 1;
	}
	/* note: the stray ';' after this brace was removed */
	return 0;
}
/*******************************************************************************
* Yealink key interface
******************************************************************************/
/* Map device buttons to internal key events.
*
* USB-P1K button layout:
*
* up
* IN OUT
* down
*
* pickup C hangup
* 1 2 3
* 4 5 6
* 7 8 9
* * 0 #
*
* The "up" and "down" keys, are symbolised by arrows on the button.
* The "pickup" and "hangup" keys are symbolised by a green and red phone
* on the button.
*/
/*
 * Translate a USB-P1K scancode into an input key code. Scancode 0x32
 * ('#') yields a two-key combination encoded as key2 << 8 | key1
 * (decoded by report_key()). Returns -EINVAL for unknown scancodes.
 */
static int map_p1k_to_key(int scancode)
{
	switch(scancode) {			/* phone key:	*/
	case 0x23: return KEY_LEFT;		/*   IN		*/
	case 0x33: return KEY_UP;		/*   up		*/
	case 0x04: return KEY_RIGHT;		/*   OUT	*/
	case 0x24: return KEY_DOWN;		/*   down	*/
	case 0x03: return KEY_ENTER;		/*   pickup	*/
	case 0x14: return KEY_BACKSPACE;	/*   C		*/
	case 0x13: return KEY_ESC;		/*   hangup	*/
	case 0x00: return KEY_1;		/*   1		*/
	case 0x01: return KEY_2;		/*   2		*/
	case 0x02: return KEY_3;		/*   3		*/
	case 0x10: return KEY_4;		/*   4		*/
	case 0x11: return KEY_5;		/*   5		*/
	case 0x12: return KEY_6;		/*   6		*/
	case 0x20: return KEY_7;		/*   7		*/
	case 0x21: return KEY_8;		/*   8		*/
	case 0x22: return KEY_9;		/*   9		*/
	case 0x30: return KEY_KPASTERISK;	/*   *		*/
	case 0x31: return KEY_0;		/*   0		*/
	case 0x32: return KEY_LEFTSHIFT |
			  KEY_3 << 8;		/*   #		*/
	}
	return -EINVAL;
}
/*
 * Convert a key transition into input events. @key may encode two
 * cascaded keys as key2 << 8 | key1; a negative value means "no key
 * pressed". The previously reported key(s) are released first.
 */
static void report_key(struct yealink_dev *yld, int key)
{
	struct input_dev *idev = yld->idev;
	int prev = yld->key_code;

	if (prev >= 0) {
		/* release the previously reported key(s) */
		input_report_key(idev, prev & 0xff, 0);
		if (prev >> 8)
			input_report_key(idev, prev >> 8, 0);
	}

	yld->key_code = key;
	if (key >= 0) {
		/* press the new key(s) */
		input_report_key(idev, key & 0xff, 1);
		if (key >> 8)
			input_report_key(idev, key >> 8, 1);
	}

	input_sync(idev);
}
/*******************************************************************************
* Yealink usb communication interface
******************************************************************************/
/*
 * Send one control packet to the phone. The trailing sum byte is
 * filled in so that all USB_PKT_LEN bytes of the packet add up to
 * zero (mod 256). Returns the usb_control_msg() result.
 */
static int yealink_cmd(struct yealink_dev *yld, struct yld_ctl_packet *p)
{
	u8 *buf = (u8 *)p;
	int i;
	u8 sum = 0;

	/* checksum over every byte except the sum byte itself */
	for(i=0; i<USB_PKT_LEN-1; i++)
		sum -= buf[i];
	p->sum = sum;
	return usb_control_msg(yld->udev,
			usb_sndctrlpipe(yld->udev, 0),
			USB_REQ_SET_CONFIGURATION,
			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
			0x200, 3,
			p, sizeof(*p),
			USB_CTRL_SET_TIMEOUT);
}
static u8 default_ringtone[] = {
0xEF, /* volume [0-255] */
0xFB, 0x1E, 0x00, 0x0C, /* 1250 [hz], 12/100 [s] */
0xFC, 0x18, 0x00, 0x0C, /* 1000 [hz], 12/100 [s] */
0xFB, 0x1E, 0x00, 0x0C,
0xFC, 0x18, 0x00, 0x0C,
0xFB, 0x1E, 0x00, 0x0C,
0xFC, 0x18, 0x00, 0x0C,
0xFB, 0x1E, 0x00, 0x0C,
0xFC, 0x18, 0x00, 0x0C,
0xFF, 0xFF, 0x01, 0x90, /* silent, 400/100 [s] */
0x00, 0x00 /* end of sequence */
};
/*
 * Upload a ringtone. buf[0] holds the volume; the remaining bytes are
 * note data streamed to the device in CMD_RING_NOTE chunks.
 * Returns -EINVAL for an empty buffer, 0 otherwise.
 */
static int yealink_set_ringtone(struct yealink_dev *yld, u8 *buf, size_t size)
{
	struct yld_ctl_packet *p = yld->ctl_data;
	int ix, len;

	/* size is unsigned (size_t): "<= 0" was misleading, use "== 0" */
	if (size == 0)
		return -EINVAL;

	/* Set the ringtone volume */
	memset(p, 0, sizeof(*p));
	p->cmd		= CMD_RING_VOLUME;
	p->size		= 1;
	p->data[0]	= buf[0];
	yealink_cmd(yld, p);
	buf++;
	size--;

	/* Stream the note data in packet-sized chunks. */
	p->cmd = CMD_RING_NOTE;
	ix = 0;
	while (size != ix) {
		len = size - ix;
		if (len > sizeof(p->data))
			len = sizeof(p->data);
		p->size	  = len;
		p->offset = cpu_to_be16(ix);
		memcpy(p->data, &buf[ix], len);
		yealink_cmd(yld, p);
		ix += len;
	}
	return 0;
}
/*
 * Keep stat_master & stat_copy in sync: scan for the first byte where
 * the desired state ('master') differs from the device's known state
 * ('copy') and build a single control packet that pushes that change.
 * The 'sum' fields are filled so all packet bytes add up to zero
 * (mod 256) — same invariant yealink_cmd() computes.
 *
 * Returns 1 when an update packet was prepared, 0 when there was
 * nothing to do (a CMD_KEYPRESS poll packet is prepared instead).
 */
static int yealink_do_idle_tasks(struct yealink_dev *yld)
{
	u8 val;
	int i, ix, len;

	ix = yld->stat_ix;

	/* Default packet: poll the device for a keypress. */
	memset(yld->ctl_data, 0, sizeof(*(yld->ctl_data)));
	yld->ctl_data->cmd  = CMD_KEYPRESS;
	yld->ctl_data->size = 1;
	yld->ctl_data->sum  = 0xff - CMD_KEYPRESS;

	/* If state update pointer wraps do a KEYPRESS first. */
	if (ix >= sizeof(yld->master)) {
		yld->stat_ix = 0;
		return 0;
	}

	/* find update candidates: copy != master */
	do {
		val = yld->master.b[ix];
		if (val != yld->copy.b[ix])
			goto send_update;
	} while (++ix < sizeof(yld->master));

	/* nothing todo, wait a bit and poll for a KEYPRESS */
	yld->stat_ix = 0;
	/* TODO how can we wait abit. ??
	 * msleep_interruptible(1000 / YEALINK_POLLING_FREQUENCY);
	 */
	return 0;

send_update:

	/* Setup an appropriate update request */
	yld->copy.b[ix] = val;
	yld->ctl_data->data[0] = val;

	switch(ix) {
	case offsetof(struct yld_status, led):
		yld->ctl_data->cmd	= CMD_LED;
		yld->ctl_data->sum	= -1 - CMD_LED - val;
		break;
	case offsetof(struct yld_status, dialtone):
		yld->ctl_data->cmd	= CMD_DIALTONE;
		yld->ctl_data->sum	= -1 - CMD_DIALTONE - val;
		break;
	case offsetof(struct yld_status, ringtone):
		yld->ctl_data->cmd	= CMD_RINGTONE;
		yld->ctl_data->sum	= -1 - CMD_RINGTONE - val;
		break;
	case offsetof(struct yld_status, keynum):
		/* ask for the scancode of the (val-1)'th key event */
		val--;
		val &= 0x1f;
		yld->ctl_data->cmd	= CMD_SCANCODE;
		yld->ctl_data->offset	= cpu_to_be16(val);
		yld->ctl_data->data[0]	= 0;
		yld->ctl_data->sum	= -1 - CMD_SCANCODE - val;
		break;
	default:
		len = sizeof(yld->master.s.lcd) - ix;
		if (len > sizeof(yld->ctl_data->data))
			len = sizeof(yld->ctl_data->data);

		/* Combine up to <len> consecutive LCD bytes in a singe request
		 */
		yld->ctl_data->cmd	= CMD_LCD;
		yld->ctl_data->offset	= cpu_to_be16(ix);
		yld->ctl_data->size	= len;
		yld->ctl_data->sum	= -CMD_LCD - ix - val - len;
		for(i=1; i<len; i++) {
			ix++;
			val = yld->master.b[ix];
			yld->copy.b[ix]		= val;
			yld->ctl_data->data[i]	= val;
			yld->ctl_data->sum     -= val;
		}
	}
	yld->stat_ix = ix + 1;
	return 1;
}
/* Decide on how to handle responses
*
* The state transition diagram is somethhing like:
*
* syncState<--+
* | |
* | idle
* \|/ |
* init --ok--> waitForKey --ok--> getKey
* ^ ^ |
* | +-------ok-------+
* error,start
*
*/
/*
 * Completion handler for the interrupt-in URB: dispatch the device's
 * response (keypress counter or scancode), then queue the next control
 * packet unless the device is shutting down.
 */
static void urb_irq_callback(struct urb *urb)
{
	struct yealink_dev *yld = urb->context;
	int ret, status = urb->status;

	if (status)
		dev_err(&yld->intf->dev, "%s - urb status %d\n",
			__func__, status);

	switch (yld->irq_data->cmd) {
	case CMD_KEYPRESS:
		/* store the counter; the idle task fetches the scancode */
		yld->master.s.keynum = yld->irq_data->data[0];
		break;

	case CMD_SCANCODE:
		dev_dbg(&yld->intf->dev, "get scancode %x\n",
			yld->irq_data->data[0]);

		report_key(yld, map_p1k_to_key(yld->irq_data->data[0]));
		break;

	default:
		dev_err(&yld->intf->dev, "unexpected response %x\n",
			yld->irq_data->cmd);
	}

	yealink_do_idle_tasks(yld);

	if (!yld->shutdown) {
		ret = usb_submit_urb(yld->urb_ctl, GFP_ATOMIC);
		if (ret && ret != -EPERM)
			dev_err(&yld->intf->dev,
				"%s - usb_submit_urb failed %d\n",
				__func__, ret);
	}
}
/*
 * Completion handler for the control URB: for KEYPRESS/SCANCODE
 * requests, ask for the device's answer via the irq URB; otherwise
 * keep the chain running with the next idle-task packet.
 */
static void urb_ctl_callback(struct urb *urb)
{
	struct yealink_dev *yld = urb->context;
	int ret = 0, status = urb->status;

	if (status)
		dev_err(&yld->intf->dev, "%s - urb status %d\n",
			__func__, status);

	switch (yld->ctl_data->cmd) {
	case CMD_KEYPRESS:
	case CMD_SCANCODE:
		/* ask for a response */
		if (!yld->shutdown)
			ret = usb_submit_urb(yld->urb_irq, GFP_ATOMIC);
		break;
	default:
		/* send new command */
		yealink_do_idle_tasks(yld);
		if (!yld->shutdown)
			ret = usb_submit_urb(yld->urb_ctl, GFP_ATOMIC);
		break;
	}

	if (ret && ret != -EPERM)
		dev_err(&yld->intf->dev, "%s - usb_submit_urb failed %d\n",
			__func__, ret);
}
/*******************************************************************************
* input event interface
******************************************************************************/
/* TODO should we issue a ringtone on a SND_BELL event?
static int input_ev(struct input_dev *dev, unsigned int type,
unsigned int code, int value)
{
if (type != EV_SND)
return -EINVAL;
switch (code) {
case SND_BELL:
case SND_TONE:
break;
default:
return -EINVAL;
}
return 0;
}
*/
/*
 * input core open(): invalidate the device-state copy so everything
 * gets re-sent, upload the default ringtone and kick off the URB
 * chain with an INIT command.
 */
static int input_open(struct input_dev *dev)
{
	struct yealink_dev *yld = input_get_drvdata(dev);
	int i, ret;

	dev_dbg(&yld->intf->dev, "%s\n", __func__);

	/* force updates to device */
	for (i = 0; i<sizeof(yld->master); i++)
		yld->copy.b[i] = ~yld->master.b[i];
	yld->key_code = -1;	/* no keys pressed */

	yealink_set_ringtone(yld, default_ringtone, sizeof(default_ringtone));

	/* issue INIT */
	memset(yld->ctl_data, 0, sizeof(*(yld->ctl_data)));
	yld->ctl_data->cmd	= CMD_INIT;
	yld->ctl_data->size	= 10;
	yld->ctl_data->sum	= 0x100-CMD_INIT-10;
	if ((ret = usb_submit_urb(yld->urb_ctl, GFP_KERNEL)) != 0) {
		dev_dbg(&yld->intf->dev,
			"%s - usb_submit_urb failed with result %d\n",
			__func__, ret);
		return ret;
	}
	return 0;
}
/* Input-core close(): raise the shutdown flag (with a write barrier so the
 * URB completion handlers observe it before killing the URBs), then stop
 * both URBs and clear the flag again for the next open().
 */
static void input_close(struct input_dev *dev)
{
	struct yealink_dev *yld = input_get_drvdata(dev);
	yld->shutdown = 1;
	/*
	 * Make sure the flag is seen by other CPUs before we start
	 * killing URBs so new URBs won't be submitted
	 */
	smp_wmb();
	usb_kill_urb(yld->urb_ctl);
	usb_kill_urb(yld->urb_irq);
	yld->shutdown = 0;
	smp_wmb();
}
/*******************************************************************************
* sysfs interface
******************************************************************************/
/* Serializes sysfs handlers against usb_disconnect() clearing drvdata. */
static DECLARE_RWSEM(sysfs_rwsema);
/* Interface to the 7-segments translation table aka. char set.
*/
static ssize_t show_map(struct device *dev, struct device_attribute *attr,
char *buf)
{
memcpy(buf, &map_seg7, sizeof(map_seg7));
return sizeof(map_seg7);
}
/* sysfs write of the 7-segment character translation table; the payload
 * must be exactly the table size, otherwise -EINVAL.
 */
static ssize_t store_map(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t cnt)
{
	const size_t len = sizeof(map_seg7);

	if (cnt != len)
		return -EINVAL;

	memcpy(&map_seg7, buf, len);
	return len;
}
/* Interface to the LCD.
*/
/* Reading /sys/../lineX will return the format string with its settings:
*
* Example:
* cat ./line3
* 888888888888
* Linux Rocks!
*/
/* Render lcdMap elements [a, b) as two text lines: first the format/type
 * characters, then the current contents, each '\n'-terminated plus a final
 * NUL.  Returns the byte count (2 * (b - a) + 3) or -ENODEV after unplug.
 */
static ssize_t show_line(struct device *dev, char *buf, int a, int b)
{
	struct yealink_dev *yld;
	int i;
	down_read(&sysfs_rwsema);
	yld = dev_get_drvdata(dev);
	if (yld == NULL) {
		up_read(&sysfs_rwsema);
		return -ENODEV;
	}
	for (i = a; i < b; i++)
		*buf++ = lcdMap[i].type;
	*buf++ = '\n';
	for (i = a; i < b; i++)
		*buf++ = yld->lcdMap[i];
	*buf++ = '\n';
	*buf = 0;
	up_read(&sysfs_rwsema);
	return 3 + ((b - a) << 1);
}
/* Per-line sysfs read handlers: each selects its LCD line's element range. */
static ssize_t show_line1(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return show_line(dev, buf, LCD_LINE1_OFFSET, LCD_LINE2_OFFSET);
}
static ssize_t show_line2(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return show_line(dev, buf, LCD_LINE2_OFFSET, LCD_LINE3_OFFSET);
}
static ssize_t show_line3(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return show_line(dev, buf, LCD_LINE3_OFFSET, LCD_LINE4_OFFSET);
}
/* Writing to /sys/../lineX will set the coresponding LCD line.
* - Excess characters are ignored.
* - If less characters are written than allowed, the remaining digits are
* unchanged.
* - The '\n' or '\t' char is a placeholder, it does not overwrite the
* original content.
*/
/* Shared sysfs store handler for the lineX attributes: write up to @len
 * characters from @buf into the LCD starting at element @el.  Excess input
 * is ignored; short input leaves the remaining digits unchanged.  Returns
 * @count on success or -ENODEV once the device is gone.
 *
 * Fix: the loop index was a signed int compared against the size_t bound
 * 'len' (signed/unsigned comparison); use size_t for the index.
 */
static ssize_t store_line(struct device *dev, const char *buf, size_t count,
		int el, size_t len)
{
	struct yealink_dev *yld;
	size_t i;
	down_write(&sysfs_rwsema);
	yld = dev_get_drvdata(dev);
	if (yld == NULL) {
		up_write(&sysfs_rwsema);
		return -ENODEV;
	}
	/* clamp to the caller-supplied payload size */
	if (len > count)
		len = count;
	for (i = 0; i < len; i++)
		setChar(yld, el++, buf[i]);
	up_write(&sysfs_rwsema);
	return count;
}
/* Per-line sysfs write handlers: each passes its LCD line's offset/size. */
static ssize_t store_line1(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	return store_line(dev, buf, count, LCD_LINE1_OFFSET, LCD_LINE1_SIZE);
}
static ssize_t store_line2(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	return store_line(dev, buf, count, LCD_LINE2_OFFSET, LCD_LINE2_SIZE);
}
static ssize_t store_line3(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	return store_line(dev, buf, count, LCD_LINE3_OFFSET, LCD_LINE3_SIZE);
}
/* Interface to visible and audible "icons", these include:
* pictures on the LCD, the LED, and the dialtone signal.
*/
/* Get a list of "switchable elements" with their current state. */
/* sysfs read: list every switchable element ('.'-type lcdMap entry) with
 * its current on/off state, one "state name" pair per line.
 */
static ssize_t get_icons(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct yealink_dev *yld;
	int i, ret = 1;
	down_read(&sysfs_rwsema);
	yld = dev_get_drvdata(dev);
	if (yld == NULL) {
		up_read(&sysfs_rwsema);
		return -ENODEV;
	}
	for (i = 0; i < ARRAY_SIZE(lcdMap); i++) {
		if (lcdMap[i].type != '.')
			continue;
		/* a space in the shadow buffer means the icon is hidden */
		ret += sprintf(&buf[ret], "%s %s\n",
				yld->lcdMap[i] == ' ' ? "  " : "on",
				lcdMap[i].u.p.name);
	}
	up_read(&sysfs_rwsema);
	return ret;
}
/* Change the visibility of a particular element. */
/* Look up the icon named in @buf among the '.'-type lcdMap entries and set
 * its element to @chr (icon's own char to show, ' ' to hide).  Unknown
 * names are silently ignored; always returns @count unless unplugged.
 */
static ssize_t set_icon(struct device *dev, const char *buf, size_t count,
			int chr)
{
	struct yealink_dev *yld;
	int i;
	down_write(&sysfs_rwsema);
	yld = dev_get_drvdata(dev);
	if (yld == NULL) {
		up_write(&sysfs_rwsema);
		return -ENODEV;
	}
	for (i = 0; i < ARRAY_SIZE(lcdMap); i++) {
		if (lcdMap[i].type != '.')
			continue;
		if (strncmp(buf, lcdMap[i].u.p.name, count) == 0) {
			setChar(yld, i, chr);
			break;
		}
	}
	up_write(&sysfs_rwsema);
	return count;
}
/* sysfs write handlers: make the named icon visible / hide it again. */
static ssize_t show_icon(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	/* buf[0] (the name's first char) doubles as the "on" glyph */
	return set_icon(dev, buf, count, buf[0]);
}
static ssize_t hide_icon(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	return set_icon(dev, buf, count, ' ');
}
/* Upload a ringtone to the device.
*/
/* Stores raw ringtone data in the phone */
/* sysfs write: upload @count bytes of raw ringtone data to the phone. */
static ssize_t store_ringtone(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct yealink_dev *yld;
	down_write(&sysfs_rwsema);
	yld = dev_get_drvdata(dev);
	if (yld == NULL) {
		up_write(&sysfs_rwsema);
		return -ENODEV;
	}
	/* TODO locking with async usb control interface??? */
	yealink_set_ringtone(yld, (char *)buf, count);
	up_write(&sysfs_rwsema);
	return count;
}
/* sysfs permission shorthands: r--r--r--, rw-rw-r--, -w--w---- */
#define _M444	S_IRUGO
#define _M664	S_IRUGO|S_IWUSR|S_IWGRP
#define _M220	S_IWUSR|S_IWGRP
static DEVICE_ATTR(map_seg7	, _M664, show_map	, store_map	);
static DEVICE_ATTR(line1	, _M664, show_line1	, store_line1	);
static DEVICE_ATTR(line2	, _M664, show_line2	, store_line2	);
static DEVICE_ATTR(line3	, _M664, show_line3	, store_line3	);
static DEVICE_ATTR(get_icons	, _M444, get_icons	, NULL		);
static DEVICE_ATTR(show_icon	, _M220, NULL		, show_icon	);
static DEVICE_ATTR(hide_icon	, _M220, NULL		, hide_icon	);
static DEVICE_ATTR(ringtone	, _M220, NULL		, store_ringtone);
/* All attributes registered as one group under the USB interface. */
static struct attribute *yld_attributes[] = {
	&dev_attr_line1.attr,
	&dev_attr_line2.attr,
	&dev_attr_line3.attr,
	&dev_attr_get_icons.attr,
	&dev_attr_show_icon.attr,
	&dev_attr_hide_icon.attr,
	&dev_attr_map_seg7.attr,
	&dev_attr_ringtone.attr,
	NULL
};
static const struct attribute_group yld_attr_group = {
	.attrs = yld_attributes
};
/*******************************************************************************
* Linux interface and usb initialisation
******************************************************************************/
/* Per-model data referenced from the USB id table. */
struct driver_info {
	char *name;
};
static const struct driver_info info_P1K = {
	.name	= "Yealink usb-p1k",
};
/* Match the Yealink USB-P1K handset (vendor 0x6993) on its HID interface. */
static const struct usb_device_id usb_table [] = {
	{
		.match_flags		= USB_DEVICE_ID_MATCH_DEVICE |
						USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor		= 0x6993,
		.idProduct		= 0xb001,
		.bInterfaceClass	= USB_CLASS_HID,
		.bInterfaceSubClass	= 0,
		.bInterfaceProtocol	= 0,
		.driver_info		= (kernel_ulong_t)&info_P1K
	},
	{ }
};
/* Tear down everything usb_probe() allocated; safe on a partially
 * initialized @yld (the free routines accept NULL).  Passes @err through
 * so callers can 'return usb_cleanup(yld, -ESOMETHING);'.
 */
static int usb_cleanup(struct yealink_dev *yld, int err)
{
	if (yld == NULL)
		return err;
	if (yld->idev) {
		/* on error the input device was never registered */
		if (err)
			input_free_device(yld->idev);
		else
			input_unregister_device(yld->idev);
	}
	usb_free_urb(yld->urb_irq);
	usb_free_urb(yld->urb_ctl);
	kfree(yld->ctl_req);
	usb_free_coherent(yld->udev, USB_PKT_LEN, yld->ctl_data, yld->ctl_dma);
	usb_free_coherent(yld->udev, USB_PKT_LEN, yld->irq_data, yld->irq_dma);
	kfree(yld);
	return err;
}
/* USB disconnect: remove sysfs files and clear drvdata under the write
 * lock (so in-flight sysfs handlers see NULL), then free everything.
 */
static void usb_disconnect(struct usb_interface *intf)
{
	struct yealink_dev *yld;
	down_write(&sysfs_rwsema);
	yld = usb_get_intfdata(intf);
	sysfs_remove_group(&intf->dev.kobj, &yld_attr_group);
	usb_set_intfdata(intf, NULL);
	up_write(&sysfs_rwsema);
	usb_cleanup(yld, 0);
}
/* USB probe: validate the interrupt-in endpoint, allocate device state,
 * DMA-coherent transfer buffers and both URBs, register the input device,
 * clear the display and add the sysfs group.  Errors funnel through
 * usb_cleanup() which tolerates partial initialization.
 */
static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev (intf);
	struct driver_info *nfo = (struct driver_info *)id->driver_info;
	struct usb_host_interface *interface;
	struct usb_endpoint_descriptor *endpoint;
	struct yealink_dev *yld;
	struct input_dev *input_dev;
	int ret, pipe, i;
	/* the protocol needs exactly one interrupt-in endpoint */
	interface = intf->cur_altsetting;
	if (interface->desc.bNumEndpoints < 1)
		return -ENODEV;
	endpoint = &interface->endpoint[0].desc;
	if (!usb_endpoint_is_int_in(endpoint))
		return -ENODEV;
	yld = kzalloc(sizeof(struct yealink_dev), GFP_KERNEL);
	if (!yld)
		return -ENOMEM;
	yld->udev = udev;
	yld->intf = intf;
	yld->idev = input_dev = input_allocate_device();
	if (!input_dev)
		return usb_cleanup(yld, -ENOMEM);
	/* allocate usb buffers */
	yld->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN,
					   GFP_KERNEL, &yld->irq_dma);
	if (yld->irq_data == NULL)
		return usb_cleanup(yld, -ENOMEM);
	yld->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN,
					   GFP_KERNEL, &yld->ctl_dma);
	if (!yld->ctl_data)
		return usb_cleanup(yld, -ENOMEM);
	yld->ctl_req = kmalloc(sizeof(*(yld->ctl_req)), GFP_KERNEL);
	if (yld->ctl_req == NULL)
		return usb_cleanup(yld, -ENOMEM);
	/* allocate urb structures */
	yld->urb_irq = usb_alloc_urb(0, GFP_KERNEL);
	if (yld->urb_irq == NULL)
		return usb_cleanup(yld, -ENOMEM);
	yld->urb_ctl = usb_alloc_urb(0, GFP_KERNEL);
	if (yld->urb_ctl == NULL)
		return usb_cleanup(yld, -ENOMEM);
	/* get a handle to the interrupt data pipe */
	pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
	ret = usb_maxpacket(udev, pipe);
	/* a size mismatch is only reported, not fatal */
	if (ret != USB_PKT_LEN)
		dev_err(&intf->dev, "invalid payload size %d, expected %zd\n",
			ret, USB_PKT_LEN);
	/* initialise irq urb */
	usb_fill_int_urb(yld->urb_irq, udev, pipe, yld->irq_data,
			USB_PKT_LEN,
			urb_irq_callback,
			yld, endpoint->bInterval);
	yld->urb_irq->transfer_dma = yld->irq_dma;
	yld->urb_irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	yld->urb_irq->dev = udev;
	/* initialise ctl urb */
	yld->ctl_req->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE |
				      USB_DIR_OUT;
	yld->ctl_req->bRequest	= USB_REQ_SET_CONFIGURATION;
	yld->ctl_req->wValue	= cpu_to_le16(0x200);
	yld->ctl_req->wIndex	= cpu_to_le16(interface->desc.bInterfaceNumber);
	yld->ctl_req->wLength	= cpu_to_le16(USB_PKT_LEN);
	usb_fill_control_urb(yld->urb_ctl, udev, usb_sndctrlpipe(udev, 0),
			(void *)yld->ctl_req, yld->ctl_data, USB_PKT_LEN,
			urb_ctl_callback, yld);
	yld->urb_ctl->transfer_dma	= yld->ctl_dma;
	yld->urb_ctl->transfer_flags	|= URB_NO_TRANSFER_DMA_MAP;
	yld->urb_ctl->dev = udev;
	/* find out the physical bus location */
	usb_make_path(udev, yld->phys, sizeof(yld->phys));
	strlcat(yld->phys,  "/input0", sizeof(yld->phys));
	/* register settings for the input device */
	input_dev->name = nfo->name;
	input_dev->phys = yld->phys;
	usb_to_input_id(udev, &input_dev->id);
	input_dev->dev.parent = &intf->dev;
	input_set_drvdata(input_dev, yld);
	input_dev->open = input_open;
	input_dev->close = input_close;
	/* input_dev->event = input_ev;	TODO */
	/* register available key events */
	input_dev->evbit[0] = BIT_MASK(EV_KEY);
	for (i = 0; i < 256; i++) {
		int k = map_p1k_to_key(i);
		if (k >= 0) {
			set_bit(k & 0xff, input_dev->keybit);
			/* high byte may carry a second keycode (combos) */
			if (k >> 8)
				set_bit(k >> 8, input_dev->keybit);
		}
	}
	ret = input_register_device(yld->idev);
	if (ret)
		return usb_cleanup(yld, ret);
	usb_set_intfdata(intf, yld);
	/* clear visible elements */
	for (i = 0; i < ARRAY_SIZE(lcdMap); i++)
		setChar(yld, i, ' ');
	/* display driver version on LCD line 3 */
	store_line3(&intf->dev, NULL,
			DRIVER_VERSION, sizeof(DRIVER_VERSION));
	/* Register sysfs hooks (don't care about failure) */
	ret = sysfs_create_group(&intf->dev.kobj, &yld_attr_group);
	return 0;
}
/* USB driver registration and module metadata. */
static struct usb_driver yealink_driver = {
	.name		= "yealink",
	.probe		= usb_probe,
	.disconnect	= usb_disconnect,
	.id_table	= usb_table,
};
module_usb_driver(yealink_driver);
MODULE_DEVICE_TABLE (usb, usb_table);
MODULE_AUTHOR("Henk Vergonet");
MODULE_DESCRIPTION("Yealink phone driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/yealink.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* twl4030-vibra.c - TWL4030 Vibrator driver
*
* Copyright (C) 2008-2010 Nokia Corporation
*
* Written by Henrik Saari <[email protected]>
* Updates by Felipe Balbi <[email protected]>
* Input by Jari Vanhala <[email protected]>
*/
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/workqueue.h>
#include <linux/mfd/twl.h>
#include <linux/mfd/twl4030-audio.h>
#include <linux/input.h>
#include <linux/slab.h>
/* MODULE ID2 */
#define LEDEN 0x00
/* ForceFeedback */
#define EFFECT_DIR_180_DEG 0x8000 /* range is 0 - 0xFFFF */
/* Per-device state for the TWL4030 vibrator. */
struct vibra_info {
	struct device		*dev;
	struct input_dev	*input_dev;
	struct work_struct	play_work;	/* deferred I2C work for FF playback */
	bool			enabled;	/* H-bridge currently powered */
	int			speed;		/* 0..255 PWM strength, 0 = stop */
	int			direction;	/* 0/1 rotation direction */
	bool			coexist;	/* audio codec shares the vibra output */
};
/* Clear the LEDA/LEDB enable bits: the LED outputs conflict with the
 * vibrator PWM and must stay off while vibra is in use.
 */
static void vibra_disable_leds(void)
{
	u8 reg;
	/* Disable LEDA & LEDB, cannot be used with vibra (PWM) */
	twl_i2c_read_u8(TWL4030_MODULE_LED, &reg, LEDEN);
	reg &= ~0x03;
	twl_i2c_write_u8(TWL4030_MODULE_LED, LEDEN, reg);
}
/* Powers H-Bridge and enables audio clk */
static void vibra_enable(struct vibra_info *info)
{
u8 reg;
twl4030_audio_enable_resource(TWL4030_AUDIO_RES_POWER);
/* turn H-Bridge on */
twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE,
®, TWL4030_REG_VIBRA_CTL);
twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
(reg | TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
twl4030_audio_enable_resource(TWL4030_AUDIO_RES_APLL);
info->enabled = true;
}
/* Reverse of vibra_enable(): H-bridge off, then release APLL and power. */
static void vibra_disable(struct vibra_info *info)
{
	u8 reg;
	/* Power down H-Bridge */
	twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE,
			&reg, TWL4030_REG_VIBRA_CTL);
	twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
			 (reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
	twl4030_audio_disable_resource(TWL4030_AUDIO_RES_APLL);
	twl4030_audio_disable_resource(TWL4030_AUDIO_RES_POWER);
	info->enabled = false;
}
/* Workqueue body: apply the speed/direction captured by vibra_play().
 * Runs in process context because the TWL accesses are blocking I2C.
 */
static void vibra_play_work(struct work_struct *work)
{
	struct vibra_info *info = container_of(work,
			struct vibra_info, play_work);
	int dir;
	int pwm;
	u8 reg;
	dir = info->direction;
	pwm = info->speed;
	twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE,
			&reg, TWL4030_REG_VIBRA_CTL);
	/* skip playback if the codec currently owns the vibra output */
	if (pwm && (!info->coexist || !(reg & TWL4030_VIBRA_SEL))) {
		if (!info->enabled)
			vibra_enable(info);
		/* set vibra rotation direction */
		twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE,
				&reg, TWL4030_REG_VIBRA_CTL);
		reg = (dir) ? (reg | TWL4030_VIBRA_DIR) :
			(reg & ~TWL4030_VIBRA_DIR);
		twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
				 reg, TWL4030_REG_VIBRA_CTL);
		/* set PWM, 1 = max, 255 = min */
		twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
				 256 - pwm, TWL4030_REG_VIBRA_SET);
	} else {
		if (info->enabled)
			vibra_disable(info);
	}
}
/*** Input/ForceFeedback ***/
static int vibra_play(struct input_dev *input, void *data,
struct ff_effect *effect)
{
struct vibra_info *info = input_get_drvdata(input);
info->speed = effect->u.rumble.strong_magnitude >> 8;
if (!info->speed)
info->speed = effect->u.rumble.weak_magnitude >> 9;
info->direction = effect->direction < EFFECT_DIR_180_DEG ? 0 : 1;
schedule_work(&info->play_work);
return 0;
}
/* Input close(): wait out any queued playback work, then power down. */
static void twl4030_vibra_close(struct input_dev *input)
{
	struct vibra_info *info = input_get_drvdata(input);
	cancel_work_sync(&info->play_work);
	if (info->enabled)
		vibra_disable(info);
}
/*** Module ***/
/* System suspend: make sure the H-bridge is powered down. */
static int twl4030_vibra_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct vibra_info *info = platform_get_drvdata(pdev);
	if (info->enabled)
		vibra_disable(info);
	return 0;
}
/* System resume: re-assert the LED disable (may be lost across suspend). */
static int twl4030_vibra_resume(struct device *dev)
{
	vibra_disable_leds();
	return 0;
}
/* Suspend/resume only; no runtime PM. */
static DEFINE_SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
				twl4030_vibra_suspend, twl4030_vibra_resume);
/* Return true when the twl4030 core node has a "codec" child, i.e. the
 * audio codec is present and shares the vibra output with us.
 */
static bool twl4030_vibra_check_coexist(struct device_node *parent)
{
	struct device_node *codec;

	codec = of_get_child_by_name(parent, "codec");
	if (!codec)
		return false;

	of_node_put(codec);
	return true;
}
/* Platform probe: allocate state, set up the FF input device (rumble
 * only), register it, and pre-disable the conflicting LED outputs.
 * Only input_ff_create_memless() needs manual unwinding; everything
 * else is devm-managed.
 */
static int twl4030_vibra_probe(struct platform_device *pdev)
{
	struct device_node *twl4030_core_node = pdev->dev.parent->of_node;
	struct vibra_info *info;
	int ret;
	if (!twl4030_core_node) {
		dev_dbg(&pdev->dev, "twl4030 OF node is missing\n");
		return -EINVAL;
	}
	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->dev = &pdev->dev;
	info->coexist = twl4030_vibra_check_coexist(twl4030_core_node);
	INIT_WORK(&info->play_work, vibra_play_work);
	info->input_dev = devm_input_allocate_device(&pdev->dev);
	if (info->input_dev == NULL) {
		dev_err(&pdev->dev, "couldn't allocate input device\n");
		return -ENOMEM;
	}
	input_set_drvdata(info->input_dev, info);
	info->input_dev->name = "twl4030:vibrator";
	info->input_dev->id.version = 1;
	info->input_dev->close = twl4030_vibra_close;
	__set_bit(FF_RUMBLE, info->input_dev->ffbit);
	ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "couldn't register vibrator to FF\n");
		return ret;
	}
	ret = input_register_device(info->input_dev);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "couldn't register input device\n");
		goto err_iff;
	}
	vibra_disable_leds();
	platform_set_drvdata(pdev, info);
	return 0;
err_iff:
	input_ff_destroy(info->input_dev);
	return ret;
}
/* Platform driver registration and module metadata. */
static struct platform_driver twl4030_vibra_driver = {
	.probe		= twl4030_vibra_probe,
	.driver		= {
		.name	= "twl4030-vibra",
		.pm	= pm_sleep_ptr(&twl4030_vibra_pm_ops),
	},
};
module_platform_driver(twl4030_vibra_driver);
MODULE_ALIAS("platform:twl4030-vibra");
MODULE_DESCRIPTION("TWL4030 Vibra driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nokia Corporation");
|
linux-master
|
drivers/input/misc/twl4030-vibra.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cobalt button interface driver.
*
* Copyright (C) 2007-2008 Yoichi Yuasa <[email protected]>
*/
#include <linux/input.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define BUTTONS_POLL_INTERVAL 30 /* msec */
#define BUTTONS_COUNT_THRESHOLD 3
#define BUTTONS_STATUS_MASK 0xfe000000
/* Default keycode for each of the 8 front-panel button bits (bit order
 * matches the status register read in handle_buttons()).
 */
static const unsigned short cobalt_map[] = {
	KEY_RESERVED,
	KEY_RESTART,
	KEY_LEFT,
	KEY_UP,
	KEY_DOWN,
	KEY_RIGHT,
	KEY_ENTER,
	KEY_SELECT
};
/* Per-device state: current keymap, debounce counters, MMIO register. */
struct buttons_dev {
	unsigned short keymap[ARRAY_SIZE(cobalt_map)];
	int count[ARRAY_SIZE(cobalt_map)];
	void __iomem *reg;
};
/* Poll handler: read the (active-low) button register and debounce each
 * bit.  A key-down is reported after BUTTONS_COUNT_THRESHOLD consecutive
 * polls see the button pressed; release is reported when a previously
 * held button reads idle.
 */
static void handle_buttons(struct input_dev *input)
{
	struct buttons_dev *bdev = input_get_drvdata(input);
	uint32_t status;
	int i;
	/* invert (buttons are active-low) and shift bits down to [7:0] */
	status = ~readl(bdev->reg) >> 24;
	for (i = 0; i < ARRAY_SIZE(bdev->keymap); i++) {
		if (status & (1U << i)) {
			/* report press exactly once, on the threshold poll */
			if (++bdev->count[i] == BUTTONS_COUNT_THRESHOLD) {
				input_event(input, EV_MSC, MSC_SCAN, i);
				input_report_key(input, bdev->keymap[i], 1);
				input_sync(input);
			}
		} else {
			if (bdev->count[i] >= BUTTONS_COUNT_THRESHOLD) {
				input_event(input, EV_MSC, MSC_SCAN, i);
				input_report_key(input, bdev->keymap[i], 0);
				input_sync(input);
			}
			bdev->count[i] = 0;
		}
	}
}
/* Platform probe: map the button register, build the polled input device
 * with a modifiable keymap, and register it.  Fully devm-managed.
 */
static int cobalt_buttons_probe(struct platform_device *pdev)
{
	struct buttons_dev *bdev;
	struct input_dev *input;
	struct resource *res;
	int error, i;
	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EBUSY;
	bdev->reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!bdev->reg)
		return -ENOMEM;
	/* start from the default map; userspace may remap via EVIOCSKEYCODE */
	memcpy(bdev->keymap, cobalt_map, sizeof(bdev->keymap));
	input = devm_input_allocate_device(&pdev->dev);
	if (!input)
		return -ENOMEM;
	input_set_drvdata(input, bdev);
	input->name = "Cobalt buttons";
	input->phys = "cobalt/input0";
	input->id.bustype = BUS_HOST;
	input->keycode = bdev->keymap;
	input->keycodemax = ARRAY_SIZE(bdev->keymap);
	input->keycodesize = sizeof(unsigned short);
	input_set_capability(input, EV_MSC, MSC_SCAN);
	__set_bit(EV_KEY, input->evbit);
	for (i = 0; i < ARRAY_SIZE(cobalt_map); i++)
		__set_bit(bdev->keymap[i], input->keybit);
	__clear_bit(KEY_RESERVED, input->keybit);
	error = input_setup_polling(input, handle_buttons);
	if (error)
		return error;
	input_set_poll_interval(input, BUTTONS_POLL_INTERVAL);
	error = input_register_device(input);
	if (error)
		return error;
	return 0;
}
MODULE_AUTHOR("Yoichi Yuasa <[email protected]>");
MODULE_DESCRIPTION("Cobalt button interface driver");
MODULE_LICENSE("GPL");
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:Cobalt buttons");
/* Platform driver registration (no remove: everything is devm-managed). */
static struct platform_driver cobalt_buttons_driver = {
	.probe	= cobalt_buttons_probe,
	.driver	= {
		.name	= "Cobalt buttons",
	},
};
module_platform_driver(cobalt_buttons_driver);
|
linux-master
|
drivers/input/misc/cobalt_btns.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ADLX345/346 Three-Axis Digital Accelerometers (SPI Interface)
*
* Enter bugs at http://blackfin.uclinux.org/
*
* Copyright (C) 2009 Michael Hennerich, Analog Devices Inc.
*/
#include <linux/input.h> /* BUS_SPI */
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/pm.h>
#include <linux/types.h>
#include "adxl34x.h"
#define MAX_SPI_FREQ_HZ 5000000
#define MAX_FREQ_NO_FIFODELAY 1500000
#define ADXL34X_CMD_MULTB (1 << 6)
#define ADXL34X_CMD_READ (1 << 7)
#define ADXL34X_WRITECMD(reg) (reg & 0x3F)
#define ADXL34X_READCMD(reg) (ADXL34X_CMD_READ | (reg & 0x3F))
#define ADXL34X_READMB_CMD(reg) (ADXL34X_CMD_READ | ADXL34X_CMD_MULTB \
| (reg & 0x3F))
/* Read one ADXL34x register over SPI; returns the value or a negative
 * error from spi_w8r8().
 */
static int adxl34x_spi_read(struct device *dev, unsigned char reg)
{
	return spi_w8r8(to_spi_device(dev), ADXL34X_READCMD(reg));
}
/* Write one ADXL34x register over SPI: command byte followed by the value. */
static int adxl34x_spi_write(struct device *dev,
			     unsigned char reg, unsigned char val)
{
	unsigned char msg[2] = { ADXL34X_WRITECMD(reg), val };

	return spi_write(to_spi_device(dev), msg, sizeof(msg));
}
/* Multi-byte register read using the chip's auto-increment (MB) command.
 * Returns 0 on success or the negative spi_write_then_read() error.
 */
static int adxl34x_spi_read_block(struct device *dev,
				  unsigned char reg, int count,
				  void *buf)
{
	unsigned char cmd = ADXL34X_READMB_CMD(reg);
	ssize_t ret;

	ret = spi_write_then_read(to_spi_device(dev), &cmd, 1, buf, count);
	return ret < 0 ? ret : 0;
}
/* SPI transport ops handed to the bus-agnostic adxl34x core. */
static const struct adxl34x_bus_ops adxl34x_spi_bops = {
	.bustype	= BUS_SPI,
	.write		= adxl34x_spi_write,
	.read		= adxl34x_spi_read,
	.read_block	= adxl34x_spi_read_block,
};
/* SPI probe: validate the bus clock, then delegate to the shared
 * adxl34x core.  Above MAX_FREQ_NO_FIFODELAY the core must insert a
 * FIFO delay (second argument to adxl34x_probe()).
 */
static int adxl34x_spi_probe(struct spi_device *spi)
{
	struct adxl34x *ac;
	/* don't exceed max specified SPI CLK frequency */
	if (spi->max_speed_hz > MAX_SPI_FREQ_HZ) {
		dev_err(&spi->dev, "SPI CLK %d Hz too fast\n", spi->max_speed_hz);
		return -EINVAL;
	}
	ac = adxl34x_probe(&spi->dev, spi->irq,
			   spi->max_speed_hz > MAX_FREQ_NO_FIFODELAY,
			   &adxl34x_spi_bops);
	if (IS_ERR(ac))
		return PTR_ERR(ac);
	spi_set_drvdata(spi, ac);
	return 0;
}
/* SPI remove: hand the core state back to the shared teardown helper. */
static void adxl34x_spi_remove(struct spi_device *spi)
{
	adxl34x_remove(spi_get_drvdata(spi));
}
/* SPI driver registration and module metadata. */
static struct spi_driver adxl34x_driver = {
	.driver = {
		.name = "adxl34x",
		.pm = pm_sleep_ptr(&adxl34x_pm),
	},
	.probe   = adxl34x_spi_probe,
	.remove  = adxl34x_spi_remove,
};
module_spi_driver(adxl34x_driver);
MODULE_AUTHOR("Michael Hennerich <[email protected]>");
MODULE_DESCRIPTION("ADXL345/346 Three-Axis Digital Accelerometer SPI Bus Driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/adxl34x-spi.c
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Azoteq IQS269A Capacitive Touch Controller
*
* Copyright (C) 2020 Jeff LaBundy <[email protected]>
*
* This driver registers up to 3 input devices: one representing capacitive or
* inductive keys as well as Hall-effect switches, and one for each of the two
* axial sliders presented by the device.
*/
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#define IQS269_VER_INFO 0x00
#define IQS269_VER_INFO_PROD_NUM 0x4F
#define IQS269_SYS_FLAGS 0x02
#define IQS269_SYS_FLAGS_SHOW_RESET BIT(15)
#define IQS269_SYS_FLAGS_PWR_MODE_MASK GENMASK(12, 11)
#define IQS269_SYS_FLAGS_PWR_MODE_SHIFT 11
#define IQS269_SYS_FLAGS_IN_ATI BIT(10)
#define IQS269_CHx_COUNTS 0x08
#define IQS269_SLIDER_X 0x30
#define IQS269_CAL_DATA_A 0x35
#define IQS269_CAL_DATA_A_HALL_BIN_L_MASK GENMASK(15, 12)
#define IQS269_CAL_DATA_A_HALL_BIN_L_SHIFT 12
#define IQS269_CAL_DATA_A_HALL_BIN_R_MASK GENMASK(11, 8)
#define IQS269_CAL_DATA_A_HALL_BIN_R_SHIFT 8
#define IQS269_SYS_SETTINGS 0x80
#define IQS269_SYS_SETTINGS_CLK_DIV BIT(15)
#define IQS269_SYS_SETTINGS_ULP_AUTO BIT(14)
#define IQS269_SYS_SETTINGS_DIS_AUTO BIT(13)
#define IQS269_SYS_SETTINGS_PWR_MODE_MASK GENMASK(12, 11)
#define IQS269_SYS_SETTINGS_PWR_MODE_SHIFT 11
#define IQS269_SYS_SETTINGS_PWR_MODE_MAX 3
#define IQS269_SYS_SETTINGS_ULP_UPDATE_MASK GENMASK(10, 8)
#define IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT 8
#define IQS269_SYS_SETTINGS_ULP_UPDATE_MAX 7
#define IQS269_SYS_SETTINGS_RESEED_OFFSET BIT(6)
#define IQS269_SYS_SETTINGS_EVENT_MODE BIT(5)
#define IQS269_SYS_SETTINGS_EVENT_MODE_LP BIT(4)
#define IQS269_SYS_SETTINGS_REDO_ATI BIT(2)
#define IQS269_SYS_SETTINGS_ACK_RESET BIT(0)
#define IQS269_FILT_STR_LP_LTA_MASK GENMASK(7, 6)
#define IQS269_FILT_STR_LP_LTA_SHIFT 6
#define IQS269_FILT_STR_LP_CNT_MASK GENMASK(5, 4)
#define IQS269_FILT_STR_LP_CNT_SHIFT 4
#define IQS269_FILT_STR_NP_LTA_MASK GENMASK(3, 2)
#define IQS269_FILT_STR_NP_LTA_SHIFT 2
#define IQS269_FILT_STR_NP_CNT_MASK GENMASK(1, 0)
#define IQS269_FILT_STR_MAX 3
#define IQS269_EVENT_MASK_SYS BIT(6)
#define IQS269_EVENT_MASK_DEEP BIT(2)
#define IQS269_EVENT_MASK_TOUCH BIT(1)
#define IQS269_EVENT_MASK_PROX BIT(0)
#define IQS269_RATE_NP_MS_MAX 255
#define IQS269_RATE_LP_MS_MAX 255
#define IQS269_RATE_ULP_MS_MAX 4080
#define IQS269_TIMEOUT_PWR_MS_MAX 130560
#define IQS269_TIMEOUT_LTA_MS_MAX 130560
#define IQS269_MISC_A_ATI_BAND_DISABLE BIT(15)
#define IQS269_MISC_A_ATI_LP_ONLY BIT(14)
#define IQS269_MISC_A_ATI_BAND_TIGHTEN BIT(13)
#define IQS269_MISC_A_FILT_DISABLE BIT(12)
#define IQS269_MISC_A_GPIO3_SELECT_MASK GENMASK(10, 8)
#define IQS269_MISC_A_GPIO3_SELECT_SHIFT 8
#define IQS269_MISC_A_DUAL_DIR BIT(6)
#define IQS269_MISC_A_TX_FREQ_MASK GENMASK(5, 4)
#define IQS269_MISC_A_TX_FREQ_SHIFT 4
#define IQS269_MISC_A_TX_FREQ_MAX 3
#define IQS269_MISC_A_GLOBAL_CAP_SIZE BIT(0)
#define IQS269_MISC_B_RESEED_UI_SEL_MASK GENMASK(7, 6)
#define IQS269_MISC_B_RESEED_UI_SEL_SHIFT 6
#define IQS269_MISC_B_RESEED_UI_SEL_MAX 3
#define IQS269_MISC_B_TRACKING_UI_ENABLE BIT(4)
#define IQS269_MISC_B_FILT_STR_SLIDER GENMASK(1, 0)
#define IQS269_CHx_ENG_A_MEAS_CAP_SIZE BIT(15)
#define IQS269_CHx_ENG_A_RX_GND_INACTIVE BIT(13)
#define IQS269_CHx_ENG_A_LOCAL_CAP_SIZE BIT(12)
#define IQS269_CHx_ENG_A_ATI_MODE_MASK GENMASK(9, 8)
#define IQS269_CHx_ENG_A_ATI_MODE_SHIFT 8
#define IQS269_CHx_ENG_A_ATI_MODE_MAX 3
#define IQS269_CHx_ENG_A_INV_LOGIC BIT(7)
#define IQS269_CHx_ENG_A_PROJ_BIAS_MASK GENMASK(6, 5)
#define IQS269_CHx_ENG_A_PROJ_BIAS_SHIFT 5
#define IQS269_CHx_ENG_A_PROJ_BIAS_MAX 3
#define IQS269_CHx_ENG_A_SENSE_MODE_MASK GENMASK(3, 0)
#define IQS269_CHx_ENG_A_SENSE_MODE_MAX 15
#define IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE BIT(13)
#define IQS269_CHx_ENG_B_SENSE_FREQ_MASK GENMASK(10, 9)
#define IQS269_CHx_ENG_B_SENSE_FREQ_SHIFT 9
#define IQS269_CHx_ENG_B_SENSE_FREQ_MAX 3
#define IQS269_CHx_ENG_B_STATIC_ENABLE BIT(8)
#define IQS269_CHx_ENG_B_ATI_BASE_MASK GENMASK(7, 6)
#define IQS269_CHx_ENG_B_ATI_BASE_75 0x00
#define IQS269_CHx_ENG_B_ATI_BASE_100 0x40
#define IQS269_CHx_ENG_B_ATI_BASE_150 0x80
#define IQS269_CHx_ENG_B_ATI_BASE_200 0xC0
#define IQS269_CHx_ENG_B_ATI_TARGET_MASK GENMASK(5, 0)
#define IQS269_CHx_ENG_B_ATI_TARGET_MAX 2016
#define IQS269_CHx_WEIGHT_MAX 255
#define IQS269_CHx_THRESH_MAX 255
#define IQS269_CHx_HYST_DEEP_MASK GENMASK(7, 4)
#define IQS269_CHx_HYST_DEEP_SHIFT 4
#define IQS269_CHx_HYST_TOUCH_MASK GENMASK(3, 0)
#define IQS269_CHx_HYST_MAX 15
#define IQS269_CHx_HALL_INACTIVE 6
#define IQS269_CHx_HALL_ACTIVE 7
#define IQS269_HALL_PAD_R BIT(0)
#define IQS269_HALL_PAD_L BIT(1)
#define IQS269_HALL_PAD_INV BIT(6)
#define IQS269_HALL_UI 0xF5
#define IQS269_HALL_UI_ENABLE BIT(15)
#define IQS269_MAX_REG 0xFF
#define IQS269_NUM_CH 8
#define IQS269_NUM_SL 2
#define iqs269_irq_wait() usleep_range(200, 250)
enum iqs269_local_cap_size {
IQS269_LOCAL_CAP_SIZE_0,
IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY,
IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5,
};
enum iqs269_st_offs {
IQS269_ST_OFFS_PROX,
IQS269_ST_OFFS_DIR,
IQS269_ST_OFFS_TOUCH,
IQS269_ST_OFFS_DEEP,
};
enum iqs269_th_offs {
IQS269_TH_OFFS_PROX,
IQS269_TH_OFFS_TOUCH,
IQS269_TH_OFFS_DEEP,
};
enum iqs269_event_id {
IQS269_EVENT_PROX_DN,
IQS269_EVENT_PROX_UP,
IQS269_EVENT_TOUCH_DN,
IQS269_EVENT_TOUCH_UP,
IQS269_EVENT_DEEP_DN,
IQS269_EVENT_DEEP_UP,
};
struct iqs269_switch_desc {
unsigned int code;
bool enabled;
};
struct iqs269_event_desc {
const char *name;
enum iqs269_st_offs st_offs;
enum iqs269_th_offs th_offs;
bool dir_up;
u8 mask;
};
static const struct iqs269_event_desc iqs269_events[] = {
[IQS269_EVENT_PROX_DN] = {
.name = "event-prox",
.st_offs = IQS269_ST_OFFS_PROX,
.th_offs = IQS269_TH_OFFS_PROX,
.mask = IQS269_EVENT_MASK_PROX,
},
[IQS269_EVENT_PROX_UP] = {
.name = "event-prox-alt",
.st_offs = IQS269_ST_OFFS_PROX,
.th_offs = IQS269_TH_OFFS_PROX,
.dir_up = true,
.mask = IQS269_EVENT_MASK_PROX,
},
[IQS269_EVENT_TOUCH_DN] = {
.name = "event-touch",
.st_offs = IQS269_ST_OFFS_TOUCH,
.th_offs = IQS269_TH_OFFS_TOUCH,
.mask = IQS269_EVENT_MASK_TOUCH,
},
[IQS269_EVENT_TOUCH_UP] = {
.name = "event-touch-alt",
.st_offs = IQS269_ST_OFFS_TOUCH,
.th_offs = IQS269_TH_OFFS_TOUCH,
.dir_up = true,
.mask = IQS269_EVENT_MASK_TOUCH,
},
[IQS269_EVENT_DEEP_DN] = {
.name = "event-deep",
.st_offs = IQS269_ST_OFFS_DEEP,
.th_offs = IQS269_TH_OFFS_DEEP,
.mask = IQS269_EVENT_MASK_DEEP,
},
[IQS269_EVENT_DEEP_UP] = {
.name = "event-deep-alt",
.st_offs = IQS269_ST_OFFS_DEEP,
.th_offs = IQS269_TH_OFFS_DEEP,
.dir_up = true,
.mask = IQS269_EVENT_MASK_DEEP,
},
};
struct iqs269_ver_info {
u8 prod_num;
u8 sw_num;
u8 hw_num;
u8 padding;
} __packed;
struct iqs269_ch_reg {
u8 rx_enable;
u8 tx_enable;
__be16 engine_a;
__be16 engine_b;
__be16 ati_comp;
u8 thresh[3];
u8 hyst;
u8 assoc_select;
u8 assoc_weight;
} __packed;
struct iqs269_sys_reg {
__be16 general;
u8 active;
u8 filter;
u8 reseed;
u8 event_mask;
u8 rate_np;
u8 rate_lp;
u8 rate_ulp;
u8 timeout_pwr;
u8 timeout_rdy;
u8 timeout_lta;
__be16 misc_a;
__be16 misc_b;
u8 blocking;
u8 padding;
u8 slider_select[IQS269_NUM_SL];
u8 timeout_tap;
u8 timeout_swipe;
u8 thresh_swipe;
u8 redo_ati;
struct iqs269_ch_reg ch_reg[IQS269_NUM_CH];
} __packed;
struct iqs269_flags {
__be16 system;
u8 gesture;
u8 padding;
u8 states[4];
} __packed;
struct iqs269_private {
struct i2c_client *client;
struct regmap *regmap;
struct mutex lock;
struct iqs269_switch_desc switches[ARRAY_SIZE(iqs269_events)];
struct iqs269_sys_reg sys_reg;
struct completion ati_done;
struct input_dev *keypad;
struct input_dev *slider[IQS269_NUM_SL];
unsigned int keycode[ARRAY_SIZE(iqs269_events) * IQS269_NUM_CH];
unsigned int ch_num;
bool hall_enable;
bool ati_current;
};
/* Update the cached ATI mode field of channel @ch_num's engine A register
 * (written to the device later) and mark ATI as stale so it is redone.
 * Returns -EINVAL for an out-of-range channel or mode.
 */
static int iqs269_ati_mode_set(struct iqs269_private *iqs269,
			       unsigned int ch_num, unsigned int mode)
{
	struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg;
	u16 engine_a;
	if (ch_num >= IQS269_NUM_CH)
		return -EINVAL;
	if (mode > IQS269_CHx_ENG_A_ATI_MODE_MAX)
		return -EINVAL;
	mutex_lock(&iqs269->lock);
	engine_a = be16_to_cpu(ch_reg[ch_num].engine_a);
	engine_a &= ~IQS269_CHx_ENG_A_ATI_MODE_MASK;
	engine_a |= (mode << IQS269_CHx_ENG_A_ATI_MODE_SHIFT);
	ch_reg[ch_num].engine_a = cpu_to_be16(engine_a);
	/* force a fresh ATI cycle on the next device sync */
	iqs269->ati_current = false;
	mutex_unlock(&iqs269->lock);
	return 0;
}
/* Report channel @ch_num's cached ATI mode via @mode.
 * Returns 0, or -EINVAL for an out-of-range channel.
 */
static int iqs269_ati_mode_get(struct iqs269_private *iqs269,
			       unsigned int ch_num, unsigned int *mode)
{
	u16 val;

	if (ch_num >= IQS269_NUM_CH)
		return -EINVAL;

	mutex_lock(&iqs269->lock);
	val = be16_to_cpu(iqs269->sys_reg.ch_reg[ch_num].engine_a);
	mutex_unlock(&iqs269->lock);

	*mode = (val & IQS269_CHx_ENG_A_ATI_MODE_MASK) >>
		IQS269_CHx_ENG_A_ATI_MODE_SHIFT;
	return 0;
}
/* Update the cached ATI base field of channel @ch_num's engine B register.
 * @base is the human-readable count (75/100/150/200), translated to the
 * register encoding; any other value yields -EINVAL.  Marks ATI stale.
 */
static int iqs269_ati_base_set(struct iqs269_private *iqs269,
			       unsigned int ch_num, unsigned int base)
{
	struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg;
	u16 engine_b;
	if (ch_num >= IQS269_NUM_CH)
		return -EINVAL;
	switch (base) {
	case 75:
		base = IQS269_CHx_ENG_B_ATI_BASE_75;
		break;
	case 100:
		base = IQS269_CHx_ENG_B_ATI_BASE_100;
		break;
	case 150:
		base = IQS269_CHx_ENG_B_ATI_BASE_150;
		break;
	case 200:
		base = IQS269_CHx_ENG_B_ATI_BASE_200;
		break;
	default:
		return -EINVAL;
	}
	mutex_lock(&iqs269->lock);
	engine_b = be16_to_cpu(ch_reg[ch_num].engine_b);
	engine_b &= ~IQS269_CHx_ENG_B_ATI_BASE_MASK;
	engine_b |= base;
	ch_reg[ch_num].engine_b = cpu_to_be16(engine_b);
	/* force a fresh ATI cycle on the next device sync */
	iqs269->ati_current = false;
	mutex_unlock(&iqs269->lock);
	return 0;
}
/*
 * Report the cached ATI base of the given channel in counts.
 *
 * Returns 0 on success, or -EINVAL for an out-of-range channel or an
 * unrecognized register encoding.
 */
static int iqs269_ati_base_get(struct iqs269_private *iqs269,
			       unsigned int ch_num, unsigned int *base)
{
	struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg;
	u16 val;

	if (ch_num >= IQS269_NUM_CH)
		return -EINVAL;

	mutex_lock(&iqs269->lock);
	val = be16_to_cpu(ch_reg[ch_num].engine_b);
	mutex_unlock(&iqs269->lock);

	switch (val & IQS269_CHx_ENG_B_ATI_BASE_MASK) {
	case IQS269_CHx_ENG_B_ATI_BASE_75:
		*base = 75;
		break;

	case IQS269_CHx_ENG_B_ATI_BASE_100:
		*base = 100;
		break;

	case IQS269_CHx_ENG_B_ATI_BASE_150:
		*base = 150;
		break;

	case IQS269_CHx_ENG_B_ATI_BASE_200:
		*base = 200;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Cache a new ATI target for the given channel and mark ATI as stale.
 *
 * Returns 0 on success or -EINVAL for an invalid channel or target.
 */
static int iqs269_ati_target_set(struct iqs269_private *iqs269,
				 unsigned int ch_num, unsigned int target)
{
	struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg;
	u16 engine_b;

	if (ch_num >= IQS269_NUM_CH ||
	    target > IQS269_CHx_ENG_B_ATI_TARGET_MAX)
		return -EINVAL;

	mutex_lock(&iqs269->lock);

	engine_b = be16_to_cpu(ch_reg[ch_num].engine_b) &
		   ~IQS269_CHx_ENG_B_ATI_TARGET_MASK;

	/* The register stores the target in increments of 32 counts. */
	ch_reg[ch_num].engine_b = cpu_to_be16(engine_b | target / 32);

	iqs269->ati_current = false;

	mutex_unlock(&iqs269->lock);

	return 0;
}
/*
 * Report the cached ATI target of the given channel in counts.
 *
 * Returns 0 on success or -EINVAL for an out-of-range channel.
 */
static int iqs269_ati_target_get(struct iqs269_private *iqs269,
				 unsigned int ch_num, unsigned int *target)
{
	struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg;
	u16 val;

	if (ch_num >= IQS269_NUM_CH)
		return -EINVAL;

	mutex_lock(&iqs269->lock);
	val = be16_to_cpu(ch_reg[ch_num].engine_b);
	mutex_unlock(&iqs269->lock);

	/* The register holds the target in increments of 32 counts. */
	*target = (val & IQS269_CHx_ENG_B_ATI_TARGET_MASK) * 32;

	return 0;
}
/*
 * Convert a firmware property listing channel numbers into a channel bitmask.
 * An absent property is not an error: the mask is left untouched.
 *
 * Returns 0 on success or a negative error code for a malformed property.
 */
static int iqs269_parse_mask(const struct fwnode_handle *fwnode,
			     const char *propname, u8 *mask)
{
	unsigned int ch[IQS269_NUM_CH];
	int num_ch, error, i;

	num_ch = fwnode_property_count_u32(fwnode, propname);
	if (num_ch < 0)
		return 0;

	if (num_ch > IQS269_NUM_CH)
		return -EINVAL;

	error = fwnode_property_read_u32_array(fwnode, propname, ch, num_ch);
	if (error)
		return error;

	*mask = 0;

	for (i = 0; i < num_ch; i++) {
		if (ch[i] >= IQS269_NUM_CH)
			return -EINVAL;

		*mask |= BIT(ch[i]);
	}

	return 0;
}
static int iqs269_parse_chan(struct iqs269_private *iqs269,
const struct fwnode_handle *ch_node)
{
struct i2c_client *client = iqs269->client;
struct fwnode_handle *ev_node;
struct iqs269_ch_reg *ch_reg;
u16 engine_a, engine_b;
unsigned int reg, val;
int error, i;
error = fwnode_property_read_u32(ch_node, "reg", ®);
if (error) {
dev_err(&client->dev, "Failed to read channel number: %d\n",
error);
return error;
} else if (reg >= IQS269_NUM_CH) {
dev_err(&client->dev, "Invalid channel number: %u\n", reg);
return -EINVAL;
}
iqs269->sys_reg.active |= BIT(reg);
if (!fwnode_property_present(ch_node, "azoteq,reseed-disable"))
iqs269->sys_reg.reseed |= BIT(reg);
if (fwnode_property_present(ch_node, "azoteq,blocking-enable"))
iqs269->sys_reg.blocking |= BIT(reg);
if (fwnode_property_present(ch_node, "azoteq,slider0-select"))
iqs269->sys_reg.slider_select[0] |= BIT(reg);
if (fwnode_property_present(ch_node, "azoteq,slider1-select"))
iqs269->sys_reg.slider_select[1] |= BIT(reg);
ch_reg = &iqs269->sys_reg.ch_reg[reg];
error = iqs269_parse_mask(ch_node, "azoteq,rx-enable",
&ch_reg->rx_enable);
if (error) {
dev_err(&client->dev, "Invalid channel %u RX enable mask: %d\n",
reg, error);
return error;
}
error = iqs269_parse_mask(ch_node, "azoteq,tx-enable",
&ch_reg->tx_enable);
if (error) {
dev_err(&client->dev, "Invalid channel %u TX enable mask: %d\n",
reg, error);
return error;
}
engine_a = be16_to_cpu(ch_reg->engine_a);
engine_b = be16_to_cpu(ch_reg->engine_b);
engine_a |= IQS269_CHx_ENG_A_MEAS_CAP_SIZE;
if (fwnode_property_present(ch_node, "azoteq,meas-cap-decrease"))
engine_a &= ~IQS269_CHx_ENG_A_MEAS_CAP_SIZE;
engine_a |= IQS269_CHx_ENG_A_RX_GND_INACTIVE;
if (fwnode_property_present(ch_node, "azoteq,rx-float-inactive"))
engine_a &= ~IQS269_CHx_ENG_A_RX_GND_INACTIVE;
engine_a &= ~IQS269_CHx_ENG_A_LOCAL_CAP_SIZE;
engine_b &= ~IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE;
if (!fwnode_property_read_u32(ch_node, "azoteq,local-cap-size", &val)) {
switch (val) {
case IQS269_LOCAL_CAP_SIZE_0:
break;
case IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5:
engine_a |= IQS269_CHx_ENG_A_LOCAL_CAP_SIZE;
fallthrough;
case IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY:
engine_b |= IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE;
break;
default:
dev_err(&client->dev,
"Invalid channel %u local cap. size: %u\n", reg,
val);
return -EINVAL;
}
}
engine_a &= ~IQS269_CHx_ENG_A_INV_LOGIC;
if (fwnode_property_present(ch_node, "azoteq,invert-enable"))
engine_a |= IQS269_CHx_ENG_A_INV_LOGIC;
if (!fwnode_property_read_u32(ch_node, "azoteq,proj-bias", &val)) {
if (val > IQS269_CHx_ENG_A_PROJ_BIAS_MAX) {
dev_err(&client->dev,
"Invalid channel %u bias current: %u\n", reg,
val);
return -EINVAL;
}
engine_a &= ~IQS269_CHx_ENG_A_PROJ_BIAS_MASK;
engine_a |= (val << IQS269_CHx_ENG_A_PROJ_BIAS_SHIFT);
}
if (!fwnode_property_read_u32(ch_node, "azoteq,sense-mode", &val)) {
if (val > IQS269_CHx_ENG_A_SENSE_MODE_MAX) {
dev_err(&client->dev,
"Invalid channel %u sensing mode: %u\n", reg,
val);
return -EINVAL;
}
engine_a &= ~IQS269_CHx_ENG_A_SENSE_MODE_MASK;
engine_a |= val;
}
if (!fwnode_property_read_u32(ch_node, "azoteq,sense-freq", &val)) {
if (val > IQS269_CHx_ENG_B_SENSE_FREQ_MAX) {
dev_err(&client->dev,
"Invalid channel %u sensing frequency: %u\n",
reg, val);
return -EINVAL;
}
engine_b &= ~IQS269_CHx_ENG_B_SENSE_FREQ_MASK;
engine_b |= (val << IQS269_CHx_ENG_B_SENSE_FREQ_SHIFT);
}
engine_b &= ~IQS269_CHx_ENG_B_STATIC_ENABLE;
if (fwnode_property_present(ch_node, "azoteq,static-enable"))
engine_b |= IQS269_CHx_ENG_B_STATIC_ENABLE;
ch_reg->engine_a = cpu_to_be16(engine_a);
ch_reg->engine_b = cpu_to_be16(engine_b);
if (!fwnode_property_read_u32(ch_node, "azoteq,ati-mode", &val)) {
error = iqs269_ati_mode_set(iqs269, reg, val);
if (error) {
dev_err(&client->dev,
"Invalid channel %u ATI mode: %u\n", reg, val);
return error;
}
}
if (!fwnode_property_read_u32(ch_node, "azoteq,ati-base", &val)) {
error = iqs269_ati_base_set(iqs269, reg, val);
if (error) {
dev_err(&client->dev,
"Invalid channel %u ATI base: %u\n", reg, val);
return error;
}
}
if (!fwnode_property_read_u32(ch_node, "azoteq,ati-target", &val)) {
error = iqs269_ati_target_set(iqs269, reg, val);
if (error) {
dev_err(&client->dev,
"Invalid channel %u ATI target: %u\n", reg,
val);
return error;
}
}
error = iqs269_parse_mask(ch_node, "azoteq,assoc-select",
&ch_reg->assoc_select);
if (error) {
dev_err(&client->dev, "Invalid channel %u association: %d\n",
reg, error);
return error;
}
if (!fwnode_property_read_u32(ch_node, "azoteq,assoc-weight", &val)) {
if (val > IQS269_CHx_WEIGHT_MAX) {
dev_err(&client->dev,
"Invalid channel %u associated weight: %u\n",
reg, val);
return -EINVAL;
}
ch_reg->assoc_weight = val;
}
for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) {
ev_node = fwnode_get_named_child_node(ch_node,
iqs269_events[i].name);
if (!ev_node)
continue;
if (!fwnode_property_read_u32(ev_node, "azoteq,thresh", &val)) {
if (val > IQS269_CHx_THRESH_MAX) {
dev_err(&client->dev,
"Invalid channel %u threshold: %u\n",
reg, val);
fwnode_handle_put(ev_node);
return -EINVAL;
}
ch_reg->thresh[iqs269_events[i].th_offs] = val;
}
if (!fwnode_property_read_u32(ev_node, "azoteq,hyst", &val)) {
u8 *hyst = &ch_reg->hyst;
if (val > IQS269_CHx_HYST_MAX) {
dev_err(&client->dev,
"Invalid channel %u hysteresis: %u\n",
reg, val);
fwnode_handle_put(ev_node);
return -EINVAL;
}
if (i == IQS269_EVENT_DEEP_DN ||
i == IQS269_EVENT_DEEP_UP) {
*hyst &= ~IQS269_CHx_HYST_DEEP_MASK;
*hyst |= (val << IQS269_CHx_HYST_DEEP_SHIFT);
} else if (i == IQS269_EVENT_TOUCH_DN ||
i == IQS269_EVENT_TOUCH_UP) {
*hyst &= ~IQS269_CHx_HYST_TOUCH_MASK;
*hyst |= val;
}
}
error = fwnode_property_read_u32(ev_node, "linux,code", &val);
fwnode_handle_put(ev_node);
if (error == -EINVAL) {
continue;
} else if (error) {
dev_err(&client->dev,
"Failed to read channel %u code: %d\n", reg,
error);
return error;
}
switch (reg) {
case IQS269_CHx_HALL_ACTIVE:
if (iqs269->hall_enable) {
iqs269->switches[i].code = val;
iqs269->switches[i].enabled = true;
}
fallthrough;
case IQS269_CHx_HALL_INACTIVE:
if (iqs269->hall_enable)
break;
fallthrough;
default:
iqs269->keycode[i * IQS269_NUM_CH + reg] = val;
}
iqs269->sys_reg.event_mask &= ~iqs269_events[i].mask;
}
return 0;
}
/*
 * Read back the device's register file, then overlay all device-level and
 * per-channel firmware properties onto the shadow copy (iqs269->sys_reg).
 * The result is written to the device later by iqs269_dev_init().
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int iqs269_parse_prop(struct iqs269_private *iqs269)
{
	struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg;
	struct i2c_client *client = iqs269->client;
	struct fwnode_handle *ch_node;
	u16 general, misc_a, misc_b;
	unsigned int val;
	int error;

	iqs269->hall_enable = device_property_present(&client->dev,
						      "azoteq,hall-enable");

	/* Seed the shadow registers with the device's current contents. */
	error = regmap_raw_read(iqs269->regmap, IQS269_SYS_SETTINGS, sys_reg,
				sizeof(*sys_reg));
	if (error)
		return error;

	if (!device_property_read_u32(&client->dev, "azoteq,filt-str-lp-lta",
				      &val)) {
		if (val > IQS269_FILT_STR_MAX) {
			dev_err(&client->dev, "Invalid filter strength: %u\n",
				val);
			return -EINVAL;
		}

		sys_reg->filter &= ~IQS269_FILT_STR_LP_LTA_MASK;
		sys_reg->filter |= (val << IQS269_FILT_STR_LP_LTA_SHIFT);
	}

	if (!device_property_read_u32(&client->dev, "azoteq,filt-str-lp-cnt",
				      &val)) {
		if (val > IQS269_FILT_STR_MAX) {
			dev_err(&client->dev, "Invalid filter strength: %u\n",
				val);
			return -EINVAL;
		}

		sys_reg->filter &= ~IQS269_FILT_STR_LP_CNT_MASK;
		sys_reg->filter |= (val << IQS269_FILT_STR_LP_CNT_SHIFT);
	}

	if (!device_property_read_u32(&client->dev, "azoteq,filt-str-np-lta",
				      &val)) {
		if (val > IQS269_FILT_STR_MAX) {
			dev_err(&client->dev, "Invalid filter strength: %u\n",
				val);
			return -EINVAL;
		}

		sys_reg->filter &= ~IQS269_FILT_STR_NP_LTA_MASK;
		sys_reg->filter |= (val << IQS269_FILT_STR_NP_LTA_SHIFT);
	}

	if (!device_property_read_u32(&client->dev, "azoteq,filt-str-np-cnt",
				      &val)) {
		if (val > IQS269_FILT_STR_MAX) {
			dev_err(&client->dev, "Invalid filter strength: %u\n",
				val);
			return -EINVAL;
		}

		sys_reg->filter &= ~IQS269_FILT_STR_NP_CNT_MASK;
		sys_reg->filter |= val;
	}

	if (!device_property_read_u32(&client->dev, "azoteq,rate-np-ms",
				      &val)) {
		if (val > IQS269_RATE_NP_MS_MAX) {
			dev_err(&client->dev, "Invalid report rate: %u\n", val);
			return -EINVAL;
		}

		sys_reg->rate_np = val;
	}

	if (!device_property_read_u32(&client->dev, "azoteq,rate-lp-ms",
				      &val)) {
		if (val > IQS269_RATE_LP_MS_MAX) {
			dev_err(&client->dev, "Invalid report rate: %u\n", val);
			return -EINVAL;
		}

		sys_reg->rate_lp = val;
	}

	if (!device_property_read_u32(&client->dev, "azoteq,rate-ulp-ms",
				      &val)) {
		if (val > IQS269_RATE_ULP_MS_MAX) {
			dev_err(&client->dev, "Invalid report rate: %u\n", val);
			return -EINVAL;
		}

		/* The register holds the ULP rate in 16-ms increments. */
		sys_reg->rate_ulp = val / 16;
	}

	if (!device_property_read_u32(&client->dev, "azoteq,timeout-pwr-ms",
				      &val)) {
		if (val > IQS269_TIMEOUT_PWR_MS_MAX) {
			dev_err(&client->dev, "Invalid timeout: %u\n", val);
			return -EINVAL;
		}

		/* Timeouts are stored in 512-ms increments. */
		sys_reg->timeout_pwr = val / 512;
	}

	if (!device_property_read_u32(&client->dev, "azoteq,timeout-lta-ms",
				      &val)) {
		if (val > IQS269_TIMEOUT_LTA_MS_MAX) {
			dev_err(&client->dev, "Invalid timeout: %u\n", val);
			return -EINVAL;
		}

		sys_reg->timeout_lta = val / 512;
	}

	misc_a = be16_to_cpu(sys_reg->misc_a);
	misc_b = be16_to_cpu(sys_reg->misc_b);

	misc_a &= ~IQS269_MISC_A_ATI_BAND_DISABLE;
	if (device_property_present(&client->dev, "azoteq,ati-band-disable"))
		misc_a |= IQS269_MISC_A_ATI_BAND_DISABLE;

	misc_a &= ~IQS269_MISC_A_ATI_LP_ONLY;
	if (device_property_present(&client->dev, "azoteq,ati-lp-only"))
		misc_a |= IQS269_MISC_A_ATI_LP_ONLY;

	misc_a &= ~IQS269_MISC_A_ATI_BAND_TIGHTEN;
	if (device_property_present(&client->dev, "azoteq,ati-band-tighten"))
		misc_a |= IQS269_MISC_A_ATI_BAND_TIGHTEN;

	misc_a &= ~IQS269_MISC_A_FILT_DISABLE;
	if (device_property_present(&client->dev, "azoteq,filt-disable"))
		misc_a |= IQS269_MISC_A_FILT_DISABLE;

	if (!device_property_read_u32(&client->dev, "azoteq,gpio3-select",
				      &val)) {
		if (val >= IQS269_NUM_CH) {
			dev_err(&client->dev, "Invalid GPIO3 selection: %u\n",
				val);
			return -EINVAL;
		}

		misc_a &= ~IQS269_MISC_A_GPIO3_SELECT_MASK;
		misc_a |= (val << IQS269_MISC_A_GPIO3_SELECT_SHIFT);
	}

	misc_a &= ~IQS269_MISC_A_DUAL_DIR;
	if (device_property_present(&client->dev, "azoteq,dual-direction"))
		misc_a |= IQS269_MISC_A_DUAL_DIR;

	if (!device_property_read_u32(&client->dev, "azoteq,tx-freq", &val)) {
		if (val > IQS269_MISC_A_TX_FREQ_MAX) {
			dev_err(&client->dev,
				"Invalid excitation frequency: %u\n", val);
			return -EINVAL;
		}

		misc_a &= ~IQS269_MISC_A_TX_FREQ_MASK;
		misc_a |= (val << IQS269_MISC_A_TX_FREQ_SHIFT);
	}

	misc_a &= ~IQS269_MISC_A_GLOBAL_CAP_SIZE;
	if (device_property_present(&client->dev, "azoteq,global-cap-increase"))
		misc_a |= IQS269_MISC_A_GLOBAL_CAP_SIZE;

	if (!device_property_read_u32(&client->dev, "azoteq,reseed-select",
				      &val)) {
		if (val > IQS269_MISC_B_RESEED_UI_SEL_MAX) {
			dev_err(&client->dev, "Invalid reseed selection: %u\n",
				val);
			return -EINVAL;
		}

		misc_b &= ~IQS269_MISC_B_RESEED_UI_SEL_MASK;
		misc_b |= (val << IQS269_MISC_B_RESEED_UI_SEL_SHIFT);
	}

	misc_b &= ~IQS269_MISC_B_TRACKING_UI_ENABLE;
	if (device_property_present(&client->dev, "azoteq,tracking-enable"))
		misc_b |= IQS269_MISC_B_TRACKING_UI_ENABLE;

	if (!device_property_read_u32(&client->dev, "azoteq,filt-str-slider",
				      &val)) {
		if (val > IQS269_FILT_STR_MAX) {
			dev_err(&client->dev, "Invalid filter strength: %u\n",
				val);
			return -EINVAL;
		}

		misc_b &= ~IQS269_MISC_B_FILT_STR_SLIDER;
		misc_b |= val;
	}

	sys_reg->misc_a = cpu_to_be16(misc_a);
	sys_reg->misc_b = cpu_to_be16(misc_b);

	/*
	 * Clear all channel-derived masks before the child nodes repopulate
	 * them; only events actually bound to a channel remain unmasked.
	 */
	sys_reg->active = 0;
	sys_reg->reseed = 0;

	sys_reg->blocking = 0;

	sys_reg->slider_select[0] = 0;
	sys_reg->slider_select[1] = 0;

	sys_reg->event_mask = ~((u8)IQS269_EVENT_MASK_SYS);

	device_for_each_child_node(&client->dev, ch_node) {
		error = iqs269_parse_chan(iqs269, ch_node);
		if (error) {
			fwnode_handle_put(ch_node);
			return error;
		}
	}

	/*
	 * Volunteer all active channels to participate in ATI when REDO-ATI is
	 * manually triggered.
	 */
	sys_reg->redo_ati = sys_reg->active;

	general = be16_to_cpu(sys_reg->general);

	if (device_property_present(&client->dev, "azoteq,clk-div"))
		general |= IQS269_SYS_SETTINGS_CLK_DIV;

	/*
	 * Configure the device to automatically switch between normal and low-
	 * power modes as a function of sensing activity. Ultra-low-power mode,
	 * if enabled, is reserved for suspend.
	 */
	general &= ~IQS269_SYS_SETTINGS_ULP_AUTO;
	general &= ~IQS269_SYS_SETTINGS_DIS_AUTO;

	general &= ~IQS269_SYS_SETTINGS_PWR_MODE_MASK;
	if (!device_property_read_u32(&client->dev, "azoteq,suspend-mode",
				      &val)) {
		if (val > IQS269_SYS_SETTINGS_PWR_MODE_MAX) {
			dev_err(&client->dev, "Invalid suspend mode: %u\n",
				val);
			return -EINVAL;
		}

		general |= (val << IQS269_SYS_SETTINGS_PWR_MODE_SHIFT);
	}

	if (!device_property_read_u32(&client->dev, "azoteq,ulp-update",
				      &val)) {
		if (val > IQS269_SYS_SETTINGS_ULP_UPDATE_MAX) {
			dev_err(&client->dev, "Invalid update rate: %u\n", val);
			return -EINVAL;
		}

		general &= ~IQS269_SYS_SETTINGS_ULP_UPDATE_MASK;
		general |= (val << IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT);
	}

	general &= ~IQS269_SYS_SETTINGS_RESEED_OFFSET;
	if (device_property_present(&client->dev, "azoteq,reseed-offset"))
		general |= IQS269_SYS_SETTINGS_RESEED_OFFSET;

	general |= IQS269_SYS_SETTINGS_EVENT_MODE;

	/*
	 * As per the datasheet, enable streaming during normal-power mode if
	 * either slider is in use. In that case, the device returns to event
	 * mode during low-power mode.
	 */
	if (sys_reg->slider_select[0] || sys_reg->slider_select[1])
		general |= IQS269_SYS_SETTINGS_EVENT_MODE_LP;

	general |= IQS269_SYS_SETTINGS_REDO_ATI;
	general |= IQS269_SYS_SETTINGS_ACK_RESET;

	sys_reg->general = cpu_to_be16(general);

	return 0;
}
/*
 * Push the shadow register file to the device in a single raw write, which
 * also kicks off ATI (REDO_ATI is set in sys_reg.general by
 * iqs269_parse_prop()).
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int iqs269_dev_init(struct iqs269_private *iqs269)
{
	int error;

	mutex_lock(&iqs269->lock);

	error = regmap_update_bits(iqs269->regmap, IQS269_HALL_UI,
				   IQS269_HALL_UI_ENABLE,
				   iqs269->hall_enable ? ~0 : 0);
	if (error)
		goto err_mutex;

	error = regmap_raw_write(iqs269->regmap, IQS269_SYS_SETTINGS,
				 &iqs269->sys_reg, sizeof(iqs269->sys_reg));
	if (error)
		goto err_mutex;

	/*
	 * The following delay gives the device time to deassert its RDY output
	 * so as to prevent an interrupt from being serviced prematurely.
	 */
	usleep_range(2000, 2100);

	iqs269->ati_current = true;

err_mutex:
	mutex_unlock(&iqs269->lock);

	return error;
}
/*
 * Allocate the keypad device and declare its key/switch capabilities, and
 * allocate plus register one input device per active slider. The keypad
 * itself is registered later (from probe, after ATI completes).
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int iqs269_input_init(struct iqs269_private *iqs269)
{
	struct i2c_client *client = iqs269->client;
	unsigned int sw_code, keycode;
	int error, i, j;

	iqs269->keypad = devm_input_allocate_device(&client->dev);
	if (!iqs269->keypad)
		return -ENOMEM;

	iqs269->keypad->keycodemax = ARRAY_SIZE(iqs269->keycode);
	iqs269->keypad->keycode = iqs269->keycode;
	iqs269->keypad->keycodesize = sizeof(*iqs269->keycode);

	iqs269->keypad->name = "iqs269a_keypad";
	iqs269->keypad->id.bustype = BUS_I2C;

	for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) {
		sw_code = iqs269->switches[i].code;

		for (j = 0; j < IQS269_NUM_CH; j++) {
			keycode = iqs269->keycode[i * IQS269_NUM_CH + j];

			/*
			 * Hall-effect sensing repurposes a pair of dedicated
			 * channels, only one of which reports events.
			 */
			switch (j) {
			case IQS269_CHx_HALL_ACTIVE:
				if (iqs269->hall_enable &&
				    iqs269->switches[i].enabled)
					input_set_capability(iqs269->keypad,
							     EV_SW, sw_code);
				fallthrough;

			case IQS269_CHx_HALL_INACTIVE:
				if (iqs269->hall_enable)
					continue;
				fallthrough;

			default:
				if (keycode != KEY_RESERVED)
					input_set_capability(iqs269->keypad,
							     EV_KEY, keycode);
			}
		}
	}

	for (i = 0; i < IQS269_NUM_SL; i++) {
		/* Skip sliders with no participating channels. */
		if (!iqs269->sys_reg.slider_select[i])
			continue;

		iqs269->slider[i] = devm_input_allocate_device(&client->dev);
		if (!iqs269->slider[i])
			return -ENOMEM;

		iqs269->slider[i]->name = i ? "iqs269a_slider_1"
					    : "iqs269a_slider_0";
		iqs269->slider[i]->id.bustype = BUS_I2C;

		input_set_capability(iqs269->slider[i], EV_KEY, BTN_TOUCH);
		input_set_abs_params(iqs269->slider[i], ABS_X, 0, 255, 0, 0);

		error = input_register_device(iqs269->slider[i]);
		if (error) {
			dev_err(&client->dev,
				"Failed to register slider %d: %d\n", i, error);
			return error;
		}
	}

	return 0;
}
/*
 * Read the device's status flags and slider positions, and forward the
 * resulting key, switch and slider events to the input subsystem. Called
 * from the threaded IRQ handler.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int iqs269_report(struct iqs269_private *iqs269)
{
	struct i2c_client *client = iqs269->client;
	struct iqs269_flags flags;
	unsigned int sw_code, keycode;
	int error, i, j;
	u8 slider_x[IQS269_NUM_SL];
	u8 dir_mask, state;

	error = regmap_raw_read(iqs269->regmap, IQS269_SYS_FLAGS, &flags,
				sizeof(flags));
	if (error) {
		dev_err(&client->dev, "Failed to read device status: %d\n",
			error);
		return error;
	}

	/*
	 * The device resets itself if its own watchdog bites, which can happen
	 * in the event of an I2C communication error. In this case, the device
	 * asserts a SHOW_RESET interrupt and all registers must be restored.
	 */
	if (be16_to_cpu(flags.system) & IQS269_SYS_FLAGS_SHOW_RESET) {
		dev_err(&client->dev, "Unexpected device reset\n");

		error = iqs269_dev_init(iqs269);
		if (error)
			dev_err(&client->dev,
				"Failed to re-initialize device: %d\n", error);

		return error;
	}

	/* Nothing to report while ATI is still in progress. */
	if (be16_to_cpu(flags.system) & IQS269_SYS_FLAGS_IN_ATI)
		return 0;

	error = regmap_raw_read(iqs269->regmap, IQS269_SLIDER_X, slider_x,
				sizeof(slider_x));
	if (error) {
		dev_err(&client->dev, "Failed to read slider position: %d\n",
			error);
		return error;
	}

	for (i = 0; i < IQS269_NUM_SL; i++) {
		if (!iqs269->sys_reg.slider_select[i])
			continue;

		/*
		 * Report BTN_TOUCH if any channel that participates in the
		 * slider is in a state of touch.
		 */
		if (flags.states[IQS269_ST_OFFS_TOUCH] &
		    iqs269->sys_reg.slider_select[i]) {
			input_report_key(iqs269->slider[i], BTN_TOUCH, 1);
			input_report_abs(iqs269->slider[i], ABS_X, slider_x[i]);
		} else {
			input_report_key(iqs269->slider[i], BTN_TOUCH, 0);
		}

		input_sync(iqs269->slider[i]);
	}

	for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) {
		/* Select only channels moving in this event's direction. */
		dir_mask = flags.states[IQS269_ST_OFFS_DIR];
		if (!iqs269_events[i].dir_up)
			dir_mask = ~dir_mask;

		state = flags.states[iqs269_events[i].st_offs] & dir_mask;

		sw_code = iqs269->switches[i].code;

		for (j = 0; j < IQS269_NUM_CH; j++) {
			keycode = iqs269->keycode[i * IQS269_NUM_CH + j];

			switch (j) {
			case IQS269_CHx_HALL_ACTIVE:
				if (iqs269->hall_enable &&
				    iqs269->switches[i].enabled)
					input_report_switch(iqs269->keypad,
							    sw_code,
							    state & BIT(j));
				fallthrough;

			case IQS269_CHx_HALL_INACTIVE:
				if (iqs269->hall_enable)
					continue;
				fallthrough;

			default:
				input_report_key(iqs269->keypad, keycode,
						 state & BIT(j));
			}
		}
	}

	input_sync(iqs269->keypad);

	/*
	 * The following completion signals that ATI has finished, any initial
	 * switch states have been reported and the keypad can be registered.
	 */
	complete_all(&iqs269->ati_done);

	return 0;
}
static irqreturn_t iqs269_irq(int irq, void *context)
{
struct iqs269_private *iqs269 = context;
if (iqs269_report(iqs269))
return IRQ_NONE;
/*
* The device does not deassert its interrupt (RDY) pin until shortly
* after receiving an I2C stop condition; the following delay ensures
* the interrupt handler does not return before this time.
*/
iqs269_irq_wait();
return IRQ_HANDLED;
}
/*
 * Sysfs: show the raw counts of the channel selected via ch_number. Only
 * valid once ATI has completed and when hall-effect sensing is disabled.
 */
static ssize_t counts_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	struct i2c_client *client = iqs269->client;
	__le16 counts;
	int error;

	if (!iqs269->ati_current || iqs269->hall_enable)
		return -EPERM;

	if (!completion_done(&iqs269->ati_done))
		return -EBUSY;

	/*
	 * Unsolicited I2C communication prompts the device to assert its RDY
	 * pin, so disable the interrupt line until the operation is finished
	 * and RDY has been deasserted.
	 */
	disable_irq(client->irq);

	error = regmap_raw_read(iqs269->regmap,
				IQS269_CHx_COUNTS + iqs269->ch_num * 2,
				&counts, sizeof(counts));

	iqs269_irq_wait();
	enable_irq(client->irq);

	if (error)
		return error;

	return scnprintf(buf, PAGE_SIZE, "%u\n", le16_to_cpu(counts));
}
/*
 * Sysfs: show the hall-effect calibration bin. The bin's location within
 * the calibration register depends on which pad (left or right) is wired,
 * as derived from the RX enable masks of the two dedicated hall channels.
 */
static ssize_t hall_bin_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg;
	struct i2c_client *client = iqs269->client;
	unsigned int val;
	int error;

	/* See counts_show(): quiesce the IRQ line around the raw read. */
	disable_irq(client->irq);

	error = regmap_read(iqs269->regmap, IQS269_CAL_DATA_A, &val);

	iqs269_irq_wait();
	enable_irq(client->irq);

	if (error)
		return error;

	switch (ch_reg[IQS269_CHx_HALL_ACTIVE].rx_enable &
		ch_reg[IQS269_CHx_HALL_INACTIVE].rx_enable) {
	case IQS269_HALL_PAD_R:
		val &= IQS269_CAL_DATA_A_HALL_BIN_R_MASK;
		val >>= IQS269_CAL_DATA_A_HALL_BIN_R_SHIFT;
		break;

	case IQS269_HALL_PAD_L:
		val &= IQS269_CAL_DATA_A_HALL_BIN_L_MASK;
		val >>= IQS269_CAL_DATA_A_HALL_BIN_L_SHIFT;
		break;

	default:
		return -EINVAL;
	}

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
static ssize_t hall_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iqs269_private *iqs269 = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->hall_enable);
}
/* Sysfs: enable/disable hall-effect sensing; takes effect at next ATI. */
static ssize_t hall_enable_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t count)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	unsigned int state;
	int ret;

	ret = kstrtouint(buf, 10, &state);
	if (ret)
		return ret;

	mutex_lock(&iqs269->lock);

	/* Changing hall sensing invalidates the current calibration. */
	iqs269->hall_enable = state;
	iqs269->ati_current = false;

	mutex_unlock(&iqs269->lock);

	return count;
}
static ssize_t ch_number_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iqs269_private *iqs269 = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->ch_num);
}
/* Sysfs: select the channel addressed by the other attributes. */
static ssize_t ch_number_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	unsigned int ch;
	int ret;

	ret = kstrtouint(buf, 10, &ch);
	if (ret)
		return ret;

	if (ch >= IQS269_NUM_CH)
		return -EINVAL;

	iqs269->ch_num = ch;

	return count;
}
/* Sysfs: show the selected channel's RX enable mask. */
static ssize_t rx_enable_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 iqs269->sys_reg.ch_reg[iqs269->ch_num].rx_enable);
}
/* Sysfs: set the selected channel's RX enable mask (one register byte). */
static ssize_t rx_enable_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	unsigned int mask;
	int ret;

	ret = kstrtouint(buf, 10, &mask);
	if (ret)
		return ret;

	if (mask > 0xFF)
		return -EINVAL;

	mutex_lock(&iqs269->lock);

	iqs269->sys_reg.ch_reg[iqs269->ch_num].rx_enable = mask;
	iqs269->ati_current = false;

	mutex_unlock(&iqs269->lock);

	return count;
}
/* Sysfs: show the selected channel's ATI mode. */
static ssize_t ati_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	unsigned int mode;
	int ret;

	ret = iqs269_ati_mode_get(iqs269, iqs269->ch_num, &mode);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "%u\n", mode);
}
/* Sysfs: set the selected channel's ATI mode. */
static ssize_t ati_mode_store(struct device *dev,
			      struct device_attribute *attr, const char *buf,
			      size_t count)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	unsigned int mode;
	int ret;

	ret = kstrtouint(buf, 10, &mode);
	if (ret)
		return ret;

	ret = iqs269_ati_mode_set(iqs269, iqs269->ch_num, mode);
	if (ret)
		return ret;

	return count;
}
/* Sysfs: show the selected channel's ATI base in counts. */
static ssize_t ati_base_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	unsigned int base;
	int ret;

	ret = iqs269_ati_base_get(iqs269, iqs269->ch_num, &base);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "%u\n", base);
}
/* Sysfs: set the selected channel's ATI base in counts. */
static ssize_t ati_base_store(struct device *dev,
			      struct device_attribute *attr, const char *buf,
			      size_t count)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	unsigned int base;
	int ret;

	ret = kstrtouint(buf, 10, &base);
	if (ret)
		return ret;

	ret = iqs269_ati_base_set(iqs269, iqs269->ch_num, base);
	if (ret)
		return ret;

	return count;
}
/* Sysfs: show the selected channel's ATI target in counts. */
static ssize_t ati_target_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	unsigned int target;
	int ret;

	ret = iqs269_ati_target_get(iqs269, iqs269->ch_num, &target);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "%u\n", target);
}
/* Sysfs: set the selected channel's ATI target in counts. */
static ssize_t ati_target_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	unsigned int target;
	int ret;

	ret = kstrtouint(buf, 10, &target);
	if (ret)
		return ret;

	ret = iqs269_ati_target_set(iqs269, iqs269->ch_num, target);
	if (ret)
		return ret;

	return count;
}
static ssize_t ati_trigger_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iqs269_private *iqs269 = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%u\n",
iqs269->ati_current &&
completion_done(&iqs269->ati_done));
}
/*
 * Sysfs: writing a nonzero value re-initializes the device (which triggers
 * ATI) and blocks until the IRQ handler signals ATI completion, or 2 s pass.
 */
static ssize_t ati_trigger_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t count)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	struct i2c_client *client = iqs269->client;
	unsigned int val;
	int error;

	error = kstrtouint(buf, 10, &val);
	if (error)
		return error;

	/* Writing zero is accepted but does nothing. */
	if (!val)
		return count;

	/* Quiesce the IRQ line while the registers are rewritten. */
	disable_irq(client->irq);
	reinit_completion(&iqs269->ati_done);

	error = iqs269_dev_init(iqs269);

	iqs269_irq_wait();
	enable_irq(client->irq);

	if (error)
		return error;

	if (!wait_for_completion_timeout(&iqs269->ati_done,
					 msecs_to_jiffies(2000)))
		return -ETIMEDOUT;

	return count;
}
/* Sysfs attributes for calibration and debug (see *_show/*_store above). */
static DEVICE_ATTR_RO(counts);
static DEVICE_ATTR_RO(hall_bin);
static DEVICE_ATTR_RW(hall_enable);
static DEVICE_ATTR_RW(ch_number);
static DEVICE_ATTR_RW(rx_enable);
static DEVICE_ATTR_RW(ati_mode);
static DEVICE_ATTR_RW(ati_base);
static DEVICE_ATTR_RW(ati_target);
static DEVICE_ATTR_RW(ati_trigger);

static struct attribute *iqs269_attrs[] = {
	&dev_attr_counts.attr,
	&dev_attr_hall_bin.attr,
	&dev_attr_hall_enable.attr,
	&dev_attr_ch_number.attr,
	&dev_attr_rx_enable.attr,
	&dev_attr_ati_mode.attr,
	&dev_attr_ati_base.attr,
	&dev_attr_ati_target.attr,
	&dev_attr_ati_trigger.attr,
	NULL,
};

static const struct attribute_group iqs269_attr_group = {
	.attrs = iqs269_attrs,
};

/* 8-bit register addresses with 16-bit values. */
static const struct regmap_config iqs269_regmap_config = {
	.reg_bits = 8,
	.val_bits = 16,
	.max_register = IQS269_MAX_REG,
};
/*
 * Probe: identify the device, parse firmware properties, initialize it
 * (kicking off ATI), set up input devices, request the IRQ, then wait for
 * ATI to complete before registering the keypad and sysfs attributes.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int iqs269_probe(struct i2c_client *client)
{
	struct iqs269_ver_info ver_info;
	struct iqs269_private *iqs269;
	int error;

	iqs269 = devm_kzalloc(&client->dev, sizeof(*iqs269), GFP_KERNEL);
	if (!iqs269)
		return -ENOMEM;

	i2c_set_clientdata(client, iqs269);
	iqs269->client = client;

	iqs269->regmap = devm_regmap_init_i2c(client, &iqs269_regmap_config);
	if (IS_ERR(iqs269->regmap)) {
		error = PTR_ERR(iqs269->regmap);
		dev_err(&client->dev, "Failed to initialize register map: %d\n",
			error);
		return error;
	}

	mutex_init(&iqs269->lock);
	init_completion(&iqs269->ati_done);

	/* Verify we are actually talking to an IQS269A. */
	error = regmap_raw_read(iqs269->regmap, IQS269_VER_INFO, &ver_info,
				sizeof(ver_info));
	if (error)
		return error;

	if (ver_info.prod_num != IQS269_VER_INFO_PROD_NUM) {
		dev_err(&client->dev, "Unrecognized product number: 0x%02X\n",
			ver_info.prod_num);
		return -EINVAL;
	}

	error = iqs269_parse_prop(iqs269);
	if (error)
		return error;

	error = iqs269_dev_init(iqs269);
	if (error) {
		dev_err(&client->dev, "Failed to initialize device: %d\n",
			error);
		return error;
	}

	error = iqs269_input_init(iqs269);
	if (error)
		return error;

	error = devm_request_threaded_irq(&client->dev, client->irq,
					  NULL, iqs269_irq, IRQF_ONESHOT,
					  client->name, iqs269);
	if (error) {
		dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
		return error;
	}

	if (!wait_for_completion_timeout(&iqs269->ati_done,
					 msecs_to_jiffies(2000))) {
		dev_err(&client->dev, "Failed to complete ATI\n");
		return -ETIMEDOUT;
	}

	/*
	 * The keypad may include one or more switches and is not registered
	 * until ATI is complete and the initial switch states are read.
	 */
	error = input_register_device(iqs269->keypad);
	if (error) {
		dev_err(&client->dev, "Failed to register keypad: %d\n", error);
		return error;
	}

	error = devm_device_add_group(&client->dev, &iqs269_attr_group);
	if (error)
		dev_err(&client->dev, "Failed to add attributes: %d\n", error);

	return error;
}
/*
 * Return the cached general settings with the one-shot REDO_ATI/ACK_RESET
 * bits cleared and automatic power-mode switching disabled, as needed for
 * the suspend/resume register writes.
 */
static u16 iqs269_general_get(struct iqs269_private *iqs269)
{
	u16 general = be16_to_cpu(iqs269->sys_reg.general);

	general &= ~(IQS269_SYS_SETTINGS_REDO_ATI |
		     IQS269_SYS_SETTINGS_ACK_RESET);

	return general | IQS269_SYS_SETTINGS_DIS_AUTO;
}
/*
 * Suspend: force the device into the configured suspend power mode. A zero
 * power-mode field means no suspend mode was requested, so do nothing.
 */
static int iqs269_suspend(struct device *dev)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	struct i2c_client *client = iqs269->client;
	int error;
	u16 general = iqs269_general_get(iqs269);

	if (!(general & IQS269_SYS_SETTINGS_PWR_MODE_MASK))
		return 0;

	/* Quiesce the IRQ line around the unsolicited register write. */
	disable_irq(client->irq);

	error = regmap_write(iqs269->regmap, IQS269_SYS_SETTINGS, general);

	iqs269_irq_wait();
	enable_irq(client->irq);

	return error;
}
/*
 * Resume: first return the device to normal power mode (power-mode field
 * cleared, auto switching still disabled), then re-enable automatic power-
 * mode switching. Mirrors the two-step sequence expected by the device.
 */
static int iqs269_resume(struct device *dev)
{
	struct iqs269_private *iqs269 = dev_get_drvdata(dev);
	struct i2c_client *client = iqs269->client;
	int error;
	u16 general = iqs269_general_get(iqs269);

	if (!(general & IQS269_SYS_SETTINGS_PWR_MODE_MASK))
		return 0;

	/* Quiesce the IRQ line around the unsolicited register writes. */
	disable_irq(client->irq);

	error = regmap_write(iqs269->regmap, IQS269_SYS_SETTINGS,
			     general & ~IQS269_SYS_SETTINGS_PWR_MODE_MASK);
	if (!error)
		error = regmap_write(iqs269->regmap, IQS269_SYS_SETTINGS,
				     general & ~IQS269_SYS_SETTINGS_DIS_AUTO);

	iqs269_irq_wait();
	enable_irq(client->irq);

	return error;
}
static DEFINE_SIMPLE_DEV_PM_OPS(iqs269_pm, iqs269_suspend, iqs269_resume);

/* Device-tree match table. */
static const struct of_device_id iqs269_of_match[] = {
	{ .compatible = "azoteq,iqs269a" },
	{ }
};
MODULE_DEVICE_TABLE(of, iqs269_of_match);

static struct i2c_driver iqs269_i2c_driver = {
	.driver = {
		.name = "iqs269a",
		.of_match_table = iqs269_of_match,
		.pm = pm_sleep_ptr(&iqs269_pm),
	},
	.probe = iqs269_probe,
};
module_i2c_driver(iqs269_i2c_driver);

MODULE_AUTHOR("Jeff LaBundy <[email protected]>");
MODULE_DESCRIPTION("Azoteq IQS269A Capacitive Touch Controller");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/iqs269a.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for IMS Passenger Control Unit Devices
*
* Copyright (C) 2013 The IMS Company
*/
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/ihex.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/usb/input.h>
#include <linux/usb/cdc.h>
#include <asm/unaligned.h>
#define IMS_PCU_KEYMAP_LEN 32
/* State for the always-present buttons input device. */
struct ims_pcu_buttons {
	struct input_dev *input;
	char name[32];
	char phys[32];
	/* Slot i maps bit i of the button report to a keycode. */
	unsigned short keymap[IMS_PCU_KEYMAP_LEN];
};

/* State for the optional gamepad input device (model dependent). */
struct ims_pcu_gamepad {
	struct input_dev *input;
	char name[32];
	char phys[32];
};

/* State for the keyboard-backlight LED class device. */
struct ims_pcu_backlight {
	struct led_classdev cdev;
	char name[32];
};
#define IMS_PCU_PART_NUMBER_LEN 15
#define IMS_PCU_SERIAL_NUMBER_LEN 8
#define IMS_PCU_DOM_LEN 8
#define IMS_PCU_FW_VERSION_LEN (9 + 1)
#define IMS_PCU_BL_VERSION_LEN (9 + 1)
#define IMS_PCU_BL_RESET_REASON_LEN (2 + 1)
#define IMS_PCU_PCU_B_DEVICE_ID 5
#define IMS_PCU_BUF_SIZE 128
/*
 * Main per-device state, shared between the USB control and data
 * interfaces of one PCU.
 */
struct ims_pcu {
	struct usb_device *udev;
	struct device *dev; /* control interface's device, used for logging */
	unsigned int device_no;

	bool bootloader_mode;	/* device is running its bootloader image */

	/* Identity data cached from the device (see ims_pcu_get_info()). */
	char part_number[IMS_PCU_PART_NUMBER_LEN];
	char serial_number[IMS_PCU_SERIAL_NUMBER_LEN];
	char date_of_manufacturing[IMS_PCU_DOM_LEN];
	char fw_version[IMS_PCU_FW_VERSION_LEN];
	char bl_version[IMS_PCU_BL_VERSION_LEN];
	char reset_reason[IMS_PCU_BL_RESET_REASON_LEN];
	int update_firmware_status;	/* percent done, or negative errno */

	u8 device_id;

	u8 ofn_reg_addr;	/* OFN register selected via sysfs "reg_addr" */

	/* Control interface endpoint/URB resources. */
	struct usb_interface *ctrl_intf;
	struct usb_endpoint_descriptor *ep_ctrl;
	struct urb *urb_ctrl;
	u8 *urb_ctrl_buf;
	dma_addr_t ctrl_dma;
	size_t max_ctrl_size;

	/* Data interface endpoint/URB resources. */
	struct usb_interface *data_intf;
	struct usb_endpoint_descriptor *ep_in;
	struct urb *urb_in;
	u8 *urb_in_buf;
	dma_addr_t read_dma;
	size_t max_in_size;
	struct usb_endpoint_descriptor *ep_out;
	u8 *urb_out_buf;
	size_t max_out_size;

	/* De-framing state for ims_pcu_process_data(). */
	u8 read_buf[IMS_PCU_BUF_SIZE];
	u8 read_pos;
	u8 check_sum;
	bool have_stx;
	bool have_dle;

	/* Command/response rendezvous, guarded by cmd_mutex. */
	u8 cmd_buf[IMS_PCU_BUF_SIZE];
	u8 ack_id;
	u8 expected_response;
	u8 cmd_buf_len;
	struct completion cmd_done;
	struct mutex cmd_mutex;

	/* Address window that ims_pcu_flash_firmware() verifies back. */
	u32 fw_start_addr;
	u32 fw_end_addr;
	struct completion async_firmware_done;

	struct ims_pcu_buttons buttons;
	struct ims_pcu_gamepad *gamepad;
	struct ims_pcu_backlight backlight;

	bool setup_complete; /* Input and LED devices have been created */
};
/*********************************************************************
* Buttons Input device support *
*********************************************************************/
/*
 * Keymaps for the different PCU models; the array index is the bit
 * position in the 32-bit button report, unused slots stay KEY_RESERVED.
 */
static const unsigned short ims_pcu_keymap_1[] = {
	[1] = KEY_ATTENDANT_OFF,
	[2] = KEY_ATTENDANT_ON,
	[3] = KEY_LIGHTS_TOGGLE,
	[4] = KEY_VOLUMEUP,
	[5] = KEY_VOLUMEDOWN,
	[6] = KEY_INFO,
};

static const unsigned short ims_pcu_keymap_2[] = {
	[4] = KEY_VOLUMEUP,
	[5] = KEY_VOLUMEDOWN,
	[6] = KEY_INFO,
};

static const unsigned short ims_pcu_keymap_3[] = {
	[1] = KEY_HOMEPAGE,
	[2] = KEY_ATTENDANT_TOGGLE,
	[3] = KEY_LIGHTS_TOGGLE,
	[4] = KEY_VOLUMEUP,
	[5] = KEY_VOLUMEDOWN,
	[6] = KEY_DISPLAYTOGGLE,
	[18] = KEY_PLAYPAUSE,
};

static const unsigned short ims_pcu_keymap_4[] = {
	[1] = KEY_ATTENDANT_OFF,
	[2] = KEY_ATTENDANT_ON,
	[3] = KEY_LIGHTS_TOGGLE,
	[4] = KEY_VOLUMEUP,
	[5] = KEY_VOLUMEDOWN,
	[6] = KEY_INFO,
	[18] = KEY_PLAYPAUSE,
};

static const unsigned short ims_pcu_keymap_5[] = {
	[1] = KEY_ATTENDANT_OFF,
	[2] = KEY_ATTENDANT_ON,
	[3] = KEY_LIGHTS_TOGGLE,
};

/* Per-model capabilities, indexed by device id. */
struct ims_pcu_device_info {
	const unsigned short *keymap;
	size_t keymap_len;
	bool has_gamepad;
};

#define IMS_PCU_DEVINFO(_n, _gamepad)				\
	[_n] = {						\
		.keymap = ims_pcu_keymap_##_n,			\
		.keymap_len = ARRAY_SIZE(ims_pcu_keymap_##_n),	\
		.has_gamepad = _gamepad,			\
	}

static const struct ims_pcu_device_info ims_pcu_device_info[] = {
	IMS_PCU_DEVINFO(1, true),
	IMS_PCU_DEVINFO(2, true),
	IMS_PCU_DEVINFO(3, true),
	IMS_PCU_DEVINFO(4, true),
	IMS_PCU_DEVINFO(5, false),
};
/*
 * Forward a 32-bit button-state report to the buttons input device.
 * Bit i of @data corresponds to keymap slot i; reserved slots are
 * skipped.
 */
static void ims_pcu_buttons_report(struct ims_pcu *pcu, u32 data)
{
	struct ims_pcu_buttons *buttons = &pcu->buttons;
	struct input_dev *input = buttons->input;
	int i;

	/* Iterate the full keymap rather than hard-coding "32". */
	for (i = 0; i < IMS_PCU_KEYMAP_LEN; i++) {
		unsigned short keycode = buttons->keymap[i];

		if (keycode != KEY_RESERVED)
			input_report_key(input, keycode, data & (1UL << i));
	}

	input_sync(input);
}
/*
 * Allocate, populate and register the buttons input device for this PCU.
 *
 * @keymap/@keymap_len: model-specific keycode table copied into
 * pcu->buttons.keymap (remaining slots stay KEY_RESERVED from kzalloc'd
 * pcu state).
 *
 * Returns 0 on success or a negative error code.
 */
static int ims_pcu_setup_buttons(struct ims_pcu *pcu,
				 const unsigned short *keymap,
				 size_t keymap_len)
{
	struct ims_pcu_buttons *buttons = &pcu->buttons;
	struct input_dev *input;
	int i;
	int error;

	input = input_allocate_device();
	if (!input) {
		/* Fixed duplicated word in the original message. */
		dev_err(pcu->dev,
			"Not enough memory for input device\n");
		return -ENOMEM;
	}

	snprintf(buttons->name, sizeof(buttons->name),
		 "IMS PCU#%d Button Interface", pcu->device_no);

	usb_make_path(pcu->udev, buttons->phys, sizeof(buttons->phys));
	strlcat(buttons->phys, "/input0", sizeof(buttons->phys));

	memcpy(buttons->keymap, keymap, sizeof(*keymap) * keymap_len);

	input->name = buttons->name;
	input->phys = buttons->phys;
	usb_to_input_id(pcu->udev, &input->id);
	input->dev.parent = &pcu->ctrl_intf->dev;

	input->keycode = buttons->keymap;
	input->keycodemax = ARRAY_SIZE(buttons->keymap);
	input->keycodesize = sizeof(buttons->keymap[0]);

	/* Advertise every mapped keycode; drop the KEY_RESERVED filler. */
	__set_bit(EV_KEY, input->evbit);
	for (i = 0; i < IMS_PCU_KEYMAP_LEN; i++)
		__set_bit(buttons->keymap[i], input->keybit);
	__clear_bit(KEY_RESERVED, input->keybit);

	error = input_register_device(input);
	if (error) {
		dev_err(pcu->dev,
			"Failed to register buttons input device: %d\n",
			error);
		input_free_device(input);
		return error;
	}

	buttons->input = input;
	return 0;
}
/* Tear down the input device created by ims_pcu_setup_buttons(). */
static void ims_pcu_destroy_buttons(struct ims_pcu *pcu)
{
	input_unregister_device(pcu->buttons.input);
}
/*********************************************************************
* Gamepad Input device support *
*********************************************************************/
/*
 * Forward the gamepad portion of a button report to the gamepad input
 * device.  Bit layout (within IMS_PCU_GAMEPAD_MASK): 7..10 = A/B/X/Y,
 * 11..14 = d-pad down/up/left/right, 15 = start, 16 = select.
 */
static void ims_pcu_gamepad_report(struct ims_pcu *pcu, u32 data)
{
	struct ims_pcu_gamepad *gamepad = pcu->gamepad;
	struct input_dev *input = gamepad->input;
	int x, y;

	/* Fold the two opposing d-pad bits into a -1/0/+1 axis value. */
	x = !!(data & (1 << 14)) - !!(data & (1 << 13));
	y = !!(data & (1 << 12)) - !!(data & (1 << 11));

	input_report_abs(input, ABS_X, x);
	input_report_abs(input, ABS_Y, y);

	input_report_key(input, BTN_A, data & (1 << 7));
	input_report_key(input, BTN_B, data & (1 << 8));
	input_report_key(input, BTN_X, data & (1 << 9));
	input_report_key(input, BTN_Y, data & (1 << 10));
	input_report_key(input, BTN_START, data & (1 << 15));
	input_report_key(input, BTN_SELECT, data & (1 << 16));

	input_sync(input);
}
/*
 * Allocate and register the gamepad input device for models that have
 * one (see ims_pcu_device_info.has_gamepad).
 *
 * Returns 0 on success or a negative error code; on failure both the
 * gamepad state and the input device are freed.
 */
static int ims_pcu_setup_gamepad(struct ims_pcu *pcu)
{
	struct ims_pcu_gamepad *gamepad;
	struct input_dev *input;
	int error;

	gamepad = kzalloc(sizeof(struct ims_pcu_gamepad), GFP_KERNEL);
	input = input_allocate_device();
	if (!gamepad || !input) {
		dev_err(pcu->dev,
			"Not enough memory for gamepad device\n");
		error = -ENOMEM;
		goto err_free_mem;
	}

	gamepad->input = input;

	snprintf(gamepad->name, sizeof(gamepad->name),
		 "IMS PCU#%d Gamepad Interface", pcu->device_no);

	usb_make_path(pcu->udev, gamepad->phys, sizeof(gamepad->phys));
	strlcat(gamepad->phys, "/input1", sizeof(gamepad->phys));

	input->name = gamepad->name;
	input->phys = gamepad->phys;
	usb_to_input_id(pcu->udev, &input->id);
	input->dev.parent = &pcu->ctrl_intf->dev;

	__set_bit(EV_KEY, input->evbit);
	__set_bit(BTN_A, input->keybit);
	__set_bit(BTN_B, input->keybit);
	__set_bit(BTN_X, input->keybit);
	__set_bit(BTN_Y, input->keybit);
	__set_bit(BTN_START, input->keybit);
	__set_bit(BTN_SELECT, input->keybit);

	/* D-pad is exposed as two tri-state axes, -1..1. */
	__set_bit(EV_ABS, input->evbit);
	input_set_abs_params(input, ABS_X, -1, 1, 0, 0);
	input_set_abs_params(input, ABS_Y, -1, 1, 0, 0);

	error = input_register_device(input);
	if (error) {
		dev_err(pcu->dev,
			"Failed to register gamepad input device: %d\n",
			error);
		goto err_free_mem;
	}

	pcu->gamepad = gamepad;
	return 0;

err_free_mem:
	input_free_device(input);
	kfree(gamepad);
	return error;
}
/* Tear down the gamepad created by ims_pcu_setup_gamepad(). */
static void ims_pcu_destroy_gamepad(struct ims_pcu *pcu)
{
	input_unregister_device(pcu->gamepad->input);
	kfree(pcu->gamepad);
}
/*********************************************************************
* PCU Communication protocol handling *
*********************************************************************/
#define IMS_PCU_PROTOCOL_STX 0x02
#define IMS_PCU_PROTOCOL_ETX 0x03
#define IMS_PCU_PROTOCOL_DLE 0x10
/* PCU commands */
#define IMS_PCU_CMD_STATUS 0xa0
#define IMS_PCU_CMD_PCU_RESET 0xa1
#define IMS_PCU_CMD_RESET_REASON 0xa2
#define IMS_PCU_CMD_SEND_BUTTONS 0xa3
#define IMS_PCU_CMD_JUMP_TO_BTLDR 0xa4
#define IMS_PCU_CMD_GET_INFO 0xa5
#define IMS_PCU_CMD_SET_BRIGHTNESS 0xa6
#define IMS_PCU_CMD_EEPROM 0xa7
#define IMS_PCU_CMD_GET_FW_VERSION 0xa8
#define IMS_PCU_CMD_GET_BL_VERSION 0xa9
#define IMS_PCU_CMD_SET_INFO 0xab
#define IMS_PCU_CMD_GET_BRIGHTNESS 0xac
#define IMS_PCU_CMD_GET_DEVICE_ID 0xae
#define IMS_PCU_CMD_SPECIAL_INFO 0xb0
#define IMS_PCU_CMD_BOOTLOADER 0xb1 /* Pass data to bootloader */
#define IMS_PCU_CMD_OFN_SET_CONFIG 0xb3
#define IMS_PCU_CMD_OFN_GET_CONFIG 0xb4
/* PCU responses */
#define IMS_PCU_RSP_STATUS 0xc0
#define IMS_PCU_RSP_PCU_RESET 0 /* Originally 0xc1 */
#define IMS_PCU_RSP_RESET_REASON 0xc2
#define IMS_PCU_RSP_SEND_BUTTONS 0xc3
#define IMS_PCU_RSP_JUMP_TO_BTLDR 0 /* Originally 0xc4 */
#define IMS_PCU_RSP_GET_INFO 0xc5
#define IMS_PCU_RSP_SET_BRIGHTNESS 0xc6
#define IMS_PCU_RSP_EEPROM 0xc7
#define IMS_PCU_RSP_GET_FW_VERSION 0xc8
#define IMS_PCU_RSP_GET_BL_VERSION 0xc9
#define IMS_PCU_RSP_SET_INFO 0xcb
#define IMS_PCU_RSP_GET_BRIGHTNESS 0xcc
#define IMS_PCU_RSP_CMD_INVALID 0xcd
#define IMS_PCU_RSP_GET_DEVICE_ID 0xce
#define IMS_PCU_RSP_SPECIAL_INFO 0xd0
#define IMS_PCU_RSP_BOOTLOADER 0xd1 /* Bootloader response */
#define IMS_PCU_RSP_OFN_SET_CONFIG 0xd2
#define IMS_PCU_RSP_OFN_GET_CONFIG 0xd3
#define IMS_PCU_RSP_EVNT_BUTTONS 0xe0 /* Unsolicited, button state */
#define IMS_PCU_GAMEPAD_MASK 0x0001ff80UL /* Bits 7 through 16 */
#define IMS_PCU_MIN_PACKET_LEN 3
#define IMS_PCU_DATA_OFFSET 2
#define IMS_PCU_CMD_WRITE_TIMEOUT 100 /* msec */
#define IMS_PCU_CMD_RESPONSE_TIMEOUT 500 /* msec */
/*
 * Dispatch an unsolicited button-state packet: buttons get the report
 * with the gamepad bits masked off; the gamepad (if present) gets the
 * raw word.
 */
static void ims_pcu_report_events(struct ims_pcu *pcu)
{
	/* Button state is a big-endian u32 at offset 3 of the packet. */
	u32 data = get_unaligned_be32(&pcu->read_buf[3]);

	ims_pcu_buttons_report(pcu, data & ~IMS_PCU_GAMEPAD_MASK);
	if (pcu->gamepad)
		ims_pcu_gamepad_report(pcu, data);
}
/*
 * Process one complete, checksum-verified packet sitting in read_buf.
 * Unsolicited button events are forwarded to the input devices; anything
 * else is matched against the outstanding command and completes it.
 */
static void ims_pcu_handle_response(struct ims_pcu *pcu)
{
	switch (pcu->read_buf[0]) {
	case IMS_PCU_RSP_EVNT_BUTTONS:
		/* Drop events that arrive before input devices exist. */
		if (likely(pcu->setup_complete))
			ims_pcu_report_events(pcu);
		break;

	default:
		/*
		 * See if we got command completion.
		 * If both the sequence and response code match save
		 * the data and signal completion.
		 * (ack_id was post-incremented when the command was
		 * sent, hence the "- 1".)
		 */
		if (pcu->read_buf[0] == pcu->expected_response &&
		    pcu->read_buf[1] == pcu->ack_id - 1) {

			memcpy(pcu->cmd_buf, pcu->read_buf, pcu->read_pos);
			pcu->cmd_buf_len = pcu->read_pos;
			complete(&pcu->cmd_done);
		}
		break;
	}
}
/*
 * De-frame incoming URB data.  The wire protocol is byte-stuffed:
 * STX starts a packet, ETX ends it, and DLE escapes any of the three
 * control bytes appearing in the payload.  A running additive checksum
 * over the unstuffed bytes must end up at zero for a valid packet.
 */
static void ims_pcu_process_data(struct ims_pcu *pcu, struct urb *urb)
{
	int i;

	for (i = 0; i < urb->actual_length; i++) {
		u8 data = pcu->urb_in_buf[i];

		/* Skip everything until we get Start Xmit */
		if (!pcu->have_stx && data != IMS_PCU_PROTOCOL_STX)
			continue;

		/* Previous byte was DLE: take this byte literally. */
		if (pcu->have_dle) {
			pcu->have_dle = false;
			pcu->read_buf[pcu->read_pos++] = data;
			pcu->check_sum += data;
			continue;
		}

		switch (data) {
		case IMS_PCU_PROTOCOL_STX:
			/* An unescaped STX restarts packet collection. */
			if (pcu->have_stx)
				dev_warn(pcu->dev,
					 "Unexpected STX at byte %d, discarding old data\n",
					 pcu->read_pos);
			pcu->have_stx = true;
			pcu->have_dle = false;
			pcu->read_pos = 0;
			pcu->check_sum = 0;
			break;

		case IMS_PCU_PROTOCOL_DLE:
			pcu->have_dle = true;
			break;

		case IMS_PCU_PROTOCOL_ETX:
			if (pcu->read_pos < IMS_PCU_MIN_PACKET_LEN) {
				dev_warn(pcu->dev,
					 "Short packet received (%d bytes), ignoring\n",
					 pcu->read_pos);
			} else if (pcu->check_sum != 0) {
				/* Sum of payload + checksum byte must be 0. */
				dev_warn(pcu->dev,
					 "Invalid checksum in packet (%d bytes), ignoring\n",
					 pcu->read_pos);
			} else {
				ims_pcu_handle_response(pcu);
			}

			pcu->have_stx = false;
			pcu->have_dle = false;
			pcu->read_pos = 0;
			break;

		default:
			pcu->read_buf[pcu->read_pos++] = data;
			pcu->check_sum += data;
			break;
		}
	}
}
/* True for the protocol control bytes that must be DLE-escaped. */
static bool ims_pcu_byte_needs_escape(u8 byte)
{
	switch (byte) {
	case IMS_PCU_PROTOCOL_STX:
	case IMS_PCU_PROTOCOL_ETX:
	case IMS_PCU_PROTOCOL_DLE:
		return true;
	default:
		return false;
	}
}
/*
 * Synchronously push the first @len bytes of urb_out_buf to the device's
 * bulk-out endpoint.  @command and @chunk are only used for the debug
 * message.  Returns 0 on success or the usb_bulk_msg() error.
 */
static int ims_pcu_send_cmd_chunk(struct ims_pcu *pcu,
				  u8 command, int chunk, int len)
{
	int error;

	error = usb_bulk_msg(pcu->udev,
			     usb_sndbulkpipe(pcu->udev,
					     pcu->ep_out->bEndpointAddress),
			     pcu->urb_out_buf, len,
			     NULL, IMS_PCU_CMD_WRITE_TIMEOUT);
	if (error < 0) {
		dev_dbg(pcu->dev,
			"Sending 0x%02x command failed at chunk %d: %d\n",
			command, chunk, error);
		return error;
	}

	return 0;
}
/*
 * Frame and transmit one command: STX, command byte, sequence (ack) id,
 * DLE-escaped payload, two's-complement checksum, ETX.  The frame is
 * flushed to the device in max_out_size-bounded chunks.
 *
 * Caller must hold cmd_mutex (ack_id and urb_out_buf are shared state).
 * Returns 0 on success or a negative error code.
 */
static int ims_pcu_send_command(struct ims_pcu *pcu,
				u8 command, const u8 *data, int len)
{
	int count = 0;
	int chunk = 0;
	int delta;
	int i;
	int error;
	u8 csum = 0;
	u8 ack_id;

	pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_STX;

	/* We know the command need not be escaped */
	pcu->urb_out_buf[count++] = command;
	csum += command;

	/* 0xff is never used as a sequence id; skip over it. */
	ack_id = pcu->ack_id++;
	if (ack_id == 0xff)
		ack_id = pcu->ack_id++;

	if (ims_pcu_byte_needs_escape(ack_id))
		pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_DLE;

	pcu->urb_out_buf[count++] = ack_id;
	csum += ack_id;

	for (i = 0; i < len; i++) {
		/* Reserve room for the optional DLE escape byte. */
		delta = ims_pcu_byte_needs_escape(data[i]) ? 2 : 1;
		if (count + delta >= pcu->max_out_size) {
			error = ims_pcu_send_cmd_chunk(pcu, command,
						       ++chunk, count);
			if (error)
				return error;

			count = 0;
		}

		if (delta == 2)
			pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_DLE;

		pcu->urb_out_buf[count++] = data[i];
		csum += data[i];
	}

	/* Two's complement so the receiver's running sum lands on zero. */
	csum = 1 + ~csum;

	/* Checksum (possibly escaped) plus the trailing ETX. */
	delta = ims_pcu_byte_needs_escape(csum) ? 3 : 2;
	if (count + delta >= pcu->max_out_size) {
		error = ims_pcu_send_cmd_chunk(pcu, command, ++chunk, count);
		if (error)
			return error;

		count = 0;
	}

	if (delta == 3)
		pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_DLE;

	pcu->urb_out_buf[count++] = csum;
	pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_ETX;

	return ims_pcu_send_cmd_chunk(pcu, command, ++chunk, count);
}
/*
 * Send a command and, if @expected_response is non-zero, block until the
 * matching response arrives (see ims_pcu_handle_response()) or
 * @response_time milliseconds elapse.
 *
 * Caller must hold cmd_mutex.  Returns 0, a send error, or -ETIMEDOUT.
 */
static int __ims_pcu_execute_command(struct ims_pcu *pcu,
				     u8 command, const void *data, size_t len,
				     u8 expected_response, int response_time)
{
	int error;

	pcu->expected_response = expected_response;
	init_completion(&pcu->cmd_done);

	error = ims_pcu_send_command(pcu, command, data, len);
	if (error)
		return error;

	if (expected_response &&
	    !wait_for_completion_timeout(&pcu->cmd_done,
					 msecs_to_jiffies(response_time))) {
		dev_dbg(pcu->dev, "Command 0x%02x timed out\n", command);
		return -ETIMEDOUT;
	}

	return 0;
}
#define ims_pcu_execute_command(pcu, code, data, len) \
__ims_pcu_execute_command(pcu, \
IMS_PCU_CMD_##code, data, len, \
IMS_PCU_RSP_##code, \
IMS_PCU_CMD_RESPONSE_TIMEOUT)
#define ims_pcu_execute_query(pcu, code) \
ims_pcu_execute_command(pcu, code, NULL, 0)
/* Bootloader commands */
#define IMS_PCU_BL_CMD_QUERY_DEVICE 0xa1
#define IMS_PCU_BL_CMD_UNLOCK_CONFIG 0xa2
#define IMS_PCU_BL_CMD_ERASE_APP 0xa3
#define IMS_PCU_BL_CMD_PROGRAM_DEVICE 0xa4
#define IMS_PCU_BL_CMD_PROGRAM_COMPLETE 0xa5
#define IMS_PCU_BL_CMD_READ_APP 0xa6
#define IMS_PCU_BL_CMD_RESET_DEVICE 0xa7
#define IMS_PCU_BL_CMD_LAUNCH_APP 0xa8
/* Bootloader responses */
#define IMS_PCU_BL_RSP_QUERY_DEVICE 0xc1
#define IMS_PCU_BL_RSP_UNLOCK_CONFIG 0xc2
#define IMS_PCU_BL_RSP_ERASE_APP 0xc3
#define IMS_PCU_BL_RSP_PROGRAM_DEVICE 0xc4
#define IMS_PCU_BL_RSP_PROGRAM_COMPLETE 0xc5
#define IMS_PCU_BL_RSP_READ_APP 0xc6
#define IMS_PCU_BL_RSP_RESET_DEVICE 0 /* originally 0xa7 */
#define IMS_PCU_BL_RSP_LAUNCH_APP 0 /* originally 0xa8 */
#define IMS_PCU_BL_DATA_OFFSET 3
/*
 * Execute a bootloader sub-command: the bootloader opcode and @data are
 * wrapped in an IMS_PCU_CMD_BOOTLOADER command, and the bootloader's
 * reply code (cmd_buf[2]) is checked against @expected_response.
 *
 * Caller must hold cmd_mutex.  Returns 0, a transport error, or -EINVAL
 * on an unexpected bootloader reply.
 */
static int __ims_pcu_execute_bl_command(struct ims_pcu *pcu,
					u8 command, const void *data, size_t len,
					u8 expected_response, int response_time)
{
	int error;

	pcu->cmd_buf[0] = command;
	if (data)
		memcpy(&pcu->cmd_buf[1], data, len);

	error = __ims_pcu_execute_command(pcu,
			IMS_PCU_CMD_BOOTLOADER, pcu->cmd_buf, len + 1,
			expected_response ? IMS_PCU_RSP_BOOTLOADER : 0,
			response_time);
	if (error) {
		dev_err(pcu->dev,
			"Failure when sending 0x%02x command to bootloader, error: %d\n",
			pcu->cmd_buf[0], error);
		return error;
	}

	if (expected_response && pcu->cmd_buf[2] != expected_response) {
		dev_err(pcu->dev,
			"Unexpected response from bootloader: 0x%02x, wanted 0x%02x\n",
			pcu->cmd_buf[2], expected_response);
		return -EINVAL;
	}

	return 0;
}
/*
 * Convenience wrapper pairing each bootloader command code with its
 * expected response code.  (Removed a stray trailing backslash that made
 * the macro body continue onto the following source line.)
 */
#define ims_pcu_execute_bl_command(pcu, code, data, len, timeout)	\
	__ims_pcu_execute_bl_command(pcu,				\
			IMS_PCU_BL_CMD_##code, data, len,		\
			IMS_PCU_BL_RSP_##code, timeout)
#define IMS_PCU_INFO_PART_OFFSET 2
#define IMS_PCU_INFO_DOM_OFFSET 17
#define IMS_PCU_INFO_SERIAL_OFFSET 25
#define IMS_PCU_SET_INFO_SIZE 31
/*
 * Fetch part number, date of manufacturing and serial number from the
 * device (GET_INFO) into the cached fields of @pcu.
 *
 * Caller must hold cmd_mutex.  Returns 0 or a negative error code.
 */
static int ims_pcu_get_info(struct ims_pcu *pcu)
{
	int error;

	error = ims_pcu_execute_query(pcu, GET_INFO);
	if (error) {
		dev_err(pcu->dev,
			"GET_INFO command failed, error: %d\n", error);
		return error;
	}

	/* Fields live at fixed offsets within the response payload. */
	memcpy(pcu->part_number,
	       &pcu->cmd_buf[IMS_PCU_INFO_PART_OFFSET],
	       sizeof(pcu->part_number));
	memcpy(pcu->date_of_manufacturing,
	       &pcu->cmd_buf[IMS_PCU_INFO_DOM_OFFSET],
	       sizeof(pcu->date_of_manufacturing));
	memcpy(pcu->serial_number,
	       &pcu->cmd_buf[IMS_PCU_INFO_SERIAL_OFFSET],
	       sizeof(pcu->serial_number));

	return 0;
}
/*
 * Push the cached part number, date of manufacturing and serial number
 * back to the device (SET_INFO) — the inverse of ims_pcu_get_info().
 *
 * Caller must hold cmd_mutex.  Returns 0 or a negative error code.
 */
static int ims_pcu_set_info(struct ims_pcu *pcu)
{
	int error;

	/* Stage the fields at the same offsets GET_INFO reports them. */
	memcpy(&pcu->cmd_buf[IMS_PCU_INFO_PART_OFFSET],
	       pcu->part_number, sizeof(pcu->part_number));
	memcpy(&pcu->cmd_buf[IMS_PCU_INFO_DOM_OFFSET],
	       pcu->date_of_manufacturing, sizeof(pcu->date_of_manufacturing));
	memcpy(&pcu->cmd_buf[IMS_PCU_INFO_SERIAL_OFFSET],
	       pcu->serial_number, sizeof(pcu->serial_number));

	error = ims_pcu_execute_command(pcu, SET_INFO,
					&pcu->cmd_buf[IMS_PCU_DATA_OFFSET],
					IMS_PCU_SET_INFO_SIZE);
	if (error) {
		dev_err(pcu->dev,
			"Failed to update device information, error: %d\n",
			error);
		return error;
	}

	return 0;
}
/*
 * Ask the application firmware to jump into the bootloader.  The device
 * will disconnect and re-enumerate as a bootloader-mode device.
 *
 * Caller must hold cmd_mutex.  Returns 0 or a negative error code.
 */
static int ims_pcu_switch_to_bootloader(struct ims_pcu *pcu)
{
	int error;

	/* Execute jump to the bootloader */
	error = ims_pcu_execute_command(pcu, JUMP_TO_BTLDR, NULL, 0);
	if (error)
		dev_err(pcu->dev,
			"Failure when sending JUMP TO BOOTLOADER command, error: %d\n",
			error);

	return error;
}
/*********************************************************************
* Firmware Update handling *
*********************************************************************/
#define IMS_PCU_FIRMWARE_NAME "imspcu.fw"
/*
 * On-the-wire layout of one flash fragment exchanged with the
 * bootloader: little-endian address, length, then @len data bytes.
 */
struct ims_pcu_flash_fmt {
	__le32 addr;
	u8 len;
	u8 data[];
};
/* Count the ihex records in @fw (used for progress reporting). */
static unsigned int ims_pcu_count_fw_records(const struct firmware *fw)
{
	const struct ihex_binrec *rec;
	unsigned int n_records = 0;

	for (rec = (const struct ihex_binrec *)fw->data; rec;
	     rec = ihex_next_binrec(rec))
		n_records++;

	return n_records;
}
/*
 * Read back a freshly programmed block from the device (READ_APP) and
 * verify that its address, length and contents match what was written.
 *
 * Caller must hold cmd_mutex.  Returns 0 on match, -EINVAL on mismatch,
 * or a transport error.
 */
static int ims_pcu_verify_block(struct ims_pcu *pcu,
				u32 addr, u8 len, const u8 *data)
{
	struct ims_pcu_flash_fmt *fragment;
	int error;

	/* Request header (addr + len) goes in cmd_buf after the opcode. */
	fragment = (void *)&pcu->cmd_buf[1];
	put_unaligned_le32(addr, &fragment->addr);
	fragment->len = len;

	error = ims_pcu_execute_bl_command(pcu, READ_APP, NULL, 5,
					   IMS_PCU_CMD_RESPONSE_TIMEOUT);
	if (error) {
		dev_err(pcu->dev,
			"Failed to retrieve block at 0x%08x, len %d, error: %d\n",
			addr, len, error);
		return error;
	}

	/* Response fragment starts after the bootloader reply header. */
	fragment = (void *)&pcu->cmd_buf[IMS_PCU_BL_DATA_OFFSET];
	if (get_unaligned_le32(&fragment->addr) != addr ||
	    fragment->len != len) {
		dev_err(pcu->dev,
			"Wrong block when retrieving 0x%08x (0x%08x), len %d (%d)\n",
			addr, get_unaligned_le32(&fragment->addr),
			len, fragment->len);
		return -EINVAL;
	}

	if (memcmp(fragment->data, data, len)) {
		dev_err(pcu->dev,
			"Mismatch in block at 0x%08x, len %d\n",
			addr, len);
		return -EINVAL;
	}

	return 0;
}
/*
 * Erase the application image and program every ihex record of @fw,
 * verifying blocks that fall inside [fw_start_addr, fw_end_addr) and
 * updating update_firmware_status as a percentage as it goes.
 *
 * Caller must hold cmd_mutex.  Returns 0 or a negative error code from
 * erase/program/verify; a PROGRAM_COMPLETE failure is only logged.
 */
static int ims_pcu_flash_firmware(struct ims_pcu *pcu,
				  const struct firmware *fw,
				  unsigned int n_fw_records)
{
	const struct ihex_binrec *rec = (const struct ihex_binrec *)fw->data;
	struct ims_pcu_flash_fmt *fragment;
	unsigned int count = 0;
	u32 addr;
	u8 len;
	int error;

	error = ims_pcu_execute_bl_command(pcu, ERASE_APP, NULL, 0, 2000);
	if (error) {
		dev_err(pcu->dev,
			"Failed to erase application image, error: %d\n",
			error);
		return error;
	}

	while (rec) {
		/*
		 * The firmware format is messed up for some reason.
		 * The address twice that of what is needed for some
		 * reason and we end up overwriting half of the data
		 * with the next record.
		 */
		addr = be32_to_cpu(rec->addr) / 2;
		len = be16_to_cpu(rec->len);

		fragment = (void *)&pcu->cmd_buf[1];
		put_unaligned_le32(addr, &fragment->addr);
		fragment->len = len;
		memcpy(fragment->data, rec->data, len);

		error = ims_pcu_execute_bl_command(pcu, PROGRAM_DEVICE,
						   NULL, len + 5,
						   IMS_PCU_CMD_RESPONSE_TIMEOUT);
		if (error) {
			dev_err(pcu->dev,
				"Failed to write block at 0x%08x, len %d, error: %d\n",
				addr, len, error);
			return error;
		}

		/* Only read back blocks within the application window. */
		if (addr >= pcu->fw_start_addr && addr < pcu->fw_end_addr) {
			error = ims_pcu_verify_block(pcu, addr, len, rec->data);
			if (error)
				return error;
		}

		count++;
		pcu->update_firmware_status = (count * 100) / n_fw_records;

		rec = ihex_next_binrec(rec);
	}

	error = ims_pcu_execute_bl_command(pcu, PROGRAM_COMPLETE,
					   NULL, 0, 2000);
	if (error)
		dev_err(pcu->dev,
			"Failed to send PROGRAM_COMPLETE, error: %d\n",
			error);

	return 0;
}
/*
 * Flash @fw and launch the new application image.  The final status
 * (0 or a negative errno) is stored in update_firmware_status and
 * published via the sysfs "update_firmware_status" attribute.
 *
 * Caller must hold cmd_mutex.
 */
static int ims_pcu_handle_firmware_update(struct ims_pcu *pcu,
					  const struct firmware *fw)
{
	unsigned int n_fw_records;
	int retval;

	dev_info(pcu->dev, "Updating firmware %s, size: %zu\n",
		 IMS_PCU_FIRMWARE_NAME, fw->size);

	n_fw_records = ims_pcu_count_fw_records(fw);

	retval = ims_pcu_flash_firmware(pcu, fw, n_fw_records);
	if (retval)
		goto out;

	retval = ims_pcu_execute_bl_command(pcu, LAUNCH_APP, NULL, 0, 0);
	if (retval)
		dev_err(pcu->dev,
			"Failed to start application image, error: %d\n",
			retval);

out:
	pcu->update_firmware_status = retval;
	sysfs_notify(&pcu->dev->kobj, NULL, "update_firmware_status");
	return retval;
}
/*
 * Completion callback for request_firmware_nowait(): validate the ihex
 * image and flash it.  Always signals async_firmware_done so teardown
 * can wait for this to finish.
 */
static void ims_pcu_process_async_firmware(const struct firmware *fw,
					   void *context)
{
	struct ims_pcu *pcu = context;
	int error;

	if (!fw) {
		dev_err(pcu->dev, "Failed to get firmware %s\n",
			IMS_PCU_FIRMWARE_NAME);
		goto out;
	}

	error = ihex_validate_fw(fw);
	if (error) {
		dev_err(pcu->dev, "Firmware %s is invalid\n",
			IMS_PCU_FIRMWARE_NAME);
		goto out;
	}

	mutex_lock(&pcu->cmd_mutex);
	ims_pcu_handle_firmware_update(pcu, fw);
	mutex_unlock(&pcu->cmd_mutex);

	release_firmware(fw);

out:
	complete(&pcu->async_firmware_done);
}
/*********************************************************************
* Backlight LED device support *
*********************************************************************/
#define IMS_PCU_MAX_BRIGHTNESS 31998
/*
 * LED class "set brightness" op: send the 16-bit little-endian level
 * to the device via SET_BRIGHTNESS.  -ENODEV (device gone) is expected
 * during teardown and not logged.
 */
static int ims_pcu_backlight_set_brightness(struct led_classdev *cdev,
					    enum led_brightness value)
{
	struct ims_pcu_backlight *backlight =
			container_of(cdev, struct ims_pcu_backlight, cdev);
	struct ims_pcu *pcu =
			container_of(backlight, struct ims_pcu, backlight);
	__le16 br_val = cpu_to_le16(value);
	int error;

	mutex_lock(&pcu->cmd_mutex);

	error = ims_pcu_execute_command(pcu, SET_BRIGHTNESS,
					&br_val, sizeof(br_val));
	if (error && error != -ENODEV)
		dev_warn(pcu->dev,
			 "Failed to set desired brightness %u, error: %d\n",
			 value, error);

	mutex_unlock(&pcu->cmd_mutex);

	return error;
}
/*
 * LED class "get brightness" op: query the device (GET_BRIGHTNESS) and
 * return the 16-bit level from the response; reports LED_OFF if the
 * query fails.
 */
static enum led_brightness
ims_pcu_backlight_get_brightness(struct led_classdev *cdev)
{
	struct ims_pcu_backlight *backlight =
			container_of(cdev, struct ims_pcu_backlight, cdev);
	struct ims_pcu *pcu =
			container_of(backlight, struct ims_pcu, backlight);
	int brightness;
	int error;

	mutex_lock(&pcu->cmd_mutex);

	error = ims_pcu_execute_query(pcu, GET_BRIGHTNESS);
	if (error) {
		dev_warn(pcu->dev,
			 "Failed to get current brightness, error: %d\n",
			 error);
		/* Assume the LED is OFF */
		brightness = LED_OFF;
	} else {
		brightness =
			get_unaligned_le16(&pcu->cmd_buf[IMS_PCU_DATA_OFFSET]);
	}

	mutex_unlock(&pcu->cmd_mutex);

	return brightness;
}
/*
 * Register the keyboard-backlight LED class device ("pcuN::kbd_backlight").
 * Returns 0 on success or a negative error code.
 */
static int ims_pcu_setup_backlight(struct ims_pcu *pcu)
{
	struct ims_pcu_backlight *backlight = &pcu->backlight;
	int error;

	snprintf(backlight->name, sizeof(backlight->name),
		 "pcu%d::kbd_backlight", pcu->device_no);

	backlight->cdev.name = backlight->name;
	backlight->cdev.max_brightness = IMS_PCU_MAX_BRIGHTNESS;
	backlight->cdev.brightness_get = ims_pcu_backlight_get_brightness;
	backlight->cdev.brightness_set_blocking =
					 ims_pcu_backlight_set_brightness;

	error = led_classdev_register(pcu->dev, &backlight->cdev);
	if (error) {
		dev_err(pcu->dev,
			"Failed to register backlight LED device, error: %d\n",
			error);
		return error;
	}

	return 0;
}
/* Unregister the LED created by ims_pcu_setup_backlight(). */
static void ims_pcu_destroy_backlight(struct ims_pcu *pcu)
{
	led_classdev_unregister(&pcu->backlight.cdev);
}
/*********************************************************************
* Sysfs attributes handling *
*********************************************************************/
/*
 * A sysfs attribute bound to a fixed-size character field inside
 * struct ims_pcu (located via field_offset/field_length).
 */
struct ims_pcu_attribute {
	struct device_attribute dattr;
	size_t field_offset;
	int field_length;
};
/* Show handler: print the bound ims_pcu field, bounded by its length. */
static ssize_t ims_pcu_attribute_show(struct device *dev,
				      struct device_attribute *dattr,
				      char *buf)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct ims_pcu *pcu = usb_get_intfdata(intf);
	struct ims_pcu_attribute *attr =
			container_of(dattr, struct ims_pcu_attribute, dattr);
	/* The field need not be NUL-terminated, hence the %.*s cap. */
	char *field = (char *)pcu + attr->field_offset;

	return scnprintf(buf, PAGE_SIZE, "%.*s\n", attr->field_length, field);
}
/*
 * Store handler: copy the user's value into the bound ims_pcu field and
 * push it to the device with SET_INFO.  On failure the cached fields are
 * refreshed from the device since the local copy was already clobbered.
 */
static ssize_t ims_pcu_attribute_store(struct device *dev,
				       struct device_attribute *dattr,
				       const char *buf, size_t count)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct ims_pcu *pcu = usb_get_intfdata(intf);
	struct ims_pcu_attribute *attr =
			container_of(dattr, struct ims_pcu_attribute, dattr);
	char *field = (char *)pcu + attr->field_offset;
	size_t data_len;
	int error;

	if (count > attr->field_length)
		return -EINVAL;

	data_len = strnlen(buf, attr->field_length);
	if (data_len > attr->field_length)
		return -EINVAL;

	error = mutex_lock_interruptible(&pcu->cmd_mutex);
	if (error)
		return error;

	memset(field, 0, attr->field_length);
	memcpy(field, buf, data_len);

	error = ims_pcu_set_info(pcu);

	/*
	 * Even if update failed, let's fetch the info again as we just
	 * clobbered one of the fields.
	 */
	ims_pcu_get_info(pcu);

	mutex_unlock(&pcu->cmd_mutex);

	return error < 0 ? error : count;
}
#define IMS_PCU_ATTR(_field, _mode) \
struct ims_pcu_attribute ims_pcu_attr_##_field = { \
.dattr = __ATTR(_field, _mode, \
ims_pcu_attribute_show, \
ims_pcu_attribute_store), \
.field_offset = offsetof(struct ims_pcu, _field), \
.field_length = sizeof(((struct ims_pcu *)NULL)->_field), \
}
#define IMS_PCU_RO_ATTR(_field) \
IMS_PCU_ATTR(_field, S_IRUGO)
#define IMS_PCU_RW_ATTR(_field) \
IMS_PCU_ATTR(_field, S_IRUGO | S_IWUSR)
static IMS_PCU_RW_ATTR(part_number);
static IMS_PCU_RW_ATTR(serial_number);
static IMS_PCU_RW_ATTR(date_of_manufacturing);
static IMS_PCU_RO_ATTR(fw_version);
static IMS_PCU_RO_ATTR(bl_version);
static IMS_PCU_RO_ATTR(reset_reason);
/*
 * Write-only sysfs attribute: writing "1" sends PCU_RESET to the device.
 * Any other value is rejected with -EINVAL.
 */
static ssize_t ims_pcu_reset_device(struct device *dev,
				    struct device_attribute *dattr,
				    const char *buf, size_t count)
{
	static const u8 reset_byte = 1;
	struct usb_interface *intf = to_usb_interface(dev);
	struct ims_pcu *pcu = usb_get_intfdata(intf);
	int value;
	int error;

	error = kstrtoint(buf, 0, &value);
	if (error)
		return error;

	if (value != 1)
		return -EINVAL;

	dev_info(pcu->dev, "Attempting to reset device\n");

	error = ims_pcu_execute_command(pcu, PCU_RESET, &reset_byte, 1);
	if (error) {
		dev_info(pcu->dev,
			 "Failed to reset device, error: %d\n",
			 error);
		return error;
	}

	return count;
}
static DEVICE_ATTR(reset_device, S_IWUSR, NULL, ims_pcu_reset_device);
/*
 * Write-only sysfs attribute: writing "1" starts a firmware update.
 * In bootloader mode the image is flashed directly; in application mode
 * the device is told to jump to the bootloader first (it will then
 * re-enumerate and the update continues asynchronously).
 */
static ssize_t ims_pcu_update_firmware_store(struct device *dev,
					     struct device_attribute *dattr,
					     const char *buf, size_t count)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct ims_pcu *pcu = usb_get_intfdata(intf);
	const struct firmware *fw = NULL;
	int value;
	int error;

	error = kstrtoint(buf, 0, &value);
	if (error)
		return error;

	if (value != 1)
		return -EINVAL;

	error = mutex_lock_interruptible(&pcu->cmd_mutex);
	if (error)
		return error;

	error = request_ihex_firmware(&fw, IMS_PCU_FIRMWARE_NAME, pcu->dev);
	if (error) {
		dev_err(pcu->dev, "Failed to request firmware %s, error: %d\n",
			IMS_PCU_FIRMWARE_NAME, error);
		goto out;
	}

	/*
	 * If we are already in bootloader mode we can proceed with
	 * flashing the firmware.
	 *
	 * If we are in application mode, then we need to switch into
	 * bootloader mode, which will cause the device to disconnect
	 * and reconnect as different device.
	 */
	if (pcu->bootloader_mode)
		error = ims_pcu_handle_firmware_update(pcu, fw);
	else
		error = ims_pcu_switch_to_bootloader(pcu);

	release_firmware(fw);
out:
	mutex_unlock(&pcu->cmd_mutex);
	return error ?: count;
}
static DEVICE_ATTR(update_firmware, S_IWUSR,
NULL, ims_pcu_update_firmware_store);
/*
 * Read-only sysfs attribute: last firmware-update status — a percentage
 * while flashing, or a negative errno on failure.
 */
static ssize_t
ims_pcu_update_firmware_status_show(struct device *dev,
				    struct device_attribute *dattr,
				    char *buf)
{
	struct ims_pcu *pcu = usb_get_intfdata(to_usb_interface(dev));

	return scnprintf(buf, PAGE_SIZE, "%d\n", pcu->update_firmware_status);
}
static DEVICE_ATTR(update_firmware_status, S_IRUGO,
ims_pcu_update_firmware_status_show, NULL);
static struct attribute *ims_pcu_attrs[] = {
&ims_pcu_attr_part_number.dattr.attr,
&ims_pcu_attr_serial_number.dattr.attr,
&ims_pcu_attr_date_of_manufacturing.dattr.attr,
&ims_pcu_attr_fw_version.dattr.attr,
&ims_pcu_attr_bl_version.dattr.attr,
&ims_pcu_attr_reset_reason.dattr.attr,
&dev_attr_reset_device.attr,
&dev_attr_update_firmware.attr,
&dev_attr_update_firmware_status.attr,
NULL
};
/*
 * Attribute-group visibility: in bootloader mode only the firmware
 * update/status/reset attributes are exposed; in application mode
 * everything except the update status is exposed.
 */
static umode_t ims_pcu_is_attr_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct usb_interface *intf = to_usb_interface(dev);
	struct ims_pcu *pcu = usb_get_intfdata(intf);
	umode_t mode = attr->mode;

	if (pcu->bootloader_mode) {
		if (attr != &dev_attr_update_firmware_status.attr &&
		    attr != &dev_attr_update_firmware.attr &&
		    attr != &dev_attr_reset_device.attr) {
			mode = 0;
		}
	} else {
		if (attr == &dev_attr_update_firmware_status.attr)
			mode = 0;
	}

	return mode;
}
static const struct attribute_group ims_pcu_attr_group = {
.is_visible = ims_pcu_is_attr_visible,
.attrs = ims_pcu_attrs,
};
/* Support for a separate OFN attribute group */
#define OFN_REG_RESULT_OFFSET 2
/*
 * Read one OFN (optical finger navigation) register.  The device
 * returns a signed 16-bit result word; a negative value signals
 * failure and the register data is its low byte.
 *
 * Caller must hold cmd_mutex.  Returns 0, -EIO, or a transport error.
 */
static int ims_pcu_read_ofn_config(struct ims_pcu *pcu, u8 addr, u8 *data)
{
	int error;
	s16 result;

	error = ims_pcu_execute_command(pcu, OFN_GET_CONFIG,
					&addr, sizeof(addr));
	if (error)
		return error;

	result = (s16)get_unaligned_le16(pcu->cmd_buf + OFN_REG_RESULT_OFFSET);
	if (result < 0)
		return -EIO;

	/* We only need LSB */
	*data = pcu->cmd_buf[OFN_REG_RESULT_OFFSET];
	return 0;
}
/*
 * Write one OFN register.  A negative 16-bit result word in the
 * response signals failure.
 *
 * Caller must hold cmd_mutex.  Returns 0, -EIO, or a transport error.
 */
static int ims_pcu_write_ofn_config(struct ims_pcu *pcu, u8 addr, u8 data)
{
	u8 buffer[] = { addr, data };
	int error;
	s16 result;

	error = ims_pcu_execute_command(pcu, OFN_SET_CONFIG,
					&buffer, sizeof(buffer));
	if (error)
		return error;

	result = (s16)get_unaligned_le16(pcu->cmd_buf + OFN_REG_RESULT_OFFSET);
	if (result < 0)
		return -EIO;

	return 0;
}
/* Show the OFN register currently selected via "reg_addr", in hex. */
static ssize_t ims_pcu_ofn_reg_data_show(struct device *dev,
					 struct device_attribute *dattr,
					 char *buf)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct ims_pcu *pcu = usb_get_intfdata(intf);
	int error;
	u8 data;

	mutex_lock(&pcu->cmd_mutex);
	error = ims_pcu_read_ofn_config(pcu, pcu->ofn_reg_addr, &data);
	mutex_unlock(&pcu->cmd_mutex);

	if (error)
		return error;

	return scnprintf(buf, PAGE_SIZE, "%x\n", data);
}
/* Write the OFN register currently selected via "reg_addr". */
static ssize_t ims_pcu_ofn_reg_data_store(struct device *dev,
					  struct device_attribute *dattr,
					  const char *buf, size_t count)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct ims_pcu *pcu = usb_get_intfdata(intf);
	int error;
	u8 value;

	error = kstrtou8(buf, 0, &value);
	if (error)
		return error;

	mutex_lock(&pcu->cmd_mutex);
	error = ims_pcu_write_ofn_config(pcu, pcu->ofn_reg_addr, value);
	mutex_unlock(&pcu->cmd_mutex);

	return error ?: count;
}
static DEVICE_ATTR(reg_data, S_IRUGO | S_IWUSR,
ims_pcu_ofn_reg_data_show, ims_pcu_ofn_reg_data_store);
/* Show the currently selected OFN register address, in hex. */
static ssize_t ims_pcu_ofn_reg_addr_show(struct device *dev,
					 struct device_attribute *dattr,
					 char *buf)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct ims_pcu *pcu = usb_get_intfdata(intf);
	ssize_t len;

	/* Serialize against concurrent reg_addr updates. */
	mutex_lock(&pcu->cmd_mutex);
	len = scnprintf(buf, PAGE_SIZE, "%x\n", pcu->ofn_reg_addr);
	mutex_unlock(&pcu->cmd_mutex);

	return len;
}
/* Select which OFN register "reg_data" reads and writes. */
static ssize_t ims_pcu_ofn_reg_addr_store(struct device *dev,
					  struct device_attribute *dattr,
					  const char *buf, size_t count)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct ims_pcu *pcu = usb_get_intfdata(intf);
	int error;
	u8 value;

	error = kstrtou8(buf, 0, &value);
	if (error)
		return error;

	mutex_lock(&pcu->cmd_mutex);
	pcu->ofn_reg_addr = value;
	mutex_unlock(&pcu->cmd_mutex);

	return count;
}
/* ofn/reg_addr: selects the OFN register accessed through ofn/reg_data. */
static DEVICE_ATTR(reg_addr, S_IRUGO | S_IWUSR,
		   ims_pcu_ofn_reg_addr_show, ims_pcu_ofn_reg_addr_store);
/*
 * Sysfs attribute exposing a single bit of an OFN configuration register:
 * @addr is the register address, @nr the bit number within it.
 */
struct ims_pcu_ofn_bit_attribute {
	struct device_attribute dattr;
	u8 addr;
	u8 nr;
};
/* Show a single OFN configuration bit as "0" or "1". */
static ssize_t ims_pcu_ofn_bit_show(struct device *dev,
				    struct device_attribute *dattr,
				    char *buf)
{
	struct ims_pcu *pcu = usb_get_intfdata(to_usb_interface(dev));
	struct ims_pcu_ofn_bit_attribute *attr =
		container_of(dattr, struct ims_pcu_ofn_bit_attribute, dattr);
	u8 data;
	int err;

	mutex_lock(&pcu->cmd_mutex);
	err = ims_pcu_read_ofn_config(pcu, attr->addr, &data);
	mutex_unlock(&pcu->cmd_mutex);

	if (err)
		return err;

	return scnprintf(buf, PAGE_SIZE, "%d\n", !!(data & (1 << attr->nr)));
}
/*
 * Set or clear a single OFN configuration bit. Performs a
 * read-modify-write of the register under cmd_mutex.
 */
static ssize_t ims_pcu_ofn_bit_store(struct device *dev,
				     struct device_attribute *dattr,
				     const char *buf, size_t count)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct ims_pcu *pcu = usb_get_intfdata(intf);
	struct ims_pcu_ofn_bit_attribute *attr =
		container_of(dattr, struct ims_pcu_ofn_bit_attribute, dattr);
	int error;
	int value;
	u8 data;

	error = kstrtoint(buf, 0, &value);
	if (error)
		return error;

	/*
	 * Only 0 and 1 make sense for a bit; kstrtoint() accepts negative
	 * numbers, so reject those explicitly as well.
	 */
	if (value < 0 || value > 1)
		return -EINVAL;

	mutex_lock(&pcu->cmd_mutex);

	error = ims_pcu_read_ofn_config(pcu, attr->addr, &data);
	if (!error) {
		if (value)
			data |= 1U << attr->nr;
		else
			data &= ~(1U << attr->nr);

		error = ims_pcu_write_ofn_config(pcu, attr->addr, data);
	}

	mutex_unlock(&pcu->cmd_mutex);

	return error ?: count;
}
/* Declare one single-bit OFN sysfs attribute bound to register/bit. */
#define IMS_PCU_OFN_BIT_ATTR(_field, _addr, _nr) \
struct ims_pcu_ofn_bit_attribute ims_pcu_ofn_attr_##_field = { \
	.dattr = __ATTR(_field, S_IWUSR | S_IRUGO, \
			ims_pcu_ofn_bit_show, ims_pcu_ofn_bit_store), \
	.addr = _addr, \
	.nr = _nr, \
}

/* Individual OFN feature bits (register address, bit number). */
static IMS_PCU_OFN_BIT_ATTR(engine_enable,   0x60, 7);
static IMS_PCU_OFN_BIT_ATTR(speed_enable,    0x60, 6);
static IMS_PCU_OFN_BIT_ATTR(assert_enable,   0x60, 5);
static IMS_PCU_OFN_BIT_ATTR(xyquant_enable,  0x60, 4);
static IMS_PCU_OFN_BIT_ATTR(xyscale_enable,  0x60, 1);
static IMS_PCU_OFN_BIT_ATTR(scale_x2,        0x63, 6);
static IMS_PCU_OFN_BIT_ATTR(scale_y2,        0x63, 7);

static struct attribute *ims_pcu_ofn_attrs[] = {
	&dev_attr_reg_data.attr,
	&dev_attr_reg_addr.attr,
	&ims_pcu_ofn_attr_engine_enable.dattr.attr,
	&ims_pcu_ofn_attr_speed_enable.dattr.attr,
	&ims_pcu_ofn_attr_assert_enable.dattr.attr,
	&ims_pcu_ofn_attr_xyquant_enable.dattr.attr,
	&ims_pcu_ofn_attr_xyscale_enable.dattr.attr,
	&ims_pcu_ofn_attr_scale_x2.dattr.attr,
	&ims_pcu_ofn_attr_scale_y2.dattr.attr,
	NULL
};

/* All OFN attributes grouped under the "ofn" sysfs directory. */
static const struct attribute_group ims_pcu_ofn_attr_group = {
	.name	= "ofn",
	.attrs	= ims_pcu_ofn_attrs,
};
/*
 * Completion handler shared by the bulk-in and interrupt-in ("ctrl")
 * URBs. On success or a transient error the URB is resubmitted so the
 * driver keeps receiving; on shutdown-type errors it is left dead.
 */
static void ims_pcu_irq(struct urb *urb)
{
	struct ims_pcu *pcu = urb->context;
	int retval, status;

	status = urb->status;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(pcu->dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		return;
	default:
		/* Transient failure: skip processing but still resubmit. */
		dev_dbg(pcu->dev, "%s - nonzero urb status received: %d\n",
			__func__, status);
		goto exit;
	}

	dev_dbg(pcu->dev, "%s: received %d: %*ph\n", __func__,
		urb->actual_length, urb->actual_length, pcu->urb_in_buf);

	/* Only the bulk-in URB carries payload that is parsed here. */
	if (urb == pcu->urb_in)
		ims_pcu_process_data(pcu, urb);

exit:
	/* -ENODEV simply means the device went away; don't log that. */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval && retval != -ENODEV)
		dev_err(pcu->dev, "%s - usb_submit_urb failed with result %d\n",
			__func__, retval);
}
/*
 * Allocate all I/O resources: a coherent bulk-in buffer + URB, a plain
 * kmalloc buffer for synchronous bulk-out transfers, and a coherent
 * interrupt-in ("ctrl") buffer + URB. On failure everything allocated
 * so far is released via the goto unwind chain.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int ims_pcu_buffers_alloc(struct ims_pcu *pcu)
{
	int error;

	pcu->urb_in_buf = usb_alloc_coherent(pcu->udev, pcu->max_in_size,
					     GFP_KERNEL, &pcu->read_dma);
	if (!pcu->urb_in_buf) {
		dev_err(pcu->dev,
			"Failed to allocate memory for read buffer\n");
		return -ENOMEM;
	}

	pcu->urb_in = usb_alloc_urb(0, GFP_KERNEL);
	if (!pcu->urb_in) {
		dev_err(pcu->dev, "Failed to allocate input URB\n");
		error = -ENOMEM;
		goto err_free_urb_in_buf;
	}

	pcu->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	pcu->urb_in->transfer_dma = pcu->read_dma;

	usb_fill_bulk_urb(pcu->urb_in, pcu->udev,
			  usb_rcvbulkpipe(pcu->udev,
					  pcu->ep_in->bEndpointAddress),
			  pcu->urb_in_buf, pcu->max_in_size,
			  ims_pcu_irq, pcu);

	/*
	 * We are using usb_bulk_msg() for sending so there is no point
	 * in allocating memory with usb_alloc_coherent().
	 */
	pcu->urb_out_buf = kmalloc(pcu->max_out_size, GFP_KERNEL);
	if (!pcu->urb_out_buf) {
		dev_err(pcu->dev, "Failed to allocate memory for write buffer\n");
		error = -ENOMEM;
		goto err_free_in_urb;
	}

	pcu->urb_ctrl_buf = usb_alloc_coherent(pcu->udev, pcu->max_ctrl_size,
					       GFP_KERNEL, &pcu->ctrl_dma);
	if (!pcu->urb_ctrl_buf) {
		/* Was a copy-paste of the read-buffer message */
		dev_err(pcu->dev,
			"Failed to allocate memory for ctrl buffer\n");
		error = -ENOMEM;
		goto err_free_urb_out_buf;
	}

	pcu->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL);
	if (!pcu->urb_ctrl) {
		/* Was a copy-paste of the input-URB message */
		dev_err(pcu->dev, "Failed to allocate ctrl URB\n");
		error = -ENOMEM;
		goto err_free_urb_ctrl_buf;
	}

	pcu->urb_ctrl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	pcu->urb_ctrl->transfer_dma = pcu->ctrl_dma;

	usb_fill_int_urb(pcu->urb_ctrl, pcu->udev,
			 usb_rcvintpipe(pcu->udev,
					pcu->ep_ctrl->bEndpointAddress),
			 pcu->urb_ctrl_buf, pcu->max_ctrl_size,
			 ims_pcu_irq, pcu, pcu->ep_ctrl->bInterval);

	return 0;

err_free_urb_ctrl_buf:
	usb_free_coherent(pcu->udev, pcu->max_ctrl_size,
			  pcu->urb_ctrl_buf, pcu->ctrl_dma);
err_free_urb_out_buf:
	kfree(pcu->urb_out_buf);
err_free_in_urb:
	usb_free_urb(pcu->urb_in);
err_free_urb_in_buf:
	usb_free_coherent(pcu->udev, pcu->max_in_size,
			  pcu->urb_in_buf, pcu->read_dma);
	return error;
}
/*
 * Release everything allocated by ims_pcu_buffers_alloc().
 *
 * Note: the read buffer was allocated with max_in_size; the original
 * code freed it with max_out_size, mismatching the allocation size.
 */
static void ims_pcu_buffers_free(struct ims_pcu *pcu)
{
	usb_kill_urb(pcu->urb_in);
	usb_free_urb(pcu->urb_in);

	/* Size must match the usb_alloc_coherent() call that created it */
	usb_free_coherent(pcu->udev, pcu->max_in_size,
			  pcu->urb_in_buf, pcu->read_dma);

	kfree(pcu->urb_out_buf);

	usb_kill_urb(pcu->urb_ctrl);
	usb_free_urb(pcu->urb_ctrl);
	usb_free_coherent(pcu->udev, pcu->max_ctrl_size,
			  pcu->urb_ctrl_buf, pcu->ctrl_dma);
}
/*
 * Walk the interface's extra class-specific descriptors looking for the
 * CDC union descriptor, validating lengths as we go.
 *
 * Returns the descriptor or NULL if it is absent or malformed.
 */
static const struct usb_cdc_union_desc *
ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
{
	const void *buf = intf->altsetting->extra;
	size_t buflen = intf->altsetting->extralen;
	struct usb_cdc_union_desc *union_desc;

	if (!buf) {
		dev_err(&intf->dev, "Missing descriptor data\n");
		return NULL;
	}

	if (!buflen) {
		dev_err(&intf->dev, "Zero length descriptor\n");
		return NULL;
	}

	while (buflen >= sizeof(*union_desc)) {
		union_desc = (struct usb_cdc_union_desc *)buf;

		/*
		 * A zero bLength would make this loop never advance;
		 * bail out on such a malformed descriptor.
		 */
		if (union_desc->bLength == 0) {
			dev_err(&intf->dev, "Zero length CDC descriptor\n");
			return NULL;
		}

		if (union_desc->bLength > buflen) {
			dev_err(&intf->dev, "Too large descriptor\n");
			return NULL;
		}

		if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
		    union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
			dev_dbg(&intf->dev, "Found union header\n");

			if (union_desc->bLength >= sizeof(*union_desc))
				return union_desc;

			dev_err(&intf->dev,
				"Union descriptor too short (%d vs %zd)\n",
				union_desc->bLength, sizeof(*union_desc));
			return NULL;
		}

		buflen -= union_desc->bLength;
		buf += union_desc->bLength;
	}

	dev_err(&intf->dev, "Missing CDC union descriptor\n");
	return NULL;
}
/*
 * Locate the CDC control and data interfaces via the union descriptor
 * and cache the endpoints/packet sizes the driver needs: one interrupt
 * endpoint on the control interface, one bulk-out and one bulk-in
 * endpoint (in that order) on the data interface.
 *
 * Returns 0 on success, -EINVAL/-ENODEV on malformed topology.
 */
static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pcu)
{
	const struct usb_cdc_union_desc *union_desc;
	struct usb_host_interface *alt;

	union_desc = ims_pcu_get_cdc_union_desc(intf);
	if (!union_desc)
		return -EINVAL;

	pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
					 union_desc->bMasterInterface0);
	if (!pcu->ctrl_intf)
		return -EINVAL;

	alt = pcu->ctrl_intf->cur_altsetting;

	/* Control interface must provide at least the interrupt endpoint */
	if (alt->desc.bNumEndpoints < 1)
		return -ENODEV;

	pcu->ep_ctrl = &alt->endpoint[0].desc;
	pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl);

	pcu->data_intf = usb_ifnum_to_if(pcu->udev,
					 union_desc->bSlaveInterface0);
	if (!pcu->data_intf)
		return -EINVAL;

	alt = pcu->data_intf->cur_altsetting;
	if (alt->desc.bNumEndpoints != 2) {
		dev_err(pcu->dev,
			"Incorrect number of endpoints on data interface (%d)\n",
			alt->desc.bNumEndpoints);
		return -EINVAL;
	}

	pcu->ep_out = &alt->endpoint[0].desc;
	if (!usb_endpoint_is_bulk_out(pcu->ep_out)) {
		dev_err(pcu->dev,
			"First endpoint on data interface is not BULK OUT\n");
		return -EINVAL;
	}

	/* Protocol framing needs at least 8-byte packets in each direction */
	pcu->max_out_size = usb_endpoint_maxp(pcu->ep_out);
	if (pcu->max_out_size < 8) {
		dev_err(pcu->dev,
			"Max OUT packet size is too small (%zd)\n",
			pcu->max_out_size);
		return -EINVAL;
	}

	pcu->ep_in = &alt->endpoint[1].desc;
	if (!usb_endpoint_is_bulk_in(pcu->ep_in)) {
		dev_err(pcu->dev,
			"Second endpoint on data interface is not BULK IN\n");
		return -EINVAL;
	}

	pcu->max_in_size = usb_endpoint_maxp(pcu->ep_in);
	if (pcu->max_in_size < 8) {
		dev_err(pcu->dev,
			"Max IN packet size is too small (%zd)\n",
			pcu->max_in_size);
		return -EINVAL;
	}

	return 0;
}
/*
 * Submit the ctrl and bulk-in URBs to start receiving from the device.
 * If the second submission fails, the first URB is killed again.
 */
static int ims_pcu_start_io(struct ims_pcu *pcu)
{
	int err;

	err = usb_submit_urb(pcu->urb_ctrl, GFP_KERNEL);
	if (err) {
		dev_err(pcu->dev,
			"Failed to start control IO - usb_submit_urb failed with result: %d\n",
			err);
		return -EIO;
	}

	err = usb_submit_urb(pcu->urb_in, GFP_KERNEL);
	if (err) {
		dev_err(pcu->dev,
			"Failed to start IO - usb_submit_urb failed with result: %d\n",
			err);
		usb_kill_urb(pcu->urb_ctrl);
		return -EIO;
	}

	return 0;
}
/* Cancel both receive URBs; counterpart of ims_pcu_start_io(). */
static void ims_pcu_stop_io(struct ims_pcu *pcu)
{
	usb_kill_urb(pcu->urb_in);
	usb_kill_urb(pcu->urb_ctrl);
}
/*
 * Configure the CDC-ACM line: 57600 baud, 8 data bits, then raise
 * DTR/RTS (control line state 0x03) so the device starts talking.
 * cmd_buf is reused as scratch space for the line-coding structure.
 */
static int ims_pcu_line_setup(struct ims_pcu *pcu)
{
	struct usb_host_interface *interface = pcu->ctrl_intf->cur_altsetting;
	struct usb_cdc_line_coding *line = (void *)pcu->cmd_buf;
	int error;

	memset(line, 0, sizeof(*line));
	line->dwDTERate = cpu_to_le32(57600);
	line->bDataBits = 8;

	error = usb_control_msg(pcu->udev, usb_sndctrlpipe(pcu->udev, 0),
				USB_CDC_REQ_SET_LINE_CODING,
				USB_TYPE_CLASS | USB_RECIP_INTERFACE,
				0, interface->desc.bInterfaceNumber,
				line, sizeof(struct usb_cdc_line_coding),
				5000);
	if (error < 0) {
		dev_err(pcu->dev, "Failed to set line coding, error: %d\n",
			error);
		return error;
	}

	/* 0x03 = DTR | RTS asserted */
	error = usb_control_msg(pcu->udev, usb_sndctrlpipe(pcu->udev, 0),
				USB_CDC_REQ_SET_CONTROL_LINE_STATE,
				USB_TYPE_CLASS | USB_RECIP_INTERFACE,
				0x03, interface->desc.bInterfaceNumber,
				NULL, 0, 5000);
	if (error < 0) {
		dev_err(pcu->dev, "Failed to set line state, error: %d\n",
			error);
		return error;
	}

	return 0;
}
/*
 * Query basic identification data (firmware version, bootloader
 * version, last reset reason) and format it into the pcu string
 * fields for sysfs/logging. Any failed query aborts the sequence.
 */
static int ims_pcu_get_device_info(struct ims_pcu *pcu)
{
	int error;

	error = ims_pcu_get_info(pcu);
	if (error)
		return error;

	error = ims_pcu_execute_query(pcu, GET_FW_VERSION);
	if (error) {
		dev_err(pcu->dev,
			"GET_FW_VERSION command failed, error: %d\n", error);
		return error;
	}

	/* Response layout: 4 version bytes followed by 2 ASCII chars */
	snprintf(pcu->fw_version, sizeof(pcu->fw_version),
		 "%02d%02d%02d%02d.%c%c",
		 pcu->cmd_buf[2], pcu->cmd_buf[3], pcu->cmd_buf[4], pcu->cmd_buf[5],
		 pcu->cmd_buf[6], pcu->cmd_buf[7]);

	error = ims_pcu_execute_query(pcu, GET_BL_VERSION);
	if (error) {
		dev_err(pcu->dev,
			"GET_BL_VERSION command failed, error: %d\n", error);
		return error;
	}

	snprintf(pcu->bl_version, sizeof(pcu->bl_version),
		 "%02d%02d%02d%02d.%c%c",
		 pcu->cmd_buf[2], pcu->cmd_buf[3], pcu->cmd_buf[4], pcu->cmd_buf[5],
		 pcu->cmd_buf[6], pcu->cmd_buf[7]);

	error = ims_pcu_execute_query(pcu, RESET_REASON);
	if (error) {
		dev_err(pcu->dev,
			"RESET_REASON command failed, error: %d\n", error);
		return error;
	}

	snprintf(pcu->reset_reason, sizeof(pcu->reset_reason),
		 "%02x", pcu->cmd_buf[IMS_PCU_DATA_OFFSET]);

	dev_dbg(pcu->dev,
		"P/N: %s, MD: %s, S/N: %s, FW: %s, BL: %s, RR: %s\n",
		pcu->part_number,
		pcu->date_of_manufacturing,
		pcu->serial_number,
		pcu->fw_version,
		pcu->bl_version,
		pcu->reset_reason);

	return 0;
}
/* Ask the PCU for its device-ID byte and store it in *device_id. */
static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
{
	int err = ims_pcu_execute_query(pcu, GET_DEVICE_ID);

	if (err) {
		dev_err(pcu->dev,
			"GET_DEVICE_ID command failed, error: %d\n", err);
		return err;
	}

	*device_id = pcu->cmd_buf[IMS_PCU_DATA_OFFSET];
	dev_dbg(pcu->dev, "Detected device ID: %d\n", *device_id);

	return 0;
}
/*
 * Bring up a device running application firmware: query identity,
 * create the OFN sysfs group (except on PCU-B), then backlight,
 * buttons and optionally gamepad. Identification failures are
 * deliberately not fatal: the device is left accessible so userspace
 * can flash working firmware.
 */
static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
{
	/* Monotonic device counter for unique input device names */
	static atomic_t device_no = ATOMIC_INIT(-1);

	const struct ims_pcu_device_info *info;
	int error;

	error = ims_pcu_get_device_info(pcu);
	if (error) {
		/* Device does not respond to basic queries, hopeless */
		return error;
	}

	error = ims_pcu_identify_type(pcu, &pcu->device_id);
	if (error) {
		dev_err(pcu->dev,
			"Failed to identify device, error: %d\n", error);
		/*
		 * Do not signal error, but do not create input nor
		 * backlight devices either, let userspace figure this
		 * out (flash a new firmware?).
		 */
		return 0;
	}

	if (pcu->device_id >= ARRAY_SIZE(ims_pcu_device_info) ||
	    !ims_pcu_device_info[pcu->device_id].keymap) {
		dev_err(pcu->dev, "Device ID %d is not valid\n", pcu->device_id);
		/* Same as above, punt to userspace */
		return 0;
	}

	/* Device appears to be operable, complete initialization */
	pcu->device_no = atomic_inc_return(&device_no);

	/*
	 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
	 */
	if (pcu->device_id != IMS_PCU_PCU_B_DEVICE_ID) {
		error = sysfs_create_group(&pcu->dev->kobj,
					   &ims_pcu_ofn_attr_group);
		if (error)
			return error;
	}

	error = ims_pcu_setup_backlight(pcu);
	if (error)
		return error;

	info = &ims_pcu_device_info[pcu->device_id];
	error = ims_pcu_setup_buttons(pcu, info->keymap, info->keymap_len);
	if (error)
		goto err_destroy_backlight;

	if (info->has_gamepad) {
		error = ims_pcu_setup_gamepad(pcu);
		if (error)
			goto err_destroy_buttons;
	}

	/* Checked by ims_pcu_destroy_application_mode() before teardown */
	pcu->setup_complete = true;

	return 0;

err_destroy_buttons:
	ims_pcu_destroy_buttons(pcu);
err_destroy_backlight:
	ims_pcu_destroy_backlight(pcu);
	return error;
}
/*
 * Tear down what ims_pcu_init_application_mode() created, but only if
 * setup actually completed (identification failures leave the device
 * half-initialized on purpose).
 */
static void ims_pcu_destroy_application_mode(struct ims_pcu *pcu)
{
	if (pcu->setup_complete) {
		pcu->setup_complete = false;
		mb(); /* make sure flag setting is not reordered */

		if (pcu->gamepad)
			ims_pcu_destroy_gamepad(pcu);
		ims_pcu_destroy_buttons(pcu);
		ims_pcu_destroy_backlight(pcu);

		if (pcu->device_id != IMS_PCU_PCU_B_DEVICE_ID)
			sysfs_remove_group(&pcu->dev->kobj,
					   &ims_pcu_ofn_attr_group);
	}
}
/*
 * Bring up a device running bootloader firmware: query the flashable
 * address range and kick off an asynchronous firmware request so the
 * device can be reflashed as soon as the image is available.
 */
static int ims_pcu_init_bootloader_mode(struct ims_pcu *pcu)
{
	int error;

	error = ims_pcu_execute_bl_command(pcu, QUERY_DEVICE, NULL, 0,
					   IMS_PCU_CMD_RESPONSE_TIMEOUT);
	if (error) {
		dev_err(pcu->dev, "Bootloader does not respond, aborting\n");
		return error;
	}

	/* Flash window bounds, little-endian in the query response */
	pcu->fw_start_addr =
		get_unaligned_le32(&pcu->cmd_buf[IMS_PCU_DATA_OFFSET + 11]);
	pcu->fw_end_addr =
		get_unaligned_le32(&pcu->cmd_buf[IMS_PCU_DATA_OFFSET + 15]);

	dev_info(pcu->dev,
		 "Device is in bootloader mode (addr 0x%08x-0x%08x), requesting firmware\n",
		 pcu->fw_start_addr, pcu->fw_end_addr);

	error = request_firmware_nowait(THIS_MODULE, true,
					IMS_PCU_FIRMWARE_NAME,
					pcu->dev, GFP_KERNEL, pcu,
					ims_pcu_process_async_firmware);
	if (error) {
		/* This error is not fatal, let userspace have another chance */
		complete(&pcu->async_firmware_done);
	}

	return 0;
}
/* Block until the async firmware request kicked off at init finishes. */
static void ims_pcu_destroy_bootloader_mode(struct ims_pcu *pcu)
{
	/* Make sure our initial firmware request has completed */
	wait_for_completion(&pcu->async_firmware_done);
}
/* driver_info values in ims_pcu_id_table selecting the init path */
#define IMS_PCU_APPLICATION_MODE	0
#define IMS_PCU_BOOTLOADER_MODE		1

/* Forward declaration: probe needs to claim the data interface for us */
static struct usb_driver ims_pcu_driver;
/*
 * Probe the control (ACM) interface: parse CDC descriptors, claim the
 * companion data interface, allocate buffers, start I/O, set the line
 * parameters and finish init in application or bootloader mode
 * depending on the matched USB ID. Errors unwind in reverse order.
 */
static int ims_pcu_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct ims_pcu *pcu;
	int error;

	pcu = kzalloc(sizeof(struct ims_pcu), GFP_KERNEL);
	if (!pcu)
		return -ENOMEM;

	pcu->dev = &intf->dev;
	pcu->udev = udev;
	pcu->bootloader_mode = id->driver_info == IMS_PCU_BOOTLOADER_MODE;
	mutex_init(&pcu->cmd_mutex);
	init_completion(&pcu->cmd_done);
	init_completion(&pcu->async_firmware_done);

	error = ims_pcu_parse_cdc_data(intf, pcu);
	if (error)
		goto err_free_mem;

	error = usb_driver_claim_interface(&ims_pcu_driver,
					   pcu->data_intf, pcu);
	if (error) {
		dev_err(&intf->dev,
			"Unable to claim corresponding data interface: %d\n",
			error);
		goto err_free_mem;
	}

	usb_set_intfdata(pcu->ctrl_intf, pcu);

	error = ims_pcu_buffers_alloc(pcu);
	if (error)
		goto err_unclaim_intf;

	error = ims_pcu_start_io(pcu);
	if (error)
		goto err_free_buffers;

	error = ims_pcu_line_setup(pcu);
	if (error)
		goto err_stop_io;

	error = sysfs_create_group(&intf->dev.kobj, &ims_pcu_attr_group);
	if (error)
		goto err_stop_io;

	error = pcu->bootloader_mode ?
			ims_pcu_init_bootloader_mode(pcu) :
			ims_pcu_init_application_mode(pcu);
	if (error)
		goto err_remove_sysfs;

	return 0;

err_remove_sysfs:
	sysfs_remove_group(&intf->dev.kobj, &ims_pcu_attr_group);
err_stop_io:
	ims_pcu_stop_io(pcu);
err_free_buffers:
	ims_pcu_buffers_free(pcu);
err_unclaim_intf:
	usb_driver_release_interface(&ims_pcu_driver, pcu->data_intf);
err_free_mem:
	kfree(pcu);
	return error;
}
/*
 * Disconnect callback. Called once per claimed interface; the full
 * teardown runs only when the control (COMM-class) interface unbinds.
 */
static void ims_pcu_disconnect(struct usb_interface *intf)
{
	struct ims_pcu *pcu = usb_get_intfdata(intf);
	struct usb_host_interface *alt = intf->cur_altsetting;

	usb_set_intfdata(intf, NULL);

	/*
	 * See if we are dealing with control or data interface. The cleanup
	 * happens when we unbind primary (control) interface.
	 */
	if (alt->desc.bInterfaceClass != USB_CLASS_COMM)
		return;

	sysfs_remove_group(&intf->dev.kobj, &ims_pcu_attr_group);

	ims_pcu_stop_io(pcu);

	if (pcu->bootloader_mode)
		ims_pcu_destroy_bootloader_mode(pcu);
	else
		ims_pcu_destroy_application_mode(pcu);

	ims_pcu_buffers_free(pcu);
	kfree(pcu);
}
#ifdef CONFIG_PM
/* Suspend: stop I/O once, when the control interface suspends. */
static int ims_pcu_suspend(struct usb_interface *intf,
			   pm_message_t message)
{
	struct ims_pcu *pcu = usb_get_intfdata(intf);
	struct usb_host_interface *alt = intf->cur_altsetting;

	if (alt->desc.bInterfaceClass == USB_CLASS_COMM)
		ims_pcu_stop_io(pcu);

	return 0;
}
/* Resume: restart I/O and re-apply line settings (control intf only). */
static int ims_pcu_resume(struct usb_interface *intf)
{
	struct ims_pcu *pcu = usb_get_intfdata(intf);
	struct usb_host_interface *alt = intf->cur_altsetting;
	int retval = 0;

	if (alt->desc.bInterfaceClass == USB_CLASS_COMM) {
		retval = ims_pcu_start_io(pcu);
		if (retval == 0)
			retval = ims_pcu_line_setup(pcu);
	}

	return retval;
}
#endif
/*
 * Two PIDs for the same hardware: 0x0082 enumerates when application
 * firmware runs, 0x0083 when the bootloader runs; driver_info picks
 * the matching init path in probe.
 */
static const struct usb_device_id ims_pcu_id_table[] = {
	{
		USB_DEVICE_AND_INTERFACE_INFO(0x04d8, 0x0082,
					USB_CLASS_COMM,
					USB_CDC_SUBCLASS_ACM,
					USB_CDC_ACM_PROTO_AT_V25TER),
		.driver_info = IMS_PCU_APPLICATION_MODE,
	},
	{
		USB_DEVICE_AND_INTERFACE_INFO(0x04d8, 0x0083,
					USB_CLASS_COMM,
					USB_CDC_SUBCLASS_ACM,
					USB_CDC_ACM_PROTO_AT_V25TER),
		.driver_info = IMS_PCU_BOOTLOADER_MODE,
	},
	{ }
};

static struct usb_driver ims_pcu_driver = {
	.name			= "ims_pcu",
	.id_table		= ims_pcu_id_table,
	.probe			= ims_pcu_probe,
	.disconnect		= ims_pcu_disconnect,
#ifdef CONFIG_PM
	.suspend		= ims_pcu_suspend,
	.resume			= ims_pcu_resume,
	.reset_resume		= ims_pcu_resume,
#endif
};

module_usb_driver(ims_pcu_driver);

MODULE_DESCRIPTION("IMS Passenger Control Unit driver");
MODULE_AUTHOR("Dmitry Torokhov <[email protected]>");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/ims-pcu.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MAXIM MAX77693/MAX77843 Haptic device driver
*
* Copyright (C) 2014,2015 Samsung Electronics
* Jaewon Kim <[email protected]>
* Krzysztof Kozlowski <[email protected]>
*
* This program is not provided / owned by Maxim Integrated Products.
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/max77693.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77693-private.h>
#include <linux/mfd/max77843-private.h>
/* FF magnitude is a 16-bit value; shift used when scaling to PWM duty */
#define MAX_MAGNITUDE_SHIFT	16

enum max77693_haptic_motor_type {
	MAX77693_HAPTIC_ERM = 0,	/* eccentric rotating mass */
	MAX77693_HAPTIC_LRA,		/* linear resonant actuator */
};

enum max77693_haptic_pulse_mode {
	MAX77693_HAPTIC_EXTERNAL_MODE = 0,	/* driven by external PWM */
	MAX77693_HAPTIC_INTERNAL_MODE,
};

/* PWM clock divisor field values for the haptic config registers */
enum max77693_haptic_pwm_divisor {
	MAX77693_HAPTIC_PWM_DIVISOR_32 = 0,
	MAX77693_HAPTIC_PWM_DIVISOR_64,
	MAX77693_HAPTIC_PWM_DIVISOR_128,
	MAX77693_HAPTIC_PWM_DIVISOR_256,
};

/* Per-device state shared between the FF callbacks and the work item */
struct max77693_haptic {
	enum max77693_types dev_type;	/* TYPE_MAX77693 or TYPE_MAX77843 */
	struct regmap *regmap_pmic;
	struct regmap *regmap_haptic;
	struct device *dev;
	struct input_dev *input_dev;
	struct pwm_device *pwm_dev;
	struct regulator *motor_reg;
	bool enabled;		/* motor currently running */
	bool suspend_state;	/* was running when suspended */
	unsigned int magnitude;	/* last requested FF magnitude */
	unsigned int pwm_duty;	/* duty derived from magnitude */
	enum max77693_haptic_motor_type type;
	enum max77693_haptic_pulse_mode mode;
	struct work_struct work;
};
/*
 * Program the PWM with the duty derived from the last FF magnitude.
 *
 * NOTE(review): the duty is (period + pwm_duty) / 2, i.e. centered
 * around 50% — presumably because the haptic engine interprets the
 * PWM symmetrically around half duty; confirm against the datasheet.
 */
static int max77693_haptic_set_duty_cycle(struct max77693_haptic *haptic)
{
	struct pwm_args pargs;
	int delta;
	int error;

	pwm_get_args(haptic->pwm_dev, &pargs);
	delta = (pargs.period + haptic->pwm_duty) / 2;
	error = pwm_config(haptic->pwm_dev, delta, pargs.period);
	if (error) {
		dev_err(haptic->dev, "failed to configure pwm: %d\n", error);
		return error;
	}

	return 0;
}
/* Toggle the MAX77843 bias enable bit; a no-op on other variants. */
static int max77843_haptic_bias(struct max77693_haptic *haptic, bool on)
{
	int ret;

	if (haptic->dev_type != TYPE_MAX77843)
		return 0;

	ret = regmap_update_bits(haptic->regmap_haptic,
				 MAX77843_SYS_REG_MAINCTRL1,
				 MAX77843_MAINCTRL1_BIASEN_MASK,
				 on << MAINCTRL1_BIASEN_SHIFT);
	if (ret)
		dev_err(haptic->dev, "failed to %s bias: %d\n",
			on ? "enable" : "disable", ret);

	return ret;
}
/*
 * Write the variant-specific haptic config register: motor type,
 * enable bit, pulse mode (77693 only) and a fixed /128 PWM divisor.
 */
static int max77693_haptic_configure(struct max77693_haptic *haptic,
				     bool enable)
{
	unsigned int value, config_reg;
	int error;

	switch (haptic->dev_type) {
	case TYPE_MAX77693:
		value = ((haptic->type << MAX77693_CONFIG2_MODE) |
			(enable << MAX77693_CONFIG2_MEN) |
			(haptic->mode << MAX77693_CONFIG2_HTYP) |
			MAX77693_HAPTIC_PWM_DIVISOR_128);
		config_reg = MAX77693_HAPTIC_REG_CONFIG2;
		break;
	case TYPE_MAX77843:
		value = (haptic->type << MCONFIG_MODE_SHIFT) |
			(enable << MCONFIG_MEN_SHIFT) |
			MAX77693_HAPTIC_PWM_DIVISOR_128;
		config_reg = MAX77843_HAP_REG_MCONFIG;
		break;
	default:
		return -EINVAL;
	}

	error = regmap_write(haptic->regmap_haptic,
			     config_reg, value);
	if (error) {
		dev_err(haptic->dev,
			"failed to update haptic config: %d\n", error);
		return error;
	}

	return 0;
}
/* Toggle the MAX77693 PMIC low-system bit; a no-op on other variants. */
static int max77693_haptic_lowsys(struct max77693_haptic *haptic, bool enable)
{
	int ret;

	if (haptic->dev_type != TYPE_MAX77693)
		return 0;

	ret = regmap_update_bits(haptic->regmap_pmic,
				 MAX77693_PMIC_REG_LSCNFG,
				 MAX77693_PMIC_LOW_SYS_MASK,
				 enable << MAX77693_PMIC_LOW_SYS_SHIFT);
	if (ret)
		dev_err(haptic->dev, "cannot update pmic regmap: %d\n", ret);

	return ret;
}
/*
 * Start the motor: PWM first, then PMIC lowsys, then the haptic
 * enable bit. Each failure rolls back the steps already taken.
 */
static void max77693_haptic_enable(struct max77693_haptic *haptic)
{
	int error;

	if (haptic->enabled)
		return;

	error = pwm_enable(haptic->pwm_dev);
	if (error) {
		dev_err(haptic->dev,
			"failed to enable haptic pwm device: %d\n", error);
		return;
	}

	error = max77693_haptic_lowsys(haptic, true);
	if (error)
		goto err_enable_lowsys;

	error = max77693_haptic_configure(haptic, true);
	if (error)
		goto err_enable_config;

	haptic->enabled = true;

	return;

err_enable_config:
	max77693_haptic_lowsys(haptic, false);
err_enable_lowsys:
	pwm_disable(haptic->pwm_dev);
}
/*
 * Stop the motor in reverse order of enabling. If the lowsys step
 * fails mid-way, the enable bit is re-set to restore a consistent
 * hardware state.
 */
static void max77693_haptic_disable(struct max77693_haptic *haptic)
{
	int error;

	if (!haptic->enabled)
		return;

	error = max77693_haptic_configure(haptic, false);
	if (error)
		return;

	error = max77693_haptic_lowsys(haptic, false);
	if (error)
		goto err_disable_lowsys;

	pwm_disable(haptic->pwm_dev);
	haptic->enabled = false;

	return;

err_disable_lowsys:
	max77693_haptic_configure(haptic, true);
}
/* Deferred FF playback: apply the computed duty, then run or stop. */
static void max77693_haptic_play_work(struct work_struct *work)
{
	struct max77693_haptic *haptic =
		container_of(work, struct max77693_haptic, work);
	int err = max77693_haptic_set_duty_cycle(haptic);

	if (err) {
		dev_err(haptic->dev, "failed to set duty cycle: %d\n", err);
		return;
	}

	if (haptic->magnitude)
		max77693_haptic_enable(haptic);
	else
		max77693_haptic_disable(haptic);
}
/*
 * Force-feedback callback: convert rumble magnitude to a PWM duty and
 * defer the hardware work to process context (regmap/PWM may sleep).
 */
static int max77693_haptic_play_effect(struct input_dev *dev, void *data,
				       struct ff_effect *effect)
{
	struct max77693_haptic *haptic = input_get_drvdata(dev);
	struct pwm_args pargs;
	u64 period_mag_multi;

	/* Fall back to the weak motor value if strong is zero */
	haptic->magnitude = effect->u.rumble.strong_magnitude;
	if (!haptic->magnitude)
		haptic->magnitude = effect->u.rumble.weak_magnitude;

	/*
	 * The magnitude comes from force-feedback interface.
	 * The formula to convert magnitude to pwm_duty as follows:
	 * - pwm_duty = (magnitude * pwm_period) / MAX_MAGNITUDE(0xFFFF)
	 */
	pwm_get_args(haptic->pwm_dev, &pargs);
	period_mag_multi = (u64)pargs.period * haptic->magnitude;
	haptic->pwm_duty = (unsigned int)(period_mag_multi >>
						MAX_MAGNITUDE_SHIFT);

	schedule_work(&haptic->work);

	return 0;
}
/* input open(): enable the bias (77843 only) and the motor regulator. */
static int max77693_haptic_open(struct input_dev *dev)
{
	struct max77693_haptic *haptic = input_get_drvdata(dev);
	int err;

	err = max77843_haptic_bias(haptic, true);
	if (err)
		return err;

	err = regulator_enable(haptic->motor_reg);
	if (err)
		dev_err(haptic->dev,
			"failed to enable regulator: %d\n", err);

	return err;
}
/*
 * input close(): flush pending playback, stop the motor and release
 * the regulator and bias. Regulator failure is logged, not fatal.
 */
static void max77693_haptic_close(struct input_dev *dev)
{
	struct max77693_haptic *haptic = input_get_drvdata(dev);
	int error;

	cancel_work_sync(&haptic->work);
	max77693_haptic_disable(haptic);

	error = regulator_disable(haptic->motor_reg);
	if (error)
		dev_err(haptic->dev,
			"failed to disable regulator: %d\n", error);

	max77843_haptic_bias(haptic, false);
}
/*
 * Probe: pick the right regmap for the MFD variant, acquire PWM and
 * regulator, register a memless force-feedback input device. All
 * resources are devm-managed, so no explicit remove is needed.
 */
static int max77693_haptic_probe(struct platform_device *pdev)
{
	struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent);
	struct max77693_haptic *haptic;
	int error;

	haptic = devm_kzalloc(&pdev->dev, sizeof(*haptic), GFP_KERNEL);
	if (!haptic)
		return -ENOMEM;

	haptic->regmap_pmic = max77693->regmap;
	haptic->dev = &pdev->dev;
	haptic->type = MAX77693_HAPTIC_LRA;
	haptic->mode = MAX77693_HAPTIC_EXTERNAL_MODE;
	haptic->suspend_state = false;

	/* Variant-specific init */
	haptic->dev_type = platform_get_device_id(pdev)->driver_data;
	switch (haptic->dev_type) {
	case TYPE_MAX77693:
		haptic->regmap_haptic = max77693->regmap_haptic;
		break;
	case TYPE_MAX77843:
		haptic->regmap_haptic = max77693->regmap;
		break;
	default:
		dev_err(&pdev->dev, "unsupported device type: %u\n",
			haptic->dev_type);
		return -EINVAL;
	}

	INIT_WORK(&haptic->work, max77693_haptic_play_work);

	/* Get pwm and regulator for haptic device */
	haptic->pwm_dev = devm_pwm_get(&pdev->dev, NULL);
	if (IS_ERR(haptic->pwm_dev)) {
		dev_err(&pdev->dev, "failed to get pwm device\n");
		return PTR_ERR(haptic->pwm_dev);
	}

	/*
	 * FIXME: pwm_apply_args() should be removed when switching to the
	 * atomic PWM API.
	 */
	pwm_apply_args(haptic->pwm_dev);

	haptic->motor_reg = devm_regulator_get(&pdev->dev, "haptic");
	if (IS_ERR(haptic->motor_reg)) {
		dev_err(&pdev->dev, "failed to get regulator\n");
		return PTR_ERR(haptic->motor_reg);
	}

	/* Initialize input device for haptic device */
	haptic->input_dev = devm_input_allocate_device(&pdev->dev);
	if (!haptic->input_dev) {
		dev_err(&pdev->dev, "failed to allocate input device\n");
		return -ENOMEM;
	}

	haptic->input_dev->name = "max77693-haptic";
	haptic->input_dev->id.version = 1;
	haptic->input_dev->dev.parent = &pdev->dev;
	haptic->input_dev->open = max77693_haptic_open;
	haptic->input_dev->close = max77693_haptic_close;
	input_set_drvdata(haptic->input_dev, haptic);
	input_set_capability(haptic->input_dev, EV_FF, FF_RUMBLE);

	error = input_ff_create_memless(haptic->input_dev, NULL,
				  max77693_haptic_play_effect);
	if (error) {
		dev_err(&pdev->dev, "failed to create force-feedback\n");
		return error;
	}

	error = input_register_device(haptic->input_dev);
	if (error) {
		dev_err(&pdev->dev, "failed to register input device\n");
		return error;
	}

	platform_set_drvdata(pdev, haptic);

	return 0;
}
/* Suspend: stop a running motor and remember to restart on resume. */
static int max77693_haptic_suspend(struct device *dev)
{
	struct max77693_haptic *haptic =
		platform_get_drvdata(to_platform_device(dev));

	if (haptic->enabled) {
		max77693_haptic_disable(haptic);
		haptic->suspend_state = true;
	}

	return 0;
}
/* Resume: restart the motor if it was running before suspend. */
static int max77693_haptic_resume(struct device *dev)
{
	struct max77693_haptic *haptic =
		platform_get_drvdata(to_platform_device(dev));

	if (haptic->suspend_state) {
		max77693_haptic_enable(haptic);
		haptic->suspend_state = false;
	}

	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(max77693_haptic_pm_ops,
				max77693_haptic_suspend,
				max77693_haptic_resume);

/* One driver serves both MFD variants; driver_data selects the type */
static const struct platform_device_id max77693_haptic_id[] = {
	{ "max77693-haptic", TYPE_MAX77693 },
	{ "max77843-haptic", TYPE_MAX77843 },
	{},
};
MODULE_DEVICE_TABLE(platform, max77693_haptic_id);

static struct platform_driver max77693_haptic_driver = {
	.driver		= {
		.name	= "max77693-haptic",
		.pm	= pm_sleep_ptr(&max77693_haptic_pm_ops),
	},
	.probe		= max77693_haptic_probe,
	.id_table	= max77693_haptic_id,
};
module_platform_driver(max77693_haptic_driver);

MODULE_AUTHOR("Jaewon Kim <[email protected]>");
MODULE_AUTHOR("Krzysztof Kozlowski <[email protected]>");
MODULE_DESCRIPTION("MAXIM 77693/77843 Haptic driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/max77693-haptic.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for Freescale's 3-Axis Accelerometer MMA8450
*
* Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/mod_devicetable.h>
#define MMA8450_DRV_NAME	"mma8450"

#define MODE_CHANGE_DELAY_MS	100	/* settle time after mode switch */
#define POLL_INTERVAL		100	/* default poll period, ms */
#define POLL_INTERVAL_MAX	500

/* register definitions */
#define MMA8450_STATUS		0x00
#define MMA8450_STATUS_ZXYDR	0x08	/* X/Y/Z data-ready flag */

#define MMA8450_OUT_X8		0x01
#define MMA8450_OUT_Y8		0x02
#define MMA8450_OUT_Z8		0x03

/* 12-bit output: LSB holds the low nibble, MSB the signed high byte */
#define MMA8450_OUT_X_LSB	0x05
#define MMA8450_OUT_X_MSB	0x06
#define MMA8450_OUT_Y_LSB	0x07
#define MMA8450_OUT_Y_MSB	0x08
#define MMA8450_OUT_Z_LSB	0x09
#define MMA8450_OUT_Z_MSB	0x0a

#define MMA8450_XYZ_DATA_CFG	0x16

#define MMA8450_CTRL_REG1	0x38
#define MMA8450_CTRL_REG2	0x39
/* Read one register; returns its value or a negative errno. */
static int mma8450_read(struct i2c_client *c, unsigned int off)
{
	int ret = i2c_smbus_read_byte_data(c, off);

	if (ret < 0)
		dev_err(&c->dev,
			"failed to read register 0x%02x, error %d\n",
			off, ret);

	return ret;
}
/* Write one register; returns 0 or a negative errno. */
static int mma8450_write(struct i2c_client *c, unsigned int off, u8 v)
{
	int ret = i2c_smbus_write_byte_data(c, off, v);

	if (ret < 0) {
		dev_err(&c->dev,
			"failed to write to register 0x%02x, error %d\n",
			off, ret);
		return ret;
	}

	return 0;
}
/*
 * Read @size consecutive registers starting at @off into @buf.
 * Returns 0 on success or a negative errno.
 */
static int mma8450_read_block(struct i2c_client *c, unsigned int off,
			      u8 *buf, size_t size)
{
	int err;

	err = i2c_smbus_read_i2c_block_data(c, off, size, buf);
	if (err < 0) {
		/*
		 * Report the register that was actually requested; the
		 * old message hard-coded MMA8450_OUT_X_LSB.
		 */
		dev_err(&c->dev,
			"failed to read block data at 0x%02x, error %d\n",
			off, err);
		return err;
	}

	return 0;
}
/*
 * Polled read: when fresh X/Y/Z data is ready, read all six output
 * registers in one burst and report the three 12-bit signed samples.
 */
static void mma8450_poll(struct input_dev *input)
{
	struct i2c_client *c = input_get_drvdata(input);
	int x, y, z;
	int ret;
	u8 buf[6];

	ret = mma8450_read(c, MMA8450_STATUS);
	if (ret < 0)
		return;

	/* Nothing new since the last poll */
	if (!(ret & MMA8450_STATUS_ZXYDR))
		return;

	ret = mma8450_read_block(c, MMA8450_OUT_X_LSB, buf, sizeof(buf));
	if (ret < 0)
		return;

	/*
	 * Each axis: signed 8-bit MSB shifted up 4 bits, low nibble
	 * from the LSB register — a sign-extended 12-bit value.
	 */
	x = ((int)(s8)buf[1] << 4) | (buf[0] & 0xf);
	y = ((int)(s8)buf[3] << 4) | (buf[2] & 0xf);
	z = ((int)(s8)buf[5] << 4) | (buf[4] & 0xf);

	input_report_abs(input, ABS_X, x);
	input_report_abs(input, ABS_Y, y);
	input_report_abs(input, ABS_Z, z);
	input_sync(input);
}
/* Initialize the MMA8450 chip */
static int mma8450_open(struct input_dev *input)
{
struct i2c_client *c = input_get_drvdata(input);
int err;
/* enable all events from X/Y/Z, no FIFO */
err = mma8450_write(c, MMA8450_XYZ_DATA_CFG, 0x07);
if (err)
return err;
/*
* Sleep mode poll rate - 50Hz
* System output data rate - 400Hz
* Full scale selection - Active, +/- 2G
*/
err = mma8450_write(c, MMA8450_CTRL_REG1, 0x01);
if (err)
return err;
msleep(MODE_CHANGE_DELAY_MS);
return 0;
}
/* Put the chip back in standby and trigger a soft reset. */
static void mma8450_close(struct input_dev *input)
{
	struct i2c_client *c = input_get_drvdata(input);

	mma8450_write(c, MMA8450_CTRL_REG1, 0x00);
	mma8450_write(c, MMA8450_CTRL_REG2, 0x01);
}
/*
* I2C init/probing/exit functions
*/
/*
 * Probe: register a polled input device reporting the three axes as
 * 12-bit absolute values (+/-2G range); the chip itself is only
 * powered up in open() and back down in close().
 */
static int mma8450_probe(struct i2c_client *c)
{
	struct input_dev *input;
	int err;

	input = devm_input_allocate_device(&c->dev);
	if (!input)
		return -ENOMEM;

	input_set_drvdata(input, c);

	input->name = MMA8450_DRV_NAME;
	input->id.bustype = BUS_I2C;

	input->open = mma8450_open;
	input->close = mma8450_close;

	/* 12-bit signed range with fuzz/flat of 32 counts */
	input_set_abs_params(input, ABS_X, -2048, 2047, 32, 32);
	input_set_abs_params(input, ABS_Y, -2048, 2047, 32, 32);
	input_set_abs_params(input, ABS_Z, -2048, 2047, 32, 32);

	err = input_setup_polling(input, mma8450_poll);
	if (err) {
		dev_err(&c->dev, "failed to set up polling\n");
		return err;
	}

	input_set_poll_interval(input, POLL_INTERVAL);
	input_set_max_poll_interval(input, POLL_INTERVAL_MAX);

	err = input_register_device(input);
	if (err) {
		dev_err(&c->dev, "failed to register input device\n");
		return err;
	}

	return 0;
}
static const struct i2c_device_id mma8450_id[] = {
	{ MMA8450_DRV_NAME, 0 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, mma8450_id);

/* Device-tree binding */
static const struct of_device_id mma8450_dt_ids[] = {
	{ .compatible = "fsl,mma8450", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mma8450_dt_ids);

static struct i2c_driver mma8450_driver = {
	.driver = {
		.name	= MMA8450_DRV_NAME,
		.of_match_table = mma8450_dt_ids,
	},
	.probe		= mma8450_probe,
	.id_table	= mma8450_id,
};
module_i2c_driver(mma8450_driver);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MMA8450 3-Axis Accelerometer Driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/mma8450.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Arizona haptics driver
*
* Copyright 2012 Wolfson Microelectronics plc
*
* Author: Mark Brown <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/pdata.h>
#include <linux/mfd/arizona/registers.h>
/* Per-device haptics state shared between the FF callback and the worker. */
struct arizona_haptics {
	struct arizona *arizona;	/* parent MFD core device */
	struct input_dev *input_dev;	/* memless force-feedback device */
	struct work_struct work;	/* applies intensity from sleepable context */
	struct mutex mutex;		/* NOTE(review): not used in this file — confirm */
	u8 intensity;			/* 0 = off; written by play(), read by worker */
};
/*
 * Worker that pushes the latest requested intensity to the hardware.
 * Runs from the system workqueue because the regmap and DAPM calls may
 * sleep, while the ff-memless play() callback cannot.
 */
static void arizona_haptics_work(struct work_struct *work)
{
	struct arizona_haptics *haptics = container_of(work,
						       struct arizona_haptics,
						       work);
	struct arizona *arizona = haptics->arizona;
	struct snd_soc_component *component =
		snd_soc_dapm_to_component(arizona->dapm);
	int ret;

	/* The CODEC driver must have registered its DAPM context first. */
	if (!haptics->arizona->dapm) {
		dev_err(arizona->dev, "No DAPM context\n");
		return;
	}

	if (haptics->intensity) {
		/* Program intensity before enabling the output path. */
		ret = regmap_update_bits(arizona->regmap,
					 ARIZONA_HAPTICS_PHASE_2_INTENSITY,
					 ARIZONA_PHASE2_INTENSITY_MASK,
					 haptics->intensity);
		if (ret != 0) {
			dev_err(arizona->dev, "Failed to set intensity: %d\n",
				ret);
			return;
		}

		/* This enable sequence will be a noop if already enabled */
		ret = regmap_update_bits(arizona->regmap,
					 ARIZONA_HAPTICS_CONTROL_1,
					 ARIZONA_HAP_CTRL_MASK,
					 1 << ARIZONA_HAP_CTRL_SHIFT);
		if (ret != 0) {
			dev_err(arizona->dev, "Failed to start haptics: %d\n",
				ret);
			return;
		}

		/* Route audio through the HAPTICS pin and sync DAPM state. */
		ret = snd_soc_component_enable_pin(component, "HAPTICS");
		if (ret != 0) {
			dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
				ret);
			return;
		}

		ret = snd_soc_dapm_sync(arizona->dapm);
		if (ret != 0) {
			dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
				ret);
			return;
		}
	} else {
		/* This disable sequence will be a noop if already enabled */
		ret = snd_soc_component_disable_pin(component, "HAPTICS");
		if (ret != 0) {
			dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
				ret);
			return;
		}

		ret = snd_soc_dapm_sync(arizona->dapm);
		if (ret != 0) {
			dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
				ret);
			return;
		}

		/* Disable the hardware block only after the audio path is down. */
		ret = regmap_update_bits(arizona->regmap,
					 ARIZONA_HAPTICS_CONTROL_1,
					 ARIZONA_HAP_CTRL_MASK, 0);
		if (ret != 0) {
			dev_err(arizona->dev, "Failed to stop haptics: %d\n",
				ret);
			return;
		}
	}
}
/*
 * ff-memless play() callback: record the requested rumble magnitude as a
 * device intensity and defer the (sleeping) hardware update to the worker.
 * Returns 0, or -EBUSY if the CODEC's DAPM context is not yet available.
 */
static int arizona_haptics_play(struct input_dev *input, void *data,
				struct ff_effect *effect)
{
	struct arizona_haptics *haptics = input_get_drvdata(input);
	struct arizona *arizona = haptics->arizona;

	if (!arizona->dapm) {
		dev_err(arizona->dev, "No DAPM context\n");
		return -EBUSY;
	}

	if (effect->u.rumble.strong_magnitude) {
		/* Scale the magnitude into the range the device supports */
		if (arizona->pdata.hap_act) {
			/* 16-bit magnitude -> 7-bit intensity; direction
			 * selects the upper half of the range.
			 * NOTE(review): assumes signed-phase actuator mode —
			 * confirm against pdata documentation. */
			haptics->intensity =
				effect->u.rumble.strong_magnitude >> 9;
			if (effect->direction < 0x8000)
				haptics->intensity += 0x7f;
		} else {
			/* 16-bit magnitude -> 8-bit intensity. */
			haptics->intensity =
				effect->u.rumble.strong_magnitude >> 8;
		}
	} else {
		haptics->intensity = 0;
	}

	schedule_work(&haptics->work);

	return 0;
}
/* input close(): stop any pending work and mute the HAPTICS output pin. */
static void arizona_haptics_close(struct input_dev *input)
{
	struct arizona_haptics *haptics = input_get_drvdata(input);
	struct snd_soc_dapm_context *dapm = haptics->arizona->dapm;

	/* Ensure arizona_haptics_work() is not mid-flight before teardown. */
	cancel_work_sync(&haptics->work);

	if (dapm)
		snd_soc_component_disable_pin(snd_soc_dapm_to_component(dapm),
					      "HAPTICS");
}
/*
 * Probe: configure the actuator type from platform data and register a
 * memless force-feedback input device.  All allocations are devm-managed.
 */
static int arizona_haptics_probe(struct platform_device *pdev)
{
	struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
	struct arizona_haptics *haptics;
	int ret;

	haptics = devm_kzalloc(&pdev->dev, sizeof(*haptics), GFP_KERNEL);
	if (!haptics)
		return -ENOMEM;

	haptics->arizona = arizona;

	/* Select the actuator type (ERM/LRA) requested by platform data. */
	ret = regmap_update_bits(arizona->regmap, ARIZONA_HAPTICS_CONTROL_1,
				 ARIZONA_HAP_ACT, arizona->pdata.hap_act);
	if (ret != 0) {
		dev_err(arizona->dev, "Failed to set haptics actuator: %d\n",
			ret);
		return ret;
	}

	INIT_WORK(&haptics->work, arizona_haptics_work);

	haptics->input_dev = devm_input_allocate_device(&pdev->dev);
	if (!haptics->input_dev) {
		dev_err(arizona->dev, "Failed to allocate input device\n");
		return -ENOMEM;
	}

	input_set_drvdata(haptics->input_dev, haptics);

	haptics->input_dev->name = "arizona:haptics";
	haptics->input_dev->close = arizona_haptics_close;
	__set_bit(FF_RUMBLE, haptics->input_dev->ffbit);

	ret = input_ff_create_memless(haptics->input_dev, NULL,
				      arizona_haptics_play);
	if (ret < 0) {
		dev_err(arizona->dev, "input_ff_create_memless() failed: %d\n",
			ret);
		return ret;
	}

	ret = input_register_device(haptics->input_dev);
	if (ret < 0) {
		dev_err(arizona->dev, "couldn't register input device: %d\n",
			ret);
		return ret;
	}

	return 0;
}
static struct platform_driver arizona_haptics_driver = {
.probe = arizona_haptics_probe,
.driver = {
.name = "arizona-haptics",
},
};
module_platform_driver(arizona_haptics_driver);
MODULE_ALIAS("platform:arizona-haptics");
MODULE_DESCRIPTION("Arizona haptics driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Brown <[email protected]>");
|
linux-master
|
drivers/input/misc/arizona-haptics.c
|
// SPDX-License-Identifier: GPL-2.0
//
// Driver for TPS65219 Push Button
//
// Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/tps65219.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
struct tps65219_pwrbutton {
struct device *dev;
struct input_dev *idev;
char phys[32];
};
/* Threaded IRQ: button pressed — report KEY_POWER down and flag a wakeup. */
static irqreturn_t tps65219_pb_push_irq(int irq, void *_pwr)
{
	struct tps65219_pwrbutton *button = _pwr;

	input_report_key(button->idev, KEY_POWER, 1);
	pm_wakeup_event(button->dev, 0);
	input_sync(button->idev);

	return IRQ_HANDLED;
}
/* Threaded IRQ: button released — report KEY_POWER up. */
static irqreturn_t tps65219_pb_release_irq(int irq, void *_pwr)
{
	struct tps65219_pwrbutton *button = _pwr;

	input_report_key(button->idev, KEY_POWER, 0);
	input_sync(button->idev);

	return IRQ_HANDLED;
}
static int tps65219_pb_probe(struct platform_device *pdev)
{
struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct tps65219_pwrbutton *pwr;
struct input_dev *idev;
int error;
int push_irq;
int release_irq;
pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
if (!pwr)
return -ENOMEM;
idev = devm_input_allocate_device(dev);
if (!idev)
return -ENOMEM;
idev->name = pdev->name;
snprintf(pwr->phys, sizeof(pwr->phys), "%s/input0",
pdev->name);
idev->phys = pwr->phys;
idev->id.bustype = BUS_I2C;
input_set_capability(idev, EV_KEY, KEY_POWER);
pwr->dev = dev;
pwr->idev = idev;
device_init_wakeup(dev, true);
push_irq = platform_get_irq(pdev, 0);
if (push_irq < 0)
return -EINVAL;
release_irq = platform_get_irq(pdev, 1);
if (release_irq < 0)
return -EINVAL;
error = devm_request_threaded_irq(dev, push_irq, NULL,
tps65219_pb_push_irq,
IRQF_ONESHOT,
dev->init_name, pwr);
if (error) {
dev_err(dev, "failed to request push IRQ #%d: %d\n", push_irq,
error);
return error;
}
error = devm_request_threaded_irq(dev, release_irq, NULL,
tps65219_pb_release_irq,
IRQF_ONESHOT,
dev->init_name, pwr);
if (error) {
dev_err(dev, "failed to request release IRQ #%d: %d\n",
release_irq, error);
return error;
}
error = input_register_device(idev);
if (error) {
dev_err(dev, "Can't register power button: %d\n", error);
return error;
}
/* Enable interrupts for the pushbutton */
regmap_clear_bits(tps->regmap, TPS65219_REG_MASK_CONFIG,
TPS65219_REG_MASK_INT_FOR_PB_MASK);
/* Set PB/EN/VSENSE pin to be a pushbutton */
regmap_update_bits(tps->regmap, TPS65219_REG_MFP_2_CONFIG,
TPS65219_MFP_2_EN_PB_VSENSE_MASK, TPS65219_MFP_2_PB);
return 0;
}
/*
 * Remove: re-mask the pushbutton interrupt in the PMIC.  The input device
 * and IRQs are devm-managed and released automatically afterwards.
 */
static void tps65219_pb_remove(struct platform_device *pdev)
{
	struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
	int ret;

	/* Disable interrupt for the pushbutton */
	ret = regmap_set_bits(tps->regmap, TPS65219_REG_MASK_CONFIG,
			      TPS65219_REG_MASK_INT_FOR_PB_MASK);
	if (ret)
		dev_warn(&pdev->dev, "Failed to disable irq (%pe)\n", ERR_PTR(ret));
}
static const struct platform_device_id tps65219_pwrbtn_id_table[] = {
{ "tps65219-pwrbutton", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, tps65219_pwrbtn_id_table);
static struct platform_driver tps65219_pb_driver = {
	.probe = tps65219_pb_probe,
	.remove_new = tps65219_pb_remove,
	.driver = {
		.name = "tps65219_pwrbutton",
	},
	.id_table = tps65219_pwrbtn_id_table,
};
module_platform_driver(tps65219_pb_driver);

MODULE_DESCRIPTION("TPS65219 Power Button");
MODULE_LICENSE("GPL");
/* Fix vs. original: the author string was missing the closing '>'. */
MODULE_AUTHOR("Markus Schneider-Pargmann <[email protected]>");
|
linux-master
|
drivers/input/misc/tps65219-pwrbutton.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for PC-speaker like devices found on various Sparc systems.
*
* Copyright (c) 2002 Vojtech Pavlik
* Copyright (c) 2002, 2006, 2008 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/io.h>
MODULE_AUTHOR("David S. Miller <[email protected]>");
MODULE_DESCRIPTION("Sparc Speaker beeper driver");
MODULE_LICENSE("GPL");
struct grover_beep_info {
void __iomem *freq_regs;
void __iomem *enable_reg;
};
struct bbc_beep_info {
u32 clock_freq;
void __iomem *regs;
};
struct sparcspkr_state {
const char *name;
int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value);
spinlock_t lock;
struct input_dev *input_dev;
union {
struct grover_beep_info grover;
struct bbc_beep_info bbc;
} u;
};
/*
 * Map a timer count to the BBC beeper's power-of-two divisor register
 * value, clamped to the hardware range [1<<10, 1<<18].  Returns 0 for a
 * zero count (beeper off).
 */
static u32 bbc_count_to_reg(struct bbc_beep_info *info, unsigned int count)
{
	u32 val, clock_freq = info->clock_freq;
	int i;

	if (!count)
		return 0;

	/* Clamp below/above the representable divisor range. */
	if (count <= clock_freq >> 20)
		return 1 << 18;

	if (count >= clock_freq >> 12)
		return 1 << 10;

	/* Walk down from the largest divisor to the closest fit. */
	val = 1 << 18;
	for (i = 19; i >= 11; i--) {
		val >>= 1;
		if (count <= clock_freq >> i)
			break;
	}

	return val;
}
/*
 * EV_SND handler for the BBC beeper.  Returns 0 on success, -1 for
 * unsupported event types/codes (input-core convention).
 */
static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
{
	struct sparcspkr_state *state = dev_get_drvdata(dev->dev.parent);
	struct bbc_beep_info *info = &state->u.bbc;
	unsigned int count = 0;
	unsigned long flags;

	if (type != EV_SND)
		return -1;

	switch (code) {
	case SND_BELL: if (value) value = 1000;
		/* fall through: a bell is simply a 1 kHz tone */
	case SND_TONE: break;
	default: return -1;
	}

	/* Convert frequency in Hz to a 1.193182 MHz timer count. */
	if (value > 20 && value < 32767)
		count = 1193182 / value;

	count = bbc_count_to_reg(info, count);

	spin_lock_irqsave(&state->lock, flags);

	if (count) {
		/* Enable and program the divisor across the byte registers.
		 * NOTE(review): offsets follow the BBC beep block layout —
		 * confirm against hardware documentation. */
		sbus_writeb(0x01,                 info->regs + 0);
		sbus_writeb(0x00,                 info->regs + 2);
		sbus_writeb((count >> 16) & 0xff, info->regs + 3);
		sbus_writeb((count >> 8) & 0xff,  info->regs + 4);
		sbus_writeb(0x00,                 info->regs + 5);
	} else {
		/* Beeper off. */
		sbus_writeb(0x00,                 info->regs + 0);
	}

	spin_unlock_irqrestore(&state->lock, flags);

	return 0;
}
/*
 * EV_SND handler for the Grover (8254-style) beeper.  Returns 0 on
 * success, -1 for unsupported event types/codes.
 */
static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
{
	struct sparcspkr_state *state = dev_get_drvdata(dev->dev.parent);
	struct grover_beep_info *info = &state->u.grover;
	unsigned int count = 0;
	unsigned long flags;

	if (type != EV_SND)
		return -1;

	switch (code) {
	case SND_BELL: if (value) value = 1000;
		/* fall through: a bell is simply a 1 kHz tone */
	case SND_TONE: break;
	default: return -1;
	}

	/* Convert frequency in Hz to a 1.193182 MHz timer count. */
	if (value > 20 && value < 32767)
		count = 1193182 / value;

	spin_lock_irqsave(&state->lock, flags);

	if (count) {
		/* enable counter 2 */
		sbus_writeb(sbus_readb(info->enable_reg) | 3, info->enable_reg);
		/* set command for counter 2, 2 byte write */
		sbus_writeb(0xB6, info->freq_regs + 1);
		/* select desired HZ */
		sbus_writeb(count & 0xff, info->freq_regs + 0);
		sbus_writeb((count >> 8) & 0xff, info->freq_regs + 0);
	} else {
		/* disable counter 2 */
		sbus_writeb(sbus_readb(info->enable_reg) & 0xFC, info->enable_reg);
	}

	spin_unlock_irqrestore(&state->lock, flags);

	return 0;
}
/*
 * Common probe helper: allocate and register the beeper input device for
 * either hardware variant.  The variant-specific event handler and name
 * come from the already-initialized sparcspkr_state in drvdata.
 */
static int sparcspkr_probe(struct device *dev)
{
	struct sparcspkr_state *state = dev_get_drvdata(dev);
	struct input_dev *input_dev;
	int error;

	input_dev = input_allocate_device();
	if (!input_dev)
		return -ENOMEM;

	input_dev->name = state->name;
	input_dev->phys = "sparc/input0";
	input_dev->id.bustype = BUS_ISA;
	input_dev->id.vendor = 0x001f;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0100;

	input_dev->dev.parent = dev;

	/* Beeper-only device: sound events, bell and tone. */
	input_dev->evbit[0] = BIT_MASK(EV_SND);
	input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);

	input_dev->event = state->event;

	error = input_register_device(input_dev);
	if (error) {
		input_free_device(input_dev);
		return error;
	}

	state->input_dev = input_dev;

	return 0;
}
/* Shutdown hook: silence the speaker so it does not beep across reboot. */
static void sparcspkr_shutdown(struct platform_device *dev)
{
	struct sparcspkr_state *state = platform_get_drvdata(dev);

	/* A zero-valued SND_BELL event turns the beeper off. */
	state->event(state->input_dev, EV_SND, SND_BELL, 0);
}
/*
 * Probe for the BBC beeper variant: read the system clock frequency from
 * the device tree root, map the beep registers, and register the common
 * input device.  Uses goto-based unwind for the manual allocations.
 */
static int bbc_beep_probe(struct platform_device *op)
{
	struct sparcspkr_state *state;
	struct bbc_beep_info *info;
	struct device_node *dp;
	int err = -ENOMEM;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		goto out_err;

	state->name = "Sparc BBC Speaker";
	state->event = bbc_spkr_event;
	spin_lock_init(&state->lock);

	/* The divisor computation needs the root node's clock-frequency. */
	dp = of_find_node_by_path("/");
	err = -ENODEV;
	if (!dp)
		goto out_free;

	info = &state->u.bbc;
	info->clock_freq = of_getintprop_default(dp, "clock-frequency", 0);
	of_node_put(dp);
	if (!info->clock_freq)
		goto out_free;

	info->regs = of_ioremap(&op->resource[0], 0, 6, "bbc beep");
	if (!info->regs)
		goto out_free;

	platform_set_drvdata(op, state);

	err = sparcspkr_probe(&op->dev);
	if (err)
		goto out_clear_drvdata;

	return 0;

out_clear_drvdata:
	of_iounmap(&op->resource[0], info->regs, 6);

out_free:
	kfree(state);
out_err:
	return err;
}
/* Remove the BBC variant: silence, unregister, unmap, free. */
static int bbc_remove(struct platform_device *op)
{
	struct sparcspkr_state *state = platform_get_drvdata(op);
	struct input_dev *input_dev = state->input_dev;
	struct bbc_beep_info *info = &state->u.bbc;

	/* turn off the speaker */
	state->event(input_dev, EV_SND, SND_BELL, 0);

	input_unregister_device(input_dev);

	of_iounmap(&op->resource[0], info->regs, 6);

	kfree(state);

	return 0;
}
static const struct of_device_id bbc_beep_match[] = {
{
.name = "beep",
.compatible = "SUNW,bbc-beep",
},
{},
};
MODULE_DEVICE_TABLE(of, bbc_beep_match);
static struct platform_driver bbc_beep_driver = {
.driver = {
.name = "bbcbeep",
.of_match_table = bbc_beep_match,
},
.probe = bbc_beep_probe,
.remove = bbc_remove,
.shutdown = sparcspkr_shutdown,
};
/*
 * Probe for the Grover beeper variant: map the frequency and enable
 * registers, then register the common input device.  Goto-based unwind
 * mirrors bbc_beep_probe().
 */
static int grover_beep_probe(struct platform_device *op)
{
	struct sparcspkr_state *state;
	struct grover_beep_info *info;
	int err = -ENOMEM;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		goto out_err;

	state->name = "Sparc Grover Speaker";
	state->event = grover_spkr_event;
	spin_lock_init(&state->lock);

	info = &state->u.grover;
	info->freq_regs = of_ioremap(&op->resource[2], 0, 2, "grover beep freq");
	if (!info->freq_regs)
		goto out_free;

	info->enable_reg = of_ioremap(&op->resource[3], 0, 1, "grover beep enable");
	if (!info->enable_reg)
		goto out_unmap_freq_regs;

	platform_set_drvdata(op, state);

	err = sparcspkr_probe(&op->dev);
	if (err)
		goto out_clear_drvdata;

	return 0;

out_clear_drvdata:
	of_iounmap(&op->resource[3], info->enable_reg, 1);

out_unmap_freq_regs:
	of_iounmap(&op->resource[2], info->freq_regs, 2);
out_free:
	kfree(state);
out_err:
	return err;
}
/* Remove the Grover variant: silence, unregister, unmap both regions, free. */
static int grover_remove(struct platform_device *op)
{
	struct sparcspkr_state *state = platform_get_drvdata(op);
	struct grover_beep_info *info = &state->u.grover;
	struct input_dev *input_dev = state->input_dev;

	/* turn off the speaker */
	state->event(input_dev, EV_SND, SND_BELL, 0);

	input_unregister_device(input_dev);

	of_iounmap(&op->resource[3], info->enable_reg, 1);
	of_iounmap(&op->resource[2], info->freq_regs, 2);

	kfree(state);

	return 0;
}
static const struct of_device_id grover_beep_match[] = {
{
.name = "beep",
.compatible = "SUNW,smbus-beep",
},
{},
};
MODULE_DEVICE_TABLE(of, grover_beep_match);
static struct platform_driver grover_beep_driver = {
.driver = {
.name = "groverbeep",
.of_match_table = grover_beep_match,
},
.probe = grover_beep_probe,
.remove = grover_remove,
.shutdown = sparcspkr_shutdown,
};
static struct platform_driver * const drivers[] = {
&bbc_beep_driver,
&grover_beep_driver,
};
static int __init sparcspkr_init(void)
{
return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
static void __exit sparcspkr_exit(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(sparcspkr_init);
module_exit(sparcspkr_exit);
|
linux-master
|
drivers/input/misc/sparcspkr.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SGI Volume Button interface driver
*
* Copyright (C) 2008 Thomas Bogendoerfer <[email protected]>
*/
#include <linux/input.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#ifdef CONFIG_SGI_IP22
#include <asm/sgi/ioc.h>
static inline u8 button_status(void)
{
u8 status;
status = readb(&sgioc->panel) ^ 0xa0;
return ((status & 0x80) >> 6) | ((status & 0x20) >> 5);
}
#endif
#ifdef CONFIG_SGI_IP32
#include <asm/ip32/mace.h>
static inline u8 button_status(void)
{
u64 status;
status = readq(&mace->perif.audio.control);
writeq(status & ~(3U << 23), &mace->perif.audio.control);
return (status >> 23) & 3;
}
#endif
#define BUTTONS_POLL_INTERVAL 30 /* msec */
#define BUTTONS_COUNT_THRESHOLD 3
static const unsigned short sgi_map[] = {
KEY_VOLUMEDOWN,
KEY_VOLUMEUP
};
struct buttons_dev {
unsigned short keymap[ARRAY_SIZE(sgi_map)];
int count[ARRAY_SIZE(sgi_map)];
};
/*
 * Poll callback: debounce the two volume buttons.  A key-down is reported
 * only once a button has been seen pressed for BUTTONS_COUNT_THRESHOLD
 * consecutive polls; the release is reported when it clears again.
 */
static void handle_buttons(struct input_dev *input)
{
	struct buttons_dev *bdev = input_get_drvdata(input);
	u8 status;
	int i;

	status = button_status();

	for (i = 0; i < ARRAY_SIZE(bdev->keymap); i++) {
		if (status & (1U << i)) {
			/* Report exactly once when the threshold is crossed. */
			if (++bdev->count[i] == BUTTONS_COUNT_THRESHOLD) {
				input_event(input, EV_MSC, MSC_SCAN, i);
				input_report_key(input, bdev->keymap[i], 1);
				input_sync(input);
			}
		} else {
			/* Only emit a release if a press was ever reported. */
			if (bdev->count[i] >= BUTTONS_COUNT_THRESHOLD) {
				input_event(input, EV_MSC, MSC_SCAN, i);
				input_report_key(input, bdev->keymap[i], 0);
				input_sync(input);
			}
			bdev->count[i] = 0;
		}
	}
}
/*
 * Probe: register a polled input device for the SGI volume buttons with a
 * user-modifiable keymap (keycode/keycodemax/keycodesize).
 */
static int sgi_buttons_probe(struct platform_device *pdev)
{
	struct buttons_dev *bdev;
	struct input_dev *input;
	int error, i;

	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	input = devm_input_allocate_device(&pdev->dev);
	if (!input)
		return -ENOMEM;

	/* Private copy so EVIOCSKEYCODE can remap without touching sgi_map. */
	memcpy(bdev->keymap, sgi_map, sizeof(bdev->keymap));

	input_set_drvdata(input, bdev);

	input->name = "SGI buttons";
	input->phys = "sgi/input0";
	input->id.bustype = BUS_HOST;

	input->keycode = bdev->keymap;
	input->keycodemax = ARRAY_SIZE(bdev->keymap);
	input->keycodesize = sizeof(unsigned short);
	input_set_capability(input, EV_MSC, MSC_SCAN);

	__set_bit(EV_KEY, input->evbit);
	for (i = 0; i < ARRAY_SIZE(sgi_map); i++)
		__set_bit(bdev->keymap[i], input->keybit);
	__clear_bit(KEY_RESERVED, input->keybit);

	error = input_setup_polling(input, handle_buttons);
	if (error)
		return error;

	input_set_poll_interval(input, BUTTONS_POLL_INTERVAL);

	error = input_register_device(input);
	if (error)
		return error;

	return 0;
}
static struct platform_driver sgi_buttons_driver = {
.probe = sgi_buttons_probe,
.driver = {
.name = "sgibtns",
},
};
module_platform_driver(sgi_buttons_driver);
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/sgi_btns.c
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Onkey driver for Actions Semi ATC260x PMICs.
*
* Copyright (c) 2020 Cristian Ciocaltea <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mfd/atc260x/core.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
/* <2s for short press, >2s for long press */
#define KEY_PRESS_TIME_SEC 2
/* Driver internals */
enum atc260x_onkey_reset_status {
KEY_RESET_HW_DEFAULT,
KEY_RESET_DISABLED,
KEY_RESET_USER_SEL,
};
struct atc260x_onkey_params {
u32 reg_int_ctl;
u32 kdwn_state_bm;
u32 long_int_pnd_bm;
u32 short_int_pnd_bm;
u32 kdwn_int_pnd_bm;
u32 press_int_en_bm;
u32 kdwn_int_en_bm;
u32 press_time_bm;
u32 reset_en_bm;
u32 reset_time_bm;
};
struct atc260x_onkey {
struct atc260x *atc260x;
const struct atc260x_onkey_params *params;
struct input_dev *input_dev;
struct delayed_work work;
int irq;
};
static const struct atc260x_onkey_params atc2603c_onkey_params = {
.reg_int_ctl = ATC2603C_PMU_SYS_CTL2,
.long_int_pnd_bm = ATC2603C_PMU_SYS_CTL2_ONOFF_LONG_PRESS,
.short_int_pnd_bm = ATC2603C_PMU_SYS_CTL2_ONOFF_SHORT_PRESS,
.kdwn_int_pnd_bm = ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_PD,
.press_int_en_bm = ATC2603C_PMU_SYS_CTL2_ONOFF_INT_EN,
.kdwn_int_en_bm = ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_INT_EN,
.kdwn_state_bm = ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS,
.press_time_bm = ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_TIME,
.reset_en_bm = ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_RESET_EN,
.reset_time_bm = ATC2603C_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL,
};
static const struct atc260x_onkey_params atc2609a_onkey_params = {
.reg_int_ctl = ATC2609A_PMU_SYS_CTL2,
.long_int_pnd_bm = ATC2609A_PMU_SYS_CTL2_ONOFF_LONG_PRESS,
.short_int_pnd_bm = ATC2609A_PMU_SYS_CTL2_ONOFF_SHORT_PRESS,
.kdwn_int_pnd_bm = ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_PD,
.press_int_en_bm = ATC2609A_PMU_SYS_CTL2_ONOFF_LSP_INT_EN,
.kdwn_int_en_bm = ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_INT_EN,
.kdwn_state_bm = ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS,
.press_time_bm = ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_TIME,
.reset_en_bm = ATC2609A_PMU_SYS_CTL2_ONOFF_RESET_EN,
.reset_time_bm = ATC2609A_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL,
};
/*
 * Program the On/Off key control register: clear pending press events,
 * enable press interrupts, set the short/long press time, and configure
 * the long-press hardware reset according to @reset_status.
 * @reset_time and @press_time must already be shifted into field position.
 */
static int atc2603x_onkey_hw_init(struct atc260x_onkey *onkey,
				  enum atc260x_onkey_reset_status reset_status,
				  u32 reset_time, u32 press_time)
{
	u32 reg_bm, reg_val;

	/* Pending bits are write-to-clear, so set them in the value too. */
	reg_bm = onkey->params->long_int_pnd_bm |
		 onkey->params->short_int_pnd_bm |
		 onkey->params->kdwn_int_pnd_bm |
		 onkey->params->press_int_en_bm |
		 onkey->params->kdwn_int_en_bm;

	reg_val = reg_bm | press_time;
	reg_bm |= onkey->params->press_time_bm;

	if (reset_status == KEY_RESET_DISABLED) {
		/* Mask covers reset_en but the value leaves it 0: disabled. */
		reg_bm |= onkey->params->reset_en_bm;
	} else if (reset_status == KEY_RESET_USER_SEL) {
		reg_bm |= onkey->params->reset_en_bm |
			  onkey->params->reset_time_bm;
		reg_val |= onkey->params->reset_en_bm | reset_time;
	}

	return regmap_update_bits(onkey->atc260x->regmap,
				  onkey->params->reg_int_ctl, reg_bm, reg_val);
}
/*
 * Poll the key-down status bit.  While the key is held, keep re-polling
 * via delayed work; once released, report KEY_POWER up and re-arm the
 * press interrupts (which the IRQ handler disabled).
 */
static void atc260x_onkey_query(struct atc260x_onkey *onkey)
{
	u32 reg_bits;
	int ret, key_down;

	ret = regmap_read(onkey->atc260x->regmap,
			  onkey->params->reg_int_ctl, &key_down);
	if (ret) {
		/* Assume still pressed so we retry instead of losing state. */
		key_down = 1;
		dev_err(onkey->atc260x->dev,
			"Failed to read onkey status: %d\n", ret);
	} else {
		key_down &= onkey->params->kdwn_state_bm;
	}

	/*
	 * The hardware generates interrupt only when the onkey pin is
	 * asserted. Hence, the deassertion of the pin is simulated through
	 * work queue.
	 */
	if (key_down) {
		schedule_delayed_work(&onkey->work, msecs_to_jiffies(200));
		return;
	}

	/*
	 * The key-down status bit is cleared when the On/Off button
	 * is released.
	 */
	input_report_key(onkey->input_dev, KEY_POWER, 0);
	input_sync(onkey->input_dev);

	reg_bits = onkey->params->long_int_pnd_bm |
		   onkey->params->short_int_pnd_bm |
		   onkey->params->kdwn_int_pnd_bm |
		   onkey->params->press_int_en_bm |
		   onkey->params->kdwn_int_en_bm;

	/* Clear key press pending events and enable key press interrupts. */
	regmap_update_bits(onkey->atc260x->regmap, onkey->params->reg_int_ctl,
			   reg_bits, reg_bits);
}
/* Delayed-work trampoline: re-poll the On/Off key state. */
static void atc260x_onkey_work(struct work_struct *work)
{
	struct atc260x_onkey *priv = container_of(work, struct atc260x_onkey,
						  work.work);

	atc260x_onkey_query(priv);
}
/*
 * Threaded IRQ: key pressed.  Mask further press interrupts (the release
 * is detected by polling in atc260x_onkey_query()), report KEY_POWER
 * down, then start the release-polling cycle.
 */
static irqreturn_t atc260x_onkey_irq(int irq, void *data)
{
	struct atc260x_onkey *onkey = data;
	int ret;

	/* Disable key press interrupts. */
	ret = regmap_update_bits(onkey->atc260x->regmap,
				 onkey->params->reg_int_ctl,
				 onkey->params->press_int_en_bm |
				 onkey->params->kdwn_int_en_bm, 0);
	if (ret)
		dev_err(onkey->atc260x->dev,
			"Failed to disable interrupts: %d\n", ret);

	input_report_key(onkey->input_dev, KEY_POWER, 1);
	input_sync(onkey->input_dev);

	atc260x_onkey_query(onkey);

	return IRQ_HANDLED;
}
/* input open(): re-enable the IRQ that probe left disabled. */
static int atc260x_onkey_open(struct input_dev *dev)
{
	struct atc260x_onkey *priv = input_get_drvdata(dev);

	enable_irq(priv->irq);

	return 0;
}
/* input close(): mask the IRQ, then flush any in-flight release polling. */
static void atc260x_onkey_close(struct input_dev *dev)
{
	struct atc260x_onkey *priv = input_get_drvdata(dev);

	disable_irq(priv->irq);
	cancel_delayed_work_sync(&priv->work);
}
/*
 * Probe: parse the optional "reset-time-sec" property, select the register
 * layout for the PMIC variant, register the KEY_POWER input device, and
 * program the hardware.  The IRQ stays disabled until open().
 */
static int atc260x_onkey_probe(struct platform_device *pdev)
{
	struct atc260x *atc260x = dev_get_drvdata(pdev->dev.parent);
	struct atc260x_onkey *onkey;
	struct input_dev *input_dev;
	enum atc260x_onkey_reset_status reset_status;
	u32 press_time = KEY_PRESS_TIME_SEC, reset_time = 0;
	int val, error;

	onkey = devm_kzalloc(&pdev->dev, sizeof(*onkey), GFP_KERNEL);
	if (!onkey)
		return -ENOMEM;

	/*
	 * "reset-time-sec": absent -> keep hardware default; 0 -> disable
	 * long-press reset; 6..12 -> enable, encoded in 2 s steps above 6 s.
	 */
	error = device_property_read_u32(pdev->dev.parent,
					 "reset-time-sec", &val);
	if (error) {
		reset_status = KEY_RESET_HW_DEFAULT;
	} else if (val) {
		if (val < 6 || val > 12) {
			dev_err(&pdev->dev, "reset-time-sec out of range\n");
			return -EINVAL;
		}

		reset_status = KEY_RESET_USER_SEL;
		reset_time = (val - 6) / 2;
	} else {
		reset_status = KEY_RESET_DISABLED;
		dev_dbg(&pdev->dev, "Disabled reset on long-press\n");
	}

	/* Pick the register layout and pre-shift the time fields. */
	switch (atc260x->ic_type) {
	case ATC2603C:
		onkey->params = &atc2603c_onkey_params;
		press_time = FIELD_PREP(ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_TIME,
					press_time);
		reset_time = FIELD_PREP(ATC2603C_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL,
					reset_time);
		break;
	case ATC2609A:
		onkey->params = &atc2609a_onkey_params;
		press_time = FIELD_PREP(ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_TIME,
					press_time);
		reset_time = FIELD_PREP(ATC2609A_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL,
					reset_time);
		break;
	default:
		dev_err(&pdev->dev,
			"OnKey not supported for ATC260x PMIC type: %u\n",
			atc260x->ic_type);
		return -EINVAL;
	}

	input_dev = devm_input_allocate_device(&pdev->dev);
	if (!input_dev) {
		dev_err(&pdev->dev, "Failed to allocate input device\n");
		return -ENOMEM;
	}

	onkey->input_dev = input_dev;
	onkey->atc260x = atc260x;

	input_dev->name = "atc260x-onkey";
	input_dev->phys = "atc260x-onkey/input0";
	input_dev->open = atc260x_onkey_open;
	input_dev->close = atc260x_onkey_close;

	input_set_capability(input_dev, EV_KEY, KEY_POWER);
	input_set_drvdata(input_dev, onkey);

	INIT_DELAYED_WORK(&onkey->work, atc260x_onkey_work);

	onkey->irq = platform_get_irq(pdev, 0);
	if (onkey->irq < 0)
		return onkey->irq;

	error = devm_request_threaded_irq(&pdev->dev, onkey->irq, NULL,
					  atc260x_onkey_irq, IRQF_ONESHOT,
					  dev_name(&pdev->dev), onkey);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to register IRQ %d: %d\n", onkey->irq, error);
		return error;
	}

	/* Keep IRQ disabled until atc260x_onkey_open() is called. */
	disable_irq(onkey->irq);

	error = input_register_device(input_dev);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to register input device: %d\n", error);
		return error;
	}

	error = atc2603x_onkey_hw_init(onkey, reset_status,
				       reset_time, press_time);
	if (error)
		return error;

	device_init_wakeup(&pdev->dev, true);

	return 0;
}
static struct platform_driver atc260x_onkey_driver = {
.probe = atc260x_onkey_probe,
.driver = {
.name = "atc260x-onkey",
},
};
module_platform_driver(atc260x_onkey_driver);
MODULE_DESCRIPTION("Onkey driver for ATC260x PMICs");
MODULE_AUTHOR("Cristian Ciocaltea <[email protected]>");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/atc260x-onkey.c
|
/*
* Xen para-virtual input device
*
* Copyright (C) 2005 Anthony Liguori <[email protected]>
* Copyright (C) 2006-2008 Red Hat, Inc., Markus Armbruster <[email protected]>
*
* Based on linux/drivers/input/mouse/sermouse.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/slab.h>
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/kbdif.h>
#include <xen/xenbus.h>
#include <xen/platform_pci.h>
struct xenkbd_info {
struct input_dev *kbd;
struct input_dev *ptr;
struct input_dev *mtouch;
struct xenkbd_page *page;
int gref;
int irq;
struct xenbus_device *xbdev;
char phys[32];
/* current MT slot/contact ID we are injecting events in */
int mtouch_cur_contact_id;
};
enum { KPARAM_X, KPARAM_Y, KPARAM_CNT };
static int ptr_size[KPARAM_CNT] = { XENFB_WIDTH, XENFB_HEIGHT };
module_param_array(ptr_size, int, NULL, 0444);
MODULE_PARM_DESC(ptr_size,
"Pointing device width, height in pixels (default 800,600)");
static void xenkbd_remove(struct xenbus_device *);
static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
static void xenkbd_disconnect_backend(struct xenkbd_info *);
/*
* Note: if you need to send out events, see xenfb_do_update() for how
* to do that.
*/
/* Forward a relative-motion ring event to the pointing device, if any. */
static void xenkbd_handle_motion_event(struct xenkbd_info *info,
				       struct xenkbd_motion *motion)
{
	struct input_dev *ptr = info->ptr;

	/* Backend may have disabled the pointer device entirely. */
	if (unlikely(!ptr))
		return;

	input_report_rel(ptr, REL_X, motion->rel_x);
	input_report_rel(ptr, REL_Y, motion->rel_y);
	if (motion->rel_z)
		input_report_rel(ptr, REL_WHEEL, -motion->rel_z);
	input_sync(ptr);
}
/* Forward an absolute-position ring event to the pointing device, if any. */
static void xenkbd_handle_position_event(struct xenkbd_info *info,
					 struct xenkbd_position *pos)
{
	struct input_dev *ptr = info->ptr;

	/* Backend may have disabled the pointer device entirely. */
	if (unlikely(!ptr))
		return;

	input_report_abs(ptr, ABS_X, pos->abs_x);
	input_report_abs(ptr, ABS_Y, pos->abs_y);
	if (pos->rel_z)
		input_report_rel(ptr, REL_WHEEL, -pos->rel_z);
	input_sync(ptr);
}
/*
 * Route a key ring event to whichever device (pointer or keyboard) claims
 * the keycode.
 *
 * Fix vs. original: probe can leave info->ptr and/or info->kbd NULL when
 * the backend disables those devices, but the original dereferenced
 * info->ptr->keybit / info->kbd->keybit unconditionally and only checked
 * the chosen dev for NULL afterwards.  Check the device pointers before
 * touching their keybit maps; the late NULL check becomes unnecessary.
 */
static void xenkbd_handle_key_event(struct xenkbd_info *info,
				    struct xenkbd_key *key)
{
	struct input_dev *dev;
	int value = key->pressed;

	if (info->ptr && test_bit(key->keycode, info->ptr->keybit)) {
		dev = info->ptr;
	} else if (info->kbd && test_bit(key->keycode, info->kbd->keybit)) {
		dev = info->kbd;
		if (key->pressed && test_bit(key->keycode, info->kbd->key))
			value = 2; /* Mark as autorepeat */
	} else {
		pr_warn("unhandled keycode 0x%x\n", key->keycode);
		return;
	}

	input_event(dev, EV_KEY, key->keycode, value);
	input_sync(dev);
}
/*
 * Forward a multi-touch ring event to the MT device, if any.  Slot
 * selection is cached in mtouch_cur_contact_id so consecutive events for
 * the same contact do not re-emit ABS_MT_SLOT.
 */
static void xenkbd_handle_mt_event(struct xenkbd_info *info,
				   struct xenkbd_mtouch *mtouch)
{
	if (unlikely(!info->mtouch))
		return;

	if (mtouch->contact_id != info->mtouch_cur_contact_id) {
		info->mtouch_cur_contact_id = mtouch->contact_id;
		input_mt_slot(info->mtouch, mtouch->contact_id);
	}

	switch (mtouch->event_type) {
	case XENKBD_MT_EV_DOWN:
		input_mt_report_slot_state(info->mtouch, MT_TOOL_FINGER, true);
		fallthrough;

	case XENKBD_MT_EV_MOTION:
		input_report_abs(info->mtouch, ABS_MT_POSITION_X,
				 mtouch->u.pos.abs_x);
		input_report_abs(info->mtouch, ABS_MT_POSITION_Y,
				 mtouch->u.pos.abs_y);
		break;

	case XENKBD_MT_EV_SHAPE:
		input_report_abs(info->mtouch, ABS_MT_TOUCH_MAJOR,
				 mtouch->u.shape.major);
		input_report_abs(info->mtouch, ABS_MT_TOUCH_MINOR,
				 mtouch->u.shape.minor);
		break;

	case XENKBD_MT_EV_ORIENT:
		input_report_abs(info->mtouch, ABS_MT_ORIENTATION,
				 mtouch->u.orientation);
		break;

	case XENKBD_MT_EV_UP:
		input_mt_report_slot_inactive(info->mtouch);
		break;

	case XENKBD_MT_EV_SYN:
		/* Frame boundary: flush the accumulated contacts. */
		input_mt_sync_frame(info->mtouch);
		input_sync(info->mtouch);
		break;
	}
}
/* Dispatch one ring entry to the handler for its event type. */
static void xenkbd_handle_event(struct xenkbd_info *info,
				union xenkbd_in_event *event)
{
	switch (event->type) {
	case XENKBD_TYPE_KEY:
		xenkbd_handle_key_event(info, &event->key);
		break;
	case XENKBD_TYPE_MOTION:
		xenkbd_handle_motion_event(info, &event->motion);
		break;
	case XENKBD_TYPE_POS:
		xenkbd_handle_position_event(info, &event->pos);
		break;
	case XENKBD_TYPE_MTOUCH:
		xenkbd_handle_mt_event(info, &event->mtouch);
		break;
	}
}
/*
 * IRQ handler: drain the shared-page input ring, then notify the backend
 * that the consumer index advanced.
 */
static irqreturn_t input_handler(int rq, void *dev_id)
{
	struct xenkbd_info *info = dev_id;
	struct xenkbd_page *page = info->page;
	__u32 cons, prod;

	prod = page->in_prod;
	if (prod == page->in_cons)
		return IRQ_HANDLED;
	rmb();			/* ensure we see ring contents up to prod */
	for (cons = page->in_cons; cons != prod; cons++)
		xenkbd_handle_event(info, &XENKBD_IN_RING_REF(page, cons));
	mb();			/* ensure we got ring contents */
	page->in_cons = cons;
	notify_remote_via_irq(info->irq);

	return IRQ_HANDLED;
}
/*
 * Probe: allocate the shared event page, create whichever virtual input
 * devices the backend advertises (keyboard, pointer, multi-touch) and
 * connect to the backend.  Fails with -ENXIO if no device is enabled.
 *
 * Fix: the multi-touch pr_warn() was missing its terminating '\n'
 * (every other log message in this function has one), which can cause
 * the message to be merged with a following printk.
 */
static int xenkbd_probe(struct xenbus_device *dev,
			const struct xenbus_device_id *id)
{
	int ret, i;
	bool with_mtouch, with_kbd, with_ptr;
	struct xenkbd_info *info;
	struct input_dev *kbd, *ptr, *mtouch;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}
	dev_set_drvdata(&dev->dev, info);
	info->xbdev = dev;
	info->irq = -1;
	info->gref = -1;
	snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);

	/* Shared ring page, granted to the backend on connect. */
	info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!info->page)
		goto error_nomem;

	/*
	 * The below are reverse logic, e.g. if the feature is set, then
	 * do not expose the corresponding virtual device.
	 */
	with_kbd = !xenbus_read_unsigned(dev->otherend,
					 XENKBD_FIELD_FEAT_DSBL_KEYBRD, 0);

	with_ptr = !xenbus_read_unsigned(dev->otherend,
					 XENKBD_FIELD_FEAT_DSBL_POINTER, 0);

	/* Direct logic: if set, then create multi-touch device. */
	with_mtouch = xenbus_read_unsigned(dev->otherend,
					   XENKBD_FIELD_FEAT_MTOUCH, 0);
	if (with_mtouch) {
		ret = xenbus_write(XBT_NIL, dev->nodename,
				   XENKBD_FIELD_REQ_MTOUCH, "1");
		if (ret) {
			pr_warn("xenkbd: can't request multi-touch\n");
			with_mtouch = 0;
		}
	}

	/* keyboard */
	if (with_kbd) {
		kbd = input_allocate_device();
		if (!kbd)
			goto error_nomem;
		kbd->name = "Xen Virtual Keyboard";
		kbd->phys = info->phys;
		kbd->id.bustype = BUS_PCI;
		kbd->id.vendor = 0x5853;
		kbd->id.product = 0xffff;

		__set_bit(EV_KEY, kbd->evbit);
		for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
			__set_bit(i, kbd->keybit);
		for (i = KEY_OK; i < KEY_MAX; i++)
			__set_bit(i, kbd->keybit);

		ret = input_register_device(kbd);
		if (ret) {
			input_free_device(kbd);
			xenbus_dev_fatal(dev, ret,
					 "input_register_device(kbd)");
			goto error;
		}
		info->kbd = kbd;
	}

	/* pointing device */
	if (with_ptr) {
		unsigned int abs;

		/* Set input abs params to match backend screen res */
		abs = xenbus_read_unsigned(dev->otherend,
					   XENKBD_FIELD_FEAT_ABS_POINTER, 0);
		ptr_size[KPARAM_X] = xenbus_read_unsigned(dev->otherend,
							  XENKBD_FIELD_WIDTH,
							  ptr_size[KPARAM_X]);
		ptr_size[KPARAM_Y] = xenbus_read_unsigned(dev->otherend,
							  XENKBD_FIELD_HEIGHT,
							  ptr_size[KPARAM_Y]);
		if (abs) {
			ret = xenbus_write(XBT_NIL, dev->nodename,
					   XENKBD_FIELD_REQ_ABS_POINTER, "1");
			if (ret) {
				pr_warn("xenkbd: can't request abs-pointer\n");
				abs = 0;
			}
		}

		ptr = input_allocate_device();
		if (!ptr)
			goto error_nomem;
		ptr->name = "Xen Virtual Pointer";
		ptr->phys = info->phys;
		ptr->id.bustype = BUS_PCI;
		ptr->id.vendor = 0x5853;
		ptr->id.product = 0xfffe;

		/* Absolute coordinates if the backend granted them,
		 * otherwise fall back to relative motion. */
		if (abs) {
			__set_bit(EV_ABS, ptr->evbit);
			input_set_abs_params(ptr, ABS_X, 0,
					     ptr_size[KPARAM_X], 0, 0);
			input_set_abs_params(ptr, ABS_Y, 0,
					     ptr_size[KPARAM_Y], 0, 0);
		} else {
			input_set_capability(ptr, EV_REL, REL_X);
			input_set_capability(ptr, EV_REL, REL_Y);
		}
		input_set_capability(ptr, EV_REL, REL_WHEEL);

		__set_bit(EV_KEY, ptr->evbit);
		for (i = BTN_LEFT; i <= BTN_TASK; i++)
			__set_bit(i, ptr->keybit);

		ret = input_register_device(ptr);
		if (ret) {
			input_free_device(ptr);
			xenbus_dev_fatal(dev, ret,
					 "input_register_device(ptr)");
			goto error;
		}
		info->ptr = ptr;
	}

	/* multi-touch device */
	if (with_mtouch) {
		int num_cont, width, height;

		mtouch = input_allocate_device();
		if (!mtouch)
			goto error_nomem;

		num_cont = xenbus_read_unsigned(info->xbdev->otherend,
						XENKBD_FIELD_MT_NUM_CONTACTS,
						1);
		width = xenbus_read_unsigned(info->xbdev->otherend,
					     XENKBD_FIELD_MT_WIDTH,
					     XENFB_WIDTH);
		height = xenbus_read_unsigned(info->xbdev->otherend,
					      XENKBD_FIELD_MT_HEIGHT,
					      XENFB_HEIGHT);

		mtouch->name = "Xen Virtual Multi-touch";
		mtouch->phys = info->phys;
		mtouch->id.bustype = BUS_PCI;
		mtouch->id.vendor = 0x5853;
		mtouch->id.product = 0xfffd;

		input_set_abs_params(mtouch, ABS_MT_TOUCH_MAJOR,
				     0, 255, 0, 0);
		input_set_abs_params(mtouch, ABS_MT_POSITION_X,
				     0, width, 0, 0);
		input_set_abs_params(mtouch, ABS_MT_POSITION_Y,
				     0, height, 0, 0);

		ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT);
		if (ret) {
			input_free_device(mtouch);
			xenbus_dev_fatal(info->xbdev, ret,
					 "input_mt_init_slots");
			goto error;
		}

		ret = input_register_device(mtouch);
		if (ret) {
			input_free_device(mtouch);
			xenbus_dev_fatal(info->xbdev, ret,
					 "input_register_device(mtouch)");
			goto error;
		}
		/* -1 forces a slot switch on the first contact. */
		info->mtouch_cur_contact_id = -1;
		info->mtouch = mtouch;
	}

	if (!(with_kbd || with_ptr || with_mtouch)) {
		ret = -ENXIO;
		goto error;
	}

	ret = xenkbd_connect_backend(dev, info);
	if (ret < 0)
		goto error;

	return 0;

 error_nomem:
	ret = -ENOMEM;
	xenbus_dev_fatal(dev, ret, "allocating device memory");
 error:
	xenkbd_remove(dev);
	return ret;
}
/*
 * Resume after suspend: the grant reference and event channel do not
 * survive, so disconnect, wipe the shared page and reconnect.
 */
static int xenkbd_resume(struct xenbus_device *dev)
{
	struct xenkbd_info *info = dev_get_drvdata(&dev->dev);

	xenkbd_disconnect_backend(info);
	memset(info->page, 0, PAGE_SIZE);
	return xenkbd_connect_backend(dev, info);
}
/*
 * Remove: disconnect from the backend, unregister whichever input
 * devices were created during probe and release the shared page.
 */
static void xenkbd_remove(struct xenbus_device *dev)
{
	struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
	struct input_dev *devices[] = { info->kbd, info->ptr, info->mtouch };
	unsigned int n;

	xenkbd_disconnect_backend(info);

	/* Only the devices actually created at probe time are non-NULL. */
	for (n = 0; n < sizeof(devices) / sizeof(devices[0]); n++)
		if (devices[n])
			input_unregister_device(devices[n]);

	free_page((unsigned long)info->page);
	kfree(info);
}
/*
 * Grant the shared page to the backend, bind an event channel, and
 * publish ring reference and event channel through xenstore.  The
 * xenstore transaction is retried on -EAGAIN.  On failure everything
 * acquired so far is unwound in reverse order.
 */
static int xenkbd_connect_backend(struct xenbus_device *dev,
				  struct xenkbd_info *info)
{
	int ret, evtchn;
	struct xenbus_transaction xbt;

	ret = gnttab_grant_foreign_access(dev->otherend_id,
					  virt_to_gfn(info->page), 0);
	if (ret < 0)
		return ret;
	info->gref = ret;

	ret = xenbus_alloc_evtchn(dev, &evtchn);
	if (ret)
		goto error_grant;
	ret = bind_evtchn_to_irqhandler(evtchn, input_handler,
					0, dev->devicetype, info);
	if (ret < 0) {
		xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
		goto error_evtchan;
	}
	info->irq = ret;

 again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		goto error_irqh;
	}
	/* Publish the ring both as a raw GFN and as a grant reference. */
	ret = xenbus_printf(xbt, dev->nodename, XENKBD_FIELD_RING_REF, "%lu",
			    virt_to_gfn(info->page));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, XENKBD_FIELD_RING_GREF,
			    "%u", info->gref);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, XENKBD_FIELD_EVT_CHANNEL, "%u",
			    evtchn);
	if (ret)
		goto error_xenbus;
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		goto error_irqh;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);
	return 0;

 error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
 error_irqh:
	unbind_from_irqhandler(info->irq, info);
	info->irq = -1;
 error_evtchan:
	xenbus_free_evtchn(dev, evtchn);
 error_grant:
	gnttab_end_foreign_access(info->gref, NULL);
	info->gref = -1;
	return ret;
}
/*
 * Tear down the IRQ handler and revoke the backend's grant.  Safe to
 * call when not connected: irq and gref are -1 in that case.
 */
static void xenkbd_disconnect_backend(struct xenkbd_info *info)
{
	if (info->irq >= 0)
		unbind_from_irqhandler(info->irq, info);
	info->irq = -1;
	if (info->gref >= 0)
		gnttab_end_foreign_access(info->gref, NULL);
	info->gref = -1;
}
/*
 * React to backend xenbus state transitions by mirroring the
 * appropriate frontend state.
 */
static void xenkbd_backend_changed(struct xenbus_device *dev,
				   enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		/*
		 * Work around xenbus race condition: If backend goes
		 * through InitWait to Connected fast enough, we can
		 * get Connected twice here.
		 */
		if (dev->state != XenbusStateConnected)
			xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		fallthrough;	/* Missed the backend's CLOSING state */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}
/* Xenbus device names this frontend binds to. */
static const struct xenbus_device_id xenkbd_ids[] = {
	{ XENKBD_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xenkbd_driver = {
	.ids = xenkbd_ids,
	.probe = xenkbd_probe,
	.remove = xenkbd_remove,
	.resume = xenkbd_resume,
	.otherend_changed = xenkbd_backend_changed,
	.not_essential = true,
};
static int __init xenkbd_init(void)
{
	/*
	 * Only unprivileged Xen domains with PV device support carry a
	 * virtual keyboard backend; there is nothing to do in dom0.
	 */
	if (!xen_domain() || xen_initial_domain() || !xen_has_pv_devices())
		return -ENODEV;

	return xenbus_register_frontend(&xenkbd_driver);
}
/* Module exit: unregister the xenbus frontend driver. */
static void __exit xenkbd_cleanup(void)
{
	xenbus_unregister_driver(&xenkbd_driver);
}
module_init(xenkbd_init);
module_exit(xenkbd_cleanup);
MODULE_DESCRIPTION("Xen virtual keyboard/pointer device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENKBD_DRIVER_NAME);
|
linux-master
|
drivers/input/misc/xen-kbdfront.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ADXL345/346 Three-Axis Digital Accelerometers
*
* Enter bugs at http://blackfin.uclinux.org/
*
* Copyright (C) 2009 Michael Hennerich, Analog Devices Inc.
*/
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/input/adxl34x.h>
#include <linux/module.h>
#include "adxl34x.h"
/* ADXL345/6 Register Map */
#define DEVID 0x00 /* R Device ID */
#define THRESH_TAP 0x1D /* R/W Tap threshold */
#define OFSX 0x1E /* R/W X-axis offset */
#define OFSY 0x1F /* R/W Y-axis offset */
#define OFSZ 0x20 /* R/W Z-axis offset */
#define DUR 0x21 /* R/W Tap duration */
#define LATENT 0x22 /* R/W Tap latency */
#define WINDOW 0x23 /* R/W Tap window */
#define THRESH_ACT 0x24 /* R/W Activity threshold */
#define THRESH_INACT 0x25 /* R/W Inactivity threshold */
#define TIME_INACT 0x26 /* R/W Inactivity time */
#define ACT_INACT_CTL 0x27 /* R/W Axis enable control for activity and */
/* inactivity detection */
#define THRESH_FF 0x28 /* R/W Free-fall threshold */
#define TIME_FF 0x29 /* R/W Free-fall time */
#define TAP_AXES 0x2A /* R/W Axis control for tap/double tap */
#define ACT_TAP_STATUS 0x2B /* R Source of tap/double tap */
#define BW_RATE 0x2C /* R/W Data rate and power mode control */
#define POWER_CTL 0x2D /* R/W Power saving features control */
#define INT_ENABLE 0x2E /* R/W Interrupt enable control */
#define INT_MAP 0x2F /* R/W Interrupt mapping control */
#define INT_SOURCE 0x30 /* R Source of interrupts */
#define DATA_FORMAT 0x31 /* R/W Data format control */
#define DATAX0 0x32 /* R X-Axis Data 0 */
#define DATAX1 0x33 /* R X-Axis Data 1 */
#define DATAY0 0x34 /* R Y-Axis Data 0 */
#define DATAY1 0x35 /* R Y-Axis Data 1 */
#define DATAZ0 0x36 /* R Z-Axis Data 0 */
#define DATAZ1 0x37 /* R Z-Axis Data 1 */
#define FIFO_CTL 0x38 /* R/W FIFO control */
#define FIFO_STATUS 0x39 /* R FIFO status */
#define TAP_SIGN 0x3A /* R Sign and source for tap/double tap */
/* Orientation ADXL346 only */
#define ORIENT_CONF 0x3B /* R/W Orientation configuration */
#define ORIENT 0x3C /* R Orientation status */
/* DEVIDs */
#define ID_ADXL345 0xE5
#define ID_ADXL346 0xE6
/* INT_ENABLE/INT_MAP/INT_SOURCE Bits */
#define DATA_READY (1 << 7)
#define SINGLE_TAP (1 << 6)
#define DOUBLE_TAP (1 << 5)
#define ACTIVITY (1 << 4)
#define INACTIVITY (1 << 3)
#define FREE_FALL (1 << 2)
#define WATERMARK (1 << 1)
#define OVERRUN (1 << 0)
/* ACT_INACT_CONTROL Bits */
#define ACT_ACDC (1 << 7)
#define ACT_X_EN (1 << 6)
#define ACT_Y_EN (1 << 5)
#define ACT_Z_EN (1 << 4)
#define INACT_ACDC (1 << 3)
#define INACT_X_EN (1 << 2)
#define INACT_Y_EN (1 << 1)
#define INACT_Z_EN (1 << 0)
/* TAP_AXES Bits */
#define SUPPRESS (1 << 3)
#define TAP_X_EN (1 << 2)
#define TAP_Y_EN (1 << 1)
#define TAP_Z_EN (1 << 0)
/* ACT_TAP_STATUS Bits */
#define ACT_X_SRC (1 << 6)
#define ACT_Y_SRC (1 << 5)
#define ACT_Z_SRC (1 << 4)
#define ASLEEP (1 << 3)
#define TAP_X_SRC (1 << 2)
#define TAP_Y_SRC (1 << 1)
#define TAP_Z_SRC (1 << 0)
/* BW_RATE Bits */
#define LOW_POWER (1 << 4)
#define RATE(x) ((x) & 0xF)
/* POWER_CTL Bits */
#define PCTL_LINK (1 << 5)
#define PCTL_AUTO_SLEEP (1 << 4)
#define PCTL_MEASURE (1 << 3)
#define PCTL_SLEEP (1 << 2)
#define PCTL_WAKEUP(x) ((x) & 0x3)
/* DATA_FORMAT Bits */
#define SELF_TEST (1 << 7)
#define SPI (1 << 6)
#define INT_INVERT (1 << 5)
#define FULL_RES (1 << 3)
#define JUSTIFY (1 << 2)
#define RANGE(x) ((x) & 0x3)
#define RANGE_PM_2g 0
#define RANGE_PM_4g 1
#define RANGE_PM_8g 2
#define RANGE_PM_16g 3
/*
* Maximum value our axis may get in full res mode for the input device
* (signed 13 bits)
*/
#define ADXL_FULLRES_MAX_VAL 4096
/*
* Maximum value our axis may get in fixed res mode for the input device
* (signed 10 bits)
*/
#define ADXL_FIXEDRES_MAX_VAL 512
/* FIFO_CTL Bits */
#define FIFO_MODE(x) (((x) & 0x3) << 6)
#define FIFO_BYPASS 0
#define FIFO_FIFO 1
#define FIFO_STREAM 2
#define FIFO_TRIGGER 3
#define TRIGGER (1 << 5)
#define SAMPLES(x) ((x) & 0x1F)
/* FIFO_STATUS Bits */
#define FIFO_TRIG (1 << 7)
#define ENTRIES(x) ((x) & 0x3F)
/* TAP_SIGN Bits ADXL346 only */
#define XSIGN (1 << 6)
#define YSIGN (1 << 5)
#define ZSIGN (1 << 4)
#define XTAP (1 << 3)
#define YTAP (1 << 2)
#define ZTAP (1 << 1)
/* ORIENT_CONF ADXL346 only */
#define ORIENT_DEADZONE(x) (((x) & 0x7) << 4)
#define ORIENT_DIVISOR(x) ((x) & 0x7)
/* ORIENT ADXL346 only */
#define ADXL346_2D_VALID (1 << 6)
#define ADXL346_2D_ORIENT(x) (((x) & 0x30) >> 4)
#define ADXL346_3D_VALID (1 << 3)
#define ADXL346_3D_ORIENT(x) ((x) & 0x7)
#define ADXL346_2D_PORTRAIT_POS 0 /* +X */
#define ADXL346_2D_PORTRAIT_NEG 1 /* -X */
#define ADXL346_2D_LANDSCAPE_POS 2 /* +Y */
#define ADXL346_2D_LANDSCAPE_NEG 3 /* -Y */
#define ADXL346_3D_FRONT 3 /* +X */
#define ADXL346_3D_BACK 4 /* -X */
#define ADXL346_3D_RIGHT 2 /* +Y */
#define ADXL346_3D_LEFT 5 /* -Y */
#define ADXL346_3D_TOP 1 /* +Z */
#define ADXL346_3D_BOTTOM 6 /* -Z */
#undef ADXL_DEBUG
#define ADXL_X_AXIS 0
#define ADXL_Y_AXIS 1
#define ADXL_Z_AXIS 2
#define AC_READ(ac, reg) ((ac)->bops->read((ac)->dev, reg))
#define AC_WRITE(ac, reg, val) ((ac)->bops->write((ac)->dev, reg, val))
/* One X/Y/Z acceleration sample, sign-extended to int. */
struct axis_triple {
	int x;
	int y;
	int z;
};

/* Per-device state shared by the bus (I2C/SPI) front-ends. */
struct adxl34x {
	struct device *dev;
	struct input_dev *input;
	struct mutex mutex;	/* reentrant protection for struct */
	struct adxl34x_platform_data pdata;
	struct axis_triple swcal;	/* software offsets, 4 mg/LSB */
	struct axis_triple hwcal;	/* OFSX/Y/Z registers, 15.6 mg/LSB */
	struct axis_triple saved;	/* last sample, for sysfs "position" */
	char phys[32];
	unsigned orient2d_saved;	/* last reported 2D orientation */
	unsigned orient3d_saved;	/* last reported 3D orientation */
	bool disabled;	/* P: mutex */
	bool opened;	/* P: mutex */
	bool suspended;	/* P: mutex */
	bool fifo_delay;	/* delay FIFO reads (fast SPI), see IRQ handler */
	int irq;
	unsigned model;		/* 345 or 346, from DEVID */
	unsigned int_mask;	/* interrupt sources we enabled */
	const struct adxl34x_bus_ops *bops;
};
/*
 * Fallback configuration used when the board supplies no platform
 * data: absolute X/Y/Z reporting, taps on all axes as BTN_TOUCH,
 * full resolution, auto-sleep linked power mode, streaming FIFO.
 */
static const struct adxl34x_platform_data adxl34x_default_init = {
	.tap_threshold = 35,
	.tap_duration = 3,
	.tap_latency = 20,
	.tap_window = 20,
	.tap_axis_control = ADXL_TAP_X_EN | ADXL_TAP_Y_EN | ADXL_TAP_Z_EN,
	.act_axis_control = 0xFF,
	.activity_threshold = 6,
	.inactivity_threshold = 4,
	.inactivity_time = 3,
	.free_fall_threshold = 8,
	.free_fall_time = 0x20,
	.data_rate = 8,
	.data_range = ADXL_FULL_RES,

	.ev_type = EV_ABS,
	.ev_code_x = ABS_X,	/* EV_REL */
	.ev_code_y = ABS_Y,	/* EV_REL */
	.ev_code_z = ABS_Z,	/* EV_REL */
	.ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY {x,y,z} */
	.power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
	.fifo_mode = ADXL_FIFO_STREAM,
	.watermark = 0,
};
/*
 * Read one X/Y/Z sample from the DATAX0..DATAZ1 registers, convert
 * from little-endian and publish it both to the caller and to
 * ac->saved (used by the calibration/position sysfs attributes).
 */
static void adxl34x_get_triple(struct adxl34x *ac, struct axis_triple *axis)
{
	__le16 raw[3];

	ac->bops->read_block(ac->dev, DATAX0, DATAZ1 - DATAX0 + 1, raw);

	mutex_lock(&ac->mutex);
	axis->x = ac->saved.x = (s16) le16_to_cpu(raw[0]);
	axis->y = ac->saved.y = (s16) le16_to_cpu(raw[1]);
	axis->z = ac->saved.z = (s16) le16_to_cpu(raw[2]);
	mutex_unlock(&ac->mutex);
}
static void adxl34x_service_ev_fifo(struct adxl34x *ac)
{
struct adxl34x_platform_data *pdata = &ac->pdata;
struct axis_triple axis;
adxl34x_get_triple(ac, &axis);
input_event(ac->input, pdata->ev_type, pdata->ev_code_x,
axis.x - ac->swcal.x);
input_event(ac->input, pdata->ev_type, pdata->ev_code_y,
axis.y - ac->swcal.y);
input_event(ac->input, pdata->ev_type, pdata->ev_code_z,
axis.z - ac->swcal.z);
}
/*
 * Emit a full press + release pulse for @key; the press is synced
 * immediately, the release with the caller's next input_sync().
 */
static void adxl34x_report_key_single(struct input_dev *input, int key)
{
	input_report_key(input, key, true);
	input_sync(input);
	input_report_key(input, key, false);
}
/*
 * Report the per-axis tap key events that are flagged in @status.
 * TAP_X/Y/Z_SRC occupy bits 2..0 of ACT_TAP_STATUS, i.e. the reverse
 * order of the ev_code_tap[] array (which is indexed X, Y, Z).
 */
static void adxl34x_send_key_events(struct adxl34x *ac,
		struct adxl34x_platform_data *pdata, int status, int press)
{
	int axis, bit;

	for (axis = ADXL_X_AXIS, bit = ADXL_Z_AXIS;
	     axis <= ADXL_Z_AXIS; axis++, bit--)
		if (status & (1 << bit))
			input_report_key(ac->input, pdata->ev_code_tap[axis],
					 press);
}
/* Report one complete tap: press events, sync, then release events. */
static void adxl34x_do_tap(struct adxl34x *ac,
		struct adxl34x_platform_data *pdata, int status)
{
	adxl34x_send_key_events(ac, pdata, status, true);
	input_sync(ac->input);
	adxl34x_send_key_events(ac, pdata, status, false);
}
/*
 * Threaded interrupt handler: read INT_SOURCE to learn which enabled
 * conditions fired and translate each into input events.  Register
 * read order matters — see the comments below.
 */
static irqreturn_t adxl34x_irq(int irq, void *handle)
{
	struct adxl34x *ac = handle;
	struct adxl34x_platform_data *pdata = &ac->pdata;
	int int_stat, tap_stat, samples, orient, orient_code;

	/*
	 * ACT_TAP_STATUS should be read before clearing the interrupt
	 * Avoid reading ACT_TAP_STATUS in case TAP detection is disabled
	 */
	if (pdata->tap_axis_control & (TAP_X_EN | TAP_Y_EN | TAP_Z_EN))
		tap_stat = AC_READ(ac, ACT_TAP_STATUS);
	else
		tap_stat = 0;

	int_stat = AC_READ(ac, INT_SOURCE);

	if (int_stat & FREE_FALL)
		adxl34x_report_key_single(ac->input, pdata->ev_code_ff);

	if (int_stat & OVERRUN)
		dev_dbg(ac->dev, "OVERRUN\n");

	/* A double tap is reported as two tap pulses. */
	if (int_stat & (SINGLE_TAP | DOUBLE_TAP)) {
		adxl34x_do_tap(ac, pdata, tap_stat);

		if (int_stat & DOUBLE_TAP)
			adxl34x_do_tap(ac, pdata, tap_stat);
	}

	/* Activity/inactivity share one key: pressed = active. */
	if (pdata->ev_code_act_inactivity) {
		if (int_stat & ACTIVITY)
			input_report_key(ac->input,
					 pdata->ev_code_act_inactivity, 1);
		if (int_stat & INACTIVITY)
			input_report_key(ac->input,
					 pdata->ev_code_act_inactivity, 0);
	}

	/*
	 * ORIENTATION SENSING ADXL346 only
	 */
	if (pdata->orientation_enable) {
		orient = AC_READ(ac, ORIENT);
		if ((pdata->orientation_enable & ADXL_EN_ORIENTATION_2D) &&
		    (orient & ADXL346_2D_VALID)) {

			orient_code = ADXL346_2D_ORIENT(orient);
			/* Report orientation only when it changes */
			if (ac->orient2d_saved != orient_code) {
				ac->orient2d_saved = orient_code;
				adxl34x_report_key_single(ac->input,
					pdata->ev_codes_orient_2d[orient_code]);
			}
		}

		if ((pdata->orientation_enable & ADXL_EN_ORIENTATION_3D) &&
		    (orient & ADXL346_3D_VALID)) {

			orient_code = ADXL346_3D_ORIENT(orient) - 1;
			/* Report orientation only when it changes */
			if (ac->orient3d_saved != orient_code) {
				ac->orient3d_saved = orient_code;
				adxl34x_report_key_single(ac->input,
					pdata->ev_codes_orient_3d[orient_code]);
			}
		}
	}

	/* Drain the sample FIFO (or the single sample in bypass mode). */
	if (int_stat & (DATA_READY | WATERMARK)) {

		if (pdata->fifo_mode)
			samples = ENTRIES(AC_READ(ac, FIFO_STATUS)) + 1;
		else
			samples = 1;

		for (; samples > 0; samples--) {
			adxl34x_service_ev_fifo(ac);
			/*
			 * To ensure that the FIFO has
			 * completely popped, there must be at least 5 us between
			 * the end of reading the data registers, signified by the
			 * transition to register 0x38 from 0x37 or the CS pin
			 * going high, and the start of new reads of the FIFO or
			 * reading the FIFO_STATUS register. For SPI operation at
			 * 1.5 MHz or lower, the register addressing portion of the
			 * transmission is sufficient delay to ensure the FIFO has
			 * completely popped. It is necessary for SPI operation
			 * greater than 1.5 MHz to de-assert the CS pin to ensure a
			 * total of 5 us, which is at most 3.4 us at 5 MHz
			 * operation.
			 */
			if (ac->fifo_delay && (samples > 1))
				udelay(3);
		}
	}

	input_sync(ac->input);

	return IRQ_HANDLED;
}
/* Callers hold ac->mutex. */
static void __adxl34x_disable(struct adxl34x *ac)
{
	/*
	 * A '0' places the ADXL34x into standby mode
	 * with minimum power consumption.
	 */
	AC_WRITE(ac, POWER_CTL, 0);
}

/* Start measurement with the configured power mode.  Callers hold ac->mutex. */
static void __adxl34x_enable(struct adxl34x *ac)
{
	AC_WRITE(ac, POWER_CTL, ac->pdata.power_mode | PCTL_MEASURE);
}
/* PM suspend: enter standby unless the part is already inactive. */
static int adxl34x_suspend(struct device *dev)
{
	struct adxl34x *ac = dev_get_drvdata(dev);

	mutex_lock(&ac->mutex);

	if (!ac->suspended && !ac->disabled && ac->opened)
		__adxl34x_disable(ac);

	ac->suspended = true;

	mutex_unlock(&ac->mutex);

	return 0;
}

/* PM resume: restart measurement if the device is open and not disabled. */
static int adxl34x_resume(struct device *dev)
{
	struct adxl34x *ac = dev_get_drvdata(dev);

	mutex_lock(&ac->mutex);

	if (ac->suspended && !ac->disabled && ac->opened)
		__adxl34x_enable(ac);

	ac->suspended = false;

	mutex_unlock(&ac->mutex);

	return 0;
}
/* sysfs "disable": writing 1 puts the part in standby, 0 re-enables it. */
static ssize_t adxl34x_disable_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct adxl34x *ac = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", ac->disabled);
}

static ssize_t adxl34x_disable_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct adxl34x *ac = dev_get_drvdata(dev);
	unsigned int val;
	int error;

	error = kstrtouint(buf, 10, &val);
	if (error)
		return error;

	mutex_lock(&ac->mutex);

	/* Only touch the hardware while it is open and not suspended. */
	if (!ac->suspended && ac->opened) {
		if (val) {
			if (!ac->disabled)
				__adxl34x_disable(ac);
		} else {
			if (ac->disabled)
				__adxl34x_enable(ac);
		}
	}

	/* Remember the request either way; open/resume honour it later. */
	ac->disabled = !!val;

	mutex_unlock(&ac->mutex);

	return count;
}

static DEVICE_ATTR(disable, 0664, adxl34x_disable_show, adxl34x_disable_store);
/*
 * sysfs "calibrate": reading shows the combined HW+SW offset per axis
 * in 4 mg units; writing zeroes the current position by folding the
 * last sample into the offsets.
 */
static ssize_t adxl34x_calibrate_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct adxl34x *ac = dev_get_drvdata(dev);
	ssize_t count;

	mutex_lock(&ac->mutex);
	count = sprintf(buf, "%d,%d,%d\n",
			ac->hwcal.x * 4 + ac->swcal.x,
			ac->hwcal.y * 4 + ac->swcal.y,
			ac->hwcal.z * 4 + ac->swcal.z);
	mutex_unlock(&ac->mutex);

	return count;
}

static ssize_t adxl34x_calibrate_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct adxl34x *ac = dev_get_drvdata(dev);

	/*
	 * Hardware offset calibration has a resolution of 15.6 mg/LSB.
	 * We use HW calibration and handle the remaining bits in SW. (4mg/LSB)
	 */
	mutex_lock(&ac->mutex);
	ac->hwcal.x -= (ac->saved.x / 4);
	ac->swcal.x = ac->saved.x % 4;

	ac->hwcal.y -= (ac->saved.y / 4);
	ac->swcal.y = ac->saved.y % 4;

	ac->hwcal.z -= (ac->saved.z / 4);
	ac->swcal.z = ac->saved.z % 4;

	AC_WRITE(ac, OFSX, (s8) ac->hwcal.x);
	AC_WRITE(ac, OFSY, (s8) ac->hwcal.y);
	AC_WRITE(ac, OFSZ, (s8) ac->hwcal.z);
	mutex_unlock(&ac->mutex);

	return count;
}

static DEVICE_ATTR(calibrate, 0664,
		   adxl34x_calibrate_show, adxl34x_calibrate_store);
/* sysfs "rate": the 4-bit BW_RATE output data rate code. */
static ssize_t adxl34x_rate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct adxl34x *ac = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", RATE(ac->pdata.data_rate));
}

static ssize_t adxl34x_rate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct adxl34x *ac = dev_get_drvdata(dev);
	unsigned char val;
	int error;

	error = kstrtou8(buf, 10, &val);
	if (error)
		return error;

	mutex_lock(&ac->mutex);

	/* Only the low nibble is a rate code; preserve LOW_POWER. */
	ac->pdata.data_rate = RATE(val);
	AC_WRITE(ac, BW_RATE,
		 ac->pdata.data_rate |
			(ac->pdata.low_power_mode ? LOW_POWER : 0));

	mutex_unlock(&ac->mutex);

	return count;
}

static DEVICE_ATTR(rate, 0664, adxl34x_rate_show, adxl34x_rate_store);
/* sysfs "autosleep": toggles the AUTO_SLEEP+LINK bits in POWER_CTL. */
static ssize_t adxl34x_autosleep_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct adxl34x *ac = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n",
		ac->pdata.power_mode & (PCTL_AUTO_SLEEP | PCTL_LINK) ? 1 : 0);
}

static ssize_t adxl34x_autosleep_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct adxl34x *ac = dev_get_drvdata(dev);
	unsigned int val;
	int error;

	error = kstrtouint(buf, 10, &val);
	if (error)
		return error;

	mutex_lock(&ac->mutex);

	if (val)
		ac->pdata.power_mode |= (PCTL_AUTO_SLEEP | PCTL_LINK);
	else
		ac->pdata.power_mode &= ~(PCTL_AUTO_SLEEP | PCTL_LINK);

	/* Apply immediately only while actively measuring. */
	if (!ac->disabled && !ac->suspended && ac->opened)
		AC_WRITE(ac, POWER_CTL, ac->pdata.power_mode | PCTL_MEASURE);

	mutex_unlock(&ac->mutex);

	return count;
}

static DEVICE_ATTR(autosleep, 0664,
		   adxl34x_autosleep_show, adxl34x_autosleep_store);
/* sysfs "position": the most recent raw X/Y/Z sample (read-only). */
static ssize_t adxl34x_position_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct adxl34x *ac = dev_get_drvdata(dev);
	ssize_t count;

	mutex_lock(&ac->mutex);
	count = sprintf(buf, "(%d, %d, %d)\n",
			ac->saved.x, ac->saved.y, ac->saved.z);
	mutex_unlock(&ac->mutex);

	return count;
}

static DEVICE_ATTR(position, S_IRUGO, adxl34x_position_show, NULL);
#ifdef ADXL_DEBUG
static ssize_t adxl34x_write_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
unsigned int val;
int error;
/*
* This allows basic ADXL register write access for debug purposes.
*/
error = kstrtouint(buf, 16, &val);
if (error)
return error;
mutex_lock(&ac->mutex);
AC_WRITE(ac, val >> 8, val & 0xFF);
mutex_unlock(&ac->mutex);
return count;
}
static DEVICE_ATTR(write, 0664, NULL, adxl34x_write_store);
#endif
/* sysfs attributes exposed on the bus device (see adxl34x_probe()). */
static struct attribute *adxl34x_attributes[] = {
	&dev_attr_disable.attr,
	&dev_attr_calibrate.attr,
	&dev_attr_rate.attr,
	&dev_attr_autosleep.attr,
	&dev_attr_position.attr,
#ifdef ADXL_DEBUG
	&dev_attr_write.attr,
#endif
	NULL
};

static const struct attribute_group adxl34x_attr_group = {
	.attrs = adxl34x_attributes,
};
/* input open(): start measuring unless suspended or user-disabled. */
static int adxl34x_input_open(struct input_dev *input)
{
	struct adxl34x *ac = input_get_drvdata(input);

	mutex_lock(&ac->mutex);

	if (!ac->suspended && !ac->disabled)
		__adxl34x_enable(ac);

	ac->opened = true;

	mutex_unlock(&ac->mutex);

	return 0;
}

/* input close(): drop back to standby when the last user goes away. */
static void adxl34x_input_close(struct input_dev *input)
{
	struct adxl34x *ac = input_get_drvdata(input);

	mutex_lock(&ac->mutex);

	if (!ac->suspended && !ac->disabled)
		__adxl34x_disable(ac);

	ac->opened = false;

	mutex_unlock(&ac->mutex);
}
/*
 * Common probe for the I2C/SPI bus front-ends: verify the device ID,
 * set up the input device and sysfs attributes, and program the chip
 * from platform data (or the built-in defaults).  Returns the
 * allocated state or an ERR_PTR() on failure.
 */
struct adxl34x *adxl34x_probe(struct device *dev, int irq,
			      bool fifo_delay_default,
			      const struct adxl34x_bus_ops *bops)
{
	struct adxl34x *ac;
	struct input_dev *input_dev;
	const struct adxl34x_platform_data *pdata;
	int err, range, i;
	int revid;

	if (!irq) {
		dev_err(dev, "no IRQ?\n");
		err = -ENODEV;
		goto err_out;
	}

	ac = kzalloc(sizeof(*ac), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!ac || !input_dev) {
		err = -ENOMEM;
		goto err_free_mem;
	}

	ac->fifo_delay = fifo_delay_default;

	pdata = dev_get_platdata(dev);
	if (!pdata) {
		dev_dbg(dev,
			"No platform data: Using default initialization\n");
		pdata = &adxl34x_default_init;
	}

	/* Work on a private copy so sysfs attributes may modify it. */
	ac->pdata = *pdata;
	pdata = &ac->pdata;

	ac->input = input_dev;
	ac->dev = dev;
	ac->irq = irq;
	ac->bops = bops;

	mutex_init(&ac->mutex);

	input_dev->name = "ADXL34x accelerometer";
	revid = AC_READ(ac, DEVID);

	switch (revid) {
	case ID_ADXL345:
		ac->model = 345;
		break;
	case ID_ADXL346:
		ac->model = 346;
		break;
	default:
		dev_err(dev, "Failed to probe %s\n", input_dev->name);
		err = -ENODEV;
		goto err_free_mem;
	}

	snprintf(ac->phys, sizeof(ac->phys), "%s/input0", dev_name(dev));

	input_dev->phys = ac->phys;
	input_dev->dev.parent = dev;
	input_dev->id.product = ac->model;
	input_dev->id.bustype = bops->bustype;
	input_dev->open = adxl34x_input_open;
	input_dev->close = adxl34x_input_close;

	input_set_drvdata(input_dev, ac);

	/* Advertise capabilities matching the configured event type. */
	__set_bit(ac->pdata.ev_type, input_dev->evbit);

	if (ac->pdata.ev_type == EV_REL) {
		__set_bit(REL_X, input_dev->relbit);
		__set_bit(REL_Y, input_dev->relbit);
		__set_bit(REL_Z, input_dev->relbit);
	} else {
		/* EV_ABS */
		__set_bit(ABS_X, input_dev->absbit);
		__set_bit(ABS_Y, input_dev->absbit);
		__set_bit(ABS_Z, input_dev->absbit);

		if (pdata->data_range & FULL_RES)
			range = ADXL_FULLRES_MAX_VAL;	/* Signed 13-bit */
		else
			range = ADXL_FIXEDRES_MAX_VAL;	/* Signed 10-bit */

		input_set_abs_params(input_dev, ABS_X, -range, range, 3, 3);
		input_set_abs_params(input_dev, ABS_Y, -range, range, 3, 3);
		input_set_abs_params(input_dev, ABS_Z, -range, range, 3, 3);
	}

	__set_bit(EV_KEY, input_dev->evbit);
	__set_bit(pdata->ev_code_tap[ADXL_X_AXIS], input_dev->keybit);
	__set_bit(pdata->ev_code_tap[ADXL_Y_AXIS], input_dev->keybit);
	__set_bit(pdata->ev_code_tap[ADXL_Z_AXIS], input_dev->keybit);

	if (pdata->ev_code_ff) {
		ac->int_mask = FREE_FALL;
		__set_bit(pdata->ev_code_ff, input_dev->keybit);
	}

	if (pdata->ev_code_act_inactivity)
		__set_bit(pdata->ev_code_act_inactivity, input_dev->keybit);

	ac->int_mask |= ACTIVITY | INACTIVITY;

	if (pdata->watermark) {
		ac->int_mask |= WATERMARK;
		/* The watermark interrupt needs the FIFO running. */
		if (FIFO_MODE(pdata->fifo_mode) == FIFO_BYPASS)
			ac->pdata.fifo_mode |= FIFO_STREAM;
	} else {
		ac->int_mask |= DATA_READY;
	}

	if (pdata->tap_axis_control & (TAP_X_EN | TAP_Y_EN | TAP_Z_EN))
		ac->int_mask |= SINGLE_TAP | DOUBLE_TAP;

	/* No FIFO, no 5 us inter-read delay needed (see IRQ handler). */
	if (FIFO_MODE(pdata->fifo_mode) == FIFO_BYPASS)
		ac->fifo_delay = false;

	/* Start in standby; measurement begins on input open. */
	AC_WRITE(ac, POWER_CTL, 0);

	err = request_threaded_irq(ac->irq, NULL, adxl34x_irq,
				   IRQF_ONESHOT, dev_name(dev), ac);
	if (err) {
		dev_err(dev, "irq %d busy?\n", ac->irq);
		goto err_free_mem;
	}

	err = sysfs_create_group(&dev->kobj, &adxl34x_attr_group);
	if (err)
		goto err_free_irq;

	err = input_register_device(input_dev);
	if (err)
		goto err_remove_attr;

	/* Program the chip registers from (the copy of) platform data. */
	AC_WRITE(ac, OFSX, pdata->x_axis_offset);
	ac->hwcal.x = pdata->x_axis_offset;
	AC_WRITE(ac, OFSY, pdata->y_axis_offset);
	ac->hwcal.y = pdata->y_axis_offset;
	AC_WRITE(ac, OFSZ, pdata->z_axis_offset);
	ac->hwcal.z = pdata->z_axis_offset;
	AC_WRITE(ac, THRESH_TAP, pdata->tap_threshold);
	AC_WRITE(ac, DUR, pdata->tap_duration);
	AC_WRITE(ac, LATENT, pdata->tap_latency);
	AC_WRITE(ac, WINDOW, pdata->tap_window);
	AC_WRITE(ac, THRESH_ACT, pdata->activity_threshold);
	AC_WRITE(ac, THRESH_INACT, pdata->inactivity_threshold);
	AC_WRITE(ac, TIME_INACT, pdata->inactivity_time);
	AC_WRITE(ac, THRESH_FF, pdata->free_fall_threshold);
	AC_WRITE(ac, TIME_FF, pdata->free_fall_time);
	AC_WRITE(ac, TAP_AXES, pdata->tap_axis_control);
	AC_WRITE(ac, ACT_INACT_CTL, pdata->act_axis_control);
	AC_WRITE(ac, BW_RATE, RATE(ac->pdata.data_rate) |
		 (pdata->low_power_mode ? LOW_POWER : 0));
	AC_WRITE(ac, DATA_FORMAT, pdata->data_range);
	AC_WRITE(ac, FIFO_CTL, FIFO_MODE(pdata->fifo_mode) |
			SAMPLES(pdata->watermark));

	if (pdata->use_int2) {
		/* Map all INTs to INT2 */
		AC_WRITE(ac, INT_MAP, ac->int_mask | OVERRUN);
	} else {
		/* Map all INTs to INT1 */
		AC_WRITE(ac, INT_MAP, 0);
	}

	if (ac->model == 346 && ac->pdata.orientation_enable) {
		AC_WRITE(ac, ORIENT_CONF,
			ORIENT_DEADZONE(ac->pdata.deadzone_angle) |
			ORIENT_DIVISOR(ac->pdata.divisor_length));

		/* Out-of-range value forces the first orientation report. */
		ac->orient2d_saved = 1234;
		ac->orient3d_saved = 1234;

		if (pdata->orientation_enable & ADXL_EN_ORIENTATION_3D)
			for (i = 0; i < ARRAY_SIZE(pdata->ev_codes_orient_3d); i++)
				__set_bit(pdata->ev_codes_orient_3d[i],
					  input_dev->keybit);

		if (pdata->orientation_enable & ADXL_EN_ORIENTATION_2D)
			for (i = 0; i < ARRAY_SIZE(pdata->ev_codes_orient_2d); i++)
				__set_bit(pdata->ev_codes_orient_2d[i],
					  input_dev->keybit);
	} else {
		ac->pdata.orientation_enable = 0;
	}

	AC_WRITE(ac, INT_ENABLE, ac->int_mask | OVERRUN);

	ac->pdata.power_mode &= (PCTL_AUTO_SLEEP | PCTL_LINK);

	return ac;

 err_remove_attr:
	sysfs_remove_group(&dev->kobj, &adxl34x_attr_group);
 err_free_irq:
	free_irq(ac->irq, ac);
 err_free_mem:
	input_free_device(input_dev);
	kfree(ac);
 err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(adxl34x_probe);
/*
 * Common teardown for the bus front-ends, mirroring adxl34x_probe():
 * remove sysfs attributes, release the IRQ, unregister the input
 * device and free the state.
 */
void adxl34x_remove(struct adxl34x *ac)
{
	sysfs_remove_group(&ac->dev->kobj, &adxl34x_attr_group);
	free_irq(ac->irq, ac);
	input_unregister_device(ac->input);
	dev_dbg(ac->dev, "unregistered accelerometer\n");
	kfree(ac);
}
EXPORT_SYMBOL_GPL(adxl34x_remove);
EXPORT_GPL_SIMPLE_DEV_PM_OPS(adxl34x_pm, adxl34x_suspend, adxl34x_resume);
MODULE_AUTHOR("Michael Hennerich <[email protected]>");
MODULE_DESCRIPTION("ADXL345/346 Three-Axis Digital Accelerometer Driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/adxl34x.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Regulator haptic driver
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Jaewon Kim <[email protected]>
* Author: Hyunhee Kim <[email protected]>
*/
#include <linux/input.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/regulator-haptic.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#define MAX_MAGNITUDE_SHIFT 16
/* Driver state; ->mutex serializes magnitude updates vs. the work item. */
struct regulator_haptic {
	struct device *dev;
	struct input_dev *input_dev;
	struct regulator *regulator;	/* supply driving the motor */
	struct work_struct work;	/* applies ->magnitude in process ctx */
	struct mutex mutex;
	bool active;		/* regulator currently enabled */
	bool suspended;
	unsigned int max_volt;	/* uV at full magnitude */
	unsigned int min_volt;	/* uV baseline (magnitude 0) */
	unsigned int magnitude;	/* last requested FF magnitude */
};
/*
 * Enable or disable the haptic regulator, tracking the current state
 * so enable/disable calls stay balanced.  Returns 0 or a negative
 * errno from the regulator core.
 */
static int regulator_haptic_toggle(struct regulator_haptic *haptic, bool on)
{
	int error;

	/* Already in the requested state — nothing to do. */
	if (haptic->active == on)
		return 0;

	error = on ? regulator_enable(haptic->regulator) :
		     regulator_disable(haptic->regulator);
	if (error) {
		dev_err(haptic->dev,
			"failed to switch regulator %s: %d\n",
			on ? "on" : "off", error);
		return error;
	}

	haptic->active = on;

	return 0;
}
/*
 * Scale the 16-bit force-feedback magnitude linearly into the
 * [min_volt, max_volt] range, program the regulator voltage and then
 * enable (magnitude != 0) or disable (magnitude == 0) the regulator.
 *
 * Returns 0 on success or a negative errno.  The enable/disable result
 * is propagated to the caller instead of being silently dropped, so a
 * regulator that fails to switch no longer reports success.
 */
static int regulator_haptic_set_voltage(struct regulator_haptic *haptic,
					 unsigned int magnitude)
{
	u64 volt_mag_multi;
	unsigned int intensity;
	int error;

	/* 64-bit intermediate: (max - min) * magnitude may exceed 32 bits. */
	volt_mag_multi = (u64)(haptic->max_volt - haptic->min_volt) * magnitude;
	intensity = (unsigned int)(volt_mag_multi >> MAX_MAGNITUDE_SHIFT);

	error = regulator_set_voltage(haptic->regulator,
				      intensity + haptic->min_volt,
				      haptic->max_volt);
	if (error) {
		dev_err(haptic->dev, "cannot set regulator voltage to %d: %d\n",
			intensity + haptic->min_volt, error);
		return error;
	}

	return regulator_haptic_toggle(haptic, !!magnitude);
}
/*
 * Deferred worker (queued by the FF play callback): apply the most
 * recently requested magnitude.  The mutex serializes against
 * suspend/resume; no hardware access happens while suspended — resume
 * replays the stored magnitude instead.
 */
static void regulator_haptic_work(struct work_struct *work)
{
	struct regulator_haptic *haptic = container_of(work,
						struct regulator_haptic, work);

	mutex_lock(&haptic->mutex);

	if (!haptic->suspended)
		regulator_haptic_set_voltage(haptic, haptic->magnitude);

	mutex_unlock(&haptic->mutex);
}
/*
 * FF_RUMBLE play callback.  Records the requested magnitude (falling
 * back to the weak magnitude when no strong one is given) and defers
 * the actual regulator work to the workqueue.
 */
static int regulator_haptic_play_effect(struct input_dev *input, void *data,
					struct ff_effect *effect)
{
	struct regulator_haptic *haptic = input_get_drvdata(input);
	unsigned int strong = effect->u.rumble.strong_magnitude;

	haptic->magnitude = strong ? strong : effect->u.rumble.weak_magnitude;

	schedule_work(&haptic->work);

	return 0;
}
/*
 * Input close handler: flush any pending play request, then force the
 * motor off by programming a zero magnitude.
 */
static void regulator_haptic_close(struct input_dev *input)
{
	struct regulator_haptic *haptic = input_get_drvdata(input);

	cancel_work_sync(&haptic->work);
	regulator_haptic_set_voltage(haptic, 0);
}
/*
 * Read the required "max-microvolt"/"min-microvolt" properties from the
 * device tree node into @haptic.
 *
 * Returns 0 on success, -EINVAL when there is no DT node, or the
 * of_property_read_u32() error for a missing/malformed property.
 */
static int __maybe_unused
regulator_haptic_parse_dt(struct device *dev, struct regulator_haptic *haptic)
{
	struct device_node *node = dev->of_node;
	int error;

	if (!node) {
		dev_err(dev, "Missing device tree data\n");
		return -EINVAL;
	}

	error = of_property_read_u32(node, "max-microvolt", &haptic->max_volt);
	if (error) {
		dev_err(dev, "cannot parse max-microvolt\n");
		return error;
	}

	error = of_property_read_u32(node, "min-microvolt", &haptic->min_volt);
	if (error) {
		dev_err(dev, "cannot parse min-microvolt\n");
		return error;
	}

	return 0;
}
/*
 * Probe: bind a "haptic" regulator to an FF_RUMBLE input device.
 *
 * Voltage limits come from platform data when present, otherwise from
 * device tree ("max-microvolt"/"min-microvolt").  All resources are
 * devm-managed, so there is no explicit remove() cleanup.
 */
static int regulator_haptic_probe(struct platform_device *pdev)
{
	const struct regulator_haptic_data *pdata = dev_get_platdata(&pdev->dev);
	struct regulator_haptic *haptic;
	struct input_dev *input_dev;
	int error;

	haptic = devm_kzalloc(&pdev->dev, sizeof(*haptic), GFP_KERNEL);
	if (!haptic)
		return -ENOMEM;

	platform_set_drvdata(pdev, haptic);
	haptic->dev = &pdev->dev;
	mutex_init(&haptic->mutex);
	INIT_WORK(&haptic->work, regulator_haptic_work);

	/* Platform data wins; otherwise fall back to DT properties. */
	if (pdata) {
		haptic->max_volt = pdata->max_volt;
		haptic->min_volt = pdata->min_volt;
	} else if (IS_ENABLED(CONFIG_OF)) {
		error = regulator_haptic_parse_dt(&pdev->dev, haptic);
		if (error)
			return error;
	} else {
		dev_err(&pdev->dev, "Missing platform data\n");
		return -EINVAL;
	}

	/* Exclusive get: this driver toggles the supply itself. */
	haptic->regulator = devm_regulator_get_exclusive(&pdev->dev, "haptic");
	if (IS_ERR(haptic->regulator)) {
		dev_err(&pdev->dev, "failed to get regulator\n");
		return PTR_ERR(haptic->regulator);
	}

	input_dev = devm_input_allocate_device(&pdev->dev);
	if (!input_dev)
		return -ENOMEM;

	haptic->input_dev = input_dev;
	haptic->input_dev->name = "regulator-haptic";
	haptic->input_dev->dev.parent = &pdev->dev;
	/* close() cancels pending work and drives the motor off. */
	haptic->input_dev->close = regulator_haptic_close;
	input_set_drvdata(haptic->input_dev, haptic);
	input_set_capability(haptic->input_dev, EV_FF, FF_RUMBLE);

	error = input_ff_create_memless(input_dev, NULL,
					regulator_haptic_play_effect);
	if (error) {
		dev_err(&pdev->dev, "failed to create force-feedback\n");
		return error;
	}

	error = input_register_device(haptic->input_dev);
	if (error) {
		dev_err(&pdev->dev, "failed to register input device\n");
		return error;
	}

	return 0;
}
/*
 * System suspend: force the motor off and flag the device as suspended
 * so the worker leaves the hardware alone until resume.
 *
 * Returns 0, or -EINTR-style error if the mutex wait was interrupted.
 */
static int regulator_haptic_suspend(struct device *dev)
{
	struct regulator_haptic *haptic = dev_get_drvdata(dev);
	int error;

	error = mutex_lock_interruptible(&haptic->mutex);
	if (error)
		return error;

	regulator_haptic_set_voltage(haptic, 0);
	haptic->suspended = true;

	mutex_unlock(&haptic->mutex);

	return 0;
}
/*
 * System resume: clear the suspended flag and replay the last requested
 * magnitude, if any, so an effect that was playing continues.
 */
static int regulator_haptic_resume(struct device *dev)
{
	struct regulator_haptic *haptic = dev_get_drvdata(dev);
	unsigned int magnitude;

	mutex_lock(&haptic->mutex);

	haptic->suspended = false;

	magnitude = READ_ONCE(haptic->magnitude);
	if (magnitude)
		regulator_haptic_set_voltage(haptic, magnitude);

	mutex_unlock(&haptic->mutex);

	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(regulator_haptic_pm_ops,
regulator_haptic_suspend, regulator_haptic_resume);
static const struct of_device_id regulator_haptic_dt_match[] = {
{ .compatible = "regulator-haptic" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, regulator_haptic_dt_match);
static struct platform_driver regulator_haptic_driver = {
.probe = regulator_haptic_probe,
.driver = {
.name = "regulator-haptic",
.of_match_table = regulator_haptic_dt_match,
.pm = pm_sleep_ptr(®ulator_haptic_pm_ops),
},
};
module_platform_driver(regulator_haptic_driver);
MODULE_AUTHOR("Jaewon Kim <[email protected]>");
MODULE_AUTHOR("Hyunhee Kim <[email protected]>");
MODULE_DESCRIPTION("Regulator haptic driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/regulator-haptic.c
|
/*
* 88pm860x_onkey.c - Marvell 88PM860x ONKEY driver
*
* Copyright (C) 2009-2010 Marvell International Ltd.
* Haojian Zhuang <[email protected]>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mfd/88pm860x.h>
#include <linux/slab.h>
#include <linux/device.h>
#define PM8607_WAKEUP 0x0b
#define LONG_ONKEY_EN (1 << 1)
#define ONKEY_STATUS (1 << 0)
struct pm860x_onkey_info {
struct input_dev *idev;
struct pm860x_chip *chip;
struct i2c_client *i2c;
struct device *dev;
int irq;
};
/* 88PM860x gives us an interrupt when ONKEY is held */
/* 88PM860x gives us an interrupt when ONKEY is held */
static irqreturn_t pm860x_onkey_handler(int irq, void *data)
{
	struct pm860x_onkey_info *info = data;
	int ret;

	ret = pm860x_reg_read(info->i2c, PM8607_STATUS_2);
	if (ret < 0) {
		/*
		 * Fix: the I2C read result was previously used unchecked;
		 * don't report a bogus key state on a failed read.
		 */
		dev_err(info->dev, "failed to read status: %d\n", ret);
		return IRQ_HANDLED;
	}

	input_report_key(info->idev, KEY_POWER, ret & ONKEY_STATUS);
	input_sync(info->idev);

	/* Enable 8-second long onkey detection */
	pm860x_set_bits(info->i2c, PM8607_WAKEUP, 3, LONG_ONKEY_EN);

	return IRQ_HANDLED;
}
/*
 * Probe: register a KEY_POWER input device backed by the 88PM860x ONKEY
 * interrupt.  The I2C target depends on the chip variant (main client
 * for the 8607, companion otherwise).  Resources are devm-managed.
 */
static int pm860x_onkey_probe(struct platform_device *pdev)
{
	struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
	struct pm860x_onkey_info *info;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* Fix: keep -EPROBE_DEFER etc., was -EINVAL */

	info = devm_kzalloc(&pdev->dev, sizeof(struct pm860x_onkey_info),
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->chip = chip;
	info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
	info->dev = &pdev->dev;
	info->irq = irq;

	info->idev = devm_input_allocate_device(&pdev->dev);
	if (!info->idev) {
		dev_err(chip->dev, "Failed to allocate input dev\n");
		return -ENOMEM;
	}

	info->idev->name = "88pm860x_on";
	info->idev->phys = "88pm860x_on/input0";
	info->idev->id.bustype = BUS_I2C;
	info->idev->dev.parent = &pdev->dev;
	info->idev->evbit[0] = BIT_MASK(EV_KEY);
	info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);

	ret = input_register_device(info->idev);
	if (ret) {
		dev_err(chip->dev, "Can't register input device: %d\n", ret);
		return ret;
	}

	/* Threaded handler: the IRQ work does sleeping I2C transfers. */
	ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
					pm860x_onkey_handler, IRQF_ONESHOT,
					"onkey", info);
	if (ret < 0) {
		dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
			info->irq, ret);
		return ret;
	}

	platform_set_drvdata(pdev, info);
	device_init_wakeup(&pdev->dev, 1);

	return 0;
}
static int pm860x_onkey_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
if (device_may_wakeup(dev))
chip->wakeup_flag |= 1 << PM8607_IRQ_ONKEY;
return 0;
}
static int pm860x_onkey_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
if (device_may_wakeup(dev))
chip->wakeup_flag &= ~(1 << PM8607_IRQ_ONKEY);
return 0;
}
/* Sleep PM ops: arm/disarm the ONKEY wakeup source around suspend. */
static DEFINE_SIMPLE_DEV_PM_OPS(pm860x_onkey_pm_ops,
				pm860x_onkey_suspend, pm860x_onkey_resume);

static struct platform_driver pm860x_onkey_driver = {
	.driver		= {
		.name	= "88pm860x-onkey",
		.pm	= pm_sleep_ptr(&pm860x_onkey_pm_ops),
	},
	.probe		= pm860x_onkey_probe,
};
module_platform_driver(pm860x_onkey_driver);
MODULE_DESCRIPTION("Marvell 88PM860x ONKEY driver");
MODULE_AUTHOR("Haojian Zhuang <[email protected]>");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/88pm860x_onkey.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PWM vibrator driver
*
* Copyright (C) 2017 Collabora Ltd.
*
* Based on previous work from:
* Copyright (C) 2012 Dmitry Torokhov <[email protected]>
*
* Based on PWM beeper driver:
* Copyright (C) 2010, Lars-Peter Clausen <[email protected]>
*/
#include <linux/gpio/consumer.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
struct pwm_vibrator {
struct input_dev *input;
struct gpio_desc *enable_gpio;
struct pwm_device *pwm;
struct pwm_device *pwm_dir;
struct regulator *vcc;
struct work_struct play_work;
u16 level;
u32 direction_duty_cycle;
bool vcc_on;
};
/*
 * Power up and spin the vibrator: enable the vcc supply (once), assert
 * the optional enable GPIO, program the main PWM duty from the cached
 * level, and — if a direction PWM exists — program that too.
 *
 * Returns 0 on success or the first error encountered; on a direction
 * PWM failure the main PWM is disabled again.
 */
static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
{
	struct device *dev = vibrator->input->dev.parent;
	struct pwm_state state;
	int err;

	if (!vibrator->vcc_on) {
		err = regulator_enable(vibrator->vcc);
		if (err) {
			dev_err(dev, "failed to enable regulator: %d\n", err);
			return err;
		}
		vibrator->vcc_on = true;
	}

	gpiod_set_value_cansleep(vibrator->enable_gpio, 1);

	/* Scale level (0..0xffff) into the PWM's relative duty cycle. */
	pwm_get_state(vibrator->pwm, &state);
	pwm_set_relative_duty_cycle(&state, vibrator->level, 0xffff);
	state.enabled = true;

	err = pwm_apply_state(vibrator->pwm, &state);
	if (err) {
		dev_err(dev, "failed to apply pwm state: %d\n", err);
		return err;
	}

	if (vibrator->pwm_dir) {
		pwm_get_state(vibrator->pwm_dir, &state);
		state.duty_cycle = vibrator->direction_duty_cycle;
		state.enabled = true;

		err = pwm_apply_state(vibrator->pwm_dir, &state);
		if (err) {
			dev_err(dev, "failed to apply dir-pwm state: %d\n", err);
			pwm_disable(vibrator->pwm);
			return err;
		}
	}

	return 0;
}
/*
 * Stop the vibrator, tearing down in reverse order of
 * pwm_vibrator_start(): direction PWM, main PWM, enable GPIO, then the
 * vcc supply (only if we enabled it).
 */
static void pwm_vibrator_stop(struct pwm_vibrator *vibrator)
{
	if (vibrator->pwm_dir)
		pwm_disable(vibrator->pwm_dir);
	pwm_disable(vibrator->pwm);

	gpiod_set_value_cansleep(vibrator->enable_gpio, 0);

	if (vibrator->vcc_on) {
		regulator_disable(vibrator->vcc);
		vibrator->vcc_on = false;
	}
}
/*
 * Deferred worker: a zero level means "stop"; anything else (re)starts
 * the motor at the cached level.
 */
static void pwm_vibrator_play_work(struct work_struct *work)
{
	struct pwm_vibrator *vibrator =
		container_of(work, struct pwm_vibrator, play_work);

	if (!vibrator->level)
		pwm_vibrator_stop(vibrator);
	else
		pwm_vibrator_start(vibrator);
}
static int pwm_vibrator_play_effect(struct input_dev *dev, void *data,
struct ff_effect *effect)
{
struct pwm_vibrator *vibrator = input_get_drvdata(dev);
vibrator->level = effect->u.rumble.strong_magnitude;
if (!vibrator->level)
vibrator->level = effect->u.rumble.weak_magnitude;
schedule_work(&vibrator->play_work);
return 0;
}
/*
 * Input close handler: wait out any in-flight play request, then force
 * the motor off.
 */
static void pwm_vibrator_close(struct input_dev *input)
{
	struct pwm_vibrator *vibrator = input_get_drvdata(input);

	cancel_work_sync(&vibrator->play_work);
	pwm_vibrator_stop(vibrator);
}
/*
 * Probe: acquire the main (and optional direction) PWM, the vcc supply
 * and an optional enable GPIO, make sure both PWMs start disabled, then
 * register an FF_RUMBLE input device.  All resources are devm-managed.
 */
static int pwm_vibrator_probe(struct platform_device *pdev)
{
	struct pwm_vibrator *vibrator;
	struct pwm_state state;
	int err;

	vibrator = devm_kzalloc(&pdev->dev, sizeof(*vibrator), GFP_KERNEL);
	if (!vibrator)
		return -ENOMEM;

	vibrator->input = devm_input_allocate_device(&pdev->dev);
	if (!vibrator->input)
		return -ENOMEM;

	vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc");
	if (IS_ERR(vibrator->vcc))
		return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->vcc),
				     "Failed to request regulator\n");

	vibrator->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
							GPIOD_OUT_LOW);
	if (IS_ERR(vibrator->enable_gpio))
		return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->enable_gpio),
				     "Failed to request enable gpio\n");

	vibrator->pwm = devm_pwm_get(&pdev->dev, "enable");
	if (IS_ERR(vibrator->pwm))
		return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->pwm),
				     "Failed to request main pwm\n");

	INIT_WORK(&vibrator->play_work, pwm_vibrator_play_work);

	/* Sync up PWM state and ensure it is off. */
	pwm_init_state(vibrator->pwm, &state);
	state.enabled = false;
	err = pwm_apply_state(vibrator->pwm, &state);
	if (err) {
		dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
			err);
		return err;
	}

	/* The direction PWM is optional: -ENODATA means "not specified". */
	vibrator->pwm_dir = devm_pwm_get(&pdev->dev, "direction");
	err = PTR_ERR_OR_ZERO(vibrator->pwm_dir);
	switch (err) {
	case 0:
		/* Sync up PWM state and ensure it is off. */
		pwm_init_state(vibrator->pwm_dir, &state);
		state.enabled = false;
		err = pwm_apply_state(vibrator->pwm_dir, &state);
		if (err) {
			dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
				err);
			return err;
		}

		/* Default duty: half the period, overridable via property. */
		vibrator->direction_duty_cycle =
			pwm_get_period(vibrator->pwm_dir) / 2;
		device_property_read_u32(&pdev->dev, "direction-duty-cycle-ns",
					 &vibrator->direction_duty_cycle);
		break;

	case -ENODATA:
		/* Direction PWM is optional */
		vibrator->pwm_dir = NULL;
		break;

	default:
		dev_err(&pdev->dev, "Failed to request direction pwm: %d\n", err);
		fallthrough;

	case -EPROBE_DEFER:
		return err;
	}

	vibrator->input->name = "pwm-vibrator";
	vibrator->input->id.bustype = BUS_HOST;
	vibrator->input->dev.parent = &pdev->dev;
	/* close() flushes the work and stops the motor. */
	vibrator->input->close = pwm_vibrator_close;

	input_set_drvdata(vibrator->input, vibrator);
	input_set_capability(vibrator->input, EV_FF, FF_RUMBLE);

	err = input_ff_create_memless(vibrator->input, NULL,
				      pwm_vibrator_play_effect);
	if (err) {
		dev_err(&pdev->dev, "Couldn't create FF dev: %d\n", err);
		return err;
	}

	err = input_register_device(vibrator->input);
	if (err) {
		dev_err(&pdev->dev, "Couldn't register input dev: %d\n", err);
		return err;
	}

	platform_set_drvdata(pdev, vibrator);

	return 0;
}
/*
 * System suspend: flush any queued play request and, if the motor is
 * currently running (level != 0), stop it.  The level itself is kept so
 * resume can restart the effect.
 */
static int pwm_vibrator_suspend(struct device *dev)
{
	struct pwm_vibrator *vibrator = dev_get_drvdata(dev);

	cancel_work_sync(&vibrator->play_work);
	if (vibrator->level)
		pwm_vibrator_stop(vibrator);

	return 0;
}
/*
 * System resume: restart the motor at the level that was playing when
 * we suspended, if any.
 */
static int pwm_vibrator_resume(struct device *dev)
{
	struct pwm_vibrator *vibrator = dev_get_drvdata(dev);

	if (vibrator->level)
		pwm_vibrator_start(vibrator);

	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(pwm_vibrator_pm_ops,
pwm_vibrator_suspend, pwm_vibrator_resume);
#ifdef CONFIG_OF
static const struct of_device_id pwm_vibra_dt_match_table[] = {
{ .compatible = "pwm-vibrator" },
{},
};
MODULE_DEVICE_TABLE(of, pwm_vibra_dt_match_table);
#endif
static struct platform_driver pwm_vibrator_driver = {
.probe = pwm_vibrator_probe,
.driver = {
.name = "pwm-vibrator",
.pm = pm_sleep_ptr(&pwm_vibrator_pm_ops),
.of_match_table = of_match_ptr(pwm_vibra_dt_match_table),
},
};
module_platform_driver(pwm_vibrator_driver);
MODULE_AUTHOR("Sebastian Reichel <[email protected]>");
MODULE_DESCRIPTION("PWM vibrator driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pwm-vibrator");
|
linux-master
|
drivers/input/misc/pwm-vibra.c
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Azoteq IQS626A Capacitive Touch Controller
*
* Copyright (C) 2020 Jeff LaBundy <[email protected]>
*
* This driver registers up to 2 input devices: one representing capacitive or
* inductive keys as well as Hall-effect switches, and one for a trackpad that
* can express various gestures.
*/
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#define IQS626_VER_INFO 0x00
#define IQS626_VER_INFO_PROD_NUM 0x51
#define IQS626_SYS_FLAGS 0x02
#define IQS626_SYS_FLAGS_SHOW_RESET BIT(15)
#define IQS626_SYS_FLAGS_IN_ATI BIT(12)
#define IQS626_SYS_FLAGS_PWR_MODE_MASK GENMASK(9, 8)
#define IQS626_SYS_FLAGS_PWR_MODE_SHIFT 8
#define IQS626_HALL_OUTPUT 0x23
#define IQS626_SYS_SETTINGS 0x80
#define IQS626_SYS_SETTINGS_CLK_DIV BIT(15)
#define IQS626_SYS_SETTINGS_ULP_AUTO BIT(14)
#define IQS626_SYS_SETTINGS_DIS_AUTO BIT(13)
#define IQS626_SYS_SETTINGS_PWR_MODE_MASK GENMASK(12, 11)
#define IQS626_SYS_SETTINGS_PWR_MODE_SHIFT 11
#define IQS626_SYS_SETTINGS_PWR_MODE_MAX 3
#define IQS626_SYS_SETTINGS_ULP_UPDATE_MASK GENMASK(10, 8)
#define IQS626_SYS_SETTINGS_ULP_UPDATE_SHIFT 8
#define IQS626_SYS_SETTINGS_ULP_UPDATE_MAX 7
#define IQS626_SYS_SETTINGS_EVENT_MODE BIT(5)
#define IQS626_SYS_SETTINGS_EVENT_MODE_LP BIT(4)
#define IQS626_SYS_SETTINGS_REDO_ATI BIT(2)
#define IQS626_SYS_SETTINGS_ACK_RESET BIT(0)
#define IQS626_MISC_A_ATI_BAND_DISABLE BIT(7)
#define IQS626_MISC_A_TPx_LTA_UPDATE_MASK GENMASK(6, 4)
#define IQS626_MISC_A_TPx_LTA_UPDATE_SHIFT 4
#define IQS626_MISC_A_TPx_LTA_UPDATE_MAX 7
#define IQS626_MISC_A_ATI_LP_ONLY BIT(3)
#define IQS626_MISC_A_GPIO3_SELECT_MASK GENMASK(2, 0)
#define IQS626_MISC_A_GPIO3_SELECT_MAX 7
#define IQS626_EVENT_MASK_SYS BIT(6)
#define IQS626_EVENT_MASK_GESTURE BIT(3)
#define IQS626_EVENT_MASK_DEEP BIT(2)
#define IQS626_EVENT_MASK_TOUCH BIT(1)
#define IQS626_EVENT_MASK_PROX BIT(0)
#define IQS626_RATE_NP_MS_MAX 255
#define IQS626_RATE_LP_MS_MAX 255
#define IQS626_RATE_ULP_MS_MAX 4080
#define IQS626_TIMEOUT_PWR_MS_MAX 130560
#define IQS626_TIMEOUT_LTA_MS_MAX 130560
#define IQS626_MISC_B_RESEED_UI_SEL_MASK GENMASK(7, 6)
#define IQS626_MISC_B_RESEED_UI_SEL_SHIFT 6
#define IQS626_MISC_B_RESEED_UI_SEL_MAX 3
#define IQS626_MISC_B_THRESH_EXTEND BIT(5)
#define IQS626_MISC_B_TRACKING_UI_ENABLE BIT(4)
#define IQS626_MISC_B_TPx_SWIPE BIT(3)
#define IQS626_MISC_B_RESEED_OFFSET BIT(2)
#define IQS626_MISC_B_FILT_STR_TPx GENMASK(1, 0)
#define IQS626_THRESH_SWIPE_MAX 255
#define IQS626_TIMEOUT_TAP_MS_MAX 4080
#define IQS626_TIMEOUT_SWIPE_MS_MAX 4080
#define IQS626_CHx_ENG_0_MEAS_CAP_SIZE BIT(7)
#define IQS626_CHx_ENG_0_RX_TERM_VSS BIT(5)
#define IQS626_CHx_ENG_0_LINEARIZE BIT(4)
#define IQS626_CHx_ENG_0_DUAL_DIR BIT(3)
#define IQS626_CHx_ENG_0_FILT_DISABLE BIT(2)
#define IQS626_CHx_ENG_0_ATI_MODE_MASK GENMASK(1, 0)
#define IQS626_CHx_ENG_0_ATI_MODE_MAX 3
#define IQS626_CHx_ENG_1_CCT_HIGH_1 BIT(7)
#define IQS626_CHx_ENG_1_CCT_HIGH_0 BIT(6)
#define IQS626_CHx_ENG_1_PROJ_BIAS_MASK GENMASK(5, 4)
#define IQS626_CHx_ENG_1_PROJ_BIAS_SHIFT 4
#define IQS626_CHx_ENG_1_PROJ_BIAS_MAX 3
#define IQS626_CHx_ENG_1_CCT_ENABLE BIT(3)
#define IQS626_CHx_ENG_1_SENSE_FREQ_MASK GENMASK(2, 1)
#define IQS626_CHx_ENG_1_SENSE_FREQ_SHIFT 1
#define IQS626_CHx_ENG_1_SENSE_FREQ_MAX 3
#define IQS626_CHx_ENG_1_ATI_BAND_TIGHTEN BIT(0)
#define IQS626_CHx_ENG_2_LOCAL_CAP_MASK GENMASK(7, 6)
#define IQS626_CHx_ENG_2_LOCAL_CAP_SHIFT 6
#define IQS626_CHx_ENG_2_LOCAL_CAP_MAX 3
#define IQS626_CHx_ENG_2_LOCAL_CAP_ENABLE BIT(5)
#define IQS626_CHx_ENG_2_SENSE_MODE_MASK GENMASK(3, 0)
#define IQS626_CHx_ENG_2_SENSE_MODE_MAX 15
#define IQS626_CHx_ENG_3_TX_FREQ_MASK GENMASK(5, 4)
#define IQS626_CHx_ENG_3_TX_FREQ_SHIFT 4
#define IQS626_CHx_ENG_3_TX_FREQ_MAX 3
#define IQS626_CHx_ENG_3_INV_LOGIC BIT(0)
#define IQS626_CHx_ENG_4_RX_TERM_VREG BIT(6)
#define IQS626_CHx_ENG_4_CCT_LOW_1 BIT(5)
#define IQS626_CHx_ENG_4_CCT_LOW_0 BIT(4)
#define IQS626_CHx_ENG_4_COMP_DISABLE BIT(1)
#define IQS626_CHx_ENG_4_STATIC_ENABLE BIT(0)
#define IQS626_TPx_ATI_BASE_MIN 45
#define IQS626_TPx_ATI_BASE_MAX 300
#define IQS626_CHx_ATI_BASE_MASK GENMASK(7, 6)
#define IQS626_CHx_ATI_BASE_75 0x00
#define IQS626_CHx_ATI_BASE_100 0x40
#define IQS626_CHx_ATI_BASE_150 0x80
#define IQS626_CHx_ATI_BASE_200 0xC0
#define IQS626_CHx_ATI_TARGET_MASK GENMASK(5, 0)
#define IQS626_CHx_ATI_TARGET_MAX 2016
#define IQS626_CHx_THRESH_MAX 255
#define IQS626_CHx_HYST_DEEP_MASK GENMASK(7, 4)
#define IQS626_CHx_HYST_DEEP_SHIFT 4
#define IQS626_CHx_HYST_TOUCH_MASK GENMASK(3, 0)
#define IQS626_CHx_HYST_MAX 15
#define IQS626_FILT_STR_NP_TPx_MASK GENMASK(7, 6)
#define IQS626_FILT_STR_NP_TPx_SHIFT 6
#define IQS626_FILT_STR_LP_TPx_MASK GENMASK(5, 4)
#define IQS626_FILT_STR_LP_TPx_SHIFT 4
#define IQS626_FILT_STR_NP_CNT_MASK GENMASK(7, 6)
#define IQS626_FILT_STR_NP_CNT_SHIFT 6
#define IQS626_FILT_STR_LP_CNT_MASK GENMASK(5, 4)
#define IQS626_FILT_STR_LP_CNT_SHIFT 4
#define IQS626_FILT_STR_NP_LTA_MASK GENMASK(3, 2)
#define IQS626_FILT_STR_NP_LTA_SHIFT 2
#define IQS626_FILT_STR_LP_LTA_MASK GENMASK(1, 0)
#define IQS626_FILT_STR_MAX 3
#define IQS626_ULP_PROJ_ENABLE BIT(4)
#define IQS626_GEN_WEIGHT_MAX 255
#define IQS626_MAX_REG 0xFF
#define IQS626_NUM_CH_TP_3 9
#define IQS626_NUM_CH_TP_2 6
#define IQS626_NUM_CH_GEN 3
#define IQS626_NUM_CRx_TX 8
#define IQS626_PWR_MODE_POLL_SLEEP_US 50000
#define IQS626_PWR_MODE_POLL_TIMEOUT_US 500000
#define iqs626_irq_wait() usleep_range(350, 400)
enum iqs626_ch_id {
IQS626_CH_ULP_0,
IQS626_CH_TP_2,
IQS626_CH_TP_3,
IQS626_CH_GEN_0,
IQS626_CH_GEN_1,
IQS626_CH_GEN_2,
IQS626_CH_HALL,
};
enum iqs626_rx_inactive {
IQS626_RX_INACTIVE_VSS,
IQS626_RX_INACTIVE_FLOAT,
IQS626_RX_INACTIVE_VREG,
};
enum iqs626_st_offs {
IQS626_ST_OFFS_PROX,
IQS626_ST_OFFS_DIR,
IQS626_ST_OFFS_TOUCH,
IQS626_ST_OFFS_DEEP,
};
enum iqs626_th_offs {
IQS626_TH_OFFS_PROX,
IQS626_TH_OFFS_TOUCH,
IQS626_TH_OFFS_DEEP,
};
enum iqs626_event_id {
IQS626_EVENT_PROX_DN,
IQS626_EVENT_PROX_UP,
IQS626_EVENT_TOUCH_DN,
IQS626_EVENT_TOUCH_UP,
IQS626_EVENT_DEEP_DN,
IQS626_EVENT_DEEP_UP,
};
enum iqs626_gesture_id {
IQS626_GESTURE_FLICK_X_POS,
IQS626_GESTURE_FLICK_X_NEG,
IQS626_GESTURE_FLICK_Y_POS,
IQS626_GESTURE_FLICK_Y_NEG,
IQS626_GESTURE_TAP,
IQS626_GESTURE_HOLD,
IQS626_NUM_GESTURES,
};
struct iqs626_event_desc {
const char *name;
enum iqs626_st_offs st_offs;
enum iqs626_th_offs th_offs;
bool dir_up;
u8 mask;
};
static const struct iqs626_event_desc iqs626_events[] = {
[IQS626_EVENT_PROX_DN] = {
.name = "event-prox",
.st_offs = IQS626_ST_OFFS_PROX,
.th_offs = IQS626_TH_OFFS_PROX,
.mask = IQS626_EVENT_MASK_PROX,
},
[IQS626_EVENT_PROX_UP] = {
.name = "event-prox-alt",
.st_offs = IQS626_ST_OFFS_PROX,
.th_offs = IQS626_TH_OFFS_PROX,
.dir_up = true,
.mask = IQS626_EVENT_MASK_PROX,
},
[IQS626_EVENT_TOUCH_DN] = {
.name = "event-touch",
.st_offs = IQS626_ST_OFFS_TOUCH,
.th_offs = IQS626_TH_OFFS_TOUCH,
.mask = IQS626_EVENT_MASK_TOUCH,
},
[IQS626_EVENT_TOUCH_UP] = {
.name = "event-touch-alt",
.st_offs = IQS626_ST_OFFS_TOUCH,
.th_offs = IQS626_TH_OFFS_TOUCH,
.dir_up = true,
.mask = IQS626_EVENT_MASK_TOUCH,
},
[IQS626_EVENT_DEEP_DN] = {
.name = "event-deep",
.st_offs = IQS626_ST_OFFS_DEEP,
.th_offs = IQS626_TH_OFFS_DEEP,
.mask = IQS626_EVENT_MASK_DEEP,
},
[IQS626_EVENT_DEEP_UP] = {
.name = "event-deep-alt",
.st_offs = IQS626_ST_OFFS_DEEP,
.th_offs = IQS626_TH_OFFS_DEEP,
.dir_up = true,
.mask = IQS626_EVENT_MASK_DEEP,
},
};
struct iqs626_ver_info {
u8 prod_num;
u8 sw_num;
u8 hw_num;
u8 padding;
} __packed;
struct iqs626_flags {
__be16 system;
u8 gesture;
u8 padding_a;
u8 states[4];
u8 ref_active;
u8 padding_b;
u8 comp_min;
u8 comp_max;
u8 trackpad_x;
u8 trackpad_y;
} __packed;
struct iqs626_ch_reg_ulp {
u8 thresh[2];
u8 hyst;
u8 filter;
u8 engine[2];
u8 ati_target;
u8 padding;
__be16 ati_comp;
u8 rx_enable;
u8 tx_enable;
} __packed;
struct iqs626_ch_reg_tp {
u8 thresh;
u8 ati_base;
__be16 ati_comp;
} __packed;
struct iqs626_tp_grp_reg {
u8 hyst;
u8 ati_target;
u8 engine[2];
struct iqs626_ch_reg_tp ch_reg_tp[IQS626_NUM_CH_TP_3];
} __packed;
struct iqs626_ch_reg_gen {
u8 thresh[3];
u8 padding;
u8 hyst;
u8 ati_target;
__be16 ati_comp;
u8 engine[5];
u8 filter;
u8 rx_enable;
u8 tx_enable;
u8 assoc_select;
u8 assoc_weight;
} __packed;
struct iqs626_ch_reg_hall {
u8 engine;
u8 thresh;
u8 hyst;
u8 ati_target;
__be16 ati_comp;
} __packed;
struct iqs626_sys_reg {
__be16 general;
u8 misc_a;
u8 event_mask;
u8 active;
u8 reseed;
u8 rate_np;
u8 rate_lp;
u8 rate_ulp;
u8 timeout_pwr;
u8 timeout_rdy;
u8 timeout_lta;
u8 misc_b;
u8 thresh_swipe;
u8 timeout_tap;
u8 timeout_swipe;
u8 redo_ati;
u8 padding;
struct iqs626_ch_reg_ulp ch_reg_ulp;
struct iqs626_tp_grp_reg tp_grp_reg;
struct iqs626_ch_reg_gen ch_reg_gen[IQS626_NUM_CH_GEN];
struct iqs626_ch_reg_hall ch_reg_hall;
} __packed;
struct iqs626_channel_desc {
const char *name;
int num_ch;
u8 active;
bool events[ARRAY_SIZE(iqs626_events)];
};
static const struct iqs626_channel_desc iqs626_channels[] = {
[IQS626_CH_ULP_0] = {
.name = "ulp-0",
.num_ch = 1,
.active = BIT(0),
.events = {
[IQS626_EVENT_PROX_DN] = true,
[IQS626_EVENT_PROX_UP] = true,
[IQS626_EVENT_TOUCH_DN] = true,
[IQS626_EVENT_TOUCH_UP] = true,
},
},
[IQS626_CH_TP_2] = {
.name = "trackpad-3x2",
.num_ch = IQS626_NUM_CH_TP_2,
.active = BIT(1),
.events = {
[IQS626_EVENT_TOUCH_DN] = true,
},
},
[IQS626_CH_TP_3] = {
.name = "trackpad-3x3",
.num_ch = IQS626_NUM_CH_TP_3,
.active = BIT(2) | BIT(1),
.events = {
[IQS626_EVENT_TOUCH_DN] = true,
},
},
[IQS626_CH_GEN_0] = {
.name = "generic-0",
.num_ch = 1,
.active = BIT(4),
.events = {
[IQS626_EVENT_PROX_DN] = true,
[IQS626_EVENT_PROX_UP] = true,
[IQS626_EVENT_TOUCH_DN] = true,
[IQS626_EVENT_TOUCH_UP] = true,
[IQS626_EVENT_DEEP_DN] = true,
[IQS626_EVENT_DEEP_UP] = true,
},
},
[IQS626_CH_GEN_1] = {
.name = "generic-1",
.num_ch = 1,
.active = BIT(5),
.events = {
[IQS626_EVENT_PROX_DN] = true,
[IQS626_EVENT_PROX_UP] = true,
[IQS626_EVENT_TOUCH_DN] = true,
[IQS626_EVENT_TOUCH_UP] = true,
[IQS626_EVENT_DEEP_DN] = true,
[IQS626_EVENT_DEEP_UP] = true,
},
},
[IQS626_CH_GEN_2] = {
.name = "generic-2",
.num_ch = 1,
.active = BIT(6),
.events = {
[IQS626_EVENT_PROX_DN] = true,
[IQS626_EVENT_PROX_UP] = true,
[IQS626_EVENT_TOUCH_DN] = true,
[IQS626_EVENT_TOUCH_UP] = true,
[IQS626_EVENT_DEEP_DN] = true,
[IQS626_EVENT_DEEP_UP] = true,
},
},
[IQS626_CH_HALL] = {
.name = "hall",
.num_ch = 1,
.active = BIT(7),
.events = {
[IQS626_EVENT_TOUCH_DN] = true,
[IQS626_EVENT_TOUCH_UP] = true,
},
},
};
struct iqs626_private {
struct i2c_client *client;
struct regmap *regmap;
struct iqs626_sys_reg sys_reg;
struct completion ati_done;
struct input_dev *keypad;
struct input_dev *trackpad;
struct touchscreen_properties prop;
unsigned int kp_type[ARRAY_SIZE(iqs626_channels)]
[ARRAY_SIZE(iqs626_events)];
unsigned int kp_code[ARRAY_SIZE(iqs626_channels)]
[ARRAY_SIZE(iqs626_events)];
unsigned int tp_code[IQS626_NUM_GESTURES];
unsigned int suspend_mode;
};
/*
 * Parse the event child nodes ("event-prox", "event-touch", ...) of one
 * channel node: fill in the cached threshold/hysteresis register bytes
 * and the keypad code/type tables, and unmask reported events.
 *
 * Returns 0 on success or -EINVAL for invalid property values.
 */
static noinline_for_stack int
iqs626_parse_events(struct iqs626_private *iqs626,
		    struct fwnode_handle *ch_node, enum iqs626_ch_id ch_id)
{
	struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
	struct i2c_client *client = iqs626->client;
	struct fwnode_handle *ev_node;
	const char *ev_name;
	u8 *thresh, *hyst;
	unsigned int val;
	int i;

	/* Point thresh/hyst at the register block for this channel type. */
	switch (ch_id) {
	case IQS626_CH_ULP_0:
		thresh = sys_reg->ch_reg_ulp.thresh;
		hyst = &sys_reg->ch_reg_ulp.hyst;
		break;

	case IQS626_CH_TP_2:
	case IQS626_CH_TP_3:
		thresh = &sys_reg->tp_grp_reg.ch_reg_tp[0].thresh;
		hyst = &sys_reg->tp_grp_reg.hyst;
		break;

	case IQS626_CH_GEN_0:
	case IQS626_CH_GEN_1:
	case IQS626_CH_GEN_2:
		i = ch_id - IQS626_CH_GEN_0;
		thresh = sys_reg->ch_reg_gen[i].thresh;
		hyst = &sys_reg->ch_reg_gen[i].hyst;
		break;

	case IQS626_CH_HALL:
		thresh = &sys_reg->ch_reg_hall.thresh;
		hyst = &sys_reg->ch_reg_hall.hyst;
		break;

	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(iqs626_events); i++) {
		if (!iqs626_channels[ch_id].events[i])
			continue;

		if (ch_id == IQS626_CH_TP_2 || ch_id == IQS626_CH_TP_3) {
			/*
			 * Trackpad touch events are simply described under the
			 * trackpad child node.  Take an extra reference so the
			 * unconditional put below stays balanced.
			 */
			ev_node = fwnode_handle_get(ch_node);
		} else {
			ev_name = iqs626_events[i].name;
			ev_node = fwnode_get_named_child_node(ch_node, ev_name);
			if (!ev_node)
				continue;

			if (!fwnode_property_read_u32(ev_node, "linux,code",
						      &val)) {
				iqs626->kp_code[ch_id][i] = val;

				/*
				 * Default the input type when the property is
				 * absent: switch events for the Hall channel,
				 * key events otherwise.
				 */
				if (fwnode_property_read_u32(ev_node,
							     "linux,input-type",
							     &val)) {
					if (ch_id == IQS626_CH_HALL)
						val = EV_SW;
					else
						val = EV_KEY;
				}

				if (val != EV_KEY && val != EV_SW) {
					dev_err(&client->dev,
						"Invalid input type: %u\n",
						val);
					fwnode_handle_put(ev_node);
					return -EINVAL;
				}

				iqs626->kp_type[ch_id][i] = val;

				/* Unmask so the device reports this event. */
				sys_reg->event_mask &= ~iqs626_events[i].mask;
			}
		}

		if (!fwnode_property_read_u32(ev_node, "azoteq,hyst", &val)) {
			if (val > IQS626_CHx_HYST_MAX) {
				dev_err(&client->dev,
					"Invalid %s channel hysteresis: %u\n",
					fwnode_get_name(ch_node), val);
				fwnode_handle_put(ev_node);
				return -EINVAL;
			}

			/* Deep and touch hysteresis share one register byte. */
			if (i == IQS626_EVENT_DEEP_DN ||
			    i == IQS626_EVENT_DEEP_UP) {
				*hyst &= ~IQS626_CHx_HYST_DEEP_MASK;
				*hyst |= (val << IQS626_CHx_HYST_DEEP_SHIFT);
			} else if (i == IQS626_EVENT_TOUCH_DN ||
				   i == IQS626_EVENT_TOUCH_UP) {
				*hyst &= ~IQS626_CHx_HYST_TOUCH_MASK;
				*hyst |= val;
			}
		}

		if (ch_id != IQS626_CH_TP_2 && ch_id != IQS626_CH_TP_3 &&
		    !fwnode_property_read_u32(ev_node, "azoteq,thresh", &val)) {
			if (val > IQS626_CHx_THRESH_MAX) {
				dev_err(&client->dev,
					"Invalid %s channel threshold: %u\n",
					fwnode_get_name(ch_node), val);
				fwnode_handle_put(ev_node);
				return -EINVAL;
			}

			/* The Hall channel has a single threshold byte. */
			if (ch_id == IQS626_CH_HALL)
				*thresh = val;
			else
				*(thresh + iqs626_events[i].th_offs) = val;
		}

		fwnode_handle_put(ev_node);
	}

	return 0;
}
/*
 * Parse "azoteq,ati-target" and (for non-trackpad channels)
 * "azoteq,ati-base" from one channel node into the cached ATI target
 * register byte for that channel.
 *
 * Returns 0 on success or -EINVAL for out-of-range values.
 */
static noinline_for_stack int
iqs626_parse_ati_target(struct iqs626_private *iqs626,
			struct fwnode_handle *ch_node, enum iqs626_ch_id ch_id)
{
	struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
	struct i2c_client *client = iqs626->client;
	unsigned int val;
	u8 *ati_target;
	int i;

	/* Locate the ATI target byte for this channel type. */
	switch (ch_id) {
	case IQS626_CH_ULP_0:
		ati_target = &sys_reg->ch_reg_ulp.ati_target;
		break;

	case IQS626_CH_TP_2:
	case IQS626_CH_TP_3:
		ati_target = &sys_reg->tp_grp_reg.ati_target;
		break;

	case IQS626_CH_GEN_0:
	case IQS626_CH_GEN_1:
	case IQS626_CH_GEN_2:
		i = ch_id - IQS626_CH_GEN_0;
		ati_target = &sys_reg->ch_reg_gen[i].ati_target;
		break;

	case IQS626_CH_HALL:
		ati_target = &sys_reg->ch_reg_hall.ati_target;
		break;

	default:
		return -EINVAL;
	}

	if (!fwnode_property_read_u32(ch_node, "azoteq,ati-target", &val)) {
		if (val > IQS626_CHx_ATI_TARGET_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel ATI target: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		/* The register stores the target as val / 32. */
		*ati_target &= ~IQS626_CHx_ATI_TARGET_MASK;
		*ati_target |= (val / 32);
	}

	if (ch_id != IQS626_CH_TP_2 && ch_id != IQS626_CH_TP_3 &&
	    !fwnode_property_read_u32(ch_node, "azoteq,ati-base", &val)) {
		/* Only the discrete bases 75/100/150/200 are supported. */
		switch (val) {
		case 75:
			val = IQS626_CHx_ATI_BASE_75;
			break;

		case 100:
			val = IQS626_CHx_ATI_BASE_100;
			break;

		case 150:
			val = IQS626_CHx_ATI_BASE_150;
			break;

		case 200:
			val = IQS626_CHx_ATI_BASE_200;
			break;

		default:
			dev_err(&client->dev,
				"Invalid %s channel ATI base: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		*ati_target &= ~IQS626_CHx_ATI_BASE_MASK;
		*ati_target |= val;
	}

	return 0;
}
/*
 * Read a CRX/TX pin list property from a channel node and convert it
 * into a bitmask in *enable (bit N set for pin N).  Absent property
 * leaves *enable untouched and returns 0.
 *
 * Returns 0 on success, -EINVAL for too many or invalid pin numbers,
 * or the negative count error from the property layer.
 */
static int iqs626_parse_pins(struct iqs626_private *iqs626,
			     struct fwnode_handle *ch_node,
			     const char *propname, u8 *enable)
{
	struct i2c_client *client = iqs626->client;
	unsigned int val[IQS626_NUM_CRx_TX];
	int error, count, i;

	if (!fwnode_property_present(ch_node, propname))
		return 0;

	count = fwnode_property_count_u32(ch_node, propname);
	if (count < 0) {
		dev_err(&client->dev,
			"Failed to count %s channel CRX/TX pins: %d\n",
			fwnode_get_name(ch_node), count);
		return count;
	}

	if (count > IQS626_NUM_CRx_TX) {
		dev_err(&client->dev,
			"Too many %s channel CRX/TX pins present\n",
			fwnode_get_name(ch_node));
		return -EINVAL;
	}

	error = fwnode_property_read_u32_array(ch_node, propname, val, count);
	if (error) {
		dev_err(&client->dev,
			"Failed to read %s channel CRX/TX pins: %d\n",
			fwnode_get_name(ch_node), error);
		return error;
	}

	*enable = 0;

	for (i = 0; i < count; i++) {
		if (val[i] >= IQS626_NUM_CRx_TX) {
			dev_err(&client->dev,
				"Invalid %s channel CRX/TX pin: %u\n",
				fwnode_get_name(ch_node), val[i]);
			return -EINVAL;
		}

		*enable |= BIT(val[i]);
	}

	return 0;
}
/*
 * Parse properties specific to the trackpad (TP) channel group: filter
 * strengths, per-finger-channel ATI base and touch thresholds, and the
 * optional gesture keycodes.
 *
 * Returns 0 on success, -EINVAL for an out-of-range property value, or a
 * negative error code propagated from the property-read helpers.
 */
static int iqs626_parse_trackpad(struct iqs626_private *iqs626,
				 struct fwnode_handle *ch_node,
				 enum iqs626_ch_id ch_id)
{
	struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
	struct i2c_client *client = iqs626->client;
	u8 *hyst = &sys_reg->tp_grp_reg.hyst;
	int error, count, i;
	unsigned int val;

	/* Long-term average (LTA) update rate for the trackpad channels. */
	if (!fwnode_property_read_u32(ch_node, "azoteq,lta-update", &val)) {
		if (val > IQS626_MISC_A_TPx_LTA_UPDATE_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel update rate: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		sys_reg->misc_a &= ~IQS626_MISC_A_TPx_LTA_UPDATE_MASK;
		sys_reg->misc_a |= (val << IQS626_MISC_A_TPx_LTA_UPDATE_SHIFT);
	}

	/* Trackpad count-filter strength (low bits of misc_b). */
	if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-trackpad",
				      &val)) {
		if (val > IQS626_FILT_STR_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel filter strength: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		sys_reg->misc_b &= ~IQS626_MISC_B_FILT_STR_TPx;
		sys_reg->misc_b |= val;
	}

	/* Normal-power counts filter strength (shared hyst register). */
	if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-np-cnt",
				      &val)) {
		if (val > IQS626_FILT_STR_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel filter strength: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		*hyst &= ~IQS626_FILT_STR_NP_TPx_MASK;
		*hyst |= (val << IQS626_FILT_STR_NP_TPx_SHIFT);
	}

	/* Low-power counts filter strength (shared hyst register). */
	if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-lp-cnt",
				      &val)) {
		if (val > IQS626_FILT_STR_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel filter strength: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		*hyst &= ~IQS626_FILT_STR_LP_TPx_MASK;
		*hyst |= (val << IQS626_FILT_STR_LP_TPx_SHIFT);
	}

	/*
	 * Each trackpad finger channel may carry its own ATI base and touch
	 * threshold inside a "channel-N" child node; missing nodes keep the
	 * defaults read back from the device.
	 */
	for (i = 0; i < iqs626_channels[ch_id].num_ch; i++) {
		u8 *ati_base = &sys_reg->tp_grp_reg.ch_reg_tp[i].ati_base;
		u8 *thresh = &sys_reg->tp_grp_reg.ch_reg_tp[i].thresh;
		struct fwnode_handle *tc_node;
		char tc_name[10];

		snprintf(tc_name, sizeof(tc_name), "channel-%d", i);

		tc_node = fwnode_get_named_child_node(ch_node, tc_name);
		if (!tc_node)
			continue;

		if (!fwnode_property_read_u32(tc_node, "azoteq,ati-base",
					      &val)) {
			if (val < IQS626_TPx_ATI_BASE_MIN ||
			    val > IQS626_TPx_ATI_BASE_MAX) {
				dev_err(&client->dev,
					"Invalid %s %s ATI base: %u\n",
					fwnode_get_name(ch_node), tc_name, val);
				fwnode_handle_put(tc_node);
				return -EINVAL;
			}

			/* The register holds the offset from the minimum. */
			*ati_base = val - IQS626_TPx_ATI_BASE_MIN;
		}

		if (!fwnode_property_read_u32(tc_node, "azoteq,thresh",
					      &val)) {
			if (val > IQS626_CHx_THRESH_MAX) {
				dev_err(&client->dev,
					"Invalid %s %s threshold: %u\n",
					fwnode_get_name(ch_node), tc_name, val);
				fwnode_handle_put(tc_node);
				return -EINVAL;
			}

			*thresh = val;
		}

		fwnode_handle_put(tc_node);
	}

	/* Gesture support is configured only if keycodes are mapped. */
	if (!fwnode_property_present(ch_node, "linux,keycodes"))
		return 0;

	count = fwnode_property_count_u32(ch_node, "linux,keycodes");
	if (count > IQS626_NUM_GESTURES) {
		dev_err(&client->dev, "Too many keycodes present\n");
		return -EINVAL;
	} else if (count < 0) {
		dev_err(&client->dev, "Failed to count keycodes: %d\n", count);
		return count;
	}

	error = fwnode_property_read_u32_array(ch_node, "linux,keycodes",
					       iqs626->tp_code, count);
	if (error) {
		dev_err(&client->dev, "Failed to read keycodes: %d\n", error);
		return error;
	}

	sys_reg->misc_b &= ~IQS626_MISC_B_TPx_SWIPE;
	if (fwnode_property_present(ch_node, "azoteq,gesture-swipe"))
		sys_reg->misc_b |= IQS626_MISC_B_TPx_SWIPE;

	/*
	 * Tap/swipe timeouts are stored divided by 16; the registers appear
	 * to count in 16-ms steps (TODO: confirm against the datasheet).
	 */
	if (!fwnode_property_read_u32(ch_node, "azoteq,timeout-tap-ms",
				      &val)) {
		if (val > IQS626_TIMEOUT_TAP_MS_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel timeout: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		sys_reg->timeout_tap = val / 16;
	}

	if (!fwnode_property_read_u32(ch_node, "azoteq,timeout-swipe-ms",
				      &val)) {
		if (val > IQS626_TIMEOUT_SWIPE_MS_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel timeout: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		sys_reg->timeout_swipe = val / 16;
	}

	if (!fwnode_property_read_u32(ch_node, "azoteq,thresh-swipe",
				      &val)) {
		if (val > IQS626_THRESH_SWIPE_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel threshold: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		sys_reg->thresh_swipe = val;
	}

	/*
	 * Clear the gesture bits in the event mask to flag that gestures are
	 * in use; iqs626_input_init() and iqs626_report() test these bits to
	 * choose between gesture keys and raw coordinate reporting.
	 */
	sys_reg->event_mask &= ~IQS626_EVENT_MASK_GESTURE;

	return 0;
}
/*
 * Parse the properties common to all channel types, then defer to the
 * type-specific helpers (ATI target, events, trackpad) as appropriate.
 *
 * The 'engine' pointer addresses the first of a group of consecutive
 * per-channel engine registers; *(engine + N) reaches register N of the
 * group. Not every channel type has all registers (e.g. the Hall channel
 * has only the first), so the code returns early accordingly.
 *
 * Returns 0 on success or a negative error code.
 */
static noinline_for_stack int
iqs626_parse_channel(struct iqs626_private *iqs626,
		     struct fwnode_handle *ch_node, enum iqs626_ch_id ch_id)
{
	struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
	struct i2c_client *client = iqs626->client;
	u8 *engine, *filter, *rx_enable, *tx_enable;
	u8 *assoc_select, *assoc_weight;
	unsigned int val;
	int error, i;

	/* Select the engine register group that matches the channel type. */
	switch (ch_id) {
	case IQS626_CH_ULP_0:
		engine = sys_reg->ch_reg_ulp.engine;
		break;

	case IQS626_CH_TP_2:
	case IQS626_CH_TP_3:
		engine = sys_reg->tp_grp_reg.engine;
		break;

	case IQS626_CH_GEN_0:
	case IQS626_CH_GEN_1:
	case IQS626_CH_GEN_2:
		i = ch_id - IQS626_CH_GEN_0;
		engine = sys_reg->ch_reg_gen[i].engine;
		break;

	case IQS626_CH_HALL:
		engine = &sys_reg->ch_reg_hall.engine;
		break;

	default:
		return -EINVAL;
	}

	error = iqs626_parse_ati_target(iqs626, ch_node, ch_id);
	if (error)
		return error;

	error = iqs626_parse_events(iqs626, ch_node, ch_id);
	if (error)
		return error;

	/* Channels take part in ATI/reseed unless explicitly opted out. */
	if (!fwnode_property_present(ch_node, "azoteq,ati-exclude"))
		sys_reg->redo_ati |= iqs626_channels[ch_id].active;

	if (!fwnode_property_present(ch_node, "azoteq,reseed-disable"))
		sys_reg->reseed |= iqs626_channels[ch_id].active;

	*engine |= IQS626_CHx_ENG_0_MEAS_CAP_SIZE;
	if (fwnode_property_present(ch_node, "azoteq,meas-cap-decrease"))
		*engine &= ~IQS626_CHx_ENG_0_MEAS_CAP_SIZE;

	/* Default inactive-CRX termination is VSS unless overridden. */
	*engine |= IQS626_CHx_ENG_0_RX_TERM_VSS;
	if (!fwnode_property_read_u32(ch_node, "azoteq,rx-inactive", &val)) {
		switch (val) {
		case IQS626_RX_INACTIVE_VSS:
			break;

		case IQS626_RX_INACTIVE_FLOAT:
			*engine &= ~IQS626_CHx_ENG_0_RX_TERM_VSS;
			if (ch_id == IQS626_CH_GEN_0 ||
			    ch_id == IQS626_CH_GEN_1 ||
			    ch_id == IQS626_CH_GEN_2)
				*(engine + 4) &= ~IQS626_CHx_ENG_4_RX_TERM_VREG;
			break;

		case IQS626_RX_INACTIVE_VREG:
			/* VREG termination exists on generic channels only. */
			if (ch_id == IQS626_CH_GEN_0 ||
			    ch_id == IQS626_CH_GEN_1 ||
			    ch_id == IQS626_CH_GEN_2) {
				*engine &= ~IQS626_CHx_ENG_0_RX_TERM_VSS;
				*(engine + 4) |= IQS626_CHx_ENG_4_RX_TERM_VREG;
				break;
			}
			fallthrough;

		default:
			dev_err(&client->dev,
				"Invalid %s channel CRX pin termination: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}
	}

	*engine &= ~IQS626_CHx_ENG_0_LINEARIZE;
	if (fwnode_property_present(ch_node, "azoteq,linearize"))
		*engine |= IQS626_CHx_ENG_0_LINEARIZE;

	*engine &= ~IQS626_CHx_ENG_0_DUAL_DIR;
	if (fwnode_property_present(ch_node, "azoteq,dual-direction"))
		*engine |= IQS626_CHx_ENG_0_DUAL_DIR;

	*engine &= ~IQS626_CHx_ENG_0_FILT_DISABLE;
	if (fwnode_property_present(ch_node, "azoteq,filt-disable"))
		*engine |= IQS626_CHx_ENG_0_FILT_DISABLE;

	if (!fwnode_property_read_u32(ch_node, "azoteq,ati-mode", &val)) {
		if (val > IQS626_CHx_ENG_0_ATI_MODE_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel ATI mode: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		*engine &= ~IQS626_CHx_ENG_0_ATI_MODE_MASK;
		*engine |= val;
	}

	/* The Hall channel has no further engine registers to program. */
	if (ch_id == IQS626_CH_HALL)
		return 0;

	/*
	 * A non-zero "azoteq,cct-increase" value enables a longer charge
	 * cycle time; the property value is biased by one before being
	 * written to the register field(s).
	 */
	*(engine + 1) &= ~IQS626_CHx_ENG_1_CCT_ENABLE;
	if (!fwnode_property_read_u32(ch_node, "azoteq,cct-increase",
				      &val) && val) {
		unsigned int orig_val = val--;

		/*
		 * In the case of the generic channels, the charge cycle time
		 * field doubles in size and straddles two separate registers.
		 */
		if (ch_id == IQS626_CH_GEN_0 ||
		    ch_id == IQS626_CH_GEN_1 ||
		    ch_id == IQS626_CH_GEN_2) {
			*(engine + 4) &= ~IQS626_CHx_ENG_4_CCT_LOW_1;
			if (val & BIT(1))
				*(engine + 4) |= IQS626_CHx_ENG_4_CCT_LOW_1;

			*(engine + 4) &= ~IQS626_CHx_ENG_4_CCT_LOW_0;
			if (val & BIT(0))
				*(engine + 4) |= IQS626_CHx_ENG_4_CCT_LOW_0;

			val >>= 2;
		}

		/* Anything left beyond the two high bits is out of range. */
		if (val & ~GENMASK(1, 0)) {
			dev_err(&client->dev,
				"Invalid %s channel charge cycle time: %u\n",
				fwnode_get_name(ch_node), orig_val);
			return -EINVAL;
		}

		*(engine + 1) &= ~IQS626_CHx_ENG_1_CCT_HIGH_1;
		if (val & BIT(1))
			*(engine + 1) |= IQS626_CHx_ENG_1_CCT_HIGH_1;

		*(engine + 1) &= ~IQS626_CHx_ENG_1_CCT_HIGH_0;
		if (val & BIT(0))
			*(engine + 1) |= IQS626_CHx_ENG_1_CCT_HIGH_0;

		*(engine + 1) |= IQS626_CHx_ENG_1_CCT_ENABLE;
	}

	if (!fwnode_property_read_u32(ch_node, "azoteq,proj-bias", &val)) {
		if (val > IQS626_CHx_ENG_1_PROJ_BIAS_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel bias current: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		*(engine + 1) &= ~IQS626_CHx_ENG_1_PROJ_BIAS_MASK;
		*(engine + 1) |= (val << IQS626_CHx_ENG_1_PROJ_BIAS_SHIFT);
	}

	if (!fwnode_property_read_u32(ch_node, "azoteq,sense-freq", &val)) {
		if (val > IQS626_CHx_ENG_1_SENSE_FREQ_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel sensing frequency: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		*(engine + 1) &= ~IQS626_CHx_ENG_1_SENSE_FREQ_MASK;
		*(engine + 1) |= (val << IQS626_CHx_ENG_1_SENSE_FREQ_SHIFT);
	}

	*(engine + 1) &= ~IQS626_CHx_ENG_1_ATI_BAND_TIGHTEN;
	if (fwnode_property_present(ch_node, "azoteq,ati-band-tighten"))
		*(engine + 1) |= IQS626_CHx_ENG_1_ATI_BAND_TIGHTEN;

	/* Trackpad channels carry their own dedicated properties. */
	if (ch_id == IQS626_CH_TP_2 || ch_id == IQS626_CH_TP_3)
		return iqs626_parse_trackpad(iqs626, ch_node, ch_id);

	/* Resolve per-channel-type filter and pin-enable registers. */
	if (ch_id == IQS626_CH_ULP_0) {
		sys_reg->ch_reg_ulp.hyst &= ~IQS626_ULP_PROJ_ENABLE;
		if (fwnode_property_present(ch_node, "azoteq,proj-enable"))
			sys_reg->ch_reg_ulp.hyst |= IQS626_ULP_PROJ_ENABLE;

		filter = &sys_reg->ch_reg_ulp.filter;

		rx_enable = &sys_reg->ch_reg_ulp.rx_enable;
		tx_enable = &sys_reg->ch_reg_ulp.tx_enable;
	} else {
		i = ch_id - IQS626_CH_GEN_0;
		filter = &sys_reg->ch_reg_gen[i].filter;

		rx_enable = &sys_reg->ch_reg_gen[i].rx_enable;
		tx_enable = &sys_reg->ch_reg_gen[i].tx_enable;
	}

	/* Four filter strengths share the same value range. */
	if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-np-cnt",
				      &val)) {
		if (val > IQS626_FILT_STR_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel filter strength: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		*filter &= ~IQS626_FILT_STR_NP_CNT_MASK;
		*filter |= (val << IQS626_FILT_STR_NP_CNT_SHIFT);
	}

	if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-lp-cnt",
				      &val)) {
		if (val > IQS626_FILT_STR_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel filter strength: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		*filter &= ~IQS626_FILT_STR_LP_CNT_MASK;
		*filter |= (val << IQS626_FILT_STR_LP_CNT_SHIFT);
	}

	if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-np-lta",
				      &val)) {
		if (val > IQS626_FILT_STR_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel filter strength: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		*filter &= ~IQS626_FILT_STR_NP_LTA_MASK;
		*filter |= (val << IQS626_FILT_STR_NP_LTA_SHIFT);
	}

	if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-lp-lta",
				      &val)) {
		if (val > IQS626_FILT_STR_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel filter strength: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		*filter &= ~IQS626_FILT_STR_LP_LTA_MASK;
		*filter |= val;
	}

	error = iqs626_parse_pins(iqs626, ch_node, "azoteq,rx-enable",
				  rx_enable);
	if (error)
		return error;

	error = iqs626_parse_pins(iqs626, ch_node, "azoteq,tx-enable",
				  tx_enable);
	if (error)
		return error;

	/* The remaining engine registers exist on generic channels only. */
	if (ch_id == IQS626_CH_ULP_0)
		return 0;

	/* As with cct-increase, a non-zero value is biased by one. */
	*(engine + 2) &= ~IQS626_CHx_ENG_2_LOCAL_CAP_ENABLE;
	if (!fwnode_property_read_u32(ch_node, "azoteq,local-cap-size",
				      &val) && val) {
		unsigned int orig_val = val--;

		if (val > IQS626_CHx_ENG_2_LOCAL_CAP_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel local cap. size: %u\n",
				fwnode_get_name(ch_node), orig_val);
			return -EINVAL;
		}

		*(engine + 2) &= ~IQS626_CHx_ENG_2_LOCAL_CAP_MASK;
		*(engine + 2) |= (val << IQS626_CHx_ENG_2_LOCAL_CAP_SHIFT);

		*(engine + 2) |= IQS626_CHx_ENG_2_LOCAL_CAP_ENABLE;
	}

	if (!fwnode_property_read_u32(ch_node, "azoteq,sense-mode", &val)) {
		if (val > IQS626_CHx_ENG_2_SENSE_MODE_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel sensing mode: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		*(engine + 2) &= ~IQS626_CHx_ENG_2_SENSE_MODE_MASK;
		*(engine + 2) |= val;
	}

	if (!fwnode_property_read_u32(ch_node, "azoteq,tx-freq", &val)) {
		if (val > IQS626_CHx_ENG_3_TX_FREQ_MAX) {
			dev_err(&client->dev,
				"Invalid %s channel excitation frequency: %u\n",
				fwnode_get_name(ch_node), val);
			return -EINVAL;
		}

		*(engine + 3) &= ~IQS626_CHx_ENG_3_TX_FREQ_MASK;
		*(engine + 3) |= (val << IQS626_CHx_ENG_3_TX_FREQ_SHIFT);
	}

	*(engine + 3) &= ~IQS626_CHx_ENG_3_INV_LOGIC;
	if (fwnode_property_present(ch_node, "azoteq,invert-enable"))
		*(engine + 3) |= IQS626_CHx_ENG_3_INV_LOGIC;

	*(engine + 4) &= ~IQS626_CHx_ENG_4_COMP_DISABLE;
	if (fwnode_property_present(ch_node, "azoteq,comp-disable"))
		*(engine + 4) |= IQS626_CHx_ENG_4_COMP_DISABLE;

	*(engine + 4) &= ~IQS626_CHx_ENG_4_STATIC_ENABLE;
	if (fwnode_property_present(ch_node, "azoteq,static-enable"))
		*(engine + 4) |= IQS626_CHx_ENG_4_STATIC_ENABLE;

	/*
	 * Optional channel association: build a bitmask of the named sibling
	 * channels, then apply the optional weight.
	 */
	i = ch_id - IQS626_CH_GEN_0;
	assoc_select = &sys_reg->ch_reg_gen[i].assoc_select;
	assoc_weight = &sys_reg->ch_reg_gen[i].assoc_weight;

	*assoc_select = 0;
	if (!fwnode_property_present(ch_node, "azoteq,assoc-select"))
		return 0;

	for (i = 0; i < ARRAY_SIZE(iqs626_channels); i++) {
		if (fwnode_property_match_string(ch_node, "azoteq,assoc-select",
						 iqs626_channels[i].name) < 0)
			continue;

		*assoc_select |= iqs626_channels[i].active;
	}

	if (fwnode_property_read_u32(ch_node, "azoteq,assoc-weight", &val))
		return 0;

	if (val > IQS626_GEN_WEIGHT_MAX) {
		dev_err(&client->dev,
			"Invalid %s channel associated weight: %u\n",
			fwnode_get_name(ch_node), val);
		return -EINVAL;
	}

	*assoc_weight = val;

	return 0;
}
/*
 * Read back the device's register image, overlay it with all device- and
 * channel-level firmware properties, and write the result to the device.
 *
 * Returns 0 on success or a negative error code from property parsing or
 * regmap I/O.
 */
static int iqs626_parse_prop(struct iqs626_private *iqs626)
{
	struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
	struct i2c_client *client = iqs626->client;
	struct fwnode_handle *ch_node;
	unsigned int val;
	int error, i;
	u16 general;

	if (!device_property_read_u32(&client->dev, "azoteq,suspend-mode",
				      &val)) {
		if (val > IQS626_SYS_SETTINGS_PWR_MODE_MAX) {
			dev_err(&client->dev, "Invalid suspend mode: %u\n",
				val);
			return -EINVAL;
		}

		iqs626->suspend_mode = val;
	}

	/* Seed the local register cache with the device's current state. */
	error = regmap_raw_read(iqs626->regmap, IQS626_SYS_SETTINGS, sys_reg,
				sizeof(*sys_reg));
	if (error)
		return error;

	/* The general register is stored big-endian on the device. */
	general = be16_to_cpu(sys_reg->general);
	general &= IQS626_SYS_SETTINGS_ULP_UPDATE_MASK;

	if (device_property_present(&client->dev, "azoteq,clk-div"))
		general |= IQS626_SYS_SETTINGS_CLK_DIV;

	if (device_property_present(&client->dev, "azoteq,ulp-enable"))
		general |= IQS626_SYS_SETTINGS_ULP_AUTO;

	if (!device_property_read_u32(&client->dev, "azoteq,ulp-update",
				      &val)) {
		if (val > IQS626_SYS_SETTINGS_ULP_UPDATE_MAX) {
			dev_err(&client->dev, "Invalid update rate: %u\n", val);
			return -EINVAL;
		}

		general &= ~IQS626_SYS_SETTINGS_ULP_UPDATE_MASK;
		general |= (val << IQS626_SYS_SETTINGS_ULP_UPDATE_SHIFT);
	}

	sys_reg->misc_a &= ~IQS626_MISC_A_ATI_BAND_DISABLE;
	if (device_property_present(&client->dev, "azoteq,ati-band-disable"))
		sys_reg->misc_a |= IQS626_MISC_A_ATI_BAND_DISABLE;

	sys_reg->misc_a &= ~IQS626_MISC_A_ATI_LP_ONLY;
	if (device_property_present(&client->dev, "azoteq,ati-lp-only"))
		sys_reg->misc_a |= IQS626_MISC_A_ATI_LP_ONLY;

	if (!device_property_read_u32(&client->dev, "azoteq,gpio3-select",
				      &val)) {
		if (val > IQS626_MISC_A_GPIO3_SELECT_MAX) {
			dev_err(&client->dev, "Invalid GPIO3 selection: %u\n",
				val);
			return -EINVAL;
		}

		sys_reg->misc_a &= ~IQS626_MISC_A_GPIO3_SELECT_MASK;
		sys_reg->misc_a |= val;
	}

	if (!device_property_read_u32(&client->dev, "azoteq,reseed-select",
				      &val)) {
		if (val > IQS626_MISC_B_RESEED_UI_SEL_MAX) {
			dev_err(&client->dev, "Invalid reseed selection: %u\n",
				val);
			return -EINVAL;
		}

		sys_reg->misc_b &= ~IQS626_MISC_B_RESEED_UI_SEL_MASK;
		sys_reg->misc_b |= (val << IQS626_MISC_B_RESEED_UI_SEL_SHIFT);
	}

	sys_reg->misc_b &= ~IQS626_MISC_B_THRESH_EXTEND;
	if (device_property_present(&client->dev, "azoteq,thresh-extend"))
		sys_reg->misc_b |= IQS626_MISC_B_THRESH_EXTEND;

	sys_reg->misc_b &= ~IQS626_MISC_B_TRACKING_UI_ENABLE;
	if (device_property_present(&client->dev, "azoteq,tracking-enable"))
		sys_reg->misc_b |= IQS626_MISC_B_TRACKING_UI_ENABLE;

	sys_reg->misc_b &= ~IQS626_MISC_B_RESEED_OFFSET;
	if (device_property_present(&client->dev, "azoteq,reseed-offset"))
		sys_reg->misc_b |= IQS626_MISC_B_RESEED_OFFSET;

	if (!device_property_read_u32(&client->dev, "azoteq,rate-np-ms",
				      &val)) {
		if (val > IQS626_RATE_NP_MS_MAX) {
			dev_err(&client->dev, "Invalid report rate: %u\n", val);
			return -EINVAL;
		}

		sys_reg->rate_np = val;
	}

	if (!device_property_read_u32(&client->dev, "azoteq,rate-lp-ms",
				      &val)) {
		if (val > IQS626_RATE_LP_MS_MAX) {
			dev_err(&client->dev, "Invalid report rate: %u\n", val);
			return -EINVAL;
		}

		sys_reg->rate_lp = val;
	}

	/*
	 * The divisors below suggest register resolutions of 16 ms and
	 * 512 ms, respectively (TODO: confirm against the datasheet).
	 */
	if (!device_property_read_u32(&client->dev, "azoteq,rate-ulp-ms",
				      &val)) {
		if (val > IQS626_RATE_ULP_MS_MAX) {
			dev_err(&client->dev, "Invalid report rate: %u\n", val);
			return -EINVAL;
		}

		sys_reg->rate_ulp = val / 16;
	}

	if (!device_property_read_u32(&client->dev, "azoteq,timeout-pwr-ms",
				      &val)) {
		if (val > IQS626_TIMEOUT_PWR_MS_MAX) {
			dev_err(&client->dev, "Invalid timeout: %u\n", val);
			return -EINVAL;
		}

		sys_reg->timeout_pwr = val / 512;
	}

	if (!device_property_read_u32(&client->dev, "azoteq,timeout-lta-ms",
				      &val)) {
		if (val > IQS626_TIMEOUT_LTA_MS_MAX) {
			dev_err(&client->dev, "Invalid timeout: %u\n", val);
			return -EINVAL;
		}

		sys_reg->timeout_lta = val / 512;
	}

	/*
	 * Start with only system events unmasked; the channel parsers below
	 * accumulate the per-channel ATI, reseed and active masks.
	 */
	sys_reg->event_mask = ~((u8)IQS626_EVENT_MASK_SYS);
	sys_reg->redo_ati = 0;
	sys_reg->reseed = 0;
	sys_reg->active = 0;

	for (i = 0; i < ARRAY_SIZE(iqs626_channels); i++) {
		ch_node = device_get_named_child_node(&client->dev,
						      iqs626_channels[i].name);
		if (!ch_node)
			continue;

		error = iqs626_parse_channel(iqs626, ch_node, i);
		fwnode_handle_put(ch_node);
		if (error)
			return error;

		sys_reg->active |= iqs626_channels[i].active;
	}

	general |= IQS626_SYS_SETTINGS_EVENT_MODE;

	/*
	 * Enable streaming during normal-power mode if the trackpad is used to
	 * report raw coordinates instead of gestures. In that case, the device
	 * returns to event mode during low-power mode.
	 */
	if (sys_reg->active & iqs626_channels[IQS626_CH_TP_2].active &&
	    sys_reg->event_mask & IQS626_EVENT_MASK_GESTURE)
		general |= IQS626_SYS_SETTINGS_EVENT_MODE_LP;

	general |= IQS626_SYS_SETTINGS_REDO_ATI;
	general |= IQS626_SYS_SETTINGS_ACK_RESET;

	sys_reg->general = cpu_to_be16(general);

	/* Push the assembled register image to the device in one burst. */
	error = regmap_raw_write(iqs626->regmap, IQS626_SYS_SETTINGS,
				 &iqs626->sys_reg, sizeof(iqs626->sys_reg));
	if (error)
		return error;

	iqs626_irq_wait();

	return 0;
}
/*
 * Allocate and configure the keypad and (if the TP-2 channel is active)
 * trackpad input devices.
 *
 * Only the trackpad is registered here; the keypad is registered later in
 * iqs626_probe(), after ATI completes and initial switch states are read.
 *
 * Returns 0 on success or a negative error code.
 */
static int iqs626_input_init(struct iqs626_private *iqs626)
{
	struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
	struct i2c_client *client = iqs626->client;
	int error, i, j;

	iqs626->keypad = devm_input_allocate_device(&client->dev);
	if (!iqs626->keypad)
		return -ENOMEM;

	iqs626->keypad->keycodemax = ARRAY_SIZE(iqs626->kp_code);
	iqs626->keypad->keycode = iqs626->kp_code;
	iqs626->keypad->keycodesize = sizeof(**iqs626->kp_code);

	iqs626->keypad->name = "iqs626a_keypad";
	iqs626->keypad->id.bustype = BUS_I2C;

	/* Advertise only the event codes mapped for active channels. */
	for (i = 0; i < ARRAY_SIZE(iqs626_channels); i++) {
		if (!(sys_reg->active & iqs626_channels[i].active))
			continue;

		for (j = 0; j < ARRAY_SIZE(iqs626_events); j++) {
			if (!iqs626->kp_type[i][j])
				continue;

			input_set_capability(iqs626->keypad,
					     iqs626->kp_type[i][j],
					     iqs626->kp_code[i][j]);
		}
	}

	if (!(sys_reg->active & iqs626_channels[IQS626_CH_TP_2].active))
		return 0;

	iqs626->trackpad = devm_input_allocate_device(&client->dev);
	if (!iqs626->trackpad)
		return -ENOMEM;

	iqs626->trackpad->keycodemax = ARRAY_SIZE(iqs626->tp_code);
	iqs626->trackpad->keycode = iqs626->tp_code;
	iqs626->trackpad->keycodesize = sizeof(*iqs626->tp_code);

	iqs626->trackpad->name = "iqs626a_trackpad";
	iqs626->trackpad->id.bustype = BUS_I2C;

	/*
	 * Present the trackpad as a traditional pointing device if no gestures
	 * have been mapped to a keycode.
	 */
	if (sys_reg->event_mask & IQS626_EVENT_MASK_GESTURE) {
		u8 tp_mask = iqs626_channels[IQS626_CH_TP_3].active;

		input_set_capability(iqs626->trackpad, EV_KEY, BTN_TOUCH);
		input_set_abs_params(iqs626->trackpad, ABS_Y, 0, 255, 0, 0);

		/* The X range doubles when all TP-3 channels are active. */
		if ((sys_reg->active & tp_mask) == tp_mask)
			input_set_abs_params(iqs626->trackpad,
					     ABS_X, 0, 255, 0, 0);
		else
			input_set_abs_params(iqs626->trackpad,
					     ABS_X, 0, 128, 0, 0);

		touchscreen_parse_properties(iqs626->trackpad, false,
					     &iqs626->prop);
	} else {
		for (i = 0; i < IQS626_NUM_GESTURES; i++)
			if (iqs626->tp_code[i] != KEY_RESERVED)
				input_set_capability(iqs626->trackpad, EV_KEY,
						     iqs626->tp_code[i]);
	}

	error = input_register_device(iqs626->trackpad);
	if (error)
		dev_err(&client->dev, "Failed to register trackpad: %d\n",
			error);

	return error;
}
/*
 * Read the device's status flags and translate them into keypad and
 * trackpad input events.
 *
 * Called from the IRQ thread and from iqs626_resume(); returns 0 on
 * success or a negative error code on I/O failure.
 */
static int iqs626_report(struct iqs626_private *iqs626)
{
	struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
	struct i2c_client *client = iqs626->client;
	struct iqs626_flags flags;
	__le16 hall_output;
	int error, i, j;
	u8 state;
	u8 *dir_mask = &flags.states[IQS626_ST_OFFS_DIR];

	error = regmap_raw_read(iqs626->regmap, IQS626_SYS_FLAGS, &flags,
				sizeof(flags));
	if (error) {
		dev_err(&client->dev, "Failed to read device status: %d\n",
			error);
		return error;
	}

	/*
	 * The device resets itself if its own watchdog bites, which can happen
	 * in the event of an I2C communication error. In this case, the device
	 * asserts a SHOW_RESET interrupt and all registers must be restored.
	 */
	if (be16_to_cpu(flags.system) & IQS626_SYS_FLAGS_SHOW_RESET) {
		dev_err(&client->dev, "Unexpected device reset\n");

		error = regmap_raw_write(iqs626->regmap, IQS626_SYS_SETTINGS,
					 sys_reg, sizeof(*sys_reg));
		if (error)
			dev_err(&client->dev,
				"Failed to re-initialize device: %d\n", error);

		return error;
	}

	/* Suppress reporting while ATI is still running. */
	if (be16_to_cpu(flags.system) & IQS626_SYS_FLAGS_IN_ATI)
		return 0;

	/*
	 * Unlike the ULP or generic channels, the Hall channel does not have a
	 * direction flag. Instead, the direction (i.e. magnet polarity) can be
	 * derived based on the sign of the 2's complement differential output.
	 */
	if (sys_reg->active & iqs626_channels[IQS626_CH_HALL].active) {
		error = regmap_raw_read(iqs626->regmap, IQS626_HALL_OUTPUT,
					&hall_output, sizeof(hall_output));
		if (error) {
			dev_err(&client->dev,
				"Failed to read Hall output: %d\n", error);
			return error;
		}

		*dir_mask &= ~iqs626_channels[IQS626_CH_HALL].active;
		/* Values below 0x8000 are non-negative, i.e. "up". */
		if (le16_to_cpu(hall_output) < 0x8000)
			*dir_mask |= iqs626_channels[IQS626_CH_HALL].active;
	}

	/* Report each mapped event whose channel and direction match. */
	for (i = 0; i < ARRAY_SIZE(iqs626_channels); i++) {
		if (!(sys_reg->active & iqs626_channels[i].active))
			continue;

		for (j = 0; j < ARRAY_SIZE(iqs626_events); j++) {
			if (!iqs626->kp_type[i][j])
				continue;

			state = flags.states[iqs626_events[j].st_offs];
			state &= iqs626_events[j].dir_up ? *dir_mask
							 : ~(*dir_mask);
			state &= iqs626_channels[i].active;

			input_event(iqs626->keypad, iqs626->kp_type[i][j],
				    iqs626->kp_code[i][j], !!state);
		}
	}

	input_sync(iqs626->keypad);

	/*
	 * The following completion signals that ATI has finished, any initial
	 * switch states have been reported and the keypad can be registered.
	 */
	complete_all(&iqs626->ati_done);

	if (!(sys_reg->active & iqs626_channels[IQS626_CH_TP_2].active))
		return 0;

	if (sys_reg->event_mask & IQS626_EVENT_MASK_GESTURE) {
		/* Raw-coordinate mode: report touch state and position. */
		state = flags.states[IQS626_ST_OFFS_TOUCH];
		state &= iqs626_channels[IQS626_CH_TP_2].active;

		input_report_key(iqs626->trackpad, BTN_TOUCH, state);

		if (state)
			touchscreen_report_pos(iqs626->trackpad, &iqs626->prop,
					       flags.trackpad_x,
					       flags.trackpad_y, false);
	} else {
		/* Gesture mode: one keycode per gesture bit. */
		for (i = 0; i < IQS626_NUM_GESTURES; i++)
			input_report_key(iqs626->trackpad, iqs626->tp_code[i],
					 flags.gesture & BIT(i));

		if (flags.gesture & GENMASK(IQS626_GESTURE_TAP, 0)) {
			input_sync(iqs626->trackpad);

			/*
			 * Momentary gestures are followed by a complementary
			 * release cycle so as to emulate a full keystroke.
			 */
			for (i = 0; i < IQS626_GESTURE_HOLD; i++)
				input_report_key(iqs626->trackpad,
						 iqs626->tp_code[i], 0);
		}
	}

	input_sync(iqs626->trackpad);

	return 0;
}
static irqreturn_t iqs626_irq(int irq, void *context)
{
struct iqs626_private *iqs626 = context;
if (iqs626_report(iqs626))
return IRQ_NONE;
/*
* The device does not deassert its interrupt (RDY) pin until shortly
* after receiving an I2C stop condition; the following delay ensures
* the interrupt handler does not return before this time.
*/
iqs626_irq_wait();
return IRQ_HANDLED;
}
/* 8-bit register addresses carrying 16-bit values. */
static const struct regmap_config iqs626_regmap_config = {
	.reg_bits = 8,
	.val_bits = 16,
	.max_register = IQS626_MAX_REG,
};
/*
 * Probe: verify the device identity, push the firmware-derived register
 * configuration, set up the input devices, and wait for the initial ATI
 * cycle (signalled via the IRQ thread) before registering the keypad.
 */
static int iqs626_probe(struct i2c_client *client)
{
	struct iqs626_ver_info ver_info;
	struct iqs626_private *iqs626;
	int error;

	iqs626 = devm_kzalloc(&client->dev, sizeof(*iqs626), GFP_KERNEL);
	if (!iqs626)
		return -ENOMEM;

	i2c_set_clientdata(client, iqs626);
	iqs626->client = client;

	iqs626->regmap = devm_regmap_init_i2c(client, &iqs626_regmap_config);
	if (IS_ERR(iqs626->regmap)) {
		error = PTR_ERR(iqs626->regmap);
		dev_err(&client->dev, "Failed to initialize register map: %d\n",
			error);
		return error;
	}

	/* Completed by iqs626_report() once ATI has finished. */
	init_completion(&iqs626->ati_done);

	error = regmap_raw_read(iqs626->regmap, IQS626_VER_INFO, &ver_info,
				sizeof(ver_info));
	if (error)
		return error;

	if (ver_info.prod_num != IQS626_VER_INFO_PROD_NUM) {
		dev_err(&client->dev, "Unrecognized product number: 0x%02X\n",
			ver_info.prod_num);
		return -EINVAL;
	}

	error = iqs626_parse_prop(iqs626);
	if (error)
		return error;

	error = iqs626_input_init(iqs626);
	if (error)
		return error;

	error = devm_request_threaded_irq(&client->dev, client->irq,
					  NULL, iqs626_irq, IRQF_ONESHOT,
					  client->name, iqs626);
	if (error) {
		dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
		return error;
	}

	if (!wait_for_completion_timeout(&iqs626->ati_done,
					 msecs_to_jiffies(2000))) {
		dev_err(&client->dev, "Failed to complete ATI\n");
		return -ETIMEDOUT;
	}

	/*
	 * The keypad may include one or more switches and is not registered
	 * until ATI is complete and the initial switch states are read.
	 */
	error = input_register_device(iqs626->keypad);
	if (error)
		dev_err(&client->dev, "Failed to register keypad: %d\n", error);

	return error;
}
/*
 * Suspend: force the device into the firmware-selected power mode (if any),
 * with the IRQ disabled so mode-poll reads do not race the handler.
 */
static int iqs626_suspend(struct device *dev)
{
	struct iqs626_private *iqs626 = dev_get_drvdata(dev);
	struct i2c_client *client = iqs626->client;
	unsigned int val;
	int error;

	/* Nothing to do if no suspend mode was configured. */
	if (!iqs626->suspend_mode)
		return 0;

	disable_irq(client->irq);

	/*
	 * Automatic power mode switching must be disabled before the device is
	 * forced into any particular power mode. In this case, the device will
	 * transition into normal-power mode.
	 */
	error = regmap_update_bits(iqs626->regmap, IQS626_SYS_SETTINGS,
				   IQS626_SYS_SETTINGS_DIS_AUTO, ~0);
	if (error)
		goto err_irq;

	/*
	 * The following check ensures the device has completed its transition
	 * into normal-power mode before a manual mode switch is performed.
	 */
	error = regmap_read_poll_timeout(iqs626->regmap, IQS626_SYS_FLAGS, val,
					 !(val & IQS626_SYS_FLAGS_PWR_MODE_MASK),
					 IQS626_PWR_MODE_POLL_SLEEP_US,
					 IQS626_PWR_MODE_POLL_TIMEOUT_US);
	if (error)
		goto err_irq;

	error = regmap_update_bits(iqs626->regmap, IQS626_SYS_SETTINGS,
				   IQS626_SYS_SETTINGS_PWR_MODE_MASK,
				   iqs626->suspend_mode <<
				   IQS626_SYS_SETTINGS_PWR_MODE_SHIFT);
	if (error)
		goto err_irq;

	/*
	 * This last check ensures the device has completed its transition into
	 * the desired power mode to prevent any spurious interrupts from being
	 * triggered after iqs626_suspend has already returned.
	 */
	error = regmap_read_poll_timeout(iqs626->regmap, IQS626_SYS_FLAGS, val,
					 (val & IQS626_SYS_FLAGS_PWR_MODE_MASK)
					 == (iqs626->suspend_mode <<
					     IQS626_SYS_FLAGS_PWR_MODE_SHIFT),
					 IQS626_PWR_MODE_POLL_SLEEP_US,
					 IQS626_PWR_MODE_POLL_TIMEOUT_US);

err_irq:
	iqs626_irq_wait();
	enable_irq(client->irq);

	return error;
}
/*
 * Resume: return the device to normal-power mode, re-enable automatic
 * power mode switching, and flush any events missed while polling.
 */
static int iqs626_resume(struct device *dev)
{
	struct iqs626_private *iqs626 = dev_get_drvdata(dev);
	struct i2c_client *client = iqs626->client;
	unsigned int val;
	int error;

	/* Mirrors iqs626_suspend(): nothing was changed if no mode is set. */
	if (!iqs626->suspend_mode)
		return 0;

	disable_irq(client->irq);

	error = regmap_update_bits(iqs626->regmap, IQS626_SYS_SETTINGS,
				   IQS626_SYS_SETTINGS_PWR_MODE_MASK, 0);
	if (error)
		goto err_irq;

	/*
	 * This check ensures the device has returned to normal-power mode
	 * before automatic power mode switching is re-enabled.
	 */
	error = regmap_read_poll_timeout(iqs626->regmap, IQS626_SYS_FLAGS, val,
					 !(val & IQS626_SYS_FLAGS_PWR_MODE_MASK),
					 IQS626_PWR_MODE_POLL_SLEEP_US,
					 IQS626_PWR_MODE_POLL_TIMEOUT_US);
	if (error)
		goto err_irq;

	error = regmap_update_bits(iqs626->regmap, IQS626_SYS_SETTINGS,
				   IQS626_SYS_SETTINGS_DIS_AUTO, 0);
	if (error)
		goto err_irq;

	/*
	 * This step reports any events that may have been "swallowed" as a
	 * result of polling PWR_MODE (which automatically acknowledges any
	 * pending interrupts).
	 */
	error = iqs626_report(iqs626);

err_irq:
	iqs626_irq_wait();
	enable_irq(client->irq);

	return error;
}
/* System-sleep PM hooks; runtime PM is not used by this driver. */
static DEFINE_SIMPLE_DEV_PM_OPS(iqs626_pm, iqs626_suspend, iqs626_resume);

static const struct of_device_id iqs626_of_match[] = {
	{ .compatible = "azoteq,iqs626a" },
	{ }
};
MODULE_DEVICE_TABLE(of, iqs626_of_match);
/* I2C driver glue; no remove hook — all resources are devm-managed. */
static struct i2c_driver iqs626_i2c_driver = {
	.driver = {
		.name = "iqs626a",
		.of_match_table = iqs626_of_match,
		.pm = pm_sleep_ptr(&iqs626_pm),
	},
	.probe = iqs626_probe,
};
module_i2c_driver(iqs626_i2c_driver);
MODULE_AUTHOR("Jeff LaBundy <[email protected]>");
MODULE_DESCRIPTION("Azoteq IQS626A Capacitive Touch Controller");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/iqs626a.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Input driver for slidebars on some Lenovo IdeaPad laptops
*
* Copyright (C) 2013 Andrey Moiseev <[email protected]>
*
* Reverse-engineered from Lenovo SlideNav software (SBarHook.dll).
*
* Trademarks are the property of their respective owners.
*/
/*
* Currently tested and works on:
* Lenovo IdeaPad Y550
* Lenovo IdeaPad Y550P
*
* Other models can be added easily. To test,
* load with 'force' parameter set 'true'.
*
* LEDs blinking and input mode are managed via sysfs,
* (hex, unsigned byte value):
* /sys/devices/platform/ideapad_slidebar/slidebar_mode
*
* The value is in byte range, however, I only figured out
* how bits 0b10011001 work. Some other bits, probably,
* are meaningful too.
*
* Possible states:
*
* STD_INT, ONMOV_INT, OFF_INT, LAST_POLL, OFF_POLL
*
* Meaning:
* released touched
* STD 'heartbeat' lights follow the finger
* ONMOV no lights lights follow the finger
* LAST at last pos lights follow the finger
* OFF no lights no lights
*
* INT all input events are generated, interrupts are used
* POLL no input events by default, to get them,
* send 0b10000000 (read below)
*
* Commands: write
*
* All | 0b01001 -> STD_INT
* possible | 0b10001 -> ONMOV_INT
* states | 0b01000 -> OFF_INT
*
* | 0b0 -> LAST_POLL
* STD_INT or ONMOV_INT |
* | 0b1 -> STD_INT
*
* | 0b0 -> OFF_POLL
* OFF_INT or OFF_POLL |
* | 0b1 -> OFF_INT
*
* Any state | 0b10000000 -> if the slidebar has updated data,
* produce one input event (last position),
* switch to respective POLL mode
* (like 0x0), if not in POLL mode yet.
*
* Get current state: read
*
* masked by 0x11 read value means:
*
* 0x00 LAST
* 0x01 STD
* 0x10 OFF
* 0x11 ONMOV
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dmi.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/i8042.h>
#include <linux/serio.h>
#define IDEAPAD_BASE 0xff29
static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
static DEFINE_SPINLOCK(io_lock);
static struct input_dev *slidebar_input_dev;
static struct platform_device *slidebar_platform_dev;
static u8 slidebar_pos_get(void)
{
u8 res;
unsigned long flags;
spin_lock_irqsave(&io_lock, flags);
outb(0xf4, 0xff29);
outb(0xbf, 0xff2a);
res = inb(0xff2b);
spin_unlock_irqrestore(&io_lock, flags);
return res;
}
static u8 slidebar_mode_get(void)
{
u8 res;
unsigned long flags;
spin_lock_irqsave(&io_lock, flags);
outb(0xf7, 0xff29);
outb(0x8b, 0xff2a);
res = inb(0xff2b);
spin_unlock_irqrestore(&io_lock, flags);
return res;
}
/*
 * Program a new slidebar mode byte via the indexed EC interface.
 *
 * Ports are expressed relative to IDEAPAD_BASE (the region reserved in
 * ideapad_probe()) instead of duplicating the raw addresses.
 */
static void slidebar_mode_set(u8 mode)
{
	unsigned long flags;

	spin_lock_irqsave(&io_lock, flags);

	outb(0xf7, IDEAPAD_BASE);
	outb(0x8b, IDEAPAD_BASE + 1);
	outb(mode, IDEAPAD_BASE + 2);

	spin_unlock_irqrestore(&io_lock, flags);
}
/*
 * i8042 filter: intercept the scancodes the slidebar injects through the
 * keyboard controller (e0 3b on move, e0 bb on release) and translate them
 * into input events, while passing all other traffic through untouched.
 *
 * Returns true when the byte was consumed by this driver.
 */
static bool slidebar_i8042_filter(unsigned char data, unsigned char str,
				  struct serio *port)
{
	/* Tracks whether the previous byte was the 0xe0 prefix. */
	static bool extended = false;

	/* We are only interested in data coming from the KBC port. */
	if (str & I8042_STR_AUXDATA)
		return false;

	/* Scancodes: e03b on move, e0bb on release. */
	if (data == 0xe0) {
		extended = true;
		return true;
	}

	if (!extended)
		return false;

	extended = false;

	/*
	 * Not a slidebar scancode: re-inject the swallowed 0xe0 prefix so
	 * the regular keyboard path still sees the full sequence.
	 */
	if (likely((data & 0x7f) != 0x3b)) {
		serio_interrupt(port, 0xe0, 0);
		return false;
	}

	/* Bit 7 set marks the release (break) code. */
	if (data & 0x80) {
		input_report_key(slidebar_input_dev, BTN_TOUCH, 0);
	} else {
		input_report_key(slidebar_input_dev, BTN_TOUCH, 1);
		input_report_abs(slidebar_input_dev, ABS_X, slidebar_pos_get());
	}
	input_sync(slidebar_input_dev);

	return true;
}
static ssize_t show_slidebar_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%x\n", slidebar_mode_get());
}
/*
 * sysfs "slidebar_mode" store handler: parse a byte value from @buf
 * (any base accepted by kstrtou8) and write it to the mode register.
 */
static ssize_t store_slidebar_mode(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	u8 mode;
	int ret = kstrtou8(buf, 0, &mode);

	if (ret)
		return ret;

	slidebar_mode_set(mode);
	return count;
}
/* Root-writable, world-readable "slidebar_mode" sysfs attribute. */
static DEVICE_ATTR(slidebar_mode, S_IWUSR | S_IRUGO,
		   show_slidebar_mode, store_slidebar_mode);

static struct attribute *ideapad_attrs[] = {
	&dev_attr_slidebar_mode.attr,
	NULL
};

static struct attribute_group ideapad_attr_group = {
	.attrs = ideapad_attrs
};

/* Assigned to dev.groups before platform_device_add() in slidebar_init(). */
static const struct attribute_group *ideapad_attr_groups[] = {
	&ideapad_attr_group,
	NULL
};
/*
 * Platform probe: claim the EC I/O ports, set up the slidebar input
 * device (BTN_TOUCH + ABS_X in 0-255), hook the i8042 filter, and
 * register the input device.  Resources are unwound in reverse order
 * on failure.
 *
 * Fix: kernel pointer-declarator style ("*pdev", not "* pdev") to match
 * the rest of the file; no behavior change.
 */
static int __init ideapad_probe(struct platform_device *pdev)
{
	int err;

	if (!request_region(IDEAPAD_BASE, 3, "ideapad_slidebar")) {
		dev_err(&pdev->dev, "IO ports are busy\n");
		return -EBUSY;
	}

	slidebar_input_dev = input_allocate_device();
	if (!slidebar_input_dev) {
		dev_err(&pdev->dev, "Failed to allocate input device\n");
		err = -ENOMEM;
		goto err_release_ports;
	}

	slidebar_input_dev->name = "IdeaPad Slidebar";
	slidebar_input_dev->id.bustype = BUS_HOST;
	slidebar_input_dev->dev.parent = &pdev->dev;
	input_set_capability(slidebar_input_dev, EV_KEY, BTN_TOUCH);
	input_set_capability(slidebar_input_dev, EV_ABS, ABS_X);
	/* Position register is a single byte: report 0-255 absolute range. */
	input_set_abs_params(slidebar_input_dev, ABS_X, 0, 0xff, 0, 0);

	/* Install the filter before registering so no events are dropped. */
	err = i8042_install_filter(slidebar_i8042_filter);
	if (err) {
		dev_err(&pdev->dev,
			"Failed to install i8042 filter: %d\n", err);
		goto err_free_dev;
	}

	err = input_register_device(slidebar_input_dev);
	if (err) {
		dev_err(&pdev->dev,
			"Failed to register input device: %d\n", err);
		goto err_remove_filter;
	}

	return 0;

err_remove_filter:
	i8042_remove_filter(slidebar_i8042_filter);
err_free_dev:
	input_free_device(slidebar_input_dev);
err_release_ports:
	release_region(IDEAPAD_BASE, 3);
	return err;
}
/*
 * Platform remove: tear down in reverse order of probe -- detach the
 * i8042 filter first so no further events reference the input device,
 * then unregister it and release the EC I/O ports.
 */
static int ideapad_remove(struct platform_device *pdev)
{
	i8042_remove_filter(slidebar_i8042_filter);
	input_unregister_device(slidebar_input_dev);
	release_region(IDEAPAD_BASE, 3);

	return 0;
}
/*
 * No .probe member here: ideapad_probe() is passed directly to
 * platform_driver_probe() in slidebar_init(), so it can stay __init.
 */
static struct platform_driver slidebar_drv = {
	.driver = {
		.name = "ideapad_slidebar",
	},
	.remove = ideapad_remove,
};
/* DMI match callback: log the matched model; return 1 to stop scanning. */
static int __init ideapad_dmi_check(const struct dmi_system_id *id)
{
	pr_info("Laptop model '%s'\n", id->ident);
	return 1;
}
/* Whitelist of laptops known to have the slidebar hardware. */
static const struct dmi_system_id ideapad_dmi[] __initconst = {
	{
		.ident = "Lenovo IdeaPad Y550",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20017"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Y550")
		},
		.callback = ideapad_dmi_check
	},
	{
		.ident = "Lenovo IdeaPad Y550P",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20035"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Y550P")
		},
		.callback = ideapad_dmi_check
	},
	{ NULL, }	/* terminator */
};
MODULE_DEVICE_TABLE(dmi, ideapad_dmi);
/*
 * Module init: refuse to load unless the DMI data matches a supported
 * IdeaPad (or the "force" parameter is set), then create the platform
 * device and bind the driver via platform_driver_probe(), which runs
 * ideapad_probe() exactly once.
 */
static int __init slidebar_init(void)
{
	int err;

	if (!force && !dmi_check_system(ideapad_dmi)) {
		pr_err("DMI does not match\n");
		return -ENODEV;
	}

	slidebar_platform_dev = platform_device_alloc("ideapad_slidebar", -1);
	if (!slidebar_platform_dev) {
		pr_err("Not enough memory\n");
		return -ENOMEM;
	}

	/* Attribute groups are instantiated by the driver core on add. */
	slidebar_platform_dev->dev.groups = ideapad_attr_groups;

	err = platform_device_add(slidebar_platform_dev);
	if (err) {
		pr_err("Failed to register platform device\n");
		goto err_free_dev;
	}

	err = platform_driver_probe(&slidebar_drv, ideapad_probe);
	if (err) {
		pr_err("Failed to register platform driver\n");
		goto err_delete_dev;
	}

	return 0;

err_delete_dev:
	/* Undo platform_device_add() only; _put drops the allocation ref. */
	platform_device_del(slidebar_platform_dev);
err_free_dev:
	platform_device_put(slidebar_platform_dev);
	return err;
}
/*
 * Module exit: unregister the device first (which triggers
 * ideapad_remove()), then the driver.
 */
static void __exit slidebar_exit(void)
{
	platform_device_unregister(slidebar_platform_dev);
	platform_driver_unregister(&slidebar_drv);
}
module_init(slidebar_init);
module_exit(slidebar_exit);
MODULE_AUTHOR("Andrey Moiseev <[email protected]>");
MODULE_DESCRIPTION("Slidebar input support for some Lenovo IdeaPad laptops");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/ideapad_slidebar.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* keyspan_remote: USB driver for the Keyspan DMR
*
* Copyright (C) 2005 Zymeta Corporation - Michael Downey ([email protected])
*
* This driver has been put together with the support of Innosys, Inc.
* and Keyspan, Inc the manufacturers of the Keyspan USB DMR product.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/input.h>
/* Parameters that can be passed to the driver. */
static int debug;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Enable extra debug messages and information");
/* Vendor and product ids */
#define USB_KEYSPAN_VENDOR_ID 0x06CD
#define USB_KEYSPAN_PRODUCT_UIA11 0x0202
/* Defines for converting the data from the remote. */
#define ZERO 0x18
#define ZERO_MASK 0x1F /* 5 bits for a 0 */
#define ONE 0x3C
#define ONE_MASK 0x3F /* 6 bits for a 1 */
#define SYNC 0x3F80
#define SYNC_MASK 0x3FFF /* 14 bits for a SYNC sequence */
#define STOP 0x00
#define STOP_MASK 0x1F /* 5 bits for the STOP sequence */
#define GAP 0xFF
#define RECV_SIZE 8 /* The UIA-11 type have a 8 byte limit. */
/*
* Table that maps the 31 possible keycodes to input keys.
* Currently there are 15 and 17 button models so RESERVED codes
* are blank areas in the mapping.
*/
/*
 * Table that maps the 31 possible keycodes to input keys.
 * Currently there are 15 and 17 button models so RESERVED codes
 * are blank areas in the mapping.  The 5-bit button code decoded in
 * keyspan_check_data() indexes directly into this array.
 */
static const unsigned short keyspan_key_table[] = {
	KEY_RESERVED,		/* 0 is just a place holder. */
	KEY_RESERVED,
	KEY_STOP,
	KEY_PLAYCD,
	KEY_RESERVED,
	KEY_PREVIOUSSONG,
	KEY_REWIND,
	KEY_FORWARD,
	KEY_NEXTSONG,
	KEY_RESERVED,
	KEY_RESERVED,
	KEY_RESERVED,
	KEY_PAUSE,
	KEY_VOLUMEUP,
	KEY_RESERVED,
	KEY_RESERVED,
	KEY_RESERVED,
	KEY_VOLUMEDOWN,
	KEY_RESERVED,
	KEY_UP,
	KEY_RESERVED,
	KEY_MUTE,
	KEY_LEFT,
	KEY_ENTER,
	KEY_RIGHT,
	KEY_RESERVED,
	KEY_RESERVED,
	KEY_DOWN,
	KEY_RESERVED,
	KEY_KPASTERISK,
	KEY_RESERVED,
	KEY_MENU
};
/* table of devices that work with this driver */
static const struct usb_device_id keyspan_table[] = {
	{ USB_DEVICE(USB_KEYSPAN_VENDOR_ID, USB_KEYSPAN_PRODUCT_UIA11) },
	{ }			/* Terminating entry */
};

/* Structure to store all the real stuff that a remote sends to us. */
struct keyspan_message {
	u16 system;		/* 9-bit system code */
	u8 button;		/* 5-bit button code, index into the keymap */
	u8 toggle;		/* flips on every new key press */
};

/* Structure used for all the bit testing magic needed to be done. */
struct bit_tester {
	u32 tester;		/* bit accumulator, consumed LSB first */
	int len;		/* bytes currently valid in buffer[] */
	int pos;		/* next byte of buffer[] to load */
	int bits_left;		/* bits currently valid in tester */
	u8 buffer[32];		/* raw bytes accumulated across URBs */
};

/* Structure to hold all of our driver specific stuff */
struct usb_keyspan {
	char name[128];		/* manufacturer + product string */
	char phys[64];		/* sysfs-style physical path */
	unsigned short keymap[ARRAY_SIZE(keyspan_key_table)];
	struct usb_device *udev;
	struct input_dev *input;
	struct usb_interface *interface;
	struct usb_endpoint_descriptor *in_endpoint;
	struct urb* irq_urb;	/* interrupt-in URB, DMA-mapped */
	int open;
	dma_addr_t in_dma;
	unsigned char *in_buffer;

	/* variables used to parse messages from remote. */
	struct bit_tester data;
	int stage;		/* 0 = hunting, 1 = syncing, 2 = decoding */
	int toggle;		/* last seen toggle; -1 until first message */
};

static struct usb_driver keyspan_driver;
/*
 * Debug helper: dump the raw RECV_SIZE-byte URB buffer as a line of
 * space-separated hex bytes via dev_info().
 */
static void keyspan_print(struct usb_keyspan *dev)
{
	char hex[4 * RECV_SIZE];
	int n;

	for (n = 0; n < RECV_SIZE; ++n)
		snprintf(&hex[n * 3], 4, "%02x ", dev->in_buffer[n]);

	dev_info(&dev->udev->dev, "%s\n", hex);
}
/*
 * Routine that manages the bit_tester structure. It makes sure that there are
 * at least bits_needed bits loaded into the tester.  Returns 0 on success,
 * -1 when the buffered bytes are exhausted.
 */
static int keyspan_load_tester(struct usb_keyspan* dev, int bits_needed)
{
	if (dev->data.bits_left >= bits_needed)
		return 0;

	/*
	 * Somehow we've missed the last message. The message will be repeated
	 * though so it's not too big a deal
	 */
	if (dev->data.pos >= dev->data.len) {
		dev_dbg(&dev->interface->dev,
			"%s - Error ran out of data. pos: %d, len: %d\n",
			__func__, dev->data.pos, dev->data.len);
		return -1;
	}

	/* Load as much as we can into the tester. */
	while ((dev->data.bits_left + 7 < (sizeof(dev->data.tester) * 8)) &&
	       (dev->data.pos < dev->data.len)) {
		/* New bytes land above the bits already queued (LSB first). */
		dev->data.tester += (dev->data.buffer[dev->data.pos++] << dev->data.bits_left);
		dev->data.bits_left += 8;
	}

	return 0;
}
/*
 * Emit one key event: raw scancode first, then the mapped key state,
 * followed by a sync.
 */
static void keyspan_report_button(struct usb_keyspan *remote, int button, int press)
{
	input_event(remote->input, EV_MSC, MSC_SCAN, button);
	input_report_key(remote->input, remote->keymap[button], press);
	input_sync(remote->input);
}
/*
 * Routine that handles all the logic needed to parse out the message from
 * the remote.  Runs as a three-stage state machine driven by 8-byte URB
 * chunks: stage 0 hunts for the first non-filler byte, stage 1 locates the
 * 14-bit SYNC pattern, stage 2 decodes system/button/toggle/stop fields.
 * Bits are pulse-width coded: a 0 is 5 bits (ZERO), a 1 is 6 bits (ONE).
 */
static void keyspan_check_data(struct usb_keyspan *remote)
{
	int i;
	int found = 0;
	struct keyspan_message message;

	switch(remote->stage) {
	case 0:
		/*
		 * In stage 0 we want to find the start of a message. The remote sends a 0xFF as filler.
		 * So the first byte that isn't a FF should be the start of a new message.
		 */
		for (i = 0; i < RECV_SIZE && remote->in_buffer[i] == GAP; ++i);

		if (i < RECV_SIZE) {
			/* Non-filler byte seen: start buffering a message. */
			memcpy(remote->data.buffer, remote->in_buffer, RECV_SIZE);
			remote->data.len = RECV_SIZE;
			remote->data.pos = 0;
			remote->data.tester = 0;
			remote->data.bits_left = 0;
			remote->stage = 1;
		}
		break;
	case 1:
		/*
		 * Stage 1 we should have 16 bytes and should be able to detect a
		 * SYNC. The SYNC is 14 bits, 7 0's and then 7 1's.
		 */
		memcpy(remote->data.buffer + remote->data.len, remote->in_buffer, RECV_SIZE);
		remote->data.len += RECV_SIZE;

		found = 0;
		while ((remote->data.bits_left >= 14 || remote->data.pos < remote->data.len) && !found) {
			for (i = 0; i < 8; ++i) {
				if (keyspan_load_tester(remote, 14) != 0) {
					remote->stage = 0;
					return;
				}

				if ((remote->data.tester & SYNC_MASK) == SYNC) {
					/* SYNC found: consume it and move on. */
					remote->data.tester = remote->data.tester >> 14;
					remote->data.bits_left -= 14;
					found = 1;
					break;
				} else {
					/* No match: slide the window by one bit. */
					remote->data.tester = remote->data.tester >> 1;
					--remote->data.bits_left;
				}
			}
		}

		if (!found) {
			remote->stage = 0;
			remote->data.len = 0;
		} else {
			remote->stage = 2;
		}
		break;
	case 2:
		/*
		 * Stage 2 we should have 24 bytes which will be enough for a full
		 * message.  We need to parse out the system code, button code,
		 * toggle code, and stop.
		 */
		memcpy(remote->data.buffer + remote->data.len, remote->in_buffer, RECV_SIZE);
		remote->data.len += RECV_SIZE;

		/* 9 pulse-width-coded bits of system code, MSB first. */
		message.system = 0;
		for (i = 0; i < 9; i++) {
			keyspan_load_tester(remote, 6);

			if ((remote->data.tester & ZERO_MASK) == ZERO) {
				message.system = message.system << 1;
				remote->data.tester = remote->data.tester >> 5;
				remote->data.bits_left -= 5;
			} else if ((remote->data.tester & ONE_MASK) == ONE) {
				message.system = (message.system << 1) + 1;
				remote->data.tester = remote->data.tester >> 6;
				remote->data.bits_left -= 6;
			} else {
				dev_err(&remote->interface->dev,
					"%s - Unknown sequence found in system data.\n",
					__func__);
				remote->stage = 0;
				return;
			}
		}

		/* 5 pulse-width-coded bits of button code, MSB first. */
		message.button = 0;
		for (i = 0; i < 5; i++) {
			keyspan_load_tester(remote, 6);

			if ((remote->data.tester & ZERO_MASK) == ZERO) {
				message.button = message.button << 1;
				remote->data.tester = remote->data.tester >> 5;
				remote->data.bits_left -= 5;
			} else if ((remote->data.tester & ONE_MASK) == ONE) {
				message.button = (message.button << 1) + 1;
				remote->data.tester = remote->data.tester >> 6;
				remote->data.bits_left -= 6;
			} else {
				dev_err(&remote->interface->dev,
					"%s - Unknown sequence found in button data.\n",
					__func__);
				remote->stage = 0;
				return;
			}
		}

		/* Single toggle bit, same 5/6-bit encoding. */
		keyspan_load_tester(remote, 6);
		if ((remote->data.tester & ZERO_MASK) == ZERO) {
			message.toggle = 0;
			remote->data.tester = remote->data.tester >> 5;
			remote->data.bits_left -= 5;
		} else if ((remote->data.tester & ONE_MASK) == ONE) {
			message.toggle = 1;
			remote->data.tester = remote->data.tester >> 6;
			remote->data.bits_left -= 6;
		} else {
			dev_err(&remote->interface->dev,
				"%s - Error in message, invalid toggle.\n",
				__func__);
			remote->stage = 0;
			return;
		}

		/* STOP sequence; a bad stop is logged but not fatal. */
		keyspan_load_tester(remote, 5);
		if ((remote->data.tester & STOP_MASK) == STOP) {
			remote->data.tester = remote->data.tester >> 5;
			remote->data.bits_left -= 5;
		} else {
			dev_err(&remote->interface->dev,
				"Bad message received, no stop bit found.\n");
		}

		dev_dbg(&remote->interface->dev,
			"%s found valid message: system: %d, button: %d, toggle: %d\n",
			__func__, message.system, message.button, message.toggle);

		/* Toggle flips per press: only report when it changes. */
		if (message.toggle != remote->toggle) {
			keyspan_report_button(remote, message.button, 1);
			keyspan_report_button(remote, message.button, 0);
			remote->toggle = message.toggle;
		}

		remote->stage = 0;
		break;
	}
}
/*
 * Send the three vendor control requests that initialize the remote:
 * set the bit rate, set the resume sensitivity, then enable receive.
 * Returns 0 on success or the first failing usb_control_msg() result.
 */
static int keyspan_setup(struct usb_device *dev)
{
	int retval;

	retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
				 0x11, 0x40, 0x5601, 0x0, NULL, 0,
				 USB_CTRL_SET_TIMEOUT);
	if (retval) {
		dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
			__func__, retval);
		return retval;
	}

	retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
				 0x44, 0x40, 0x0, 0x0, NULL, 0,
				 USB_CTRL_SET_TIMEOUT);
	if (retval) {
		dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
			__func__, retval);
		return retval;
	}

	retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
				 0x22, 0x40, 0x0, 0x0, NULL, 0,
				 USB_CTRL_SET_TIMEOUT);
	if (retval) {
		dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
			__func__, retval);
		return retval;
	}

	dev_dbg(&dev->dev, "%s - Setup complete.\n", __func__);
	return 0;
}
/*
 * Routine used to handle a new message that has come in.  URB completion
 * handler: feed good data to the parser and resubmit; stop permanently
 * on unlink/shutdown statuses.
 */
static void keyspan_irq_recv(struct urb *urb)
{
	struct usb_keyspan *dev = urb->context;
	int retval;

	/* Check our status in case we need to bail out early. */
	switch (urb->status) {
	case 0:
		break;

	/* Device went away so don't keep trying to read from it. */
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		return;

	default:
		/* Transient error: skip this buffer but keep listening. */
		goto resubmit;
	}

	if (debug)
		keyspan_print(dev);

	keyspan_check_data(dev);

resubmit:
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&dev->interface->dev,
			"%s - usb_submit_urb failed with result: %d\n",
			__func__, retval);
}
static int keyspan_open(struct input_dev *dev)
{
struct usb_keyspan *remote = input_get_drvdata(dev);
remote->irq_urb->dev = remote->udev;
if (usb_submit_urb(remote->irq_urb, GFP_KERNEL))
return -EIO;
return 0;
}
/* input close handler: cancel the interrupt URB when the last user closes. */
static void keyspan_close(struct input_dev *dev)
{
	struct usb_keyspan *remote = input_get_drvdata(dev);

	usb_kill_urb(remote->irq_urb);
}
/*
 * Scan @iface for the first interrupt-IN endpoint; NULL if none exists.
 */
static struct usb_endpoint_descriptor *keyspan_get_in_endpoint(struct usb_host_interface *iface)
{
	int n;

	for (n = 0; n < iface->desc.bNumEndpoints; ++n) {
		struct usb_endpoint_descriptor *ep = &iface->endpoint[n].desc;

		if (usb_endpoint_is_int_in(ep))
			return ep;
	}

	return NULL;
}
/*
 * Routine that sets up the driver to handle a specific USB device detected
 * on the bus: allocate driver state and DMA buffer, initialize the remote
 * hardware, build the input device and its keymap, and prime the
 * interrupt URB (submitted later from keyspan_open()).
 */
static int keyspan_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(interface);
	struct usb_endpoint_descriptor *endpoint;
	struct usb_keyspan *remote;
	struct input_dev *input_dev;
	int i, error;

	endpoint = keyspan_get_in_endpoint(interface->cur_altsetting);
	if (!endpoint)
		return -ENODEV;

	remote = kzalloc(sizeof(*remote), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!remote || !input_dev) {
		error = -ENOMEM;
		goto fail1;
	}

	remote->udev = udev;
	remote->input = input_dev;
	remote->interface = interface;
	remote->in_endpoint = endpoint;
	remote->toggle = -1;	/* Set to -1 so we will always not match the toggle from the first remote message. */

	remote->in_buffer = usb_alloc_coherent(udev, RECV_SIZE, GFP_KERNEL, &remote->in_dma);
	if (!remote->in_buffer) {
		error = -ENOMEM;
		goto fail1;
	}

	remote->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!remote->irq_urb) {
		error = -ENOMEM;
		goto fail2;
	}

	error = keyspan_setup(udev);
	if (error) {
		error = -ENODEV;
		goto fail3;
	}

	/* Build "<manufacturer> <product>" name, with a generic fallback. */
	if (udev->manufacturer)
		strscpy(remote->name, udev->manufacturer, sizeof(remote->name));

	if (udev->product) {
		if (udev->manufacturer)
			strlcat(remote->name, " ", sizeof(remote->name));
		strlcat(remote->name, udev->product, sizeof(remote->name));
	}

	if (!strlen(remote->name))
		snprintf(remote->name, sizeof(remote->name),
			 "USB Keyspan Remote %04x:%04x",
			 le16_to_cpu(udev->descriptor.idVendor),
			 le16_to_cpu(udev->descriptor.idProduct));

	usb_make_path(udev, remote->phys, sizeof(remote->phys));
	strlcat(remote->phys, "/input0", sizeof(remote->phys));
	memcpy(remote->keymap, keyspan_key_table, sizeof(remote->keymap));

	input_dev->name = remote->name;
	input_dev->phys = remote->phys;
	usb_to_input_id(udev, &input_dev->id);
	input_dev->dev.parent = &interface->dev;
	input_dev->keycode = remote->keymap;
	input_dev->keycodesize = sizeof(unsigned short);
	input_dev->keycodemax = ARRAY_SIZE(remote->keymap);

	/* Advertise every mapped key; RESERVED entries are cleared below. */
	input_set_capability(input_dev, EV_MSC, MSC_SCAN);
	__set_bit(EV_KEY, input_dev->evbit);
	for (i = 0; i < ARRAY_SIZE(keyspan_key_table); i++)
		__set_bit(keyspan_key_table[i], input_dev->keybit);
	__clear_bit(KEY_RESERVED, input_dev->keybit);

	input_set_drvdata(input_dev, remote);

	input_dev->open = keyspan_open;
	input_dev->close = keyspan_close;

	/*
	 * Initialize the URB to access the device.
	 * The urb gets sent to the device in keyspan_open()
	 */
	usb_fill_int_urb(remote->irq_urb,
			 remote->udev,
			 usb_rcvintpipe(remote->udev, endpoint->bEndpointAddress),
			 remote->in_buffer, RECV_SIZE, keyspan_irq_recv, remote,
			 endpoint->bInterval);
	remote->irq_urb->transfer_dma = remote->in_dma;
	remote->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	/* we can register the device now, as it is ready */
	error = input_register_device(remote->input);
	if (error)
		goto fail3;

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, remote);

	return 0;

 fail3:	usb_free_urb(remote->irq_urb);
 fail2:	usb_free_coherent(udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
 fail1:	kfree(remote);
	input_free_device(input_dev);

	return error;
}
/*
 * USB disconnect handler: detach the interface data, then release the
 * input device, URB, DMA buffer and driver state in that order.
 */
static void keyspan_disconnect(struct usb_interface *interface)
{
	struct usb_keyspan *remote = usb_get_intfdata(interface);

	usb_set_intfdata(interface, NULL);
	if (!remote)
		return;

	input_unregister_device(remote->input);
	usb_kill_urb(remote->irq_urb);
	usb_free_urb(remote->irq_urb);
	usb_free_coherent(remote->udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
	kfree(remote);
}
/*
 * Standard driver set up sections
 */
static struct usb_driver keyspan_driver =
{
	.name =		"keyspan_remote",
	.probe =	keyspan_probe,
	.disconnect =	keyspan_disconnect,
	.id_table =	keyspan_table
};
module_usb_driver(keyspan_driver);
MODULE_DEVICE_TABLE(usb, keyspan_table);
MODULE_AUTHOR("Michael Downey <[email protected]>");
MODULE_DESCRIPTION("Driver for the USB Keyspan remote control.");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/keyspan_remote.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* atlas_btns.c - Atlas Wallmount Touchscreen ACPI Extras
*
* Copyright (C) 2006 Jaya Kumar
* Based on Toshiba ACPI by John Belmonte and ASUS ACPI
* This work was sponsored by CIS(M) Sdn Bhd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/uaccess.h>
#define ACPI_ATLAS_NAME		"Atlas ACPI"
#define ACPI_ATLAS_CLASS	"Atlas"

/* 16 possible codes; only the first 9 are mapped (F1..F9), see _add(). */
static unsigned short atlas_keymap[16];
static struct input_dev *input_dev;

/* button handling code */
/*
 * ACPI region setup callback: pass the handler context through on
 * activation, clear it on deactivation.
 */
static acpi_status acpi_atlas_button_setup(acpi_handle region_handle,
		    u32 function, void *handler_context, void **return_context)
{
	if (function == ACPI_REGION_DEACTIVATE)
		*return_context = NULL;
	else
		*return_context = handler_context;

	return AE_OK;
}
/*
 * ACPI address-space handler for button events.  The firmware performs a
 * write whose address encodes the event: low 4 bits are the button code,
 * bit 4 set means release.  Anything other than a write is rejected.
 */
static acpi_status acpi_atlas_button_handler(u32 function,
		      acpi_physical_address address,
		      u32 bit_width, u64 *value,
		      void *handler_context, void *region_context)
{
	acpi_status status;

	if (function == ACPI_WRITE) {
		int code = address & 0x0f;
		int key_down = !(address & 0x10);

		input_event(input_dev, EV_MSC, MSC_SCAN, code);
		input_report_key(input_dev, atlas_keymap[code], key_down);
		input_sync(input_dev);

		status = AE_OK;
	} else {
		pr_warn("shrugged on unexpected function: function=%x,address=%lx,value=%x\n",
			function, (unsigned long)address, (u32)*value);
		status = AE_BAD_PARAMETER;
	}

	return status;
}
/*
 * ACPI add callback: create the input device, map the first 9 button
 * codes to KEY_F1..KEY_F9, register it, then install the custom
 * address-space handler (space id 0x81) that delivers button events.
 */
static int atlas_acpi_button_add(struct acpi_device *device)
{
	acpi_status status;
	int i;
	int err;

	input_dev = input_allocate_device();
	if (!input_dev) {
		pr_err("unable to allocate input device\n");
		return -ENOMEM;
	}

	input_dev->name = "Atlas ACPI button driver";
	input_dev->phys = "ASIM0000/atlas/input0";
	input_dev->id.bustype = BUS_HOST;
	input_dev->keycode = atlas_keymap;
	input_dev->keycodesize = sizeof(unsigned short);
	input_dev->keycodemax = ARRAY_SIZE(atlas_keymap);

	input_set_capability(input_dev, EV_MSC, MSC_SCAN);
	__set_bit(EV_KEY, input_dev->evbit);
	for (i = 0; i < ARRAY_SIZE(atlas_keymap); i++) {
		if (i < 9) {
			atlas_keymap[i] = KEY_F1 + i;
			__set_bit(KEY_F1 + i, input_dev->keybit);
		} else
			atlas_keymap[i] = KEY_RESERVED;
	}

	err = input_register_device(input_dev);
	if (err) {
		pr_err("couldn't register input device\n");
		input_free_device(input_dev);
		return err;
	}

	/* hookup button handler */
	status = acpi_install_address_space_handler(device->handle,
				0x81, &acpi_atlas_button_handler,
				&acpi_atlas_button_setup, device);
	if (ACPI_FAILURE(status)) {
		pr_err("error installing addr spc handler\n");
		input_unregister_device(input_dev);
		err = -EINVAL;
	}

	/* err is 0 here when the handler installed successfully. */
	return err;
}
/*
 * ACPI remove callback: detach the address-space handler before taking
 * down the input device so no further events can be delivered.
 */
static void atlas_acpi_button_remove(struct acpi_device *device)
{
	acpi_status status;

	status = acpi_remove_address_space_handler(device->handle,
				0x81, &acpi_atlas_button_handler);
	if (ACPI_FAILURE(status))
		pr_err("error removing addr spc handler\n");

	input_unregister_device(input_dev);
}
/* Matches the ASIM0000 ACPI node exposed by the Atlas firmware. */
static const struct acpi_device_id atlas_device_ids[] = {
	{"ASIM0000", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, atlas_device_ids);

static struct acpi_driver atlas_acpi_driver = {
	.name	= ACPI_ATLAS_NAME,
	.class	= ACPI_ATLAS_CLASS,
	.owner	= THIS_MODULE,
	.ids	= atlas_device_ids,
	.ops	= {
		.add	= atlas_acpi_button_add,
		.remove	= atlas_acpi_button_remove,
	},
};
module_acpi_driver(atlas_acpi_driver);
MODULE_AUTHOR("Jaya Kumar");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atlas button driver");
|
linux-master
|
drivers/input/misc/atlas_btns.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) IBM Corporation 2020
*/
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#define DEVICE_NAME	"ibm-panel"
#define PANEL_KEYCODES_COUNT	3

struct ibm_panel {
	u8 idx;				/* next write position in command[] */
	u8 command[11];			/* 11-byte panel message buffer */
	u32 keycodes[PANEL_KEYCODES_COUNT];
	spinlock_t lock;		/* protects writes to idx and command */
	struct input_dev *input;
};
/*
 * Compute the expected checksum over the first 10 bytes of the command:
 * a byte-wise sum with end-around carry, then two's-complement negated
 * so that (sum + checksum) wraps to zero.
 */
static u8 ibm_panel_calculate_checksum(struct ibm_panel *panel)
{
	u16 sum = 0;
	unsigned int n;

	for (n = 0; n < sizeof(panel->command) - 1; ++n) {
		sum += panel->command[n];
		if (sum & 0xff00) {
			/* Fold the carry back into the low byte. */
			sum &= 0xff;
			sum++;
		}
	}

	return ~(sum & 0xff) + 1;
}
/*
 * Validate a complete 11-byte command (header + checksum) and emit the
 * encoded button press/release as an input event.
 */
static void ibm_panel_process_command(struct ibm_panel *panel)
{
	u8 button;
	u8 chksum;

	/*
	 * NOTE(review): with '&&' this only rejects the command when BOTH
	 * header bytes are wrong, so a header of ff,xx or xx,f0 slips
	 * through to the checksum stage.  Looks like '||' was intended --
	 * confirm against the panel protocol before changing.
	 */
	if (panel->command[0] != 0xff && panel->command[1] != 0xf0) {
		dev_dbg(&panel->input->dev, "command invalid: %02x %02x\n",
			panel->command[0], panel->command[1]);
		return;
	}

	chksum = ibm_panel_calculate_checksum(panel);
	if (chksum != panel->command[sizeof(panel->command) - 1]) {
		dev_dbg(&panel->input->dev,
			"command failed checksum: %u != %u\n", chksum,
			panel->command[sizeof(panel->command) - 1]);
		return;
	}

	/* Byte 2: low nibble = button index, bit 7 = release flag. */
	button = panel->command[2] & 0xf;
	if (button < PANEL_KEYCODES_COUNT) {
		input_report_key(panel->input, panel->keycodes[button],
				 !(panel->command[2] & 0x80));
		input_sync(panel->input);
	} else {
		dev_dbg(&panel->input->dev, "unknown button %u\n",
			button);
	}
}
/*
 * I2C slave event callback: accumulate bytes written by the panel into
 * panel->command and process the message when a STOP arrives with
 * exactly sizeof(command) bytes collected.  All state changes happen
 * under panel->lock.
 */
static int ibm_panel_i2c_slave_cb(struct i2c_client *client,
				  enum i2c_slave_event event, u8 *val)
{
	unsigned long flags;
	struct ibm_panel *panel = i2c_get_clientdata(client);

	dev_dbg(&panel->input->dev, "event: %u data: %02x\n", event, *val);

	spin_lock_irqsave(&panel->lock, flags);

	switch (event) {
	case I2C_SLAVE_STOP:
		if (panel->idx == sizeof(panel->command))
			ibm_panel_process_command(panel);
		else
			dev_dbg(&panel->input->dev,
				"command incorrect size %u\n", panel->idx);
		fallthrough;
	case I2C_SLAVE_WRITE_REQUESTED:
		/* New transaction (or after STOP): restart the buffer. */
		panel->idx = 0;
		break;
	case I2C_SLAVE_WRITE_RECEIVED:
		if (panel->idx < sizeof(panel->command))
			panel->command[panel->idx++] = *val;
		else
			/*
			 * The command is too long and therefore invalid, so set the index
			 * to its largest possible value. When a STOP is finally received,
			 * the command will be rejected upon processing.
			 */
			panel->idx = U8_MAX;
		break;
	case I2C_SLAVE_READ_REQUESTED:
	case I2C_SLAVE_READ_PROCESSED:
		/* Nothing meaningful to read back; answer with 0xff. */
		*val = 0xff;
		break;
	default:
		break;
	}

	spin_unlock_irqrestore(&panel->lock, flags);

	return 0;
}
/*
 * I2C probe: allocate (devm-managed) driver state and input device,
 * read the keycode mapping from device properties (falling back to
 * gamepad buttons), register the input device, and finally register
 * this client as an I2C slave so the panel can write commands to us.
 */
static int ibm_panel_probe(struct i2c_client *client)
{
	struct ibm_panel *panel;
	int i;
	int error;

	panel = devm_kzalloc(&client->dev, sizeof(*panel), GFP_KERNEL);
	if (!panel)
		return -ENOMEM;

	spin_lock_init(&panel->lock);

	panel->input = devm_input_allocate_device(&client->dev);
	if (!panel->input)
		return -ENOMEM;

	panel->input->name = client->name;
	panel->input->id.bustype = BUS_I2C;

	error = device_property_read_u32_array(&client->dev,
					       "linux,keycodes",
					       panel->keycodes,
					       PANEL_KEYCODES_COUNT);
	if (error) {
		/*
		 * Use gamepad buttons as defaults for compatibility with
		 * existing applications.
		 */
		panel->keycodes[0] = BTN_NORTH;
		panel->keycodes[1] = BTN_SOUTH;
		panel->keycodes[2] = BTN_SELECT;
	}

	for (i = 0; i < PANEL_KEYCODES_COUNT; ++i)
		input_set_capability(panel->input, EV_KEY, panel->keycodes[i]);

	error = input_register_device(panel->input);
	if (error) {
		dev_err(&client->dev,
			"Failed to register input device: %d\n", error);
		return error;
	}

	i2c_set_clientdata(client, panel);
	error = i2c_slave_register(client, ibm_panel_i2c_slave_cb);
	if (error) {
		dev_err(&client->dev,
			"Failed to register as i2c slave: %d\n", error);
		return error;
	}

	return 0;
}
/* I2C remove: only the slave needs explicit teardown; the rest is devm. */
static void ibm_panel_remove(struct i2c_client *client)
{
	i2c_slave_unregister(client);
}
/* Bound via device tree: "ibm,op-panel". */
static const struct of_device_id ibm_panel_match[] = {
	{ .compatible = "ibm,op-panel" },
	{ }
};
MODULE_DEVICE_TABLE(of, ibm_panel_match);

static struct i2c_driver ibm_panel_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.of_match_table = ibm_panel_match,
	},
	.probe = ibm_panel_probe,
	.remove = ibm_panel_remove,
};
module_i2c_driver(ibm_panel_driver);
MODULE_AUTHOR("Eddie James <[email protected]>");
MODULE_DESCRIPTION("IBM Operation Panel Driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/ibm-panel.c
|
/*
* Copyright (C) 2011 Philippe Rétornaz
*
* Based on twl4030-pwrbutton driver by:
* Peter De Schrijver <[email protected]>
* Felipe Balbi <[email protected]>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/mc13783.h>
#include <linux/sched.h>
#include <linux/slab.h>
struct mc13783_pwrb {
	struct input_dev *pwr;
	struct mc13xxx *mc13783;
#define MC13783_PWRB_B1_POL_INVERT	(1 << 0)
#define MC13783_PWRB_B2_POL_INVERT	(1 << 1)
#define MC13783_PWRB_B3_POL_INVERT	(1 << 2)
	int flags;			/* per-button polarity-invert bits above */
	unsigned short keymap[3];	/* keycode for buttons 1..3 */
};

/* Interrupt sense register: current level of the three ONOFD lines. */
#define MC13783_REG_INTERRUPT_SENSE_1		5
#define MC13783_IRQSENSE1_ONOFD1S		(1 << 3)
#define MC13783_IRQSENSE1_ONOFD2S		(1 << 4)
#define MC13783_IRQSENSE1_ONOFD3S		(1 << 5)

/* Power control 2: per-button debounce fields and reset-enable bits. */
#define MC13783_REG_POWER_CONTROL_2		15
#define MC13783_POWER_CONTROL_2_ON1BDBNC	4
#define MC13783_POWER_CONTROL_2_ON2BDBNC	6
#define MC13783_POWER_CONTROL_2_ON3BDBNC	8
#define MC13783_POWER_CONTROL_2_ON1BRSTEN	(1 << 1)
#define MC13783_POWER_CONTROL_2_ON2BRSTEN	(1 << 2)
#define MC13783_POWER_CONTROL_2_ON3BRSTEN	(1 << 3)
/*
 * Shared IRQ handler for the three ONOFD button lines: ack the irq,
 * read the current line level from the sense register, apply the
 * configured polarity inversion and report the mapped key state.
 */
static irqreturn_t button_irq(int irq, void *_priv)
{
	struct mc13783_pwrb *priv = _priv;
	int val;

	mc13xxx_irq_ack(priv->mc13783, irq);
	mc13xxx_reg_read(priv->mc13783, MC13783_REG_INTERRUPT_SENSE_1, &val);

	switch (irq) {
	case MC13783_IRQ_ONOFD1:
		val = val & MC13783_IRQSENSE1_ONOFD1S ? 1 : 0;
		if (priv->flags & MC13783_PWRB_B1_POL_INVERT)
			val ^= 1;
		input_report_key(priv->pwr, priv->keymap[0], val);
		break;

	case MC13783_IRQ_ONOFD2:
		val = val & MC13783_IRQSENSE1_ONOFD2S ? 1 : 0;
		if (priv->flags & MC13783_PWRB_B2_POL_INVERT)
			val ^= 1;
		input_report_key(priv->pwr, priv->keymap[1], val);
		break;

	case MC13783_IRQ_ONOFD3:
		val = val & MC13783_IRQSENSE1_ONOFD3S ? 1 : 0;
		if (priv->flags & MC13783_PWRB_B3_POL_INVERT)
			val ^= 1;
		input_report_key(priv->pwr, priv->keymap[2], val);
		break;
	}

	input_sync(priv->pwr);

	return IRQ_HANDLED;
}
/*
 * Platform probe: for each of up to three buttons enabled in platform
 * data, record its keycode/polarity, request its ONOFD irq and
 * accumulate its debounce/reset-enable configuration into @reg, which
 * is written to POWER_CONTROL_2 in one rmw.  IRQ requests are layered,
 * so the error path unwinds only the irqs acquired so far.
 */
static int mc13783_pwrbutton_probe(struct platform_device *pdev)
{
	const struct mc13xxx_buttons_platform_data *pdata;
	struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
	struct input_dev *pwr;
	struct mc13783_pwrb *priv;
	int err = 0;
	int reg = 0;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "missing platform data\n");
		return -ENODEV;
	}

	pwr = input_allocate_device();
	if (!pwr) {
		dev_dbg(&pdev->dev, "Can't allocate power button\n");
		return -ENOMEM;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		err = -ENOMEM;
		dev_dbg(&pdev->dev, "Can't allocate power button\n");
		goto free_input_dev;
	}

	/* 2-bit debounce setting per button, placed at its field offset. */
	reg |= (pdata->b1on_flags & 0x3) << MC13783_POWER_CONTROL_2_ON1BDBNC;
	reg |= (pdata->b2on_flags & 0x3) << MC13783_POWER_CONTROL_2_ON2BDBNC;
	reg |= (pdata->b3on_flags & 0x3) << MC13783_POWER_CONTROL_2_ON3BDBNC;

	priv->pwr = pwr;
	priv->mc13783 = mc13783;

	mc13xxx_lock(mc13783);

	if (pdata->b1on_flags & MC13783_BUTTON_ENABLE) {
		priv->keymap[0] = pdata->b1on_key;
		if (pdata->b1on_key != KEY_RESERVED)
			__set_bit(pdata->b1on_key, pwr->keybit);

		if (pdata->b1on_flags & MC13783_BUTTON_POL_INVERT)
			priv->flags |= MC13783_PWRB_B1_POL_INVERT;

		if (pdata->b1on_flags & MC13783_BUTTON_RESET_EN)
			reg |= MC13783_POWER_CONTROL_2_ON1BRSTEN;

		err = mc13xxx_irq_request(mc13783, MC13783_IRQ_ONOFD1,
					  button_irq, "b1on", priv);
		if (err) {
			dev_dbg(&pdev->dev, "Can't request irq\n");
			goto free_priv;
		}
	}

	if (pdata->b2on_flags & MC13783_BUTTON_ENABLE) {
		priv->keymap[1] = pdata->b2on_key;
		if (pdata->b2on_key != KEY_RESERVED)
			__set_bit(pdata->b2on_key, pwr->keybit);

		if (pdata->b2on_flags & MC13783_BUTTON_POL_INVERT)
			priv->flags |= MC13783_PWRB_B2_POL_INVERT;

		if (pdata->b2on_flags & MC13783_BUTTON_RESET_EN)
			reg |= MC13783_POWER_CONTROL_2_ON2BRSTEN;

		err = mc13xxx_irq_request(mc13783, MC13783_IRQ_ONOFD2,
					  button_irq, "b2on", priv);
		if (err) {
			dev_dbg(&pdev->dev, "Can't request irq\n");
			goto free_irq_b1;
		}
	}

	if (pdata->b3on_flags & MC13783_BUTTON_ENABLE) {
		priv->keymap[2] = pdata->b3on_key;
		if (pdata->b3on_key != KEY_RESERVED)
			__set_bit(pdata->b3on_key, pwr->keybit);

		if (pdata->b3on_flags & MC13783_BUTTON_POL_INVERT)
			priv->flags |= MC13783_PWRB_B3_POL_INVERT;

		if (pdata->b3on_flags & MC13783_BUTTON_RESET_EN)
			reg |= MC13783_POWER_CONTROL_2_ON3BRSTEN;

		err = mc13xxx_irq_request(mc13783, MC13783_IRQ_ONOFD3,
					  button_irq, "b3on", priv);
		if (err) {
			dev_dbg(&pdev->dev, "Can't request irq: %d\n", err);
			goto free_irq_b2;
		}
	}

	/* Commit the debounce/reset configuration in one masked write. */
	mc13xxx_reg_rmw(mc13783, MC13783_REG_POWER_CONTROL_2, 0x3FE, reg);

	mc13xxx_unlock(mc13783);

	pwr->name = "mc13783_pwrbutton";
	pwr->phys = "mc13783_pwrbutton/input0";
	pwr->dev.parent = &pdev->dev;

	pwr->keycode = priv->keymap;
	pwr->keycodemax = ARRAY_SIZE(priv->keymap);
	pwr->keycodesize = sizeof(priv->keymap[0]);
	__set_bit(EV_KEY, pwr->evbit);

	err = input_register_device(pwr);
	if (err) {
		dev_dbg(&pdev->dev, "Can't register power button: %d\n", err);
		goto free_irq;
	}

	platform_set_drvdata(pdev, priv);

	return 0;

free_irq:
	mc13xxx_lock(mc13783);

	if (pdata->b3on_flags & MC13783_BUTTON_ENABLE)
		mc13xxx_irq_free(mc13783, MC13783_IRQ_ONOFD3, priv);

free_irq_b2:
	if (pdata->b2on_flags & MC13783_BUTTON_ENABLE)
		mc13xxx_irq_free(mc13783, MC13783_IRQ_ONOFD2, priv);

free_irq_b1:
	if (pdata->b1on_flags & MC13783_BUTTON_ENABLE)
		mc13xxx_irq_free(mc13783, MC13783_IRQ_ONOFD1, priv);

free_priv:
	mc13xxx_unlock(mc13783);
	kfree(priv);

free_input_dev:
	input_free_device(pwr);

	return err;
}
/*
 * Tear down the power button: free the IRQs that probe requested
 * (under the mc13xxx chip lock, mirroring probe), then unregister the
 * input device and release the private data.
 */
static int mc13783_pwrbutton_remove(struct platform_device *pdev)
{
	struct mc13783_pwrb *priv = platform_get_drvdata(pdev);
	const struct mc13xxx_buttons_platform_data *pdata;
	pdata = dev_get_platdata(&pdev->dev);
	/* IRQ free must happen under the chip lock, same as request in probe */
	mc13xxx_lock(priv->mc13783);
	/* Only free IRQs for buttons that probe actually enabled */
	if (pdata->b3on_flags & MC13783_BUTTON_ENABLE)
		mc13xxx_irq_free(priv->mc13783, MC13783_IRQ_ONOFD3, priv);
	if (pdata->b2on_flags & MC13783_BUTTON_ENABLE)
		mc13xxx_irq_free(priv->mc13783, MC13783_IRQ_ONOFD2, priv);
	if (pdata->b1on_flags & MC13783_BUTTON_ENABLE)
		mc13xxx_irq_free(priv->mc13783, MC13783_IRQ_ONOFD1, priv);
	mc13xxx_unlock(priv->mc13783);
	input_unregister_device(priv->pwr);
	kfree(priv);
	return 0;
}
/* Platform driver glue; bound to the "mc13783-pwrbutton" platform device
 * created by the mc13xxx MFD core.
 */
static struct platform_driver mc13783_pwrbutton_driver = {
	.probe		= mc13783_pwrbutton_probe,
	.remove		= mc13783_pwrbutton_remove,
	.driver		= {
		.name	= "mc13783-pwrbutton",
	},
};
module_platform_driver(mc13783_pwrbutton_driver);
MODULE_ALIAS("platform:mc13783-pwrbutton");
MODULE_DESCRIPTION("MC13783 Power Button");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Philippe Retornaz");
|
linux-master
|
drivers/input/misc/mc13783-pwrbutton.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* DRV2667 haptics driver family
*
* Author: Dan Murphy <[email protected]>
*
* Copyright: (C) 2014 Texas Instruments, Inc.
*/
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
/* Control registers */
#define DRV2667_STATUS 0x00
#define DRV2667_CTRL_1 0x01
#define DRV2667_CTRL_2 0x02
/* Waveform sequencer */
#define DRV2667_WV_SEQ_0 0x03
#define DRV2667_WV_SEQ_1 0x04
#define DRV2667_WV_SEQ_2 0x05
#define DRV2667_WV_SEQ_3 0x06
#define DRV2667_WV_SEQ_4 0x07
#define DRV2667_WV_SEQ_5 0x08
#define DRV2667_WV_SEQ_6 0x09
#define DRV2667_WV_SEQ_7 0x0A
#define DRV2667_FIFO 0x0B
#define DRV2667_PAGE 0xFF
#define DRV2667_MAX_REG DRV2667_PAGE
#define DRV2667_PAGE_0 0x00
#define DRV2667_PAGE_1 0x01
#define DRV2667_PAGE_2 0x02
#define DRV2667_PAGE_3 0x03
#define DRV2667_PAGE_4 0x04
#define DRV2667_PAGE_5 0x05
#define DRV2667_PAGE_6 0x06
#define DRV2667_PAGE_7 0x07
#define DRV2667_PAGE_8 0x08
/* RAM fields */
#define DRV2667_RAM_HDR_SZ 0x0
/* RAM Header addresses */
#define DRV2667_RAM_START_HI 0x01
#define DRV2667_RAM_START_LO 0x02
#define DRV2667_RAM_STOP_HI 0x03
#define DRV2667_RAM_STOP_LO 0x04
#define DRV2667_RAM_REPEAT_CT 0x05
/* RAM data addresses */
#define DRV2667_RAM_AMP 0x06
#define DRV2667_RAM_FREQ 0x07
#define DRV2667_RAM_DURATION 0x08
#define DRV2667_RAM_ENVELOPE 0x09
/* Control 1 Register */
#define DRV2667_25_VPP_GAIN 0x00
#define DRV2667_50_VPP_GAIN 0x01
#define DRV2667_75_VPP_GAIN 0x02
#define DRV2667_100_VPP_GAIN 0x03
#define DRV2667_DIGITAL_IN 0xfc
#define DRV2667_ANALOG_IN (1 << 2)
/* Control 2 Register */
#define DRV2667_GO (1 << 0)
#define DRV2667_STANDBY (1 << 6)
#define DRV2667_DEV_RST (1 << 7)
/* RAM Envelope settings */
#define DRV2667_NO_ENV 0x00
#define DRV2667_32_MS_ENV 0x01
#define DRV2667_64_MS_ENV 0x02
#define DRV2667_96_MS_ENV 0x03
#define DRV2667_128_MS_ENV 0x04
#define DRV2667_160_MS_ENV 0x05
#define DRV2667_192_MS_ENV 0x06
#define DRV2667_224_MS_ENV 0x07
#define DRV2667_256_MS_ENV 0x08
#define DRV2667_512_MS_ENV 0x09
#define DRV2667_768_MS_ENV 0x0a
#define DRV2667_1024_MS_ENV 0x0b
#define DRV2667_1280_MS_ENV 0x0c
#define DRV2667_1536_MS_ENV 0x0d
#define DRV2667_1792_MS_ENV 0x0e
#define DRV2667_2048_MS_ENV 0x0f
/**
* struct drv2667_data - Haptics driver instance data
* @input_dev: Pointer to the input device
* @client: Pointer to the I2C client
* @regmap: Register map of the device
* @work: Work item used to off load the enable/disable of the vibration
* @regulator: Pointer to the regulator for the IC
* @page: Page number
* @magnitude: Magnitude of the vibration event
* @frequency: Frequency of the vibration event
**/
struct drv2667_data {
struct input_dev *input_dev;
struct i2c_client *client;
struct regmap *regmap;
struct work_struct work;
struct regulator *regulator;
u32 page;
u32 magnitude;
u32 frequency;
};
static const struct reg_default drv2667_reg_defs[] = {
{ DRV2667_STATUS, 0x02 },
{ DRV2667_CTRL_1, 0x28 },
{ DRV2667_CTRL_2, 0x40 },
{ DRV2667_WV_SEQ_0, 0x00 },
{ DRV2667_WV_SEQ_1, 0x00 },
{ DRV2667_WV_SEQ_2, 0x00 },
{ DRV2667_WV_SEQ_3, 0x00 },
{ DRV2667_WV_SEQ_4, 0x00 },
{ DRV2667_WV_SEQ_5, 0x00 },
{ DRV2667_WV_SEQ_6, 0x00 },
{ DRV2667_WV_SEQ_7, 0x00 },
{ DRV2667_FIFO, 0x00 },
{ DRV2667_PAGE, 0x00 },
};
/*
 * drv2667_set_waveform_freq() - program the waveform frequency.
 *
 * Converts haptics->frequency (Hz) into the register value using the
 * datasheet formula "Sinusoid Frequency (Hz) = 7.8125 x Frequency",
 * switches to the RAM page holding the waveform entry if needed, writes
 * the value, and restores the previously selected page.
 *
 * Fix over the original: a failed DRV2667_RAM_FREQ write was silently
 * lost - the page-restore write below overwrote 'error', so a
 * successful restore made the function return 0 even though the
 * frequency was never programmed.  The restore result is now tracked
 * separately so the frequency-write error is propagated.
 *
 * Returns 0 on success or a negative error code.
 */
static int drv2667_set_waveform_freq(struct drv2667_data *haptics)
{
	unsigned int read_buf;
	int freq;
	int error;
	int ret;

	/* Per the data sheet:
	 * Sinusoid Frequency (Hz) = 7.8125 x Frequency
	 */
	freq = (haptics->frequency * 1000) / 78125;
	if (freq <= 0) {
		dev_err(&haptics->client->dev,
			"ERROR: Frequency calculated to %i\n", freq);
		return -EINVAL;
	}

	error = regmap_read(haptics->regmap, DRV2667_PAGE, &read_buf);
	if (error) {
		dev_err(&haptics->client->dev,
			"Failed to read the page number: %d\n", error);
		return -EIO;
	}

	/* Switch to the waveform page unless it is already selected */
	if (read_buf == DRV2667_PAGE_0 ||
	    haptics->page != read_buf) {
		error = regmap_write(haptics->regmap,
				     DRV2667_PAGE, haptics->page);
		if (error) {
			dev_err(&haptics->client->dev,
				"Failed to set the page: %d\n", error);
			return -EIO;
		}
	}

	error = regmap_write(haptics->regmap, DRV2667_RAM_FREQ, freq);
	if (error)
		dev_err(&haptics->client->dev,
			"Failed to set the frequency: %d\n", error);

	/*
	 * Reset back to the original page even if the frequency write
	 * failed, but do not let a successful restore mask that failure.
	 */
	if (read_buf == DRV2667_PAGE_0 ||
	    haptics->page != read_buf) {
		ret = regmap_write(haptics->regmap, DRV2667_PAGE, read_buf);
		if (ret) {
			dev_err(&haptics->client->dev,
				"Failed to set the page: %d\n", ret);
			return -EIO;
		}
	}

	return error;
}
/*
 * drv2667_worker() - deferred handler that starts or stops playback.
 *
 * For a non-zero magnitude: select the waveform RAM page, write the
 * amplitude, switch back to page 0 and set the GO bit to start
 * playback.  For zero magnitude: clear the GO bit to stop.  Any
 * register-write failure is logged and aborts the sequence.
 */
static void drv2667_worker(struct work_struct *work)
{
	struct drv2667_data *haptics = container_of(work, struct drv2667_data, work);
	int error;
	if (haptics->magnitude) {
		/* Select the RAM page holding the waveform entry */
		error = regmap_write(haptics->regmap,
				     DRV2667_PAGE, haptics->page);
		if (error) {
			dev_err(&haptics->client->dev,
				"Failed to set the page: %d\n", error);
			return;
		}
		error = regmap_write(haptics->regmap, DRV2667_RAM_AMP,
				     haptics->magnitude);
		if (error) {
			dev_err(&haptics->client->dev,
				"Failed to set the amplitude: %d\n", error);
			return;
		}
		/* Control registers live on page 0 */
		error = regmap_write(haptics->regmap,
				     DRV2667_PAGE, DRV2667_PAGE_0);
		if (error) {
			dev_err(&haptics->client->dev,
				"Failed to set the page: %d\n", error);
			return;
		}
		/* Kick off playback */
		error = regmap_write(haptics->regmap,
				     DRV2667_CTRL_2, DRV2667_GO);
		if (error) {
			dev_err(&haptics->client->dev,
				"Failed to set the GO bit: %d\n", error);
		}
	} else {
		/* Magnitude zero: stop playback by clearing GO */
		error = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
					   DRV2667_GO, 0);
		if (error) {
			dev_err(&haptics->client->dev,
				"Failed to unset the GO bit: %d\n", error);
		}
	}
}
static int drv2667_haptics_play(struct input_dev *input, void *data,
struct ff_effect *effect)
{
struct drv2667_data *haptics = input_get_drvdata(input);
if (effect->u.rumble.strong_magnitude > 0)
haptics->magnitude = effect->u.rumble.strong_magnitude;
else if (effect->u.rumble.weak_magnitude > 0)
haptics->magnitude = effect->u.rumble.weak_magnitude;
else
haptics->magnitude = 0;
schedule_work(&haptics->work);
return 0;
}
/*
 * drv2667_close() - input device close handler.
 *
 * Cancels any pending playback work, then puts the chip into standby;
 * a failure to enter standby is only logged (close cannot fail).
 */
static void drv2667_close(struct input_dev *input)
{
	struct drv2667_data *haptics = input_get_drvdata(input);
	int error;
	cancel_work_sync(&haptics->work);
	error = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
				   DRV2667_STANDBY, DRV2667_STANDBY);
	if (error)
		dev_err(&haptics->client->dev,
			"Failed to enter standby mode: %d\n", error);
}
/* Power-up defaults: take the chip out of standby, lowest (25 Vpp)
 * gain, and a waveform sequence of entry 1 followed by the terminator.
 */
static const struct reg_sequence drv2667_init_regs[] = {
	{ DRV2667_CTRL_2, 0 },
	{ DRV2667_CTRL_1, DRV2667_25_VPP_GAIN },
	{ DRV2667_WV_SEQ_0, 1 },
	{ DRV2667_WV_SEQ_1, 0 }
};
/* RAM page 1 contents: 5-byte header plus one waveform entry
 * (amplitude/frequency/duration, no envelope), played once.
 * NOTE(review): start/stop values follow the datasheet RAM layout -
 * confirm against the DRV2667 datasheet before changing.
 */
static const struct reg_sequence drv2667_page1_init[] = {
	{ DRV2667_RAM_HDR_SZ, 0x05 },
	{ DRV2667_RAM_START_HI, 0x80 },
	{ DRV2667_RAM_START_LO, 0x06 },
	{ DRV2667_RAM_STOP_HI, 0x00 },
	{ DRV2667_RAM_STOP_LO, 0x09 },
	{ DRV2667_RAM_REPEAT_CT, 0 },
	{ DRV2667_RAM_DURATION, 0x05 },
	{ DRV2667_RAM_ENVELOPE, DRV2667_NO_ENV },
	{ DRV2667_RAM_AMP, 0x60 },
};
/*
 * drv2667_init() - bring the chip to its default operating state.
 *
 * Applies the control-register defaults, selects RAM page 1, programs
 * the default 195 Hz waveform frequency and the page-1 waveform data,
 * then returns the chip to page 0.
 *
 * Fix over the original: a failure while writing the page-1 registers
 * returned immediately, leaving the chip selected on page 1 - unlike
 * every other error path, which restores DRV2667_PAGE_0.  That path now
 * goes through error_page as well.
 *
 * Returns 0 on success or a negative error code.
 */
static int drv2667_init(struct drv2667_data *haptics)
{
	int error;

	/* Set default haptic frequency to 195Hz on Page 1 */
	haptics->frequency = 195;
	haptics->page = DRV2667_PAGE_1;

	error = regmap_register_patch(haptics->regmap,
				      drv2667_init_regs,
				      ARRAY_SIZE(drv2667_init_regs));
	if (error) {
		dev_err(&haptics->client->dev,
			"Failed to write init registers: %d\n",
			error);
		return error;
	}

	error = regmap_write(haptics->regmap, DRV2667_PAGE, haptics->page);
	if (error) {
		dev_err(&haptics->client->dev, "Failed to set page: %d\n",
			error);
		goto error_out;
	}

	error = drv2667_set_waveform_freq(haptics);
	if (error)
		goto error_page;

	error = regmap_register_patch(haptics->regmap,
				      drv2667_page1_init,
				      ARRAY_SIZE(drv2667_page1_init));
	if (error) {
		dev_err(&haptics->client->dev,
			"Failed to write page registers: %d\n",
			error);
		/* Restore page 0 before bailing out, like the paths above */
		goto error_page;
	}

	error = regmap_write(haptics->regmap, DRV2667_PAGE, DRV2667_PAGE_0);
	return error;

error_page:
	regmap_write(haptics->regmap, DRV2667_PAGE, DRV2667_PAGE_0);
error_out:
	return error;
}
static const struct regmap_config drv2667_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = DRV2667_MAX_REG,
.reg_defaults = drv2667_reg_defs,
.num_reg_defaults = ARRAY_SIZE(drv2667_reg_defs),
.cache_type = REGCACHE_NONE,
};
/*
 * drv2667_probe() - I2C probe: acquire resources, initialize the chip
 * and register the force-feedback input device.
 *
 * All resources are devm-managed, so every error path simply returns.
 */
static int drv2667_probe(struct i2c_client *client)
{
	struct drv2667_data *haptics;
	int error;
	haptics = devm_kzalloc(&client->dev, sizeof(*haptics), GFP_KERNEL);
	if (!haptics)
		return -ENOMEM;
	haptics->regulator = devm_regulator_get(&client->dev, "vbat");
	if (IS_ERR(haptics->regulator)) {
		error = PTR_ERR(haptics->regulator);
		dev_err(&client->dev,
			"unable to get regulator, error: %d\n", error);
		return error;
	}
	haptics->input_dev = devm_input_allocate_device(&client->dev);
	if (!haptics->input_dev) {
		dev_err(&client->dev, "Failed to allocate input device\n");
		return -ENOMEM;
	}
	haptics->input_dev->name = "drv2667:haptics";
	haptics->input_dev->dev.parent = client->dev.parent;
	haptics->input_dev->close = drv2667_close;
	input_set_drvdata(haptics->input_dev, haptics);
	/* Expose the chip as a rumble-capable force-feedback device */
	input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);
	error = input_ff_create_memless(haptics->input_dev, NULL,
					drv2667_haptics_play);
	if (error) {
		dev_err(&client->dev, "input_ff_create() failed: %d\n",
			error);
		return error;
	}
	INIT_WORK(&haptics->work, drv2667_worker);
	haptics->client = client;
	i2c_set_clientdata(client, haptics);
	haptics->regmap = devm_regmap_init_i2c(client, &drv2667_regmap_config);
	if (IS_ERR(haptics->regmap)) {
		error = PTR_ERR(haptics->regmap);
		dev_err(&client->dev, "Failed to allocate register map: %d\n",
			error);
		return error;
	}
	/* Program the chip's default waveform before exposing the device */
	error = drv2667_init(haptics);
	if (error) {
		dev_err(&client->dev, "Device init failed: %d\n", error);
		return error;
	}
	error = input_register_device(haptics->input_dev);
	if (error) {
		dev_err(&client->dev, "couldn't register input device: %d\n",
			error);
		return error;
	}
	return 0;
}
/*
 * drv2667_suspend() - system suspend: put the chip in standby and cut
 * its supply, but only when the input device is currently opened.
 * Runs under the input mutex so it cannot race open/close.
 */
static int drv2667_suspend(struct device *dev)
{
	struct drv2667_data *haptics = dev_get_drvdata(dev);
	int ret = 0;
	mutex_lock(&haptics->input_dev->mutex);
	if (input_device_enabled(haptics->input_dev)) {
		ret = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
					 DRV2667_STANDBY, DRV2667_STANDBY);
		if (ret) {
			dev_err(dev, "Failed to set standby mode\n");
			/* NOTE(review): the supply is cut even though
			 * suspend is aborted here - confirm intentional.
			 */
			regulator_disable(haptics->regulator);
			goto out;
		}
		ret = regulator_disable(haptics->regulator);
		if (ret) {
			dev_err(dev, "Failed to disable regulator\n");
			/* Roll back standby so the device stays usable */
			regmap_update_bits(haptics->regmap,
					   DRV2667_CTRL_2,
					   DRV2667_STANDBY, 0);
		}
	}
out:
	mutex_unlock(&haptics->input_dev->mutex);
	return ret;
}
/*
 * drv2667_resume() - counterpart of drv2667_suspend(): re-enable the
 * supply and take the chip out of standby, only when the input device
 * is opened.  On standby failure the regulator is switched back off.
 */
static int drv2667_resume(struct device *dev)
{
	struct drv2667_data *haptics = dev_get_drvdata(dev);
	int ret = 0;
	mutex_lock(&haptics->input_dev->mutex);
	if (input_device_enabled(haptics->input_dev)) {
		ret = regulator_enable(haptics->regulator);
		if (ret) {
			dev_err(dev, "Failed to enable regulator\n");
			goto out;
		}
		ret = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
					 DRV2667_STANDBY, 0);
		if (ret) {
			dev_err(dev, "Failed to unset standby mode\n");
			/* Undo the regulator enable on failure */
			regulator_disable(haptics->regulator);
			goto out;
		}
	}
out:
	mutex_unlock(&haptics->input_dev->mutex);
	return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(drv2667_pm_ops, drv2667_suspend, drv2667_resume);
static const struct i2c_device_id drv2667_id[] = {
{ "drv2667", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, drv2667_id);
#ifdef CONFIG_OF
static const struct of_device_id drv2667_of_match[] = {
{ .compatible = "ti,drv2667", },
{ }
};
MODULE_DEVICE_TABLE(of, drv2667_of_match);
#endif
static struct i2c_driver drv2667_driver = {
.probe = drv2667_probe,
.driver = {
.name = "drv2667-haptics",
.of_match_table = of_match_ptr(drv2667_of_match),
.pm = pm_sleep_ptr(&drv2667_pm_ops),
},
.id_table = drv2667_id,
};
module_i2c_driver(drv2667_driver);
MODULE_DESCRIPTION("TI DRV2667 haptics driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Murphy <[email protected]>");
|
linux-master
|
drivers/input/misc/drv2667.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* PC Speaker beeper driver for Linux
*
* Copyright (c) 2002 Vojtech Pavlik
* Copyright (c) 1992 Orest Zborowski
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i8253.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/timex.h>
#include <linux/io.h>
MODULE_AUTHOR("Vojtech Pavlik <[email protected]>");
MODULE_DESCRIPTION("PC Speaker beeper driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pcspkr");
/*
 * pcspkr_event() - EV_SND handler driving PIT channel 2.
 *
 * SND_BELL maps any non-zero value to a fixed 1000 Hz tone; SND_TONE
 * uses the value as the frequency in Hz.  Values outside (20, 32767)
 * silence the speaker.  The i8253 is programmed under its raw spinlock
 * because the PIT is shared with other users.
 */
static int pcspkr_event(struct input_dev *dev, unsigned int type,
			unsigned int code, int value)
{
	unsigned int count = 0;
	unsigned long flags;
	if (type != EV_SND)
		return -EINVAL;
	switch (code) {
	case SND_BELL:
		if (value)
			value = 1000;
		break;
	case SND_TONE:
		break;
	default:
		return -EINVAL;
	}
	/* Convert frequency to PIT divisor; count == 0 means "off" */
	if (value > 20 && value < 32767)
		count = PIT_TICK_RATE / value;
	raw_spin_lock_irqsave(&i8253_lock, flags);
	if (count) {
		/* set command for counter 2, 2 byte write */
		outb_p(0xB6, 0x43);
		/* select desired HZ */
		outb_p(count & 0xff, 0x42);
		outb((count >> 8) & 0xff, 0x42);
		/* enable counter 2 */
		outb_p(inb_p(0x61) | 3, 0x61);
	} else {
		/* disable counter 2 */
		outb(inb_p(0x61) & 0xFC, 0x61);
	}
	raw_spin_unlock_irqrestore(&i8253_lock, flags);
	return 0;
}
/*
 * Allocate and register the PC speaker input device, advertising the
 * EV_SND bell and tone capabilities handled by pcspkr_event().
 */
static int pcspkr_probe(struct platform_device *dev)
{
	struct input_dev *pcspkr_dev;
	int err;

	pcspkr_dev = input_allocate_device();
	if (!pcspkr_dev)
		return -ENOMEM;

	pcspkr_dev->name = "PC Speaker";
	pcspkr_dev->phys = "isa0061/input0";
	pcspkr_dev->id.bustype = BUS_ISA;
	pcspkr_dev->id.vendor = 0x001f;
	pcspkr_dev->id.product = 0x0001;
	pcspkr_dev->id.version = 0x0100;
	pcspkr_dev->dev.parent = &dev->dev;

	pcspkr_dev->evbit[0] = BIT_MASK(EV_SND);
	pcspkr_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
	pcspkr_dev->event = pcspkr_event;

	err = input_register_device(pcspkr_dev);
	if (err)
		goto err_free_dev;

	platform_set_drvdata(dev, pcspkr_dev);

	return 0;

err_free_dev:
	input_free_device(pcspkr_dev);
	return err;
}
/* Unregister the input device and make sure the speaker is silenced. */
static int pcspkr_remove(struct platform_device *dev)
{
	struct input_dev *pcspkr_dev = platform_get_drvdata(dev);
	input_unregister_device(pcspkr_dev);
	/* turn off the speaker */
	pcspkr_event(NULL, EV_SND, SND_BELL, 0);
	return 0;
}
/* PM suspend hook: silence the speaker before the system sleeps. */
static int pcspkr_suspend(struct device *dev)
{
	pcspkr_event(NULL, EV_SND, SND_BELL, 0);
	return 0;
}
/* Shutdown hook: silence the speaker so it does not beep across reboot. */
static void pcspkr_shutdown(struct platform_device *dev)
{
	/* turn off the speaker */
	pcspkr_event(NULL, EV_SND, SND_BELL, 0);
}
static const struct dev_pm_ops pcspkr_pm_ops = {
.suspend = pcspkr_suspend,
};
static struct platform_driver pcspkr_platform_driver = {
.driver = {
.name = "pcspkr",
.pm = &pcspkr_pm_ops,
},
.probe = pcspkr_probe,
.remove = pcspkr_remove,
.shutdown = pcspkr_shutdown,
};
module_platform_driver(pcspkr_platform_driver);
|
linux-master
|
drivers/input/misc/pcspkr.c
|
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2022 NXP.
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#define BBNSM_CTRL 0x8
#define BBNSM_INT_EN 0x10
#define BBNSM_EVENTS 0x14
#define BBNSM_PAD_CTRL 0x24
#define BBNSM_BTN_PRESSED BIT(7)
#define BBNSM_PWR_ON BIT(6)
#define BBNSM_BTN_OFF BIT(5)
#define BBNSM_EMG_OFF BIT(4)
#define BBNSM_PWRKEY_EVENTS (BBNSM_PWR_ON | BBNSM_BTN_OFF | BBNSM_EMG_OFF)
#define BBNSM_DP_EN BIT(24)
#define DEBOUNCE_TIME 30
#define REPEAT_INTERVAL 60
struct bbnsm_pwrkey {
struct regmap *regmap;
int irq;
int keycode;
int keystate; /* 1:pressed */
struct timer_list check_timer;
struct input_dev *input;
};
/*
 * Timer callback: sample the button state from BBNSM_EVENTS, report a
 * key event on change, and re-arm the timer while the button is held
 * so the eventual release is also reported.
 */
static void bbnsm_pwrkey_check_for_events(struct timer_list *t)
{
	struct bbnsm_pwrkey *bbnsm = from_timer(bbnsm, t, check_timer);
	struct input_dev *input = bbnsm->input;
	u32 state;
	regmap_read(bbnsm->regmap, BBNSM_EVENTS, &state);
	/* normalize the pressed bit to 0/1 */
	state = state & BBNSM_BTN_PRESSED ? 1 : 0;
	/* only report new event if status changed */
	if (state ^ bbnsm->keystate) {
		bbnsm->keystate = state;
		input_event(input, EV_KEY, bbnsm->keycode, state);
		input_sync(input);
		/* release the wakeup reference taken in the IRQ handler */
		pm_relax(bbnsm->input->dev.parent);
	}
	/* repeat check if pressed long */
	if (state)
		mod_timer(&bbnsm->check_timer,
			  jiffies + msecs_to_jiffies(REPEAT_INTERVAL));
}
/*
 * Shared IRQ handler: on a button-off event, mark a wakeup event,
 * start the debounce timer (which samples the button state), and ack
 * the event bit in hardware.  Returns IRQ_NONE for foreign events.
 */
static irqreturn_t bbnsm_pwrkey_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct bbnsm_pwrkey *bbnsm = platform_get_drvdata(pdev);
	u32 event;
	regmap_read(bbnsm->regmap, BBNSM_EVENTS, &event);
	if (!(event & BBNSM_BTN_OFF))
		return IRQ_NONE;
	/* balanced by pm_relax() in the timer callback */
	pm_wakeup_event(bbnsm->input->dev.parent, 0);
	mod_timer(&bbnsm->check_timer,
		  jiffies + msecs_to_jiffies(DEBOUNCE_TIME));
	/* clear PWR OFF */
	regmap_write(bbnsm->regmap, BBNSM_EVENTS, BBNSM_BTN_OFF);
	return IRQ_HANDLED;
}
/* devm teardown action: stop the check timer before the devm-managed
 * IRQ and input device are released.
 */
static void bbnsm_pwrkey_act(void *pdata)
{
	struct bbnsm_pwrkey *bbnsm = pdata;
	timer_shutdown_sync(&bbnsm->check_timer);
}
/*
 * bbnsm_pwrkey_probe() - map the parent syscon, configure the BBNSM
 * power-control bits, and register the power-key input device with its
 * wake-capable interrupt.
 *
 * Fixes over the original: propagate the real error code from
 * platform_get_irq() instead of masking it as -EINVAL (which also
 * hides -EPROBE_DEFER), and correct the "pwerkey" typo in the regmap
 * error message.
 */
static int bbnsm_pwrkey_probe(struct platform_device *pdev)
{
	struct bbnsm_pwrkey *bbnsm;
	struct input_dev *input;
	struct device_node *np = pdev->dev.of_node;
	int error;

	bbnsm = devm_kzalloc(&pdev->dev, sizeof(*bbnsm), GFP_KERNEL);
	if (!bbnsm)
		return -ENOMEM;

	/* BBNSM registers are shared with sibling devices via syscon */
	bbnsm->regmap = syscon_node_to_regmap(np->parent);
	if (IS_ERR(bbnsm->regmap)) {
		dev_err(&pdev->dev, "bbnsm pwrkey get regmap failed\n");
		return PTR_ERR(bbnsm->regmap);
	}

	if (device_property_read_u32(&pdev->dev, "linux,code",
				     &bbnsm->keycode)) {
		bbnsm->keycode = KEY_POWER;
		dev_warn(&pdev->dev, "key code is not specified, using default KEY_POWER\n");
	}

	bbnsm->irq = platform_get_irq(pdev, 0);
	if (bbnsm->irq < 0)
		return bbnsm->irq;

	/* config the BBNSM power related register */
	regmap_update_bits(bbnsm->regmap, BBNSM_CTRL, BBNSM_DP_EN, BBNSM_DP_EN);

	/* clear the unexpected interrupt before driver ready */
	regmap_write_bits(bbnsm->regmap, BBNSM_EVENTS, BBNSM_PWRKEY_EVENTS,
			  BBNSM_PWRKEY_EVENTS);

	timer_setup(&bbnsm->check_timer, bbnsm_pwrkey_check_for_events, 0);

	input = devm_input_allocate_device(&pdev->dev);
	if (!input) {
		dev_err(&pdev->dev, "failed to allocate the input device\n");
		return -ENOMEM;
	}

	input->name = pdev->name;
	input->phys = "bbnsm-pwrkey/input0";
	input->id.bustype = BUS_HOST;

	input_set_capability(input, EV_KEY, bbnsm->keycode);

	/* input customer action to cancel release timer */
	error = devm_add_action(&pdev->dev, bbnsm_pwrkey_act, bbnsm);
	if (error) {
		dev_err(&pdev->dev, "failed to register remove action\n");
		return error;
	}

	bbnsm->input = input;
	platform_set_drvdata(pdev, bbnsm);

	error = devm_request_irq(&pdev->dev, bbnsm->irq, bbnsm_pwrkey_interrupt,
				 IRQF_SHARED, pdev->name, pdev);
	if (error) {
		dev_err(&pdev->dev, "interrupt not available.\n");
		return error;
	}

	error = input_register_device(input);
	if (error) {
		dev_err(&pdev->dev, "failed to register input device\n");
		return error;
	}

	device_init_wakeup(&pdev->dev, true);
	error = dev_pm_set_wake_irq(&pdev->dev, bbnsm->irq);
	if (error)
		dev_warn(&pdev->dev, "irq wake enable failed.\n");

	return 0;
}
static const struct of_device_id bbnsm_pwrkey_ids[] = {
{ .compatible = "nxp,imx93-bbnsm-pwrkey" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bbnsm_pwrkey_ids);
static struct platform_driver bbnsm_pwrkey_driver = {
.driver = {
.name = "bbnsm_pwrkey",
.of_match_table = bbnsm_pwrkey_ids,
},
.probe = bbnsm_pwrkey_probe,
};
module_platform_driver(bbnsm_pwrkey_driver);
MODULE_AUTHOR("Jacky Bai <[email protected]>");
MODULE_DESCRIPTION("NXP bbnsm power key Driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/nxp-bbnsm-pwrkey.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Texas Instruments' TPS65217 and TPS65218 Power Button Input Driver
*
* Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
* Author: Felipe Balbi <[email protected]>
* Author: Marcin Niestroj <[email protected]>
*/
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/tps65217.h>
#include <linux/mfd/tps65218.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
struct tps6521x_data {
unsigned int reg_status;
unsigned int pb_mask;
const char *name;
};
static const struct tps6521x_data tps65217_data = {
.reg_status = TPS65217_REG_STATUS,
.pb_mask = TPS65217_STATUS_PB,
.name = "tps65217_pwrbutton",
};
static const struct tps6521x_data tps65218_data = {
.reg_status = TPS65218_REG_STATUS,
.pb_mask = TPS65218_STATUS_PB_STATE,
.name = "tps65218_pwrbutton",
};
struct tps6521x_pwrbutton {
struct device *dev;
struct regmap *regmap;
struct input_dev *idev;
const struct tps6521x_data *data;
char phys[32];
};
static const struct of_device_id of_tps6521x_pb_match[] = {
{ .compatible = "ti,tps65217-pwrbutton", .data = &tps65217_data },
{ .compatible = "ti,tps65218-pwrbutton", .data = &tps65218_data },
{ },
};
MODULE_DEVICE_TABLE(of, of_tps6521x_pb_match);
static irqreturn_t tps6521x_pb_irq(int irq, void *_pwr)
{
struct tps6521x_pwrbutton *pwr = _pwr;
const struct tps6521x_data *tps_data = pwr->data;
unsigned int reg;
int error;
error = regmap_read(pwr->regmap, tps_data->reg_status, ®);
if (error) {
dev_err(pwr->dev, "can't read register: %d\n", error);
goto out;
}
if (reg & tps_data->pb_mask) {
input_report_key(pwr->idev, KEY_POWER, 1);
pm_wakeup_event(pwr->dev, 0);
} else {
input_report_key(pwr->idev, KEY_POWER, 0);
}
input_sync(pwr->idev);
out:
return IRQ_HANDLED;
}
static int tps6521x_pb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tps6521x_pwrbutton *pwr;
struct input_dev *idev;
const struct of_device_id *match;
int error;
int irq;
match = of_match_node(of_tps6521x_pb_match, dev->of_node);
if (!match)
return -ENXIO;
pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
if (!pwr)
return -ENOMEM;
pwr->data = match->data;
idev = devm_input_allocate_device(dev);
if (!idev)
return -ENOMEM;
idev->name = pwr->data->name;
snprintf(pwr->phys, sizeof(pwr->phys), "%s/input0",
pwr->data->name);
idev->phys = pwr->phys;
idev->dev.parent = dev;
idev->id.bustype = BUS_I2C;
input_set_capability(idev, EV_KEY, KEY_POWER);
pwr->regmap = dev_get_regmap(dev->parent, NULL);
pwr->dev = dev;
pwr->idev = idev;
device_init_wakeup(dev, true);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
error = devm_request_threaded_irq(dev, irq, NULL, tps6521x_pb_irq,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
pwr->data->name, pwr);
if (error) {
dev_err(dev, "failed to request IRQ #%d: %d\n", irq, error);
return error;
}
error= input_register_device(idev);
if (error) {
dev_err(dev, "Can't register power button: %d\n", error);
return error;
}
return 0;
}
static const struct platform_device_id tps6521x_pwrbtn_id_table[] = {
{ "tps65218-pwrbutton", },
{ "tps65217-pwrbutton", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, tps6521x_pwrbtn_id_table);
static struct platform_driver tps6521x_pb_driver = {
.probe = tps6521x_pb_probe,
.driver = {
.name = "tps6521x_pwrbutton",
.of_match_table = of_tps6521x_pb_match,
},
.id_table = tps6521x_pwrbtn_id_table,
};
module_platform_driver(tps6521x_pb_driver);
MODULE_DESCRIPTION("TPS6521X Power Button");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Felipe Balbi <[email protected]>");
|
linux-master
|
drivers/input/misc/tps65218-pwrbutton.c
|
/*
* MAX8925 ONKEY driver
*
* Copyright (C) 2009 Marvell International Ltd.
* Haojian Zhuang <[email protected]>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mfd/max8925.h>
#include <linux/slab.h>
#include <linux/device.h>
#define SW_INPUT (1 << 7) /* 0/1 -- up/down */
#define HARDRESET_EN (1 << 7)
#define PWREN_EN (1 << 7)
struct max8925_onkey_info {
struct input_dev *idev;
struct i2c_client *i2c;
struct device *dev;
unsigned int irq[2];
};
/*
* MAX8925 gives us an interrupt when ONKEY is pressed or released.
* max8925_set_bits() operates I2C bus and may sleep. So implement
* it in thread IRQ handler.
*/
/*
 * Threaded IRQ handler shared by the press and release interrupts:
 * read the ON/OFF status over I2C and report KEY_POWER accordingly,
 * then re-arm the hardware hard-reset safety net.
 */
static irqreturn_t max8925_onkey_handler(int irq, void *data)
{
	struct max8925_onkey_info *info = data;
	int state;
	state = max8925_reg_read(info->i2c, MAX8925_ON_OFF_STATUS);
	input_report_key(info->idev, KEY_POWER, state & SW_INPUT);
	input_sync(info->idev);
	dev_dbg(info->dev, "onkey state:%d\n", state);
	/* Enable hardreset to halt if system isn't shutdown on time */
	max8925_set_bits(info->i2c, MAX8925_SYSENSEL,
			 HARDRESET_EN, HARDRESET_EN);
	return IRQ_HANDLED;
}
/*
 * max8925_onkey_probe() - wire up the two ONKEY interrupts (press and
 * release) to the shared threaded handler and register the input
 * device.  All resources are devm-managed.
 *
 * Fix over the original: propagate the real error codes from
 * platform_get_irq() instead of masking them as -EINVAL (which also
 * hides -EPROBE_DEFER).
 */
static int max8925_onkey_probe(struct platform_device *pdev)
{
	struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
	struct max8925_onkey_info *info;
	struct input_dev *input;
	int irq[2], error;

	irq[0] = platform_get_irq(pdev, 0);
	if (irq[0] < 0)
		return irq[0];

	irq[1] = platform_get_irq(pdev, 1);
	if (irq[1] < 0)
		return irq[1];

	info = devm_kzalloc(&pdev->dev, sizeof(struct max8925_onkey_info),
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	input = devm_input_allocate_device(&pdev->dev);
	if (!input)
		return -ENOMEM;

	info->idev = input;
	info->i2c = chip->i2c;
	info->dev = &pdev->dev;
	info->irq[0] = irq[0];
	info->irq[1] = irq[1];

	input->name = "max8925_on";
	input->phys = "max8925_on/input0";
	input->id.bustype = BUS_I2C;
	input->dev.parent = &pdev->dev;
	input_set_capability(input, EV_KEY, KEY_POWER);

	error = devm_request_threaded_irq(&pdev->dev, irq[0], NULL,
					  max8925_onkey_handler, IRQF_ONESHOT,
					  "onkey-down", info);
	if (error < 0) {
		dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
			irq[0], error);
		return error;
	}

	error = devm_request_threaded_irq(&pdev->dev, irq[1], NULL,
					  max8925_onkey_handler, IRQF_ONESHOT,
					  "onkey-up", info);
	if (error < 0) {
		dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
			irq[1], error);
		return error;
	}

	error = input_register_device(info->idev);
	if (error) {
		dev_err(chip->dev, "Can't register input device: %d\n", error);
		return error;
	}

	platform_set_drvdata(pdev, info);
	device_init_wakeup(&pdev->dev, 1);

	return 0;
}
/*
 * PM suspend: if the device may wake the system, flag both ONKEY IRQs
 * in the chip's wakeup mask so the MFD core keeps them armed.
 * NOTE(review): the irq numbers are used directly as bit positions in
 * wakeup_flag - assumes they fit the mask width; confirm in MFD core.
 */
static int max8925_onkey_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct max8925_onkey_info *info = platform_get_drvdata(pdev);
	struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
	if (device_may_wakeup(dev)) {
		chip->wakeup_flag |= 1 << info->irq[0];
		chip->wakeup_flag |= 1 << info->irq[1];
	}
	return 0;
}
/*
 * PM resume: clear the wakeup-mask bits that suspend set, returning
 * the ONKEY IRQs to normal (non-wakeup) operation.
 */
static int max8925_onkey_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct max8925_onkey_info *info = platform_get_drvdata(pdev);
	struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
	if (device_may_wakeup(dev)) {
		chip->wakeup_flag &= ~(1 << info->irq[0]);
		chip->wakeup_flag &= ~(1 << info->irq[1]);
	}
	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(max8925_onkey_pm_ops,
max8925_onkey_suspend, max8925_onkey_resume);
static struct platform_driver max8925_onkey_driver = {
.driver = {
.name = "max8925-onkey",
.pm = pm_sleep_ptr(&max8925_onkey_pm_ops),
},
.probe = max8925_onkey_probe,
};
module_platform_driver(max8925_onkey_driver);
MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver");
MODULE_AUTHOR("Haojian Zhuang <[email protected]>");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/max8925_onkey.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OnKey device driver for DA9063, DA9062 and DA9061 PMICs
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*/
#include <linux/devm-helpers.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/workqueue.h>
#include <linux/regmap.h>
#include <linux/of.h>
#include <linux/mfd/da9063/core.h>
#include <linux/mfd/da9063/registers.h>
#include <linux/mfd/da9062/core.h>
#include <linux/mfd/da9062/registers.h>
struct da906x_chip_config {
/* REGS */
int onkey_status;
int onkey_pwr_signalling;
int onkey_fault_log;
int onkey_shutdown;
/* MASKS */
int onkey_nonkey_mask;
int onkey_nonkey_lock_mask;
int onkey_key_reset_mask;
int onkey_shutdown_mask;
/* NAMES */
const char *name;
};
struct da9063_onkey {
struct delayed_work work;
struct input_dev *input;
struct device *dev;
struct regmap *regmap;
const struct da906x_chip_config *config;
char phys[32];
bool key_power;
};
static const struct da906x_chip_config da9063_regs = {
/* REGS */
.onkey_status = DA9063_REG_STATUS_A,
.onkey_pwr_signalling = DA9063_REG_CONTROL_B,
.onkey_fault_log = DA9063_REG_FAULT_LOG,
.onkey_shutdown = DA9063_REG_CONTROL_F,
/* MASKS */
.onkey_nonkey_mask = DA9063_NONKEY,
.onkey_nonkey_lock_mask = DA9063_NONKEY_LOCK,
.onkey_key_reset_mask = DA9063_KEY_RESET,
.onkey_shutdown_mask = DA9063_SHUTDOWN,
/* NAMES */
.name = DA9063_DRVNAME_ONKEY,
};
static const struct da906x_chip_config da9062_regs = {
/* REGS */
.onkey_status = DA9062AA_STATUS_A,
.onkey_pwr_signalling = DA9062AA_CONTROL_B,
.onkey_fault_log = DA9062AA_FAULT_LOG,
.onkey_shutdown = DA9062AA_CONTROL_F,
/* MASKS */
.onkey_nonkey_mask = DA9062AA_NONKEY_MASK,
.onkey_nonkey_lock_mask = DA9062AA_NONKEY_LOCK_MASK,
.onkey_key_reset_mask = DA9062AA_KEY_RESET_MASK,
.onkey_shutdown_mask = DA9062AA_SHUTDOWN_MASK,
/* NAMES */
.name = "da9062-onkey",
};
static const struct of_device_id da9063_compatible_reg_id_table[] = {
{ .compatible = "dlg,da9063-onkey", .data = &da9063_regs },
{ .compatible = "dlg,da9062-onkey", .data = &da9062_regs },
{ },
};
MODULE_DEVICE_TABLE(of, da9063_compatible_reg_id_table);
/*
 * da9063_poll_on() - delayed work polling for ONKEY release.
 *
 * While the key is held, re-poll STATUS every 50 ms.  On release,
 * clear the NONKEY_LOCK delay mode and report KEY_POWER up.  If the
 * PMIC has latched the KEY_RESET fault, clear it and send the SHUTDOWN
 * command to power the system off.
 */
static void da9063_poll_on(struct work_struct *work)
{
	struct da9063_onkey *onkey = container_of(work,
						  struct da9063_onkey,
						  work.work);
	const struct da906x_chip_config *config = onkey->config;
	unsigned int val;
	int fault_log = 0;
	bool poll = true;
	int error;
	/* Poll to see when the pin is released */
	error = regmap_read(onkey->regmap,
			    config->onkey_status,
			    &val);
	if (error) {
		dev_err(onkey->dev,
			"Failed to read ON status: %d\n", error);
		goto err_poll;
	}
	if (!(val & config->onkey_nonkey_mask)) {
		/* Key released: drop the lock bit and report key-up */
		error = regmap_update_bits(onkey->regmap,
					   config->onkey_pwr_signalling,
					   config->onkey_nonkey_lock_mask,
					   0);
		if (error) {
			dev_err(onkey->dev,
				"Failed to reset the Key Delay %d\n", error);
			goto err_poll;
		}
		input_report_key(onkey->input, KEY_POWER, 0);
		input_sync(onkey->input);
		/* stop polling once the release has been reported */
		poll = false;
	}
	/*
	 * If the fault log KEY_RESET is detected, then clear it
	 * and shut down the system.
	 */
	error = regmap_read(onkey->regmap,
			    config->onkey_fault_log,
			    &fault_log);
	if (error) {
		dev_warn(&onkey->input->dev,
			 "Cannot read FAULT_LOG: %d\n", error);
	} else if (fault_log & config->onkey_key_reset_mask) {
		/* fault-log bits are write-one-to-clear */
		error = regmap_write(onkey->regmap,
				     config->onkey_fault_log,
				     config->onkey_key_reset_mask);
		if (error) {
			dev_warn(&onkey->input->dev,
				 "Cannot reset KEY_RESET fault log: %d\n",
				 error);
		} else {
			/* at this point we do any S/W housekeeping
			 * and then send shutdown command
			 */
			dev_dbg(&onkey->input->dev,
				"Sending SHUTDOWN to PMIC ...\n");
			error = regmap_write(onkey->regmap,
					     config->onkey_shutdown,
					     config->onkey_shutdown_mask);
			if (error)
				dev_err(&onkey->input->dev,
					"Cannot SHUTDOWN PMIC: %d\n",
					error);
		}
	}
err_poll:
	if (poll)
		schedule_delayed_work(&onkey->work, msecs_to_jiffies(50));
}
/*
 * ONKEY threaded IRQ handler. A press is always reported; whether the
 * release is synthesized immediately (short press) or left to the poll
 * worker (long press) depends on the current status register reading.
 */
static irqreturn_t da9063_onkey_irq_handler(int irq, void *data)
{
	struct da9063_onkey *onkey = data;
	const struct da906x_chip_config *config = onkey->config;
	unsigned int val;
	int error;

	error = regmap_read(onkey->regmap,
			    config->onkey_status,
			    &val);

	/* The press event is emitted in either case. */
	input_report_key(onkey->input, KEY_POWER, 1);
	input_sync(onkey->input);

	if (onkey->key_power && !error && (val & config->onkey_nonkey_mask)) {
		/* Still held: let the delayed work detect the release. */
		schedule_delayed_work(&onkey->work, 0);
		dev_dbg(onkey->dev, "KEY_POWER long press.\n");
	} else {
		/* Released already (or status unreadable): full tap now. */
		input_report_key(onkey->input, KEY_POWER, 0);
		input_sync(onkey->input);
		dev_dbg(onkey->dev, "KEY_POWER short press.\n");
	}

	return IRQ_HANDLED;
}
/*
 * Probe: match the DT node to a chip config, borrow the MFD parent's
 * regmap, register the input device and hook the ONKEY interrupt.
 * All resources are devm-managed, so there is no remove() callback.
 */
static int da9063_onkey_probe(struct platform_device *pdev)
{
	struct da9063_onkey *onkey;
	const struct of_device_id *match;
	int irq;
	int error;

	match = of_match_node(da9063_compatible_reg_id_table,
			      pdev->dev.of_node);
	if (!match)
		return -ENXIO;

	onkey = devm_kzalloc(&pdev->dev, sizeof(struct da9063_onkey),
			     GFP_KERNEL);
	if (!onkey) {
		dev_err(&pdev->dev, "Failed to allocate memory.\n");
		return -ENOMEM;
	}

	/* Per-chip register layout chosen by the DT compatible string. */
	onkey->config = match->data;
	onkey->dev = &pdev->dev;

	/* The register map belongs to the parent MFD device. */
	onkey->regmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!onkey->regmap) {
		dev_err(&pdev->dev, "Parent regmap unavailable.\n");
		return -ENXIO;
	}

	/* DT may opt out of long-press KEY_POWER reporting. */
	onkey->key_power = !of_property_read_bool(pdev->dev.of_node,
						  "dlg,disable-key-power");

	onkey->input = devm_input_allocate_device(&pdev->dev);
	if (!onkey->input) {
		dev_err(&pdev->dev, "Failed to allocated input device.\n");
		return -ENOMEM;
	}

	onkey->input->name = onkey->config->name;
	snprintf(onkey->phys, sizeof(onkey->phys), "%s/input0",
		 onkey->config->name);
	onkey->input->phys = onkey->phys;
	onkey->input->dev.parent = &pdev->dev;

	input_set_capability(onkey->input, EV_KEY, KEY_POWER);

	/* Auto-cancels the release-poll work on driver detach. */
	error = devm_delayed_work_autocancel(&pdev->dev, &onkey->work,
					     da9063_poll_on);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to add cancel poll action: %d\n",
			error);
		return error;
	}

	irq = platform_get_irq_byname(pdev, "ONKEY");
	if (irq < 0)
		return irq;

	error = devm_request_threaded_irq(&pdev->dev, irq,
					  NULL, da9063_onkey_irq_handler,
					  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
					  "ONKEY", onkey);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to request IRQ %d: %d\n", irq, error);
		return error;
	}

	/* Wake IRQ setup is best-effort: failure only costs wakeup support. */
	error = dev_pm_set_wake_irq(&pdev->dev, irq);
	if (error)
		dev_warn(&pdev->dev,
			 "Failed to set IRQ %d as a wake IRQ: %d\n",
			 irq, error);
	else
		device_init_wakeup(&pdev->dev, true);

	error = input_register_device(onkey->input);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to register input device: %d\n", error);
		return error;
	}

	return 0;
}
/* Platform driver glue; probe-only, everything else is devm-managed. */
static struct platform_driver da9063_onkey_driver = {
	.probe	= da9063_onkey_probe,
	.driver	= {
		.name	= DA9063_DRVNAME_ONKEY,
		.of_match_table = da9063_compatible_reg_id_table,
	},
};
module_platform_driver(da9063_onkey_driver);
MODULE_AUTHOR("S Twiss <[email protected]>");
MODULE_DESCRIPTION("Onkey device driver for Dialog DA9063, DA9062 and DA9061");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DA9063_DRVNAME_ONKEY);
|
linux-master
|
drivers/input/misc/da9063_onkey.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Fujitsu Lifebook Application Panel button drive
*
* Copyright (C) 2007 Stephen Hemminger <[email protected]>
* Copyright (C) 2001-2003 Jochen Eisinger <[email protected]>
*
* Many Fujitsu Lifebook laptops have a small panel of buttons that are
* accessible via the i2c/smbus interface. This driver polls those
* buttons and generates input events.
*
* For more details see:
* http://apanel.sourceforge.net/tech.php
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/leds.h>
#define APANEL_NAME "Fujitsu Application Panel"
#define APANEL "apanel"
/* How often we poll keys - msecs */
#define POLL_INTERVAL_DEFAULT 1000
/* Magic constants in BIOS that tell about buttons */
/* Device slot indices as encoded in the BIOS "FJKEYINF" table. */
enum apanel_devid {
	APANEL_DEV_NONE	  = 0,
	APANEL_DEV_APPBTN = 1,
	APANEL_DEV_CDBTN  = 2,
	APANEL_DEV_LCD	  = 3,
	APANEL_DEV_LED	  = 4,
	APANEL_DEV_MAX,
};

/* Controller chip variant, also taken from the BIOS table. */
enum apanel_chip {
	CHIP_NONE    = 0,
	CHIP_OZ992C  = 1,
	CHIP_OZ163T  = 2,
	CHIP_OZ711M3 = 4,
};

/* Result of BIOS snooping/probing -- what features are supported */
static enum apanel_chip device_chip[APANEL_DEV_MAX];

#define MAX_PANEL_KEYS		12

struct apanel {
	struct input_dev *idev;		/* polled input device */
	struct i2c_client *client;	/* SMBus link to the panel */
	unsigned short keymap[MAX_PANEL_KEYS];	/* bit index -> keycode */
	u16    nkeys;
	struct led_classdev mail_led;	/* optional mail LED */
};

/* Default mapping of panel button bits to input keycodes. */
static const unsigned short apanel_keymap[MAX_PANEL_KEYS] = {
	[0] = KEY_MAIL,
	[1] = KEY_WWW,
	[2] = KEY_PROG2,
	[3] = KEY_PROG1,

	[8] = KEY_FORWARD,
	[9] = KEY_REWIND,
	[10] = KEY_STOPCD,
	[11] = KEY_PLAYPAUSE,
};
/* Emit a full press/release pair for one keycode on the polled device. */
static void report_key(struct input_dev *input, unsigned keycode)
{
	dev_dbg(input->dev.parent, "report key %#x\n", keycode);
	input_report_key(input, keycode, 1);
	input_sync(input);

	input_report_key(input, keycode, 0);
	input_sync(input);
}
/* Poll for key changes
*
* Read Application keys via SMI
* A (0x4), B (0x8), Internet (0x2), Email (0x1).
*
* CD keys:
* Forward (0x100), Rewind (0x200), Stop (0x400), Pause (0x800)
*/
/* Poll callback: read the latched button bits, clear them, report keys. */
static void apanel_poll(struct input_dev *idev)
{
	struct apanel *ap = input_get_drvdata(idev);
	/* OZ992C latches at command 0, other chips at command 8. */
	u8 cmd = device_chip[APANEL_DEV_APPBTN] == CHIP_OZ992C ? 0 : 8;
	s32 data;
	int i;

	data = i2c_smbus_read_word_data(ap->client, cmd);
	if (data < 0)
		return;	/* ignore errors (due to ACPI??) */

	/* write back to clear latch */
	i2c_smbus_write_word_data(ap->client, cmd, 0);

	if (!data)
		return;

	dev_dbg(&idev->dev, APANEL ": data %#x\n", data);
	/* Each set bit corresponds to one pressed button in the keymap. */
	for (i = 0; i < idev->keycodemax; i++)
		if ((1u << i) & data)
			report_key(idev, ap->keymap[i]);
}
/* LED class set_brightness: any non-off brightness turns the mail LED on. */
static int mail_led_set(struct led_classdev *led,
			enum led_brightness value)
{
	struct apanel *ap = container_of(led, struct apanel, mail_led);
	/* 0x8000 at command 0x10 drives the LED -- per apanel.sf.net docs */
	u16 led_bits = value != LED_OFF ? 0x8000 : 0x0000;

	return i2c_smbus_write_word_data(ap->client, 0x10, led_bits);
}
/*
 * I2C probe: allocate state, clear any stale button latch, set up the
 * polled input device from the BIOS-derived device_chip[] table and
 * register the optional mail LED. All allocations are devm-managed.
 */
static int apanel_probe(struct i2c_client *client)
{
	struct apanel *ap;
	struct input_dev *idev;
	u8 cmd = device_chip[APANEL_DEV_APPBTN] == CHIP_OZ992C ? 0 : 8;
	int i, err;

	ap = devm_kzalloc(&client->dev, sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return -ENOMEM;

	idev = devm_input_allocate_device(&client->dev);
	if (!idev)
		return -ENOMEM;

	ap->idev = idev;
	ap->client = client;

	i2c_set_clientdata(client, ap);

	/* Clear the button latch before we start polling. */
	err = i2c_smbus_write_word_data(client, cmd, 0);
	if (err) {
		dev_warn(&client->dev, "smbus write error %d\n", err);
		return err;
	}

	input_set_drvdata(idev, ap);

	idev->name = APANEL_NAME " buttons";
	idev->phys = "apanel/input0";
	idev->id.bustype = BUS_HOST;

	memcpy(ap->keymap, apanel_keymap, sizeof(apanel_keymap));
	idev->keycode = ap->keymap;
	idev->keycodesize = sizeof(ap->keymap[0]);
	/* CD buttons occupy bits 8..11; only expose them if present. */
	idev->keycodemax = (device_chip[APANEL_DEV_CDBTN] != CHIP_NONE) ? 12 : 4;

	set_bit(EV_KEY, idev->evbit);
	for (i = 0; i < idev->keycodemax; i++)
		if (ap->keymap[i])
			set_bit(ap->keymap[i], idev->keybit);

	err = input_setup_polling(idev, apanel_poll);
	if (err)
		return err;

	input_set_poll_interval(idev, POLL_INTERVAL_DEFAULT);

	err = input_register_device(idev);
	if (err)
		return err;

	/* Mail LED is optional; only register it when the BIOS reported it. */
	if (device_chip[APANEL_DEV_LED] != CHIP_NONE) {
		ap->mail_led.name = "mail:blue";
		ap->mail_led.brightness_set_blocking = mail_led_set;
		err = devm_led_classdev_register(&client->dev, &ap->mail_led);
		if (err)
			return err;
	}

	return 0;
}
/* Shutdown hook: make sure the mail LED is off when the system goes down. */
static void apanel_shutdown(struct i2c_client *client)
{
	struct apanel *ap = i2c_get_clientdata(client);

	if (device_chip[APANEL_DEV_LED] != CHIP_NONE)
		led_set_brightness(&ap->mail_led, LED_OFF);
}
/* I2C device id and driver registration. */
static const struct i2c_device_id apanel_id[] = {
	{ "fujitsu_apanel", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, apanel_id);

static struct i2c_driver apanel_driver = {
	.driver = {
		.name = APANEL,
	},
	.probe		= apanel_probe,
	.shutdown	= apanel_shutdown,
	.id_table	= apanel_id,
};
/* Scan the system ROM for the signature "FJKEYINF" */
/*
 * Scan the (already mapped) BIOS ROM window for the "FJKEYINF" signature
 * on 16-byte boundaries; returns a pointer to it or NULL if absent.
 */
static __init const void __iomem *bios_signature(const void __iomem *bios)
{
	ssize_t offset;
	const unsigned char signature[] = "FJKEYINF";

	for (offset = 0; offset < 0x10000; offset += 0x10) {
		if (check_signature(bios + offset, signature,
				    sizeof(signature)-1))
			return bios + offset;
	}
	pr_notice(APANEL ": Fujitsu BIOS signature '%s' not found...\n",
		  signature);
	return NULL;
}
/*
 * Module init: snoop the BIOS ROM for the panel description table,
 * populate device_chip[] from its 4-byte-per-device entries and, if
 * anything usable was found, register the I2C driver.
 */
static int __init apanel_init(void)
{
	void __iomem *bios;
	const void __iomem *p;
	u8 devno;
	unsigned char i2c_addr;
	int found = 0;

	bios = ioremap(0xF0000, 0x10000); /* Can't fail */

	p = bios_signature(bios);
	if (!p) {
		iounmap(bios);
		return -ENODEV;
	}

	/* just use the first address */
	p += 8;
	/* Table stores 8-bit SMBus addresses; convert to 7-bit. */
	i2c_addr = readb(p + 3) >> 1;

	/* Entries are 4 bytes each; a devno of 0/0x80 ends the table. */
	for ( ; (devno = readb(p)) & 0x7f; p += 4) {
		unsigned char method, slave, chip;

		method = readb(p + 1);
		chip = readb(p + 2);
		slave = readb(p + 3) >> 1;

		if (slave != i2c_addr) {
			pr_notice(APANEL ": only one SMBus slave "
				  "address supported, skipping device...\n");
			continue;
		}

		/* translate alternative device numbers */
		switch (devno) {
		case 6:
			devno = APANEL_DEV_APPBTN;
			break;
		case 7:
			devno = APANEL_DEV_LED;
			break;
		}

		if (devno >= APANEL_DEV_MAX)
			pr_notice(APANEL ": unknown device %u found\n", devno);
		else if (device_chip[devno] != CHIP_NONE)
			pr_warn(APANEL ": duplicate entry for devno %u\n",
				devno);
		else if (method != 1 && method != 2 && method != 4) {
			pr_notice(APANEL ": unknown method %u for devno %u\n",
				  method, devno);
		} else {
			device_chip[devno] = (enum apanel_chip) chip;
			++found;
		}
	}
	iounmap(bios);

	if (found == 0) {
		pr_info(APANEL ": no input devices reported by BIOS\n");
		return -EIO;
	}

	return i2c_add_driver(&apanel_driver);
}
module_init(apanel_init);
/* Module exit: unregister the I2C driver registered in apanel_init(). */
static void __exit apanel_cleanup(void)
{
	i2c_del_driver(&apanel_driver);
}
module_exit(apanel_cleanup);
MODULE_AUTHOR("Stephen Hemminger <[email protected]>");
MODULE_DESCRIPTION(APANEL_NAME " driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dmi:*:svnFUJITSU:pnLifeBook*:pvr*:rvnFUJITSU:*");
MODULE_ALIAS("dmi:*:svnFUJITSU:pnLifebook*:pvr*:rvnFUJITSU:*");
|
linux-master
|
drivers/input/misc/apanel.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Texas Instruments' Palmas Power Button Input Driver
*
* Copyright (C) 2012-2014 Texas Instruments Incorporated - http://www.ti.com/
* Girish S Ghongdemath
* Nishanth Menon
*/
#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/palmas.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define PALMAS_LPK_TIME_MASK 0x0c
#define PALMAS_PWRON_DEBOUNCE_MASK 0x03
#define PALMAS_PWR_KEY_Q_TIME_MS 20
/**
* struct palmas_pwron - Palmas power on data
* @palmas: pointer to palmas device
* @input_dev: pointer to input device
* @input_work: work for detecting release of key
* @irq: irq that we are hooked on to
*/
struct palmas_pwron {
	struct palmas *palmas;		/* parent MFD device */
	struct input_dev *input_dev;	/* KEY_POWER reporting device */
	struct delayed_work input_work;	/* release-detection poller */
	int irq;			/* PWRON interrupt line */
};

/**
 * struct palmas_pwron_config - configuration of palmas power on
 * @long_press_time_val: value for long press h/w shutdown event
 * @pwron_debounce_val: value for debounce of power button
 */
struct palmas_pwron_config {
	u8 long_press_time_val;	/* register field value, not seconds */
	u8 pwron_debounce_val;	/* register field value, not milliseconds */
};
/**
* palmas_power_button_work() - Detects the button release event
* @work: work item to detect button release
*/
static void palmas_power_button_work(struct work_struct *work)
{
	struct palmas_pwron *pwron = container_of(work,
						  struct palmas_pwron,
						  input_work.work);
	struct input_dev *input_dev = pwron->input_dev;
	unsigned int reg;
	int error;

	/*
	 * Fix: the address-of operator here had been mangled into a
	 * mis-encoded "registered trademark" character; it must be &reg.
	 */
	error = palmas_read(pwron->palmas, PALMAS_INTERRUPT_BASE,
			    PALMAS_INT1_LINE_STATE, &reg);
	if (error) {
		dev_err(input_dev->dev.parent,
			"Cannot read palmas PWRON status: %d\n", error);
	} else if (reg & BIT(1)) {
		/* The button is released, report event. */
		input_report_key(input_dev, KEY_POWER, 0);
		input_sync(input_dev);
	} else {
		/* The button is still depressed, keep checking. */
		schedule_delayed_work(&pwron->input_work,
				msecs_to_jiffies(PALMAS_PWR_KEY_Q_TIME_MS));
	}
}
/**
* pwron_irq() - button press isr
* @irq: irq
* @palmas_pwron: pwron struct
*
* Return: IRQ_HANDLED
*/
static irqreturn_t pwron_irq(int irq, void *palmas_pwron)
{
	struct palmas_pwron *pwron = palmas_pwron;
	struct input_dev *input_dev = pwron->input_dev;

	/* Report the press immediately and flag a wakeup event. */
	input_report_key(input_dev, KEY_POWER, 1);
	pm_wakeup_event(input_dev->dev.parent, 0);
	input_sync(input_dev);

	/* Kick (or re-arm) the release-detection poller. */
	mod_delayed_work(system_wq, &pwron->input_work,
			 msecs_to_jiffies(PALMAS_PWR_KEY_Q_TIME_MS));

	return IRQ_HANDLED;
}
/**
* palmas_pwron_params_ofinit() - device tree parameter parser
* @dev: palmas button device
* @config: configuration params that this fills up
*/
static void palmas_pwron_params_ofinit(struct device *dev,
				       struct palmas_pwron_config *config)
{
	struct device_node *np;
	u32 val;
	int i, error;
	/* Tables map DT values to register field encodings (index = value). */
	static const u8 lpk_times[] = { 6, 8, 10, 12 };
	static const int pwr_on_deb_ms[] = { 15, 100, 500, 1000 };

	memset(config, 0, sizeof(*config));

	/* Default config parameters */
	config->long_press_time_val = ARRAY_SIZE(lpk_times) - 1;

	np = dev->of_node;
	if (!np)
		return;

	/* Pick the smallest table entry that satisfies the DT request. */
	error = of_property_read_u32(np, "ti,palmas-long-press-seconds", &val);
	if (!error) {
		for (i = 0; i < ARRAY_SIZE(lpk_times); i++) {
			if (val <= lpk_times[i]) {
				config->long_press_time_val = i;
				break;
			}
		}
	}

	error = of_property_read_u32(np,
				     "ti,palmas-pwron-debounce-milli-seconds",
				     &val);
	if (!error) {
		for (i = 0; i < ARRAY_SIZE(pwr_on_deb_ms); i++) {
			if (val <= pwr_on_deb_ms[i]) {
				config->pwron_debounce_val = i;
				break;
			}
		}
	}

	dev_info(dev, "h/w controlled shutdown duration=%d seconds\n",
		 lpk_times[config->long_press_time_val]);
}
/**
* palmas_pwron_probe() - probe
* @pdev: platform device for the button
*
* Return: 0 for successful probe else appropriate error
*/
static int palmas_pwron_probe(struct platform_device *pdev)
{
	struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
	struct device *dev = &pdev->dev;
	struct input_dev *input_dev;
	struct palmas_pwron *pwron;
	struct palmas_pwron_config config;
	int val;
	int error;

	/* Parse DT (or fall back to defaults) for press/debounce timing. */
	palmas_pwron_params_ofinit(dev, &config);

	pwron = kzalloc(sizeof(*pwron), GFP_KERNEL);
	if (!pwron)
		return -ENOMEM;

	input_dev = input_allocate_device();
	if (!input_dev) {
		dev_err(dev, "Can't allocate power button\n");
		error = -ENOMEM;
		goto err_free_mem;
	}

	input_dev->name = "palmas_pwron";
	input_dev->phys = "palmas_pwron/input0";
	input_dev->dev.parent = dev;

	input_set_capability(input_dev, EV_KEY, KEY_POWER);

	/*
	 * Setup default hardware shutdown option (long key press)
	 * and debounce.
	 */
	val = FIELD_PREP(PALMAS_LPK_TIME_MASK, config.long_press_time_val) |
	      FIELD_PREP(PALMAS_PWRON_DEBOUNCE_MASK, config.pwron_debounce_val);
	error = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
				   PALMAS_LONG_PRESS_KEY,
				   PALMAS_LPK_TIME_MASK |
				   PALMAS_PWRON_DEBOUNCE_MASK,
				   val);
	if (error) {
		dev_err(dev, "LONG_PRESS_KEY_UPDATE failed: %d\n", error);
		goto err_free_input;
	}

	pwron->palmas = palmas;
	pwron->input_dev = input_dev;

	/* Must be initialized before the IRQ can schedule it. */
	INIT_DELAYED_WORK(&pwron->input_work, palmas_power_button_work);

	pwron->irq = platform_get_irq(pdev, 0);
	if (pwron->irq < 0) {
		error = pwron->irq;
		goto err_free_input;
	}

	error = request_threaded_irq(pwron->irq, NULL, pwron_irq,
				     IRQF_TRIGGER_HIGH |
					IRQF_TRIGGER_LOW |
					IRQF_ONESHOT,
				     dev_name(dev), pwron);
	if (error) {
		dev_err(dev, "Can't get IRQ for pwron: %d\n", error);
		goto err_free_input;
	}

	error = input_register_device(input_dev);
	if (error) {
		dev_err(dev, "Can't register power button: %d\n", error);
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, pwron);
	device_init_wakeup(dev, true);

	return 0;

	/* goto-chain unwinds in reverse acquisition order. */
err_free_irq:
	cancel_delayed_work_sync(&pwron->input_work);
	free_irq(pwron->irq, pwron);
err_free_input:
	input_free_device(input_dev);
err_free_mem:
	kfree(pwron);
	return error;
}
/**
* palmas_pwron_remove() - Cleanup on removal
* @pdev: platform device for the button
*
* Return: 0
*/
static int palmas_pwron_remove(struct platform_device *pdev)
{
	struct palmas_pwron *pwron = platform_get_drvdata(pdev);

	/* Free the IRQ first so the work cannot be re-armed mid-teardown. */
	free_irq(pwron->irq, pwron);
	cancel_delayed_work_sync(&pwron->input_work);

	input_unregister_device(pwron->input_dev);
	kfree(pwron);

	return 0;
}
/**
* palmas_pwron_suspend() - suspend handler
* @dev: power button device
*
* Cancel all pending work items for the power button, setup irq for wakeup
*
* Return: 0
*/
static int palmas_pwron_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct palmas_pwron *pwron = platform_get_drvdata(pdev);

	/* Stop the release poller; the IRQ remains armed for wakeup. */
	cancel_delayed_work_sync(&pwron->input_work);

	if (device_may_wakeup(dev))
		enable_irq_wake(pwron->irq);

	return 0;
}
/**
* palmas_pwron_resume() - resume handler
* @dev: power button device
*
* Just disable the wakeup capability of irq here.
*
* Return: 0
*/
static int palmas_pwron_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct palmas_pwron *pwron = platform_get_drvdata(pdev);

	/* Undo the wakeup arming done in suspend. */
	if (device_may_wakeup(dev))
		disable_irq_wake(pwron->irq);

	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(palmas_pwron_pm,
				palmas_pwron_suspend, palmas_pwron_resume);

#ifdef CONFIG_OF
static const struct of_device_id of_palmas_pwr_match[] = {
	{ .compatible = "ti,palmas-pwrbutton" },
	{ },
};
MODULE_DEVICE_TABLE(of, of_palmas_pwr_match);
#endif

static struct platform_driver palmas_pwron_driver = {
	.probe	= palmas_pwron_probe,
	.remove	= palmas_pwron_remove,
	.driver	= {
		.name	= "palmas_pwrbutton",
		.of_match_table = of_match_ptr(of_palmas_pwr_match),
		.pm	= pm_sleep_ptr(&palmas_pwron_pm),
	},
};
module_platform_driver(palmas_pwron_driver);
MODULE_ALIAS("platform:palmas-pwrbutton");
MODULE_DESCRIPTION("Palmas Power Button");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Texas Instruments Inc.");
|
linux-master
|
drivers/input/misc/palmas-pwrbutton.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* m68k beeper driver for Linux
*
* Copyright (c) 2002 Richard Zidlicky
* Copyright (c) 2002 Vojtech Pavlik
* Copyright (c) 1992 Orest Zborowski
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <asm/machdep.h>
#include <asm/io.h>
MODULE_AUTHOR("Richard Zidlicky <[email protected]>");
MODULE_DESCRIPTION("m68k beeper driver");
MODULE_LICENSE("GPL");
static struct platform_device *m68kspkr_platform_device;
/*
 * Input event handler: translate SND_BELL/SND_TONE into a mach_beep()
 * call. A value of 0 (or out of the audible range) silences the beeper.
 */
static int m68kspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
{
	unsigned int count = 0;

	if (type != EV_SND)
		return -1;

	switch (code) {
		case SND_BELL: if (value) value = 1000;
		/* fall through - a bell is just a 1 kHz tone */
		case SND_TONE: break;
		default: return -1;
	}

	/* Convert frequency to a PIT-style divisor of the 1.193182 MHz clock. */
	if (value > 20 && value < 32767)
		count = 1193182 / value;

	mach_beep(count, -1);

	return 0;
}
/* Register the beeper as an EV_SND input device. */
static int m68kspkr_probe(struct platform_device *dev)
{
	struct input_dev *input_dev;
	int err;

	input_dev = input_allocate_device();
	if (!input_dev)
		return -ENOMEM;

	input_dev->name = "m68k beeper";
	input_dev->phys = "m68k/generic";
	input_dev->id.bustype = BUS_HOST;
	input_dev->id.vendor  = 0x001f;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0100;
	input_dev->dev.parent = &dev->dev;

	input_dev->evbit[0] = BIT_MASK(EV_SND);
	input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
	input_dev->event = m68kspkr_event;

	err = input_register_device(input_dev);
	if (err) {
		input_free_device(input_dev);
		return err;
	}

	platform_set_drvdata(dev, input_dev);

	return 0;
}
static int m68kspkr_remove(struct platform_device *dev)
{
	struct input_dev *input_dev = platform_get_drvdata(dev);

	input_unregister_device(input_dev);
	/* turn off the speaker (handler ignores its dev argument) */
	m68kspkr_event(NULL, EV_SND, SND_BELL, 0);

	return 0;
}
static void m68kspkr_shutdown(struct platform_device *dev)
{
	/* turn off the speaker */
	m68kspkr_event(NULL, EV_SND, SND_BELL, 0);
}
/* Platform driver for the board-level beeper pseudo-device. */
static struct platform_driver m68kspkr_platform_driver = {
	.driver		= {
		.name	= "m68kspkr",
	},
	.probe		= m68kspkr_probe,
	.remove		= m68kspkr_remove,
	.shutdown	= m68kspkr_shutdown,
};
/*
 * Module init: bail out if the machine has no low-level beep hook,
 * otherwise register both the driver and a matching platform device.
 */
static int __init m68kspkr_init(void)
{
	int err;

	if (!mach_beep) {
		printk(KERN_INFO "m68kspkr: no lowlevel beep support\n");
		return -ENODEV;
	}

	err = platform_driver_register(&m68kspkr_platform_driver);
	if (err)
		return err;

	m68kspkr_platform_device = platform_device_alloc("m68kspkr", -1);
	if (!m68kspkr_platform_device) {
		err = -ENOMEM;
		goto err_unregister_driver;
	}

	err = platform_device_add(m68kspkr_platform_device);
	if (err)
		goto err_free_device;

	return 0;

	/* Unwind in reverse order of the registrations above. */
 err_free_device:
	platform_device_put(m68kspkr_platform_device);
 err_unregister_driver:
	platform_driver_unregister(&m68kspkr_platform_driver);

	return err;
}
/* Module exit: tear down the device first, then the driver. */
static void __exit m68kspkr_exit(void)
{
	platform_device_unregister(m68kspkr_platform_device);
	platform_driver_unregister(&m68kspkr_platform_driver);
}

module_init(m68kspkr_init);
module_exit(m68kspkr_exit);
|
linux-master
|
drivers/input/misc/m68kspkr.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2011 Bosch Sensortec GmbH
* Copyright (c) 2011 Unixphere
*
* This driver adds support for Bosch Sensortec's digital acceleration
* sensors BMA150 and SMB380.
* The SMB380 is fully compatible with BMA150 and only differs in packaging.
*
* The datasheet for the BMA150 chip can be found here:
* http://www.bosch-sensortec.com/content/language1/downloads/BST-BMA150-DS000-07.pdf
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/bma150.h>
#define ABSMAX_ACC_VAL 0x01FF
#define ABSMIN_ACC_VAL -(ABSMAX_ACC_VAL)
/* Each axis is represented by a 2-byte data word */
#define BMA150_XYZ_DATA_SIZE 6
/* Input poll interval in milliseconds */
#define BMA150_POLL_INTERVAL 10
#define BMA150_POLL_MAX 200
#define BMA150_POLL_MIN 0
#define BMA150_MODE_NORMAL 0
#define BMA150_MODE_SLEEP 2
#define BMA150_MODE_WAKE_UP 3
/* Data register addresses */
#define BMA150_DATA_0_REG 0x00
#define BMA150_DATA_1_REG 0x01
#define BMA150_DATA_2_REG 0x02
/* Control register addresses */
#define BMA150_CTRL_0_REG 0x0A
#define BMA150_CTRL_1_REG 0x0B
#define BMA150_CTRL_2_REG 0x14
#define BMA150_CTRL_3_REG 0x15
/* Configuration/Setting register addresses */
#define BMA150_CFG_0_REG 0x0C
#define BMA150_CFG_1_REG 0x0D
#define BMA150_CFG_2_REG 0x0E
#define BMA150_CFG_3_REG 0x0F
#define BMA150_CFG_4_REG 0x10
#define BMA150_CFG_5_REG 0x11
#define BMA150_CHIP_ID 2
#define BMA150_CHIP_ID_REG BMA150_DATA_0_REG
#define BMA150_ACC_X_LSB_REG BMA150_DATA_2_REG
#define BMA150_SLEEP_POS 0
#define BMA150_SLEEP_MSK 0x01
#define BMA150_SLEEP_REG BMA150_CTRL_0_REG
#define BMA150_BANDWIDTH_POS 0
#define BMA150_BANDWIDTH_MSK 0x07
#define BMA150_BANDWIDTH_REG BMA150_CTRL_2_REG
#define BMA150_RANGE_POS 3
#define BMA150_RANGE_MSK 0x18
#define BMA150_RANGE_REG BMA150_CTRL_2_REG
#define BMA150_WAKE_UP_POS 0
#define BMA150_WAKE_UP_MSK 0x01
#define BMA150_WAKE_UP_REG BMA150_CTRL_3_REG
#define BMA150_SW_RES_POS 1
#define BMA150_SW_RES_MSK 0x02
#define BMA150_SW_RES_REG BMA150_CTRL_0_REG
/* Any-motion interrupt register fields */
#define BMA150_ANY_MOTION_EN_POS 6
#define BMA150_ANY_MOTION_EN_MSK 0x40
#define BMA150_ANY_MOTION_EN_REG BMA150_CTRL_1_REG
#define BMA150_ANY_MOTION_DUR_POS 6
#define BMA150_ANY_MOTION_DUR_MSK 0xC0
#define BMA150_ANY_MOTION_DUR_REG BMA150_CFG_5_REG
#define BMA150_ANY_MOTION_THRES_REG BMA150_CFG_4_REG
/* Advanced interrupt register fields */
#define BMA150_ADV_INT_EN_POS 6
#define BMA150_ADV_INT_EN_MSK 0x40
#define BMA150_ADV_INT_EN_REG BMA150_CTRL_3_REG
/* High-G interrupt register fields */
#define BMA150_HIGH_G_EN_POS 1
#define BMA150_HIGH_G_EN_MSK 0x02
#define BMA150_HIGH_G_EN_REG BMA150_CTRL_1_REG
#define BMA150_HIGH_G_HYST_POS 3
#define BMA150_HIGH_G_HYST_MSK 0x38
#define BMA150_HIGH_G_HYST_REG BMA150_CFG_5_REG
#define BMA150_HIGH_G_DUR_REG BMA150_CFG_3_REG
#define BMA150_HIGH_G_THRES_REG BMA150_CFG_2_REG
/* Low-G interrupt register fields */
#define BMA150_LOW_G_EN_POS 0
#define BMA150_LOW_G_EN_MSK 0x01
#define BMA150_LOW_G_EN_REG BMA150_CTRL_1_REG
#define BMA150_LOW_G_HYST_POS 0
#define BMA150_LOW_G_HYST_MSK 0x07
#define BMA150_LOW_G_HYST_REG BMA150_CFG_5_REG
#define BMA150_LOW_G_DUR_REG BMA150_CFG_1_REG
#define BMA150_LOW_G_THRES_REG BMA150_CFG_0_REG
struct bma150_data {
	struct i2c_client *client;	/* I2C link to the sensor */
	struct input_dev *input;	/* polled or IRQ-driven input device */
	u8 mode;			/* cached BMA150_MODE_* power state */
};
/*
* The settings for the given range, bandwidth and interrupt features
* are stated and verified by Bosch Sensortec where they are configured
* to provide a generic sensitivity performance.
*/
/* Vendor-recommended defaults used when no platform data is supplied. */
static const struct bma150_cfg default_cfg = {
	.any_motion_int = 1,
	.hg_int = 1,
	.lg_int = 1,
	.any_motion_dur = 0,
	.any_motion_thres = 0,
	.hg_hyst = 0,
	.hg_dur = 150,
	.hg_thres = 160,
	.lg_hyst = 0,
	.lg_dur = 150,
	.lg_thres = 20,
	.range = BMA150_RANGE_2G,
	.bandwidth = BMA150_BW_50HZ
};
/* Single-register write; returns 0 or a negative i2c/smbus error. */
static int bma150_write_byte(struct i2c_client *client, u8 reg, u8 val)
{
	s32 ret;

	/* As per specification, disable irq in between register writes */
	if (client->irq)
		disable_irq_nosync(client->irq);

	ret = i2c_smbus_write_byte_data(client, reg, val);
	if (client->irq)
		enable_irq(client->irq);

	return ret;
}
/*
 * Read-modify-write a register field: clears the bits covered by @mask
 * and inserts @val shifted into position. Returns 0 or a negative errno.
 */
static int bma150_set_reg_bits(struct i2c_client *client,
					int val, int shift, u8 mask, u8 reg)
{
	int old, merged;

	old = i2c_smbus_read_byte_data(client, reg);
	if (old < 0)
		return old;

	merged = (old & ~mask) | ((val << shift) & mask);

	return bma150_write_byte(client, reg, merged);
}
/*
 * Switch the sensor power mode (normal/sleep/wake-up) by programming the
 * WAKE_UP and SLEEP register fields, then cache the new mode.
 */
static int bma150_set_mode(struct bma150_data *bma150, u8 mode)
{
	int error;

	error = bma150_set_reg_bits(bma150->client, mode, BMA150_WAKE_UP_POS,
				    BMA150_WAKE_UP_MSK, BMA150_WAKE_UP_REG);
	if (error)
		return error;

	error = bma150_set_reg_bits(bma150->client, mode, BMA150_SLEEP_POS,
				    BMA150_SLEEP_MSK, BMA150_SLEEP_REG);
	if (error)
		return error;

	/* Give the part time to wake up before it is used. */
	if (mode == BMA150_MODE_NORMAL)
		usleep_range(2000, 2100);

	bma150->mode = mode;
	return 0;
}
/* Trigger a software reset and wait for the chip to come back. */
static int bma150_soft_reset(struct bma150_data *bma150)
{
	int error;

	error = bma150_set_reg_bits(bma150->client, 1, BMA150_SW_RES_POS,
				    BMA150_SW_RES_MSK, BMA150_SW_RES_REG);
	if (error)
		return error;

	usleep_range(2000, 2100);
	return 0;
}
/* Program the measurement range field (BMA150_RANGE_*). */
static int bma150_set_range(struct bma150_data *bma150, u8 range)
{
	return bma150_set_reg_bits(bma150->client, range, BMA150_RANGE_POS,
				   BMA150_RANGE_MSK, BMA150_RANGE_REG);
}

/* Program the output bandwidth field (BMA150_BW_*). */
static int bma150_set_bandwidth(struct bma150_data *bma150, u8 bw)
{
	return bma150_set_reg_bits(bma150->client, bw, BMA150_BANDWIDTH_POS,
				   BMA150_BANDWIDTH_MSK, BMA150_BANDWIDTH_REG);
}
/*
 * Configure the low-G interrupt: hysteresis, duration and threshold are
 * written first, the enable bit last so the settings are consistent.
 */
static int bma150_set_low_g_interrupt(struct bma150_data *bma150,
					u8 enable, u8 hyst, u8 dur, u8 thres)
{
	int error;

	error = bma150_set_reg_bits(bma150->client, hyst,
				    BMA150_LOW_G_HYST_POS, BMA150_LOW_G_HYST_MSK,
				    BMA150_LOW_G_HYST_REG);
	if (error)
		return error;

	error = bma150_write_byte(bma150->client, BMA150_LOW_G_DUR_REG, dur);
	if (error)
		return error;

	error = bma150_write_byte(bma150->client, BMA150_LOW_G_THRES_REG, thres);
	if (error)
		return error;

	return bma150_set_reg_bits(bma150->client, !!enable,
				   BMA150_LOW_G_EN_POS, BMA150_LOW_G_EN_MSK,
				   BMA150_LOW_G_EN_REG);
}
/* Configure the high-G interrupt; same parameter order as the low-G case. */
static int bma150_set_high_g_interrupt(struct bma150_data *bma150,
					u8 enable, u8 hyst, u8 dur, u8 thres)
{
	int error;

	error = bma150_set_reg_bits(bma150->client, hyst,
				    BMA150_HIGH_G_HYST_POS, BMA150_HIGH_G_HYST_MSK,
				    BMA150_HIGH_G_HYST_REG);
	if (error)
		return error;

	error = bma150_write_byte(bma150->client,
				  BMA150_HIGH_G_DUR_REG, dur);
	if (error)
		return error;

	error = bma150_write_byte(bma150->client,
				  BMA150_HIGH_G_THRES_REG, thres);
	if (error)
		return error;

	return bma150_set_reg_bits(bma150->client, !!enable,
				   BMA150_HIGH_G_EN_POS, BMA150_HIGH_G_EN_MSK,
				   BMA150_HIGH_G_EN_REG);
}
/*
 * Configure the any-motion interrupt. Note it requires both the advanced
 * interrupt enable and the any-motion enable bits to be set.
 */
static int bma150_set_any_motion_interrupt(struct bma150_data *bma150,
						u8 enable, u8 dur, u8 thres)
{
	int error;

	error = bma150_set_reg_bits(bma150->client, dur,
				    BMA150_ANY_MOTION_DUR_POS,
				    BMA150_ANY_MOTION_DUR_MSK,
				    BMA150_ANY_MOTION_DUR_REG);
	if (error)
		return error;

	error = bma150_write_byte(bma150->client,
				  BMA150_ANY_MOTION_THRES_REG, thres);
	if (error)
		return error;

	error = bma150_set_reg_bits(bma150->client, !!enable,
				    BMA150_ADV_INT_EN_POS, BMA150_ADV_INT_EN_MSK,
				    BMA150_ADV_INT_EN_REG);
	if (error)
		return error;

	return bma150_set_reg_bits(bma150->client, !!enable,
				   BMA150_ANY_MOTION_EN_POS,
				   BMA150_ANY_MOTION_EN_MSK,
				   BMA150_ANY_MOTION_EN_REG);
}
/*
 * Read one X/Y/Z sample block and push it to the input layer.
 * Each axis is 10 bits: two MSBs of the even byte hold the low bits,
 * the odd byte holds the high bits; values are sign-extended from bit 9.
 */
static void bma150_report_xyz(struct bma150_data *bma150)
{
	u8 data[BMA150_XYZ_DATA_SIZE];
	s16 x, y, z;
	s32 ret;

	ret = i2c_smbus_read_i2c_block_data(bma150->client,
			BMA150_ACC_X_LSB_REG, BMA150_XYZ_DATA_SIZE, data);
	if (ret != BMA150_XYZ_DATA_SIZE)
		return;	/* short or failed read: drop this sample */

	x = ((0xc0 & data[0]) >> 6) | (data[1] << 2);
	y = ((0xc0 & data[2]) >> 6) | (data[3] << 2);
	z = ((0xc0 & data[4]) >> 6) | (data[5] << 2);

	x = sign_extend32(x, 9);
	y = sign_extend32(y, 9);
	z = sign_extend32(z, 9);

	input_report_abs(bma150->input, ABS_X, x);
	input_report_abs(bma150->input, ABS_Y, y);
	input_report_abs(bma150->input, ABS_Z, z);
	input_sync(bma150->input);
}
/* Threaded IRQ handler: one data-ready event == one sample report. */
static irqreturn_t bma150_irq_thread(int irq, void *dev)
{
	bma150_report_xyz(dev);

	return IRQ_HANDLED;
}

/* Poll-mode callback used when no interrupt line is wired up. */
static void bma150_poll(struct input_dev *input)
{
	struct bma150_data *bma150 = input_get_drvdata(input);

	bma150_report_xyz(bma150);
}
/* input open(): resume the device via runtime PM, or wake it manually. */
static int bma150_open(struct input_dev *input)
{
	struct bma150_data *bma150 = input_get_drvdata(input);
	int error;

	error = pm_runtime_get_sync(&bma150->client->dev);
	if (error < 0 && error != -ENOSYS)
		return error;

	/*
	 * See if runtime PM woke up the device. If runtime PM
	 * is disabled we need to do it ourselves.
	 */
	if (bma150->mode != BMA150_MODE_NORMAL) {
		error = bma150_set_mode(bma150, BMA150_MODE_NORMAL);
		if (error)
			return error;
	}

	return 0;
}

/* input close(): drop the runtime PM reference and sleep the part. */
static void bma150_close(struct input_dev *input)
{
	struct bma150_data *bma150 = input_get_drvdata(input);

	pm_runtime_put_sync(&bma150->client->dev);

	if (bma150->mode != BMA150_MODE_SLEEP)
		bma150_set_mode(bma150, BMA150_MODE_SLEEP);
}
/*
 * Full chip bring-up: soft reset, bandwidth/range configuration, the
 * three interrupt sources (only when an IRQ line exists), then park the
 * device in sleep mode until the input device is opened.
 */
static int bma150_initialize(struct bma150_data *bma150,
			     const struct bma150_cfg *cfg)
{
	int error;

	error = bma150_soft_reset(bma150);
	if (error)
		return error;

	error = bma150_set_bandwidth(bma150, cfg->bandwidth);
	if (error)
		return error;

	error = bma150_set_range(bma150, cfg->range);
	if (error)
		return error;

	if (bma150->client->irq) {
		error = bma150_set_any_motion_interrupt(bma150,
					cfg->any_motion_int,
					cfg->any_motion_dur,
					cfg->any_motion_thres);
		if (error)
			return error;

		error = bma150_set_high_g_interrupt(bma150,
					cfg->hg_int, cfg->hg_hyst,
					cfg->hg_dur, cfg->hg_thres);
		if (error)
			return error;

		error = bma150_set_low_g_interrupt(bma150,
					cfg->lg_int, cfg->lg_hyst,
					cfg->lg_dur, cfg->lg_thres);
		if (error)
			return error;
	}

	return bma150_set_mode(bma150, BMA150_MODE_SLEEP);
}
/*
 * I2C probe: verify the chip id, initialize the hardware from platform
 * data (or the built-in defaults), then register either a polled or an
 * interrupt-driven input device depending on client->irq.
 */
static int bma150_probe(struct i2c_client *client)
{
	const struct bma150_platform_data *pdata =
			dev_get_platdata(&client->dev);
	const struct bma150_cfg *cfg;
	struct bma150_data *bma150;
	struct input_dev *idev;
	int chip_id;
	int error;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		dev_err(&client->dev, "i2c_check_functionality error\n");
		return -EIO;
	}

	chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG);
	if (chip_id != BMA150_CHIP_ID) {
		dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id);
		return -EINVAL;
	}

	bma150 = devm_kzalloc(&client->dev, sizeof(*bma150), GFP_KERNEL);
	if (!bma150)
		return -ENOMEM;

	bma150->client = client;

	if (pdata) {
		/* Board code may need to route the IRQ GPIO first. */
		if (pdata->irq_gpio_cfg) {
			error = pdata->irq_gpio_cfg();
			if (error) {
				dev_err(&client->dev,
					"IRQ GPIO conf. error %d, error %d\n",
					client->irq, error);
				return error;
			}
		}
		cfg = &pdata->cfg;
	} else {
		cfg = &default_cfg;
	}

	error = bma150_initialize(bma150, cfg);
	if (error)
		return error;

	idev = devm_input_allocate_device(&bma150->client->dev);
	if (!idev)
		return -ENOMEM;

	input_set_drvdata(idev, bma150);
	bma150->input = idev;

	idev->name = BMA150_DRIVER;
	idev->phys = BMA150_DRIVER "/input0";
	idev->id.bustype = BUS_I2C;

	idev->open = bma150_open;
	idev->close = bma150_close;

	input_set_abs_params(idev, ABS_X, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
	input_set_abs_params(idev, ABS_Y, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
	input_set_abs_params(idev, ABS_Z, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);

	/* No interrupt line: fall back to periodic polling. */
	if (client->irq <= 0) {
		error = input_setup_polling(idev, bma150_poll);
		if (error)
			return error;

		input_set_poll_interval(idev, BMA150_POLL_INTERVAL);
		input_set_min_poll_interval(idev, BMA150_POLL_MIN);
		input_set_max_poll_interval(idev, BMA150_POLL_MAX);
	}

	error = input_register_device(idev);
	if (error)
		return error;

	if (client->irq > 0) {
		error = devm_request_threaded_irq(&client->dev, client->irq,
					NULL, bma150_irq_thread,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					BMA150_DRIVER, bma150);
		if (error) {
			dev_err(&client->dev,
				"irq request failed %d, error %d\n",
				client->irq, error);
			return error;
		}
	}

	i2c_set_clientdata(client, bma150);

	pm_runtime_enable(&client->dev);

	return 0;
}
/* Remove: only runtime PM needs undoing; everything else is devm-managed. */
static void bma150_remove(struct i2c_client *client)
{
	pm_runtime_disable(&client->dev);
}

/* System/runtime suspend: put the sensor into its low-power sleep mode. */
static int __maybe_unused bma150_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct bma150_data *bma150 = i2c_get_clientdata(client);

	return bma150_set_mode(bma150, BMA150_MODE_SLEEP);
}

/* System/runtime resume: return the sensor to normal measurement mode. */
static int __maybe_unused bma150_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct bma150_data *bma150 = i2c_get_clientdata(client);

	return bma150_set_mode(bma150, BMA150_MODE_NORMAL);
}
/* Same handlers serve both system sleep and runtime PM. */
static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL);

/* SMB380 and BMA023 are register-compatible variants of the BMA150. */
static const struct i2c_device_id bma150_id[] = {
	{ "bma150", 0 },
	{ "smb380", 0 },
	{ "bma023", 0 },
	{ }
};

MODULE_DEVICE_TABLE(i2c, bma150_id);

static struct i2c_driver bma150_driver = {
	.driver = {
		.name	= BMA150_DRIVER,
		.pm	= &bma150_pm,
	},
	.class		= I2C_CLASS_HWMON,
	.id_table	= bma150_id,
	.probe		= bma150_probe,
	.remove		= bma150_remove,
};

module_i2c_driver(bma150_driver);
MODULE_AUTHOR("Albert Zhang <[email protected]>");
MODULE_DESCRIPTION("BMA150 driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/input/misc/bma150.c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.